diff --git a/.github/ISSUE_TEMPLATE/docs_improvement.md b/.github/ISSUE_TEMPLATE/docs_improvement.md index f2dc170a8185f..4bc84c5fc9eb7 100644 --- a/.github/ISSUE_TEMPLATE/docs_improvement.md +++ b/.github/ISSUE_TEMPLATE/docs_improvement.md @@ -10,4 +10,4 @@ assignees: '' Provide a link to the documentation and describe how it could be improved. In what ways is it incomplete, incorrect, or misleading? -If you have suggestions on exactly what the new docs should say, feel free to include them here. Alternatively, make the changes yourself and [create a pull request](https://bevyengine.org/learn/book/contributing/code/) instead. +If you have suggestions on exactly what the new docs should say, feel free to include them here. Alternatively, make the changes yourself and [create a pull request](https://bevyengine.org/learn/contribute/helping-out/writing-docs/) instead. diff --git a/.github/example-run/testbed_2d.ron b/.github/example-run/testbed_2d.ron index 467e2fe98f99f..3e2b22dd983eb 100644 --- a/.github/example-run/testbed_2d.ron +++ b/.github/example-run/testbed_2d.ron @@ -1,12 +1,4 @@ ( events: [ - (100, Screenshot), - (200, Custom("switch_scene")), - (300, Screenshot), - (400, Custom("switch_scene")), - (500, Screenshot), - (600, Custom("switch_scene")), - (700, Screenshot), - (800, AppExit), ] ) diff --git a/.github/example-run/testbed_3d.ron b/.github/example-run/testbed_3d.ron index 467e2fe98f99f..3e2b22dd983eb 100644 --- a/.github/example-run/testbed_3d.ron +++ b/.github/example-run/testbed_3d.ron @@ -1,12 +1,4 @@ ( events: [ - (100, Screenshot), - (200, Custom("switch_scene")), - (300, Screenshot), - (400, Custom("switch_scene")), - (500, Screenshot), - (600, Custom("switch_scene")), - (700, Screenshot), - (800, AppExit), ] ) diff --git a/.github/example-run/testbed_ui.ron b/.github/example-run/testbed_ui.ron index 579f791d66400..3e2b22dd983eb 100644 --- a/.github/example-run/testbed_ui.ron +++ b/.github/example-run/testbed_ui.ron @@ -1,6 +1,4 @@ ( events: 
[ - (100, Screenshot), - (200, AppExit), ] ) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 82446ac5b4dd0..c1e5575a524fc 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -36,11 +36,3 @@ println!("My super cool code."); ``` - -## Migration Guide - -> This section is optional. If there are no breaking changes, you can delete this section. - -- If this PR is a breaking change (relative to the last release of Bevy), describe how a user might need to migrate their code to support these changes -- Simply adding new functionality is not a breaking change. -- Fixing behavior that was definitely a bug, rather than a questionable design choice is not a breaking change. diff --git a/.github/start-wasm-example/package.json b/.github/start-wasm-example/package.json index 1ce7e5e2dfb85..5ab340b2f95a4 100644 --- a/.github/start-wasm-example/package.json +++ b/.github/start-wasm-example/package.json @@ -8,9 +8,9 @@ "author": "", "license": "ISC", "devDependencies": { - "@playwright/test": "^1.28.1" + "@playwright/test": "^1.49.1" }, "dependencies": { "dotenv": "^16.0.1" } -} +} \ No newline at end of file diff --git a/.github/workflows/action-on-PR-labeled.yml b/.github/workflows/action-on-PR-labeled.yml index 9887494a487d5..9e5835c1f79ea 100644 --- a/.github/workflows/action-on-PR-labeled.yml +++ b/.github/workflows/action-on-PR-labeled.yml @@ -12,19 +12,63 @@ permissions: pull-requests: 'write' jobs: - comment-on-breaking-change-label: + comment-on-migration-guide-label: runs-on: ubuntu-latest - if: github.event.label.name == 'M-Needs-Migration-Guide' && !contains(github.event.pull_request.body, '## Migration Guide') + if: github.event.label.name == 'M-Needs-Migration-Guide' steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 2 + - name: Get changes + id: get_changes + shell: bash {0} + run: | + git fetch --depth=1 origin $BASE_SHA + git diff 
--exit-code $BASE_SHA $HEAD_SHA -- ./release-content/migration-guides + echo "found_changes=$?" >> $GITHUB_OUTPUT + env: + BASE_SHA: ${{ github.event.pull_request.base.sha }} + HEAD_SHA: ${{ github.event.pull_request.head.sha }} + - uses: actions/github-script@v7 + if: steps.get_changes.outputs.found_changes == '0' + with: + script: | + await github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `It looks like your PR is a breaking change, but **you didn't provide a migration guide**. + + Please review the [instructions for writing migration guides](https://github.com/bevyengine/bevy/tree/main/release-content/migration_guides.md), then expand or revise the content in the [migration guides directory](https://github.com/bevyengine/bevy/tree/main/release-content/migration-guides) to reflect your changes.` + }) + comment-on-release-note-label: + runs-on: ubuntu-latest + if: github.event.label.name == 'M-Needs-Release-Note' + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 2 + - name: Get changes + id: get_changes + shell: bash {0} + run: | + git fetch --depth=1 origin $BASE_SHA + git diff --exit-code $BASE_SHA $HEAD_SHA -- ./release-content/release-notes + echo "found_changes=$?" >> $GITHUB_OUTPUT + env: + BASE_SHA: ${{ github.event.pull_request.base.sha }} + HEAD_SHA: ${{ github.event.pull_request.head.sha }} - uses: actions/github-script@v7 + if: steps.get_changes.outputs.found_changes == '0' with: script: | await github.rest.issues.createComment({ issue_number: context.issue.number, owner: context.repo.owner, repo: context.repo.repo, - body: `It looks like your PR is a breaking change, but you didn't provide a migration guide. + body: `It looks like your PR has been selected for a highlight in the next release blog post, but **you didn't provide a release note**. 
- Could you add some context on what users should update when this change get released in a new version of Bevy? - It will be used to help writing the migration guide for the version. Putting it after a \`## Migration Guide\` will help it get automatically picked up by our tooling.` + Please review the [instructions for writing release notes](https://github.com/bevyengine/bevy/tree/main/release-content/release_notes.md), then expand or revise the content in the [release notes directory](https://github.com/bevyengine/bevy/tree/main/release-content/release-notes) to showcase your changes.` }) diff --git a/.github/workflows/ci-comment-failures.yml b/.github/workflows/ci-comment-failures.yml index d926390993e28..f1fb5a54be563 100644 --- a/.github/workflows/ci-comment-failures.yml +++ b/.github/workflows/ci-comment-failures.yml @@ -48,8 +48,21 @@ jobs: return "true" - run: unzip missing-examples.zip if: ${{ steps.find-artifact.outputs.result == 'true' }} - - name: 'Comment on PR' + - name: "Check if last comment is already from actions" if: ${{ steps.find-artifact.outputs.result == 'true' }} + id: check-last-comment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR=`cat ./NR` + if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Comment on PR" + if: ${{ steps.find-artifact.outputs.result == 'true' && steps.check-last-comment.outputs.result == 'false' }} uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -106,8 +119,21 @@ jobs: return "true" - run: unzip missing-features.zip if: ${{ steps.find-artifact.outputs.result == 'true' }} - - name: 'Comment on PR' + - name: "Check if last comment is already from actions" if: ${{ steps.find-artifact.outputs.result == 'true' }} + id: check-last-comment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR=`cat ./NR` + 
if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Comment on PR" + if: ${{ steps.find-artifact.outputs.result == 'true' && steps.check-last-comment.outputs.result == 'false' }} uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -164,8 +190,21 @@ jobs: return "true" - run: unzip msrv.zip if: ${{ steps.find-artifact.outputs.result == 'true' }} - - name: 'Comment on PR' + - name: "Check if last comment is already from actions" if: ${{ steps.find-artifact.outputs.result == 'true' }} + id: check-last-comment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR=`cat ./NR` + if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Comment on PR" + if: ${{ steps.find-artifact.outputs.result == 'true' && steps.check-last-comment.outputs.result == 'false' }} uses: actions/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} @@ -178,64 +217,3 @@ jobs: issue_number: issue_number, body: 'Your PR increases Bevy Minimum Supported Rust Version. Please update the `rust-version` field in the root Cargo.toml file.' 
}); - - make-macos-screenshots-available: - runs-on: ubuntu-latest - timeout-minutes: 30 - outputs: - branch-name: ${{ steps.branch-name.outputs.result }} - steps: - - name: 'Download artifact' - id: find-artifact - uses: actions/github-script@v7 - with: - result-encoding: string - script: | - var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ - owner: context.repo.owner, - repo: context.repo.repo, - run_id: ${{github.event.workflow_run.id }}, - }); - var matchArtifacts = artifacts.data.artifacts.filter((artifact) => { - return artifact.name == "screenshots-macos" - }); - if (matchArtifacts.length == 0) { return "false" } - var matchArtifact = matchArtifacts[0]; - var download = await github.rest.actions.downloadArtifact({ - owner: context.repo.owner, - repo: context.repo.repo, - artifact_id: matchArtifact.id, - archive_format: 'zip', - }); - var fs = require('fs'); - fs.writeFileSync('${{github.workspace}}/screenshots-macos.zip', Buffer.from(download.data)); - return "true" - - name: prepare artifact folder - run: | - unzip screenshots-macos.zip - mkdir screenshots - mv screenshots-* screenshots/ - - name: save screenshots - uses: actions/upload-artifact@v4 - with: - name: screenshots-macos - path: screenshots - - name: branch name - id: branch-name - run: | - if [ -f PR ]; then - echo "result=PR-$(cat PR)-${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT - else - echo "result=${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT - fi - - compare-macos-screenshots: - name: Compare macOS screenshots - needs: [make-macos-screenshots-available] - uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml - with: - commit: ${{ github.event.workflow_run.head_sha }} - branch: ${{ needs.make-macos-screenshots-available.outputs.branch-name }} - artifact: screenshots-macos - os: macos - secrets: inherit diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d8a6c2de60929..f57f403115816 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -5,13 +5,16 @@ on: pull_request: push: branches: - - main - release-* env: CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: 0 + CARGO_PROFILE_TEST_DEBUG: 0 + CARGO_PROFILE_DEV_DEBUG: 0 # If nightly is breaking CI, modify this variable to target a specific nightly version. NIGHTLY_TOOLCHAIN: nightly + RUSTFLAGS: "-D warnings" concurrency: group: ${{github.workflow}}-${{github.ref}} @@ -42,7 +45,6 @@ jobs: # See tools/ci/src/main.rs for the commands this runs run: cargo run -p ci -- test env: - CARGO_INCREMENTAL: 0 RUSTFLAGS: "-C debuginfo=0 -D warnings" ci: @@ -149,7 +151,56 @@ jobs: - name: Install Linux dependencies uses: ./.github/actions/install-linux-deps - name: Check Compile - run: cargo run -p ci -- compile-check-no-std + run: cargo check -p bevy --no-default-features --features default_no_std --target x86_64-unknown-none + check-compiles-no-std-portable-atomic: + runs-on: ubuntu-latest + timeout-minutes: 30 + needs: ci + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + crates/bevy_ecs_compile_fail_tests/target/ + crates/bevy_reflect_compile_fail_tests/target/ + key: ${{ runner.os }}-cargo-check-compiles-no-std-portable-atomic-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable + with: + targets: thumbv6m-none-eabi + - name: Install Linux dependencies + uses: ./.github/actions/install-linux-deps + - name: Check Compile + run: cargo check -p bevy --no-default-features --features default_no_std --target thumbv6m-none-eabi + + check-compiles-no-std-examples: + runs-on: ubuntu-latest + timeout-minutes: 30 + needs: ci + steps: + - uses: actions/checkout@v4 + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + crates/bevy_ecs_compile_fail_tests/target/ + 
crates/bevy_reflect_compile_fail_tests/target/ + key: ${{ runner.os }}-cargo-check-compiles-no-std-examples-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable + with: + targets: x86_64-unknown-none + - name: Install Linux dependencies + uses: ./.github/actions/install-linux-deps + - name: Check Compile + run: cd examples/no_std/library && cargo check --no-default-features --features libm,critical-section --target x86_64-unknown-none build-wasm: runs-on: ubuntu-latest @@ -195,7 +246,7 @@ jobs: - name: Check wasm run: cargo check --target wasm32-unknown-unknown -Z build-std=std,panic_abort env: - RUSTFLAGS: "-C target-feature=+atomics,+bulk-memory" + RUSTFLAGS: "-C target-feature=+atomics,+bulk-memory -D warnings" markdownlint: runs-on: ubuntu-latest @@ -208,7 +259,7 @@ jobs: # Full git history is needed to get a proper list of changed files within `super-linter` fetch-depth: 0 - name: Run Markdown Lint - uses: docker://ghcr.io/github/super-linter:slim-v4 + uses: super-linter/super-linter/slim@v7.3.0 env: MULTI_STATUS: false VALIDATE_ALL_CODEBASE: false @@ -241,7 +292,7 @@ jobs: steps: - uses: actions/checkout@v4 - name: Check for typos - uses: crate-ci/typos@v1.28.4 + uses: crate-ci/typos@v1.31.1 - name: Typos info if: failure() run: | @@ -251,49 +302,6 @@ jobs: echo 'if you use VSCode, you can also install `Typos Spell Checker' echo 'You can find the extension here: https://marketplace.visualstudio.com/items?itemName=tekumara.typos-vscode' - run-examples-macos-metal: - runs-on: macos-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - name: Disable audio - # Disable audio through a patch. 
on github m1 runners, audio timeouts after 15 minutes - run: git apply --ignore-whitespace tools/example-showcase/disable-audio.patch - - name: Run examples - run: | - for example in .github/example-run/*.ron; do - example_name=`basename $example .ron` - echo -n $example_name > last_example_run - echo "running $example_name - "`date` - time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" - sleep 10 - if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then - mkdir screenshots-$example_name - mv screenshot-*.png screenshots-$example_name/ - fi - done - mkdir traces && mv trace*.json traces/ - mkdir screenshots && mv screenshots-* screenshots/ - - name: save traces - uses: actions/upload-artifact@v4 - with: - name: example-traces-macos - path: traces - - name: Save PR number - if: ${{ github.event_name == 'pull_request' }} - run: | - echo ${{ github.event.number }} > ./screenshots/PR - - name: save screenshots - uses: actions/upload-artifact@v4 - with: - name: screenshots-macos - path: screenshots - - uses: actions/upload-artifact@v4 - if: ${{ failure() && github.event_name == 'pull_request' }} - with: - name: example-run-macos - path: example-run/ check-doc: runs-on: ubuntu-latest timeout-minutes: 30 @@ -318,8 +326,7 @@ jobs: # See tools/ci/src/main.rs for the commands this runs run: cargo run -p ci -- doc env: - CARGO_INCREMENTAL: 0 - RUSTFLAGS: "-C debuginfo=0" + RUSTFLAGS: "-C debuginfo=0 -D warnings" # This currently report a lot of false positives # Enable it again once it's fixed - https://github.com/bevyengine/bevy/issues/1983 # - name: Installs cargo-deadlinks @@ -333,6 +340,7 @@ jobs: timeout-minutes: 30 steps: - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable - name: check for missing metadata id: missing-metadata run: cargo run -p build-templated-pages -- check-missing examples @@ -367,6 +375,7 @@ jobs: needs: 
check-missing-examples-in-docs steps: - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable - name: check for missing features id: missing-features run: cargo run -p build-templated-pages -- check-missing features @@ -410,6 +419,7 @@ jobs: ~/.cargo/git/db/ target/ key: ${{ runner.os }}-cargo-msrv-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable - name: get MSRV id: msrv run: | @@ -443,7 +453,7 @@ jobs: shell: bash run: | errors="" - for file in $(find examples tests -name '*.rs'); do + for file in $(find examples tests -name '*.rs' -not -path 'examples/mobile/*'); do if grep -q "use bevy_" "$file"; then errors+="ERROR: Detected internal Bevy import in $file\n" fi diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml deleted file mode 100644 index d187165a763e9..0000000000000 --- a/.github/workflows/daily.yml +++ /dev/null @@ -1,147 +0,0 @@ -name: Daily Jobs - -on: - schedule: - - cron: '0 12 * * *' - workflow_dispatch: - -env: - CARGO_TERM_COLOR: always - -jobs: - build-for-iOS: - if: github.repository == 'bevyengine/bevy' - runs-on: macos-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - - uses: dtolnay/rust-toolchain@stable - - - name: Add iOS targets - run: rustup target add aarch64-apple-ios x86_64-apple-ios - - - name: Build app for iOS - run: | - cd examples/mobile - make xcodebuild-iphone - mkdir Payload - mv build/Build/Products/Debug-iphoneos/bevy_mobile_example.app Payload - zip -r bevy_mobile_example.zip Payload - mv bevy_mobile_example.zip bevy_mobile_example.ipa - - - name: Upload to Browser Stack - run: | - curl -u "${{ secrets.BROWSERSTACK_USERNAME }}:${{ secrets.BROWSERSTACK_ACCESS_KEY }}" \ - -X POST "https://api-cloud.browserstack.com/app-automate/upload" \ - -F "file=@examples/mobile/bevy_mobile_example.ipa" \ - -F "custom_id=$GITHUB_RUN_ID" - - build-for-Android: - if: github.repository == 'bevyengine/bevy' - runs-on: ubuntu-latest - timeout-minutes: 30 - steps: - - 
uses: actions/checkout@v4 - - - uses: dtolnay/rust-toolchain@stable - - - name: Set up JDK 17 - uses: actions/setup-java@v4 - with: - java-version: '17' - distribution: 'temurin' - - - name: Add Android targets - run: rustup target add aarch64-linux-android - - - name: Install Cargo NDK - run: cargo install --force cargo-ndk - - - name: Build .so file - run: cargo ndk -t arm64-v8a -o android_example/app/src/main/jniLibs build --package bevy_mobile_example - env: - # This will reduce the APK size from 1GB to ~200MB - CARGO_PROFILE_DEV_DEBUG: false - - - name: Build app for Android - run: cd examples/mobile/android_example && chmod +x gradlew && ./gradlew build - - - name: Upload to Browser Stack - run: | - curl -u "${{ secrets.BROWSERSTACK_USERNAME }}:${{ secrets.BROWSERSTACK_ACCESS_KEY }}" \ - -X POST "https://api-cloud.browserstack.com/app-automate/upload" \ - -F "file=@app/build/outputs/apk/debug/app-debug.apk" \ - -F "custom_id=$GITHUB_RUN_ID" - - nonce: - if: github.repository == 'bevyengine/bevy' - runs-on: ubuntu-latest - timeout-minutes: 30 - outputs: - result: ${{ steps.nonce.outputs.result }} - steps: - - id: nonce - run: echo "result=${{ github.run_id }}-$(date +%s)" >> $GITHUB_OUTPUT - - run: - if: github.repository == 'bevyengine/bevy' - runs-on: ubuntu-latest - timeout-minutes: 30 - needs: [nonce, build-for-iOS, build-for-Android] - env: - PERCY_PARALLEL_NONCE: ${{ needs.nonce.outputs.result }} - PERCY_PARALLEL_TOTAL: ${{ strategy.job-total }} - strategy: - matrix: - include: - - device: "iPhone 13" - os_version: "15" - - device: "iPhone 14" - os_version: "16" - - device: "iPhone 15" - os_version: "17" - - device: "Xiaomi Redmi Note 11" - os_version: "11.0" - - device: "Google Pixel 6" - os_version: "12.0" - - device: "Samsung Galaxy S23" - os_version: "13.0" - - device: "Google Pixel 8" - os_version: "14.0" - steps: - - uses: actions/checkout@v4 - - - name: Run Example - run: | - cd .github/start-mobile-example - npm install - npm install -g 
@percy/cli@latest - npx percy app:exec --parallel -- npm run mobile - env: - BROWSERSTACK_APP_ID: ${{ github.run_id }} - BROWSERSTACK_USERNAME: ${{ secrets.BROWSERSTACK_USERNAME }} - BROWSERSTACK_ACCESS_KEY: ${{ secrets.BROWSERSTACK_ACCESS_KEY }} - PERCY_TOKEN: ${{ secrets.PERCY_TOKEN }} - DEVICE: ${{ matrix.device }} - OS_VERSION: ${{ matrix.os_version }} - - - name: Save screenshots - if: ${{ always() }} - uses: actions/upload-artifact@v4 - with: - name: screenshots-${{ matrix.device }}-${{ matrix.os_version }} - path: .github/start-mobile-example/*.png - - check-result: - if: github.repository == 'bevyengine/bevy' - runs-on: ubuntu-latest - timeout-minutes: 30 - needs: [run] - steps: - - name: Wait for screenshots comparison - run: | - npm install -g @percy/cli@latest - npx percy build:wait --project dede4209/Bevy-Mobile-Example --commit ${{ github.sha }} --fail-on-changes --pass-if-approved - env: - PERCY_TOKEN: ${{ secrets.PERCY_TOKEN }} diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 17ac22019ebf0..8a04fadc94530 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -59,7 +59,7 @@ jobs: env: # needs to be in sync with [package.metadata.docs.rs] RUSTFLAGS: --cfg docsrs_dep - RUSTDOCFLAGS: -Zunstable-options --cfg=docsrs --generate-link-to-definition + RUSTDOCFLAGS: -Zunstable-options --cfg=docsrs --generate-link-to-definition --html-after-content docs-rs/trait-tags.html run: | cargo doc \ -Zunstable-options \ diff --git a/.github/workflows/example-run-report.yml b/.github/workflows/example-run-report.yml new file mode 100644 index 0000000000000..198dee72e4586 --- /dev/null +++ b/.github/workflows/example-run-report.yml @@ -0,0 +1,120 @@ +name: Example Run - PR Comments + +# This workflow has write permissions on the repo +# It must not checkout a PR and run untrusted code + +# Also requesting write permissions on PR to be able to comment +permissions: + pull-requests: "write" + +on: + workflow_run: + workflows: 
["Example Run"] + types: + - completed + +jobs: + make-macos-screenshots-available: + if: github.event.workflow_run.event == 'pull_request' + runs-on: ubuntu-latest + timeout-minutes: 30 + outputs: + branch-name: ${{ steps.branch-name.outputs.result }} + pr-number: ${{ steps.pr-number.outputs.result }} + steps: + - name: "Download artifact" + id: find-artifact + uses: actions/github-script@v7 + with: + result-encoding: string + script: | + var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: ${{github.event.workflow_run.id }}, + }); + var matchArtifacts = artifacts.data.artifacts.filter((artifact) => { + return artifact.name == "screenshots-macos" + }); + if (matchArtifacts.length == 0) { return "false" } + var matchArtifact = matchArtifacts[0]; + var download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: matchArtifact.id, + archive_format: 'zip', + }); + var fs = require('fs'); + fs.writeFileSync('${{github.workspace}}/screenshots-macos.zip', Buffer.from(download.data)); + return "true" + - name: prepare artifact folder + run: | + unzip screenshots-macos.zip + mkdir screenshots + mv screenshots-* screenshots/ + - name: save screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-macos + path: screenshots + - name: branch name + id: branch-name + run: | + echo "result=PR-$(cat PR)-${{ github.event.workflow_run.head_branch }}" >> $GITHUB_OUTPUT + - name: PR number + id: pr-number + run: | + echo "result=$(cat PR)" >> $GITHUB_OUTPUT + + compare-macos-screenshots: + name: Compare macOS screenshots + needs: [make-macos-screenshots-available] + uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.event.workflow_run.head_sha }} + branch: ${{ needs.make-macos-screenshots-available.outputs.branch-name }} + artifact: screenshots-macos + os: macos + secrets: inherit + 
+ + comment-on-pr: + name: Comment on PR + runs-on: ubuntu-latest + needs: [make-macos-screenshots-available, compare-macos-screenshots] + if: ${{ always() && needs.compare-macos-screenshots.result == 'failure' }} + steps: + - uses: actions/checkout@v4 + - name: "Check if PR already has label" + id: check-label + env: + PR: ${{ needs.make-macos-screenshots-available.outputs.pr-number }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + if [[ `gh api --jq '.labels.[].name' /repos/bevyengine/bevy/pulls/$PR` =~ "M-Deliberate-Rendering-Change" ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Check if last comment is already from actions" + id: check-last-comment + env: + PR: ${{ needs.make-macos-screenshots-available.outputs.pr-number }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + if [[ `gh api --jq '.[-1].user.login' /repos/bevyengine/bevy/issues/$PR/comments` == 'github-actions[bot]' ]] + then + echo "result=true" >> $GITHUB_OUTPUT + else + echo "result=false" >> $GITHUB_OUTPUT + fi + - name: "Comment on PR" + if: ${{ steps.check-label.outputs.result == 'false' && steps.check-last-comment.outputs.result == 'false' }} + env: + PROJECT: B04F67C0-C054-4A6F-92EC-F599FEC2FD1D + PR: ${{ needs.make-macos-screenshots-available.outputs.pr-number }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + LF=$'\n' + COMMENT_BODY="Your PR caused a change in the graphical output of an example or rendering test. This might be intentional, but it could also mean that something broke! ${LF}You can review it at https://pixel-eagle.com/project/$PROJECT?filter=PR-$PR ${LF} ${LF}If it's expected, please add the M-Deliberate-Rendering-Change label. ${LF} ${LF}If this change seems unrelated to your PR, you can consider updating your PR to target the latest main branch, either by rebasing or merging main into it." 
+ gh issue comment $PR --body "$COMMENT_BODY" diff --git a/.github/workflows/example-run.yml b/.github/workflows/example-run.yml new file mode 100644 index 0000000000000..676f676db5758 --- /dev/null +++ b/.github/workflows/example-run.yml @@ -0,0 +1,187 @@ +name: Example Run + +on: + merge_group: + pull_request: + # also run when pushed to main to update reference screenshots + push: + branches: + - main + +env: + CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: 0 + CARGO_PROFILE_TEST_DEBUG: 0 + CARGO_PROFILE_DEV_DEBUG: 0 + +jobs: + run-examples-macos-metal: + runs-on: macos-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - name: Disable audio + # Disable audio through a patch. on github m1 runners, audio timeouts after 15 minutes + run: git apply --ignore-whitespace tools/example-showcase/disable-audio.patch + - name: Run examples + run: | + for example in .github/example-run/*.ron; do + example_name=`basename $example .ron` + echo -n $example_name > last_example_run + echo "running $example_name - "`date` + time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" + sleep 10 + if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then + mkdir screenshots-$example_name + mv screenshot-*.png screenshots-$example_name/ + fi + done + mkdir traces && mv trace*.json traces/ + mkdir screenshots && mv screenshots-* screenshots/ + - name: save traces + uses: actions/upload-artifact@v4 + with: + name: example-traces-macos + path: traces + - name: Save PR number + if: ${{ github.event_name == 'pull_request' }} + run: | + echo ${{ github.event.number }} > ./screenshots/PR + - name: save screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-macos + path: screenshots + - uses: actions/upload-artifact@v4 + if: ${{ failure() && github.event_name == 'pull_request' }} + with: + name: 
example-run-macos + path: example-run/ + + compare-macos-screenshots: + if: ${{ github.event_name != 'pull_request' }} + name: Compare Macos screenshots + needs: [run-examples-macos-metal] + uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.sha }} + branch: ${{ github.ref_name }} + artifact: screenshots-macos + os: macos + secrets: inherit + + run-examples-linux-vulkan: + if: ${{ github.event_name != 'pull_request' }} + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - name: Install Linux dependencies + uses: ./.github/actions/install-linux-deps + # At some point this may be merged into `install-linux-deps`, but for now it is its own step. + - name: Install additional Linux dependencies for Vulkan + run: | + sudo add-apt-repository ppa:kisak/turtle -y + sudo apt-get install --no-install-recommends libxkbcommon-x11-0 xvfb libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers + - uses: actions/cache@v4 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-run-examples-${{ hashFiles('**/Cargo.toml') }} + - uses: dtolnay/rust-toolchain@stable + - name: Run examples + run: | + for example in .github/example-run/*.ron; do + example_name=`basename $example .ron` + echo -n $example_name > last_example_run + echo "running $example_name - "`date` + time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example xvfb-run cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" + sleep 10 + if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then + mkdir screenshots-$example_name + mv screenshot-*.png screenshots-$example_name/ + fi + done + mkdir traces && mv trace*.json traces/ + mkdir screenshots && mv screenshots-* screenshots/ + - name: save traces + uses: actions/upload-artifact@v4 + with: + name: example-traces-linux + path: traces + - name: save 
screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-linux + path: screenshots + - uses: actions/upload-artifact@v4 + if: ${{ failure() && github.event_name == 'pull_request' }} + with: + name: example-run-linux + path: example-run/ + + compare-linux-screenshots: + name: Compare Linux screenshots + needs: [run-examples-linux-vulkan] + uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.sha }} + branch: ${{ github.ref_name }} + artifact: screenshots-linux + os: linux + secrets: inherit + + run-examples-on-windows-dx12: + if: ${{ github.event_name != 'pull_request' }} + runs-on: windows-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - name: Run examples + shell: bash + run: | + for example in .github/example-run/*.ron; do + example_name=`basename $example .ron` + echo -n $example_name > last_example_run + echo "running $example_name - "`date` + time WGPU_BACKEND=dx12 TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "statically-linked-dxc,bevy_ci_testing,trace,trace_chrome" + sleep 10 + if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then + mkdir screenshots-$example_name + mv screenshot-*.png screenshots-$example_name/ + fi + done + mkdir traces && mv trace*.json traces/ + mkdir screenshots && mv screenshots-* screenshots/ + - name: save traces + uses: actions/upload-artifact@v4 + with: + name: example-traces-windows + path: traces + - name: save screenshots + uses: actions/upload-artifact@v4 + with: + name: screenshots-windows + path: screenshots + - uses: actions/upload-artifact@v4 + if: ${{ failure() && github.event_name == 'pull_request' }} + with: + name: example-run-windows + path: example-run/ + + compare-windows-screenshots: + name: Compare Windows screenshots + needs: [run-examples-on-windows-dx12] + uses: 
./.github/workflows/send-screenshots-to-pixeleagle.yml + with: + commit: ${{ github.sha }} + branch: ${{ github.ref_name }} + artifact: screenshots-windows + os: windows + secrets: inherit diff --git a/.github/workflows/post-release.yml b/.github/workflows/post-release.yml index 7902584a9fdb9..91a98f3ea7acc 100644 --- a/.github/workflows/post-release.yml +++ b/.github/workflows/post-release.yml @@ -8,9 +8,12 @@ env: CARGO_TERM_COLOR: always jobs: - ci: + bump: if: github.repository == 'bevyengine/bevy' runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 32e481b23047c..0000000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Release - -# how to trigger: https://docs.github.com/en/actions/managing-workflow-runs/manually-running-a-workflow -on: - workflow_dispatch: - -env: - CARGO_TERM_COLOR: always - -jobs: - ci: - if: github.repository == 'bevyengine/bevy' - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - - name: Install cargo-release - run: cargo install cargo-release - - - name: Setup release - run: | - # Set the commit author to the github-actions bot. 
See discussion here for more information: - # https://github.com/actions/checkout/issues/13#issuecomment-724415212 - # https://github.community/t/github-actions-bot-email-address/17204/6 - git config user.name 'Bevy Auto Releaser' - git config user.email '41898282+github-actions[bot]@users.noreply.github.com' - # release: remove the dev suffix, like going from 0.X.0-dev to 0.X.0 - # --workspace: updating all crates in the workspace - # --no-publish: do not publish to crates.io - # --execute: not a dry run - # --no-tag: do not push tag for each new version - # --no-push: do not push the update commits - # --dependent-version upgrade: change 0.X.0-dev in internal dependencies to 0.X.0 - # --exclude: ignore those packages - cargo release release \ - --workspace \ - --no-publish \ - --execute \ - --no-tag \ - --no-confirm \ - --no-push \ - --dependent-version upgrade \ - --exclude ci \ - --exclude errors \ - --exclude bevy_mobile_example \ - --exclude build-wasm-example - - - name: Create PR - uses: peter-evans/create-pull-request@v7 - with: - delete-branch: true - base: "main" - title: "Preparing Next Release" - body: | - Preparing next release. This PR has been auto-generated. diff --git a/.github/workflows/send-screenshots-to-pixeleagle.yml b/.github/workflows/send-screenshots-to-pixeleagle.yml index 4372d75ec865a..ee2b5e3dd1160 100644 --- a/.github/workflows/send-screenshots-to-pixeleagle.yml +++ b/.github/workflows/send-screenshots-to-pixeleagle.yml @@ -34,13 +34,13 @@ jobs: if: ${{ ! fromJSON(env.PIXELEAGLE_TOKEN_EXISTS) }} run: | echo "The PIXELEAGLE_TOKEN secret does not exist, so uploading screenshots to Pixel Eagle was skipped." 
>> $GITHUB_STEP_SUMMARY - + - name: Download artifact if: ${{ fromJSON(env.PIXELEAGLE_TOKEN_EXISTS) }} uses: actions/download-artifact@v4 with: pattern: ${{ inputs.artifact }} - + - name: Send to Pixel Eagle if: ${{ fromJSON(env.PIXELEAGLE_TOKEN_EXISTS) }} env: @@ -48,12 +48,12 @@ jobs: run: | # Create a new run with its associated metadata metadata='{"os":"${{ inputs.os }}", "commit": "${{ inputs.commit }}", "branch": "${{ inputs.branch }}"}' - run=`curl https://pixel-eagle.vleue.com/$project/runs --json "$metadata" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.id'` - + run=`curl https://pixel-eagle.com/$project/runs --json "$metadata" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.id'` + SAVEIFS=$IFS - + cd ${{ inputs.artifact }} - + # Read the hashes of the screenshot for fast comparison when they are equal IFS=$'\n' # Build a json array of screenshots and their hashes @@ -67,24 +67,24 @@ jobs: done hashes=`echo $hashes | rev | cut -c 2- | rev` hashes="$hashes]" - + IFS=$SAVEIFS # Upload screenshots with unknown hashes - curl https://pixel-eagle.vleue.com/$project/runs/$run/hashes --json "$hashes" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.[]|[.name] | @tsv' | + curl https://pixel-eagle.com/$project/runs/$run/hashes --json "$hashes" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} | jq '.[]|[.name] | @tsv' | while IFS=$'\t' read -r name; do name=`echo $name | tr -d '"'` echo "Uploading $name" - curl https://pixel-eagle.vleue.com/$project/runs/$run/screenshots -F "data=@./screenshots-$name" -F "screenshot=$name" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} + curl https://pixel-eagle.com/$project/runs/$run/screenshots -F "data=@./screenshots-$name" -F "screenshot=$name" --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} echo done - + IFS=$SAVEIFS cd .. 
# Trigger comparison with the main branch on the same os - curl https://pixel-eagle.vleue.com/$project/runs/$run/compare/auto --json '{"os":"", "branch": "main"}' --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} > pixeleagle.json + curl https://pixel-eagle.com/$project/runs/$run/compare/auto --json '{"os":"", "branch": "main"}' --oauth2-bearer ${{ secrets.PIXELEAGLE_TOKEN }} > pixeleagle.json # Log results compared_with=`cat pixeleagle.json | jq '.to'` @@ -93,17 +93,17 @@ jobs: missing=`cat pixeleagle.json | jq '.missing | length'` if [ ! $missing -eq 0 ]; then echo "There are $missing missing screenshots" - echo "::warning title=$missing missing screenshots on ${{ inputs.os }}::https://pixel-eagle.vleue.com/$project/runs/$run/compare/$compared_with" + echo "::warning title=$missing missing screenshots on ${{ inputs.os }}::https://pixel-eagle.com/project/$project/run/$run/compare/$compared_with" status=1 fi diff=`cat pixeleagle.json | jq '.diff | length'` if [ ! $diff -eq 0 ]; then echo "There are $diff screenshots with a difference" - echo "::warning title=$diff different screenshots on ${{ inputs.os }}::https://pixel-eagle.vleue.com/$project/runs/$run/compare/$compared_with" + echo "::warning title=$diff different screenshots on ${{ inputs.os }}::https://pixel-eagle.com/project/$project/run/$run/compare/$compared_with" status=1 fi - echo "created run $run: https://pixel-eagle.vleue.com/$project/runs/$run/compare/$compared_with" + echo "created run $run: https://pixel-eagle.com/project/$project/run/$run/compare/$compared_with" exit $status diff --git a/.github/workflows/validation-jobs.yml b/.github/workflows/validation-jobs.yml index 4c576ac1e176a..36679408578ed 100644 --- a/.github/workflows/validation-jobs.yml +++ b/.github/workflows/validation-jobs.yml @@ -5,7 +5,6 @@ on: pull_request: push: branches: - - main - release-* concurrency: @@ -14,6 +13,9 @@ concurrency: env: CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: 0 + CARGO_PROFILE_TEST_DEBUG: 0 + 
CARGO_PROFILE_DEV_DEBUG: 0 # If nightly is breaking CI, modify this variable to target a specific nightly version. NIGHTLY_TOOLCHAIN: nightly @@ -77,115 +79,9 @@ jobs: - name: Build app for Android run: cd examples/mobile/android_example && chmod +x gradlew && ./gradlew build - run-examples-linux-vulkan: - # also run when pushed to main to update reference screenshots - if: ${{ github.event_name != 'pull_request' }} - runs-on: ubuntu-22.04 - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - name: Install Linux dependencies - uses: ./.github/actions/install-linux-deps - # At some point this may be merged into `install-linux-deps`, but for now it is its own step. - - name: Install additional Linux dependencies for Vulkan - run: | - sudo add-apt-repository ppa:kisak/turtle -y - sudo apt-get install --no-install-recommends libxkbcommon-x11-0 xvfb libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers - - uses: actions/cache@v4 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-run-examples-${{ hashFiles('**/Cargo.toml') }} - - uses: dtolnay/rust-toolchain@stable - - name: Run examples - run: | - for example in .github/example-run/*.ron; do - example_name=`basename $example .ron` - echo -n $example_name > last_example_run - echo "running $example_name - "`date` - time TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example xvfb-run cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" - sleep 10 - if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then - mkdir screenshots-$example_name - mv screenshot-*.png screenshots-$example_name/ - fi - done - mkdir traces && mv trace*.json traces/ - mkdir screenshots && mv screenshots-* screenshots/ - - name: save traces - uses: actions/upload-artifact@v4 - with: - name: example-traces-linux - path: traces - - name: save screenshots - uses: 
actions/upload-artifact@v4 - with: - name: screenshots-linux - path: screenshots - - uses: actions/upload-artifact@v4 - if: ${{ failure() && github.event_name == 'pull_request' }} - with: - name: example-run-linux - path: example-run/ - - compare-linux-screenshots: - name: Compare Linux screenshots - needs: [run-examples-linux-vulkan] - uses: ./.github/workflows/send-screenshots-to-pixeleagle.yml - with: - commit: ${{ github.sha }} - branch: ${{ github.ref_name }} - artifact: screenshots-linux - os: linux - secrets: inherit - - run-examples-on-windows-dx12: - # also run when pushed to main to update reference screenshots - if: ${{ github.event_name != 'pull_request' }} - runs-on: windows-latest - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - uses: dtolnay/rust-toolchain@stable - - name: Run examples - shell: bash - run: | - for example in .github/example-run/*.ron; do - example_name=`basename $example .ron` - echo -n $example_name > last_example_run - echo "running $example_name - "`date` - time WGPU_BACKEND=dx12 TRACE_CHROME=trace-$example_name.json CI_TESTING_CONFIG=$example cargo run --example $example_name --features "bevy_ci_testing,trace,trace_chrome" - sleep 10 - if [ `find ./ -maxdepth 1 -name 'screenshot-*.png' -print -quit` ]; then - mkdir screenshots-$example_name - mv screenshot-*.png screenshots-$example_name/ - fi - done - mkdir traces && mv trace*.json traces/ - mkdir screenshots && mv screenshots-* screenshots/ - - name: save traces - uses: actions/upload-artifact@v4 - with: - name: example-traces-windows - path: traces - - name: save screenshots - uses: actions/upload-artifact@v4 - with: - name: screenshots-windows - path: screenshots - - uses: actions/upload-artifact@v4 - if: ${{ failure() && github.event_name == 'pull_request' }} - with: - name: example-run-windows - path: example-run/ - run-examples-on-wasm: if: ${{ github.event_name == 'merge_group' }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest timeout-minutes: 60 steps: 
- uses: actions/checkout@v4 @@ -205,13 +101,6 @@ jobs: target/ key: ${{ runner.os }}-wasm-run-examples-${{ hashFiles('**/Cargo.toml') }} - - name: install xvfb, llvmpipe and lavapipe - run: | - sudo apt-get update -y -qq - sudo add-apt-repository ppa:kisak/turtle -y - sudo apt-get update - sudo apt install -y xvfb libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers - - name: Install wasm-bindgen run: cargo install --force wasm-bindgen-cli @@ -256,7 +145,6 @@ jobs: - name: Build run: cargo build -p ${{ matrix.crate }} --no-default-features env: - CARGO_INCREMENTAL: 0 RUSTFLAGS: "-C debuginfo=0 -D warnings" build-without-default-features-status: diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index f1d621cde9e16..b4ddffdb9dbb7 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -9,6 +9,10 @@ on: env: CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: 0 + CARGO_PROFILE_TEST_DEBUG: 0 + CARGO_PROFILE_DEV_DEBUG: 0 + ISSUE_TITLE: Main branch fails to compile on Rust beta. 
# The jobs listed here are intentionally skipped when running on forks, for a number of reasons: # @@ -43,7 +47,6 @@ jobs: # See tools/ci/src/main.rs for the commands this runs run: cargo run -p ci -- test env: - CARGO_INCREMENTAL: 0 RUSTFLAGS: "-C debuginfo=0 -D warnings" lint: @@ -80,6 +83,30 @@ jobs: # See tools/ci/src/main.rs for the commands this runs run: cargo run -p ci -- compile + close-any-open-issues: + runs-on: ubuntu-latest + needs: ['test', 'lint', 'check-compiles'] + permissions: + issues: write + steps: + - name: Close issues + run: | + previous_issue_number=$(gh issue list \ + --search "$ISSUE_TITLE in:title" \ + --json number \ + --jq '.[0].number') + if [[ -n $previous_issue_number ]]; then + gh issue close $previous_issue_number \ + -r completed \ + -c $COMMENT + fi + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_REPO: ${{ github.repository }} + COMMENT: | + [Last pipeline run](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) successfully completed. Closing issue. + + open-issue: name: Warn that weekly CI fails runs-on: ubuntu-latest @@ -93,7 +120,7 @@ jobs: - name: Create issue run: | previous_issue_number=$(gh issue list \ - --search "$TITLE in:title" \ + --search "$ISSUE_TITLE in:title" \ --json number \ --jq '.[0].number') if [[ -n $previous_issue_number ]]; then @@ -101,14 +128,13 @@ jobs: --body "Weekly pipeline still fails: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" else gh issue create \ - --title "$TITLE" \ + --title "$ISSUE_TITLE" \ --label "$LABELS" \ --body "$BODY" fi env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_REPO: ${{ github.repository }} - TITLE: Main branch fails to compile on Rust beta. LABELS: C-Bug,S-Needs-Triage BODY: | ## Weekly CI run has failed. 
diff --git a/.gitignore b/.gitignore index d3b84d9590bb8..0d39edea49083 100644 --- a/.gitignore +++ b/.gitignore @@ -1,22 +1,23 @@ +# If your IDE needs additional project specific files, configure git to ignore them: +# https://docs.github.com/en/get-started/getting-started-with-git/ignoring-files#configuring-ignored-files-for-all-repositories-on-your-computer + # Rust build artifacts target crates/**/target benches/**/target tools/**/target **/*.rs.bk +rustc-ice-*.txt + +# DX12 wgpu backend +dxcompiler.dll +dxil.dll # Cargo Cargo.lock .cargo/config .cargo/config.toml -# IDE files -.idea -.vscode -.zed -dxcompiler.dll -dxil.dll - # Bevy Assets assets/**/*.meta crates/bevy_asset/imported_assets diff --git a/CREDITS.md b/CREDITS.md index cc8b15083ac69..c0375bd38edac 100644 --- a/CREDITS.md +++ b/CREDITS.md @@ -20,8 +20,8 @@ * Cake from [Kenney's Food Kit](https://www.kenney.nl/assets/food-kit) (CC0 1.0 Universal) * Ground tile from [Kenney's Tower Defense Kit](https://www.kenney.nl/assets/tower-defense-kit) (CC0 1.0 Universal) * Game icons from [Kenney's Game Icons](https://www.kenney.nl/assets/game-icons) (CC0 1.0 Universal) -* Space ships from [Kenny's Simple Space Kit](https://www.kenney.nl/assets/simple-space) (CC0 1.0 Universal) -* UI borders from [Kenny's Fantasy UI Borders Kit](https://kenney.nl/assets/fantasy-ui-borders) (CC0 1.0 Universal) +* Space ships from [Kenney's Simple Space Kit](https://www.kenney.nl/assets/simple-space) (CC0 1.0 Universal) +* UI borders from [Kenney's Fantasy UI Borders Kit](https://kenney.nl/assets/fantasy-ui-borders) (CC0 1.0 Universal) * glTF animated fox from [glTF Sample Models][fox] * Low poly fox [by PixelMannen] (CC0 1.0 Universal) * Rigging and animation [by @tomkranis on Sketchfab] ([CC-BY 4.0]) @@ -32,7 +32,7 @@ * Epic orchestra music sample, modified to loop, from [Migfus20](https://freesound.org/people/Migfus20/sounds/560449/) ([CC BY 4.0 DEED](https://creativecommons.org/licenses/by/4.0/)) [MorphStressTest]: 
https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/MorphStressTest -[fox]: https://github.com/KhronosGroup/glTF-Sample-Models/tree/master/2.0/Fox +[fox]: https://github.com/KhronosGroup/glTF-Sample-Assets/tree/main/Models/Fox [by PixelMannen]: https://opengameart.org/content/fox-and-shiba [by @tomkranis on Sketchfab]: https://sketchfab.com/models/371dea88d7e04a76af5763f2a36866bc [CC-BY 4.0]: https://creativecommons.org/licenses/by/4.0/ diff --git a/Cargo.toml b/Cargo.toml index 922a2176513a0..8bb16b741db86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" categories = ["game-engines", "graphics", "gui", "rendering"] description = "A refreshingly simple data-driven game engine and app framework" exclude = ["assets/", "tools/", ".github/", "crates/", "examples/wasm/assets/"] @@ -10,7 +10,7 @@ keywords = ["game", "engine", "gamedev", "graphics", "bevy"] license = "MIT OR Apache-2.0" repository = "https://github.com/bevyengine/bevy" documentation = "https://docs.rs/bevy" -rust-version = "1.83.0" +rust-version = "1.85.0" [workspace] resolver = "2" @@ -19,9 +19,15 @@ members = [ "crates/*", # Several crates with macros have "compile fail" tests nested inside them, also known as UI # tests, that verify diagnostic output does not accidentally change. - "crates/*/compile_fail", + # TODO: Use a glob pattern once they are fixed in `dependabot-core` + # TODO: See https://github.com/bevyengine/bevy/issues/17876 for context. + "crates/bevy_derive/compile_fail", + "crates/bevy_ecs/compile_fail", + "crates/bevy_reflect/compile_fail", # Examples of compiling Bevy for mobile platforms. "examples/mobile", + # Examples of using Bevy on no_std platforms. + "examples/no_std/*", # Benchmarks "benches", # Internal tools that are not published. @@ -29,6 +35,10 @@ members = [ # Bevy's error codes. 
This is a crate so we can automatically check all of the code blocks. "errors", ] +exclude = [ + # Integration tests are not part of the workspace + "tests-integration", +] [workspace.lints.clippy] doc_markdown = "warn" @@ -41,6 +51,10 @@ type_complexity = "allow" undocumented_unsafe_blocks = "warn" unwrap_or_default = "warn" needless_lifetimes = "allow" +too_many_arguments = "allow" +nonstandard_macro_braces = "warn" +print_stdout = "warn" +print_stderr = "warn" ptr_as_ptr = "warn" ptr_cast_constness = "warn" @@ -53,6 +67,9 @@ std_instead_of_core = "warn" std_instead_of_alloc = "warn" alloc_instead_of_core = "warn" +allow_attributes = "warn" +allow_attributes_without_reason = "warn" + [workspace.lints.rust] missing_docs = "warn" unexpected_cfgs = { level = "warn", check-cfg = ['cfg(docsrs_dep)'] } @@ -82,6 +99,8 @@ type_complexity = "allow" undocumented_unsafe_blocks = "warn" unwrap_or_default = "warn" needless_lifetimes = "allow" +too_many_arguments = "allow" +nonstandard_macro_braces = "warn" ptr_as_ptr = "warn" ptr_cast_constness = "warn" @@ -93,6 +112,9 @@ std_instead_of_core = "allow" std_instead_of_alloc = "allow" alloc_instead_of_core = "allow" +allow_attributes = "warn" +allow_attributes_without_reason = "warn" + [lints.rust] missing_docs = "warn" unexpected_cfgs = { level = "warn", check-cfg = ['cfg(docsrs_dep)'] } @@ -102,6 +124,8 @@ unused_qualifications = "warn" [features] default = [ + "std", + "async_executor", "android-game-activity", "android_shared_stdcxx", "animation", @@ -109,9 +133,12 @@ default = [ "bevy_audio", "bevy_color", "bevy_core_pipeline", + "bevy_anti_aliasing", "bevy_gilrs", "bevy_gizmos", "bevy_gltf", + "bevy_input_focus", + "bevy_log", "bevy_mesh_picking_backend", "bevy_pbr", "bevy_picking", @@ -138,6 +165,9 @@ default = [ "x11", ] +# Recommended defaults for no_std applications +default_no_std = ["libm", "critical-section", "bevy_color", "bevy_state"] + # Provides an implementation for picking meshes bevy_mesh_picking_backend = [ 
"bevy_picking", @@ -184,6 +214,13 @@ bevy_core_pipeline = [ "bevy_render", ] +# Provides various anti aliasing solutions +bevy_anti_aliasing = [ + "bevy_internal/bevy_anti_aliasing", + "bevy_asset", + "bevy_render", +] + # Adds gamepad support bevy_gilrs = ["bevy_internal/bevy_gilrs"] @@ -196,6 +233,7 @@ bevy_pbr = [ "bevy_asset", "bevy_render", "bevy_core_pipeline", + "bevy_anti_aliasing", ] # Provides picking functionality @@ -213,7 +251,7 @@ bevy_sprite = [ "bevy_render", "bevy_core_pipeline", "bevy_color", - "bevy_sprite_picking_backend", + "bevy_anti_aliasing", ] # Provides text functionality @@ -226,7 +264,7 @@ bevy_ui = [ "bevy_text", "bevy_sprite", "bevy_color", - "bevy_ui_picking_backend", + "bevy_anti_aliasing", ] # Windowing layer @@ -247,9 +285,21 @@ bevy_dev_tools = ["bevy_internal/bevy_dev_tools"] # Enable the Bevy Remote Protocol bevy_remote = ["bevy_internal/bevy_remote"] +# Enable integration with `tracing` and `log` +bevy_log = ["bevy_internal/bevy_log"] + +# Enable input focus subsystem +bevy_input_focus = ["bevy_internal/bevy_input_focus"] + +# Use the configurable global error handler as the default error handler. 
+configurable_error_handler = ["bevy_internal/configurable_error_handler"] + # Enable passthrough loading for SPIR-V shaders (Only supported on Vulkan, shader capabilities and extensions must agree with the platform implementation) spirv_shader_passthrough = ["bevy_internal/spirv_shader_passthrough"] +# Statically linked DXC shader compiler for DirectX 12 +statically-linked-dxc = ["bevy_internal/statically-linked-dxc"] + # Tracing support, saving a file in Chrome Tracing format trace_chrome = ["trace", "bevy_internal/trace_chrome"] @@ -264,7 +314,7 @@ trace_tracy_memory = [ ] # Tracing support -trace = ["bevy_internal/trace"] +trace = ["bevy_internal/trace", "dep:tracing"] # Basis Universal compressed texture support basis-universal = ["bevy_internal/basis-universal"] @@ -410,6 +460,9 @@ shader_format_glsl = ["bevy_internal/shader_format_glsl"] # Enable support for shaders in SPIR-V shader_format_spirv = ["bevy_internal/shader_format_spirv"] +# Enable support for shaders in WESL +shader_format_wesl = ["bevy_internal/shader_format_wesl"] + # Enable support for transmission-related textures in the `StandardMaterial`, at the risk of blowing past the global, per-shader texture limit on older/lower-end GPUs pbr_transmission_textures = ["bevy_internal/pbr_transmission_textures"] @@ -424,6 +477,9 @@ pbr_anisotropy_texture = ["bevy_internal/pbr_anisotropy_texture"] # Enable support for PCSS, at the risk of blowing past the global, per-shader sampler limit on older/lower-end GPUs experimental_pbr_pcss = ["bevy_internal/experimental_pbr_pcss"] +# Enable support for specular textures in the `StandardMaterial`, at the risk of blowing past the global, per-shader texture limit on older/lower-end GPUs +pbr_specular_textures = ["bevy_internal/pbr_specular_textures"] + # Enable some limitations to be able to use WebGL2. 
Please refer to the [WebGL2 and WebGPU](https://github.com/bevyengine/bevy/tree/latest/examples#webgl2-and-webgpu) section of the examples README for more information on how to run Wasm builds with WebGPU. webgl2 = ["bevy_internal/webgl"] @@ -448,30 +504,46 @@ meshlet = ["bevy_internal/meshlet"] # Enables processing meshes into meshlet meshes for bevy_pbr meshlet_processor = ["bevy_internal/meshlet_processor"] -# Enable support for the ios_simulator by downgrading some rendering capabilities -ios_simulator = ["bevy_internal/ios_simulator"] - # Enable built in global state machines bevy_state = ["bevy_internal/bevy_state"] # Enables source location tracking for change detection and spawning/despawning, which can assist with debugging -track_change_detection = ["bevy_internal/track_change_detection"] +track_location = ["bevy_internal/track_location"] # Enable function reflection reflect_functions = ["bevy_internal/reflect_functions"] +# Enable documentation reflection +reflect_documentation = ["bevy_internal/reflect_documentation"] + # Enable winit custom cursor support custom_cursor = ["bevy_internal/custom_cursor"] # Experimental support for nodes that are ignored for UI layouting ghost_nodes = ["bevy_internal/ghost_nodes"] +# Uses `async-executor` as a task execution backend. +async_executor = ["std", "bevy_internal/async_executor"] + +# Allows access to the `std` crate. +std = ["bevy_internal/std"] + +# `critical-section` provides the building blocks for synchronization primitives on all platforms, including `no_std`. +critical-section = ["bevy_internal/critical-section"] + +# Uses the `libm` maths library instead of the one provided in `std` and `core`. +libm = ["bevy_internal/libm"] + +# Enables use of browser APIs. Note this is currently only applicable on `wasm32` architectures. 
+web = ["bevy_internal/web"] + [dependencies] -bevy_internal = { path = "crates/bevy_internal", version = "0.15.0-dev", default-features = false } +bevy_internal = { path = "crates/bevy_internal", version = "0.16.0-dev", default-features = false } +tracing = { version = "0.1", default-features = false, optional = true } # Wasm does not support dynamic linking. [target.'cfg(not(target_family = "wasm"))'.dependencies] -bevy_dylib = { path = "crates/bevy_dylib", version = "0.15.0-dev", default-features = false, optional = true } +bevy_dylib = { path = "crates/bevy_dylib", version = "0.16.0-dev", default-features = false, optional = true } [dev-dependencies] rand = "0.8.0" @@ -481,7 +553,14 @@ flate2 = "1.0" serde = { version = "1", features = ["derive"] } serde_json = "1" bytemuck = "1.7" -bevy_render = { path = "crates/bevy_render", version = "0.15.0-dev", default-features = false } +bevy_render = { path = "crates/bevy_render", version = "0.16.0-dev", default-features = false } +# The following explicit dependencies are needed for proc macros to work inside of examples as they are part of the bevy crate itself. 
+bevy_ecs = { path = "crates/bevy_ecs", version = "0.16.0-dev", default-features = false } +bevy_state = { path = "crates/bevy_state", version = "0.16.0-dev", default-features = false } +bevy_asset = { path = "crates/bevy_asset", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "crates/bevy_reflect", version = "0.16.0-dev", default-features = false } +bevy_image = { path = "crates/bevy_image", version = "0.16.0-dev", default-features = false } +bevy_gizmos = { path = "crates/bevy_gizmos", version = "0.16.0-dev", default-features = false } # Needed to poll Task examples futures-lite = "2.0.1" async-std = "1.13" @@ -493,13 +572,14 @@ hyper = { version = "1", features = ["server", "http1"] } http-body-util = "0.1" anyhow = "1" macro_rules_attribute = "0.2" -accesskit = "0.17" +accesskit = "0.18" +nonmax = "0.5" [target.'cfg(not(target_family = "wasm"))'.dev-dependencies] smol = "2" smol-macros = "0.1" smol-hyper = "0.1" -ureq = { version = "2.10.1", features = ["json"] } +ureq = { version = "3.0.8", features = ["json"] } [target.'cfg(target_arch = "wasm32")'.dev-dependencies] wasm-bindgen = { version = "0.2" } @@ -543,7 +623,7 @@ doc-scrape-examples = true [package.metadata.example.2d_viewport_to_world] name = "2D Viewport To World" -description = "Demonstrates how to use the `Camera::viewport_to_world_2d` method" +description = "Demonstrates how to use the `Camera::viewport_to_world_2d` method with a dynamic viewport and camera." 
category = "2D Rendering" wasm = true @@ -657,6 +737,17 @@ description = "Animates a sprite in response to an event" category = "2D Rendering" wasm = true +[[example]] +name = "sprite_scale" +path = "examples/2d/sprite_scale.rs" +doc-scrape-examples = true + +[package.metadata.example.sprite_scale] +name = "Sprite Scale" +description = "Shows how a sprite can be scaled into a rectangle while keeping the aspect ratio" +category = "2D Rendering" +wasm = true + [[example]] name = "sprite_flipping" path = "examples/2d/sprite_flipping.rs" @@ -745,6 +836,17 @@ description = "Used to test alpha modes with mesh2d" category = "2D Rendering" wasm = true +[[example]] +name = "mesh2d_repeated_texture" +path = "examples/2d/mesh2d_repeated_texture.rs" +doc-scrape-examples = true + +[package.metadata.example.mesh2d_repeated_texture] +name = "Mesh2d Repeated Texture" +description = "Showcase of using `uv_transform` on the `ColorMaterial` of a `Mesh2d`" +category = "2D Rendering" +wasm = true + [[example]] name = "pixel_grid_snap" path = "examples/2d/pixel_grid_snap.rs" @@ -758,13 +860,13 @@ wasm = true [[example]] name = "bounding_2d" -path = "examples/2d/bounding_2d.rs" +path = "examples/math/bounding_2d.rs" doc-scrape-examples = true [package.metadata.example.bounding_2d] -name = "2D Bounding Volume Intersections" +name = "Bounding Volume Intersections (2D)" description = "Showcases bounding volumes and intersection tests" -category = "2D Rendering" +category = "Math" wasm = true [[example]] @@ -856,6 +958,17 @@ description = "A scene showcasing the atmospheric fog effect" category = "3D Rendering" wasm = true +[[example]] +name = "atmosphere" +path = "examples/3d/atmosphere.rs" +doc-scrape-examples = true + +[package.metadata.example.atmosphere] +name = "Atmosphere" +description = "A scene showcasing pbr atmospheric scattering" +category = "3D Rendering" +wasm = true + [[example]] name = "fog" path = "examples/3d/fog.rs" @@ -889,6 +1002,17 @@ description = "Showcases different 
blend modes" category = "3D Rendering" wasm = true +[[example]] +name = "edit_material_on_gltf" +path = "examples/3d/edit_material_on_gltf.rs" +doc-scrape-examples = true + +[package.metadata.example.edit_material_on_gltf] +name = "Edit Gltf Material" +description = "Showcases changing materials of a Gltf after Scene spawn" +category = "3D Rendering" +wasm = true + [[example]] name = "lighting" path = "examples/3d/lighting.rs" @@ -945,6 +1069,17 @@ description = "Illustrates bloom configuration using HDR and emissive materials" category = "3D Rendering" wasm = true +[[example]] +name = "decal" +path = "examples/3d/decal.rs" +doc-scrape-examples = true + +[package.metadata.example.decal] +name = "Decal" +description = "Decal rendering" +category = "3D Rendering" +wasm = true + [[example]] name = "deferred_rendering" path = "examples/3d/deferred_rendering.rs" @@ -1232,11 +1367,16 @@ description = "Meshlet rendering for dense high-poly scenes (experimental)" category = "3D Rendering" wasm = false setup = [ + [ + "mkdir", + "-p", + "assets/external/models", + ], [ "curl", "-o", - "assets/models/bunny.meshlet_mesh", - "https://raw.githubusercontent.com/JMS55/bevy_meshlet_asset/defbd9b32072624d40d57de7d345c66a9edf5d0b/bunny.meshlet_mesh", + "assets/external/models/bunny.meshlet_mesh", + "https://raw.githubusercontent.com/JMS55/bevy_meshlet_asset/7a7c14138021f63904b584d5f7b73b695c7f4bbf/bunny.meshlet_mesh", ], ] @@ -1283,13 +1423,35 @@ category = "Animation" wasm = true [[example]] -name = "animated_fox" -path = "examples/animation/animated_fox.rs" +name = "animated_mesh" +path = "examples/animation/animated_mesh.rs" doc-scrape-examples = true -[package.metadata.example.animated_fox] -name = "Animated Fox" -description = "Plays an animation from a skinned glTF" +[package.metadata.example.animated_mesh] +name = "Animated Mesh" +description = "Plays an animation on a skinned glTF model of a fox" +category = "Animation" +wasm = true + +[[example]] +name = 
"animated_mesh_control" +path = "examples/animation/animated_mesh_control.rs" +doc-scrape-examples = true + +[package.metadata.example.animated_mesh_control] +name = "Animated Mesh Control" +description = "Plays an animation from a skinned glTF with keyboard controls" +category = "Animation" +wasm = true + +[[example]] +name = "animated_mesh_events" +path = "examples/animation/animated_mesh_events.rs" +doc-scrape-examples = true + +[package.metadata.example.animated_mesh_events] +name = "Animated Mesh Events" +description = "Plays an animation from a skinned glTF with events" category = "Animation" wasm = true @@ -1430,6 +1592,7 @@ wasm = true name = "headless" path = "examples/app/headless.rs" doc-scrape-examples = true +required-features = ["bevy_log"] [package.metadata.example.headless] name = "Headless" @@ -1691,7 +1854,7 @@ path = "examples/asset/multi_asset_sync.rs" doc-scrape-examples = true [package.metadata.example.multi_asset_sync] -name = "Mult-asset synchronization" +name = "Multi-asset synchronization" description = "Demonstrates how to wait for multiple assets to be loaded." 
category = "Assets" wasm = true @@ -1853,7 +2016,7 @@ wasm = false name = "change_detection" path = "examples/ecs/change_detection.rs" doc-scrape-examples = true -required-features = ["track_change_detection"] +required-features = ["track_location"] [package.metadata.example.change_detection] name = "Change Detection" @@ -1927,6 +2090,17 @@ description = "Demonstrates how to send and receive events of the same type in a category = "ECS (Entity Component System)" wasm = false +[[example]] +name = "entity_disabling" +path = "examples/ecs/entity_disabling.rs" +doc-scrape-examples = true + +[package.metadata.example.entity_disabling] +name = "Entity disabling" +description = "Demonstrates how to hide entities from the ECS without deleting them" +category = "ECS (Entity Component System)" +wasm = true + [[example]] name = "fixed_timestep" path = "examples/ecs/fixed_timestep.rs" @@ -2004,6 +2178,17 @@ description = "Illustrates parallel queries with `ParallelIterator`" category = "ECS (Entity Component System)" wasm = false +[[example]] +name = "relationships" +path = "examples/ecs/relationships.rs" +doc-scrape-examples = true + +[package.metadata.example.relationships] +name = "Relationships" +description = "Define and work with custom relationships between entities" +category = "ECS (Entity Component System)" +wasm = false + [[example]] name = "removal_detection" path = "examples/ecs/removal_detection.rs" @@ -2030,6 +2215,7 @@ wasm = false name = "fallible_params" path = "examples/ecs/fallible_params.rs" doc-scrape-examples = true +required-features = ["configurable_error_handler"] [package.metadata.example.fallible_params] name = "Fallible System Parameters" @@ -2038,13 +2224,14 @@ category = "ECS (Entity Component System)" wasm = false [[example]] -name = "fallible_systems" -path = "examples/ecs/fallible_systems.rs" +name = "error_handling" +path = "examples/ecs/error_handling.rs" doc-scrape-examples = true +required-features = ["bevy_mesh_picking_backend", 
"configurable_error_handler"] -[package.metadata.example.fallible_systems] -name = "Fallible Systems" -description = "Systems that return results to handle errors" +[package.metadata.example.error_handling] +name = "Error handling" +description = "How to return and handle errors across the ECS" category = "ECS (Entity Component System)" wasm = false @@ -2600,6 +2787,18 @@ description = "A shader that uses the GLSL shading language" category = "Shaders" wasm = true +[[example]] +name = "shader_material_wesl" +path = "examples/shader/shader_material_wesl.rs" +doc-scrape-examples = true +required-features = ["shader_format_wesl"] + +[package.metadata.example.shader_material_wesl] +name = "Material - WESL" +description = "A shader that uses WESL" +category = "Shaders" +wasm = true + [[example]] name = "custom_shader_instancing" path = "examples/shader/custom_shader_instancing.rs" @@ -2611,6 +2810,18 @@ description = "A shader that renders a mesh multiple times in one draw call usin category = "Shaders" wasm = true +[[example]] +name = "custom_render_phase" +path = "examples/shader/custom_render_phase.rs" +doc-scrape-examples = true + +[package.metadata.example.custom_render_phase] +name = "Custom Render Phase" +description = "Shows how to make a complete render phase" +category = "Shaders" +wasm = true + + [[example]] name = "automatic_instancing" path = "examples/shader/automatic_instancing.rs" @@ -2833,6 +3044,28 @@ description = "Displays many sprites in a grid arrangement! Used for performance category = "Stress Tests" wasm = true +[[example]] +name = "many_text2d" +path = "examples/stress_tests/many_text2d.rs" +doc-scrape-examples = true + +[package.metadata.example.many_text2d] +name = "Many Text2d" +description = "Displays many Text2d! Used for performance testing." 
+category = "Stress Tests" +wasm = true + +[[example]] +name = "many_materials" +path = "examples/stress_tests/many_materials.rs" +doc-scrape-examples = true + +[package.metadata.example.many_materials] +name = "Many Animated Materials" +description = "Benchmark to test rendering many animated materials" +category = "Stress Tests" +wasm = true + [[example]] name = "transform_hierarchy" path = "examples/stress_tests/transform_hierarchy.rs" @@ -3278,6 +3511,18 @@ description = "Creates a solid color window" category = "Window" wasm = true +[[example]] +name = "custom_cursor_image" +path = "examples/window/custom_cursor_image.rs" +doc-scrape-examples = true +required-features = ["custom_cursor"] + +[package.metadata.example.custom_cursor_image] +name = "Custom Cursor Image" +description = "Demonstrates creating an animated custom cursor from an image" +category = "Window" +wasm = true + [[example]] name = "custom_user_event" path = "examples/window/custom_user_event.rs" @@ -3560,6 +3805,17 @@ description = "A 2D top-down camera smoothly following player movements" category = "Camera" wasm = true +[[example]] +name = "custom_projection" +path = "examples/camera/custom_projection.rs" +doc-scrape-examples = true + +[package.metadata.example.custom_projection] +name = "Custom Projection" +description = "Shows how to create custom camera projections." 
+category = "Camera" +wasm = true + [[example]] name = "first_person_view_model" path = "examples/camera/first_person_view_model.rs" @@ -3827,7 +4083,18 @@ name = "Sprite Picking" description = "Demonstrates picking sprites and sprite atlases" category = "Picking" wasm = true -required-features = ["bevy_sprite_picking_backend"] + +[[example]] +name = "debug_picking" +path = "examples/picking/debug_picking.rs" +doc-scrape-examples = true +required-features = ["bevy_dev_tools"] + +[package.metadata.example.debug_picking] +name = "Picking Debug Tools" +description = "Demonstrates picking debug overlay" +category = "Picking" +wasm = true [[example]] name = "animation_masks" @@ -3886,6 +4153,18 @@ description = "Demonstrates how to make materials that use bindless textures" category = "Shaders" wasm = true +[[example]] +name = "specular_tint" +path = "examples/3d/specular_tint.rs" +doc-scrape-examples = true +required-features = ["pbr_specular_textures"] + +[package.metadata.example.specular_tint] +name = "Specular Tint" +description = "Demonstrates specular tints and maps" +category = "3D Rendering" +wasm = true + [profile.wasm-release] inherits = "release" opt-level = "z" @@ -3903,7 +4182,15 @@ panic = "abort" # for details on why this is needed. Since dependencies don't expect to be built # with `--cfg docsrs` (and thus fail to compile) we use a different cfg. rustc-args = ["--cfg", "docsrs_dep"] -rustdoc-args = ["-Zunstable-options", "--generate-link-to-definition"] +rustdoc-args = [ + "-Zunstable-options", + "--generate-link-to-definition", + # Embed tags to the top of documentation pages for common Bevy traits + # that are implemented by the current type, like `Component` or `Resource`. + # This makes it easier to see at a glance what types are used for. 
+ "--html-after-content", + "docs-rs/trait-tags.html", +] all-features = true cargo-args = ["-Zunstable-options", "-Zrustdoc-scrape-examples"] @@ -3944,11 +4231,11 @@ doc-scrape-examples = true hidden = true [[example]] -name = "testbed_ui_layout_rounding" -path = "examples/testbed/ui_layout_rounding.rs" +name = "testbed_full_ui" +path = "examples/testbed/full_ui.rs" doc-scrape-examples = true -[package.metadata.example.testbed_ui_layout_rounding] +[package.metadata.example.testbed_full_ui] hidden = true [[example]] @@ -3958,6 +4245,86 @@ doc-scrape-examples = true [package.metadata.example.tab_navigation] name = "Tab Navigation" -description = "Demonstration of Tab Navigation" +description = "Demonstration of Tab Navigation between UI elements" category = "UI (User Interface)" wasm = true + +[[example]] +name = "directional_navigation" +path = "examples/ui/directional_navigation.rs" +doc-scrape-examples = true + +[package.metadata.example.directional_navigation] +name = "Directional Navigation" +description = "Demonstration of Directional Navigation between UI elements" +category = "UI (User Interface)" +wasm = true + +[[example]] +name = "clustered_decals" +path = "examples/3d/clustered_decals.rs" +doc-scrape-examples = true + +[package.metadata.example.clustered_decals] +name = "Clustered Decals" +description = "Demonstrates clustered decals" +category = "3D Rendering" +wasm = false + +[[example]] +name = "occlusion_culling" +path = "examples/3d/occlusion_culling.rs" +doc-scrape-examples = true + +[package.metadata.example.occlusion_culling] +name = "Occlusion Culling" +description = "Demonstration of Occlusion Culling" +category = "3D Rendering" +wasm = false + +[[example]] +name = "camera_controller" +path = "examples/helpers/camera_controller.rs" +doc-scrape-examples = true +crate-type = ["lib"] + +[package.metadata.example.camera_controller] +name = "Camera Controller" +description = "Example Free-Cam Styled Camera Controller" +category = "Helpers" +wasm = 
true + +[[example]] +name = "widgets" +path = "examples/helpers/widgets.rs" +doc-scrape-examples = true +crate-type = ["lib"] + +[package.metadata.example.widgets] +name = "Widgets" +description = "Example UI Widgets" +category = "Helpers" +wasm = true + +[[example]] +name = "no_std_library" +path = "examples/no_std/library/src/lib.rs" +doc-scrape-examples = true +crate-type = ["lib"] + +[package.metadata.example.no_std_library] +name = "`no_std` Compatible Library" +description = "Example library compatible with `std` and `no_std` targets" +category = "Embedded" +wasm = true + +[[example]] +name = "extended_material_bindless" +path = "examples/shader/extended_material_bindless.rs" +doc-scrape-examples = true + +[package.metadata.example.extended_material_bindless] +name = "Extended Bindless Material" +description = "Demonstrates bindless `ExtendedMaterial`" +category = "Shaders" +wasm = false diff --git a/assets/cursors/kenney_crosshairPack/License.txt b/assets/cursors/kenney_crosshairPack/License.txt new file mode 100644 index 0000000000000..d6eaa6cb6b7d6 --- /dev/null +++ b/assets/cursors/kenney_crosshairPack/License.txt @@ -0,0 +1,19 @@ + + + Crosshair Pack + + by Kenney Vleugels (Kenney.nl) + + ------------------------------ + + License (Creative Commons Zero, CC0) + http://creativecommons.org/publicdomain/zero/1.0/ + + You may use these assets in personal and commercial projects. + Credit (Kenney or www.kenney.nl) would be nice but is not mandatory. 
+ + ------------------------------ + + Donate: http://support.kenney.nl + + Follow on Twitter for updates: @KenneyNL (www.twitter.com/kenneynl) diff --git a/assets/cursors/kenney_crosshairPack/Tilesheet/crosshairs_tilesheet_white.png b/assets/cursors/kenney_crosshairPack/Tilesheet/crosshairs_tilesheet_white.png new file mode 100644 index 0000000000000..76c8b2f851414 Binary files /dev/null and b/assets/cursors/kenney_crosshairPack/Tilesheet/crosshairs_tilesheet_white.png differ diff --git a/assets/external/.gitignore b/assets/external/.gitignore new file mode 100644 index 0000000000000..d6b7ef32c8478 --- /dev/null +++ b/assets/external/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/assets/models/animated/Fox.glb b/assets/models/animated/Fox.glb index 2bb946e2d4815..1ef5c0d05658c 100644 Binary files a/assets/models/animated/Fox.glb and b/assets/models/animated/Fox.glb differ diff --git a/assets/models/terrain/terrain.glb b/assets/models/terrain/terrain.glb new file mode 100644 index 0000000000000..b09e0f414832e Binary files /dev/null and b/assets/models/terrain/terrain.glb differ diff --git a/assets/scenes/load_scene_example.scn.ron b/assets/scenes/load_scene_example.scn.ron index 813deb251e0b1..e768a7b149c41 100644 --- a/assets/scenes/load_scene_example.scn.ron +++ b/assets/scenes/load_scene_example.scn.ron @@ -7,10 +7,7 @@ entities: { 4294967296: ( components: { - "bevy_ecs::name::Name": ( - hash: 17588334858059901562, - name: "joe", - ), + "bevy_ecs::name::Name": "joe", "bevy_transform::components::global_transform::GlobalTransform": ((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0)), "bevy_transform::components::transform::Transform": ( translation: (0.0, 0.0, 0.0), diff --git a/assets/shaders/automatic_instancing.wgsl b/assets/shaders/automatic_instancing.wgsl new file mode 100644 index 0000000000000..35276246b094e --- /dev/null +++ b/assets/shaders/automatic_instancing.wgsl @@ -0,0 +1,43 @@ +#import bevy_pbr::{ + mesh_functions, + 
view_transformations::position_world_to_clip +} + +@group(2) @binding(0) var texture: texture_2d; +@group(2) @binding(1) var texture_sampler: sampler; + +struct Vertex { + @builtin(instance_index) instance_index: u32, + @location(0) position: vec3, +}; + +struct VertexOutput { + @builtin(position) clip_position: vec4, + @location(0) world_position: vec4, + @location(1) color: vec4, +}; + +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + + // Lookup the tag for the given mesh + let tag = mesh_functions::get_tag(vertex.instance_index); + var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index); + out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); + out.clip_position = position_world_to_clip(out.world_position.xyz); + + let tex_dim = textureDimensions(texture); + // Find the texel coordinate as derived from the tag + let texel_coord = vec2(tag % tex_dim.x, tag / tex_dim.x); + + out.color = textureLoad(texture, texel_coord, 0); + return out; +} + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return mesh.color; +} \ No newline at end of file diff --git a/assets/shaders/bindless_material.wgsl b/assets/shaders/bindless_material.wgsl index 9d9d068d4c069..3de313b81afa9 100644 --- a/assets/shaders/bindless_material.wgsl +++ b/assets/shaders/bindless_material.wgsl @@ -1,14 +1,22 @@ #import bevy_pbr::forward_io::VertexOutput #import bevy_pbr::mesh_bindings::mesh +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} struct Color { base_color: vec4, } +// This structure is a mapping from bindless index to the index in the +// appropriate slab +struct MaterialBindings { + material: u32, // 0 + color_texture: u32, // 1 + color_texture_sampler: u32, // 2 +} + #ifdef BINDLESS -@group(2) @binding(0) var material_color: binding_array; -@group(2) @binding(1) var material_color_texture: binding_array, 4>; 
-@group(2) @binding(2) var material_color_sampler: binding_array; +@group(2) @binding(0) var materials: array; +@group(2) @binding(10) var material_color: binding_array; #else // BINDLESS @group(2) @binding(0) var material_color: Color; @group(2) @binding(1) var material_color_texture: texture_2d; @@ -19,15 +27,15 @@ struct Color { fn fragment(in: VertexOutput) -> @location(0) vec4 { #ifdef BINDLESS let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu; - let base_color = material_color[slot].base_color; + let base_color = material_color[materials[slot].material].base_color; #else // BINDLESS let base_color = material_color.base_color; #endif // BINDLESS return base_color * textureSampleLevel( #ifdef BINDLESS - material_color_texture[slot], - material_color_sampler[slot], + bindless_textures_2d[materials[slot].color_texture], + bindless_samplers_filtering[materials[slot].color_texture_sampler], #else // BINDLESS material_color_texture, material_color_sampler, diff --git a/assets/shaders/custom_clustered_decal.wgsl b/assets/shaders/custom_clustered_decal.wgsl new file mode 100644 index 0000000000000..6aaf408097ca9 --- /dev/null +++ b/assets/shaders/custom_clustered_decal.wgsl @@ -0,0 +1,86 @@ +// This shader, a part of the `clustered_decals` example, shows how to use the +// decal `tag` field to apply arbitrary decal effects. + +#import bevy_pbr::{ + clustered_forward, + decal::clustered, + forward_io::{VertexOutput, FragmentOutput}, + mesh_view_bindings, + pbr_fragment::pbr_input_from_standard_material, + pbr_functions::{alpha_discard, apply_pbr_lighting, main_pass_post_lighting_processing}, +} + +@fragment +fn fragment( + in: VertexOutput, + @builtin(front_facing) is_front: bool, +) -> FragmentOutput { + // Generate a `PbrInput` struct from the `StandardMaterial` bindings. + var pbr_input = pbr_input_from_standard_material(in, is_front); + + // Alpha discard. 
+ pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); + + // Apply the normal decals. + pbr_input.material.base_color = clustered::apply_decal_base_color( + in.world_position.xyz, + in.position.xy, + pbr_input.material.base_color + ); + + // Here we tint the color based on the tag of the decal. + // We could optionally do other things, such as adjust the normal based on a normal map. + let view_z = clustered::get_view_z(in.world_position.xyz); + let is_orthographic = clustered::view_is_orthographic(); + let cluster_index = + clustered_forward::fragment_cluster_index(in.position.xy, view_z, is_orthographic); + var clusterable_object_index_ranges = + clustered_forward::unpack_clusterable_object_index_ranges(cluster_index); + var decal_iterator = clustered::clustered_decal_iterator_new( + in.world_position.xyz, + &clusterable_object_index_ranges + ); + while (clustered::clustered_decal_iterator_next(&decal_iterator)) { + var decal_base_color = textureSampleLevel( + mesh_view_bindings::clustered_decal_textures[decal_iterator.texture_index], + mesh_view_bindings::clustered_decal_sampler, + decal_iterator.uv, + 0.0 + ); + + switch (decal_iterator.tag) { + case 1u: { + // Tint with red. + decal_base_color = vec4( + mix(pbr_input.material.base_color.rgb, vec3(1.0, 0.0, 0.0), 0.5), + decal_base_color.a, + ); + } + case 2u: { + // Tint with blue. + decal_base_color = vec4( + mix(pbr_input.material.base_color.rgb, vec3(0.0, 0.0, 1.0), 0.5), + decal_base_color.a, + ); + } + default: {} + } + + pbr_input.material.base_color = vec4( + mix(pbr_input.material.base_color.rgb, decal_base_color.rgb, decal_base_color.a), + pbr_input.material.base_color.a + decal_base_color.a + ); + } + + // Apply lighting. + var out: FragmentOutput; + out.color = apply_pbr_lighting(pbr_input); + + // Apply in-shader post processing (fog, alpha-premultiply, and also + // tonemapping, debanding if the camera is non-HDR). 
Note this does not + // include fullscreen postprocessing effects like bloom. + out.color = main_pass_post_lighting_processing(pbr_input, out.color); + + return out; +} + diff --git a/assets/shaders/custom_material.wesl b/assets/shaders/custom_material.wesl new file mode 100644 index 0000000000000..5113e1cbe0b39 --- /dev/null +++ b/assets/shaders/custom_material.wesl @@ -0,0 +1,20 @@ +import super::util::make_polka_dots; + +struct VertexOutput { + @builtin(position) position: vec4, + @location(2) uv: vec2, +} + +struct CustomMaterial { + // Needed for 16-bit alignment on WebGL2 + time: vec4, +} + +@group(2) @binding(0) var material: CustomMaterial; + +@fragment +fn fragment( + mesh: VertexOutput, +) -> @location(0) vec4 { + return make_polka_dots(mesh.uv, material.time.x); +} diff --git a/assets/shaders/custom_stencil.wgsl b/assets/shaders/custom_stencil.wgsl new file mode 100644 index 0000000000000..6f2fa2da4f977 --- /dev/null +++ b/assets/shaders/custom_stencil.wgsl @@ -0,0 +1,41 @@ +//! A shader showing how to use the vertex position data to output the +//! 
stencil in the right position + +// First we import everything we need from bevy_pbr +// A 2D shader would be very similar but import from bevy_sprite instead +#import bevy_pbr::{ + mesh_functions, + view_transformations::position_world_to_clip +} + +struct Vertex { + // This is needed if you are using batching and/or gpu preprocessing + // It's a built in so you don't need to define it in the vertex layout + @builtin(instance_index) instance_index: u32, + // Like we defined for the vertex layout + // position is at location 0 + @location(0) position: vec3<f32>, +}; + +// This is the output of the vertex shader and we also use it as the input for the fragment shader +struct VertexOutput { + @builtin(position) clip_position: vec4<f32>, + @location(0) world_position: vec4<f32>, +}; + +@vertex +fn vertex(vertex: Vertex) -> VertexOutput { + var out: VertexOutput; + // This is how bevy computes the world position + // The vertex.instance_index is very important. Especially if you are using batching and gpu preprocessing + var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index); + out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4<f32>(vertex.position, 1.0)); + out.clip_position = position_world_to_clip(out.world_position.xyz); + return out; +} + +@fragment +fn fragment(in: VertexOutput) -> @location(0) vec4<f32> { + // Output a red color to represent the stencil of the mesh + return vec4<f32>(1.0, 0.0, 0.0, 1.0); +} diff --git a/assets/shaders/custom_ui_material.wgsl b/assets/shaders/custom_ui_material.wgsl index ef000aef7e87e..528fa55302fab 100644 --- a/assets/shaders/custom_ui_material.wgsl +++ b/assets/shaders/custom_ui_material.wgsl @@ -2,7 +2,7 @@ #import bevy_ui::ui_vertex_output::UiVertexOutput @group(1) @binding(0) var<uniform> color: vec4<f32>; -@group(1) @binding(1) var<uniform> slider: f32; +@group(1) @binding(1) var<uniform> slider: vec4<f32>; @group(1) @binding(2) var material_color_texture: texture_2d<f32>; @group(1) @binding(3) var material_color_sampler:
sampler; @group(1) @binding(4) var border_color: vec4; @@ -10,17 +10,47 @@ @fragment fn fragment(in: UiVertexOutput) -> @location(0) vec4 { - let r = in.uv - 0.5; + let output_color = textureSample(material_color_texture, material_color_sampler, in.uv) * color; + + // half size of the UI node + let half_size = 0.5 * in.size; + + // position relative to the center of the UI node + let p = in.uv * in.size - half_size; + + // thickness of the border closest to the current position let b = vec2( - select(in.border_widths.x, in.border_widths.y, r.x < 0.), - select(in.border_widths.z, in.border_widths.w, r.y < 0.) + select(in.border_widths.x, in.border_widths.z, 0. < p.x), + select(in.border_widths.y, in.border_widths.w, 0. < p.y) ); - if any(0.5 - b < abs(r)) { - return border_color; + // select radius for the nearest corner + let rs = select(in.border_radius.xy, in.border_radius.wz, 0.0 < p.y); + let radius = select(rs.x, rs.y, 0.0 < p.x); + + // distance along each axis from the corner + let d = half_size - abs(p); + + // if the distance to the edge from the current position on any axis + // is less than the border width on that axis then the position is within + // the border and we return the border color + if d.x < b.x || d.y < b.y { + // select radius for the nearest corner + let rs = select(in.border_radius.xy, in.border_radius.wz, 0.0 < p.y); + let radius = select(rs.x, rs.y, 0.0 < p.x); + + // determine if the point is inside the curved corner and return the corresponding color + let q = radius - d; + if radius < min(max(q.x, q.y), 0.0) + length(vec2(max(q.x, 0.0), max(q.y, 0.0))) { + return vec4(0.0); + } else { + return border_color; + } } - if in.uv.x < slider { + // sample the texture at this position if it's to the left of the slider value + // otherwise return a fully transparent color + if in.uv.x < slider.x { let output_color = textureSample(material_color_texture, material_color_sampler, in.uv) * color; return output_color; } else { diff --git 
a/assets/shaders/extended_material_bindless.wgsl b/assets/shaders/extended_material_bindless.wgsl new file mode 100644 index 0000000000000..f8650b0da7f60 --- /dev/null +++ b/assets/shaders/extended_material_bindless.wgsl @@ -0,0 +1,107 @@ +// The shader that goes with `extended_material_bindless.rs`. +// +// This code demonstrates how to write shaders that are compatible with both +// bindless and non-bindless mode. See the `#ifdef BINDLESS` blocks. + +#import bevy_pbr::{ + forward_io::{FragmentOutput, VertexOutput}, + mesh_bindings::mesh, + pbr_fragment::pbr_input_from_standard_material, + pbr_functions::{apply_pbr_lighting, main_pass_post_lighting_processing}, +} +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} + +#ifdef BINDLESS +#import bevy_pbr::pbr_bindings::{material_array, material_indices} +#else // BINDLESS +#import bevy_pbr::pbr_bindings::material +#endif // BINDLESS + +// Stores the indices of the bindless resources in the bindless resource arrays, +// for the `ExampleBindlessExtension` fields. +struct ExampleBindlessExtendedMaterialIndices { + // The index of the `ExampleBindlessExtendedMaterial` data in + // `example_extended_material`. + material: u32, + // The index of the texture we're going to modulate the base color with in + // the `bindless_textures_2d` array. + modulate_texture: u32, + // The index of the sampler we're going to sample the modulated texture with + // in the `bindless_samplers_filtering` array. + modulate_texture_sampler: u32, +} + +// Plain data associated with this example material. +struct ExampleBindlessExtendedMaterial { + // The color that we multiply the base color, base color texture, and + // modulated texture with. + modulate_color: vec4, +} + +#ifdef BINDLESS + +// The indices of the bindless resources in the bindless resource arrays, for +// the `ExampleBindlessExtension` fields. 
+@group(2) @binding(100) var example_extended_material_indices: + array; +// An array that holds the `ExampleBindlessExtendedMaterial` plain old data, +// indexed by `ExampleBindlessExtendedMaterialIndices.material`. +@group(2) @binding(101) var example_extended_material: + array; + +#else // BINDLESS + +// In non-bindless mode, we simply use a uniform for the plain old data. +@group(2) @binding(50) var example_extended_material: ExampleBindlessExtendedMaterial; +@group(2) @binding(51) var modulate_texture: texture_2d; +@group(2) @binding(52) var modulate_sampler: sampler; + +#endif // BINDLESS + +@fragment +fn fragment( + in: VertexOutput, + @builtin(front_facing) is_front: bool, +) -> FragmentOutput { +#ifdef BINDLESS + // Fetch the material slot. We'll use this in turn to fetch the bindless + // indices from `example_extended_material_indices`. + let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu; +#endif // BINDLESS + + // Generate a `PbrInput` struct from the `StandardMaterial` bindings. + var pbr_input = pbr_input_from_standard_material(in, is_front); + + // Calculate the UV for the texture we're about to sample. +#ifdef BINDLESS + let uv_transform = material_array[material_indices[slot].material].uv_transform; +#else // BINDLESS + let uv_transform = material.uv_transform; +#endif // BINDLESS + let uv = (uv_transform * vec3(in.uv, 1.0)).xy; + + // Multiply the base color by the `modulate_texture` and `modulate_color`. +#ifdef BINDLESS + // Notice how we fetch the texture, sampler, and plain extended material + // data from the appropriate arrays. 
+ pbr_input.material.base_color *= textureSample( + bindless_textures_2d[example_extended_material_indices[slot].modulate_texture], + bindless_samplers_filtering[ + example_extended_material_indices[slot].modulate_texture_sampler + ], + uv + ) * example_extended_material[example_extended_material_indices[slot].material].modulate_color; +#else // BINDLESS + pbr_input.material.base_color *= textureSample(modulate_texture, modulate_sampler, uv) * + example_extended_material.modulate_color; +#endif // BINDLESS + + var out: FragmentOutput; + // Apply lighting. + out.color = apply_pbr_lighting(pbr_input); + // Apply in-shader post processing (fog, alpha-premultiply, and also + // tonemapping, debanding if the camera is non-HDR). Note this does not + // include fullscreen postprocessing effects like bloom. + out.color = main_pass_post_lighting_processing(pbr_input, out.color); + return out; +} diff --git a/assets/shaders/specialized_mesh_pipeline.wgsl b/assets/shaders/specialized_mesh_pipeline.wgsl index 82b5cea911658..29c9069ec88d7 100644 --- a/assets/shaders/specialized_mesh_pipeline.wgsl +++ b/assets/shaders/specialized_mesh_pipeline.wgsl @@ -2,7 +2,7 @@ //! between the vertex and fragment shader. Also shows the custom vertex layout. // First we import everything we need from bevy_pbr -// A 2d shader would be vevry similar but import from bevy_sprite instead +// A 2D shader would be very similar but import from bevy_sprite instead #import bevy_pbr::{ mesh_functions, view_transformations::position_world_to_clip @@ -30,7 +30,7 @@ struct VertexOutput { fn vertex(vertex: Vertex) -> VertexOutput { var out: VertexOutput; // This is how bevy computes the world position - // The vertex.instance_index is very important. Esepecially if you are using batching and gpu preprocessing + // The vertex.instance_index is very important. 
Especially if you are using batching and gpu preprocessing var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index); out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); out.clip_position = position_world_to_clip(out.world_position.xyz); @@ -45,4 +45,4 @@ fn vertex(vertex: Vertex) -> VertexOutput { fn fragment(in: VertexOutput) -> @location(0) vec4 { // output the color directly return vec4(in.color, 1.0); -} \ No newline at end of file +} diff --git a/assets/shaders/storage_buffer.wgsl b/assets/shaders/storage_buffer.wgsl index c052411e3f198..c27053b9a21b3 100644 --- a/assets/shaders/storage_buffer.wgsl +++ b/assets/shaders/storage_buffer.wgsl @@ -19,14 +19,12 @@ struct VertexOutput { @vertex fn vertex(vertex: Vertex) -> VertexOutput { var out: VertexOutput; + let tag = mesh_functions::get_tag(vertex.instance_index); var world_from_local = mesh_functions::get_world_from_local(vertex.instance_index); out.world_position = mesh_functions::mesh_position_local_to_world(world_from_local, vec4(vertex.position, 1.0)); out.clip_position = position_world_to_clip(out.world_position.xyz); - // We have 5 colors in the storage buffer, but potentially many instances of the mesh, so - // we use the instance index to select a color from the storage buffer. - out.color = colors[vertex.instance_index % 5]; - + out.color = colors[tag]; return out; } diff --git a/assets/shaders/tonemapping_test_patterns.wgsl b/assets/shaders/tonemapping_test_patterns.wgsl index 7fe88bf5485b3..2237bdf6c5772 100644 --- a/assets/shaders/tonemapping_test_patterns.wgsl +++ b/assets/shaders/tonemapping_test_patterns.wgsl @@ -9,19 +9,19 @@ #import bevy_core_pipeline::tonemapping::tone_mapping #endif -// Sweep across hues on y axis with value from 0.0 to +15EV across x axis +// Sweep across hues on y axis with value from 0.0 to +15EV across x axis // quantized into 24 steps for both axis. 
fn color_sweep(uv_input: vec2) -> vec3 { var uv = uv_input; let steps = 24.0; uv.y = uv.y * (1.0 + 1.0 / steps); let ratio = 2.0; - + let h = PI * 2.0 * floor(1.0 + steps * uv.y) / steps; let L = floor(uv.x * steps * ratio) / (steps * ratio) - 0.5; - + var color = vec3(0.0); - if uv.y < 1.0 { + if uv.y < 1.0 { color = cos(h + vec3(0.0, 1.0, 2.0) * PI * 2.0 / 3.0); let maxRGB = max(color.r, max(color.g, color.b)); let minRGB = min(color.r, min(color.g, color.b)); diff --git a/assets/shaders/util.wesl b/assets/shaders/util.wesl new file mode 100644 index 0000000000000..ebbf023926ec2 --- /dev/null +++ b/assets/shaders/util.wesl @@ -0,0 +1,44 @@ +fn make_polka_dots(pos: vec2, time: f32) -> vec4 { + let scaled_pos = pos * 6.0; + let cell = vec2(fract(scaled_pos.x), fract(scaled_pos.y)); + var dist_from_center = distance(cell, vec2(0.5)); + + let is_even = (floor(scaled_pos.x) + floor(scaled_pos.y)) % 2.0; + + var dot_color = vec3(0.0); + var is_dot = 0.0; + + @if(!PARTY_MODE) { + let color1 = vec3(1.0, 0.4, 0.8); // pink + let color2 = vec3(0.6, 0.2, 1.0); // purple + dot_color = mix(color1, color2, is_even); + is_dot = step(dist_from_center, 0.3); + } @else { + let grid_x = floor(scaled_pos.x); + let grid_y = floor(scaled_pos.y); + let wave_speed = 3.0; + let wave_phase = time * wave_speed; + + let diagonal_pos = (grid_x + grid_y) * 0.5; + let wave_value = sin(diagonal_pos + wave_phase); + + let wave_normalized = (wave_value + 1.0) * 0.5; + + let color1 = vec3(1.0, 0.3, 0.7); + let color2 = vec3(0.5, 0.1, 1.0); + let intense_color1 = vec3(1.0, 0.1, 0.9); + let intense_color2 = vec3(0.8, 0.0, 1.0); + + let animated_color1 = mix(color1, intense_color1, wave_normalized); + let animated_color2 = mix(color2, intense_color2, wave_normalized); + + dot_color = mix(animated_color1, animated_color2, is_even); + + let size_mod = 0.15 * wave_value; + dist_from_center = dist_from_center * (1.0 - size_mod); + // Animate whether something is a dot by position but also time + is_dot = 
step(dist_from_center, 0.3 + wave_normalized * 0.2); + } + + return vec4(dot_color * is_dot, 1.0); +} diff --git a/assets/textures/AlphaNoise.png b/assets/textures/AlphaNoise.png new file mode 100644 index 0000000000000..4fa9518103575 Binary files /dev/null and b/assets/textures/AlphaNoise.png differ diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 2ed96d3a48c8c..3f547d80d984b 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -1,17 +1,21 @@ [package] name = "benches" -edition = "2021" +edition = "2024" description = "Benchmarks that test Bevy's performance" publish = false license = "MIT OR Apache-2.0" # Do not automatically discover benchmarks, we specify them manually instead. autobenches = false +[dependencies] +# The primary crate that runs and analyzes our benchmarks. This is a regular dependency because the +# `bench!` macro refers to it in its documentation. +criterion = { version = "0.5.1", features = ["html_reports"] } + [dev-dependencies] # Bevy crates bevy_app = { path = "../crates/bevy_app" } bevy_ecs = { path = "../crates/bevy_ecs", features = ["multi_threaded"] } -bevy_hierarchy = { path = "../crates/bevy_hierarchy" } bevy_math = { path = "../crates/bevy_math" } bevy_picking = { path = "../crates/bevy_picking", features = [ "bevy_mesh_picking_backend", @@ -20,9 +24,11 @@ bevy_reflect = { path = "../crates/bevy_reflect", features = ["functions"] } bevy_render = { path = "../crates/bevy_render" } bevy_tasks = { path = "../crates/bevy_tasks" } bevy_utils = { path = "../crates/bevy_utils" } +bevy_platform = { path = "../crates/bevy_platform", default-features = false, features = [ + "std", +] } # Other crates -criterion = { version = "0.5.1", features = ["html_reports"] } glam = "0.29" rand = "0.8" rand_chacha = "0.3" @@ -43,6 +49,8 @@ type_complexity = "allow" undocumented_unsafe_blocks = "warn" unwrap_or_default = "warn" needless_lifetimes = "allow" +too_many_arguments = "allow" +nonstandard_macro_braces = "warn" ptr_as_ptr = "warn" 
ptr_cast_constness = "warn" @@ -51,15 +59,20 @@ ref_as_ptr = "warn" # see: https://github.com/bevyengine/bevy/pull/15375#issuecomment-2366966219 too_long_first_doc_paragraph = "allow" +allow_attributes = "warn" +allow_attributes_without_reason = "warn" + [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ['cfg(docsrs_dep)'] } unsafe_op_in_unsafe_fn = "warn" unused_qualifications = "warn" -[[bench]] -name = "entity_cloning" -path = "benches/bevy_ecs/entity_cloning.rs" -harness = false +[lib] +# This fixes the "Unrecognized Option" error when running commands like +# `cargo bench -- --save-baseline before` by disabling the default benchmark harness. +# See +# for more information. +bench = false [[bench]] name = "ecs" diff --git a/benches/README.md b/benches/README.md index 2e91916e481f1..5f256f877bc51 100644 --- a/benches/README.md +++ b/benches/README.md @@ -25,10 +25,10 @@ cargo bench -p benches -- name_fragment cargo bench -p benches -- --list # Save a baseline to be compared against later. -cargo bench -p benches --save-baseline before +cargo bench -p benches -- --save-baseline before # Compare the current benchmarks against a baseline to find performance gains and regressions. 
-cargo bench -p benches --baseline before +cargo bench -p benches -- --baseline before ``` ## Criterion diff --git a/benches/benches/bevy_ecs/change_detection.rs b/benches/benches/bevy_ecs/change_detection.rs index ca57da93fabe6..92f3251abc2b4 100644 --- a/benches/benches/bevy_ecs/change_detection.rs +++ b/benches/benches/bevy_ecs/change_detection.rs @@ -1,3 +1,5 @@ +use core::hint::black_box; + use bevy_ecs::{ component::{Component, Mutable}, entity::Entity, @@ -5,7 +7,7 @@ use bevy_ecs::{ query::QueryFilter, world::World, }; -use criterion::{black_box, criterion_group, Criterion}; +use criterion::{criterion_group, Criterion}; use rand::{prelude::SliceRandom, SeedableRng}; use rand_chacha::ChaCha8Rng; @@ -93,7 +95,7 @@ fn all_added_detection_generic(group: &mut BenchGroup, e let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); @@ -141,7 +143,7 @@ fn all_changed_detection_generic + Default + let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); @@ -194,7 +196,7 @@ fn few_changed_detection_generic + Default + let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { for entity in query.iter(world) { black_box(entity); } @@ -235,7 +237,7 @@ fn none_changed_detection_generic + Default>( let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); @@ -265,7 +267,7 @@ fn none_changed_detection(criterion: &mut Criterion) { } } fn insert_if_bit_enabled(entity: &mut EntityWorldMut, i: u16) { - if i & 1 << B != 0 { + if i & (1 << B) != 0 { entity.insert(Data::(1.0)); } } @@ -341,7 +343,7 @@ fn 
multiple_archetype_none_changed_detection_generic< let query = generic_filter_query::>(&mut world); (world, query) }, - |(ref mut world, ref mut query)| { + |(world, query)| { let mut count = 0; for entity in query.iter(world) { black_box(entity); diff --git a/benches/benches/bevy_ecs/components/add_remove.rs b/benches/benches/bevy_ecs/components/add_remove.rs index b381ccb434fe5..9b654e7a82ec1 100644 --- a/benches/benches/bevy_ecs/components/add_remove.rs +++ b/benches/benches/bevy_ecs/components/add_remove.rs @@ -12,7 +12,7 @@ impl Benchmark { let mut world = World::default(); let entities = world - .spawn_batch(core::iter::repeat(A(0.)).take(10000)) + .spawn_batch(core::iter::repeat_n(A(0.), 10_000)) .collect(); Self(world, entities) } diff --git a/benches/benches/bevy_ecs/components/add_remove_very_big_table.rs b/benches/benches/bevy_ecs/components/add_remove_very_big_table.rs index 1a4f238cd32c2..72555be8c28b0 100644 --- a/benches/benches/bevy_ecs/components/add_remove_very_big_table.rs +++ b/benches/benches/bevy_ecs/components/add_remove_very_big_table.rs @@ -1,4 +1,7 @@ -#![allow(dead_code)] +#![expect( + dead_code, + reason = "The `Mat4`s in the structs are used to bloat the size of the structs for benchmarking purposes." 
+)] use bevy_ecs::prelude::*; use glam::*; diff --git a/benches/benches/bevy_ecs/components/archetype_updates.rs b/benches/benches/bevy_ecs/components/archetype_updates.rs index b11c5b2b7576e..2908332ea5e5f 100644 --- a/benches/benches/bevy_ecs/components/archetype_updates.rs +++ b/benches/benches/bevy_ecs/components/archetype_updates.rs @@ -22,7 +22,7 @@ fn setup(system_count: usize) -> (World, Schedule) { } fn insert_if_bit_enabled(entity: &mut EntityWorldMut, i: u16) { - if i & 1 << B != 0 { + if i & (1 << B) != 0 { entity.insert(A::(1.0)); } } diff --git a/benches/benches/bevy_ecs/empty_archetypes.rs b/benches/benches/bevy_ecs/empty_archetypes.rs index daec970b74a87..e5e7639066fe8 100644 --- a/benches/benches/bevy_ecs/empty_archetypes.rs +++ b/benches/benches/bevy_ecs/empty_archetypes.rs @@ -1,5 +1,7 @@ +use core::hint::black_box; + use bevy_ecs::{component::Component, prelude::*, schedule::ExecutorKind, world::World}; -use criterion::{black_box, criterion_group, BenchmarkId, Criterion}; +use criterion::{criterion_group, BenchmarkId, Criterion}; criterion_group!(benches, empty_archetypes); @@ -103,49 +105,49 @@ fn add_archetypes(world: &mut World, count: u16) { e.insert(A::<10>(1.0)); e.insert(A::<11>(1.0)); e.insert(A::<12>(1.0)); - if i & 1 << 1 != 0 { + if i & (1 << 1) != 0 { e.insert(A::<13>(1.0)); } - if i & 1 << 2 != 0 { + if i & (1 << 2) != 0 { e.insert(A::<14>(1.0)); } - if i & 1 << 3 != 0 { + if i & (1 << 3) != 0 { e.insert(A::<15>(1.0)); } - if i & 1 << 4 != 0 { + if i & (1 << 4) != 0 { e.insert(A::<16>(1.0)); } - if i & 1 << 5 != 0 { + if i & (1 << 5) != 0 { e.insert(A::<18>(1.0)); } - if i & 1 << 6 != 0 { + if i & (1 << 6) != 0 { e.insert(A::<19>(1.0)); } - if i & 1 << 7 != 0 { + if i & (1 << 7) != 0 { e.insert(A::<20>(1.0)); } - if i & 1 << 8 != 0 { + if i & (1 << 8) != 0 { e.insert(A::<21>(1.0)); } - if i & 1 << 9 != 0 { + if i & (1 << 9) != 0 { e.insert(A::<22>(1.0)); } - if i & 1 << 10 != 0 { + if i & (1 << 10) != 0 { e.insert(A::<23>(1.0)); } - 
if i & 1 << 11 != 0 { + if i & (1 << 11) != 0 { e.insert(A::<24>(1.0)); } - if i & 1 << 12 != 0 { + if i & (1 << 12) != 0 { e.insert(A::<25>(1.0)); } - if i & 1 << 13 != 0 { + if i & (1 << 13) != 0 { e.insert(A::<26>(1.0)); } - if i & 1 << 14 != 0 { + if i & (1 << 14) != 0 { e.insert(A::<27>(1.0)); } - if i & 1 << 15 != 0 { + if i & (1 << 15) != 0 { e.insert(A::<28>(1.0)); } } diff --git a/benches/benches/bevy_ecs/entity_cloning.rs b/benches/benches/bevy_ecs/entity_cloning.rs index 51af20b7b187d..0eaae27ce4b00 100644 --- a/benches/benches/bevy_ecs/entity_cloning.rs +++ b/benches/benches/bevy_ecs/entity_cloning.rs @@ -1,171 +1,238 @@ +use core::hint::black_box; + +use benches::bench; use bevy_ecs::bundle::Bundle; +use bevy_ecs::component::ComponentCloneBehavior; +use bevy_ecs::entity::EntityCloner; +use bevy_ecs::hierarchy::ChildOf; use bevy_ecs::reflect::AppTypeRegistry; -use bevy_ecs::{component::Component, reflect::ReflectComponent, world::World}; -use bevy_hierarchy::{BuildChildren, CloneEntityHierarchyExt}; +use bevy_ecs::{component::Component, world::World}; use bevy_math::Mat4; use bevy_reflect::{GetTypeRegistration, Reflect}; -use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion}; +use criterion::{criterion_group, Bencher, Criterion, Throughput}; -criterion_group!(benches, reflect_benches, clone_benches); -criterion_main!(benches); +criterion_group!( + benches, + single, + hierarchy_tall, + hierarchy_wide, + hierarchy_many, +); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C1(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C2(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C3(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C4(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C5(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct 
C6(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C7(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C8(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C9(Mat4); #[derive(Component, Reflect, Default, Clone)] -#[reflect(Component)] struct C10(Mat4); type ComplexBundle = (C1, C2, C3, C4, C5, C6, C7, C8, C9, C10); -fn hierarchy( +/// Sets the [`ComponentCloneHandler`] for all explicit and required components in a bundle `B` to +/// use the [`Reflect`] trait instead of [`Clone`]. +fn reflection_cloner( + world: &mut World, + linked_cloning: bool, +) -> EntityCloner { + // Get mutable access to the type registry, creating it if it does not exist yet. + let registry = world.get_resource_or_init::(); + + // Recursively register all components in the bundle to the reflection type registry. + { + let mut r = registry.write(); + r.register::(); + } + + // Recursively register all components in the bundle, then save the component IDs to a list. + // This uses `contributed_components()`, meaning both explicit and required component IDs in + // this bundle are saved. + let component_ids: Vec<_> = world.register_bundle::().contributed_components().into(); + + let mut builder = EntityCloner::build(world); + + // Overwrite the clone handler for all components in the bundle to use `Reflect`, not `Clone`. + for component in component_ids { + builder.override_clone_behavior_with_id(component, ComponentCloneBehavior::reflect()); + } + builder.linked_cloning(linked_cloning); + + builder.finish() +} + +/// A helper function that benchmarks running the [`EntityCommands::clone_and_spawn()`] command on a +/// bundle `B`. +/// +/// The bundle must implement [`Default`], which is used to create the first entity that gets cloned +/// in the benchmark. 
+/// +/// If `clone_via_reflect` is false, this will use the default [`ComponentCloneHandler`] for all +/// components (which is usually [`ComponentCloneHandler::clone_handler()`]). If `clone_via_reflect` +/// is true, it will overwrite the handler for all components in the bundle to be +/// [`ComponentCloneHandler::reflect_handler()`]. +fn bench_clone( + b: &mut Bencher, + clone_via_reflect: bool, +) { + let mut world = World::default(); + + let mut cloner = if clone_via_reflect { + reflection_cloner::(&mut world, false) + } else { + EntityCloner::default() + }; + + // Spawn the first entity, which will be cloned in the benchmark routine. + let id = world.spawn(B::default()).id(); + + b.iter(|| { + // clones the given entity + cloner.spawn_clone(&mut world, black_box(id)); + world.flush(); + }); +} + +/// A helper function that benchmarks running the [`EntityCommands::clone_and_spawn()`] command on a +/// bundle `B`. +/// +/// As compared to [`bench_clone()`], this benchmarks recursively cloning an entity with several +/// children. It does so by setting up an entity tree with a given `height` where each entity has a +/// specified number of `children`. +/// +/// For example, setting `height` to 5 and `children` to 1 creates a single chain of entities with +/// no siblings. Alternatively, setting `height` to 1 and `children` to 5 will spawn 5 direct +/// children of the root entity. 
+fn bench_clone_hierarchy( b: &mut Bencher, - width: usize, height: usize, + children: usize, clone_via_reflect: bool, ) { let mut world = World::default(); - let registry = AppTypeRegistry::default(); - { - let mut r = registry.write(); - r.register::(); - } - world.insert_resource(registry); - world.register_bundle::(); - if clone_via_reflect { - let mut components = Vec::new(); - C::get_component_ids(world.components(), &mut |id| components.push(id.unwrap())); - for component in components { - world - .get_component_clone_handlers_mut() - .set_component_handler( - component, - bevy_ecs::component::ComponentCloneHandler::reflect_handler(), - ); - } - } - let id = world.spawn(black_box(C::default())).id(); + let mut cloner = if clone_via_reflect { + reflection_cloner::(&mut world, true) + } else { + let mut builder = EntityCloner::build(&mut world); + builder.linked_cloning(true); + builder.finish() + }; + + // Make the clone command recursive, so children are cloned as well. + + // Spawn the first entity, which will be cloned in the benchmark routine. + let id = world.spawn(B::default()).id(); let mut hierarchy_level = vec![id]; + // Set up the hierarchy tree by spawning all children. 
for _ in 0..height { let current_hierarchy_level = hierarchy_level.clone(); + hierarchy_level.clear(); - for parent_id in current_hierarchy_level { - for _ in 0..width { - let child_id = world - .spawn(black_box(C::default())) - .set_parent(parent_id) - .id(); + + for parent in current_hierarchy_level { + for _ in 0..children { + let child_id = world.spawn((B::default(), ChildOf(parent))).id(); hierarchy_level.push(child_id); } } } - world.flush(); - b.iter(move || { - world.commands().entity(id).clone_and_spawn_with(|builder| { - builder.recursive(true); - }); + b.iter(|| { + cloner.spawn_clone(&mut world, black_box(id)); world.flush(); }); } -fn simple(b: &mut Bencher, clone_via_reflect: bool) { - let mut world = World::default(); - let registry = AppTypeRegistry::default(); - { - let mut r = registry.write(); - r.register::(); +// Each benchmark runs twice: using either the `Clone` or `Reflect` traits to clone entities. This +// constant represents this as an easy array that can be used in a `for` loop. +const SCENARIOS: [(&str, bool); 2] = [("clone", false), ("reflect", true)]; + +/// Benchmarks cloning a single entity with 10 components and no children. +fn single(c: &mut Criterion) { + let mut group = c.benchmark_group(bench!("single")); + + // We're cloning 1 entity. + group.throughput(Throughput::Elements(1)); + + for (id, clone_via_reflect) in SCENARIOS { + group.bench_function(id, |b| { + bench_clone::(b, clone_via_reflect); + }); } - world.insert_resource(registry); - world.register_bundle::(); - if clone_via_reflect { - let mut components = Vec::new(); - C::get_component_ids(world.components(), &mut |id| components.push(id.unwrap())); - for component in components { - world - .get_component_clone_handlers_mut() - .set_component_handler( - component, - bevy_ecs::component::ComponentCloneHandler::reflect_handler(), - ); - } + + group.finish(); +} + +/// Benchmarks cloning an an entity and its 50 descendents, each with only 1 component. 
+fn hierarchy_tall(c: &mut Criterion) { + let mut group = c.benchmark_group(bench!("hierarchy_tall")); + + // We're cloning both the root entity and its 50 descendents. + group.throughput(Throughput::Elements(51)); + + for (id, clone_via_reflect) in SCENARIOS { + group.bench_function(id, |b| { + bench_clone_hierarchy::(b, 50, 1, clone_via_reflect); + }); } - let id = world.spawn(black_box(C::default())).id(); - b.iter(move || { - world.commands().entity(id).clone_and_spawn(); - world.flush(); - }); + group.finish(); } -fn reflect_benches(c: &mut Criterion) { - c.bench_function("many components reflect", |b| { - simple::(b, true); - }); +/// Benchmarks cloning an an entity and its 50 direct children, each with only 1 component. +fn hierarchy_wide(c: &mut Criterion) { + let mut group = c.benchmark_group(bench!("hierarchy_wide")); - c.bench_function("hierarchy wide reflect", |b| { - hierarchy::(b, 10, 4, true); - }); + // We're cloning both the root entity and its 50 direct children. + group.throughput(Throughput::Elements(51)); - c.bench_function("hierarchy tall reflect", |b| { - hierarchy::(b, 1, 50, true); - }); + for (id, clone_via_reflect) in SCENARIOS { + group.bench_function(id, |b| { + bench_clone_hierarchy::(b, 1, 50, clone_via_reflect); + }); + } - c.bench_function("hierarchy many reflect", |b| { - hierarchy::(b, 5, 5, true); - }); + group.finish(); } -fn clone_benches(c: &mut Criterion) { - c.bench_function("many components clone", |b| { - simple::(b, false); - }); +/// Benchmarks cloning a large hierarchy of entities with several children each. Each entity has 10 +/// components. +fn hierarchy_many(c: &mut Criterion) { + let mut group = c.benchmark_group(bench!("hierarchy_many")); - c.bench_function("hierarchy wide clone", |b| { - hierarchy::(b, 10, 4, false); - }); + // We're cloning 364 entities total. This number was calculated by manually counting the number + // of entities spawned in `bench_clone_hierarchy()` with a `println!()` statement. 
:) + group.throughput(Throughput::Elements(364)); - c.bench_function("hierarchy tall clone", |b| { - hierarchy::(b, 1, 50, false); - }); + for (id, clone_via_reflect) in SCENARIOS { + group.bench_function(id, |b| { + bench_clone_hierarchy::(b, 5, 3, clone_via_reflect); + }); + } - c.bench_function("hierarchy many clone", |b| { - hierarchy::(b, 5, 5, false); - }); + group.finish(); } diff --git a/benches/benches/bevy_ecs/iteration/iter_simple.rs b/benches/benches/bevy_ecs/iteration/iter_simple.rs index 1fc86f5087679..14cca69082752 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple.rs @@ -19,15 +19,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs index f0a41d18be53b..19396e95b0820 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach.rs @@ -19,15 +19,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs index 0075c2706ba20..1e0db505c1667 100644 --- 
a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_sparse_set.rs @@ -21,15 +21,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs index 7dbd11d1e0499..505d624eb8164 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide.rs @@ -33,8 +33,8 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -47,9 +47,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs index f520ffde42662..88b58be0f250a 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_foreach_wide_sparse_set.rs @@ -35,8 +35,8 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -49,9 
+49,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs index e4ba3759412c7..ed1c531c1ded8 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_sparse_set.rs @@ -21,15 +21,15 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query::<(&Velocity, &mut Position)>(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_system.rs b/benches/benches/bevy_ecs/iteration/iter_simple_system.rs index 18918ee234f9f..2b6e8287218c2 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_system.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_system.rs @@ -19,15 +19,15 @@ impl Benchmark { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); fn query_system(mut query: Query<(&Velocity, &mut Position)>) { for (velocity, mut position) in &mut query { diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs b/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs index 7d013b3bf6003..dccd1fe8b362b 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_wide.rs @@ -33,8 +33,8 @@ impl<'w> Benchmark<'w> { pub fn new() 
-> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -47,9 +47,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs b/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs index 28a6dbd85dc28..49677dc1b9ac8 100644 --- a/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs +++ b/benches/benches/bevy_ecs/iteration/iter_simple_wide_sparse_set.rs @@ -35,8 +35,8 @@ impl<'w> Benchmark<'w> { pub fn new() -> Self { let mut world = World::new(); - world.spawn_batch( - core::iter::repeat(( + world.spawn_batch(core::iter::repeat_n( + ( Transform(Mat4::from_scale(Vec3::ONE)), Rotation(Vec3::X), Position::<0>(Vec3::X), @@ -49,9 +49,9 @@ impl<'w> Benchmark<'w> { Velocity::<3>(Vec3::X), Position::<4>(Vec3::X), Velocity::<4>(Vec3::X), - )) - .take(10_000), - ); + ), + 10_000, + )); let query = world.query(); Self(world, query) diff --git a/benches/benches/bevy_ecs/iteration/par_iter_simple.rs b/benches/benches/bevy_ecs/iteration/par_iter_simple.rs index dfd3f9dfdab0d..92259cb98fecf 100644 --- a/benches/benches/bevy_ecs/iteration/par_iter_simple.rs +++ b/benches/benches/bevy_ecs/iteration/par_iter_simple.rs @@ -19,7 +19,7 @@ struct Data(f32); pub struct Benchmark<'w>(World, QueryState<(&'w Velocity, &'w mut Position)>); fn insert_if_bit_enabled(entity: &mut EntityWorldMut, i: u16) { - if i & 1 << B != 0 { + if i & (1 << B) != 0 { entity.insert(Data::(1.0)); } } @@ -30,15 +30,15 @@ impl<'w> Benchmark<'w> { let mut world = World::new(); - let iter = world.spawn_batch( - core::iter::repeat(( + let iter = world.spawn_batch(core::iter::repeat_n( + ( 
Transform(Mat4::from_scale(Vec3::ONE)), Position(Vec3::X), Rotation(Vec3::X), Velocity(Vec3::X), - )) - .take(100_000), - ); + ), + 100_000, + )); let entities = iter.into_iter().collect::>(); for i in 0..fragment { let mut e = world.entity_mut(entities[i as usize]); diff --git a/benches/benches/bevy_ecs/main.rs b/benches/benches/bevy_ecs/main.rs index 83f0cde0286d6..4a025ab829369 100644 --- a/benches/benches/bevy_ecs/main.rs +++ b/benches/benches/bevy_ecs/main.rs @@ -2,13 +2,13 @@ dead_code, reason = "Many fields are unused/unread as they are just for benchmarking purposes." )] -#![expect(clippy::type_complexity)] use criterion::criterion_main; mod change_detection; mod components; mod empty_archetypes; +mod entity_cloning; mod events; mod fragmentation; mod iteration; @@ -21,6 +21,7 @@ criterion_main!( change_detection::benches, components::benches, empty_archetypes::benches, + entity_cloning::benches, events::benches, iteration::benches, fragmentation::benches, diff --git a/benches/benches/bevy_ecs/observers/propagation.rs b/benches/benches/bevy_ecs/observers/propagation.rs index 5de85bc3269b2..65c15f7308deb 100644 --- a/benches/benches/bevy_ecs/observers/propagation.rs +++ b/benches/benches/bevy_ecs/observers/propagation.rs @@ -1,9 +1,7 @@ -use bevy_ecs::{ - component::Component, entity::Entity, event::Event, observer::Trigger, world::World, -}; -use bevy_hierarchy::{BuildChildren, Parent}; +use core::hint::black_box; -use criterion::{black_box, Criterion}; +use bevy_ecs::prelude::*; +use criterion::Criterion; use rand::SeedableRng; use rand::{seq::IteratorRandom, Rng}; use rand_chacha::ChaCha8Rng; @@ -67,7 +65,7 @@ pub fn event_propagation(criterion: &mut Criterion) { struct TestEvent {} impl Event for TestEvent { - type Traversal = &'static Parent; + type Traversal = &'static ChildOf; const AUTO_PROPAGATE: bool = true; } diff --git a/benches/benches/bevy_ecs/observers/simple.rs b/benches/benches/bevy_ecs/observers/simple.rs index 81dd8e021e8ce..85207624e837f 
100644 --- a/benches/benches/bevy_ecs/observers/simple.rs +++ b/benches/benches/bevy_ecs/observers/simple.rs @@ -1,6 +1,12 @@ -use bevy_ecs::{entity::Entity, event::Event, observer::Trigger, world::World}; +use core::hint::black_box; -use criterion::{black_box, Criterion}; +use bevy_ecs::{ + event::Event, + observer::{Trigger, TriggerTargets}, + world::World, +}; + +use criterion::Criterion; use rand::{prelude::SliceRandom, SeedableRng}; use rand_chacha::ChaCha8Rng; fn deterministic_rand() -> ChaCha8Rng { @@ -44,6 +50,6 @@ fn empty_listener_base(trigger: Trigger) { black_box(trigger); } -fn send_base_event(world: &mut World, entities: &Vec) { +fn send_base_event(world: &mut World, entities: impl TriggerTargets) { world.trigger_targets(EventBase, entities); } diff --git a/benches/benches/bevy_ecs/scheduling/schedule.rs b/benches/benches/bevy_ecs/scheduling/schedule.rs index 0450428535713..9844461d399ce 100644 --- a/benches/benches/bevy_ecs/scheduling/schedule.rs +++ b/benches/benches/bevy_ecs/scheduling/schedule.rs @@ -79,7 +79,7 @@ pub fn build_schedule(criterion: &mut Criterion) { // Benchmark graphs of different sizes. for graph_size in [100, 500, 1000] { // Basic benchmark without constraints. 
- group.bench_function(format!("{graph_size}_schedule_noconstraints"), |bencher| { + group.bench_function(format!("{graph_size}_schedule_no_constraints"), |bencher| { bencher.iter(|| { let mut app = App::new(); for _ in 0..graph_size { diff --git a/benches/benches/bevy_ecs/world/commands.rs b/benches/benches/bevy_ecs/world/commands.rs index a1d7cdb09e382..8ad87862eba24 100644 --- a/benches/benches/bevy_ecs/world/commands.rs +++ b/benches/benches/bevy_ecs/world/commands.rs @@ -1,9 +1,11 @@ +use core::hint::black_box; + use bevy_ecs::{ component::Component, - system::Commands, - world::{Command, CommandQueue, World}, + system::{Command, Commands}, + world::{CommandQueue, World}, }; -use criterion::{black_box, Criterion}; +use criterion::Criterion; #[derive(Component)] struct A; @@ -104,6 +106,10 @@ pub fn insert_commands(criterion: &mut Criterion) { for entity in &entities { values.push((*entity, (Matrix::default(), Vec3::default()))); } + #[expect( + deprecated, + reason = "This needs to be supported for now, and therefore still needs the benchmark." 
+ )] commands.insert_or_spawn_batch(values); command_queue.apply(&mut world); }); diff --git a/benches/benches/bevy_ecs/world/despawn.rs b/benches/benches/bevy_ecs/world/despawn.rs index ace88e744a482..5419867a9ea68 100644 --- a/benches/benches/bevy_ecs/world/despawn.rs +++ b/benches/benches/bevy_ecs/world/despawn.rs @@ -1,5 +1,5 @@ use bevy_ecs::prelude::*; -use criterion::Criterion; +use criterion::{BatchSize, Criterion}; use glam::*; #[derive(Component)] @@ -13,18 +13,23 @@ pub fn world_despawn(criterion: &mut Criterion) { group.measurement_time(core::time::Duration::from_secs(4)); for entity_count in (0..5).map(|i| 10_u32.pow(i)) { - let mut world = World::default(); - for _ in 0..entity_count { - world.spawn((A(Mat4::default()), B(Vec4::default()))); - } - - let ents = world.iter_entities().map(|e| e.id()).collect::>(); group.bench_function(format!("{}_entities", entity_count), |bencher| { - bencher.iter(|| { - ents.iter().for_each(|e| { - world.despawn(*e); - }); - }); + bencher.iter_batched_ref( + || { + let mut world = World::default(); + for _ in 0..entity_count { + world.spawn((A(Mat4::default()), B(Vec4::default()))); + } + let ents = world.iter_entities().map(|e| e.id()).collect::>(); + (world, ents) + }, + |(world, ents)| { + ents.iter().for_each(|e| { + world.despawn(*e); + }); + }, + BatchSize::SmallInput, + ); }); } diff --git a/benches/benches/bevy_ecs/world/despawn_recursive.rs b/benches/benches/bevy_ecs/world/despawn_recursive.rs index 482086ab17444..6ae59b10a54a5 100644 --- a/benches/benches/bevy_ecs/world/despawn_recursive.rs +++ b/benches/benches/bevy_ecs/world/despawn_recursive.rs @@ -1,8 +1,5 @@ use bevy_ecs::prelude::*; -use bevy_hierarchy::despawn_with_children_recursive; -use bevy_hierarchy::BuildChildren; -use bevy_hierarchy::ChildBuild; -use criterion::Criterion; +use criterion::{BatchSize, Criterion}; use glam::*; #[derive(Component)] @@ -16,22 +13,30 @@ pub fn world_despawn_recursive(criterion: &mut Criterion) { 
group.measurement_time(core::time::Duration::from_secs(4)); for entity_count in (0..5).map(|i| 10_u32.pow(i)) { - let mut world = World::default(); - for _ in 0..entity_count { - world - .spawn((A(Mat4::default()), B(Vec4::default()))) - .with_children(|parent| { - parent.spawn((A(Mat4::default()), B(Vec4::default()))); - }); - } - - let ents = world.iter_entities().map(|e| e.id()).collect::>(); group.bench_function(format!("{}_entities", entity_count), |bencher| { - bencher.iter(|| { - ents.iter().for_each(|e| { - despawn_with_children_recursive(&mut world, *e, true); - }); - }); + bencher.iter_batched_ref( + || { + let mut world = World::default(); + let parent_ents = (0..entity_count) + .map(|_| { + world + .spawn((A(Mat4::default()), B(Vec4::default()))) + .with_children(|parent| { + parent.spawn((A(Mat4::default()), B(Vec4::default()))); + }) + .id() + }) + .collect::>(); + + (world, parent_ents) + }, + |(world, parent_ents)| { + parent_ents.iter().for_each(|e| { + world.despawn(*e); + }); + }, + BatchSize::SmallInput, + ); }); } diff --git a/benches/benches/bevy_ecs/world/entity_hash.rs b/benches/benches/bevy_ecs/world/entity_hash.rs index d4ba9b659820f..7e8dfb4a21f2f 100644 --- a/benches/benches/bevy_ecs/world/entity_hash.rs +++ b/benches/benches/bevy_ecs/world/entity_hash.rs @@ -11,16 +11,16 @@ fn make_entity(rng: &mut impl Rng, size: usize) -> Entity { // * For ids, half are in [0, size), half are unboundedly larger. // * For generations, half are in [1, 3), half are unboundedly larger. 
- let x: f64 = rng.gen(); + let x: f64 = rng.r#gen(); let id = -(1.0 - x).log2() * (size as f64); - let x: f64 = rng.gen(); - let gen = 1.0 + -(1.0 - x).log2() * 2.0; + let x: f64 = rng.r#gen(); + let generation = 1.0 + -(1.0 - x).log2() * 2.0; // this is not reliable, but we're internal so a hack is ok - let bits = ((gen as u64) << 32) | (id as u64); + let bits = ((generation as u64) << 32) | (id as u64); let e = Entity::from_bits(bits); assert_eq!(e.index(), id as u32); - assert_eq!(e.generation(), gen as u32); + assert_eq!(e.generation(), generation as u32); e } diff --git a/benches/benches/bevy_ecs/world/world_get.rs b/benches/benches/bevy_ecs/world/world_get.rs index 190402fbadb27..283b984186150 100644 --- a/benches/benches/bevy_ecs/world/world_get.rs +++ b/benches/benches/bevy_ecs/world/world_get.rs @@ -1,11 +1,13 @@ +use core::hint::black_box; + use bevy_ecs::{ - bundle::Bundle, + bundle::{Bundle, NoBundleEffect}, component::Component, entity::Entity, system::{Query, SystemState}, world::World, }; -use criterion::{black_box, Criterion}; +use criterion::Criterion; use rand::{prelude::SliceRandom, SeedableRng}; use rand_chacha::ChaCha8Rng; @@ -34,7 +36,7 @@ fn setup(entity_count: u32) -> World { black_box(world) } -fn setup_wide(entity_count: u32) -> World { +fn setup_wide + Default>(entity_count: u32) -> World { let mut world = World::default(); world.spawn_batch((0..entity_count).map(|_| T::default())); black_box(world) diff --git a/benches/benches/bevy_math/bezier.rs b/benches/benches/bevy_math/bezier.rs index 404ab08a63eb2..a95cb4a821ed6 100644 --- a/benches/benches/bevy_math/bezier.rs +++ b/benches/benches/bevy_math/bezier.rs @@ -1,75 +1,86 @@ -use criterion::{black_box, criterion_group, Criterion}; +use benches::bench; +use bevy_math::{prelude::*, VectorSpace}; +use core::hint::black_box; +use criterion::{ + criterion_group, measurement::Measurement, BatchSize, BenchmarkGroup, BenchmarkId, Criterion, +}; -use bevy_math::prelude::*; 
+criterion_group!(benches, segment_ease, curve_position, curve_iter_positions); -fn easing(c: &mut Criterion) { - let cubic_bezier = CubicSegment::new_bezier(vec2(0.25, 0.1), vec2(0.25, 1.0)); - c.bench_function("easing_1000", |b| { - b.iter(|| { - (0..1000).map(|i| i as f32 / 1000.0).for_each(|t| { - black_box(cubic_bezier.ease(black_box(t))); - }); - }); +fn segment_ease(c: &mut Criterion) { + let segment = black_box(CubicSegment::new_bezier_easing( + vec2(0.25, 0.1), + vec2(0.25, 1.0), + )); + + c.bench_function(bench!("segment_ease"), |b| { + let mut t = 0; + + b.iter_batched( + || { + // Increment `t` by 1, but use modulo to constrain it to `0..=1000`. + t = (t + 1) % 1001; + + // Return time as a decimal between 0 and 1, inclusive. + t as f32 / 1000.0 + }, + |t| segment.ease(t), + BatchSize::SmallInput, + ); }); } -fn cubic_2d(c: &mut Criterion) { - let bezier = CubicBezier::new([[ +fn curve_position(c: &mut Criterion) { + /// A helper function that benchmarks calling [`CubicCurve::position()`] over a generic [`VectorSpace`]. + fn bench_curve( + group: &mut BenchmarkGroup, + name: &str, + curve: CubicCurve

, + ) { + group.bench_with_input(BenchmarkId::from_parameter(name), &curve, |b, curve| { + b.iter(|| curve.position(black_box(0.5))); + }); + } + + let mut group = c.benchmark_group(bench!("curve_position")); + + let bezier_2 = CubicBezier::new([[ vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 0.0), vec2(1.0, 1.0), ]]) .to_curve() - .expect("Unable to build a curve from this data"); - c.bench_function("cubic_position_Vec2", |b| { - b.iter(|| black_box(bezier.position(black_box(0.5)))); - }); -} + .unwrap(); -fn cubic(c: &mut Criterion) { - let bezier = CubicBezier::new([[ - vec3a(0.0, 0.0, 0.0), - vec3a(0.0, 1.0, 0.0), - vec3a(1.0, 0.0, 0.0), - vec3a(1.0, 1.0, 1.0), - ]]) - .to_curve() - .expect("Unable to build a curve from this data"); - c.bench_function("cubic_position_Vec3A", |b| { - b.iter(|| black_box(bezier.position(black_box(0.5)))); - }); -} + bench_curve(&mut group, "vec2", bezier_2); -fn cubic_vec3(c: &mut Criterion) { - let bezier = CubicBezier::new([[ + let bezier_3 = CubicBezier::new([[ vec3(0.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0), vec3(1.0, 0.0, 0.0), vec3(1.0, 1.0, 1.0), ]]) .to_curve() - .expect("Unable to build a curve from this data"); - c.bench_function("cubic_position_Vec3", |b| { - b.iter(|| black_box(bezier.position(black_box(0.5)))); - }); -} + .unwrap(); -fn build_pos_cubic(c: &mut Criterion) { - let bezier = CubicBezier::new([[ + bench_curve(&mut group, "vec3", bezier_3); + + let bezier_3a = CubicBezier::new([[ vec3a(0.0, 0.0, 0.0), vec3a(0.0, 1.0, 0.0), vec3a(1.0, 0.0, 0.0), vec3a(1.0, 1.0, 1.0), ]]) .to_curve() - .expect("Unable to build a curve from this data"); - c.bench_function("build_pos_cubic_100_points", |b| { - b.iter(|| black_box(bezier.iter_positions(black_box(100)).collect::>())); - }); + .unwrap(); + + bench_curve(&mut group, "vec3a", bezier_3a); + + group.finish(); } -fn build_accel_cubic(c: &mut Criterion) { +fn curve_iter_positions(c: &mut Criterion) { let bezier = CubicBezier::new([[ vec3a(0.0, 0.0, 0.0), vec3a(0.0, 1.0, 0.0), @@ 
-77,18 +88,15 @@ fn build_accel_cubic(c: &mut Criterion) { vec3a(1.0, 1.0, 1.0), ]]) .to_curve() - .expect("Unable to build a curve from this data"); - c.bench_function("build_accel_cubic_100_points", |b| { - b.iter(|| black_box(bezier.iter_positions(black_box(100)).collect::>())); + .unwrap(); + + c.bench_function(bench!("curve_iter_positions"), |b| { + b.iter(|| { + for x in bezier.iter_positions(black_box(100)) { + // Discard `x`, since we just care about `iter_positions()` being consumed, but make + // the compiler believe `x` is being used so it doesn't eliminate the iterator. + black_box(x); + } + }); }); } - -criterion_group!( - benches, - easing, - cubic_2d, - cubic_vec3, - cubic, - build_pos_cubic, - build_accel_cubic, -); diff --git a/benches/benches/bevy_picking/ray_mesh_intersection.rs b/benches/benches/bevy_picking/ray_mesh_intersection.rs index 1d019d43ee37f..ee81f1ac7fd87 100644 --- a/benches/benches/bevy_picking/ray_mesh_intersection.rs +++ b/benches/benches/bevy_picking/ray_mesh_intersection.rs @@ -1,31 +1,53 @@ +use core::hint::black_box; +use std::time::Duration; + +use benches::bench; use bevy_math::{Dir3, Mat4, Ray3d, Vec3}; -use bevy_picking::mesh_picking::ray_cast; -use criterion::{black_box, criterion_group, Criterion}; +use bevy_picking::mesh_picking::ray_cast::{self, Backfaces}; +use criterion::{criterion_group, AxisScale, BenchmarkId, Criterion, PlotConfiguration}; -fn ptoxznorm(p: u32, size: u32) -> (f32, f32) { - let ij = (p / (size), p % (size)); - (ij.0 as f32 / size as f32, ij.1 as f32 / size as f32) -} +criterion_group!(benches, bench); +/// A mesh that can be passed to [`ray_cast::ray_mesh_intersection()`]. struct SimpleMesh { positions: Vec<[f32; 3]>, normals: Vec<[f32; 3]>, indices: Vec, } -fn mesh_creation(vertices_per_side: u32) -> SimpleMesh { +/// Selects a point within a normal square. +/// +/// `p` is an index within `0..vertices_per_side.pow(2)`. 
The returned value is a coordinate where +/// both `x` and `z` are within `0..1`. +fn p_to_xz_norm(p: u32, vertices_per_side: u32) -> (f32, f32) { + let x = (p / vertices_per_side) as f32; + let z = (p % vertices_per_side) as f32; + + let vertices_per_side = vertices_per_side as f32; + + // Scale `x` and `z` to be between 0 and 1. + (x / vertices_per_side, z / vertices_per_side) +} + +fn create_mesh(vertices_per_side: u32) -> SimpleMesh { let mut positions = Vec::new(); let mut normals = Vec::new(); + let mut indices = Vec::new(); + for p in 0..vertices_per_side.pow(2) { - let xz = ptoxznorm(p, vertices_per_side); - positions.push([xz.0 - 0.5, 0.0, xz.1 - 0.5]); + let (x, z) = p_to_xz_norm(p, vertices_per_side); + + // Push a new vertice to the mesh. We translate all vertices so the final square is + // centered at (0, 0), instead of (0.5, 0.5). + positions.push([x - 0.5, 0.0, z - 0.5]); + + // All vertices have the same normal. normals.push([0.0, 1.0, 0.0]); - } - let mut indices = vec![]; - for p in 0..vertices_per_side.pow(2) { - if p % (vertices_per_side) != vertices_per_side - 1 - && p / (vertices_per_side) != vertices_per_side - 1 + // Extend the indices for for all vertices except for the final row and column, since + // indices are "between" points. 
+ if p % vertices_per_side != vertices_per_side - 1 + && p / vertices_per_side != vertices_per_side - 1 { indices.extend_from_slice(&[p, p + 1, p + vertices_per_side]); indices.extend_from_slice(&[p + vertices_per_side, p + 1, p + vertices_per_side + 1]); @@ -39,81 +61,110 @@ fn mesh_creation(vertices_per_side: u32) -> SimpleMesh { } } -fn ray_mesh_intersection(c: &mut Criterion) { - let mut group = c.benchmark_group("ray_mesh_intersection"); - group.warm_up_time(std::time::Duration::from_millis(500)); - - for vertices_per_side in [10_u32, 100, 1000] { - group.bench_function(format!("{}_vertices", vertices_per_side.pow(2)), |b| { - let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Dir3::NEG_Y); - let mesh_to_world = Mat4::IDENTITY; - let mesh = mesh_creation(vertices_per_side); - - b.iter(|| { - black_box(ray_cast::ray_mesh_intersection( - ray, - &mesh_to_world, - &mesh.positions, - Some(&mesh.normals), - Some(&mesh.indices), - ray_cast::Backfaces::Cull, - )); - }); - }); - } +/// An enum that represents the configuration for all variations of the ray mesh intersection +/// benchmarks. +enum Benchmarks { + /// The ray intersects the mesh, and culling is enabled. + CullHit, + + /// The ray intersects the mesh, and culling is disabled. + NoCullHit, + + /// The ray does not intersect the mesh, and culling is enabled. 
+ CullMiss, } -fn ray_mesh_intersection_no_cull(c: &mut Criterion) { - let mut group = c.benchmark_group("ray_mesh_intersection_no_cull"); - group.warm_up_time(std::time::Duration::from_millis(500)); - - for vertices_per_side in [10_u32, 100, 1000] { - group.bench_function(format!("{}_vertices", vertices_per_side.pow(2)), |b| { - let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Dir3::NEG_Y); - let mesh_to_world = Mat4::IDENTITY; - let mesh = mesh_creation(vertices_per_side); - - b.iter(|| { - black_box(ray_cast::ray_mesh_intersection( - ray, - &mesh_to_world, - &mesh.positions, - Some(&mesh.normals), - Some(&mesh.indices), - ray_cast::Backfaces::Include, - )); - }); - }); +impl Benchmarks { + const WARM_UP_TIME: Duration = Duration::from_millis(500); + const VERTICES_PER_SIDE: [u32; 3] = [10, 100, 1000]; + + /// Returns an iterator over every variant in this enum. + fn iter() -> impl Iterator { + [Self::CullHit, Self::NoCullHit, Self::CullMiss].into_iter() } -} -fn ray_mesh_intersection_no_intersection(c: &mut Criterion) { - let mut group = c.benchmark_group("ray_mesh_intersection_no_intersection"); - group.warm_up_time(std::time::Duration::from_millis(500)); - - for vertices_per_side in [10_u32, 100, 1000] { - group.bench_function(format!("{}_vertices", (vertices_per_side).pow(2)), |b| { - let ray = Ray3d::new(Vec3::new(0.0, 1.0, 0.0), Dir3::X); - let mesh_to_world = Mat4::IDENTITY; - let mesh = mesh_creation(vertices_per_side); - - b.iter(|| { - black_box(ray_cast::ray_mesh_intersection( - ray, - &mesh_to_world, - &mesh.positions, - Some(&mesh.normals), - Some(&mesh.indices), - ray_cast::Backfaces::Cull, - )); - }); - }); + /// Returns the benchmark group name. 
+ fn name(&self) -> &'static str { + match *self { + Self::CullHit => bench!("cull_intersect"), + Self::NoCullHit => bench!("no_cull_intersect"), + Self::CullMiss => bench!("cull_no_intersect"), + } + } + + fn ray(&self) -> Ray3d { + Ray3d::new( + Vec3::new(0.0, 1.0, 0.0), + match *self { + Self::CullHit | Self::NoCullHit => Dir3::NEG_Y, + // `NoIntersection` should not hit the mesh, so it goes an orthogonal direction. + Self::CullMiss => Dir3::X, + }, + ) + } + + fn mesh_to_world(&self) -> Mat4 { + Mat4::IDENTITY + } + + fn backface_culling(&self) -> Backfaces { + match *self { + Self::CullHit | Self::CullMiss => Backfaces::Cull, + Self::NoCullHit => Backfaces::Include, + } + } + + /// Returns whether the ray should intersect with the mesh. + #[cfg(test)] + fn should_intersect(&self) -> bool { + match *self { + Self::CullHit | Self::NoCullHit => true, + Self::CullMiss => false, + } } } -criterion_group!( - benches, - ray_mesh_intersection, - ray_mesh_intersection_no_cull, - ray_mesh_intersection_no_intersection -); +/// A benchmark that times [`ray_cast::ray_mesh_intersection()`]. +/// +/// There are multiple different scenarios that are tracked, which are described by the +/// [`Benchmarks`] enum. Each scenario has its own benchmark group, where individual benchmarks +/// track a ray intersecting a square mesh of an increasing amount of vertices. +fn bench(c: &mut Criterion) { + for benchmark in Benchmarks::iter() { + let mut group = c.benchmark_group(benchmark.name()); + + group + .warm_up_time(Benchmarks::WARM_UP_TIME) + // Make the scale logarithmic, to match `VERTICES_PER_SIDE`. 
+ .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); + + for vertices_per_side in Benchmarks::VERTICES_PER_SIDE { + group.bench_with_input( + BenchmarkId::from_parameter(format!("{}_vertices", vertices_per_side.pow(2))), + &vertices_per_side, + |b, &vertices_per_side| { + let ray = black_box(benchmark.ray()); + let mesh_to_world = black_box(benchmark.mesh_to_world()); + let mesh = black_box(create_mesh(vertices_per_side)); + let backface_culling = black_box(benchmark.backface_culling()); + + b.iter(|| { + let intersected = ray_cast::ray_mesh_intersection( + ray, + &mesh_to_world, + &mesh.positions, + Some(&mesh.normals), + Some(&mesh.indices), + backface_culling, + ); + + #[cfg(test)] + assert_eq!(intersected.is_some(), benchmark.should_intersect()); + + intersected + }); + }, + ); + } + } +} diff --git a/benches/benches/bevy_reflect/function.rs b/benches/benches/bevy_reflect/function.rs index 5398cc3da91e2..ff4c8dca2ffc0 100644 --- a/benches/benches/bevy_reflect/function.rs +++ b/benches/benches/bevy_reflect/function.rs @@ -1,14 +1,25 @@ +use core::hint::black_box; + +use benches::bench; use bevy_reflect::func::{ArgList, IntoFunction, IntoFunctionMut, TypedFunction}; -use criterion::{criterion_group, BatchSize, Criterion}; +use criterion::{criterion_group, BatchSize, BenchmarkId, Criterion}; -criterion_group!(benches, typed, into, call, overload, clone); +criterion_group!( + benches, + typed, + into, + call, + clone, + with_overload, + call_overload, +); fn add(a: i32, b: i32) -> i32 { a + b } fn typed(c: &mut Criterion) { - c.benchmark_group("typed") + c.benchmark_group(bench!("typed")) .bench_function("function", |b| { b.iter(|| add.get_function_info()); }) @@ -25,7 +36,7 @@ fn typed(c: &mut Criterion) { } fn into(c: &mut Criterion) { - c.benchmark_group("into") + c.benchmark_group(bench!("into")) .bench_function("function", |b| { b.iter(|| add.into_function()); }) @@ -36,24 +47,25 @@ fn into(c: &mut Criterion) { }) 
.bench_function("closure_mut", |b| { let mut _capture = 25; + // `move` is required here because `into_function_mut()` takes ownership of `self`. let closure = move |a: i32| _capture += a; b.iter(|| closure.into_function_mut()); }); } fn call(c: &mut Criterion) { - c.benchmark_group("call") + c.benchmark_group(bench!("call")) .bench_function("trait_object", |b| { b.iter_batched( || Box::new(add) as Box i32>, - |func| func(75, 25), + |func| func(black_box(75), black_box(25)), BatchSize::SmallInput, ); }) .bench_function("function", |b| { let add = add.into_function(); b.iter_batched( - || ArgList::new().push_owned(75_i32).push_owned(25_i32), + || ArgList::new().with_owned(75_i32).with_owned(25_i32), |args| add.call(args), BatchSize::SmallInput, ); @@ -62,7 +74,7 @@ fn call(c: &mut Criterion) { let capture = 25; let add = (|a: i32| a + capture).into_function(); b.iter_batched( - || ArgList::new().push_owned(75_i32), + || ArgList::new().with_owned(75_i32), |args| add.call(args), BatchSize::SmallInput, ); @@ -71,42 +83,49 @@ fn call(c: &mut Criterion) { let mut capture = 25; let mut add = (|a: i32| capture += a).into_function_mut(); b.iter_batched( - || ArgList::new().push_owned(75_i32), + || ArgList::new().with_owned(75_i32), |args| add.call(args), BatchSize::SmallInput, ); }); } -fn overload(c: &mut Criterion) { - fn add>(a: T, b: T) -> T { - a + b - } +fn clone(c: &mut Criterion) { + c.benchmark_group(bench!("clone")) + .bench_function("function", |b| { + let add = add.into_function(); + b.iter(|| add.clone()); + }); +} + +fn simple>(a: T, b: T) -> T { + a + b +} - #[expect(clippy::too_many_arguments)] - fn complex( - _: T0, - _: T1, - _: T2, - _: T3, - _: T4, - _: T5, - _: T6, - _: T7, - _: T8, - _: T9, - ) { - } +fn complex( + _: T0, + _: T1, + _: T2, + _: T3, + _: T4, + _: T5, + _: T6, + _: T7, + _: T8, + _: T9, +) { +} - c.benchmark_group("with_overload") - .bench_function("01_simple_overload", |b| { +fn with_overload(c: &mut Criterion) { + 
c.benchmark_group(bench!("with_overload")) + .bench_function(BenchmarkId::new("simple_overload", 1), |b| { b.iter_batched( - || add::.into_function(), - |func| func.with_overload(add::), + || simple::.into_function(), + |func| func.with_overload(simple::), BatchSize::SmallInput, ); }) - .bench_function("01_complex_overload", |b| { + .bench_function(BenchmarkId::new("complex_overload", 1), |b| { b.iter_batched( || complex::.into_function(), |func| { @@ -115,18 +134,18 @@ fn overload(c: &mut Criterion) { BatchSize::SmallInput, ); }) - .bench_function("03_simple_overload", |b| { + .bench_function(BenchmarkId::new("simple_overload", 3), |b| { b.iter_batched( - || add::.into_function(), + || simple::.into_function(), |func| { - func.with_overload(add::) - .with_overload(add::) - .with_overload(add::) + func.with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) }, BatchSize::SmallInput, ); }) - .bench_function("03_complex_overload", |b| { + .bench_function(BenchmarkId::new("complex_overload", 3), |b| { b.iter_batched( || complex::.into_function(), |func| { @@ -137,24 +156,24 @@ fn overload(c: &mut Criterion) { BatchSize::SmallInput, ); }) - .bench_function("10_simple_overload", |b| { + .bench_function(BenchmarkId::new("simple_overload", 10), |b| { b.iter_batched( - || add::.into_function(), + || simple::.into_function(), |func| { - func.with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) + func.with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) }, BatchSize::SmallInput, ); }) - .bench_function("10_complex_overload", |b| { + .bench_function(BenchmarkId::new("complex_overload", 10), |b| { b.iter_batched( 
|| complex::.into_function(), |func| { @@ -171,41 +190,41 @@ fn overload(c: &mut Criterion) { BatchSize::SmallInput, ); }) - .bench_function("01_nested_simple_overload", |b| { + .bench_function(BenchmarkId::new("nested_simple_overload", 1), |b| { b.iter_batched( - || add::.into_function(), - |func| func.with_overload(add::), + || simple::.into_function(), + |func| func.with_overload(simple::), BatchSize::SmallInput, ); }) - .bench_function("03_nested_simple_overload", |b| { + .bench_function(BenchmarkId::new("nested_simple_overload", 3), |b| { b.iter_batched( - || add::.into_function(), + || simple::.into_function(), |func| { func.with_overload( - add:: - .into_function() - .with_overload(add::.into_function().with_overload(add::)), + simple::.into_function().with_overload( + simple::.into_function().with_overload(simple::), + ), ) }, BatchSize::SmallInput, ); }) - .bench_function("10_nested_simple_overload", |b| { + .bench_function(BenchmarkId::new("nested_simple_overload", 10), |b| { b.iter_batched( - || add::.into_function(), + || simple::.into_function(), |func| { func.with_overload( - add::.into_function().with_overload( - add::.into_function().with_overload( - add::.into_function().with_overload( - add::.into_function().with_overload( - add::.into_function().with_overload( - add::.into_function().with_overload( - add::.into_function().with_overload( - add:: + simple::.into_function().with_overload( + simple::.into_function().with_overload( + simple::.into_function().with_overload( + simple::.into_function().with_overload( + simple::.into_function().with_overload( + simple::.into_function().with_overload( + simple::.into_function().with_overload( + simple:: .into_function() - .with_overload(add::), + .with_overload(simple::), ), ), ), @@ -218,21 +237,23 @@ fn overload(c: &mut Criterion) { BatchSize::SmallInput, ); }); +} - c.benchmark_group("call_overload") - .bench_function("01_simple_overload", |b| { +fn call_overload(c: &mut Criterion) { + 
c.benchmark_group(bench!("call_overload")) + .bench_function(BenchmarkId::new("simple_overload", 1), |b| { b.iter_batched( || { ( - add::.into_function().with_overload(add::), - ArgList::new().push_owned(75_i8).push_owned(25_i8), + simple::.into_function().with_overload(simple::), + ArgList::new().with_owned(75_i8).with_owned(25_i8), ) }, |(func, args)| func.call(args), BatchSize::SmallInput, ); }) - .bench_function("01_complex_overload", |b| { + .bench_function(BenchmarkId::new("complex_overload", 1), |b| { b.iter_batched( || { ( @@ -242,39 +263,39 @@ fn overload(c: &mut Criterion) { complex::, ), ArgList::new() - .push_owned(1_i8) - .push_owned(2_i16) - .push_owned(3_i32) - .push_owned(4_i64) - .push_owned(5_i128) - .push_owned(6_u8) - .push_owned(7_u16) - .push_owned(8_u32) - .push_owned(9_u64) - .push_owned(10_u128), + .with_owned(1_i8) + .with_owned(2_i16) + .with_owned(3_i32) + .with_owned(4_i64) + .with_owned(5_i128) + .with_owned(6_u8) + .with_owned(7_u16) + .with_owned(8_u32) + .with_owned(9_u64) + .with_owned(10_u128), ) }, |(func, args)| func.call(args), BatchSize::SmallInput, ); }) - .bench_function("03_simple_overload", |b| { + .bench_function(BenchmarkId::new("simple_overload", 3), |b| { b.iter_batched( || { ( - add:: + simple:: .into_function() - .with_overload(add::) - .with_overload(add::) - .with_overload(add::), - ArgList::new().push_owned(75_i32).push_owned(25_i32), + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::), + ArgList::new().with_owned(75_i32).with_owned(25_i32), ) }, |(func, args)| func.call(args), BatchSize::SmallInput, ); }) - .bench_function("03_complex_overload", |b| { + .bench_function(BenchmarkId::new("complex_overload", 3), |b| { b.iter_batched( || { ( @@ -290,45 +311,45 @@ fn overload(c: &mut Criterion) { complex::, ), ArgList::new() - .push_owned(1_i32) - .push_owned(2_i64) - .push_owned(3_i128) - .push_owned(4_u8) - .push_owned(5_u16) - .push_owned(6_u32) - .push_owned(7_u64) - 
.push_owned(8_u128) - .push_owned(9_i8) - .push_owned(10_i16), + .with_owned(1_i32) + .with_owned(2_i64) + .with_owned(3_i128) + .with_owned(4_u8) + .with_owned(5_u16) + .with_owned(6_u32) + .with_owned(7_u64) + .with_owned(8_u128) + .with_owned(9_i8) + .with_owned(10_i16), ) }, |(func, args)| func.call(args), BatchSize::SmallInput, ); }) - .bench_function("10_simple_overload", |b| { + .bench_function(BenchmarkId::new("simple_overload", 10), |b| { b.iter_batched( || { ( - add:: + simple:: .into_function() - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::) - .with_overload(add::), - ArgList::new().push_owned(75_u8).push_owned(25_u8), + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::) + .with_overload(simple::), + ArgList::new().with_owned(75_u8).with_owned(25_u8), ) }, |(func, args)| func.call(args), BatchSize::SmallInput, ); }) - .bench_function("10_complex_overload", |b| { + .bench_function(BenchmarkId::new("complex_overload", 10), |b| { b.iter_batched( || { ( @@ -362,16 +383,16 @@ fn overload(c: &mut Criterion) { complex::, ), ArgList::new() - .push_owned(1_u8) - .push_owned(2_u16) - .push_owned(3_u32) - .push_owned(4_u64) - .push_owned(5_u128) - .push_owned(6_i8) - .push_owned(7_i16) - .push_owned(8_i32) - .push_owned(9_i64) - .push_owned(10_i128), + .with_owned(1_u8) + .with_owned(2_u16) + .with_owned(3_u32) + .with_owned(4_u64) + .with_owned(5_u128) + .with_owned(6_i8) + .with_owned(7_i16) + .with_owned(8_i32) + .with_owned(9_i64) + .with_owned(10_i128), ) }, |(func, args)| func.call(args), @@ -379,10 +400,3 @@ fn overload(c: &mut Criterion) { ); }); } - -fn clone(c: &mut Criterion) { - c.benchmark_group("clone").bench_function("function", |b| { - let add = 
add.into_function(); - b.iter(|| add.clone()); - }); -} diff --git a/benches/benches/bevy_reflect/list.rs b/benches/benches/bevy_reflect/list.rs index d9c92dd03ef06..fcbe59accdbc7 100644 --- a/benches/benches/bevy_reflect/list.rs +++ b/benches/benches/bevy_reflect/list.rs @@ -1,24 +1,43 @@ -use core::{iter, time::Duration}; +use core::{hint::black_box, iter, time::Duration}; +use benches::bench; use bevy_reflect::{DynamicList, List}; use criterion::{ - black_box, criterion_group, measurement::Measurement, BatchSize, BenchmarkGroup, BenchmarkId, - Criterion, Throughput, + criterion_group, measurement::Measurement, AxisScale, BatchSize, BenchmarkGroup, BenchmarkId, + Criterion, PlotConfiguration, Throughput, }; criterion_group!( benches, concrete_list_apply, - concrete_list_clone_dynamic, + concrete_list_to_dynamic_list, dynamic_list_apply, dynamic_list_push ); +// Use a shorter warm-up time (from 3 to 0.5 seconds) and measurement time (from 5 to 4) because we +// have so many combinations (>50) to benchmark. const WARM_UP_TIME: Duration = Duration::from_millis(500); const MEASUREMENT_TIME: Duration = Duration::from_secs(4); -// log10 scaling -const SIZES: [usize; 5] = [100_usize, 316, 1000, 3162, 10000]; +/// An array of list sizes used in benchmarks. +/// +/// This scales logarithmically. +const SIZES: [usize; 5] = [100, 316, 1000, 3162, 10000]; + +/// Creates a [`BenchmarkGroup`] with common configuration shared by all benchmarks within this +/// module. +fn create_group<'a, M: Measurement>(c: &'a mut Criterion, name: &str) -> BenchmarkGroup<'a, M> { + let mut group = c.benchmark_group(name); + + group + .warm_up_time(WARM_UP_TIME) + .measurement_time(MEASUREMENT_TIME) + // Make the plots logarithmic, matching `SIZES`' scale. 
+ .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); + + group +} fn list_apply( group: &mut BenchmarkGroup, @@ -53,33 +72,29 @@ fn list_apply( } fn concrete_list_apply(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("concrete_list_apply"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("concrete_list_apply")); let empty_base = |_: usize| Vec::::new; - let full_base = |size: usize| move || iter::repeat(0).take(size).collect::>(); - let patch = |size: usize| iter::repeat(1).take(size).collect::>(); + let full_base = |size: usize| move || iter::repeat_n(0, size).collect::>(); + let patch = |size: usize| iter::repeat_n(1, size).collect::>(); list_apply(&mut group, "empty_base_concrete_patch", empty_base, patch); list_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); list_apply(&mut group, "same_len_concrete_patch", full_base, patch); list_apply(&mut group, "same_len_dynamic_patch", full_base, |size| { - patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); group.finish(); } -fn concrete_list_clone_dynamic(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("concrete_list_clone_dynamic"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); +fn concrete_list_to_dynamic_list(criterion: &mut Criterion) { + let mut group = create_group(criterion, bench!("concrete_list_to_dynamic_list")); for size in SIZES { group.throughput(Throughput::Elements(size as u64)); @@ -88,9 +103,9 @@ fn concrete_list_clone_dynamic(criterion: &mut Criterion) { BenchmarkId::from_parameter(size), &size, |bencher, &size| { - let v = iter::repeat(0).take(size).collect::>(); + let v = iter::repeat_n(0, size).collect::>(); - bencher.iter(|| black_box(&v).clone_dynamic()); + bencher.iter(|| black_box(&v).to_dynamic_list()); }, 
); } @@ -99,9 +114,7 @@ fn concrete_list_clone_dynamic(criterion: &mut Criterion) { } fn dynamic_list_push(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_list_push"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_list_push")); for size in SIZES { group.throughput(Throughput::Elements(size as u64)); @@ -110,11 +123,11 @@ fn dynamic_list_push(criterion: &mut Criterion) { BenchmarkId::from_parameter(size), &size, |bencher, &size| { - let src = iter::repeat(()).take(size).collect::>(); + let src = iter::repeat_n((), size).collect::>(); let dst = DynamicList::default(); bencher.iter_batched( - || (src.clone(), dst.clone_dynamic()), + || (src.clone(), dst.to_dynamic_list()), |(src, mut dst)| { for item in src { dst.push(item); @@ -130,24 +143,22 @@ fn dynamic_list_push(criterion: &mut Criterion) { } fn dynamic_list_apply(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_list_apply"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_list_apply")); - let empty_base = |_: usize| || Vec::::new().clone_dynamic(); - let full_base = |size: usize| move || iter::repeat(0).take(size).collect::>(); - let patch = |size: usize| iter::repeat(1).take(size).collect::>(); + let empty_base = |_: usize| || Vec::::new().to_dynamic_list(); + let full_base = |size: usize| move || iter::repeat_n(0, size).collect::>(); + let patch = |size: usize| iter::repeat_n(1, size).collect::>(); list_apply(&mut group, "empty_base_concrete_patch", empty_base, patch); list_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); list_apply(&mut group, "same_len_concrete_patch", full_base, patch); list_apply(&mut group, "same_len_dynamic_patch", full_base, |size| { - 
patch(size).clone_dynamic() + patch(size).to_dynamic_list() }); group.finish(); diff --git a/benches/benches/bevy_reflect/main.rs b/benches/benches/bevy_reflect/main.rs index d347baccd0fa3..3785652295318 100644 --- a/benches/benches/bevy_reflect/main.rs +++ b/benches/benches/bevy_reflect/main.rs @@ -1,5 +1,3 @@ -#![expect(clippy::type_complexity)] - use criterion::criterion_main; mod function; diff --git a/benches/benches/bevy_reflect/map.rs b/benches/benches/bevy_reflect/map.rs index fc9da0aa08dcd..1eab01a587459 100644 --- a/benches/benches/bevy_reflect/map.rs +++ b/benches/benches/bevy_reflect/map.rs @@ -1,10 +1,11 @@ -use core::{fmt::Write, iter, time::Duration}; +use core::{fmt::Write, hint::black_box, iter, time::Duration}; +use benches::bench; +use bevy_platform::collections::HashMap; use bevy_reflect::{DynamicMap, Map}; -use bevy_utils::HashMap; use criterion::{ - black_box, criterion_group, measurement::Measurement, BatchSize, BenchmarkGroup, BenchmarkId, - Criterion, Throughput, + criterion_group, measurement::Measurement, AxisScale, BatchSize, BenchmarkGroup, BenchmarkId, + Criterion, PlotConfiguration, Throughput, }; criterion_group!( @@ -15,10 +16,30 @@ criterion_group!( dynamic_map_insert ); +// Use a shorter warm-up time (from 3 to 0.5 seconds) and measurement time (from 5 to 4) because we +// have so many combinations (>50) to benchmark. const WARM_UP_TIME: Duration = Duration::from_millis(500); const MEASUREMENT_TIME: Duration = Duration::from_secs(4); + +/// An array of list sizes used in benchmarks. +/// +/// This scales logarithmically. const SIZES: [usize; 5] = [100, 316, 1000, 3162, 10000]; +/// Creates a [`BenchmarkGroup`] with common configuration shared by all benchmarks within this +/// module. 
+fn create_group<'a, M: Measurement>(c: &'a mut Criterion, name: &str) -> BenchmarkGroup<'a, M> { + let mut group = c.benchmark_group(name); + + group + .warm_up_time(WARM_UP_TIME) + .measurement_time(MEASUREMENT_TIME) + // Make the plots logarithmic, matching `SIZES`' scale. + .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); + + group +} + /// Generic benchmark for applying one `Map` to another. /// /// `f_base` is a function which takes an input size and produces a generator @@ -55,9 +76,7 @@ fn map_apply( } fn concrete_map_apply(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("concrete_map_apply"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("concrete_map_apply")); let empty_base = |_: usize| HashMap::::default; @@ -89,7 +108,7 @@ fn concrete_map_apply(criterion: &mut Criterion) { ); map_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - key_range_patch(size).clone_dynamic() + key_range_patch(size).to_dynamic_map() }); map_apply( @@ -103,7 +122,7 @@ fn concrete_map_apply(criterion: &mut Criterion) { &mut group, "same_keys_dynamic_patch", key_range_base, - |size| key_range_patch(size).clone_dynamic(), + |size| key_range_patch(size).to_dynamic_map(), ); map_apply( @@ -117,7 +136,7 @@ fn concrete_map_apply(criterion: &mut Criterion) { &mut group, "disjoint_keys_dynamic_patch", key_range_base, - |size| disjoint_patch(size).clone_dynamic(), + |size| disjoint_patch(size).to_dynamic_map(), ); } @@ -126,14 +145,12 @@ fn u64_to_n_byte_key(k: u64, n: usize) -> String { write!(&mut key, "{}", k).unwrap(); // Pad key to n bytes. 
- key.extend(iter::repeat('\0').take(n - key.len())); + key.extend(iter::repeat_n('\0', n - key.len())); key } fn dynamic_map_apply(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_map_apply"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_map_apply")); let empty_base = |_: usize| DynamicMap::default; @@ -142,7 +159,7 @@ fn dynamic_map_apply(criterion: &mut Criterion) { (0..size as u64) .zip(iter::repeat(0)) .collect::>() - .clone_dynamic() + .to_dynamic_map() } }; @@ -166,7 +183,7 @@ fn dynamic_map_apply(criterion: &mut Criterion) { ); map_apply(&mut group, "empty_base_dynamic_patch", empty_base, |size| { - key_range_patch(size).clone_dynamic() + key_range_patch(size).to_dynamic_map() }); map_apply( @@ -180,7 +197,7 @@ fn dynamic_map_apply(criterion: &mut Criterion) { &mut group, "same_keys_dynamic_patch", key_range_base, - |size| key_range_patch(size).clone_dynamic(), + |size| key_range_patch(size).to_dynamic_map(), ); map_apply( @@ -194,14 +211,12 @@ fn dynamic_map_apply(criterion: &mut Criterion) { &mut group, "disjoint_keys_dynamic_patch", key_range_base, - |size| disjoint_patch(size).clone_dynamic(), + |size| disjoint_patch(size).to_dynamic_map(), ); } fn dynamic_map_get(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_map_get"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_map_get")); for size in SIZES { group.throughput(Throughput::Elements(size as u64)); @@ -250,9 +265,7 @@ fn dynamic_map_get(criterion: &mut Criterion) { } fn dynamic_map_insert(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_map_insert"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_map_insert")); for size in SIZES { 
group.throughput(Throughput::Elements(size as u64)); diff --git a/benches/benches/bevy_reflect/path.rs b/benches/benches/bevy_reflect/path.rs index 2cca245239e89..c0d8bfe0da732 100644 --- a/benches/benches/bevy_reflect/path.rs +++ b/benches/benches/bevy_reflect/path.rs @@ -1,7 +1,8 @@ -use core::{fmt::Write, str, time::Duration}; +use core::{fmt::Write, hint::black_box, str, time::Duration}; +use benches::bench; use bevy_reflect::ParsedPath; -use criterion::{black_box, criterion_group, BatchSize, BenchmarkId, Criterion, Throughput}; +use criterion::{criterion_group, BatchSize, BenchmarkId, Criterion, Throughput}; use rand::{distributions::Uniform, Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; @@ -11,7 +12,7 @@ const WARM_UP_TIME: Duration = Duration::from_millis(500); const MEASUREMENT_TIME: Duration = Duration::from_secs(2); const SAMPLE_SIZE: usize = 500; const NOISE_THRESHOLD: f64 = 0.03; -const SIZES: [usize; 6] = [100, 3160, 1000, 3_162, 10_000, 24_000]; +const SIZES: [usize; 6] = [100, 316, 1_000, 3_162, 10_000, 24_000]; fn deterministic_rand() -> ChaCha8Rng { ChaCha8Rng::seed_from_u64(42) @@ -66,23 +67,32 @@ fn mk_paths(size: usize) -> impl FnMut() -> String { } fn parse_reflect_path(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("parse_reflect_path"); + let mut group = criterion.benchmark_group(bench!("parse_reflect_path")); + group.warm_up_time(WARM_UP_TIME); group.measurement_time(MEASUREMENT_TIME); group.sample_size(SAMPLE_SIZE); group.noise_threshold(NOISE_THRESHOLD); - let group = &mut group; for size in SIZES { group.throughput(Throughput::Elements(size as u64)); + group.bench_with_input( - BenchmarkId::new("parse_reflect_path", size), + BenchmarkId::from_parameter(size), &size, |bencher, &size| { let mk_paths = mk_paths(size); bencher.iter_batched( mk_paths, - |path| assert!(ParsedPath::parse(black_box(&path)).is_ok()), + |path| { + let parsed_path = black_box(ParsedPath::parse(black_box(&path))); + + // When `cargo test 
--benches` is run, each benchmark is run once. This + // verifies that we are benchmarking a successful parse without it + // affecting the recorded time. + #[cfg(test)] + assert!(parsed_path.is_ok()); + }, BatchSize::SmallInput, ); }, diff --git a/benches/benches/bevy_reflect/struct.rs b/benches/benches/bevy_reflect/struct.rs index dfd324e7053e6..7750213b6dd76 100644 --- a/benches/benches/bevy_reflect/struct.rs +++ b/benches/benches/bevy_reflect/struct.rs @@ -1,15 +1,19 @@ -use core::time::Duration; +use core::{hint::black_box, time::Duration}; +use benches::bench; use bevy_reflect::{DynamicStruct, GetField, PartialReflect, Reflect, Struct}; -use criterion::{black_box, criterion_group, BatchSize, BenchmarkId, Criterion, Throughput}; +use criterion::{ + criterion_group, measurement::Measurement, AxisScale, BatchSize, BenchmarkGroup, BenchmarkId, + Criterion, PlotConfiguration, Throughput, +}; criterion_group!( benches, concrete_struct_apply, concrete_struct_field, concrete_struct_type_info, - concrete_struct_clone, - dynamic_struct_clone, + concrete_struct_to_dynamic_struct, + dynamic_struct_to_dynamic_struct, dynamic_struct_apply, dynamic_struct_get_field, dynamic_struct_insert, @@ -19,10 +23,22 @@ const WARM_UP_TIME: Duration = Duration::from_millis(500); const MEASUREMENT_TIME: Duration = Duration::from_secs(4); const SIZES: [usize; 4] = [16, 32, 64, 128]; +/// Creates a [`BenchmarkGroup`] with common configuration shared by all benchmarks within this +/// module. +fn create_group<'a, M: Measurement>(c: &'a mut Criterion, name: &str) -> BenchmarkGroup<'a, M> { + let mut group = c.benchmark_group(name); + + group + .warm_up_time(WARM_UP_TIME) + .measurement_time(MEASUREMENT_TIME) + // Make the plots logarithmic, matching `SIZES`' scale. 
+ .plot_config(PlotConfiguration::default().summary_scale(AxisScale::Logarithmic)); + + group +} + fn concrete_struct_field(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("concrete_struct_field"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("concrete_struct_field")); let structs: [Box; 4] = [ Box::new(Struct16::default()), @@ -44,7 +60,7 @@ fn concrete_struct_field(criterion: &mut Criterion) { bencher.iter(|| { for name in &field_names { - s.field(black_box(name)); + black_box(s.field(black_box(name))); } }); }, @@ -53,9 +69,7 @@ fn concrete_struct_field(criterion: &mut Criterion) { } fn concrete_struct_apply(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("concrete_struct_apply"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("concrete_struct_apply")); // Use functions that produce trait objects of varying concrete types as the // input to the benchmark. 
@@ -99,7 +113,7 @@ fn concrete_struct_apply(criterion: &mut Criterion) { bencher.iter_batched( || { let (obj, _) = input(); - let patch = obj.clone_dynamic(); + let patch = obj.to_dynamic_struct(); (obj, patch) }, |(mut obj, patch)| obj.apply(black_box(&patch)), @@ -111,9 +125,7 @@ fn concrete_struct_apply(criterion: &mut Criterion) { } fn concrete_struct_type_info(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("concrete_struct_type_info"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("concrete_struct_type_info")); let structs: [(Box, Box); 5] = [ ( @@ -145,23 +157,21 @@ fn concrete_struct_type_info(criterion: &mut Criterion) { BenchmarkId::new("NonGeneric", field_count), &standard, |bencher, s| { - bencher.iter(|| black_box(s.get_represented_type_info())); + bencher.iter(|| s.get_represented_type_info()); }, ); group.bench_with_input( BenchmarkId::new("Generic", field_count), &generic, |bencher, s| { - bencher.iter(|| black_box(s.get_represented_type_info())); + bencher.iter(|| s.get_represented_type_info()); }, ); } } -fn concrete_struct_clone(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("concrete_struct_clone"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); +fn concrete_struct_to_dynamic_struct(criterion: &mut Criterion) { + let mut group = create_group(criterion, bench!("concrete_struct_to_dynamic_struct")); let structs: [(Box, Box); 5] = [ ( @@ -193,30 +203,28 @@ fn concrete_struct_clone(criterion: &mut Criterion) { BenchmarkId::new("NonGeneric", field_count), &standard, |bencher, s| { - bencher.iter(|| black_box(s.clone_dynamic())); + bencher.iter(|| s.to_dynamic_struct()); }, ); group.bench_with_input( BenchmarkId::new("Generic", field_count), &generic, |bencher, s| { - bencher.iter(|| black_box(s.clone_dynamic())); + bencher.iter(|| s.to_dynamic_struct()); }, ); } } -fn 
dynamic_struct_clone(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_struct_clone"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); +fn dynamic_struct_to_dynamic_struct(criterion: &mut Criterion) { + let mut group = create_group(criterion, bench!("dynamic_struct_to_dynamic_struct")); let structs: [Box; 5] = [ - Box::new(Struct1::default().clone_dynamic()), - Box::new(Struct16::default().clone_dynamic()), - Box::new(Struct32::default().clone_dynamic()), - Box::new(Struct64::default().clone_dynamic()), - Box::new(Struct128::default().clone_dynamic()), + Box::new(Struct1::default().to_dynamic_struct()), + Box::new(Struct16::default().to_dynamic_struct()), + Box::new(Struct32::default().to_dynamic_struct()), + Box::new(Struct64::default().to_dynamic_struct()), + Box::new(Struct128::default().to_dynamic_struct()), ]; for s in structs { @@ -226,16 +234,14 @@ fn dynamic_struct_clone(criterion: &mut Criterion) { BenchmarkId::from_parameter(field_count), &s, |bencher, s| { - bencher.iter(|| black_box(s.clone_dynamic())); + bencher.iter(|| s.to_dynamic_struct()); }, ); } } fn dynamic_struct_apply(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_struct_apply"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_struct_apply")); let patches: &[(fn() -> Box, usize)] = &[ (|| Box::new(Struct16::default()), 16), @@ -259,7 +265,7 @@ fn dynamic_struct_apply(criterion: &mut Criterion) { &patch, |bencher, patch| { bencher.iter_batched( - || (base.clone_dynamic(), patch()), + || (base.to_dynamic_struct(), patch()), |(mut base, patch)| base.apply(black_box(&*patch)), BatchSize::SmallInput, ); @@ -283,7 +289,7 @@ fn dynamic_struct_apply(criterion: &mut Criterion) { } bencher.iter_batched( - || base.clone_dynamic(), + || base.to_dynamic_struct(), |mut base| base.apply(black_box(&patch)), 
BatchSize::SmallInput, ); @@ -293,9 +299,7 @@ fn dynamic_struct_apply(criterion: &mut Criterion) { } fn dynamic_struct_insert(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_struct_insert"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_struct_insert")); for field_count in SIZES { group.throughput(Throughput::Elements(field_count as u64)); @@ -311,7 +315,7 @@ fn dynamic_struct_insert(criterion: &mut Criterion) { let field = format!("field_{}", field_count); bencher.iter_batched( - || s.clone_dynamic(), + || s.to_dynamic_struct(), |mut s| { s.insert(black_box(&field), ()); }, @@ -325,9 +329,7 @@ fn dynamic_struct_insert(criterion: &mut Criterion) { } fn dynamic_struct_get_field(criterion: &mut Criterion) { - let mut group = criterion.benchmark_group("dynamic_struct_get"); - group.warm_up_time(WARM_UP_TIME); - group.measurement_time(MEASUREMENT_TIME); + let mut group = create_group(criterion, bench!("dynamic_struct_get_field")); for field_count in SIZES { group.throughput(Throughput::Elements(field_count as u64)); @@ -342,9 +344,7 @@ fn dynamic_struct_get_field(criterion: &mut Criterion) { } let field = black_box("field_63"); - bencher.iter(|| { - black_box(s.get_field::<()>(field)); - }); + bencher.iter(|| s.get_field::<()>(field)); }, ); } diff --git a/benches/benches/bevy_render/compute_normals.rs b/benches/benches/bevy_render/compute_normals.rs new file mode 100644 index 0000000000000..41bda05de9ca4 --- /dev/null +++ b/benches/benches/bevy_render/compute_normals.rs @@ -0,0 +1,96 @@ +use core::hint::black_box; + +use criterion::{criterion_group, Criterion}; +use rand::random; +use std::time::{Duration, Instant}; + +use bevy_render::{ + mesh::{Indices, Mesh, PrimitiveTopology}, + render_asset::RenderAssetUsages, +}; + +const GRID_SIZE: usize = 256; + +fn compute_normals(c: &mut Criterion) { + let indices = Indices::U32( + (0..GRID_SIZE - 1) 
+ .flat_map(|i| std::iter::repeat(i).zip(0..GRID_SIZE - 1)) + .flat_map(|(i, j)| { + let tl = ((GRID_SIZE * j) + i) as u32; + let tr = tl + 1; + let bl = ((GRID_SIZE * (j + 1)) + i) as u32; + let br = bl + 1; + [tl, bl, tr, tr, bl, br] + }) + .collect(), + ); + + let new_mesh = || { + let positions = (0..GRID_SIZE) + .flat_map(|i| std::iter::repeat(i).zip(0..GRID_SIZE)) + .map(|(i, j)| [i as f32, j as f32, random::()]) + .collect::>(); + Mesh::new( + PrimitiveTopology::TriangleList, + RenderAssetUsages::MAIN_WORLD, + ) + .with_inserted_attribute(Mesh::ATTRIBUTE_POSITION, positions) + .with_inserted_indices(indices.clone()) + }; + + c.bench_function("smooth_normals", |b| { + b.iter_custom(|iters| { + let mut total = Duration::default(); + for _ in 0..iters { + let mut mesh = new_mesh(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + let start = Instant::now(); + mesh.compute_smooth_normals(); + let end = Instant::now(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + total += end.duration_since(start); + } + total + }); + }); + + c.bench_function("face_weighted_normals", |b| { + b.iter_custom(|iters| { + let mut total = Duration::default(); + for _ in 0..iters { + let mut mesh = new_mesh(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + let start = Instant::now(); + mesh.compute_smooth_normals(); + let end = Instant::now(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + total += end.duration_since(start); + } + total + }); + }); + + let new_mesh = || { + new_mesh() + .with_duplicated_vertices() + .with_computed_flat_normals() + }; + + c.bench_function("flat_normals", |b| { + b.iter_custom(|iters| { + let mut total = Duration::default(); + for _ in 0..iters { + let mut mesh = new_mesh(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + let start = Instant::now(); + mesh.compute_flat_normals(); + let end = Instant::now(); + black_box(mesh.attribute(Mesh::ATTRIBUTE_NORMAL)); + total += end.duration_since(start); + } + total + }); + 
}); +} + +criterion_group!(benches, compute_normals); diff --git a/benches/benches/bevy_render/main.rs b/benches/benches/bevy_render/main.rs index 7a369bc905705..e335670222641 100644 --- a/benches/benches/bevy_render/main.rs +++ b/benches/benches/bevy_render/main.rs @@ -1,6 +1,11 @@ use criterion::criterion_main; +mod compute_normals; mod render_layers; mod torus; -criterion_main!(render_layers::benches, torus::benches); +criterion_main!( + render_layers::benches, + compute_normals::benches, + torus::benches +); diff --git a/benches/benches/bevy_render/render_layers.rs b/benches/benches/bevy_render/render_layers.rs index 42dd5356b55ed..d460a7bc96c48 100644 --- a/benches/benches/bevy_render/render_layers.rs +++ b/benches/benches/bevy_render/render_layers.rs @@ -1,4 +1,6 @@ -use criterion::{black_box, criterion_group, Criterion}; +use core::hint::black_box; + +use criterion::{criterion_group, Criterion}; use bevy_render::view::RenderLayers; diff --git a/benches/benches/bevy_render/torus.rs b/benches/benches/bevy_render/torus.rs index a5ef753bc8ccb..dcadd09180f9d 100644 --- a/benches/benches/bevy_render/torus.rs +++ b/benches/benches/bevy_render/torus.rs @@ -1,4 +1,6 @@ -use criterion::{black_box, criterion_group, Criterion}; +use core::hint::black_box; + +use criterion::{criterion_group, Criterion}; use bevy_render::mesh::TorusMeshBuilder; diff --git a/benches/benches/bevy_tasks/iter.rs b/benches/benches/bevy_tasks/iter.rs index 4f8f75c8ed0e8..7fe00ecb794db 100644 --- a/benches/benches/bevy_tasks/iter.rs +++ b/benches/benches/bevy_tasks/iter.rs @@ -1,5 +1,7 @@ +use core::hint::black_box; + use bevy_tasks::{ParallelIterator, TaskPoolBuilder}; -use criterion::{black_box, criterion_group, BenchmarkId, Criterion}; +use criterion::{criterion_group, BenchmarkId, Criterion}; struct ParChunks<'a, T>(core::slice::Chunks<'a, T>); impl<'a, T> ParallelIterator> for ParChunks<'a, T> @@ -61,7 +63,7 @@ fn bench_for_each(c: &mut Criterion) { b.iter(|| { v.iter_mut().for_each(|x| { 
busy_work(10000); - *x *= *x; + *x = x.wrapping_mul(*x); }); }); }); @@ -77,7 +79,7 @@ fn bench_for_each(c: &mut Criterion) { b.iter(|| { ParChunksMut(v.chunks_mut(100)).for_each(&pool, |x| { busy_work(10000); - *x *= *x; + *x = x.wrapping_mul(*x); }); }); }, diff --git a/benches/src/lib.rs b/benches/src/lib.rs new file mode 100644 index 0000000000000..699ab13e86461 --- /dev/null +++ b/benches/src/lib.rs @@ -0,0 +1,44 @@ +/// Automatically generates the qualified name of a benchmark given its function name and module +/// path. +/// +/// This macro takes a single string literal as input and returns a [`&'static str`](str). Its +/// result is determined at compile-time. If you need to create variations of a benchmark name +/// based on its input, use this in combination with [`BenchmarkId`](criterion::BenchmarkId). +/// +/// # When to use this +/// +/// Use this macro to name benchmarks that are not within a group and benchmark groups themselves. +/// You'll most commonly use this macro with: +/// +/// - [`Criterion::bench_function()`](criterion::Criterion::bench_function) +/// - [`Criterion::bench_with_input()`](criterion::Criterion::bench_with_input) +/// - [`Criterion::benchmark_group()`](criterion::Criterion::benchmark_group) +/// +/// You do not want to use this macro with +/// [`BenchmarkGroup::bench_function()`](criterion::BenchmarkGroup::bench_function) or +/// [`BenchmarkGroup::bench_with_input()`](criterion::BenchmarkGroup::bench_with_input), because +/// the group they are in already has the qualified path in it. +/// +/// # Example +/// +/// ``` +/// mod ecs { +/// mod query { +/// use criterion::Criterion; +/// use benches::bench; +/// +/// fn iter(c: &mut Criterion) { +/// // Benchmark name ends in `ecs::query::iter`. +/// c.bench_function(bench!("iter"), |b| { +/// // ... +/// }); +/// } +/// } +/// } +/// ``` +#[macro_export] +macro_rules! 
bench { + ($name:literal) => { + concat!(module_path!(), "::", $name) + }; +} diff --git a/clippy.toml b/clippy.toml index d1d234817a913..2c98e8ed02c30 100644 --- a/clippy.toml +++ b/clippy.toml @@ -41,4 +41,8 @@ disallowed-methods = [ { path = "f32::asinh", reason = "use bevy_math::ops::asinh instead for libm determinism" }, { path = "f32::acosh", reason = "use bevy_math::ops::acosh instead for libm determinism" }, { path = "f32::atanh", reason = "use bevy_math::ops::atanh instead for libm determinism" }, + { path = "criterion::black_box", reason = "use core::hint::black_box instead" }, ] + +# Require `bevy_ecs::children!` to use `[]` braces, instead of `()` or `{}`. +standard-macro-braces = [{ name = "children", brace = "[" }] diff --git a/crates/bevy_a11y/Cargo.toml b/crates/bevy_a11y/Cargo.toml index 73464d568ed4b..759cf3e7875c4 100644 --- a/crates/bevy_a11y/Cargo.toml +++ b/crates/bevy_a11y/Cargo.toml @@ -1,22 +1,55 @@ [package] name = "bevy_a11y" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides accessibility support for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy", "accessibility", "a11y"] +[features] +default = ["std", "bevy_reflect", "bevy_ecs/async_executor"] + +# Functionality + +## Adds runtime reflection support using `bevy_reflect`. +bevy_reflect = [ + "dep:bevy_reflect", + "bevy_app/bevy_reflect", + "bevy_ecs/bevy_reflect", +] + +## Adds serialization support through `serde`. +serialize = ["dep:serde", "bevy_ecs/serialize", "accesskit/serde"] + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. 
+std = ["bevy_app/std", "bevy_ecs/std", "bevy_reflect/std"] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = [ + "bevy_app/critical-section", + "bevy_ecs/critical-section", + "bevy_reflect?/critical-section", +] + [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev" } -bevy_input_focus = { path = "../bevy_input_focus", version = "0.15.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } -accesskit = "0.17" +# other +accesskit = { version = "0.18", default-features = false } +serde = { version = "1", default-features = false, features = [ + "alloc", +], optional = true } [lints] workspace = true diff --git a/crates/bevy_a11y/LICENSE-APACHE b/crates/bevy_a11y/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_a11y/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_a11y/LICENSE-MIT b/crates/bevy_a11y/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_a11y/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_a11y/src/lib.rs b/crates/bevy_a11y/src/lib.rs index 453ac7b7f88cd..910ec3ca3533f 100644 --- a/crates/bevy_a11y/src/lib.rs +++ b/crates/bevy_a11y/src/lib.rs @@ -4,6 +4,7 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] +#![no_std] //! Accessibility for Bevy //! @@ -13,6 +14,9 @@ //! //! Make sure to use the same version of `accesskit` as Bevy. 
+#[cfg(feature = "std")] +extern crate std; + extern crate alloc; use alloc::sync::Arc; @@ -23,12 +27,25 @@ use bevy_app::Plugin; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ prelude::{Component, Event}, + resource::Resource, schedule::SystemSet, - system::Resource, }; +#[cfg(feature = "bevy_reflect")] +use { + bevy_ecs::reflect::ReflectResource, bevy_reflect::std_traits::ReflectDefault, + bevy_reflect::Reflect, +}; + +#[cfg(feature = "serialize")] +use serde::{Deserialize, Serialize}; + +#[cfg(all(feature = "bevy_reflect", feature = "serialize"))] +use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; + /// Wrapper struct for [`accesskit::ActionRequest`]. Required to allow it to be used as an `Event`. #[derive(Event, Deref, DerefMut)] +#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] pub struct ActionRequest(pub accesskit::ActionRequest); /// Resource that tracks whether an assistive technology has requested @@ -37,6 +54,11 @@ pub struct ActionRequest(pub accesskit::ActionRequest); /// Useful if a third-party plugin needs to conditionally integrate with /// `AccessKit` #[derive(Resource, Default, Clone, Debug, Deref, DerefMut)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Default, Clone, Resource) +)] pub struct AccessibilityRequested(Arc); impl AccessibilityRequested { @@ -59,6 +81,16 @@ impl AccessibilityRequested { /// accessibility updates instead. Without this, the external library and ECS /// will generate conflicting updates. 
#[derive(Resource, Clone, Debug, Deref, DerefMut)] +#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Resource, Clone, Default) +)] +#[cfg_attr( + all(feature = "bevy_reflect", feature = "serialize"), + reflect(Serialize, Deserialize) +)] pub struct ManageAccessibilityUpdates(bool); impl Default for ManageAccessibilityUpdates { @@ -88,6 +120,7 @@ impl ManageAccessibilityUpdates { /// If the entity doesn't have a parent, or if the immediate parent doesn't have /// an `AccessibilityNode`, its node will be an immediate child of the primary window. #[derive(Component, Clone, Deref, DerefMut)] +#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] pub struct AccessibilityNode(pub Node); impl From for AccessibilityNode { @@ -98,6 +131,12 @@ impl From for AccessibilityNode { /// Set enum for the systems relating to accessibility #[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)] +#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr( + all(feature = "bevy_reflect", feature = "serialize"), + reflect(Serialize, Deserialize, Clone) +)] pub enum AccessibilitySystem { /// Update the accessibility tree Update, diff --git a/crates/bevy_animation/Cargo.toml b/crates/bevy_animation/Cargo.toml index b3312aa0ce2d7..11e819806c506 100644 --- a/crates/bevy_animation/Cargo.toml +++ b/crates/bevy_animation/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_animation" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides animation functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -10,35 +10,43 @@ keywords = ["bevy"] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = 
"../bevy_color", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_log = { path = "../bevy_log", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_log = { path = "../bevy_log", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_mesh = { path = "../bevy_mesh", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ "petgraph", ] } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_time = { path = "../bevy_time", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", + "serialize", +] } # other -petgraph = { version = "0.6", features = ["serde-1"] } +petgraph = { version = "0.7", features = ["serde-1"] } ron = "0.8" serde = "1" blake3 = { version = "1.0" } -downcast-rs = "1.2.0" +downcast-rs = { version = "2", default-features = false, features = 
["std"] } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } either = "1.13" thread_local = "1" -uuid = { version = "1.7", features = ["v4"] } +uuid = { version = "1.13.1", features = ["v4"] } smallvec = "1" +tracing = { version = "0.1", default-features = false, features = ["std"] } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. +uuid = { version = "1.13.1", default-features = false, features = ["js"] } [lints] workspace = true diff --git a/crates/bevy_animation/LICENSE-APACHE b/crates/bevy_animation/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_animation/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_animation/LICENSE-MIT b/crates/bevy_animation/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_animation/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_animation/src/animatable.rs b/crates/bevy_animation/src/animatable.rs index 6af653d3077b4..a345c5fce4f0a 100644 --- a/crates/bevy_animation/src/animatable.rs +++ b/crates/bevy_animation/src/animatable.rs @@ -18,7 +18,7 @@ pub struct BlendInput { /// An animatable value type. pub trait Animatable: Reflect + Sized + Send + Sync + 'static { - /// Interpolates between `a` and `b` with a interpolation factor of `time`. + /// Interpolates between `a` and `b` with an interpolation factor of `time`. /// /// The `time` parameter here may not be clamped to the range `[0.0, 1.0]`. 
fn interpolate(a: &Self, b: &Self, time: f32) -> Self; @@ -125,9 +125,8 @@ impl Animatable for bool { #[inline] fn blend(inputs: impl Iterator>) -> Self { inputs - .max_by(|a, b| FloatOrd(a.weight).cmp(&FloatOrd(b.weight))) - .map(|input| input.value) - .unwrap_or(false) + .max_by_key(|x| FloatOrd(x.weight)) + .is_some_and(|input| input.value) } } diff --git a/crates/bevy_animation/src/animation_curves.rs b/crates/bevy_animation/src/animation_curves.rs index 28069c1af4928..45fa393e05965 100644 --- a/crates/bevy_animation/src/animation_curves.rs +++ b/crates/bevy_animation/src/animation_curves.rs @@ -89,50 +89,59 @@ use core::{ marker::PhantomData, }; +use crate::{ + graph::AnimationNodeIndex, + prelude::{Animatable, BlendInput}, + AnimationEntityMut, AnimationEvaluationError, +}; use bevy_ecs::component::{Component, Mutable}; use bevy_math::curve::{ cores::{UnevenCore, UnevenCoreError}, iterable::IterableCurve, Curve, Interval, }; +use bevy_mesh::morph::MorphWeights; +use bevy_platform::hash::Hashed; use bevy_reflect::{FromReflect, Reflect, Reflectable, TypeInfo, Typed}; -use bevy_render::mesh::morph::MorphWeights; - -use crate::{ - graph::AnimationNodeIndex, - prelude::{Animatable, BlendInput}, - AnimationEntityMut, AnimationEvaluationError, -}; -use bevy_utils::Hashed; use downcast_rs::{impl_downcast, Downcast}; -/// A value on a component that Bevy can animate. +/// A trait for exposing a value in an entity so that it can be animated. /// -/// You can implement this trait on a unit struct in order to support animating -/// custom components other than transforms and morph weights. Use that type in -/// conjunction with [`AnimatableCurve`] (and perhaps [`AnimatableKeyframeCurve`] -/// to define the animation itself). -/// For example, in order to animate field of view, you might use: +/// `AnimatableProperty` allows any value contained in an entity to be animated +/// as long as it can be obtained by mutable reference. 
This makes it more +/// flexible than [`animated_field`]. +/// +/// [`animated_field`]: crate::animated_field +/// +/// Here, `AnimatableProperty` is used to animate a value inside an `Option`, +/// returning an error if the option is `None`. /// /// # use bevy_animation::{prelude::AnimatableProperty, AnimationEntityMut, AnimationEvaluationError, animation_curves::EvaluatorId}; -/// # use bevy_reflect::Reflect; +/// # use bevy_ecs::component::Component; /// # use std::any::TypeId; -/// # use bevy_render::camera::PerspectiveProjection; -/// #[derive(Reflect)] -/// struct FieldOfViewProperty; +/// #[derive(Component)] +/// struct ExampleComponent { +/// power_level: Option +/// } +/// +/// #[derive(Clone)] +/// struct PowerLevelProperty; /// -/// impl AnimatableProperty for FieldOfViewProperty { +/// impl AnimatableProperty for PowerLevelProperty { /// type Property = f32; -/// fn get_mut<'a>(&self, entity: &'a mut AnimationEntityMut) -> Result<&'a mut Self::Property, AnimationEvaluationError> { -/// let component = entity -/// .get_mut::() -/// .ok_or( -/// AnimationEvaluationError::ComponentNotPresent( -/// TypeId::of::() -/// ) -/// )? +/// fn get_mut<'a>( +/// &self, +/// entity: &'a mut AnimationEntityMut +/// ) -> Result<&'a mut Self::Property, AnimationEvaluationError> { +/// let component = entity +/// .get_mut::() +/// .ok_or(AnimationEvaluationError::ComponentNotPresent( +/// TypeId::of::() +/// ))? 
/// .into_inner(); -/// Ok(&mut component.fov) +/// component.power_level.as_mut().ok_or(AnimationEvaluationError::PropertyNotPresent( +/// TypeId::of::>() +/// )) /// } /// /// fn evaluator_id(&self) -> EvaluatorId { @@ -140,53 +149,44 @@ use downcast_rs::{impl_downcast, Downcast}; /// } /// } /// -/// You can then create an [`AnimationClip`] to animate this property like so: /// -/// # use bevy_animation::{AnimationClip, AnimationTargetId, VariableCurve, AnimationEntityMut, AnimationEvaluationError, animation_curves::EvaluatorId}; +/// You can then create an [`AnimatableCurve`] to animate this property like so: +/// +/// # use bevy_animation::{VariableCurve, AnimationEntityMut, AnimationEvaluationError, animation_curves::EvaluatorId}; /// # use bevy_animation::prelude::{AnimatableProperty, AnimatableKeyframeCurve, AnimatableCurve}; -/// # use bevy_ecs::name::Name; -/// # use bevy_reflect::Reflect; -/// # use bevy_render::camera::PerspectiveProjection; +/// # use bevy_ecs::{name::Name, component::Component}; /// # use std::any::TypeId; -/// # let animation_target_id = AnimationTargetId::from(&Name::new("Test")); -/// # #[derive(Reflect, Clone)] -/// # struct FieldOfViewProperty; -/// # impl AnimatableProperty for FieldOfViewProperty { -/// # type Property = f32; -/// # fn get_mut<'a>(&self, entity: &'a mut AnimationEntityMut) -> Result<&'a mut Self::Property, AnimationEvaluationError> { -/// # let component = entity -/// # .get_mut::() -/// # .ok_or( -/// # AnimationEvaluationError::ComponentNotPresent( -/// # TypeId::of::() -/// # ) -/// # )? 
-/// # .into_inner(); -/// # Ok(&mut component.fov) -/// # } -/// # fn evaluator_id(&self) -> EvaluatorId { -/// # EvaluatorId::Type(TypeId::of::()) -/// # } +/// # #[derive(Component)] +/// # struct ExampleComponent { power_level: Option } +/// # #[derive(Clone)] +/// # struct PowerLevelProperty; +/// # impl AnimatableProperty for PowerLevelProperty { +/// # type Property = f32; +/// # fn get_mut<'a>( +/// # &self, +/// # entity: &'a mut AnimationEntityMut +/// # ) -> Result<&'a mut Self::Property, AnimationEvaluationError> { +/// # let component = entity +/// # .get_mut::() +/// # .ok_or(AnimationEvaluationError::ComponentNotPresent( +/// # TypeId::of::() +/// # ))? +/// # .into_inner(); +/// # component.power_level.as_mut().ok_or(AnimationEvaluationError::PropertyNotPresent( +/// # TypeId::of::>() +/// # )) +/// # } +/// # fn evaluator_id(&self) -> EvaluatorId { +/// # EvaluatorId::Type(TypeId::of::()) +/// # } /// # } -/// let mut animation_clip = AnimationClip::default(); -/// animation_clip.add_curve_to_target( -/// animation_target_id, -/// AnimatableCurve::new( -/// FieldOfViewProperty, -/// AnimatableKeyframeCurve::new([ -/// (0.0, core::f32::consts::PI / 4.0), -/// (1.0, core::f32::consts::PI / 3.0), -/// ]).expect("Failed to create font size curve") -/// ) +/// AnimatableCurve::new( +/// PowerLevelProperty, +/// AnimatableKeyframeCurve::new([ +/// (0.0, 0.0), +/// (1.0, 9001.0), +/// ]).expect("Failed to create power level curve") /// ); -/// -/// Here, the use of [`AnimatableKeyframeCurve`] creates a curve out of the given keyframe time-value -/// pairs, using the [`Animatable`] implementation of `f32` to interpolate between them. The -/// invocation of [`AnimatableCurve::new`] with `FieldOfViewProperty` indicates that the `f32` -/// output from that curve is to be used to animate the font size of a `PerspectiveProjection` component (as -/// configured above). 
-/// -/// [`AnimationClip`]: crate::AnimationClip pub trait AnimatableProperty: Send + Sync + 'static { /// The animated property type. type Property: Animatable; @@ -974,6 +974,7 @@ where /// /// ``` /// # use bevy_animation::{animation_curves::AnimatedField, animated_field}; +/// # use bevy_color::Srgba; /// # use bevy_ecs::component::Component; /// # use bevy_math::Vec3; /// # use bevy_reflect::Reflect; @@ -983,10 +984,15 @@ where /// } /// /// let field = animated_field!(Transform::translation); +/// +/// #[derive(Component, Reflect)] +/// struct Color(Srgba); +/// +/// let tuple_field = animated_field!(Color::0); /// ``` #[macro_export] macro_rules! animated_field { - ($component:ident::$field:ident) => { + ($component:ident::$field:tt) => { AnimatedField::new_unchecked(stringify!($field), |component: &mut $component| { &mut component.$field }) diff --git a/crates/bevy_animation/src/gltf_curves.rs b/crates/bevy_animation/src/gltf_curves.rs index d5b2cbb6b984e..688011a32cf71 100644 --- a/crates/bevy_animation/src/gltf_curves.rs +++ b/crates/bevy_animation/src/gltf_curves.rs @@ -111,6 +111,7 @@ impl CubicKeyframeCurve { /// A keyframe-defined curve that uses cubic spline interpolation, special-cased for quaternions /// since it uses `Vec4` internally. #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct CubicRotationCurve { // Note: The sample width here should be 3. core: ChunkedUnevenCore, @@ -372,8 +373,9 @@ impl WideCubicKeyframeCurve { /// recommended to use its implementation of the [`IterableCurve`] trait, which allows iterating /// directly over information derived from the curve without allocating. /// -/// [`MorphWeights`]: bevy_render::prelude::MorphWeights +/// [`MorphWeights`]: bevy_mesh::morph::MorphWeights #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub enum WeightsCurve { /// A curve which takes a constant value over its domain. Notably, this is how animations with /// only a single keyframe are interpreted. 
diff --git a/crates/bevy_animation/src/graph.rs b/crates/bevy_animation/src/graph.rs index e570d25ab15e3..aa6d252fee94f 100644 --- a/crates/bevy_animation/src/graph.rs +++ b/crates/bevy_animation/src/graph.rs @@ -14,10 +14,11 @@ use bevy_ecs::{ component::Component, event::EventReader, reflect::ReflectComponent, - system::{Res, ResMut, Resource}, + resource::Resource, + system::{Res, ResMut}, }; +use bevy_platform::collections::HashMap; use bevy_reflect::{prelude::ReflectDefault, Reflect, ReflectSerialize}; -use bevy_utils::HashMap; use derive_more::derive::From; use petgraph::{ graph::{DiGraph, NodeIndex}, @@ -107,7 +108,7 @@ use crate::{AnimationClip, AnimationTargetId}; /// /// [RFC 51]: https://github.com/bevyengine/rfcs/blob/main/rfcs/51-animation-composition.md #[derive(Asset, Reflect, Clone, Debug, Serialize)] -#[reflect(Serialize, Debug)] +#[reflect(Serialize, Debug, Clone)] #[serde(into = "SerializedAnimationGraph")] pub struct AnimationGraph { /// The `petgraph` data structure that defines the animation graph. @@ -130,7 +131,7 @@ pub struct AnimationGraph { /// A [`Handle`] to the [`AnimationGraph`] to be used by the [`AnimationPlayer`](crate::AnimationPlayer) on the same entity. #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AnimationGraphHandle(pub Handle); impl From for AssetId { @@ -163,6 +164,7 @@ pub type AnimationNodeIndex = NodeIndex; /// of the graph, contain animation clips to play. Blend and add nodes describe /// how to combine their children to produce a final animation. #[derive(Clone, Reflect, Debug)] +#[reflect(Clone)] pub struct AnimationGraphNode { /// Animation node data specific to the type of node (clip, blend, or add). /// @@ -204,6 +206,7 @@ pub struct AnimationGraphNode { /// In the case of clip nodes, this contains the actual animation clip /// associated with the node. 
#[derive(Clone, Default, Reflect, Debug)] +#[reflect(Clone)] pub enum AnimationNodeType { /// A *clip node*, which plays an animation clip. /// @@ -883,10 +886,10 @@ impl ThreadedAnimationGraph { self.sorted_edge_ranges.clear(); self.sorted_edge_ranges - .extend(iter::repeat(0..0).take(node_count)); + .extend(iter::repeat_n(0..0, node_count)); self.computed_masks.clear(); - self.computed_masks.extend(iter::repeat(0).take(node_count)); + self.computed_masks.extend(iter::repeat_n(0, node_count)); } /// Recursively constructs the [`ThreadedAnimationGraph`] for the subtree diff --git a/crates/bevy_animation/src/lib.rs b/crates/bevy_animation/src/lib.rs index 04033447912d0..43ea343aa311c 100644 --- a/crates/bevy_animation/src/lib.rs +++ b/crates/bevy_animation/src/lib.rs @@ -32,24 +32,18 @@ use crate::{ }; use bevy_app::{Animation, App, Plugin, PostUpdate}; -use bevy_asset::{Asset, AssetApp, Assets}; -use bevy_ecs::{ - entity::{VisitEntities, VisitEntitiesMut}, - prelude::*, - reflect::{ReflectMapEntities, ReflectVisitEntities, ReflectVisitEntitiesMut}, - world::EntityMutExcept, -}; +use bevy_asset::{Asset, AssetApp, AssetEvents, Assets}; +use bevy_ecs::{prelude::*, world::EntityMutExcept}; use bevy_math::FloatOrd; +use bevy_platform::{collections::HashMap, hash::NoOpHash}; use bevy_reflect::{prelude::ReflectDefault, Reflect, TypePath}; use bevy_time::Time; use bevy_transform::TransformSystem; -use bevy_utils::{ - tracing::{trace, warn}, - HashMap, NoOpHash, PreHashMap, PreHashMapExt, TypeIdMap, -}; +use bevy_utils::{PreHashMap, PreHashMapExt, TypeIdMap}; use petgraph::graph::NodeIndex; use serde::{Deserialize, Serialize}; use thread_local::ThreadLocal; +use tracing::{trace, warn}; use uuid::Uuid; /// The animation prelude. @@ -102,23 +96,26 @@ impl VariableCurve { /// Because animation clips refer to targets by UUID, they can target any /// [`AnimationTarget`] with that ID. 
#[derive(Asset, Reflect, Clone, Debug, Default)] +#[reflect(Clone, Default)] pub struct AnimationClip { // This field is ignored by reflection because AnimationCurves can contain things that are not reflect-able - #[reflect(ignore)] + #[reflect(ignore, clone)] curves: AnimationCurves, events: AnimationEvents, duration: f32, } #[derive(Reflect, Debug, Clone)] +#[reflect(Clone)] struct TimedAnimationEvent { time: f32, event: AnimationEvent, } #[derive(Reflect, Debug, Clone)] +#[reflect(Clone)] struct AnimationEvent { - #[reflect(ignore)] + #[reflect(ignore, clone)] trigger: AnimationEventFn, } @@ -130,6 +127,7 @@ impl AnimationEvent { #[derive(Reflect, Clone)] #[reflect(opaque)] +#[reflect(Clone, Default, Debug)] struct AnimationEventFn(Arc); impl Default for AnimationEventFn { @@ -145,6 +143,7 @@ impl Debug for AnimationEventFn { } #[derive(Reflect, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone)] +#[reflect(Clone)] enum AnimationEventTarget { Root, Node(AnimationTargetId), @@ -178,6 +177,7 @@ pub type AnimationCurves = HashMap, NoOpHa /// /// [UUID]: https://en.wikipedia.org/wiki/Universally_unique_identifier #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Reflect, Debug, Serialize, Deserialize)] +#[reflect(Clone)] pub struct AnimationTargetId(pub Uuid); impl Hash for AnimationTargetId { @@ -208,16 +208,16 @@ impl Hash for AnimationTargetId { /// Note that each entity can only be animated by one animation player at a /// time. However, you can change [`AnimationTarget`]'s `player` property at /// runtime to change which player is responsible for animating the entity. -#[derive(Clone, Copy, Component, Reflect, VisitEntities, VisitEntitiesMut)] -#[reflect(Component, MapEntities, VisitEntities, VisitEntitiesMut)] +#[derive(Clone, Copy, Component, Reflect)] +#[reflect(Component, Clone)] pub struct AnimationTarget { /// The ID of this animation target. /// /// Typically, this is derived from the path. 
- #[visit_entities(ignore)] pub id: AnimationTargetId, /// The entity containing the [`AnimationPlayer`]. + #[entities] pub player: Entity, } @@ -431,6 +431,7 @@ impl AnimationClip { /// Repetition behavior of an animation. #[derive(Reflect, Debug, PartialEq, Eq, Copy, Clone, Default)] +#[reflect(Clone, Default)] pub enum RepeatAnimation { /// The animation will finish after running once. #[default] @@ -466,8 +467,9 @@ pub enum AnimationEvaluationError { /// An animation that an [`AnimationPlayer`] is currently either playing or was /// playing, but is presently paused. /// -/// An stopped animation is considered no longer active. +/// A stopped animation is considered no longer active. #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone, Default)] pub struct ActiveAnimation { /// The factor by which the weight from the [`AnimationGraph`] is multiplied. weight: f32, @@ -680,10 +682,9 @@ impl ActiveAnimation { /// Automatically added to any root animations of a scene when it is /// spawned. #[derive(Component, Default, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AnimationPlayer { active_animations: HashMap, - blend_weights: HashMap, } // This is needed since `#[derive(Clone)]` does not generate optimized `clone_from`. 
@@ -691,13 +692,11 @@ impl Clone for AnimationPlayer { fn clone(&self) -> Self { Self { active_animations: self.active_animations.clone(), - blend_weights: self.blend_weights.clone(), } } fn clone_from(&mut self, source: &Self) { self.active_animations.clone_from(&source.active_animations); - self.blend_weights.clone_from(&source.blend_weights); } } @@ -756,10 +755,10 @@ impl AnimationCurveEvaluators { .component_property_curve_evaluators .get_or_insert_with(component_property, func), EvaluatorId::Type(type_id) => match self.type_id_curve_evaluators.entry(type_id) { - bevy_utils::hashbrown::hash_map::Entry::Occupied(occupied_entry) => { + bevy_platform::collections::hash_map::Entry::Occupied(occupied_entry) => { &mut **occupied_entry.into_mut() } - bevy_utils::hashbrown::hash_map::Entry::Vacant(vacant_entry) => { + bevy_platform::collections::hash_map::Entry::Vacant(vacant_entry) => { &mut **vacant_entry.insert(func()) } }, @@ -938,13 +937,6 @@ impl AnimationPlayer { pub fn animation_mut(&mut self, animation: AnimationNodeIndex) -> Option<&mut ActiveAnimation> { self.active_animations.get_mut(&animation) } - - #[deprecated = "Use `is_playing_animation` instead"] - /// Returns true if the animation is currently playing or paused, or false - /// if the animation is stopped. - pub fn animation_is_playing(&self, animation: AnimationNodeIndex) -> bool { - self.active_animations.contains_key(&animation) - } } /// A system that triggers untargeted animation events for the currently-playing animations. 
@@ -1057,8 +1049,8 @@ pub fn animate_targets( (player, graph_handle.id()) } else { trace!( - "Either an animation player {:?} or a graph was missing for the target \ - entity {:?} ({:?}); no animations will play this frame", + "Either an animation player {} or a graph was missing for the target \ + entity {} ({:?}); no animations will play this frame", player_id, entity_mut.id(), entity_mut.get::(), @@ -1252,7 +1244,7 @@ impl Plugin for AnimationPlugin { .add_systems( PostUpdate, ( - graph::thread_animation_graphs, + graph::thread_animation_graphs.before(AssetEvents), advance_transitions, advance_animations, // TODO: `animate_targets` can animate anything, so @@ -1262,7 +1254,7 @@ impl Plugin for AnimationPlugin { // `PostUpdate`. For now, we just disable ambiguity testing // for this system. animate_targets - .after(bevy_render::mesh::inherit_weights) + .before(bevy_render::mesh::inherit_weights) .ambiguous_with_all(), trigger_untargeted_animation_events, expire_completed_transitions, @@ -1538,6 +1530,8 @@ impl<'a> Iterator for TriggeredEventsIter<'a> { #[cfg(test)] mod tests { + use bevy_reflect::{DynamicMap, Map}; + use super::*; #[derive(Event, Reflect, Clone)] @@ -1669,4 +1663,13 @@ mod tests { active_animation.update(clip.duration, clip.duration); // 0.3 : 0.0 assert_triggered_events_with(&active_animation, &clip, [0.3, 0.2]); } + + #[test] + fn test_animation_node_index_as_key_of_dynamic_map() { + let mut map = DynamicMap::default(); + map.insert_boxed( + Box::new(AnimationNodeIndex::new(0)), + Box::new(ActiveAnimation::default()), + ); + } } diff --git a/crates/bevy_animation/src/transition.rs b/crates/bevy_animation/src/transition.rs index 679c63bec3ffb..494855970441d 100644 --- a/crates/bevy_animation/src/transition.rs +++ b/crates/bevy_animation/src/transition.rs @@ -10,7 +10,7 @@ use bevy_ecs::{ }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_time::Time; -use bevy_utils::Duration; +use core::time::Duration; use 
crate::{graph::AnimationNodeIndex, ActiveAnimation, AnimationPlayer}; @@ -29,7 +29,7 @@ use crate::{graph::AnimationNodeIndex, ActiveAnimation, AnimationPlayer}; /// component to get confused about which animation is the "main" animation, and /// transitions will usually be incorrect as a result. #[derive(Component, Default, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AnimationTransitions { main_animation: Option, transitions: Vec, @@ -52,6 +52,7 @@ impl Clone for AnimationTransitions { /// An animation that is being faded out as part of a transition #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub struct AnimationTransition { /// The current weight. Starts at 1.0 and goes to 0.0 during the fade-out. current_weight: f32, @@ -117,8 +118,9 @@ pub fn advance_transitions( // is divided between all the other layers, eventually culminating in the // currently-playing animation receiving whatever's left. This results in a // nicely normalized weight. - let mut remaining_weight = 1.0; for (mut animation_transitions, mut player) in query.iter_mut() { + let mut remaining_weight = 1.0; + for transition in &mut animation_transitions.transitions.iter_mut().rev() { // Decrease weight. 
transition.current_weight = (transition.current_weight diff --git a/crates/bevy_anti_aliasing/Cargo.toml b/crates/bevy_anti_aliasing/Cargo.toml new file mode 100644 index 0000000000000..5a8e48ecb56ef --- /dev/null +++ b/crates/bevy_anti_aliasing/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "bevy_anti_aliasing" +version = "0.16.0-dev" +edition = "2024" +description = "Provides various anti aliasing implementations for Bevy Engine" +homepage = "https://bevyengine.org" +repository = "https://github.com/bevyengine/bevy" +license = "MIT OR Apache-2.0" +keywords = ["bevy"] + +[features] +trace = [] +webgl = [] +webgpu = [] +smaa_luts = ["bevy_render/ktx2", "bevy_image/ktx2", "bevy_image/zstd"] + +[dependencies] +# bevy +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.16.0-dev" } +bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } + +# other +tracing = { version = "0.1", default-features = false, features = ["std"] } + +[lints] +workspace = true + +[package.metadata.docs.rs] +rustdoc-args = ["-Zunstable-options", "--generate-link-to-definition"] +all-features = true diff --git a/crates/bevy_anti_aliasing/LICENSE-APACHE b/crates/bevy_anti_aliasing/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_anti_aliasing/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + 
http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_anti_aliasing/LICENSE-MIT b/crates/bevy_anti_aliasing/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_anti_aliasing/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_anti_aliasing/README.md b/crates/bevy_anti_aliasing/README.md new file mode 100644 index 0000000000000..ba0123c31bc44 --- /dev/null +++ b/crates/bevy_anti_aliasing/README.md @@ -0,0 +1,7 @@ +# Bevy Anti Aliasing + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy_core_pipeline.svg)](https://crates.io/crates/bevy_core_pipeline) +[![Downloads](https://img.shields.io/crates/d/bevy_core_pipeline.svg)](https://crates.io/crates/bevy_core_pipeline) +[![Docs](https://docs.rs/bevy_core_pipeline/badge.svg)](https://docs.rs/bevy_core_pipeline/latest/bevy_core_pipeline/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/mod.rs b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/mod.rs similarity index 96% rename from crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/mod.rs rename to crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/mod.rs index fbc3ecfec3f75..a07b5e223926d 100644 --- a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/mod.rs +++ b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/mod.rs @@ -1,10 +1,10 @@ -use crate::{ +use bevy_app::prelude::*; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_core_pipeline::{ core_2d::graph::{Core2d, Node2d}, core_3d::graph::{Core3d, Node3d}, fullscreen_vertex_shader::fullscreen_shader_vertex_state, }; -use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, Handle}; use bevy_ecs::{prelude::*, query::QueryItem}; use bevy_image::BevyDefault as _; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -36,7 +36,7 @@ pub use node::CasNode; /// /// To use this, add the [`ContrastAdaptiveSharpening`] component to a 2D or 3D 
camera. #[derive(Component, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct ContrastAdaptiveSharpening { /// Enable or disable sharpening. pub enabled: bool, @@ -54,9 +54,6 @@ pub struct ContrastAdaptiveSharpening { pub denoise: bool, } -#[deprecated(since = "0.15.0", note = "Renamed to `ContrastAdaptiveSharpening`")] -pub type ContrastAdaptiveSharpeningSettings = ContrastAdaptiveSharpening; - impl Default for ContrastAdaptiveSharpening { fn default() -> Self { ContrastAdaptiveSharpening { @@ -68,7 +65,7 @@ impl Default for ContrastAdaptiveSharpening { } #[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct DenoiseCas(bool); /// The uniform struct extracted from [`ContrastAdaptiveSharpening`] attached to a [`Camera`]. @@ -99,7 +96,7 @@ impl ExtractComponent for ContrastAdaptiveSharpening { } const CONTRAST_ADAPTIVE_SHARPENING_SHADER_HANDLE: Handle = - Handle::weak_from_u128(6925381244141981602); + weak_handle!("ef83f0a5-51df-4b51-9ab7-b5fd1ae5a397"); /// Adds Support for Contrast Adaptive Sharpening (CAS). 
pub struct CasPlugin; diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/node.rs similarity index 100% rename from crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/node.rs rename to crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/node.rs diff --git a/crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl b/crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl similarity index 100% rename from crates/bevy_core_pipeline/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl rename to crates/bevy_anti_aliasing/src/contrast_adaptive_sharpening/robust_contrast_adaptive_sharpening.wgsl diff --git a/crates/bevy_anti_aliasing/src/experimental/mod.rs b/crates/bevy_anti_aliasing/src/experimental/mod.rs new file mode 100644 index 0000000000000..a8dc522c56293 --- /dev/null +++ b/crates/bevy_anti_aliasing/src/experimental/mod.rs @@ -0,0 +1,9 @@ +//! Experimental rendering features. +//! +//! Experimental features are features with known problems, missing features, +//! compatibility issues, low performance, and/or future breaking changes, but +//! are included nonetheless for testing purposes. 
+ +pub mod taa { + pub use crate::taa::{TemporalAntiAliasNode, TemporalAntiAliasPlugin, TemporalAntiAliasing}; +} diff --git a/crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl b/crates/bevy_anti_aliasing/src/fxaa/fxaa.wgsl similarity index 100% rename from crates/bevy_core_pipeline/src/fxaa/fxaa.wgsl rename to crates/bevy_anti_aliasing/src/fxaa/fxaa.wgsl diff --git a/crates/bevy_core_pipeline/src/fxaa/mod.rs b/crates/bevy_anti_aliasing/src/fxaa/mod.rs similarity index 96% rename from crates/bevy_core_pipeline/src/fxaa/mod.rs rename to crates/bevy_anti_aliasing/src/fxaa/mod.rs index 547b59762442f..6d7824cf21095 100644 --- a/crates/bevy_core_pipeline/src/fxaa/mod.rs +++ b/crates/bevy_anti_aliasing/src/fxaa/mod.rs @@ -1,10 +1,10 @@ -use crate::{ +use bevy_app::prelude::*; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_core_pipeline::{ core_2d::graph::{Core2d, Node2d}, core_3d::graph::{Core3d, Node3d}, fullscreen_vertex_shader::fullscreen_shader_vertex_state, }; -use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, Handle}; use bevy_ecs::prelude::*; use bevy_image::BevyDefault as _; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -27,7 +27,7 @@ mod node; pub use node::FxaaNode; #[derive(Debug, Reflect, Eq, PartialEq, Hash, Clone, Copy)] -#[reflect(PartialEq, Hash)] +#[reflect(PartialEq, Hash, Clone)] pub enum Sensitivity { Low, Medium, @@ -51,7 +51,7 @@ impl Sensitivity { /// A component for enabling Fast Approximate Anti-Aliasing (FXAA) /// for a [`bevy_render::camera::Camera`]. 
#[derive(Reflect, Component, Clone, ExtractComponent)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[extract_component_filter(With)] #[doc(alias = "FastApproximateAntiAliasing")] pub struct Fxaa { @@ -80,7 +80,7 @@ impl Default for Fxaa { } } -const FXAA_SHADER_HANDLE: Handle = Handle::weak_from_u128(4182761465141723543); +const FXAA_SHADER_HANDLE: Handle = weak_handle!("fc58c0a8-01c0-46e9-94cc-83a794bae7b0"); /// Adds support for Fast Approximate Anti-Aliasing (FXAA) pub struct FxaaPlugin; diff --git a/crates/bevy_core_pipeline/src/fxaa/node.rs b/crates/bevy_anti_aliasing/src/fxaa/node.rs similarity index 100% rename from crates/bevy_core_pipeline/src/fxaa/node.rs rename to crates/bevy_anti_aliasing/src/fxaa/node.rs diff --git a/crates/bevy_anti_aliasing/src/lib.rs b/crates/bevy_anti_aliasing/src/lib.rs new file mode 100644 index 0000000000000..be09a2e5b2665 --- /dev/null +++ b/crates/bevy_anti_aliasing/src/lib.rs @@ -0,0 +1,27 @@ +#![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")] +#![forbid(unsafe_code)] +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc( + html_logo_url = "https://bevyengine.org/assets/icon.png", + html_favicon_url = "https://bevyengine.org/assets/icon.png" +)] + +use bevy_app::Plugin; +use contrast_adaptive_sharpening::CasPlugin; +use fxaa::FxaaPlugin; +use smaa::SmaaPlugin; + +pub mod contrast_adaptive_sharpening; +pub mod experimental; +pub mod fxaa; +pub mod smaa; + +mod taa; + +#[derive(Default)] +pub struct AntiAliasingPlugin; +impl Plugin for AntiAliasingPlugin { + fn build(&self, app: &mut bevy_app::App) { + app.add_plugins((FxaaPlugin, CasPlugin, SmaaPlugin)); + } +} diff --git a/crates/bevy_core_pipeline/src/smaa/SMAAAreaLUT.ktx2 b/crates/bevy_anti_aliasing/src/smaa/SMAAAreaLUT.ktx2 similarity index 100% rename from crates/bevy_core_pipeline/src/smaa/SMAAAreaLUT.ktx2 rename to crates/bevy_anti_aliasing/src/smaa/SMAAAreaLUT.ktx2 diff --git 
a/crates/bevy_core_pipeline/src/smaa/SMAASearchLUT.ktx2 b/crates/bevy_anti_aliasing/src/smaa/SMAASearchLUT.ktx2 similarity index 100% rename from crates/bevy_core_pipeline/src/smaa/SMAASearchLUT.ktx2 rename to crates/bevy_anti_aliasing/src/smaa/SMAASearchLUT.ktx2 diff --git a/crates/bevy_core_pipeline/src/smaa/mod.rs b/crates/bevy_anti_aliasing/src/smaa/mod.rs similarity index 97% rename from crates/bevy_core_pipeline/src/smaa/mod.rs rename to crates/bevy_anti_aliasing/src/smaa/mod.rs index 7471cdb09ab76..f1e4d28678ebc 100644 --- a/crates/bevy_core_pipeline/src/smaa/mod.rs +++ b/crates/bevy_anti_aliasing/src/smaa/mod.rs @@ -29,24 +29,25 @@ //! * Compatibility with SSAA and MSAA. //! //! [SMAA]: https://www.iryoku.com/smaa/ +use bevy_app::{App, Plugin}; +#[cfg(feature = "smaa_luts")] +use bevy_asset::load_internal_binary_asset; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; #[cfg(not(feature = "smaa_luts"))] -use crate::tonemapping::lut_placeholder; -use crate::{ +use bevy_core_pipeline::tonemapping::lut_placeholder; +use bevy_core_pipeline::{ core_2d::graph::{Core2d, Node2d}, core_3d::graph::{Core3d, Node3d}, }; -use bevy_app::{App, Plugin}; -#[cfg(feature = "smaa_luts")] -use bevy_asset::load_internal_binary_asset; -use bevy_asset::{load_internal_asset, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ component::Component, entity::Entity, query::{QueryItem, With}, reflect::ReflectComponent, - schedule::IntoSystemConfigs as _, - system::{lifetimeless::Read, Commands, Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::{BevyDefault, Image}; @@ -80,11 +81,13 @@ use bevy_render::{ use bevy_utils::prelude::default; /// The handle of the `smaa.wgsl` shader. 
-const SMAA_SHADER_HANDLE: Handle = Handle::weak_from_u128(12247928498010601081); +const SMAA_SHADER_HANDLE: Handle = weak_handle!("fdd9839f-1ab4-4e0d-88a0-240b67da2ddf"); /// The handle of the area LUT, a KTX2 format texture that SMAA uses internally. -const SMAA_AREA_LUT_TEXTURE_HANDLE: Handle = Handle::weak_from_u128(15283551734567401670); +const SMAA_AREA_LUT_TEXTURE_HANDLE: Handle = + weak_handle!("569c4d67-c7fa-4958-b1af-0836023603c0"); /// The handle of the search LUT, a KTX2 format texture that SMAA uses internally. -const SMAA_SEARCH_LUT_TEXTURE_HANDLE: Handle = Handle::weak_from_u128(3187314362190283210); +const SMAA_SEARCH_LUT_TEXTURE_HANDLE: Handle = + weak_handle!("43b97515-252e-4c8a-b9af-f2fc528a1c27"); /// Adds support for subpixel morphological antialiasing, or SMAA. pub struct SmaaPlugin; @@ -92,7 +95,7 @@ pub struct SmaaPlugin; /// A component for enabling Subpixel Morphological Anti-Aliasing (SMAA) /// for a [`bevy_render::camera::Camera`]. #[derive(Clone, Copy, Default, Component, Reflect, ExtractComponent)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[doc(alias = "SubpixelMorphologicalAntiAliasing")] pub struct Smaa { /// A predefined set of SMAA parameters: i.e. a quality level. @@ -101,16 +104,13 @@ pub struct Smaa { pub preset: SmaaPreset, } -#[deprecated(since = "0.15.0", note = "Renamed to `Smaa`")] -pub type SmaaSettings = Smaa; - /// A preset quality level for SMAA. /// /// Higher values are slower but result in a higher-quality image. /// /// The default value is *high*. #[derive(Clone, Copy, Reflect, Default, PartialEq, Eq, Hash)] -#[reflect(Default)] +#[reflect(Default, Clone, PartialEq, Hash)] pub enum SmaaPreset { /// Four search steps; no diagonal or corner detection. 
Low, @@ -297,8 +297,6 @@ impl Plugin for SmaaPlugin { SMAA_AREA_LUT_TEXTURE_HANDLE, "SMAAAreaLUT.ktx2", |bytes, _: String| Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - "SMAAAreaLUT".to_owned(), bytes, bevy_image::ImageType::Format(bevy_image::ImageFormat::Ktx2), bevy_image::CompressedImageFormats::NONE, @@ -315,8 +313,6 @@ impl Plugin for SmaaPlugin { SMAA_SEARCH_LUT_TEXTURE_HANDLE, "SMAASearchLUT.ktx2", |bytes, _: String| Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - "SMAASearchLUT".to_owned(), bytes, bevy_image::ImageType::Format(bevy_image::ImageFormat::Ktx2), bevy_image::CompressedImageFormats::NONE, @@ -914,7 +910,6 @@ impl ViewNode for SmaaNode { /// writes to the two-channel RG edges texture. Additionally, it ensures that /// all pixels it didn't touch are stenciled out so that phase 2 won't have to /// examine them. -#[allow(clippy::too_many_arguments)] fn perform_edge_detection( render_context: &mut RenderContext, smaa_pipelines: &SmaaPipelines, @@ -968,7 +963,6 @@ fn perform_edge_detection( /// This runs as part of the [`SmaaNode`]. It reads the edges texture and writes /// to the blend weight texture, using the stencil buffer to avoid processing /// pixels it doesn't need to examine. -#[allow(clippy::too_many_arguments)] fn perform_blending_weight_calculation( render_context: &mut RenderContext, smaa_pipelines: &SmaaPipelines, @@ -1027,7 +1021,6 @@ fn perform_blending_weight_calculation( /// /// This runs as part of the [`SmaaNode`]. It reads from the blend weight /// texture. It's the only phase that writes to the postprocessing destination. 
-#[allow(clippy::too_many_arguments)] fn perform_neighborhood_blending( render_context: &mut RenderContext, smaa_pipelines: &SmaaPipelines, diff --git a/crates/bevy_core_pipeline/src/smaa/smaa.wgsl b/crates/bevy_anti_aliasing/src/smaa/smaa.wgsl similarity index 99% rename from crates/bevy_core_pipeline/src/smaa/smaa.wgsl rename to crates/bevy_anti_aliasing/src/smaa/smaa.wgsl index 5c95c18c2602f..08723254483c6 100644 --- a/crates/bevy_core_pipeline/src/smaa/smaa.wgsl +++ b/crates/bevy_anti_aliasing/src/smaa/smaa.wgsl @@ -44,7 +44,7 @@ * Here you'll find instructions to get the shader up and running as fast as * possible. * - * IMPORTANTE NOTICE: when updating, remember to update both this file and the + * IMPORTANT NOTICE: when updating, remember to update both this file and the * precomputed textures! They may change from version to version. * * The shader has three passes, chained together as follows: @@ -429,7 +429,7 @@ const SMAA_CORNER_ROUNDING: u32 = 25u; // "SMAA Presets".) /** - * If there is an neighbor edge that has SMAA_LOCAL_CONTRAST_FACTOR times + * If there is a neighbor edge that has SMAA_LOCAL_CONTRAST_FACTOR times * bigger contrast than current edge, current edge will be discarded. 
* * This allows to eliminate spurious crossing edges, and is based on the fact diff --git a/crates/bevy_core_pipeline/src/taa/mod.rs b/crates/bevy_anti_aliasing/src/taa/mod.rs similarity index 94% rename from crates/bevy_core_pipeline/src/taa/mod.rs rename to crates/bevy_anti_aliasing/src/taa/mod.rs index 559ce4e3a55bc..cf5ac269e23ba 100644 --- a/crates/bevy_core_pipeline/src/taa/mod.rs +++ b/crates/bevy_anti_aliasing/src/taa/mod.rs @@ -1,19 +1,18 @@ -#![expect(deprecated)] - -use crate::{ +use bevy_app::{App, Plugin}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_core_pipeline::{ core_3d::graph::{Core3d, Node3d}, fullscreen_vertex_shader::fullscreen_shader_vertex_state, prelude::Camera3d, prepass::{DepthPrepass, MotionVectorPrepass, ViewPrepassTextures}, }; -use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; use bevy_diagnostic::FrameCount; use bevy_ecs::{ - prelude::{require, Bundle, Component, Entity, ReflectComponent}, + prelude::{Component, Entity, ReflectComponent}, query::{QueryItem, With}, - schedule::IntoSystemConfigs, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs, + system::{Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::BevyDefault as _; @@ -39,9 +38,9 @@ use bevy_render::{ view::{ExtractedView, Msaa, ViewTarget}, ExtractSchedule, MainWorld, Render, RenderApp, RenderSet, }; -use bevy_utils::tracing::warn; +use tracing::warn; -const TAA_SHADER_HANDLE: Handle = Handle::weak_from_u128(656865235226276); +const TAA_SHADER_HANDLE: Handle = weak_handle!("fea20d50-86b6-4069-aa32-374346aec00c"); /// Plugin for temporal anti-aliasing. /// @@ -92,19 +91,6 @@ impl Plugin for TemporalAntiAliasPlugin { } } -/// Bundle to apply temporal anti-aliasing. -#[derive(Bundle, Default, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `TemporalAntiAlias` component instead. 
Inserting it will now also insert the other components required by it automatically." -)] -pub struct TemporalAntiAliasBundle { - pub settings: TemporalAntiAliasing, - pub jitter: TemporalJitter, - pub depth_prepass: DepthPrepass, - pub motion_vector_prepass: MotionVectorPrepass, -} - /// Component to apply temporal anti-aliasing to a 3D perspective camera. /// /// Temporal anti-aliasing (TAA) is a form of image smoothing/filtering, like @@ -145,7 +131,7 @@ pub struct TemporalAntiAliasBundle { /// /// If no [`MipBias`] component is attached to the camera, TAA will add a `MipBias(-1.0)` component. #[derive(Component, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require(TemporalJitter, DepthPrepass, MotionVectorPrepass)] #[doc(alias = "Taa")] pub struct TemporalAntiAliasing { @@ -159,9 +145,6 @@ pub struct TemporalAntiAliasing { pub reset: bool, } -#[deprecated(since = "0.15.0", note = "Renamed to `TemporalAntiAliasing`")] -pub type TemporalAntiAliasSettings = TemporalAntiAliasing; - impl Default for TemporalAntiAliasing { fn default() -> Self { Self { reset: true } diff --git a/crates/bevy_core_pipeline/src/taa/taa.wgsl b/crates/bevy_anti_aliasing/src/taa/taa.wgsl similarity index 100% rename from crates/bevy_core_pipeline/src/taa/taa.wgsl rename to crates/bevy_anti_aliasing/src/taa/taa.wgsl diff --git a/crates/bevy_app/Cargo.toml b/crates/bevy_app/Cargo.toml index 416e8cf16553f..f46db94db36bc 100644 --- a/crates/bevy_app/Cargo.toml +++ b/crates/bevy_app/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_app" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides core App functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,7 +9,7 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -default = ["std", "bevy_reflect", "bevy_tasks", "bevy_ecs/default", "downcast"] +default = ["std", 
"bevy_reflect", "bevy_ecs/default", "error_panic_hook"] # Functionality @@ -23,12 +23,6 @@ reflect_functions = [ "bevy_ecs/reflect_functions", ] -## Adds support for running async background tasks -bevy_tasks = ["dep:bevy_tasks"] - -## Adds `downcast-rs` integration for `Plugin` -downcast = ["dep:downcast-rs"] - # Debugging Features ## Enables `tracing` integration, allowing spans and other metrics to be reported @@ -39,6 +33,10 @@ trace = ["dep:tracing"] ## other debug operations which can help with diagnosing certain behaviors. bevy_debug_stepping = [] +## Will set the BevyError panic hook, which gives cleaner filtered backtraces when +## a BevyError is hit. +error_panic_hook = [] + # Platform Compatibility ## Allows access to the `std` crate. Enabling this feature will prevent compilation @@ -48,58 +46,58 @@ std = [ "bevy_reflect?/std", "bevy_ecs/std", "dep:ctrlc", - "downcast-rs?/std", + "downcast-rs/std", "bevy_utils/std", - "bevy_tasks?/std", + "bevy_tasks/std", + "bevy_platform/std", ] ## `critical-section` provides the building blocks for synchronization primitives ## on all platforms, including `no_std`. critical-section = [ - "portable-atomic?/critical-section", - "bevy_tasks?/critical-section", + "bevy_tasks/critical-section", "bevy_ecs/critical-section", + "bevy_platform/critical-section", + "bevy_reflect?/critical-section", ] -## `portable-atomic` provides additional platform support for atomic types and -## operations, even on targets without native support. -portable-atomic = [ - "dep:portable-atomic", - "dep:portable-atomic-util", - "bevy_tasks?/portable-atomic", - "bevy_ecs/portable-atomic", +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. 
+web = [ + "bevy_platform/web", + "bevy_tasks/web", + "bevy_reflect?/web", + "dep:wasm-bindgen", + "dep:web-sys", + "dep:console_error_panic_hook", ] [dependencies] # bevy -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev", default-features = false } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", default-features = false, optional = true } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev", default-features = false, features = [ +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } -bevy_tasks = { path = "../bevy_tasks", version = "0.15.0-dev", default-features = false, optional = true } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } # other -downcast-rs = { version = "1.2.0", default-features = false, optional = true } +downcast-rs = { version = "2", default-features = false } thiserror = { version = "2", default-features = false } variadics_please = "1.1" tracing = { version = "0.1", default-features = false, optional = true } log = { version = "0.4", default-features = false } -portable-atomic = { version = "1", default-features = false, features = [ - "fallback", -], optional = true } -portable-atomic-util = { version = "0.2.4", features = [ - "alloc", -], optional = true } +cfg-if = "1.0.0" -[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +[target.'cfg(any(unix, windows))'.dependencies] ctrlc = { version = "3.4.4", optional = true } [target.'cfg(target_arch = "wasm32")'.dependencies] 
-wasm-bindgen = { version = "0.2" } -web-sys = { version = "0.3", features = ["Window"] } -console_error_panic_hook = "0.1.6" +wasm-bindgen = { version = "0.2", optional = true } +web-sys = { version = "0.3", features = ["Window"], optional = true } +console_error_panic_hook = { version = "0.1.6", optional = true } [dev-dependencies] crossbeam-channel = "0.5.0" diff --git a/crates/bevy_app/LICENSE-APACHE b/crates/bevy_app/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_app/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_app/LICENSE-MIT b/crates/bevy_app/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_app/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_app/src/app.rs b/crates/bevy_app/src/app.rs index aa6dfb62e99cd..654a7098b9c34 100644 --- a/crates/bevy_app/src/app.rs +++ b/crates/bevy_app/src/app.rs @@ -5,6 +5,7 @@ use crate::{ use alloc::{ boxed::Box, string::{String, ToString}, + vec::Vec, }; pub use bevy_derive::AppLabel; use bevy_ecs::{ @@ -12,13 +13,12 @@ use bevy_ecs::{ event::{event_update_system, EventCursor}, intern::Interned, prelude::*, - schedule::{ScheduleBuildSettings, ScheduleLabel}, - system::{IntoObserverSystem, SystemId, SystemInput}, + schedule::{InternedSystemSet, ScheduleBuildSettings, ScheduleLabel}, + system::{IntoObserverSystem, ScheduleSystem, SystemId, SystemInput}, }; -use bevy_utils::HashMap; +use bevy_platform::collections::HashMap; use core::{fmt::Debug, num::NonZero, panic::AssertUnwindSafe}; use log::debug; -use thiserror::Error; #[cfg(feature = "trace")] use tracing::info_span; @@ -29,11 +29,11 @@ use std::{ process::{ExitCode, Termination}, }; -#[cfg(feature = "downcast")] -use alloc::vec::Vec; - bevy_ecs::define_label!( /// A strongly-typed class of labels used to identify an [`App`]. + #[diagnostic::on_unimplemented( + note = "consider annotating `{Self}` with `#[derive(AppLabel)]`" + )] AppLabel, APP_LABEL_INTERNER ); @@ -43,7 +43,7 @@ pub use bevy_ecs::label::DynEq; /// A shorthand for `Interned`. 
pub type InternedAppLabel = Interned; -#[derive(Debug, Error)] +#[derive(Debug, thiserror::Error)] pub(crate) enum AppError { #[error("duplicate plugin {plugin_name:?}")] DuplicatePlugin { plugin_name: String }, @@ -106,6 +106,8 @@ impl Default for App { { app.init_resource::(); app.register_type::(); + app.register_type::(); + app.register_type::(); } #[cfg(feature = "reflect_functions")] @@ -299,7 +301,7 @@ impl App { pub fn add_systems( &mut self, schedule: impl ScheduleLabel, - systems: impl IntoSystemConfigs, + systems: impl IntoScheduleConfigs, ) -> &mut Self { self.main_mut().add_systems(schedule, systems); self @@ -327,10 +329,10 @@ impl App { /// Configures a collection of system sets in the provided schedule, adding any sets that do not exist. #[track_caller] - pub fn configure_sets( + pub fn configure_sets( &mut self, schedule: impl ScheduleLabel, - sets: impl IntoSystemSetConfigs, + sets: impl IntoScheduleConfigs, ) -> &mut Self { self.main_mut().configure_sets(schedule, sets); self @@ -522,7 +524,6 @@ impl App { /// # app.add_plugins(ImagePlugin::default()); /// let default_sampler = app.get_added_plugins::()[0].default_sampler; /// ``` - #[cfg(feature = "downcast")] pub fn get_added_plugins(&self) -> Vec<&T> where T: Plugin, @@ -1032,6 +1033,17 @@ impl App { .try_register_required_components_with::(constructor) } + /// Registers a component type as "disabling", + /// using [default query filters](bevy_ecs::entity_disabling::DefaultQueryFilters) to exclude entities with the component from queries. + /// + /// # Warning + /// + /// As discussed in the [module docs](bevy_ecs::entity_disabling), this can have performance implications, + /// as well as create interoperability issues, and should be used with caution. + pub fn register_disabling_component(&mut self) { + self.world_mut().register_disabling_component::(); + } + /// Returns a reference to the main [`SubApp`]'s [`World`]. This is the same as calling /// [`app.main().world()`]. 
/// @@ -1328,7 +1340,7 @@ type RunnerFn = Box AppExit>; fn run_once(mut app: App) -> AppExit { while app.plugins_state() == PluginsState::Adding { - #[cfg(all(not(target_arch = "wasm32"), feature = "bevy_tasks"))] + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] bevy_tasks::tick_global_task_pools_on_main_thread(); } app.finish(); @@ -1360,7 +1372,7 @@ pub enum AppExit { } impl AppExit { - /// Creates a [`AppExit::Error`] with a error code of 1. + /// Creates a [`AppExit::Error`] with an error code of 1. #[must_use] pub const fn error() -> Self { Self::Error(NonZero::::MIN) @@ -1392,7 +1404,6 @@ impl AppExit { } impl From for AppExit { - #[must_use] fn from(value: u8) -> Self { Self::from_code(value) } @@ -1411,7 +1422,7 @@ impl Termination for AppExit { #[cfg(test)] mod tests { - use core::{iter, marker::PhantomData}; + use core::marker::PhantomData; use std::sync::Mutex; use bevy_ecs::{ @@ -1421,8 +1432,9 @@ mod tests { event::{Event, EventWriter, Events}, query::With, removal_detection::RemovedComponents, - schedule::{IntoSystemConfigs, ScheduleLabel}, - system::{Commands, Query, Resource}, + resource::Resource, + schedule::{IntoScheduleConfigs, ScheduleLabel}, + system::{Commands, Query}, world::{FromWorld, World}, }; @@ -1529,7 +1541,6 @@ mod tests { #[test] fn test_derive_app_label() { use super::AppLabel; - use crate::{self as bevy_app}; #[derive(AppLabel, Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] struct UnitLabel; @@ -1635,7 +1646,7 @@ mod tests { struct Foo; let mut app = App::new(); - app.world_mut().spawn_batch(iter::repeat(Foo).take(5)); + app.world_mut().spawn_batch(core::iter::repeat_n(Foo, 5)); fn despawn_one_foo(mut commands: Commands, foos: Query>) { if let Some(e) = foos.iter().next() { @@ -1661,7 +1672,6 @@ mod tests { #[test] fn test_extract_sees_changes() { use super::AppLabel; - use crate::{self as bevy_app}; #[derive(AppLabel, Clone, Copy, Hash, PartialEq, Eq, Debug)] struct MySubApp; @@ -1690,9 +1700,9 @@ mod tests { fn 
raise_exits(mut exits: EventWriter) { // Exit codes chosen by a fair dice roll. // Unlikely to overlap with default values. - exits.send(AppExit::Success); - exits.send(AppExit::from_code(4)); - exits.send(AppExit::from_code(73)); + exits.write(AppExit::Success); + exits.write(AppExit::from_code(4)); + exits.write(AppExit::from_code(73)); } let exit = App::new().add_systems(Update, raise_exits).run(); @@ -1736,7 +1746,7 @@ mod tests { #[test] fn app_exit_size() { - // There wont be many of them so the size isn't a issue but + // There wont be many of them so the size isn't an issue but // it's nice they're so small let's keep it that way. assert_eq!(size_of::(), size_of::()); } diff --git a/crates/bevy_app/src/lib.rs b/crates/bevy_app/src/lib.rs index ff680e4f04ac6..6772136414876 100644 --- a/crates/bevy_app/src/lib.rs +++ b/crates/bevy_app/src/lib.rs @@ -11,12 +11,18 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] //! This crate is about everything concerning the highest-level, application layer of a Bevy app. +#[cfg(feature = "std")] +extern crate std; + extern crate alloc; +// Required to make proc macros work in bevy itself. +extern crate self as bevy_app; + mod app; mod main_schedule; mod panic_handler; @@ -24,9 +30,8 @@ mod plugin; mod plugin_group; mod schedule_runner; mod sub_app; -#[cfg(feature = "bevy_tasks")] mod task_pool_plugin; -#[cfg(all(not(target_arch = "wasm32"), feature = "std"))] +#[cfg(all(any(unix, windows), feature = "std"))] mod terminal_ctrl_c_handler; pub use app::*; @@ -36,9 +41,8 @@ pub use plugin::*; pub use plugin_group::*; pub use schedule_runner::*; pub use sub_app::*; -#[cfg(feature = "bevy_tasks")] pub use task_pool_plugin::*; -#[cfg(all(not(target_arch = "wasm32"), feature = "std"))] +#[cfg(all(any(unix, windows), feature = "std"))] pub use terminal_ctrl_c_handler::*; /// The app prelude. 
@@ -54,10 +58,6 @@ pub mod prelude { RunFixedMainLoopSystem, SpawnScene, Startup, Update, }, sub_app::SubApp, - Plugin, PluginGroup, + Plugin, PluginGroup, TaskPoolOptions, TaskPoolPlugin, }; - - #[cfg(feature = "bevy_tasks")] - #[doc(hidden)] - pub use crate::{NonSendMarker, TaskPoolOptions, TaskPoolPlugin}; } diff --git a/crates/bevy_app/src/main_schedule.rs b/crates/bevy_app/src/main_schedule.rs index 227a976de7e97..23e8ca0c330a4 100644 --- a/crates/bevy_app/src/main_schedule.rs +++ b/crates/bevy_app/src/main_schedule.rs @@ -1,11 +1,12 @@ use crate::{App, Plugin}; use alloc::{vec, vec::Vec}; use bevy_ecs::{ + resource::Resource, schedule::{ - ExecutorKind, InternedScheduleLabel, IntoSystemSetConfigs, Schedule, ScheduleLabel, + ExecutorKind, InternedScheduleLabel, IntoScheduleConfigs, Schedule, ScheduleLabel, SystemSet, }, - system::{Local, Resource}, + system::Local, world::{Mut, World}, }; @@ -14,6 +15,13 @@ use bevy_ecs::{ /// By default, it will run the following schedules in the given order: /// /// On the first run of the schedule (and only on the first run), it will run: +/// * [`StateTransition`] [^1] +/// * This means that [`OnEnter(MyState::Foo)`] will be called *before* [`PreStartup`] +/// if `MyState` was added to the app with `MyState::Foo` as the initial state, +/// as well as [`OnEnter(MyComputedState)`] if it `compute`s to `Some(Self)` in `MyState::Foo`. +/// * If you want to run systems before any state transitions, regardless of which state is the starting state, +/// for example, for registering required components, you can add your own custom startup schedule +/// before [`StateTransition`]. See [`MainScheduleOrder::insert_startup_before`] for more details. 
/// * [`PreStartup`] /// * [`Startup`] /// * [`PostStartup`] @@ -21,7 +29,7 @@ use bevy_ecs::{ /// Then it will run: /// * [`First`] /// * [`PreUpdate`] -/// * [`StateTransition`] +/// * [`StateTransition`] [^1] /// * [`RunFixedMainLoop`] /// * This will run [`FixedMain`] zero to many times, based on how much time has elapsed. /// * [`Update`] @@ -36,35 +44,39 @@ use bevy_ecs::{ /// /// See [`RenderPlugin`] and [`PipelinedRenderingPlugin`] for more details. /// +/// [^1]: [`StateTransition`] is inserted only if you have `bevy_state` feature enabled. It is enabled in `default` features. +/// /// [`StateTransition`]: https://docs.rs/bevy/latest/bevy/prelude/struct.StateTransition.html +/// [`OnEnter(MyState::Foo)`]: https://docs.rs/bevy/latest/bevy/prelude/struct.OnEnter.html +/// [`OnEnter(MyComputedState)`]: https://docs.rs/bevy/latest/bevy/prelude/struct.OnEnter.html /// [`RenderPlugin`]: https://docs.rs/bevy/latest/bevy/render/struct.RenderPlugin.html /// [`PipelinedRenderingPlugin`]: https://docs.rs/bevy/latest/bevy/render/pipelined_rendering/struct.PipelinedRenderingPlugin.html /// [`SubApp`]: crate::SubApp -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Main; /// The schedule that runs before [`Startup`]. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PreStartup; /// The schedule that runs once when the app starts. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Startup; /// The schedule that runs once after [`Startup`]. /// /// See the [`Main`] schedule for some details about how schedules are run. 
-#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PostStartup; /// Runs first in the schedule. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct First; /// The schedule that contains logic that must run before [`Update`]. For example, a system that reads raw keyboard @@ -75,7 +87,7 @@ pub struct First; /// [`PreUpdate`] abstracts out "pre work implementation details". /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PreUpdate; /// Runs the [`FixedMain`] schedule in a loop according until all relevant elapsed time has been "consumed". @@ -87,21 +99,21 @@ pub struct PreUpdate; /// [`RunFixedMainLoop`] will *not* be parallelized between each other. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct RunFixedMainLoop; /// Runs first in the [`FixedMain`] schedule. /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedFirst; /// The schedule that contains logic that must run before [`FixedUpdate`]. /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. 
-#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedPreUpdate; /// The schedule that contains most gameplay logic, which runs at a fixed rate rather than every render frame. @@ -116,7 +128,7 @@ pub struct FixedPreUpdate; /// See the [`Update`] schedule for examples of systems that *should not* use this schedule. /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedUpdate; /// The schedule that runs after the [`FixedUpdate`] schedule, for reacting @@ -124,14 +136,14 @@ pub struct FixedUpdate; /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedPostUpdate; /// The schedule that runs last in [`FixedMain`] /// /// See the [`FixedMain`] schedule for details on how fixed updates work. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedLast; /// The schedule that contains systems which only run after a fixed period of time has elapsed. @@ -143,7 +155,7 @@ pub struct FixedLast; /// See [this example](https://github.com/bevyengine/bevy/blob/latest/examples/time/time.rs). /// /// See the [`Main`] schedule for some details about how schedules are run. 
-#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct FixedMain; /// The schedule that contains any app logic that must run once per render frame. @@ -156,13 +168,13 @@ pub struct FixedMain; /// /// See the [`FixedUpdate`] schedule for examples of systems that *should not* use this schedule. /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Update; /// The schedule that contains scene spawning. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct SpawnScene; /// The schedule that contains logic that must run after [`Update`]. For example, synchronizing "local transforms" in a hierarchy @@ -173,13 +185,13 @@ pub struct SpawnScene; /// [`PostUpdate`] abstracts out "implementation details" from users defining systems in [`Update`]. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct PostUpdate; /// Runs last in the schedule. /// /// See the [`Main`] schedule for some details about how schedules are run. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct Last; /// Animation system set. This exists in [`PostUpdate`]. 
@@ -315,7 +327,7 @@ impl Plugin for MainSchedulePlugin { #[cfg(feature = "bevy_debug_stepping")] { - use bevy_ecs::schedule::{IntoSystemConfigs, Stepping}; + use bevy_ecs::schedule::{IntoScheduleConfigs, Stepping}; app.add_systems(Main, Stepping::begin_frame.before(Main::run_main)); } } diff --git a/crates/bevy_app/src/panic_handler.rs b/crates/bevy_app/src/panic_handler.rs index 56d66da7281b2..1021a3dc2e5ab 100644 --- a/crates/bevy_app/src/panic_handler.rs +++ b/crates/bevy_app/src/panic_handler.rs @@ -11,7 +11,7 @@ use crate::{App, Plugin}; /// Adds sensible panic handlers to Apps. This plugin is part of the `DefaultPlugins`. Adding /// this plugin will setup a panic hook appropriate to your target platform: /// * On Wasm, uses [`console_error_panic_hook`](https://crates.io/crates/console_error_panic_hook), logging -/// to the browser console. +/// to the browser console. /// * Other platforms are currently not setup. /// /// ```no_run @@ -39,13 +39,23 @@ pub struct PanicHandlerPlugin; impl Plugin for PanicHandlerPlugin { fn build(&self, _app: &mut App) { - #[cfg(target_arch = "wasm32")] + #[cfg(feature = "std")] { - console_error_panic_hook::set_once(); - } - #[cfg(not(target_arch = "wasm32"))] - { - // Use the default target panic hook - Do nothing. + static SET_HOOK: std::sync::Once = std::sync::Once::new(); + SET_HOOK.call_once(|| { + cfg_if::cfg_if! { + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + // This provides better panic handling in JS engines (displays the panic message and improves the backtrace). + std::panic::set_hook(alloc::boxed::Box::new(console_error_panic_hook::hook)); + } else if #[cfg(feature = "error_panic_hook")] { + let current_hook = std::panic::take_hook(); + std::panic::set_hook(alloc::boxed::Box::new( + bevy_ecs::error::bevy_error_panic_hook(current_hook), + )); + } + // Otherwise use the default target panic hook - Do nothing. 
+ } + }); } } } diff --git a/crates/bevy_app/src/plugin.rs b/crates/bevy_app/src/plugin.rs index 724dbfde93e27..dc26e273f42c7 100644 --- a/crates/bevy_app/src/plugin.rs +++ b/crates/bevy_app/src/plugin.rs @@ -1,20 +1,6 @@ -// TODO: Upstream `portable-atomic` support to `downcast_rs` and unconditionally -// include it as a dependency. -// See https://github.com/marcianx/downcast-rs/pull/22 for details -#[cfg(feature = "downcast")] -use downcast_rs::{impl_downcast, Downcast}; - use crate::App; use core::any::Any; - -/// Dummy trait with the same name as `downcast_rs::Downcast`. This is to ensure -/// the `Plugin: Downcast` bound can remain even when `downcast` isn't enabled. -#[cfg(not(feature = "downcast"))] -#[doc(hidden)] -pub trait Downcast {} - -#[cfg(not(feature = "downcast"))] -impl Downcast for T {} +use downcast_rs::{impl_downcast, Downcast}; /// A collection of Bevy app logic and configuration. /// @@ -105,7 +91,6 @@ pub trait Plugin: Downcast + Any + Send + Sync { } } -#[cfg(feature = "downcast")] impl_downcast!(Plugin); impl Plugin for T { @@ -183,7 +168,10 @@ mod sealed { where $($plugins: Plugins<$param>),* { - // We use `allow` instead of `expect` here because the lint is not generated for all cases. + #[expect( + clippy::allow_attributes, + reason = "This is inside a macro, and as such, may not trigger in all cases." 
+ )] #[allow(non_snake_case, reason = "`all_tuples!()` generates non-snake-case variable names.")] #[allow(unused_variables, reason = "`app` is unused when implemented for the unit type `()`.")] #[track_caller] diff --git a/crates/bevy_app/src/plugin_group.rs b/crates/bevy_app/src/plugin_group.rs index ce78f52315c62..60897d5453066 100644 --- a/crates/bevy_app/src/plugin_group.rs +++ b/crates/bevy_app/src/plugin_group.rs @@ -4,6 +4,7 @@ use alloc::{ string::{String, ToString}, vec::Vec, }; +use bevy_platform::collections::hash_map::Entry; use bevy_utils::TypeIdMap; use core::any::TypeId; use log::{debug, warn}; @@ -224,14 +225,9 @@ impl PluginGroup for PluginGroupBuilder { } } -/// Helper method to get the [`TypeId`] of a value without having to name its type. -fn type_id_of_val(_: &T) -> TypeId { - TypeId::of::() -} - /// Facilitates the creation and configuration of a [`PluginGroup`]. /// -/// Provides a build ordering to ensure that [`Plugin`]s which produce/require a [`Resource`](bevy_ecs::system::Resource) +/// Provides a build ordering to ensure that [`Plugin`]s which produce/require a [`Resource`](bevy_ecs::resource::Resource) /// are built before/after dependent/depending [`Plugin`]s. [`Plugin`]s inside the group /// can be disabled, enabled or reordered. pub struct PluginGroupBuilder { @@ -250,20 +246,23 @@ impl PluginGroupBuilder { } } - /// Finds the index of a target [`Plugin`]. Panics if the target's [`TypeId`] is not found. - fn index_of(&self) -> usize { - let index = self - .order + /// Checks if the [`PluginGroupBuilder`] contains the given [`Plugin`]. + pub fn contains(&self) -> bool { + self.plugins.contains_key(&TypeId::of::()) + } + + /// Returns `true` if the [`PluginGroupBuilder`] contains the given [`Plugin`] and it's enabled. + pub fn enabled(&self) -> bool { + self.plugins + .get(&TypeId::of::()) + .is_some_and(|e| e.enabled) + } + + /// Finds the index of a target [`Plugin`]. 
+ fn index_of(&self) -> Option { + self.order .iter() - .position(|&ty| ty == TypeId::of::()); - - match index { - Some(i) => i, - None => panic!( - "Plugin does not exist in group: {}.", - core::any::type_name::() - ), - } + .position(|&ty| ty == TypeId::of::()) } // Insert the new plugin as enabled, and removes its previous ordering if it was @@ -311,15 +310,27 @@ impl PluginGroupBuilder { /// # Panics /// /// Panics if the [`Plugin`] does not exist. - pub fn set(mut self, plugin: T) -> Self { - let entry = self.plugins.get_mut(&TypeId::of::()).unwrap_or_else(|| { + pub fn set(self, plugin: T) -> Self { + self.try_set(plugin).unwrap_or_else(|_| { panic!( "{} does not exist in this PluginGroup", core::any::type_name::(), ) - }); - entry.plugin = Box::new(plugin); - self + }) + } + + /// Tries to set the value of the given [`Plugin`], if it exists. + /// + /// If the given plugin doesn't exist returns self and the passed in [`Plugin`]. + pub fn try_set(mut self, plugin: T) -> Result { + match self.plugins.entry(TypeId::of::()) { + Entry::Occupied(mut entry) => { + entry.get_mut().plugin = Box::new(plugin); + + Ok(self) + } + Entry::Vacant(_) => Err((self, plugin)), + } } /// Adds the plugin [`Plugin`] at the end of this [`PluginGroupBuilder`]. If the plugin was @@ -336,6 +347,17 @@ impl PluginGroupBuilder { self } + /// Attempts to add the plugin [`Plugin`] at the end of this [`PluginGroupBuilder`]. + /// + /// If the plugin was already in the group the addition fails. + pub fn try_add(self, plugin: T) -> Result { + if self.contains::() { + return Err((self, plugin)); + } + + Ok(self.add(plugin)) + } + /// Adds a [`PluginGroup`] at the end of this [`PluginGroupBuilder`]. If the plugin was /// already in the group, it is removed from its previous place. pub fn add_group(mut self, group: impl PluginGroup) -> Self { @@ -357,23 +379,105 @@ impl PluginGroupBuilder { } /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] before the plugin of type `Target`. 
- /// If the plugin was already the group, it is removed from its previous place. There must - /// be a plugin of type `Target` in the group or it will panic. - pub fn add_before(mut self, plugin: impl Plugin) -> Self { - let target_index = self.index_of::(); - self.order.insert(target_index, type_id_of_val(&plugin)); + /// + /// If the plugin was already the group, it is removed from its previous place. + /// + /// # Panics + /// + /// Panics if `Target` is not already in this [`PluginGroupBuilder`]. + pub fn add_before(self, plugin: impl Plugin) -> Self { + self.try_add_before_overwrite::(plugin) + .unwrap_or_else(|_| { + panic!( + "Plugin does not exist in group: {}.", + core::any::type_name::() + ) + }) + } + + /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] before the plugin of type `Target`. + /// + /// If the plugin was already in the group the add fails. If there isn't a plugin + /// of type `Target` in the group the plugin we're trying to insert is returned. + pub fn try_add_before( + self, + plugin: Insert, + ) -> Result { + if self.contains::() { + return Err((self, plugin)); + } + + self.try_add_before_overwrite::(plugin) + } + + /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] before the plugin of type `Target`. + /// + /// If the plugin was already in the group, it is removed from its previous places. + /// If there isn't a plugin of type `Target` in the group the plugin we're trying to insert + /// is returned. + pub fn try_add_before_overwrite( + mut self, + plugin: Insert, + ) -> Result { + let Some(target_index) = self.index_of::() else { + return Err((self, plugin)); + }; + + self.order.insert(target_index, TypeId::of::()); self.upsert_plugin_state(plugin, target_index); - self + Ok(self) } /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] after the plugin of type `Target`. - /// If the plugin was already the group, it is removed from its previous place. There must - /// be a plugin of type `Target` in the group or it will panic. 
- pub fn add_after(mut self, plugin: impl Plugin) -> Self { - let target_index = self.index_of::() + 1; - self.order.insert(target_index, type_id_of_val(&plugin)); + /// + /// If the plugin was already the group, it is removed from its previous place. + /// + /// # Panics + /// + /// Panics if `Target` is not already in this [`PluginGroupBuilder`]. + pub fn add_after(self, plugin: impl Plugin) -> Self { + self.try_add_after_overwrite::(plugin) + .unwrap_or_else(|_| { + panic!( + "Plugin does not exist in group: {}.", + core::any::type_name::() + ) + }) + } + + /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] after the plugin of type `Target`. + /// + /// If the plugin was already in the group the add fails. If there isn't a plugin + /// of type `Target` in the group the plugin we're trying to insert is returned. + pub fn try_add_after( + self, + plugin: Insert, + ) -> Result { + if self.contains::() { + return Err((self, plugin)); + } + + self.try_add_after_overwrite::(plugin) + } + + /// Adds a [`Plugin`] in this [`PluginGroupBuilder`] after the plugin of type `Target`. + /// + /// If the plugin was already in the group, it is removed from its previous places. + /// If there isn't a plugin of type `Target` in the group the plugin we're trying to insert + /// is returned. + pub fn try_add_after_overwrite( + mut self, + plugin: Insert, + ) -> Result { + let Some(target_index) = self.index_of::() else { + return Err((self, plugin)); + }; + + let target_index = target_index + 1; + + self.order.insert(target_index, TypeId::of::()); self.upsert_plugin_state(plugin, target_index); - self + Ok(self) } /// Enables a [`Plugin`]. 
@@ -451,6 +555,9 @@ impl PluginGroup for NoopPluginGroup { #[cfg(test)] mod tests { + use alloc::vec; + use core::{any::TypeId, fmt::Debug}; + use super::PluginGroupBuilder; use crate::{App, NoopPluginGroup, Plugin}; @@ -469,6 +576,35 @@ mod tests { fn build(&self, _: &mut App) {} } + #[derive(PartialEq, Debug)] + struct PluginWithData(u32); + impl Plugin for PluginWithData { + fn build(&self, _: &mut App) {} + } + + fn get_plugin(group: &PluginGroupBuilder, id: TypeId) -> &T { + group.plugins[&id] + .plugin + .as_any() + .downcast_ref::() + .unwrap() + } + + #[test] + fn contains() { + let group = PluginGroupBuilder::start::() + .add(PluginA) + .add(PluginB); + + assert!(group.contains::()); + assert!(!group.contains::()); + + let group = group.disable::(); + + assert!(group.enabled::()); + assert!(!group.enabled::()); + } + #[test] fn basic_ordering() { let group = PluginGroupBuilder::start::() @@ -479,13 +615,56 @@ mod tests { assert_eq!( group.order, vec![ - core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + ] + ); + } + + #[test] + fn add_before() { + let group = PluginGroupBuilder::start::() + .add(PluginA) + .add(PluginB) + .add_before::(PluginC); + + assert_eq!( + group.order, + vec![ + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } + #[test] + fn try_add_before() { + let group = PluginGroupBuilder::start::().add(PluginA); + + let Ok(group) = group.try_add_before::(PluginC) else { + panic!("PluginA wasn't in group"); + }; + + assert_eq!( + group.order, + vec![TypeId::of::(), TypeId::of::(),] + ); + + assert!(group.try_add_before::(PluginC).is_err()); + } + + #[test] + #[should_panic( + expected = "Plugin does not exist in group: bevy_app::plugin_group::tests::PluginB." 
+ )] + fn add_before_nonexistent() { + PluginGroupBuilder::start::() + .add(PluginA) + .add_before::(PluginC); + } + #[test] fn add_after() { let group = PluginGroupBuilder::start::() @@ -496,26 +675,103 @@ mod tests { assert_eq!( group.order, vec![ - core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } #[test] - fn add_before() { + fn try_add_after() { let group = PluginGroupBuilder::start::() .add(PluginA) - .add(PluginB) - .add_before::(PluginC); + .add(PluginB); + + let Ok(group) = group.try_add_after::(PluginC) else { + panic!("PluginA wasn't in group"); + }; assert_eq!( group.order, vec![ - core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + ] + ); + + assert!(group.try_add_after::(PluginC).is_err()); + } + + #[test] + #[should_panic( + expected = "Plugin does not exist in group: bevy_app::plugin_group::tests::PluginB." 
+ )] + fn add_after_nonexistent() { + PluginGroupBuilder::start::() + .add(PluginA) + .add_after::(PluginC); + } + + #[test] + fn add_overwrite() { + let group = PluginGroupBuilder::start::() + .add(PluginA) + .add(PluginWithData(0x0F)) + .add(PluginC); + + let id = TypeId::of::(); + assert_eq!( + get_plugin::(&group, id), + &PluginWithData(0x0F) + ); + + let group = group.add(PluginWithData(0xA0)); + + assert_eq!( + get_plugin::(&group, id), + &PluginWithData(0xA0) + ); + assert_eq!( + group.order, + vec![ + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + ] + ); + + let Ok(group) = group.try_add_before_overwrite::(PluginWithData(0x01)) else { + panic!("PluginA wasn't in group"); + }; + assert_eq!( + get_plugin::(&group, id), + &PluginWithData(0x01) + ); + assert_eq!( + group.order, + vec![ + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + ] + ); + + let Ok(group) = group.try_add_after_overwrite::(PluginWithData(0xdeadbeef)) + else { + panic!("PluginA wasn't in group"); + }; + assert_eq!( + get_plugin::(&group, id), + &PluginWithData(0xdeadbeef) + ); + assert_eq!( + group.order, + vec![ + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } @@ -531,45 +787,45 @@ mod tests { assert_eq!( group.order, vec![ - core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } #[test] - fn readd_after() { + fn readd_before() { let group = PluginGroupBuilder::start::() .add(PluginA) .add(PluginB) .add(PluginC) - .add_after::(PluginC); + .add_before::(PluginC); assert_eq!( group.order, vec![ - core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } #[test] - fn readd_before() { + fn readd_after() { let group = PluginGroupBuilder::start::() .add(PluginA) .add(PluginB) .add(PluginC) - .add_before::(PluginC); + .add_after::(PluginC); assert_eq!( group.order, vec![ - 
core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } @@ -587,9 +843,9 @@ mod tests { assert_eq!( group_b.order, vec![ - core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } @@ -611,9 +867,9 @@ mod tests { assert_eq!( group.order, vec![ - core::any::TypeId::of::(), - core::any::TypeId::of::(), - core::any::TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), ] ); } diff --git a/crates/bevy_app/src/schedule_runner.rs b/crates/bevy_app/src/schedule_runner.rs index d1e3865b52ac8..594f849b2f905 100644 --- a/crates/bevy_app/src/schedule_runner.rs +++ b/crates/bevy_app/src/schedule_runner.rs @@ -3,14 +3,12 @@ use crate::{ plugin::Plugin, PluginsState, }; -use bevy_utils::Duration; +use bevy_platform::time::Instant; +use core::time::Duration; -#[cfg(any(target_arch = "wasm32", feature = "std"))] -use bevy_utils::Instant; - -#[cfg(target_arch = "wasm32")] +#[cfg(all(target_arch = "wasm32", feature = "web"))] use { - alloc::rc::Rc, + alloc::{boxed::Box, rc::Rc}, core::cell::RefCell, wasm_bindgen::{prelude::*, JsCast}, }; @@ -79,7 +77,7 @@ impl Plugin for ScheduleRunnerPlugin { let plugins_state = app.plugins_state(); if plugins_state != PluginsState::Cleaned { while app.plugins_state() == PluginsState::Adding { - #[cfg(all(not(target_arch = "wasm32"), feature = "bevy_tasks"))] + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] bevy_tasks::tick_global_task_pools_on_main_thread(); } app.finish(); @@ -100,7 +98,6 @@ impl Plugin for ScheduleRunnerPlugin { let tick = move |app: &mut App, _wait: Option| -> Result, AppExit> { - #[cfg(any(target_arch = "wasm32", feature = "std"))] let start_time = Instant::now(); app.update(); @@ -109,10 +106,8 @@ impl Plugin for ScheduleRunnerPlugin { return Err(exit); }; - #[cfg(any(target_arch = "wasm32", feature = "std"))] let end_time = 
Instant::now(); - #[cfg(any(target_arch = "wasm32", feature = "std"))] if let Some(wait) = _wait { let exe_time = end_time - start_time; if exe_time < wait { @@ -123,58 +118,55 @@ impl Plugin for ScheduleRunnerPlugin { Ok(None) }; - #[cfg(not(target_arch = "wasm32"))] - { - loop { - match tick(&mut app, wait) { - Ok(Some(_delay)) => { - #[cfg(feature = "std")] - std::thread::sleep(_delay); - } - Ok(None) => continue, - Err(exit) => return exit, + cfg_if::cfg_if! { + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + fn set_timeout(callback: &Closure, dur: Duration) { + web_sys::window() + .unwrap() + .set_timeout_with_callback_and_timeout_and_arguments_0( + callback.as_ref().unchecked_ref(), + dur.as_millis() as i32, + ) + .expect("Should register `setTimeout`."); } - } - } - - #[cfg(target_arch = "wasm32")] - { - fn set_timeout(callback: &Closure, dur: Duration) { - web_sys::window() - .unwrap() - .set_timeout_with_callback_and_timeout_and_arguments_0( - callback.as_ref().unchecked_ref(), - dur.as_millis() as i32, - ) - .expect("Should register `setTimeout`."); - } - let asap = Duration::from_millis(1); - - let exit = Rc::new(RefCell::new(AppExit::Success)); - let closure_exit = exit.clone(); - - let mut app = Rc::new(app); - let moved_tick_closure = Rc::new(RefCell::new(None)); - let base_tick_closure = moved_tick_closure.clone(); - - let tick_app = move || { - let app = Rc::get_mut(&mut app).unwrap(); - let delay = tick(app, wait); - match delay { - Ok(delay) => set_timeout( - moved_tick_closure.borrow().as_ref().unwrap(), - delay.unwrap_or(asap), - ), - Err(code) => { - closure_exit.replace(code); + let asap = Duration::from_millis(1); + + let exit = Rc::new(RefCell::new(AppExit::Success)); + let closure_exit = exit.clone(); + + let mut app = Rc::new(app); + let moved_tick_closure = Rc::new(RefCell::new(None)); + let base_tick_closure = moved_tick_closure.clone(); + + let tick_app = move || { + let app = Rc::get_mut(&mut app).unwrap(); + let delay = 
tick(app, wait); + match delay { + Ok(delay) => set_timeout( + moved_tick_closure.borrow().as_ref().unwrap(), + delay.unwrap_or(asap), + ), + Err(code) => { + closure_exit.replace(code); + } + } + }; + *base_tick_closure.borrow_mut() = + Some(Closure::wrap(Box::new(tick_app) as Box)); + set_timeout(base_tick_closure.borrow().as_ref().unwrap(), asap); + + exit.take() + } else { + loop { + match tick(&mut app, wait) { + Ok(Some(delay)) => { + bevy_platform::thread::sleep(delay); + } + Ok(None) => continue, + Err(exit) => return exit, } } - }; - *base_tick_closure.borrow_mut() = - Some(Closure::wrap(Box::new(tick_app) as Box)); - set_timeout(base_tick_closure.borrow().as_ref().unwrap(), asap); - - exit.take() + } } } } diff --git a/crates/bevy_app/src/sub_app.rs b/crates/bevy_app/src/sub_app.rs index ab7f00750bbe6..c340b80654a86 100644 --- a/crates/bevy_app/src/sub_app.rs +++ b/crates/bevy_app/src/sub_app.rs @@ -3,10 +3,10 @@ use alloc::{boxed::Box, string::String, vec::Vec}; use bevy_ecs::{ event::EventRegistry, prelude::*, - schedule::{InternedScheduleLabel, ScheduleBuildSettings, ScheduleLabel}, - system::{SystemId, SystemInput}, + schedule::{InternedScheduleLabel, InternedSystemSet, ScheduleBuildSettings, ScheduleLabel}, + system::{ScheduleSystem, SystemId, SystemInput}, }; -use bevy_utils::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; use core::fmt::Debug; #[cfg(feature = "trace")] @@ -211,7 +211,7 @@ impl SubApp { pub fn add_systems( &mut self, schedule: impl ScheduleLabel, - systems: impl IntoSystemConfigs, + systems: impl IntoScheduleConfigs, ) -> &mut Self { let mut schedules = self.world.resource_mut::(); schedules.add_systems(schedule, systems); @@ -233,10 +233,10 @@ impl SubApp { /// See [`App::configure_sets`]. 
#[track_caller] - pub fn configure_sets( + pub fn configure_sets( &mut self, schedule: impl ScheduleLabel, - sets: impl IntoSystemSetConfigs, + sets: impl IntoScheduleConfigs, ) -> &mut Self { let mut schedules = self.world.resource_mut::(); schedules.configure_sets(schedule, sets); @@ -362,7 +362,6 @@ impl SubApp { } /// See [`App::get_added_plugins`]. - #[cfg(feature = "downcast")] pub fn get_added_plugins(&self) -> Vec<&T> where T: Plugin, diff --git a/crates/bevy_app/src/task_pool_plugin.rs b/crates/bevy_app/src/task_pool_plugin.rs index 5623371dad58f..5ed4e3fa5da6e 100644 --- a/crates/bevy_app/src/task_pool_plugin.rs +++ b/crates/bevy_app/src/task_pool_plugin.rs @@ -1,27 +1,25 @@ -#![cfg_attr( - feature = "portable-atomic", - expect( - clippy::redundant_closure, - reason = "portable_atomic_util::Arc has subtly different implicit behavior" - ) -)] - -use crate::{App, Last, Plugin}; +use crate::{App, Plugin}; use alloc::string::ToString; -use bevy_ecs::prelude::*; +use bevy_platform::sync::Arc; use bevy_tasks::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool, TaskPoolBuilder}; -use core::{fmt::Debug, marker::PhantomData}; +use core::fmt::Debug; use log::trace; -#[cfg(feature = "portable-atomic")] -use portable_atomic_util::Arc; - -#[cfg(not(feature = "portable-atomic"))] -use alloc::sync::Arc; - -#[cfg(not(target_arch = "wasm32"))] -use bevy_tasks::tick_global_task_pools_on_main_thread; +cfg_if::cfg_if! { + if #[cfg(not(all(target_arch = "wasm32", feature = "web")))] { + use {crate::Last, bevy_tasks::tick_global_task_pools_on_main_thread}; + use bevy_ecs::system::NonSendMarker; + + /// A system used to check and advanced our task pools. 
+ /// + /// Calls [`tick_global_task_pools_on_main_thread`], + /// and uses [`NonSendMarker`] to ensure that this system runs on the main thread + fn tick_global_task_pools(_main_thread_marker: NonSendMarker) { + tick_global_task_pools_on_main_thread(); + } + } +} /// Setup of default task pools: [`AsyncComputeTaskPool`], [`ComputeTaskPool`], [`IoTaskPool`]. #[derive(Default)] @@ -35,21 +33,10 @@ impl Plugin for TaskPoolPlugin { // Setup the default bevy task pools self.task_pool_options.create_default_pools(); - #[cfg(not(target_arch = "wasm32"))] + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] _app.add_systems(Last, tick_global_task_pools); } } -/// A dummy type that is [`!Send`](Send), to force systems to run on the main thread. -pub struct NonSendMarker(PhantomData<*mut ()>); - -/// A system used to check and advanced our task pools. -/// -/// Calls [`tick_global_task_pools_on_main_thread`], -/// and uses [`NonSendMarker`] to ensure that this system runs on the main thread -#[cfg(not(target_arch = "wasm32"))] -fn tick_global_task_pools(_main_thread_marker: Option>) { - tick_global_task_pools_on_main_thread(); -} /// Defines a simple way to determine how many threads to use given the number of remaining cores /// and number of total cores @@ -187,19 +174,21 @@ impl TaskPoolOptions { remaining_threads = remaining_threads.saturating_sub(io_threads); IoTaskPool::get_or_init(|| { - let mut builder = TaskPoolBuilder::default() + let builder = TaskPoolBuilder::default() .num_threads(io_threads) .thread_name("IO Task Pool".to_string()); - #[cfg(not(target_arch = "wasm32"))] - { + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] + let builder = { + let mut builder = builder; if let Some(f) = self.io.on_thread_spawn.clone() { builder = builder.on_thread_spawn(move || f()); } if let Some(f) = self.io.on_thread_destroy.clone() { builder = builder.on_thread_destroy(move || f()); } - } + builder + }; builder.build() }); @@ -215,19 +204,21 @@ impl 
TaskPoolOptions { remaining_threads = remaining_threads.saturating_sub(async_compute_threads); AsyncComputeTaskPool::get_or_init(|| { - let mut builder = TaskPoolBuilder::default() + let builder = TaskPoolBuilder::default() .num_threads(async_compute_threads) .thread_name("Async Compute Task Pool".to_string()); - #[cfg(not(target_arch = "wasm32"))] - { + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] + let builder = { + let mut builder = builder; if let Some(f) = self.async_compute.on_thread_spawn.clone() { builder = builder.on_thread_spawn(move || f()); } if let Some(f) = self.async_compute.on_thread_destroy.clone() { builder = builder.on_thread_destroy(move || f()); } - } + builder + }; builder.build() }); @@ -243,19 +234,21 @@ impl TaskPoolOptions { trace!("Compute Threads: {}", compute_threads); ComputeTaskPool::get_or_init(|| { - let mut builder = TaskPoolBuilder::default() + let builder = TaskPoolBuilder::default() .num_threads(compute_threads) .thread_name("Compute Task Pool".to_string()); - #[cfg(not(target_arch = "wasm32"))] - { + #[cfg(not(all(target_arch = "wasm32", feature = "web")))] + let builder = { + let mut builder = builder; if let Some(f) = self.compute.on_thread_spawn.clone() { builder = builder.on_thread_spawn(move || f()); } if let Some(f) = self.compute.on_thread_destroy.clone() { builder = builder.on_thread_destroy(move || f()); } - } + builder + }; builder.build() }); diff --git a/crates/bevy_app/src/terminal_ctrl_c_handler.rs b/crates/bevy_app/src/terminal_ctrl_c_handler.rs index 0eb34ccdbe98e..48af3c09f18a5 100644 --- a/crates/bevy_app/src/terminal_ctrl_c_handler.rs +++ b/crates/bevy_app/src/terminal_ctrl_c_handler.rs @@ -50,7 +50,7 @@ impl TerminalCtrlCHandlerPlugin { /// Sends a [`AppExit`] event when the user presses `Ctrl+C` on the terminal. 
pub fn exit_on_flag(mut events: EventWriter) { if SHOULD_EXIT.load(Ordering::Relaxed) { - events.send(AppExit::from_code(130)); + events.write(AppExit::from_code(130)); } } } diff --git a/crates/bevy_asset/Cargo.toml b/crates/bevy_asset/Cargo.toml index de7094a63663e..2c7c918300536 100644 --- a/crates/bevy_asset/Cargo.toml +++ b/crates/bevy_asset/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_asset" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides asset functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -19,23 +19,26 @@ watch = [] trace = [] [dependencies] -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset_macros = { path = "macros", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset_macros = { path = "macros", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ "uuid", ] } -bevy_tasks = { path = "../bevy_tasks", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", +] } stackfuture = "0.3" atomicow = "1.0" -async-broadcast = "0.5" +async-broadcast = "0.7.2" async-fs = "2.0" async-lock = "3.0" bitflags = { version = "2.3", features = ["serde"] } crossbeam-channel = "0.5" -downcast-rs = "1.2" +downcast-rs = { version = "2", default-features = false, features = ["std"] } disqualified = "1.0" either = "1.13" futures-io = "0.3" @@ -46,12 
+49,14 @@ ron = "0.8" serde = { version = "1", features = ["derive"] } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } -uuid = { version = "1.0", features = ["v4"] } +uuid = { version = "1.13.1", features = ["v4"] } +tracing = { version = "0.1", default-features = false, features = ["std"] } [target.'cfg(target_os = "android")'.dependencies] -bevy_window = { path = "../bevy_window", version = "0.15.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. wasm-bindgen = { version = "0.2" } web-sys = { version = "0.3", features = [ "Window", @@ -60,12 +65,19 @@ web-sys = { version = "0.3", features = [ ] } wasm-bindgen-futures = "0.4" js-sys = "0.3" +uuid = { version = "1.13.1", default-features = false, features = ["js"] } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -notify-debouncer-full = { version = "0.4.0", optional = true } - -[dev-dependencies] -bevy_log = { path = "../bevy_log", version = "0.15.0-dev" } +notify-debouncer-full = { version = "0.5.0", optional = true } [lints] workspace = true diff --git a/crates/bevy_asset/LICENSE-APACHE b/crates/bevy_asset/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_asset/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_asset/LICENSE-MIT b/crates/bevy_asset/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_asset/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_asset/macros/Cargo.toml b/crates/bevy_asset/macros/Cargo.toml index a210d535299f6..43562ae8063fd 100644 --- a/crates/bevy_asset/macros/Cargo.toml +++ b/crates/bevy_asset/macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_asset_macros" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Derive implementations for bevy_asset" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -12,7 +12,7 @@ keywords = ["bevy"] proc-macro = true [dependencies] -bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.15.0-dev" } +bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.16.0-dev" } syn = "2.0" proc-macro2 = "1.0" diff --git a/crates/bevy_asset/macros/LICENSE-APACHE b/crates/bevy_asset/macros/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_asset/macros/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_asset/macros/LICENSE-MIT b/crates/bevy_asset/macros/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_asset/macros/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_asset/src/asset_changed.rs b/crates/bevy_asset/src/asset_changed.rs index b7902587d420b..40723d7b08d45 100644 --- a/crates/bevy_asset/src/asset_changed.rs +++ b/crates/bevy_asset/src/asset_changed.rs @@ -9,14 +9,14 @@ use bevy_ecs::{ archetype::Archetype, component::{ComponentId, Tick}, prelude::{Entity, Resource, World}, - query::{FilteredAccess, QueryFilter, QueryItem, ReadFetch, WorldQuery}, + query::{FilteredAccess, QueryData, QueryFilter, ReadFetch, WorldQuery}, storage::{Table, TableRow}, world::unsafe_world_cell::UnsafeWorldCell, }; -use bevy_utils::tracing::error; -use bevy_utils::HashMap; +use bevy_platform::collections::HashMap; use core::marker::PhantomData; use disqualified::ShortName; +use tracing::error; /// A resource that stores the last tick an asset was changed. This is used by /// the [`AssetChanged`] filter to determine if an asset has changed since the last time @@ -86,7 +86,7 @@ impl<'w, A: AsAssetId> AssetChangeCheck<'w, A> { } } -/// Filter that selects entities with a `A` for an asset that changed +/// Filter that selects entities with an `A` for an asset that changed /// after the system last ran, where `A` is a component that implements /// [`AsAssetId`]. /// @@ -114,8 +114,8 @@ impl<'w, A: AsAssetId> AssetChangeCheck<'w, A> { /// # Performance /// /// When at least one `A` is updated, this will -/// read a hashmap once per entity with a `A` component. The -/// runtime of the query is proportional to how many entities with a `A` +/// read a hashmap once per entity with an `A` component. The +/// runtime of the query is proportional to how many entities with an `A` /// it matches. /// /// If no `A` asset updated since the last time the system ran, then no lookups occur. 
@@ -148,16 +148,13 @@ pub struct AssetChangedState { _asset: PhantomData, } -#[allow(unsafe_code)] +#[expect(unsafe_code, reason = "WorldQuery is an unsafe trait.")] /// SAFETY: `ROQueryFetch` is the same as `QueryFetch` unsafe impl WorldQuery for AssetChanged { - type Item<'w> = (); type Fetch<'w> = AssetChangedFetch<'w, A>; type State = AssetChangedState; - fn shrink<'wlong: 'wshort, 'wshort>(_: QueryItem<'wlong, Self>) -> QueryItem<'wshort, Self> {} - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -228,16 +225,9 @@ unsafe impl WorldQuery for AssetChanged { } } - unsafe fn fetch<'w>(_: &mut Self::Fetch<'w>, _: Entity, _: TableRow) -> Self::Item<'w> {} - #[inline] fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { <&A>::update_component_access(&state.asset_id, access); - assert!( - !access.access().has_resource_write(state.resource_id), - "AssetChanged<{ty}> requires read-only access to AssetChanges<{ty}>", - ty = ShortName::of::() - ); access.add_resource_read(state.resource_id); } @@ -269,7 +259,7 @@ unsafe impl WorldQuery for AssetChanged { } } -#[allow(unsafe_code)] +#[expect(unsafe_code, reason = "QueryFilter is an unsafe trait.")] /// SAFETY: read-only access unsafe impl QueryFilter for AssetChanged { const IS_ARCHETYPAL: bool = false; @@ -280,7 +270,7 @@ unsafe impl QueryFilter for AssetChanged { entity: Entity, table_row: TableRow, ) -> bool { - fetch.inner.as_mut().map_or(false, |inner| { + fetch.inner.as_mut().is_some_and(|inner| { // SAFETY: We delegate to the inner `fetch` for `A` unsafe { let handle = <&A>::fetch(inner, entity, table_row); @@ -291,17 +281,21 @@ unsafe impl QueryFilter for AssetChanged { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { - use crate::{self as bevy_asset, AssetEvents, AssetPlugin, Handle}; + use crate::{AssetEvents, AssetPlugin, Handle}; + use alloc::{vec, vec::Vec}; use core::num::NonZero; + 
use std::println; use crate::{AssetApp, Assets}; - use bevy_app::{App, AppExit, Last, Startup, TaskPoolPlugin, Update}; - use bevy_ecs::schedule::IntoSystemConfigs; + use bevy_app::{App, AppExit, PostUpdate, Startup, TaskPoolPlugin, Update}; + use bevy_ecs::schedule::IntoScheduleConfigs; use bevy_ecs::{ component::Component, event::EventWriter, - system::{Commands, IntoSystem, Local, Query, Res, ResMut, Resource}, + resource::Resource, + system::{Commands, IntoSystem, Local, Query, Res, ResMut}, }; use bevy_reflect::TypePath; @@ -337,7 +331,7 @@ mod tests { _query: Query<&mut MyComponent, AssetChanged>, mut exit: EventWriter, ) { - exit.send(AppExit::Error(NonZero::::MIN)); + exit.write(AppExit::Error(NonZero::::MIN)); } run_app(compatible_filter); } @@ -412,7 +406,7 @@ mod tests { .init_asset::() .insert_resource(Counter(vec![0, 0, 0, 0])) .add_systems(Update, add_some) - .add_systems(Last, count_update.after(AssetEvents)); + .add_systems(PostUpdate, count_update.after(AssetEvents)); // First run of the app, `add_systems(Startup…)` runs. app.update(); // run_count == 0 @@ -447,7 +441,7 @@ mod tests { }, ) .add_systems(Update, update_some) - .add_systems(Last, count_update.after(AssetEvents)); + .add_systems(PostUpdate, count_update.after(AssetEvents)); // First run of the app, `add_systems(Startup…)` runs. 
app.update(); // run_count == 0 diff --git a/crates/bevy_asset/src/assets.rs b/crates/bevy_asset/src/assets.rs index 6e3bf1b5cf51f..9fa8eb4381485 100644 --- a/crates/bevy_asset/src/assets.rs +++ b/crates/bevy_asset/src/assets.rs @@ -1,15 +1,13 @@ use crate::asset_changed::AssetChanges; -use crate::{ - self as bevy_asset, Asset, AssetEvent, AssetHandleProvider, AssetId, AssetServer, Handle, - UntypedHandle, -}; -use alloc::sync::Arc; +use crate::{Asset, AssetEvent, AssetHandleProvider, AssetId, AssetServer, Handle, UntypedHandle}; +use alloc::{sync::Arc, vec::Vec}; use bevy_ecs::{ prelude::EventWriter, - system::{Res, ResMut, Resource, SystemChangeTick}, + resource::Resource, + system::{Res, ResMut, SystemChangeTick}, }; +use bevy_platform::collections::HashMap; use bevy_reflect::{Reflect, TypePath}; -use bevy_utils::HashMap; use core::{any::TypeId, iter::Enumerate, marker::PhantomData, sync::atomic::AtomicU32}; use crossbeam_channel::{Receiver, Sender}; use serde::{Deserialize, Serialize}; @@ -97,6 +95,7 @@ impl AssetIndexAllocator { /// [`AssetPath`]: crate::AssetPath #[derive(Asset, TypePath)] pub struct LoadedUntypedAsset { + /// The handle to the loaded asset. #[dependency] pub handle: UntypedHandle, } @@ -282,6 +281,8 @@ impl DenseAssetStorage { /// at compile time. /// /// This tracks (and queues) [`AssetEvent`] events whenever changes to the collection occur. +/// To check whether the asset used by a given component has changed (due to a change in the handle or the underlying asset) +/// use the [`AssetChanged`](crate::asset_changed::AssetChanged) query filter. #[derive(Resource)] pub struct Assets { dense_storage: DenseAssetStorage, @@ -461,16 +462,22 @@ impl Assets { /// Removes the [`Asset`] with the given `id`. 
pub(crate) fn remove_dropped(&mut self, id: AssetId) { match self.duplicate_handles.get_mut(&id) { - None | Some(0) => {} + None => {} + Some(0) => { + self.duplicate_handles.remove(&id); + } Some(value) => { *value -= 1; return; } } + let existed = match id { AssetId::Index { index, .. } => self.dense_storage.remove_dropped(index).is_some(), AssetId::Uuid { uuid } => self.hash_map.remove(&uuid).is_some(), }; + + self.queued_events.push(AssetEvent::Unused { id }); if existed { self.queued_events.push(AssetEvent::Removed { id }); } @@ -552,7 +559,6 @@ impl Assets { } } - assets.queued_events.push(AssetEvent::Unused { id }); assets.remove_dropped(id); } } @@ -578,7 +584,7 @@ impl Assets { }; } } - events.send_batch(assets.queued_events.drain(..)); + events.write_batch(assets.queued_events.drain(..)); } /// A run condition for [`asset_events`]. The system will not run if there are no events to @@ -594,7 +600,7 @@ impl Assets { pub struct AssetsMutIterator<'a, A: Asset> { queued_events: &'a mut Vec>, dense_storage: Enumerate>>, - hash_map: bevy_utils::hashbrown::hash_map::IterMut<'a, Uuid, A>, + hash_map: bevy_platform::collections::hash_map::IterMut<'a, Uuid, A>, } impl<'a, A: Asset> Iterator for AssetsMutIterator<'a, A> { @@ -631,6 +637,7 @@ impl<'a, A: Asset> Iterator for AssetsMutIterator<'a, A> { } } +/// An error returned when an [`AssetIndex`] has an invalid generation. #[derive(Error, Debug)] #[error("AssetIndex {index:?} has an invalid generation. 
The current generation is: '{current_generation}'.")] pub struct InvalidGenerationError { diff --git a/crates/bevy_asset/src/direct_access_ext.rs b/crates/bevy_asset/src/direct_access_ext.rs index bfa7fa17b29c0..792d523a30063 100644 --- a/crates/bevy_asset/src/direct_access_ext.rs +++ b/crates/bevy_asset/src/direct_access_ext.rs @@ -5,6 +5,7 @@ use bevy_ecs::world::World; use crate::{meta::Settings, Asset, AssetPath, AssetServer, Assets, Handle}; +/// An extension trait for methods for working with assets directly from a [`World`]. pub trait DirectAssetAccessExt { /// Insert an asset similarly to [`Assets::add`]. fn add_asset(&mut self, asset: impl Into) -> Handle; diff --git a/crates/bevy_asset/src/event.rs b/crates/bevy_asset/src/event.rs index 832cc212d4b01..087cb44b5a138 100644 --- a/crates/bevy_asset/src/event.rs +++ b/crates/bevy_asset/src/event.rs @@ -8,6 +8,7 @@ use core::fmt::Debug; /// For an untyped equivalent, see [`UntypedAssetLoadFailedEvent`]. #[derive(Event, Clone, Debug)] pub struct AssetLoadFailedEvent { + /// The stable identifier of the asset that failed to load. pub id: AssetId, /// The asset path that was attempted. pub path: AssetPath<'static>, @@ -25,6 +26,7 @@ impl AssetLoadFailedEvent { /// An untyped version of [`AssetLoadFailedEvent`]. #[derive(Event, Clone, Debug)] pub struct UntypedAssetLoadFailedEvent { + /// The stable identifier of the asset that failed to load. pub id: UntypedAssetId, /// The asset path that was attempted. pub path: AssetPath<'static>, @@ -43,6 +45,7 @@ impl From<&AssetLoadFailedEvent> for UntypedAssetLoadFailedEvent { } /// Events that occur for a specific loaded [`Asset`], such as "value changed" events and "dependency" events. +#[expect(missing_docs, reason = "Documenting the id fields is unhelpful.")] #[derive(Event, Reflect)] pub enum AssetEvent { /// Emitted whenever an [`Asset`] is added. 
diff --git a/crates/bevy_asset/src/folder.rs b/crates/bevy_asset/src/folder.rs index e2c6b0ca2309c..c591c88688789 100644 --- a/crates/bevy_asset/src/folder.rs +++ b/crates/bevy_asset/src/folder.rs @@ -1,12 +1,16 @@ -use crate as bevy_asset; +use alloc::vec::Vec; + use crate::{Asset, UntypedHandle}; use bevy_reflect::TypePath; /// A "loaded folder" containing handles for all assets stored in a given [`AssetPath`]. /// +/// This is produced by [`AssetServer::load_folder`](crate::prelude::AssetServer::load_folder). +/// /// [`AssetPath`]: crate::AssetPath #[derive(Asset, TypePath)] pub struct LoadedFolder { + /// The handles of all assets stored in the folder. #[dependency] pub handles: Vec, } diff --git a/crates/bevy_asset/src/handle.rs b/crates/bevy_asset/src/handle.rs index 9c61ff0f88c7b..e6ad1d074a78b 100644 --- a/crates/bevy_asset/src/handle.rs +++ b/crates/bevy_asset/src/handle.rs @@ -113,16 +113,23 @@ impl core::fmt::Debug for StrongHandle { } } -/// A strong or weak handle to a specific [`Asset`]. If a [`Handle`] is [`Handle::Strong`], the [`Asset`] will be kept +/// A handle to a specific [`Asset`] of type `A`. Handles act as abstract "references" to +/// assets, whose data are stored in the [`Assets`](crate::prelude::Assets) resource, +/// avoiding the need to store multiple copies of the same data. +/// +/// If a [`Handle`] is [`Handle::Strong`], the [`Asset`] will be kept /// alive until the [`Handle`] is dropped. If a [`Handle`] is [`Handle::Weak`], it does not necessarily reference a live [`Asset`], /// nor will it keep assets alive. /// +/// Modifying a *handle* will change which existing asset is referenced, but modifying the *asset* +/// (by mutating the [`Assets`](crate::prelude::Assets) resource) will change the asset for all handles referencing it. +/// /// [`Handle`] can be cloned. If a [`Handle::Strong`] is cloned, the referenced [`Asset`] will not be freed until _all_ instances /// of the [`Handle`] are dropped. 
/// -/// [`Handle::Strong`] also provides access to useful [`Asset`] metadata, such as the [`AssetPath`] (if it exists). +/// [`Handle::Strong`], via [`StrongHandle`] also provides access to useful [`Asset`] metadata, such as the [`AssetPath`] (if it exists). #[derive(Reflect)] -#[reflect(Default, Debug, Hash, PartialEq)] +#[reflect(Default, Debug, Hash, PartialEq, Clone)] pub enum Handle { /// A "strong" reference to a live (or loading) [`Asset`]. If a [`Handle`] is [`Handle::Strong`], the [`Asset`] will be kept /// alive until the [`Handle`] is dropped. Strong handles also provide access to additional asset metadata. @@ -143,6 +150,10 @@ impl Clone for Handle { impl Handle { /// Create a new [`Handle::Weak`] with the given [`u128`] encoding of a [`Uuid`]. + #[deprecated( + since = "0.16.0", + note = "use the `weak_handle!` macro with a UUID string instead" + )] pub const fn weak_from_u128(value: u128) -> Self { Handle::Weak(AssetId::Uuid { uuid: Uuid::from_u128(value), @@ -283,7 +294,9 @@ impl From<&mut Handle> for UntypedAssetId { /// See [`Handle`] for more information. #[derive(Clone)] pub enum UntypedHandle { + /// A strong handle, which will keep the referenced [`Asset`] alive until all strong handles are dropped. Strong(Arc), + /// A weak handle, which does not keep the referenced [`Asset`] alive. Weak(UntypedAssetId), } @@ -501,6 +514,24 @@ impl TryFrom for Handle { } } +/// Creates a weak [`Handle`] from a string literal containing a UUID. +/// +/// # Examples +/// +/// ``` +/// # use bevy_asset::{Handle, weak_handle}; +/// # type Shader = (); +/// const SHADER: Handle = weak_handle!("1347c9b7-c46a-48e7-b7b8-023a354b7cac"); +/// ``` +#[macro_export] +macro_rules! weak_handle { + ($uuid:expr) => {{ + $crate::Handle::Weak($crate::AssetId::Uuid { + uuid: $crate::uuid::uuid!($uuid), + }) + }}; +} + /// Errors preventing the conversion of to/from an [`UntypedHandle`] and a [`Handle`]. 
#[derive(Error, Debug, PartialEq, Clone)] #[non_exhaustive] @@ -509,13 +540,19 @@ pub enum UntypedAssetConversionError { #[error( "This UntypedHandle is for {found:?} and cannot be converted into a Handle<{expected:?}>" )] - TypeIdMismatch { expected: TypeId, found: TypeId }, + TypeIdMismatch { + /// The expected [`TypeId`] of the [`Handle`] being converted to. + expected: TypeId, + /// The [`TypeId`] of the [`UntypedHandle`] being converted from. + found: TypeId, + }, } #[cfg(test)] mod tests { + use alloc::boxed::Box; + use bevy_platform::hash::FixedHasher; use bevy_reflect::PartialReflect; - use bevy_utils::FixedHasher; use core::hash::BuildHasher; use super::*; @@ -551,8 +588,11 @@ mod tests { } /// Typed and Untyped `Handles` should be orderable amongst each other and themselves - #[allow(clippy::cmp_owned)] #[test] + #[expect( + clippy::cmp_owned, + reason = "This lints on the assertion that a typed handle converted to an untyped handle maintains its ordering compared to an untyped handle. While the conversion would normally be useless, we need to ensure that converted handles maintain their ordering, making the conversion necessary here." 
+ )] fn ordering() { assert!(UUID_1 < UUID_2); @@ -621,7 +661,7 @@ mod tests { assert_eq!(UntypedHandle::from(typed.clone()), untyped); } - /// `Reflect::clone_value` should increase the strong count of a strong handle + /// `PartialReflect::reflect_clone`/`PartialReflect::to_dynamic` should increase the strong count of a strong handle #[test] fn strong_handle_reflect_clone() { use crate::{AssetApp, AssetPlugin, Assets, VisitAssetDependencies}; @@ -652,7 +692,7 @@ mod tests { ); let reflected: &dyn Reflect = &handle; - let cloned_handle: Box = reflected.clone_value(); + let _cloned_handle: Box = reflected.reflect_clone().unwrap(); assert_eq!( Arc::strong_count(strong), @@ -660,10 +700,18 @@ mod tests { "Cloning the handle with reflect should increase the strong count to 2" ); + let dynamic_handle: Box = reflected.to_dynamic(); + + assert_eq!( + Arc::strong_count(strong), + 3, + "Converting the handle to a dynamic should increase the strong count to 3" + ); + let from_reflect_handle: Handle = - FromReflect::from_reflect(&*cloned_handle).unwrap(); + FromReflect::from_reflect(&*dynamic_handle).unwrap(); - assert_eq!(Arc::strong_count(strong), 3, "Converting the reflected value back to a handle should increase the strong count to 3"); + assert_eq!(Arc::strong_count(strong), 4, "Converting the reflected value back to a handle should increase the strong count to 4"); assert!( from_reflect_handle.is_strong(), "The cloned handle should still be strong" diff --git a/crates/bevy_asset/src/id.rs b/crates/bevy_asset/src/id.rs index 07a6d3db1209e..f9aa0d1b9632b 100644 --- a/crates/bevy_asset/src/id.rs +++ b/crates/bevy_asset/src/id.rs @@ -1,5 +1,5 @@ use crate::{Asset, AssetIndex}; -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use serde::{Deserialize, Serialize}; use uuid::Uuid; @@ -19,6 +19,7 @@ use thiserror::Error; /// /// For an "untyped" / "generic-less" id, see [`UntypedAssetId`]. 
#[derive(Reflect, Serialize, Deserialize, From)] +#[reflect(Clone, Default, Debug, PartialEq, Hash)] pub enum AssetId { /// A small / efficient runtime identifier that can be used to efficiently look up an asset stored in [`Assets`]. This is /// the "default" identifier used for assets. The alternative(s) (ex: [`AssetId::Uuid`]) will only be used if assets are @@ -26,15 +27,20 @@ pub enum AssetId { /// /// [`Assets`]: crate::Assets Index { + /// The unstable, opaque index of the asset. index: AssetIndex, - #[reflect(ignore)] + /// A marker to store the type information of the asset. + #[reflect(ignore, clone)] marker: PhantomData A>, }, /// A stable-across-runs / const asset identifier. This will only be used if an asset is explicitly registered in [`Assets`] /// with one. /// /// [`Assets`]: crate::Assets - Uuid { uuid: Uuid }, + Uuid { + /// The UUID provided during asset registration. + uuid: Uuid, + }, } impl AssetId { @@ -165,12 +171,22 @@ pub enum UntypedAssetId { /// explicitly registered that way. /// /// [`Assets`]: crate::Assets - Index { type_id: TypeId, index: AssetIndex }, + Index { + /// An identifier that records the underlying asset type. + type_id: TypeId, + /// The unstable, opaque index of the asset. + index: AssetIndex, + }, /// A stable-across-runs / const asset identifier. This will only be used if an asset is explicitly registered in [`Assets`] /// with one. /// /// [`Assets`]: crate::Assets - Uuid { type_id: TypeId, uuid: Uuid }, + Uuid { + /// An identifier that records the underlying asset type. + type_id: TypeId, + /// The UUID provided during asset registration. + uuid: Uuid, + }, } impl UntypedAssetId { @@ -404,7 +420,12 @@ impl TryFrom for AssetId { pub enum UntypedAssetIdConversionError { /// Caused when trying to convert an [`UntypedAssetId`] into an [`AssetId`] of the wrong type. 
#[error("This UntypedAssetId is for {found:?} and cannot be converted into an AssetId<{expected:?}>")] - TypeIdMismatch { expected: TypeId, found: TypeId }, + TypeIdMismatch { + /// The [`TypeId`] of the asset that we are trying to convert to. + expected: TypeId, + /// The [`TypeId`] of the asset that we are trying to convert from. + found: TypeId, + }, } #[cfg(test)] @@ -420,7 +441,7 @@ mod tests { fn hash(data: &T) -> u64 { use core::hash::BuildHasher; - bevy_utils::FixedHasher.hash_one(data) + bevy_platform::hash::FixedHasher.hash_one(data) } /// Typed and Untyped `AssetIds` should be equivalent to each other and themselves diff --git a/crates/bevy_asset/src/io/android.rs b/crates/bevy_asset/src/io/android.rs index b8b78a9681637..67ca8e339a22c 100644 --- a/crates/bevy_asset/src/io/android.rs +++ b/crates/bevy_asset/src/io/android.rs @@ -1,7 +1,7 @@ use crate::io::{get_meta_path, AssetReader, AssetReaderError, PathStream, Reader, VecReader}; -use bevy_utils::tracing::error; +use alloc::{borrow::ToOwned, boxed::Box, ffi::CString, vec::Vec}; use futures_lite::stream; -use std::{ffi::CString, path::Path}; +use std::path::Path; /// [`AssetReader`] implementation for Android devices, built on top of Android's [`AssetManager`]. 
/// @@ -72,10 +72,7 @@ impl AssetReader for AndroidAssetReader { Ok(read_dir) } - async fn is_directory<'a>( - &'a self, - path: &'a Path, - ) -> std::result::Result { + async fn is_directory<'a>(&'a self, path: &'a Path) -> Result { let asset_manager = bevy_window::ANDROID_APP .get() .expect("Bevy must be setup with the #[bevy_main] macro on Android") diff --git a/crates/bevy_asset/src/io/embedded/embedded_watcher.rs b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs index cc97eb3cda83c..f7fb56be747b4 100644 --- a/crates/bevy_asset/src/io/embedded/embedded_watcher.rs +++ b/crates/bevy_asset/src/io/embedded/embedded_watcher.rs @@ -3,8 +3,9 @@ use crate::io::{ memory::Dir, AssetSourceEvent, AssetWatcher, }; -use alloc::sync::Arc; -use bevy_utils::{tracing::warn, Duration, HashMap}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use bevy_platform::collections::HashMap; +use core::time::Duration; use notify_debouncer_full::{notify::RecommendedWatcher, Debouncer, RecommendedCache}; use parking_lot::RwLock; use std::{ @@ -12,6 +13,7 @@ use std::{ io::{BufReader, Read}, path::{Path, PathBuf}, }; +use tracing::warn; /// A watcher for assets stored in the `embedded` asset source. Embedded assets are assets whose /// bytes have been embedded into the Rust binary using the [`embedded_asset`](crate::embedded_asset) macro. @@ -22,6 +24,7 @@ pub struct EmbeddedWatcher { } impl EmbeddedWatcher { + /// Creates a new `EmbeddedWatcher` that watches for changes to the embedded assets in the given `dir`. 
pub fn new( dir: Dir, root_paths: Arc, PathBuf>>>, diff --git a/crates/bevy_asset/src/io/embedded/mod.rs b/crates/bevy_asset/src/io/embedded/mod.rs index af8176d5a0d79..13610531e299a 100644 --- a/crates/bevy_asset/src/io/embedded/mod.rs +++ b/crates/bevy_asset/src/io/embedded/mod.rs @@ -8,9 +8,15 @@ use crate::io::{ memory::{Dir, MemoryAssetReader, Value}, AssetSource, AssetSourceBuilders, }; -use bevy_ecs::system::Resource; +use alloc::boxed::Box; +use bevy_ecs::resource::Resource; use std::path::{Path, PathBuf}; +#[cfg(feature = "embedded_watcher")] +use alloc::borrow::ToOwned; + +/// The name of the `embedded` [`AssetSource`], +/// as stored in the [`AssetSourceBuilders`] resource. pub const EMBEDDED: &str = "embedded"; /// A [`Resource`] that manages "rust source files" in a virtual in memory [`Dir`], which is intended @@ -22,14 +28,16 @@ pub const EMBEDDED: &str = "embedded"; pub struct EmbeddedAssetRegistry { dir: Dir, #[cfg(feature = "embedded_watcher")] - root_paths: alloc::sync::Arc, PathBuf>>>, + root_paths: alloc::sync::Arc< + parking_lot::RwLock, PathBuf>>, + >, } impl EmbeddedAssetRegistry { /// Inserts a new asset. `full_path` is the full path (as [`file`] would return for that file, if it was capable of /// running in a non-rust file). `asset_path` is the path that will be used to identify the asset in the `embedded` /// [`AssetSource`]. `value` is the bytes that will be returned for the asset. This can be _either_ a `&'static [u8]` - /// or a [`Vec`]. + /// or a [`Vec`](alloc::vec::Vec). #[cfg_attr( not(feature = "embedded_watcher"), expect( @@ -48,7 +56,7 @@ impl EmbeddedAssetRegistry { /// Inserts new asset metadata. `full_path` is the full path (as [`file`] would return for that file, if it was capable of /// running in a non-rust file). `asset_path` is the path that will be used to identify the asset in the `embedded` /// [`AssetSource`]. `value` is the bytes that will be returned for the asset. 
This can be _either_ a `&'static [u8]` - /// or a [`Vec`]. + /// or a [`Vec`](alloc::vec::Vec). #[cfg_attr( not(feature = "embedded_watcher"), expect( @@ -71,6 +79,7 @@ impl EmbeddedAssetRegistry { self.dir.remove_asset(full_path) } + /// Registers the [`EMBEDDED`] [`AssetSource`] with the given [`AssetSourceBuilders`]. pub fn register_source(&self, sources: &mut AssetSourceBuilders) { let dir = self.dir.clone(); let processed_dir = self.dir.clone(); diff --git a/crates/bevy_asset/src/io/file/file_asset.rs b/crates/bevy_asset/src/io/file/file_asset.rs index a7af0197e24a9..f65680217bc2a 100644 --- a/crates/bevy_asset/src/io/file/file_asset.rs +++ b/crates/bevy_asset/src/io/file/file_asset.rs @@ -6,6 +6,7 @@ use async_fs::{read_dir, File}; use futures_io::AsyncSeek; use futures_lite::StreamExt; +use alloc::{borrow::ToOwned, boxed::Box}; use core::{pin::Pin, task, task::Poll}; use std::path::Path; diff --git a/crates/bevy_asset/src/io/file/file_watcher.rs b/crates/bevy_asset/src/io/file/file_watcher.rs index bb4cf109c32c1..e70cf1665f274 100644 --- a/crates/bevy_asset/src/io/file/file_watcher.rs +++ b/crates/bevy_asset/src/io/file/file_watcher.rs @@ -2,7 +2,8 @@ use crate::{ io::{AssetSourceEvent, AssetWatcher}, path::normalize_path, }; -use bevy_utils::{tracing::error, Duration}; +use alloc::borrow::ToOwned; +use core::time::Duration; use crossbeam_channel::Sender; use notify_debouncer_full::{ new_debouncer, @@ -14,9 +15,12 @@ use notify_debouncer_full::{ DebounceEventResult, Debouncer, RecommendedCache, }; use std::path::{Path, PathBuf}; +use tracing::error; /// An [`AssetWatcher`] that watches the filesystem for changes to asset files in a given root folder and emits [`AssetSourceEvent`] -/// for each relevant change. This uses [`notify_debouncer_full`] to retrieve "debounced" filesystem events. +/// for each relevant change. +/// +/// This uses [`notify_debouncer_full`] to retrieve "debounced" filesystem events. 
/// "Debouncing" defines a time window to hold on to events and then removes duplicate events that fall into this window. /// This introduces a small delay in processing events, but it helps reduce event duplicates. A small delay is also necessary /// on some systems to avoid processing a change event before it has actually been applied. @@ -25,14 +29,15 @@ pub struct FileWatcher { } impl FileWatcher { + /// Creates a new [`FileWatcher`] that watches for changes to the asset files in the given `path`. pub fn new( - root: PathBuf, + path: PathBuf, sender: Sender, debounce_wait_time: Duration, ) -> Result { - let root = normalize_path(super::get_base_path().join(root).as_path()); + let root = normalize_path(&path).canonicalize()?; let watcher = new_asset_event_debouncer( - root.clone(), + path.clone(), debounce_wait_time, FileEventHandler { root, @@ -49,15 +54,12 @@ impl AssetWatcher for FileWatcher {} pub(crate) fn get_asset_path(root: &Path, absolute_path: &Path) -> (PathBuf, bool) { let relative_path = absolute_path.strip_prefix(root).unwrap_or_else(|_| { panic!( - "FileWatcher::get_asset_path() failed to strip prefix from absolute path: absolute_path={:?}, root={:?}", - absolute_path, - root + "FileWatcher::get_asset_path() failed to strip prefix from absolute path: absolute_path={}, root={}", + absolute_path.display(), + root.display() ) }); - let is_meta = relative_path - .extension() - .map(|e| e == "meta") - .unwrap_or(false); + let is_meta = relative_path.extension().is_some_and(|e| e == "meta"); let asset_path = if is_meta { relative_path.with_extension("") } else { @@ -260,7 +262,8 @@ impl FilesystemEventHandler for FileEventHandler { self.last_event = None; } fn get_path(&self, absolute_path: &Path) -> Option<(PathBuf, bool)> { - Some(get_asset_path(&self.root, absolute_path)) + let absolute_path = absolute_path.canonicalize().ok()?; + Some(get_asset_path(&self.root, &absolute_path)) } fn handle(&mut self, _absolute_paths: &[PathBuf], event: 
AssetSourceEvent) { diff --git a/crates/bevy_asset/src/io/file/mod.rs b/crates/bevy_asset/src/io/file/mod.rs index 387924001f5fd..96c43072e8f47 100644 --- a/crates/bevy_asset/src/io/file/mod.rs +++ b/crates/bevy_asset/src/io/file/mod.rs @@ -6,10 +6,11 @@ mod file_asset; #[cfg(not(feature = "multi_threaded"))] mod sync_file_asset; -use bevy_utils::tracing::{debug, error}; #[cfg(feature = "file_watcher")] pub use file_watcher::*; +use tracing::{debug, error}; +use alloc::borrow::ToOwned; use std::{ env, path::{Path, PathBuf}, @@ -64,22 +65,22 @@ impl FileAssetReader { } } +/// A writer for the local filesystem. pub struct FileAssetWriter { root_path: PathBuf, } impl FileAssetWriter { - /// Creates a new `FileAssetIo` at a path relative to the executable's directory, optionally + /// Creates a new [`FileAssetWriter`] at a path relative to the executable's directory, optionally /// watching for changes. - /// - /// See `get_base_path` below. pub fn new + core::fmt::Debug>(path: P, create_root: bool) -> Self { let root_path = get_base_path().join(path.as_ref()); if create_root { if let Err(e) = std::fs::create_dir_all(&root_path) { error!( - "Failed to create root directory {:?} for file asset writer: {:?}", - root_path, e + "Failed to create root directory {} for file asset writer: {}", + root_path.display(), + e ); } } diff --git a/crates/bevy_asset/src/io/file/sync_file_asset.rs b/crates/bevy_asset/src/io/file/sync_file_asset.rs index cadea49492900..7533256204a8d 100644 --- a/crates/bevy_asset/src/io/file/sync_file_asset.rs +++ b/crates/bevy_asset/src/io/file/sync_file_asset.rs @@ -6,6 +6,7 @@ use crate::io::{ PathStream, Reader, Writer, }; +use alloc::{borrow::ToOwned, boxed::Box, vec::Vec}; use core::{pin::Pin, task::Poll}; use std::{ fs::{read_dir, File}, diff --git a/crates/bevy_asset/src/io/gated.rs b/crates/bevy_asset/src/io/gated.rs index 388145a4686b0..fa4f0f0d3fd30 100644 --- a/crates/bevy_asset/src/io/gated.rs +++ b/crates/bevy_asset/src/io/gated.rs @@ -1,6 
+1,6 @@ use crate::io::{AssetReader, AssetReaderError, PathStream, Reader}; -use alloc::sync::Arc; -use bevy_utils::HashMap; +use alloc::{boxed::Box, sync::Arc}; +use bevy_platform::collections::HashMap; use crossbeam_channel::{Receiver, Sender}; use parking_lot::RwLock; use std::path::Path; diff --git a/crates/bevy_asset/src/io/memory.rs b/crates/bevy_asset/src/io/memory.rs index 2fa2579c581a9..4c56057ff9e3c 100644 --- a/crates/bevy_asset/src/io/memory.rs +++ b/crates/bevy_asset/src/io/memory.rs @@ -1,6 +1,6 @@ use crate::io::{AssetReader, AssetReaderError, PathStream, Reader}; -use alloc::sync::Arc; -use bevy_utils::HashMap; +use alloc::{borrow::ToOwned, boxed::Box, sync::Arc, vec::Vec}; +use bevy_platform::collections::HashMap; use core::{pin::Pin, task::Poll}; use futures_io::AsyncRead; use futures_lite::{ready, Stream}; @@ -60,8 +60,7 @@ impl Dir { dir = self.get_or_insert_dir(parent); } let key: Box = path.file_name().unwrap().to_string_lossy().into(); - let data = dir.0.write().assets.remove(&key); - data + dir.0.write().assets.remove(&key) } pub fn insert_meta(&self, path: &Path, value: impl Into) { diff --git a/crates/bevy_asset/src/io/mod.rs b/crates/bevy_asset/src/io/mod.rs index 0c4c0b1f00356..aa7256bbd927b 100644 --- a/crates/bevy_asset/src/io/mod.rs +++ b/crates/bevy_asset/src/io/mod.rs @@ -21,8 +21,8 @@ mod source; pub use futures_lite::AsyncWriteExt; pub use source::*; -use alloc::sync::Arc; -use bevy_utils::{BoxedFuture, ConditionalSendFuture}; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use bevy_tasks::{BoxedFuture, ConditionalSendFuture}; use core::future::Future; use core::{ mem::size_of, diff --git a/crates/bevy_asset/src/io/processor_gated.rs b/crates/bevy_asset/src/io/processor_gated.rs index 2179379070ce1..da439f56f5e18 100644 --- a/crates/bevy_asset/src/io/processor_gated.rs +++ b/crates/bevy_asset/src/io/processor_gated.rs @@ -3,12 +3,12 @@ use crate::{ processor::{AssetProcessorData, ProcessStatus}, AssetPath, }; -use 
alloc::sync::Arc; +use alloc::{borrow::ToOwned, boxed::Box, sync::Arc, vec::Vec}; use async_lock::RwLockReadGuardArc; -use bevy_utils::tracing::trace; use core::{pin::Pin, task::Poll}; use futures_io::AsyncRead; use std::path::Path; +use tracing::trace; use super::{AsyncSeekForward, ErasedAssetReader}; diff --git a/crates/bevy_asset/src/io/source.rs b/crates/bevy_asset/src/io/source.rs index c0bab2037f8e3..4852a2a71fff2 100644 --- a/crates/bevy_asset/src/io/source.rs +++ b/crates/bevy_asset/src/io/source.rs @@ -2,15 +2,17 @@ use crate::{ io::{processor_gated::ProcessorGatedReader, AssetSourceEvent, AssetWatcher}, processor::AssetProcessorData, }; -use alloc::sync::Arc; -use atomicow::CowArc; -use bevy_ecs::system::Resource; -use bevy_utils::{ - tracing::{error, warn}, - Duration, HashMap, +use alloc::{ + boxed::Box, + string::{String, ToString}, + sync::Arc, }; -use core::{fmt::Display, hash::Hash}; +use atomicow::CowArc; +use bevy_ecs::resource::Resource; +use bevy_platform::collections::HashMap; +use core::{fmt::Display, hash::Hash, time::Duration}; use thiserror::Error; +use tracing::{error, warn}; use super::{ErasedAssetReader, ErasedAssetWriter}; @@ -130,8 +132,11 @@ impl<'a> PartialEq for AssetSourceId<'a> { /// and whether or not the source is processed. #[derive(Default)] pub struct AssetSourceBuilder { + /// The [`ErasedAssetReader`] to use on the unprocessed asset. pub reader: Option Box + Send + Sync>>, + /// The [`ErasedAssetWriter`] to use on the unprocessed asset. pub writer: Option Option> + Send + Sync>>, + /// The [`AssetWatcher`] to use for unprocessed assets, if any. pub watcher: Option< Box< dyn FnMut(crossbeam_channel::Sender) -> Option> @@ -139,9 +144,12 @@ pub struct AssetSourceBuilder { + Sync, >, >, + /// The [`ErasedAssetReader`] to use for processed assets. pub processed_reader: Option Box + Send + Sync>>, + /// The [`ErasedAssetWriter`] to use for processed assets. 
pub processed_writer: Option Option> + Send + Sync>>, + /// The [`AssetWatcher`] to use for processed assets, if any. pub processed_watcher: Option< Box< dyn FnMut(crossbeam_channel::Sender) -> Option> @@ -149,7 +157,9 @@ pub struct AssetSourceBuilder { + Sync, >, >, + /// The warning message to display when watching an unprocessed asset fails. pub watch_warning: Option<&'static str>, + /// The warning message to display when watching a processed asset fails. pub processed_watch_warning: Option<&'static str>, } @@ -531,7 +541,7 @@ impl AssetSource { not(target_os = "android") ))] { - let path = std::path::PathBuf::from(path.clone()); + let path = super::file::get_base_path().join(path.clone()); if path.exists() { Some(Box::new( super::file::FileWatcher::new( diff --git a/crates/bevy_asset/src/io/wasm.rs b/crates/bevy_asset/src/io/wasm.rs index 25a5d223cbb0b..c2551a40f15a3 100644 --- a/crates/bevy_asset/src/io/wasm.rs +++ b/crates/bevy_asset/src/io/wasm.rs @@ -1,9 +1,10 @@ use crate::io::{ get_meta_path, AssetReader, AssetReaderError, EmptyPathStream, PathStream, Reader, VecReader, }; -use bevy_utils::tracing::error; +use alloc::{borrow::ToOwned, boxed::Box, format}; use js_sys::{Uint8Array, JSON}; use std::path::{Path, PathBuf}; +use tracing::error; use wasm_bindgen::{prelude::wasm_bindgen, JsCast, JsValue}; use wasm_bindgen_futures::JsFuture; use web_sys::Response; @@ -51,7 +52,7 @@ fn js_value_to_err(context: &str) -> impl FnOnce(JsValue) -> std::io::Error + '_ } impl HttpWasmAssetReader { - async fn fetch_bytes<'a>(&self, path: PathBuf) -> Result { + async fn fetch_bytes(&self, path: PathBuf) -> Result { // The JS global scope includes a self-reference via a specializing name, which can be used to determine the type of global context available. 
let global: Global = js_sys::global().unchecked_into(); let promise = if !global.window().is_undefined() { diff --git a/crates/bevy_asset/src/lib.rs b/crates/bevy_asset/src/lib.rs index cb14a190a335f..5d98d2b21ee9d 100644 --- a/crates/bevy_asset/src/lib.rs +++ b/crates/bevy_asset/src/lib.rs @@ -144,9 +144,13 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] +#![no_std] extern crate alloc; -extern crate core; +extern crate std; + +// Required to make proc macros work in bevy itself. +extern crate self as bevy_asset; pub mod io; pub mod meta; @@ -201,22 +205,28 @@ pub use server::*; /// Rusty Object Notation, a crate used to serialize and deserialize bevy assets. pub use ron; +pub use uuid; use crate::{ io::{embedded::EmbeddedAssetRegistry, AssetSourceBuilder, AssetSourceBuilders, AssetSourceId}, processor::{AssetProcessor, Process}, }; -use alloc::sync::Arc; -use bevy_app::{App, Last, Plugin, PreUpdate}; +use alloc::{ + string::{String, ToString}, + sync::Arc, + vec::Vec, +}; +use bevy_app::{App, Plugin, PostUpdate, PreUpdate}; use bevy_ecs::prelude::Component; use bevy_ecs::{ reflect::AppTypeRegistry, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs, SystemSet}, + schedule::{IntoScheduleConfigs, SystemSet}, world::FromWorld, }; +use bevy_platform::collections::HashSet; use bevy_reflect::{FromReflect, GetTypeRegistration, Reflect, TypePath}; -use bevy_utils::{tracing::error, HashSet}; use core::any::TypeId; +use tracing::error; #[cfg(all(feature = "file_watcher", not(feature = "multi_threaded")))] compile_error!( @@ -248,6 +258,33 @@ pub struct AssetPlugin { pub mode: AssetMode, /// How/If asset meta files should be checked. pub meta_check: AssetMetaCheck, + /// How to handle load requests of files that are outside the approved directories. + /// + /// Approved folders are [`AssetPlugin::file_path`] and the folder of each + /// [`AssetSource`](io::AssetSource). 
Subfolders within these folders are also valid. + pub unapproved_path_mode: UnapprovedPathMode, +} + +/// Determines how to react to attempts to load assets not inside the approved folders. +/// +/// Approved folders are [`AssetPlugin::file_path`] and the folder of each +/// [`AssetSource`](io::AssetSource). Subfolders within these folders are also valid. +/// +/// It is strongly discouraged to use [`Allow`](UnapprovedPathMode::Allow) if your +/// app will include scripts or modding support, as it could allow allow arbitrary file +/// access for malicious code. +/// +/// See [`AssetPath::is_unapproved`](crate::AssetPath::is_unapproved) +#[derive(Clone, Default)] +pub enum UnapprovedPathMode { + /// Unapproved asset loading is allowed. This is strongly discouraged. + Allow, + /// Fails to load any asset that is is unapproved, unless an override method is used, like + /// [`AssetServer::load_override`]. + Deny, + /// Fails to load any asset that is is unapproved. + #[default] + Forbid, } /// Controls whether or not assets are pre-processed before being loaded. 
@@ -301,6 +338,7 @@ impl Default for AssetPlugin { processed_file_path: Self::DEFAULT_PROCESSED_FILE_PATH.to_string(), watch_for_changes_override: None, meta_check: AssetMetaCheck::default(), + unapproved_path_mode: UnapprovedPathMode::default(), } } } @@ -341,6 +379,7 @@ impl Plugin for AssetPlugin { AssetServerMode::Unprocessed, self.meta_check.clone(), watch, + self.unapproved_path_mode.clone(), )); } AssetMode::Processed => { @@ -357,6 +396,7 @@ impl Plugin for AssetPlugin { AssetServerMode::Processed, AssetMetaCheck::Always, watch, + self.unapproved_path_mode.clone(), )) .insert_resource(processor) .add_systems(bevy_app::Startup, AssetProcessor::start); @@ -370,6 +410,7 @@ impl Plugin for AssetPlugin { AssetServerMode::Processed, AssetMetaCheck::Always, watch, + self.unapproved_path_mode.clone(), )); } } @@ -489,8 +530,8 @@ pub trait AssetApp { /// * Initializing the [`AssetEvent`] resource for the [`Asset`] /// * Adding other relevant systems and resources for the [`Asset`] /// * Ignoring schedule ambiguities in [`Assets`] resource. Any time a system takes - /// mutable access to this resource this causes a conflict, but they rarely actually - /// modify the same underlying asset. + /// mutable access to this resource this causes a conflict, but they rarely actually + /// modify the same underlying asset. fn init_asset(&mut self) -> &mut Self; /// Registers the asset type `T` using `[App::register]`, /// and adds [`ReflectAsset`] type data to `T` and [`ReflectHandle`] type data to [`Handle`] in the type registry. 
@@ -574,7 +615,7 @@ impl AssetApp for App { .add_event::>() .register_type::>() .add_systems( - Last, + PostUpdate, Assets::::asset_events .run_if(Assets::::asset_events_condition) .in_set(AssetEvents), @@ -620,7 +661,6 @@ pub struct AssetEvents; #[cfg(test)] mod tests { use crate::{ - self as bevy_asset, folder::LoadedFolder, handle::Handle, io::{ @@ -630,18 +670,25 @@ mod tests { }, loader::{AssetLoader, LoadContext}, Asset, AssetApp, AssetEvent, AssetId, AssetLoadError, AssetLoadFailedEvent, AssetPath, - AssetPlugin, AssetServer, Assets, + AssetPlugin, AssetServer, Assets, LoadState, UnapprovedPathMode, + }; + use alloc::{ + boxed::Box, + format, + string::{String, ToString}, + sync::Arc, + vec, + vec::Vec, }; - use alloc::sync::Arc; use bevy_app::{App, TaskPoolPlugin, Update}; use bevy_ecs::{ event::EventCursor, prelude::*, schedule::{LogLevel, ScheduleBuildSettings}, }; - use bevy_log::LogPlugin; + use bevy_platform::collections::HashMap; use bevy_reflect::TypePath; - use bevy_utils::{Duration, HashMap}; + use core::time::Duration; use serde::{Deserialize, Serialize}; use std::path::Path; use thiserror::Error; @@ -807,11 +854,7 @@ mod tests { AssetSourceId::Default, AssetSource::build().with_reader(move || Box::new(gated_memory_reader.clone())), ) - .add_plugins(( - TaskPoolPlugin::default(), - LogPlugin::default(), - AssetPlugin::default(), - )); + .add_plugins((TaskPoolPlugin::default(), AssetPlugin::default())); (app, gate_opener) } @@ -1684,9 +1727,9 @@ mod tests { ); } } - _ => panic!("Unexpected error type {:?}", read_error), + _ => panic!("Unexpected error type {}", read_error), }, - _ => panic!("Unexpected error type {:?}", error.error), + _ => panic!("Unexpected error type {}", error.error), } } } @@ -1709,11 +1752,7 @@ mod tests { "unstable", AssetSource::build().with_reader(move || Box::new(unstable_reader.clone())), ) - .add_plugins(( - TaskPoolPlugin::default(), - LogPlugin::default(), - AssetPlugin::default(), - )) + 
.add_plugins((TaskPoolPlugin::default(), AssetPlugin::default())) .init_asset::() .register_asset_loader(CoolTextLoader) .init_resource::() @@ -1761,12 +1800,88 @@ mod tests { app.world_mut().run_schedule(Update); } + // This test is not checking a requirement, but documenting a current limitation. We simply are + // not capable of loading subassets when doing nested immediate loads. + #[test] + fn error_on_nested_immediate_load_of_subasset() { + let mut app = App::new(); + + let dir = Dir::default(); + dir.insert_asset_text( + Path::new("a.cool.ron"), + r#"( + text: "b", + dependencies: [], + embedded_dependencies: [], + sub_texts: ["A"], +)"#, + ); + dir.insert_asset_text(Path::new("empty.txt"), ""); + + app.register_asset_source( + AssetSourceId::Default, + AssetSource::build() + .with_reader(move || Box::new(MemoryAssetReader { root: dir.clone() })), + ) + .add_plugins((TaskPoolPlugin::default(), AssetPlugin::default())); + + app.init_asset::() + .init_asset::() + .register_asset_loader(CoolTextLoader); + + struct NestedLoadOfSubassetLoader; + + impl AssetLoader for NestedLoadOfSubassetLoader { + type Asset = TestAsset; + type Error = crate::loader::LoadDirectError; + type Settings = (); + + async fn load( + &self, + _: &mut dyn Reader, + _: &Self::Settings, + load_context: &mut LoadContext<'_>, + ) -> Result { + // We expect this load to fail. 
+ load_context + .loader() + .immediate() + .load::("a.cool.ron#A") + .await?; + Ok(TestAsset) + } + + fn extensions(&self) -> &[&str] { + &["txt"] + } + } + + app.init_asset::() + .register_asset_loader(NestedLoadOfSubassetLoader); + + let asset_server = app.world().resource::().clone(); + let handle = asset_server.load::("empty.txt"); + + run_app_until(&mut app, |_world| match asset_server.load_state(&handle) { + LoadState::Loading => None, + LoadState::Failed(err) => { + let error_message = format!("{err}"); + assert!(error_message.contains("Requested to load an asset path (a.cool.ron#A) with a subasset, but this is unsupported"), "what? \"{error_message}\""); + Some(()) + } + state => panic!("Unexpected asset state: {state:?}"), + }); + } + // validate the Asset derive macro for various asset types #[derive(Asset, TypePath)] pub struct TestAsset; - #[allow(dead_code)] #[derive(Asset, TypePath)] + #[expect( + dead_code, + reason = "This exists to ensure that `#[derive(Asset)]` works on enums. The inner variants are known not to be used." 
+ )] pub enum EnumTestAsset { Unnamed(#[dependency] Handle), Named { @@ -1781,7 +1896,6 @@ mod tests { Empty, } - #[allow(dead_code)] #[derive(Asset, TypePath)] pub struct StructTestAsset { #[dependency] @@ -1790,7 +1904,93 @@ mod tests { embedded: TestAsset, } - #[allow(dead_code)] #[derive(Asset, TypePath)] pub struct TupleTestAsset(#[dependency] Handle); + + fn unapproved_path_setup(mode: UnapprovedPathMode) -> App { + let dir = Dir::default(); + let a_path = "../a.cool.ron"; + let a_ron = r#" +( + text: "a", + dependencies: [], + embedded_dependencies: [], + sub_texts: [], +)"#; + + dir.insert_asset_text(Path::new(a_path), a_ron); + + let mut app = App::new(); + let memory_reader = MemoryAssetReader { root: dir }; + app.register_asset_source( + AssetSourceId::Default, + AssetSource::build().with_reader(move || Box::new(memory_reader.clone())), + ) + .add_plugins(( + TaskPoolPlugin::default(), + AssetPlugin { + unapproved_path_mode: mode, + ..Default::default() + }, + )); + app.init_asset::(); + + app + } + + fn load_a_asset(assets: Res) { + let a = assets.load::("../a.cool.ron"); + if a == Handle::default() { + panic!() + } + } + + fn load_a_asset_override(assets: Res) { + let a = assets.load_override::("../a.cool.ron"); + if a == Handle::default() { + panic!() + } + } + + #[test] + #[should_panic] + fn unapproved_path_forbid_should_panic() { + let mut app = unapproved_path_setup(UnapprovedPathMode::Forbid); + + fn uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset_override)); + + app.world_mut().run_schedule(Update); + } + + #[test] + #[should_panic] + fn unapproved_path_deny_should_panic() { + let mut app = unapproved_path_setup(UnapprovedPathMode::Deny); + + fn uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset)); + + app.world_mut().run_schedule(Update); + } + + #[test] + fn unapproved_path_deny_should_finish() { + let mut app = unapproved_path_setup(UnapprovedPathMode::Deny); + + fn 
uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset_override)); + + app.world_mut().run_schedule(Update); + } + + #[test] + fn unapproved_path_allow_should_finish() { + let mut app = unapproved_path_setup(UnapprovedPathMode::Allow); + + fn uses_assets(_asset: ResMut>) {} + app.add_systems(Update, (uses_assets, load_a_asset)); + + app.world_mut().run_schedule(Update); + } } diff --git a/crates/bevy_asset/src/loader.rs b/crates/bevy_asset/src/loader.rs index bfd5064138929..8f4863b885c68 100644 --- a/crates/bevy_asset/src/loader.rs +++ b/crates/bevy_asset/src/loader.rs @@ -6,9 +6,15 @@ use crate::{ Asset, AssetLoadError, AssetServer, AssetServerMode, Assets, Handle, UntypedAssetId, UntypedHandle, }; +use alloc::{ + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; use atomicow::CowArc; use bevy_ecs::world::World; -use bevy_utils::{BoxedFuture, ConditionalSendFuture, HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_tasks::{BoxedFuture, ConditionalSendFuture}; use core::any::{Any, TypeId}; use downcast_rs::{impl_downcast, Downcast}; use ron::error::SpannedError; @@ -290,10 +296,14 @@ impl AssetContainer for A { /// [`NestedLoader::load`]: crate::NestedLoader::load /// [immediately]: crate::Immediate #[derive(Error, Debug)] -#[error("Failed to load dependency {dependency:?} {error}")] -pub struct LoadDirectError { - pub dependency: AssetPath<'static>, - pub error: AssetLoadError, +pub enum LoadDirectError { + #[error("Requested to load an asset path ({0:?}) with a subasset, but this is unsupported. See issue #18291")] + RequestedSubasset(AssetPath<'static>), + #[error("Failed to load dependency {dependency:?} {error}")] + LoadError { + dependency: AssetPath<'static>, + error: AssetLoadError, + }, } /// An error that occurs while deserializing [`AssetMeta`]. 
@@ -471,8 +481,8 @@ impl<'a> LoadContext<'a> { let path = path.into(); let source = self.asset_server.get_source(path.source())?; let asset_reader = match self.asset_server.mode() { - AssetServerMode::Unprocessed { .. } => source.reader(), - AssetServerMode::Processed { .. } => source.processed_reader()?, + AssetServerMode::Unprocessed => source.reader(), + AssetServerMode::Processed => source.processed_reader()?, }; let mut reader = asset_reader.read(path.path()).await?; let hash = if self.populate_hashes { @@ -531,7 +541,7 @@ impl<'a> LoadContext<'a> { self.populate_hashes, ) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error, })?; diff --git a/crates/bevy_asset/src/loader_builders.rs b/crates/bevy_asset/src/loader_builders.rs index 7ebc6608ad874..13bea2b71dc2a 100644 --- a/crates/bevy_asset/src/loader_builders.rs +++ b/crates/bevy_asset/src/loader_builders.rs @@ -7,7 +7,7 @@ use crate::{ Asset, AssetLoadError, AssetPath, ErasedAssetLoader, ErasedLoadedAsset, Handle, LoadContext, LoadDirectError, LoadedAsset, LoadedUntypedAsset, UntypedHandle, }; -use alloc::sync::Arc; +use alloc::{borrow::ToOwned, boxed::Box, sync::Arc}; use core::any::TypeId; // Utility type for handling the sources of reader references @@ -305,9 +305,12 @@ impl NestedLoader<'_, '_, StaticTyped, Deferred> { pub fn load<'c, A: Asset>(self, path: impl Into>) -> Handle { let path = path.into().to_owned(); let handle = if self.load_context.should_load_dependencies { - self.load_context - .asset_server - .load_with_meta_transform(path, self.meta_transform, ()) + self.load_context.asset_server.load_with_meta_transform( + path, + self.meta_transform, + (), + true, + ) } else { self.load_context .asset_server @@ -387,13 +390,16 @@ impl<'builder, 'reader, T> NestedLoader<'_, '_, T, Immediate<'builder, 'reader>> path: &AssetPath<'static>, asset_type_id: Option, ) -> Result<(Arc, ErasedLoadedAsset), LoadDirectError> { + if 
path.label().is_some() { + return Err(LoadDirectError::RequestedSubasset(path.clone())); + } let (mut meta, loader, mut reader) = if let Some(reader) = self.mode.reader { let loader = if let Some(asset_type_id) = asset_type_id { self.load_context .asset_server .get_asset_loader_with_asset_type_id(asset_type_id) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error: error.into(), })? @@ -402,7 +408,7 @@ impl<'builder, 'reader, T> NestedLoader<'_, '_, T, Immediate<'builder, 'reader>> .asset_server .get_path_asset_loader(path) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error: error.into(), })? @@ -415,7 +421,7 @@ impl<'builder, 'reader, T> NestedLoader<'_, '_, T, Immediate<'builder, 'reader>> .asset_server .get_meta_loader_and_reader(path, asset_type_id) .await - .map_err(|error| LoadDirectError { + .map_err(|error| LoadDirectError::LoadError { dependency: path.clone(), error, })?; @@ -453,15 +459,17 @@ impl NestedLoader<'_, '_, StaticTyped, Immediate<'_, '_>> { self.load_internal(&path, Some(TypeId::of::())) .await .and_then(move |(loader, untyped_asset)| { - untyped_asset.downcast::().map_err(|_| LoadDirectError { - dependency: path.clone(), - error: AssetLoadError::RequestedHandleTypeMismatch { - path, - requested: TypeId::of::(), - actual_asset_name: loader.asset_type_name(), - loader_name: loader.type_name(), - }, - }) + untyped_asset + .downcast::() + .map_err(|_| LoadDirectError::LoadError { + dependency: path.clone(), + error: AssetLoadError::RequestedHandleTypeMismatch { + path, + requested: TypeId::of::(), + actual_asset_name: loader.asset_type_name(), + loader_name: loader.type_name(), + }, + }) }) } } diff --git a/crates/bevy_asset/src/meta.rs b/crates/bevy_asset/src/meta.rs index bad3a4be729f6..0e972261198cc 100644 --- a/crates/bevy_asset/src/meta.rs +++ b/crates/bevy_asset/src/meta.rs @@ -1,11 +1,17 @@ +use 
alloc::{ + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; + use crate::{ - self as bevy_asset, loader::AssetLoader, processor::Process, Asset, AssetPath, - DeserializeMetaError, VisitAssetDependencies, + loader::AssetLoader, processor::Process, Asset, AssetPath, DeserializeMetaError, + VisitAssetDependencies, }; -use bevy_utils::tracing::error; use downcast_rs::{impl_downcast, Downcast}; use ron::ser::PrettyConfig; use serde::{Deserialize, Serialize}; +use tracing::error; pub const META_FORMAT_VERSION: &str = "1.0"; pub type MetaTransform = Box; diff --git a/crates/bevy_asset/src/path.rs b/crates/bevy_asset/src/path.rs index 3bbb643650b6f..ad127812dcfcf 100644 --- a/crates/bevy_asset/src/path.rs +++ b/crates/bevy_asset/src/path.rs @@ -1,4 +1,8 @@ use crate::io::AssetSourceId; +use alloc::{ + borrow::ToOwned, + string::{String, ToString}, +}; use atomicow::CowArc; use bevy_reflect::{Reflect, ReflectDeserialize, ReflectSerialize}; use core::{ @@ -14,10 +18,10 @@ use thiserror::Error; /// /// Asset paths consist of three main parts: /// * [`AssetPath::source`]: The name of the [`AssetSource`](crate::io::AssetSource) to load the asset from. -/// This is optional. If one is not set the default source will be used (which is the `assets` folder by default). +/// This is optional. If one is not set the default source will be used (which is the `assets` folder by default). /// * [`AssetPath::path`]: The "virtual filesystem path" pointing to an asset source file. /// * [`AssetPath::label`]: An optional "named sub asset". When assets are loaded, they are -/// allowed to load "sub assets" of any type, which are identified by a named "label". +/// allowed to load "sub assets" of any type, which are identified by a named "label". /// /// Asset paths are generally constructed (and visualized) as strings: /// @@ -49,7 +53,7 @@ use thiserror::Error; /// This also means that you should use [`AssetPath::parse`] in cases where `&str` is the explicit type. 
#[derive(Eq, PartialEq, Hash, Clone, Default, Reflect)] #[reflect(opaque)] -#[reflect(Debug, PartialEq, Hash, Serialize, Deserialize)] +#[reflect(Debug, PartialEq, Hash, Clone, Serialize, Deserialize)] pub struct AssetPath<'a> { source: AssetSourceId<'a>, path: CowArc<'a, Path>, @@ -316,7 +320,7 @@ impl<'a> AssetPath<'a> { /// If internally a value is a static reference, the static reference will be used unchanged. /// If internally a value is an "owned [`Arc`]", it will remain unchanged. /// - /// [`Arc`]: std::sync::Arc + /// [`Arc`]: alloc::sync::Arc pub fn into_owned(self) -> AssetPath<'static> { AssetPath { source: self.source.into_owned(), @@ -329,7 +333,7 @@ impl<'a> AssetPath<'a> { /// If internally a value is a static reference, the static reference will be used unchanged. /// If internally a value is an "owned [`Arc`]", the [`Arc`] will be cloned. /// - /// [`Arc`]: std::sync::Arc + /// [`Arc`]: alloc::sync::Arc #[inline] pub fn clone_owned(&self) -> AssetPath<'static> { self.clone().into_owned() @@ -454,7 +458,7 @@ impl<'a> AssetPath<'a> { pub fn get_full_extension(&self) -> Option { let file_name = self.path().file_name()?.to_str()?; let index = file_name.find('.')?; - let mut extension = file_name[index + 1..].to_lowercase(); + let mut extension = file_name[index + 1..].to_owned(); // Strip off any query parameters let query = extension.find('?'); @@ -474,6 +478,51 @@ impl<'a> AssetPath<'a> { } }) } + + /// Returns `true` if this [`AssetPath`] points to a file that is + /// outside of it's [`AssetSource`](crate::io::AssetSource) folder. + /// + /// ## Example + /// ``` + /// # use bevy_asset::AssetPath; + /// // Inside the default AssetSource. + /// let path = AssetPath::parse("thingy.png"); + /// assert!( ! path.is_unapproved()); + /// let path = AssetPath::parse("gui/thingy.png"); + /// assert!( ! path.is_unapproved()); + /// + /// // Inside a different AssetSource. + /// let path = AssetPath::parse("embedded://thingy.png"); + /// assert!( ! 
path.is_unapproved()); + /// + /// // Exits the `AssetSource`s directory. + /// let path = AssetPath::parse("../thingy.png"); + /// assert!(path.is_unapproved()); + /// let path = AssetPath::parse("folder/../../thingy.png"); + /// assert!(path.is_unapproved()); + /// + /// // This references the linux root directory. + /// let path = AssetPath::parse("/home/thingy.png"); + /// assert!(path.is_unapproved()); + /// ``` + pub fn is_unapproved(&self) -> bool { + use std::path::Component; + let mut simplified = PathBuf::new(); + for component in self.path.components() { + match component { + Component::Prefix(_) | Component::RootDir => return true, + Component::CurDir => {} + Component::ParentDir => { + if !simplified.pop() { + return true; + } + } + Component::Normal(os_str) => simplified.push(os_str), + } + } + + false + } } impl AssetPath<'static> { @@ -629,6 +678,7 @@ pub(crate) fn normalize_path(path: &Path) -> PathBuf { #[cfg(test)] mod tests { use crate::AssetPath; + use alloc::string::ToString; use std::path::Path; #[test] @@ -972,5 +1022,8 @@ mod tests { let result = AssetPath::from("http://a.tar.bz2?foo=bar#Baz"); assert_eq!(result.get_full_extension(), Some("tar.bz2".to_string())); + + let result = AssetPath::from("asset.Custom"); + assert_eq!(result.get_full_extension(), Some("Custom".to_string())); } } diff --git a/crates/bevy_asset/src/processor/log.rs b/crates/bevy_asset/src/processor/log.rs index 2649b815d8519..f4a0f81862166 100644 --- a/crates/bevy_asset/src/processor/log.rs +++ b/crates/bevy_asset/src/processor/log.rs @@ -1,9 +1,15 @@ use crate::AssetPath; +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; use async_fs::File; -use bevy_utils::{tracing::error, HashSet}; +use bevy_platform::collections::HashSet; use futures_lite::{AsyncReadExt, AsyncWriteExt}; use std::path::PathBuf; use thiserror::Error; +use tracing::error; /// An in-memory representation of a single [`ProcessorTransactionLog`] entry. 
#[derive(Debug)] @@ -26,8 +32,10 @@ pub struct ProcessorTransactionLog { /// An error that occurs when reading from the [`ProcessorTransactionLog`] fails. #[derive(Error, Debug)] pub enum ReadLogError { + /// An invalid log line was encountered, consisting of the contained string. #[error("Encountered an invalid log line: '{0}'")] InvalidLine(String), + /// A file-system-based error occurred while reading the log file. #[error("Failed to read log file: {0}")] Io(#[from] futures_io::Error), } @@ -45,10 +53,13 @@ pub struct WriteLogError { /// An error that occurs when validating the [`ProcessorTransactionLog`] fails. #[derive(Error, Debug)] pub enum ValidateLogError { + /// An error that could not be recovered from. All assets will be reprocessed. #[error("Encountered an unrecoverable error. All assets will be reprocessed.")] UnrecoverableError, + /// A [`ReadLogError`]. #[error(transparent)] ReadLogError(#[from] ReadLogError), + /// Duplicated process asset transactions occurred. #[error("Encountered a duplicate process asset transaction: {0:?}")] EntryErrors(Vec), } @@ -56,10 +67,13 @@ pub enum ValidateLogError { /// An error that occurs when validating individual [`ProcessorTransactionLog`] entries. #[derive(Error, Debug)] pub enum LogEntryError { + /// A duplicate process asset transaction occurred for the given asset path. #[error("Encountered a duplicate process asset transaction: {0}")] DuplicateTransaction(AssetPath<'static>), + /// A transaction was ended that never started for the given asset path. #[error("A transaction was ended that never started {0}")] EndedMissingTransaction(AssetPath<'static>), + /// An asset started processing but never finished at the given asset path. 
#[error("An asset started processing but never finished: {0}")] UnfinishedTransaction(AssetPath<'static>), } diff --git a/crates/bevy_asset/src/processor/mod.rs b/crates/bevy_asset/src/processor/mod.rs index c74fd80b5673d..a239d66a9b59a 100644 --- a/crates/bevy_asset/src/processor/mod.rs +++ b/crates/bevy_asset/src/processor/mod.rs @@ -54,25 +54,25 @@ use crate::{ AssetMetaDyn, AssetMetaMinimal, ProcessedInfo, ProcessedInfoMinimal, }, AssetLoadError, AssetMetaCheck, AssetPath, AssetServer, AssetServerMode, DeserializeMetaError, - MissingAssetLoaderForExtensionError, + MissingAssetLoaderForExtensionError, UnapprovedPathMode, WriteDefaultMetaError, }; -use alloc::{collections::VecDeque, sync::Arc}; +use alloc::{borrow::ToOwned, boxed::Box, collections::VecDeque, sync::Arc, vec, vec::Vec}; use bevy_ecs::prelude::*; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_tasks::IoTaskPool; -use bevy_utils::{ - tracing::{debug, error, trace, warn}, - HashMap, HashSet, -}; -#[cfg(feature = "trace")] -use bevy_utils::{ - tracing::{info_span, instrument::Instrument}, - ConditionalSendFuture, -}; use futures_io::ErrorKind; use futures_lite::{AsyncReadExt, AsyncWriteExt, StreamExt}; use parking_lot::RwLock; use std::path::{Path, PathBuf}; use thiserror::Error; +use tracing::{debug, error, trace, warn}; + +#[cfg(feature = "trace")] +use { + alloc::string::ToString, + bevy_tasks::ConditionalSendFuture, + tracing::{info_span, instrument::Instrument}, +}; /// A "background" asset processor that reads asset values from a source [`AssetSource`] (which corresponds to an [`AssetReader`](crate::io::AssetReader) / [`AssetWriter`](crate::io::AssetWriter) pair), /// processes them in some way, and writes them to a destination [`AssetSource`]. @@ -122,6 +122,7 @@ impl AssetProcessor { AssetServerMode::Processed, AssetMetaCheck::Always, false, + UnapprovedPathMode::default(), ); Self { server, data } } @@ -207,10 +208,13 @@ impl AssetProcessor { /// Processes all assets. 
This will: /// * For each "processed [`AssetSource`]: /// * Scan the [`ProcessorTransactionLog`] and recover from any failures detected - /// * Scan the processed [`AssetReader`](crate::io::AssetReader) to build the current view of already processed assets. - /// * Scan the unprocessed [`AssetReader`](crate::io::AssetReader) and remove any final processed assets that are invalid or no longer exist. - /// * For each asset in the unprocessed [`AssetReader`](crate::io::AssetReader), kick off a new "process job", which will process the asset - /// (if the latest version of the asset has not been processed). + /// * Scan the processed [`AssetReader`](crate::io::AssetReader) to build the current view of + /// already processed assets. + /// * Scan the unprocessed [`AssetReader`](crate::io::AssetReader) and remove any final + /// processed assets that are invalid or no longer exist. + /// * For each asset in the unprocessed [`AssetReader`](crate::io::AssetReader), kick off a new + /// "process job", which will process the asset + /// (if the latest version of the asset has not been processed). #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] pub fn process_assets(&self) { let start_time = std::time::Instant::now(); @@ -258,6 +262,58 @@ impl AssetProcessor { } } + /// Writes the default meta file for the provided `path`. + /// + /// This function generates the appropriate meta file to process `path` with the default + /// processor. If there is no default processor, it falls back to the default loader. + /// + /// Note if there is already a meta file for `path`, this function returns + /// `Err(WriteDefaultMetaError::MetaAlreadyExists)`. 
+ pub async fn write_default_meta_file_for_path( + &self, + path: impl Into>, + ) -> Result<(), WriteDefaultMetaError> { + let path = path.into(); + let Some(processor) = path + .get_full_extension() + .and_then(|extension| self.get_default_processor(&extension)) + else { + return self + .server + .write_default_loader_meta_file_for_path(path) + .await; + }; + + let meta = processor.default_meta(); + let serialized_meta = meta.serialize(); + + let source = self.get_source(path.source())?; + + // Note: we get the reader rather than the processed reader, since we want to write the meta + // file for the unprocessed version of that asset (so it will be processed by the default + // processor). + let reader = source.reader(); + match reader.read_meta_bytes(path.path()).await { + Ok(_) => return Err(WriteDefaultMetaError::MetaAlreadyExists), + Err(AssetReaderError::NotFound(_)) => { + // The meta file couldn't be found so just fall through. + } + Err(AssetReaderError::Io(err)) => { + return Err(WriteDefaultMetaError::IoErrorFromExistingMetaCheck(err)) + } + Err(AssetReaderError::HttpError(err)) => { + return Err(WriteDefaultMetaError::HttpErrorFromExistingMetaCheck(err)) + } + } + + let writer = source.writer()?; + writer + .write_meta_bytes(path.path(), &serialized_meta) + .await?; + + Ok(()) + } + async fn handle_asset_source_event(&self, source: &AssetSource, event: AssetSourceEvent) { trace!("{event:?}"); match event { @@ -381,7 +437,7 @@ impl AssetProcessor { // Therefore, we shouldn't automatically delete the asset ... that is a // user-initiated action. debug!( - "Meta for asset {:?} was removed. Attempting to re-process", + "Meta for asset {} was removed. Attempting to re-process", AssetPath::from_path(&path).with_source(source.id()) ); self.process_asset(source, path).await; @@ -389,7 +445,10 @@ impl AssetProcessor { /// Removes all processed assets stored at the given path (respecting transactionality), then removes the folder itself. 
async fn handle_removed_folder(&self, source: &AssetSource, path: &Path) { - debug!("Removing folder {:?} because source was removed", path); + debug!( + "Removing folder {} because source was removed", + path.display() + ); let processed_reader = source.processed_reader().unwrap(); match processed_reader.read_directory(path).await { Ok(mut path_stream) => { @@ -478,7 +537,6 @@ impl AssetProcessor { self.set_state(ProcessorState::Finished).await; } - #[allow(unused)] #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] async fn process_assets_internal<'scope>( &'scope self, @@ -739,7 +797,7 @@ impl AssetProcessor { ) -> Result { // TODO: The extension check was removed now that AssetPath is the input. is that ok? // TODO: check if already processing to protect against duplicate hot-reload events - debug!("Processing {:?}", asset_path); + debug!("Processing {}", asset_path); let server = &self.server; let path = asset_path.path(); let reader = source.reader(); @@ -798,12 +856,6 @@ impl AssetProcessor { } }; let meta_bytes = meta.serialize(); - // write meta to source location if it doesn't already exist - source - .writer()? - .write_meta_bytes(path, &meta_bytes) - .await - .map_err(writer_err)?; (meta, meta_bytes, processor) } Err(err) => { @@ -1237,7 +1289,7 @@ impl ProcessorAssetInfos { ) { match result { Ok(ProcessResult::Processed(processed_info)) => { - debug!("Finished processing \"{:?}\"", asset_path); + debug!("Finished processing \"{}\"", asset_path); // clean up old dependents let old_processed_info = self .infos @@ -1260,7 +1312,7 @@ impl ProcessorAssetInfos { } } Ok(ProcessResult::SkippedNotChanged) => { - debug!("Skipping processing (unchanged) \"{:?}\"", asset_path); + debug!("Skipping processing (unchanged) \"{}\"", asset_path); let info = self.get_mut(&asset_path).expect("info should exist"); // NOTE: skipping an asset on a given pass doesn't mean it won't change in the future as a result // of a dependency being re-processed. 
This means apps might receive an "old" (but valid) asset first. @@ -1271,7 +1323,7 @@ impl ProcessorAssetInfos { info.update_status(ProcessStatus::Processed).await; } Ok(ProcessResult::Ignored) => { - debug!("Skipping processing (ignored) \"{:?}\"", asset_path); + debug!("Skipping processing (ignored) \"{}\"", asset_path); } Err(ProcessError::ExtensionRequired) => { // Skip assets without extensions diff --git a/crates/bevy_asset/src/processor/process.rs b/crates/bevy_asset/src/processor/process.rs index 64370824dc630..b37265d0fb660 100644 --- a/crates/bevy_asset/src/processor/process.rs +++ b/crates/bevy_asset/src/processor/process.rs @@ -10,7 +10,12 @@ use crate::{ AssetLoadError, AssetLoader, AssetPath, DeserializeMetaError, ErasedLoadedAsset, MissingAssetLoaderForExtensionError, MissingAssetLoaderForTypeNameError, }; -use bevy_utils::{BoxedFuture, ConditionalSendFuture}; +use alloc::{ + borrow::ToOwned, + boxed::Box, + string::{String, ToString}, +}; +use bevy_tasks::{BoxedFuture, ConditionalSendFuture}; use core::marker::PhantomData; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -105,26 +110,6 @@ impl< } } -/// A flexible [`Process`] implementation that loads the source [`Asset`] using the `L` [`AssetLoader`], then -/// saves that `L` asset using the `S` [`AssetSaver`]. -/// -/// This is a specialized use case of [`LoadTransformAndSave`] and is useful where there is no asset manipulation -/// such as when compressing assets. -/// -/// This uses [`LoadAndSaveSettings`] to configure the processor. -/// -/// [`Asset`]: crate::Asset -#[deprecated = "Use `LoadTransformAndSave::Asset>, S>` instead"] -pub type LoadAndSave = - LoadTransformAndSave::Asset>, S>; - -/// Settings for the [`LoadAndSave`] [`Process::Settings`] implementation. -/// -/// `LoaderSettings` corresponds to [`AssetLoader::Settings`] and `SaverSettings` corresponds to [`AssetSaver::Settings`]. 
-#[deprecated = "Use `LoadTransformAndSaveSettings` instead"] -pub type LoadAndSaveSettings = - LoadTransformAndSaveSettings; - /// An error that is encountered during [`Process::process`]. #[derive(Error, Debug)] pub enum ProcessError { diff --git a/crates/bevy_asset/src/reflect.rs b/crates/bevy_asset/src/reflect.rs index 3aaa1580bb110..5c436c10610f0 100644 --- a/crates/bevy_asset/src/reflect.rs +++ b/crates/bevy_asset/src/reflect.rs @@ -1,3 +1,4 @@ +use alloc::boxed::Box; use core::any::{Any, TypeId}; use bevy_ecs::world::{unsafe_world_cell::UnsafeWorldCell, World}; @@ -243,9 +244,9 @@ impl FromType> for ReflectHandle { #[cfg(test)] mod tests { + use alloc::{string::String, vec::Vec}; use core::any::TypeId; - use crate as bevy_asset; use crate::{Asset, AssetApp, AssetPlugin, ReflectAsset, UntypedHandle}; use bevy_app::App; use bevy_ecs::reflect::AppTypeRegistry; diff --git a/crates/bevy_asset/src/render_asset.rs b/crates/bevy_asset/src/render_asset.rs index 3bbc3dfd48458..583ee45457895 100644 --- a/crates/bevy_asset/src/render_asset.rs +++ b/crates/bevy_asset/src/render_asset.rs @@ -27,9 +27,11 @@ bitflags::bitflags! { #[repr(transparent)] #[derive(Serialize, Deserialize, Hash, Clone, Copy, PartialEq, Eq, Debug, Reflect)] #[reflect(opaque)] - #[reflect(Serialize, Deserialize, Hash, PartialEq, Debug)] + #[reflect(Serialize, Deserialize, Hash, Clone, PartialEq, Debug)] pub struct RenderAssetUsages: u8 { + /// The bit flag for the main world. const MAIN_WORLD = 1 << 0; + /// The bit flag for the render world. 
const RENDER_WORLD = 1 << 1; } } diff --git a/crates/bevy_asset/src/saver.rs b/crates/bevy_asset/src/saver.rs index f2cb5bb9330f9..c8b96012eea72 100644 --- a/crates/bevy_asset/src/saver.rs +++ b/crates/bevy_asset/src/saver.rs @@ -2,8 +2,10 @@ use crate::{ io::Writer, meta::Settings, transformer::TransformedAsset, Asset, AssetLoader, ErasedLoadedAsset, Handle, LabeledAsset, UntypedHandle, }; +use alloc::boxed::Box; use atomicow::CowArc; -use bevy_utils::{BoxedFuture, ConditionalSendFuture, HashMap}; +use bevy_platform::collections::HashMap; +use bevy_tasks::{BoxedFuture, ConditionalSendFuture}; use core::{borrow::Borrow, hash::Hash, ops::Deref}; use serde::{Deserialize, Serialize}; diff --git a/crates/bevy_asset/src/server/info.rs b/crates/bevy_asset/src/server/info.rs index 898b3a76ec46f..1b3bb3cb65d68 100644 --- a/crates/bevy_asset/src/server/info.rs +++ b/crates/bevy_asset/src/server/info.rs @@ -4,14 +4,21 @@ use crate::{ Handle, InternalAssetEvent, LoadState, RecursiveDependencyLoadState, StrongHandle, UntypedAssetId, UntypedHandle, }; -use alloc::sync::{Arc, Weak}; +use alloc::{ + borrow::ToOwned, + boxed::Box, + sync::{Arc, Weak}, + vec::Vec, +}; use bevy_ecs::world::World; +use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; use bevy_tasks::Task; -use bevy_utils::{tracing::warn, Entry, HashMap, HashSet, TypeIdMap}; +use bevy_utils::TypeIdMap; use core::{any::TypeId, task::Waker}; use crossbeam_channel::Sender; use either::Either; use thiserror::Error; +use tracing::warn; #[derive(Debug)] pub(crate) struct AssetInfo { @@ -112,10 +119,6 @@ impl AssetInfos { .unwrap() } - #[expect( - clippy::too_many_arguments, - reason = "Arguments needed so that both `create_loading_handle_untyped()` and `get_or_create_path_handle_internal()` may share code." 
- )] fn create_handle_internal( infos: &mut HashMap, handle_providers: &TypeIdMap, @@ -344,14 +347,9 @@ impl AssetInfos { /// Returns `true` if the asset this path points to is still alive pub(crate) fn is_path_alive<'a>(&self, path: impl Into>) -> bool { - let path = path.into(); - - let result = self - .get_path_ids(&path) + self.get_path_ids(&path.into()) .filter_map(|id| self.infos.get(&id)) - .any(|info| info.weak_handle.strong_count() > 0); - - result + .any(|info| info.weak_handle.strong_count() > 0) } /// Returns `true` if the asset at this path should be reloaded @@ -443,7 +441,7 @@ impl AssetInfos { } else { // the dependency id does not exist, which implies it was manually removed or never existed in the first place warn!( - "Dependency {:?} from asset {:?} is unknown. This asset's dependency load status will not switch to 'Loaded' until the unknown dependency is loaded.", + "Dependency {} from asset {} is unknown. This asset's dependency load status will not switch to 'Loaded' until the unknown dependency is loaded.", dep_id, loaded_asset_id ); true diff --git a/crates/bevy_asset/src/server/loaders.rs b/crates/bevy_asset/src/server/loaders.rs index 2442de389ae3d..08384e9efeaa4 100644 --- a/crates/bevy_asset/src/server/loaders.rs +++ b/crates/bevy_asset/src/server/loaders.rs @@ -2,17 +2,21 @@ use crate::{ loader::{AssetLoader, ErasedAssetLoader}, path::AssetPath, }; -use alloc::sync::Arc; +use alloc::{boxed::Box, sync::Arc, vec::Vec}; use async_broadcast::RecvError; +use bevy_platform::collections::HashMap; use bevy_tasks::IoTaskPool; -use bevy_utils::{tracing::warn, HashMap, TypeIdMap}; +use bevy_utils::TypeIdMap; +use core::any::TypeId; +use thiserror::Error; +use tracing::warn; + #[cfg(feature = "trace")] -use bevy_utils::{ +use { + alloc::string::ToString, + bevy_tasks::ConditionalSendFuture, tracing::{info_span, instrument::Instrument}, - ConditionalSendFuture, }; -use core::any::TypeId; -use thiserror::Error; #[derive(Default)] pub(crate) struct 
AssetLoaders { @@ -47,6 +51,7 @@ impl AssetLoaders { }; if is_new { + let existing_loaders_for_type_id = self.type_id_to_loaders.get(&loader_asset_type); let mut duplicate_extensions = Vec::new(); for extension in AssetLoader::extensions(&*loader) { let list = self @@ -55,26 +60,29 @@ impl AssetLoaders { .or_default(); if !list.is_empty() { - duplicate_extensions.push(extension); + if let Some(existing_loaders_for_type_id) = existing_loaders_for_type_id { + if list + .iter() + .any(|index| existing_loaders_for_type_id.contains(index)) + { + duplicate_extensions.push(extension); + } + } } list.push(loader_index); } - - self.type_name_to_loader.insert(type_name, loader_index); - - let list = self - .type_id_to_loaders - .entry(loader_asset_type) - .or_default(); - - let duplicate_asset_registration = !list.is_empty(); - if !duplicate_extensions.is_empty() && duplicate_asset_registration { + if !duplicate_extensions.is_empty() { warn!("Duplicate AssetLoader registered for Asset type `{loader_asset_type_name}` with extensions `{duplicate_extensions:?}`. 
\ Loader must be specified in a .meta file in order to load assets of this type with these extensions."); } - list.push(loader_index); + self.type_name_to_loader.insert(type_name, loader_index); + + self.type_id_to_loaders + .entry(loader_asset_type) + .or_default() + .push(loader_index); self.loaders.push(MaybeAssetLoader::Ready(loader)); } else { @@ -108,6 +116,8 @@ impl AssetLoaders { self.preregistered_loaders.insert(type_name, loader_index); self.type_name_to_loader.insert(type_name, loader_index); + + let existing_loaders_for_type_id = self.type_id_to_loaders.get(&loader_asset_type); let mut duplicate_extensions = Vec::new(); for extension in extensions { let list = self @@ -116,24 +126,27 @@ impl AssetLoaders { .or_default(); if !list.is_empty() { - duplicate_extensions.push(extension); + if let Some(existing_loaders_for_type_id) = existing_loaders_for_type_id { + if list + .iter() + .any(|index| existing_loaders_for_type_id.contains(index)) + { + duplicate_extensions.push(extension); + } + } } list.push(loader_index); } - - let list = self - .type_id_to_loaders - .entry(loader_asset_type) - .or_default(); - - let duplicate_asset_registration = !list.is_empty(); - if !duplicate_extensions.is_empty() && duplicate_asset_registration { + if !duplicate_extensions.is_empty() { warn!("Duplicate AssetLoader preregistered for Asset type `{loader_asset_type_name}` with extensions `{duplicate_extensions:?}`. 
\ Loader must be specified in a .meta file in order to load assets of this type with these extensions."); } - list.push(loader_index); + self.type_id_to_loaders + .entry(loader_asset_type) + .or_default() + .push(loader_index); let (mut sender, receiver) = async_broadcast::broadcast(1); sender.set_overflow(true); @@ -329,6 +342,7 @@ impl AssetLoader for InstrumentedAssetLoader { #[cfg(test)] mod tests { + use alloc::{format, string::String}; use core::marker::PhantomData; use std::{ path::Path, @@ -338,22 +352,18 @@ mod tests { use bevy_reflect::TypePath; use bevy_tasks::block_on; - use crate::{self as bevy_asset, Asset}; + use crate::Asset; use super::*; - // The compiler notices these fields are never read and raises a dead_code lint which kill CI. - #[allow(dead_code)] #[derive(Asset, TypePath, Debug)] - struct A(usize); + struct A; - #[allow(dead_code)] #[derive(Asset, TypePath, Debug)] - struct B(usize); + struct B; - #[allow(dead_code)] #[derive(Asset, TypePath, Debug)] - struct C(usize); + struct C; struct Loader { sender: Sender<()>, diff --git a/crates/bevy_asset/src/server/mod.rs b/crates/bevy_asset/src/server/mod.rs index bfeb7a40f99f2..ff5800474d8b7 100644 --- a/crates/bevy_asset/src/server/mod.rs +++ b/crates/bevy_asset/src/server/mod.rs @@ -5,7 +5,8 @@ use crate::{ folder::LoadedFolder, io::{ AssetReaderError, AssetSource, AssetSourceEvent, AssetSourceId, AssetSources, - ErasedAssetReader, MissingAssetSourceError, MissingProcessedAssetReaderError, Reader, + AssetWriterError, ErasedAssetReader, MissingAssetSourceError, MissingAssetWriterError, + MissingProcessedAssetReaderError, Reader, }, loader::{AssetLoader, ErasedAssetLoader, LoadContext, LoadedAsset}, meta::{ @@ -14,17 +15,19 @@ use crate::{ }, path::AssetPath, Asset, AssetEvent, AssetHandleProvider, AssetId, AssetLoadFailedEvent, AssetMetaCheck, Assets, - DeserializeMetaError, ErasedLoadedAsset, Handle, LoadedUntypedAsset, UntypedAssetId, - UntypedAssetLoadFailedEvent, UntypedHandle, + 
DeserializeMetaError, ErasedLoadedAsset, Handle, LoadedUntypedAsset, UnapprovedPathMode, + UntypedAssetId, UntypedAssetLoadFailedEvent, UntypedHandle, +}; +use alloc::{borrow::ToOwned, boxed::Box, vec, vec::Vec}; +use alloc::{ + format, + string::{String, ToString}, + sync::Arc, }; -use alloc::sync::Arc; use atomicow::CowArc; use bevy_ecs::prelude::*; +use bevy_platform::collections::HashSet; use bevy_tasks::IoTaskPool; -use bevy_utils::{ - tracing::{error, info}, - HashSet, -}; use core::{any::TypeId, future::Future, panic::AssertUnwindSafe, task::Poll}; use crossbeam_channel::{Receiver, Sender}; use either::Either; @@ -34,13 +37,15 @@ use loaders::*; use parking_lot::{RwLock, RwLockWriteGuard}; use std::path::{Path, PathBuf}; use thiserror::Error; +use tracing::{error, info}; -/// Loads and tracks the state of [`Asset`] values from a configured [`AssetReader`](crate::io::AssetReader). This can be used to kick off new asset loads and -/// retrieve their current load states. +/// Loads and tracks the state of [`Asset`] values from a configured [`AssetReader`](crate::io::AssetReader). +/// This can be used to kick off new asset loads and retrieve their current load states. /// /// The general process to load an asset is: -/// 1. Initialize a new [`Asset`] type with the [`AssetServer`] via [`AssetApp::init_asset`], which will internally call [`AssetServer::register_asset`] -/// and set up related ECS [`Assets`] storage and systems. +/// 1. Initialize a new [`Asset`] type with the [`AssetServer`] via [`AssetApp::init_asset`], which +/// will internally call [`AssetServer::register_asset`] and set up related ECS [`Assets`] +/// storage and systems. /// 2. Register one or more [`AssetLoader`]s for that asset with [`AssetApp::init_asset_loader`] /// 3. Add the asset to your asset folder (defaults to `assets`). /// 4. Call [`AssetServer::load`] with a path to your asset. 
@@ -63,6 +68,7 @@ pub(crate) struct AssetServerData { sources: AssetSources, mode: AssetServerMode, meta_check: AssetMetaCheck, + unapproved_path_mode: UnapprovedPathMode, } /// The "asset mode" the server is currently in. @@ -77,13 +83,19 @@ pub enum AssetServerMode { impl AssetServer { /// Create a new instance of [`AssetServer`]. If `watch_for_changes` is true, the [`AssetReader`](crate::io::AssetReader) storage will watch for changes to /// asset sources and hot-reload them. - pub fn new(sources: AssetSources, mode: AssetServerMode, watching_for_changes: bool) -> Self { + pub fn new( + sources: AssetSources, + mode: AssetServerMode, + watching_for_changes: bool, + unapproved_path_mode: UnapprovedPathMode, + ) -> Self { Self::new_with_loaders( sources, Default::default(), mode, AssetMetaCheck::Always, watching_for_changes, + unapproved_path_mode, ) } @@ -94,6 +106,7 @@ impl AssetServer { mode: AssetServerMode, meta_check: AssetMetaCheck, watching_for_changes: bool, + unapproved_path_mode: UnapprovedPathMode, ) -> Self { Self::new_with_loaders( sources, @@ -101,6 +114,7 @@ impl AssetServer { mode, meta_check, watching_for_changes, + unapproved_path_mode, ) } @@ -110,6 +124,7 @@ impl AssetServer { mode: AssetServerMode, meta_check: AssetMetaCheck, watching_for_changes: bool, + unapproved_path_mode: UnapprovedPathMode, ) -> Self { let (asset_event_sender, asset_event_receiver) = crossbeam_channel::unbounded(); let mut infos = AssetInfos::default(); @@ -123,6 +138,7 @@ impl AssetServer { asset_event_receiver, loaders, infos: RwLock::new(infos), + unapproved_path_mode, }), } } @@ -200,7 +216,7 @@ impl AssetServer { loader.ok_or_else(error)?.get().await.map_err(|_| error()) } - /// Returns the registered [`AssetLoader`] associated with the given [`std::any::type_name`], if it exists. + /// Returns the registered [`AssetLoader`] associated with the given [`core::any::type_name`], if it exists. 
pub async fn get_asset_loader_with_type_name( &self, type_name: &str, ) -> @@ -306,7 +322,16 @@ impl AssetServer { /// The asset load will fail and an error will be printed to the logs if the asset stored at `path` is not of type `A`. #[must_use = "not using the returned strong handle may result in the unexpected release of the asset"] pub fn load<'a, A: Asset>(&self, path: impl Into>) -> Handle { - self.load_with_meta_transform(path, None, ()) + self.load_with_meta_transform(path, None, (), false) + } + + /// Same as [`load`](AssetServer::load), but you can load assets from unapproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny). + /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_override<'a, A: Asset>(&self, path: impl Into>) -> Handle { + self.load_with_meta_transform(path, None, (), true) + } /// Begins loading an [`Asset`] of type `A` stored at `path` while holding a guard item. @@ -330,7 +355,20 @@ impl AssetServer { path: impl Into>, guard: G, ) -> Handle { - self.load_with_meta_transform(path, None, guard) + self.load_with_meta_transform(path, None, guard, false) + } + + /// Same as [`load`](AssetServer::load_acquire), but you can load assets from unapproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny). + /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_acquire_override<'a, A: Asset, G: Send + Sync + 'static>( + &self, + path: impl Into>, + guard: G, + ) -> Handle { + self.load_with_meta_transform(path, None, guard, true) + } /// Begins loading an [`Asset`] of type `A` stored at `path`.
The given `settings` function will override the asset's @@ -342,7 +380,30 @@ impl AssetServer { path: impl Into>, settings: impl Fn(&mut S) + Send + Sync + 'static, ) -> Handle { - self.load_with_meta_transform(path, Some(loader_settings_meta_transform(settings)), ()) + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + (), + false, + ) + } + + /// Same as [`load`](AssetServer::load_with_settings), but you can load assets from unapproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny). + /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_with_settings_override<'a, A: Asset, S: Settings>( + &self, + path: impl Into>, + settings: impl Fn(&mut S) + Send + Sync + 'static, + ) -> Handle { + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + (), + true, + ) + } /// Begins loading an [`Asset`] of type `A` stored at `path` while holding a guard item. @@ -361,7 +422,36 @@ impl AssetServer { settings: impl Fn(&mut S) + Send + Sync + 'static, guard: G, ) -> Handle { - self.load_with_meta_transform(path, Some(loader_settings_meta_transform(settings)), guard) + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + guard, + false, + ) + } + + /// Same as [`load`](AssetServer::load_acquire_with_settings), but you can load assets from unapproved paths + /// if [`AssetPlugin::unapproved_path_mode`](super::AssetPlugin::unapproved_path_mode) + /// is [`Deny`](UnapprovedPathMode::Deny).
+ /// + /// See [`UnapprovedPathMode`] and [`AssetPath::is_unapproved`] + pub fn load_acquire_with_settings_override< + 'a, + A: Asset, + S: Settings, + G: Send + Sync + 'static, + >( + &self, + path: impl Into>, + settings: impl Fn(&mut S) + Send + Sync + 'static, + guard: G, + ) -> Handle { + self.load_with_meta_transform( + path, + Some(loader_settings_meta_transform(settings)), + guard, + true, + ) } pub(crate) fn load_with_meta_transform<'a, A: Asset, G: Send + Sync + 'static>( @@ -369,8 +459,20 @@ impl AssetServer { path: impl Into>, meta_transform: Option, guard: G, + override_unapproved: bool, ) -> Handle { let path = path.into().into_owned(); + + if path.is_unapproved() { + match (&self.data.unapproved_path_mode, override_unapproved) { + (UnapprovedPathMode::Allow, _) | (UnapprovedPathMode::Deny, true) => {} + (UnapprovedPathMode::Deny, false) | (UnapprovedPathMode::Forbid, _) => { + error!("Asset path {path} is unapproved. See UnapprovedPathMode for details."); + return Handle::default(); + } + } + } + let mut infos = self.data.infos.write(); let (handle, should_load) = infos.get_or_create_path_handle::( path.clone(), @@ -518,7 +620,8 @@ impl AssetServer { /// /// ``` /// use bevy_asset::{Assets, Handle, LoadedUntypedAsset}; - /// use bevy_ecs::system::{Res, Resource}; + /// use bevy_ecs::system::Res; + /// use bevy_ecs::resource::Resource; /// /// #[derive(Resource)] /// struct LoadingUntypedHandle(Handle); @@ -906,19 +1009,19 @@ impl AssetServer { .spawn(async move { let Ok(source) = server.get_source(path.source()) else { error!( - "Failed to load {path}. AssetSource {:?} does not exist", + "Failed to load {path}. AssetSource {} does not exist", path.source() ); return; }; let asset_reader = match server.data.mode { - AssetServerMode::Unprocessed { .. } => source.reader(), - AssetServerMode::Processed { .. 
} => match source.processed_reader() { + AssetServerMode::Unprocessed => source.reader(), + AssetServerMode::Processed => match source.processed_reader() { Ok(reader) => reader, Err(_) => { error!( - "Failed to load {path}. AssetSource {:?} does not have a processed AssetReader", + "Failed to load {path}. AssetSource {} does not have a processed AssetReader", path.source() ); return; @@ -1225,8 +1328,8 @@ impl AssetServer { // Then the meta reader, if meta exists, will correspond to the meta for the current "version" of the asset. // See ProcessedAssetInfo::file_transaction_lock for more context let asset_reader = match self.data.mode { - AssetServerMode::Unprocessed { .. } => source.reader(), - AssetServerMode::Processed { .. } => source.processed_reader()?, + AssetServerMode::Unprocessed => source.reader(), + AssetServerMode::Processed => source.processed_reader()?, }; let reader = asset_reader.read(asset_path.path()).await?; let read_meta = match &self.data.meta_check { @@ -1468,13 +1571,58 @@ impl AssetServer { } } } + + /// Writes the default loader meta file for the provided `path`. + /// + /// This function only generates meta files that simply load the path directly. To generate a + /// meta file that will use the default asset processor for the path, see + /// [`AssetProcessor::write_default_meta_file_for_path`]. + /// + /// Note if there is already a meta file for `path`, this function returns + /// `Err(WriteDefaultMetaError::MetaAlreadyExists)`. 
+ /// + /// [`AssetProcessor::write_default_meta_file_for_path`]: crate::AssetProcessor::write_default_meta_file_for_path + pub async fn write_default_loader_meta_file_for_path( + &self, + path: impl Into>, + ) -> Result<(), WriteDefaultMetaError> { + let path = path.into(); + let loader = self.get_path_asset_loader(&path).await?; + + let meta = loader.default_meta(); + let serialized_meta = meta.serialize(); + + let source = self.get_source(path.source())?; + + let reader = source.reader(); + match reader.read_meta_bytes(path.path()).await { + Ok(_) => return Err(WriteDefaultMetaError::MetaAlreadyExists), + Err(AssetReaderError::NotFound(_)) => { + // The meta file couldn't be found so just fall through. + } + Err(AssetReaderError::Io(err)) => { + return Err(WriteDefaultMetaError::IoErrorFromExistingMetaCheck(err)) + } + Err(AssetReaderError::HttpError(err)) => { + return Err(WriteDefaultMetaError::HttpErrorFromExistingMetaCheck(err)) + } + } + + let writer = source.writer()?; + writer + .write_meta_bytes(path.path(), &serialized_meta) + .await?; + + Ok(()) + } } /// A system that manages internal [`AssetServer`] events, such as finalizing asset loads. pub fn handle_internal_asset_events(world: &mut World) { world.resource_scope(|world, server: Mut| { let mut infos = server.data.infos.write(); - let mut untyped_failures = vec![]; + let var_name = vec![]; + let mut untyped_failures = var_name; for event in server.data.asset_event_receiver.try_iter() { match event { InternalAssetEvent::Loaded { id, loaded_asset } => { @@ -1573,14 +1721,14 @@ pub fn handle_internal_asset_events(world: &mut World) { for source in server.data.sources.iter() { match server.data.mode { - AssetServerMode::Unprocessed { .. } => { + AssetServerMode::Unprocessed => { if let Some(receiver) = source.event_receiver() { for event in receiver.try_iter() { handle_event(source.id(), event); } } } - AssetServerMode::Processed { .. 
} => { + AssetServerMode::Processed => { if let Some(receiver) = source.processed_event_receiver() { for event in receiver.try_iter() { handle_event(source.id(), event); @@ -1726,6 +1874,10 @@ impl RecursiveDependencyLoadState { /// An error that occurs during an [`Asset`] load. #[derive(Error, Debug, Clone)] +#[expect( + missing_docs, + reason = "Adding docs to the variants would not add information beyond the error message and the names" +)] pub enum AssetLoadError { #[error("Requested handle of type {requested:?} for asset '{path}' does not match actual asset type '{actual_asset_name}', which used loader '{loader_name}'")] RequestedHandleTypeMismatch { @@ -1787,6 +1939,7 @@ pub enum AssetLoadError { }, } +/// An error that can occur during asset loading. #[derive(Error, Debug, Clone)] #[error("Failed to load asset '{path}' with asset loader '{loader_name}': {error}")] pub struct AssetLoaderError { @@ -1796,11 +1949,13 @@ pub struct AssetLoaderError { } impl AssetLoaderError { + /// The path of the asset that failed to load. pub fn path(&self) -> &AssetPath<'static> { &self.path } } +/// An error that occurs while resolving an asset added by `add_async`. #[derive(Error, Debug, Clone)] #[error("An error occurred while resolving an asset added by `add_async`: {error}")] pub struct AddAsyncError { @@ -1814,17 +1969,19 @@ pub struct MissingAssetLoaderForExtensionError { extensions: Vec, } -/// An error that occurs when an [`AssetLoader`] is not registered for a given [`std::any::type_name`]. +/// An error that occurs when an [`AssetLoader`] is not registered for a given [`core::any::type_name`]. #[derive(Error, Debug, Clone, PartialEq, Eq)] #[error("no `AssetLoader` found with the name '{type_name}'")] pub struct MissingAssetLoaderForTypeNameError { - type_name: String, + /// The type name that was not found. + pub type_name: String, } /// An error that occurs when an [`AssetLoader`] is not registered for a given [`Asset`] [`TypeId`]. 
#[derive(Error, Debug, Clone, PartialEq, Eq)] #[error("no `AssetLoader` found with the ID '{type_id:?}'")] pub struct MissingAssetLoaderForTypeIdError { + /// The type ID that was not found. pub type_id: TypeId, } @@ -1855,10 +2012,31 @@ const UNTYPED_SOURCE_SUFFIX: &str = "--untyped"; /// An error when attempting to wait asynchronously for an [`Asset`] to load. #[derive(Error, Debug, Clone)] pub enum WaitForAssetError { + /// The asset is not being loaded; waiting for it is meaningless. #[error("tried to wait for an asset that is not being loaded")] NotLoaded, + /// The asset failed to load. #[error(transparent)] Failed(Arc), + /// A dependency of the asset failed to load. #[error(transparent)] DependencyFailed(Arc), } + +#[derive(Error, Debug)] +pub enum WriteDefaultMetaError { + #[error(transparent)] + MissingAssetLoader(#[from] MissingAssetLoaderForExtensionError), + #[error(transparent)] + MissingAssetSource(#[from] MissingAssetSourceError), + #[error(transparent)] + MissingAssetWriter(#[from] MissingAssetWriterError), + #[error("failed to write default asset meta file: {0}")] + FailedToWriteMeta(#[from] AssetWriterError), + #[error("asset meta file already exists, so avoiding overwrite")] + MetaAlreadyExists, + #[error("encountered an I/O error while reading the existing meta file: {0}")] + IoErrorFromExistingMetaCheck(Arc), + #[error("encountered HTTP status {0} when reading the existing meta file")] + HttpErrorFromExistingMetaCheck(u16), +} diff --git a/crates/bevy_asset/src/transformer.rs b/crates/bevy_asset/src/transformer.rs index 484e02003f644..802e3aeaa7eff 100644 --- a/crates/bevy_asset/src/transformer.rs +++ b/crates/bevy_asset/src/transformer.rs @@ -1,6 +1,8 @@ use crate::{meta::Settings, Asset, ErasedLoadedAsset, Handle, LabeledAsset, UntypedHandle}; +use alloc::boxed::Box; use atomicow::CowArc; -use bevy_utils::{ConditionalSendFuture, HashMap}; +use bevy_platform::collections::HashMap; +use bevy_tasks::ConditionalSendFuture; use core::{ 
borrow::Borrow, convert::Infallible, @@ -252,6 +254,7 @@ pub struct IdentityAssetTransformer { } impl IdentityAssetTransformer { + /// Creates a new [`IdentityAssetTransformer`] with the correct internal [`PhantomData`] field. pub const fn new() -> Self { Self { _phantom: PhantomData, diff --git a/crates/bevy_audio/Cargo.toml b/crates/bevy_audio/Cargo.toml index 7df10a1bcbd57..84060fe26b484 100644 --- a/crates/bevy_audio/Cargo.toml +++ b/crates/bevy_audio/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_audio" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides audio functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -10,28 +10,32 @@ keywords = ["bevy"] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", -] } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } # other rodio = { version = "0.20", default-features = false } +tracing = { version = "0.1", 
default-features = false, features = ["std"] } [target.'cfg(target_os = "android")'.dependencies] cpal = { version = "0.15", optional = true } [target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. rodio = { version = "0.20", default-features = false, features = [ "wasm-bindgen", ] } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } [features] mp3 = ["rodio/mp3"] diff --git a/crates/bevy_audio/LICENSE-APACHE b/crates/bevy_audio/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_audio/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_audio/LICENSE-MIT b/crates/bevy_audio/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_audio/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_audio/src/audio.rs b/crates/bevy_audio/src/audio.rs index 25f2e07df95df..349cf6b6a46f4 100644 --- a/crates/bevy_audio/src/audio.rs +++ b/crates/bevy_audio/src/audio.rs @@ -1,5 +1,3 @@ -#![expect(deprecated)] - use crate::{AudioSource, Decodable, Volume}; use bevy_asset::{Asset, Handle}; use bevy_ecs::prelude::*; @@ -8,6 +6,7 @@ use bevy_reflect::prelude::*; /// The way Bevy manages the sound playback. #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub enum PlaybackMode { /// Play the sound once. Do nothing when it ends. /// @@ -31,7 +30,7 @@ pub enum PlaybackMode { /// [`AudioSink`][crate::AudioSink] or [`SpatialAudioSink`][crate::SpatialAudioSink] /// components. 
Changes to this component will *not* be applied to already-playing audio. #[derive(Component, Clone, Copy, Debug, Reflect)] -#[reflect(Default, Component, Debug)] +#[reflect(Clone, Default, Component, Debug)] pub struct PlaybackSettings { /// The desired playback behavior. pub mode: PlaybackMode, @@ -76,7 +75,7 @@ impl PlaybackSettings { /// added again. pub const ONCE: PlaybackSettings = PlaybackSettings { mode: PlaybackMode::Once, - volume: Volume(1.0), + volume: Volume::Linear(1.0), speed: 1.0, paused: false, muted: false, @@ -144,7 +143,7 @@ impl PlaybackSettings { /// This must be accompanied by `Transform` and `GlobalTransform`. /// Only one entity with a `SpatialListener` should be present at any given time. #[derive(Component, Clone, Debug, Reflect)] -#[reflect(Default, Component, Debug)] +#[reflect(Clone, Default, Component, Debug)] pub struct SpatialListener { /// Left ear position relative to the `GlobalTransform`. pub left_ear_offset: Vec3, @@ -176,6 +175,7 @@ impl SpatialListener { /// /// Default is `Vec3::ONE`. #[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Clone, Default)] pub struct SpatialScale(pub Vec3); impl SpatialScale { @@ -204,16 +204,9 @@ impl Default for SpatialScale { /// /// Default is `Vec3::ONE`. #[derive(Resource, Default, Clone, Copy, Reflect)] -#[reflect(Resource, Default)] +#[reflect(Resource, Default, Clone)] pub struct DefaultSpatialScale(pub SpatialScale); -/// Bundle for playing a standard bevy audio asset -#[deprecated( - since = "0.15.0", - note = "Use the `AudioPlayer` component instead. Inserting it will now also insert a `PlaybackSettings` component automatically." -)] -pub type AudioBundle = AudioSourceBundle; - /// A component for playing a sound. /// /// Insert this component onto an entity to trigger an audio source to begin playing. @@ -227,7 +220,7 @@ pub type AudioBundle = AudioSourceBundle; /// Playback can be configured using the [`PlaybackSettings`] component. 
Note that changes to the /// `PlaybackSettings` component will *not* affect already-playing audio. #[derive(Component, Reflect)] -#[reflect(Component)] +#[reflect(Component, Clone)] #[require(PlaybackSettings)] pub struct AudioPlayer(pub Handle) where @@ -252,48 +245,3 @@ impl AudioPlayer { Self(source) } } - -/// Bundle for playing a sound. -/// -/// Insert this bundle onto an entity to trigger a sound source to begin playing. -/// -/// If the handle refers to an unavailable asset (such as if it has not finished loading yet), -/// the audio will not begin playing immediately. The audio will play when the asset is ready. -/// -/// When Bevy begins the audio playback, an [`AudioSink`][crate::AudioSink] component will be -/// added to the entity. You can use that component to control the audio settings during playback. -#[derive(Bundle)] -#[deprecated( - since = "0.15.0", - note = "Use the `AudioPlayer` component instead. Inserting it will now also insert a `PlaybackSettings` component automatically." -)] -pub struct AudioSourceBundle -where - Source: Asset + Decodable, -{ - /// Asset containing the audio data to play. - pub source: AudioPlayer, - /// Initial settings that the audio starts playing with. - /// If you would like to control the audio while it is playing, - /// query for the [`AudioSink`][crate::AudioSink] component. - /// Changes to this component will *not* be applied to already-playing audio. 
- pub settings: PlaybackSettings, -} - -impl Clone for AudioSourceBundle { - fn clone(&self) -> Self { - Self { - source: self.source.clone(), - settings: self.settings, - } - } -} - -impl Default for AudioSourceBundle { - fn default() -> Self { - Self { - source: AudioPlayer(Handle::default()), - settings: Default::default(), - } - } -} diff --git a/crates/bevy_audio/src/audio_output.rs b/crates/bevy_audio/src/audio_output.rs index 45896c81ba7ac..1869fb47555db 100644 --- a/crates/bevy_audio/src/audio_output.rs +++ b/crates/bevy_audio/src/audio_output.rs @@ -4,11 +4,10 @@ use crate::{ }; use bevy_asset::{Asset, Assets}; use bevy_ecs::{prelude::*, system::SystemParam}; -use bevy_hierarchy::DespawnRecursiveExt; use bevy_math::Vec3; use bevy_transform::prelude::GlobalTransform; -use bevy_utils::tracing::warn; use rodio::{OutputStream, OutputStreamHandle, Sink, Source, SpatialSink}; +use tracing::warn; use crate::{AudioSink, AudioSinkPlayback}; @@ -47,11 +46,11 @@ impl Default for AudioOutput { } /// Marker for internal use, to despawn entities when playback finishes. -#[derive(Component)] +#[derive(Component, Default)] pub struct PlaybackDespawnMarker; /// Marker for internal use, to remove audio components when playback finishes. -#[derive(Component)] +#[derive(Component, Default)] pub struct PlaybackRemoveMarker; #[derive(SystemParam)] @@ -130,7 +129,7 @@ pub(crate) fn play_queued_audio_system( // the user may have made a mistake. if ear_positions.multiple_listeners() { warn!( - "Multiple SpatialListeners found. Using {:?}.", + "Multiple SpatialListeners found. 
Using {}.", ear_positions.query.iter().next().unwrap().0 ); } @@ -171,7 +170,7 @@ pub(crate) fn play_queued_audio_system( } sink.set_speed(settings.speed); - sink.set_volume(settings.volume.0 * global_volume.volume.0); + sink.set_volume(settings.volume * global_volume.volume); if settings.paused { sink.pause(); @@ -211,7 +210,7 @@ pub(crate) fn play_queued_audio_system( } sink.set_speed(settings.speed); - sink.set_volume(settings.volume.0 * global_volume.volume.0); + sink.set_volume(settings.volume * global_volume.volume); if settings.paused { sink.pause(); @@ -253,12 +252,12 @@ pub(crate) fn cleanup_finished_audio( ) { for (entity, sink) in &query_nonspatial_despawn { if sink.sink.empty() { - commands.entity(entity).despawn_recursive(); + commands.entity(entity).despawn(); } } for (entity, sink) in &query_spatial_despawn { if sink.sink.empty() { - commands.entity(entity).despawn_recursive(); + commands.entity(entity).despawn(); } } for (entity, sink) in &query_nonspatial_remove { diff --git a/crates/bevy_audio/src/lib.rs b/crates/bevy_audio/src/lib.rs index a8de9393d15e1..babae2f8a9be9 100644 --- a/crates/bevy_audio/src/lib.rs +++ b/crates/bevy_audio/src/lib.rs @@ -39,13 +39,11 @@ mod volume; /// The audio prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. 
-#[expect(deprecated)] pub mod prelude { #[doc(hidden)] pub use crate::{ - AudioBundle, AudioPlayer, AudioSink, AudioSinkPlayback, AudioSource, AudioSourceBundle, - Decodable, GlobalVolume, Pitch, PitchBundle, PlaybackSettings, SpatialAudioSink, - SpatialListener, + AudioPlayer, AudioSink, AudioSinkPlayback, AudioSource, Decodable, GlobalVolume, Pitch, + PlaybackSettings, SpatialAudioSink, SpatialListener, }; } diff --git a/crates/bevy_audio/src/pitch.rs b/crates/bevy_audio/src/pitch.rs index 02863d6c62781..d85b9b31cf071 100644 --- a/crates/bevy_audio/src/pitch.rs +++ b/crates/bevy_audio/src/pitch.rs @@ -1,6 +1,4 @@ -#![expect(deprecated)] - -use crate::{AudioSourceBundle, Decodable}; +use crate::Decodable; use bevy_asset::Asset; use bevy_reflect::TypePath; use rodio::{ @@ -35,10 +33,3 @@ impl Decodable for Pitch { SineWave::new(self.frequency).take_duration(self.duration) } } - -/// Bundle for playing a bevy note sound -#[deprecated( - since = "0.15.0", - note = "Use the `AudioPlayer` component instead. Inserting it will now also insert a `PlaybackSettings` component automatically." -)] -pub type PitchBundle = AudioSourceBundle; diff --git a/crates/bevy_audio/src/sinks.rs b/crates/bevy_audio/src/sinks.rs index d4be43261fde0..b0c77456e1db6 100644 --- a/crates/bevy_audio/src/sinks.rs +++ b/crates/bevy_audio/src/sinks.rs @@ -3,37 +3,26 @@ use bevy_math::Vec3; use bevy_transform::prelude::Transform; use rodio::{Sink, SpatialSink}; +use crate::Volume; + /// Common interactions with an audio sink. pub trait AudioSinkPlayback { - /// Gets the volume of the sound. - /// - /// The value `1.0` is the "normal" volume (unfiltered input). Any value - /// other than `1.0` will multiply each sample by this value. + /// Gets the volume of the sound as a [`Volume`]. /// /// If the sink is muted, this returns the managed volume rather than the - /// sink's actual volume. This allows you to use the volume as if the sink - /// were not muted, because a muted sink has a volume of 0. 
- fn volume(&self) -> f32; + /// sink's actual volume. This allows you to use the returned volume as if + /// the sink were not muted, because a muted sink has a physical volume of + /// 0. + fn volume(&self) -> Volume; - /// Changes the volume of the sound. - /// - /// The value `1.0` is the "normal" volume (unfiltered input). Any value other than `1.0` - /// will multiply each sample by this value. + /// Changes the volume of the sound to the given [`Volume`]. /// /// If the sink is muted, changing the volume won't unmute it, i.e. the - /// sink's volume will remain at `0.0`. However, the sink will remember the - /// volume change and it will be used when [`unmute`](Self::unmute) is - /// called. This allows you to control the volume even when the sink is - /// muted. - /// - /// # Note on Audio Volume - /// - /// An increase of 10 decibels (dB) roughly corresponds to the perceived volume doubling in intensity. - /// As this function scales not the volume but the amplitude, a conversion might be necessary. - /// For example, to halve the perceived volume you need to decrease the volume by 10 dB. - /// This corresponds to 20log(x) = -10dB, solving x = 10^(-10/20) = 0.316. - /// Multiply the current volume by 0.316 to halve the perceived volume. - fn set_volume(&mut self, volume: f32); + /// sink's volume will remain "off" / "muted". However, the sink will + /// remember the volume change and it will be used when + /// [`unmute`](Self::unmute) is called. This allows you to control the + /// volume even when the sink is muted. + fn set_volume(&mut self, volume: Volume); /// Gets the speed of the sound. /// @@ -132,7 +121,7 @@ pub struct AudioSink { /// If the sink is muted, this is `Some(volume)` where `volume` is the /// user's intended volume setting, even if the underlying sink's volume is /// 0. 
- pub(crate) managed_volume: Option, + pub(crate) managed_volume: Option, } impl AudioSink { @@ -146,15 +135,16 @@ impl AudioSink { } impl AudioSinkPlayback for AudioSink { - fn volume(&self) -> f32 { - self.managed_volume.unwrap_or_else(|| self.sink.volume()) + fn volume(&self) -> Volume { + self.managed_volume + .unwrap_or_else(|| Volume::Linear(self.sink.volume())) } - fn set_volume(&mut self, volume: f32) { + fn set_volume(&mut self, volume: Volume) { if self.is_muted() { self.managed_volume = Some(volume); } else { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } @@ -197,7 +187,7 @@ impl AudioSinkPlayback for AudioSink { fn unmute(&mut self) { if let Some(volume) = self.managed_volume.take() { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } } @@ -227,7 +217,7 @@ pub struct SpatialAudioSink { /// If the sink is muted, this is `Some(volume)` where `volume` is the /// user's intended volume setting, even if the underlying sink's volume is /// 0. 
- pub(crate) managed_volume: Option, + pub(crate) managed_volume: Option, } impl SpatialAudioSink { @@ -241,15 +231,16 @@ impl SpatialAudioSink { } impl AudioSinkPlayback for SpatialAudioSink { - fn volume(&self) -> f32 { - self.managed_volume.unwrap_or_else(|| self.sink.volume()) + fn volume(&self) -> Volume { + self.managed_volume + .unwrap_or_else(|| Volume::Linear(self.sink.volume())) } - fn set_volume(&mut self, volume: f32) { + fn set_volume(&mut self, volume: Volume) { if self.is_muted() { self.managed_volume = Some(volume); } else { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } @@ -292,7 +283,7 @@ impl AudioSinkPlayback for SpatialAudioSink { fn unmute(&mut self) { if let Some(volume) = self.managed_volume.take() { - self.sink.set_volume(volume); + self.sink.set_volume(volume.to_linear()); } } } @@ -326,11 +317,11 @@ mod tests { fn test_audio_sink_playback(mut audio_sink: T) { // Test volume - assert_eq!(audio_sink.volume(), 1.0); // default volume - audio_sink.set_volume(0.5); - assert_eq!(audio_sink.volume(), 0.5); - audio_sink.set_volume(1.0); - assert_eq!(audio_sink.volume(), 1.0); + assert_eq!(audio_sink.volume(), Volume::Linear(1.0)); // default volume + audio_sink.set_volume(Volume::Linear(0.5)); + assert_eq!(audio_sink.volume(), Volume::Linear(0.5)); + audio_sink.set_volume(Volume::Linear(1.0)); + assert_eq!(audio_sink.volume(), Volume::Linear(1.0)); // Test speed assert_eq!(audio_sink.speed(), 1.0); // default speed @@ -361,11 +352,11 @@ mod tests { assert!(!audio_sink.is_muted()); // Test volume with mute - audio_sink.set_volume(0.5); + audio_sink.set_volume(Volume::Linear(0.5)); audio_sink.mute(); - assert_eq!(audio_sink.volume(), 0.5); // returns managed volume even though sink volume is 0 + assert_eq!(audio_sink.volume(), Volume::Linear(0.5)); // returns managed volume even though sink volume is 0 audio_sink.unmute(); - assert_eq!(audio_sink.volume(), 0.5); // managed volume is restored + 
assert_eq!(audio_sink.volume(), Volume::Linear(0.5)); // managed volume is restored // Test toggle mute audio_sink.toggle_mute(); diff --git a/crates/bevy_audio/src/volume.rs b/crates/bevy_audio/src/volume.rs index f12fe0497fe3f..b1378ae485747 100644 --- a/crates/bevy_audio/src/volume.rs +++ b/crates/bevy_audio/src/volume.rs @@ -1,48 +1,504 @@ -use bevy_derive::Deref; use bevy_ecs::prelude::*; +use bevy_math::ops; use bevy_reflect::prelude::*; /// Use this [`Resource`] to control the global volume of all audio. /// -/// Note: changing this value will not affect already playing audio. -#[derive(Resource, Default, Clone, Copy, Reflect)] -#[reflect(Resource, Default)] +/// Note: Changing [`GlobalVolume`] does not affect already playing audio. +#[derive(Resource, Debug, Default, Clone, Copy, Reflect)] +#[reflect(Resource, Debug, Default, Clone)] pub struct GlobalVolume { /// The global volume of all audio. pub volume: Volume, } +impl From for GlobalVolume { + fn from(volume: Volume) -> Self { + Self { volume } + } +} + impl GlobalVolume { /// Create a new [`GlobalVolume`] with the given volume. - pub fn new(volume: f32) -> Self { - Self { - volume: Volume::new(volume), - } + pub fn new(volume: Volume) -> Self { + Self { volume } } } -/// A volume level equivalent to a non-negative float. -#[derive(Clone, Copy, Deref, Debug, Reflect)] -#[reflect(Debug)] -pub struct Volume(pub(crate) f32); +/// A [`Volume`] represents an audio source's volume level. +/// +/// To create a new [`Volume`] from a linear scale value, use +/// [`Volume::Linear`]. +/// +/// To create a new [`Volume`] from decibels, use [`Volume::Decibels`]. +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Clone, Debug, PartialEq)] +pub enum Volume { + /// Create a new [`Volume`] from the given volume in linear scale. + /// + /// In a linear scale, the value `1.0` represents the "normal" volume, + /// meaning the audio is played at its original level. 
Values greater than + /// `1.0` increase the volume, while values between `0.0` and `1.0` decrease + /// the volume. A value of `0.0` effectively mutes the audio. + /// + /// # Examples + /// + /// ``` + /// # use bevy_audio::Volume; + /// # use bevy_math::ops; + /// # + /// # const EPSILON: f32 = 0.01; + /// + /// let volume = Volume::Linear(0.5); + /// assert_eq!(volume.to_linear(), 0.5); + /// assert!(ops::abs(volume.to_decibels() - -6.0206) < EPSILON); + /// + /// let volume = Volume::Linear(0.0); + /// assert_eq!(volume.to_linear(), 0.0); + /// assert_eq!(volume.to_decibels(), f32::NEG_INFINITY); + /// + /// let volume = Volume::Linear(1.0); + /// assert_eq!(volume.to_linear(), 1.0); + /// assert!(ops::abs(volume.to_decibels() - 0.0) < EPSILON); + /// ``` + Linear(f32), + /// Create a new [`Volume`] from the given volume in decibels. + /// + /// In a decibel scale, the value `0.0` represents the "normal" volume, + /// meaning the audio is played at its original level. Values greater than + /// `0.0` increase the volume, while values less than `0.0` decrease the + /// volume. A value of [`f32::NEG_INFINITY`] decibels effectively mutes the + /// audio. 
+ /// + /// # Examples + /// + /// ``` + /// # use bevy_audio::Volume; + /// # use bevy_math::ops; + /// # + /// # const EPSILON: f32 = 0.01; + /// + /// let volume = Volume::Decibels(-5.998); + /// assert!(ops::abs(volume.to_linear() - 0.5) < EPSILON); + /// + /// let volume = Volume::Decibels(f32::NEG_INFINITY); + /// assert_eq!(volume.to_linear(), 0.0); + /// + /// let volume = Volume::Decibels(0.0); + /// assert_eq!(volume.to_linear(), 1.0); + /// + /// let volume = Volume::Decibels(20.0); + /// assert_eq!(volume.to_linear(), 10.0); + /// ``` + Decibels(f32), +} impl Default for Volume { fn default() -> Self { - Self(1.0) + Self::Linear(1.0) + } +} + +impl PartialEq for Volume { + fn eq(&self, other: &Self) -> bool { + use Volume::{Decibels, Linear}; + + match (self, other) { + (Linear(a), Linear(b)) => a.abs() == b.abs(), + (Decibels(a), Decibels(b)) => a == b, + (a, b) => a.to_decibels() == b.to_decibels(), + } + } +} + +impl PartialOrd for Volume { + fn partial_cmp(&self, other: &Self) -> Option { + use Volume::{Decibels, Linear}; + + Some(match (self, other) { + (Linear(a), Linear(b)) => a.abs().total_cmp(&b.abs()), + (Decibels(a), Decibels(b)) => a.total_cmp(b), + (a, b) => a.to_decibels().total_cmp(&b.to_decibels()), + }) } } +#[inline] +fn decibels_to_linear(decibels: f32) -> f32 { + ops::powf(10.0f32, decibels / 20.0) +} + +#[inline] +fn linear_to_decibels(linear: f32) -> f32 { + 20.0 * ops::log10(linear.abs()) +} + impl Volume { - /// Create a new volume level. - pub fn new(volume: f32) -> Self { - debug_assert!(volume >= 0.0); - Self(f32::max(volume, 0.)) + /// Returns the volume in linear scale as a float. + pub fn to_linear(&self) -> f32 { + match self { + Self::Linear(v) => v.abs(), + Self::Decibels(v) => decibels_to_linear(*v), + } + } + + /// Returns the volume in decibels as a float. + /// + /// If the volume is silent / off / muted, i.e. it's underlying linear scale + /// is `0.0`, this method returns negative infinity. 
+ pub fn to_decibels(&self) -> f32 { + match self { + Self::Linear(v) => linear_to_decibels(*v), + Self::Decibels(v) => *v, + } + } + + /// The silent volume. Also known as "off" or "muted". + pub const SILENT: Self = Volume::Linear(0.0); +} + +impl core::ops::Add for Volume { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a + b), + (Decibels(a), Decibels(b)) => Decibels(linear_to_decibels( + decibels_to_linear(a) + decibels_to_linear(b), + )), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. + (Linear(..), Decibels(db)) => self + Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self + Decibels(linear_to_decibels(l)), + } + } +} + +impl core::ops::AddAssign for Volume { + fn add_assign(&mut self, rhs: Self) { + *self = *self + rhs; + } +} + +impl core::ops::Sub for Volume { + type Output = Self; + + fn sub(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a - b), + (Decibels(a), Decibels(b)) => Decibels(linear_to_decibels( + decibels_to_linear(a) - decibels_to_linear(b), + )), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. + (Linear(..), Decibels(db)) => self - Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self - Decibels(linear_to_decibels(l)), + } } - /// Get the value of the volume level. 
- pub fn get(&self) -> f32 { - self.0 +} + +impl core::ops::SubAssign for Volume { + fn sub_assign(&mut self, rhs: Self) { + *self = *self - rhs; } +} + +impl core::ops::Mul for Volume { + type Output = Self; + + fn mul(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a * b), + (Decibels(a), Decibels(b)) => Decibels(a + b), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. + (Linear(..), Decibels(db)) => self * Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self * Decibels(linear_to_decibels(l)), + } + } +} + +impl core::ops::MulAssign for Volume { + fn mul_assign(&mut self, rhs: Self) { + *self = *self * rhs; + } +} - /// Zero (silent) volume level - pub const ZERO: Self = Volume(0.0); +impl core::ops::Div for Volume { + type Output = Self; + + fn div(self, rhs: Self) -> Self { + use Volume::{Decibels, Linear}; + + match (self, rhs) { + (Linear(a), Linear(b)) => Linear(a / b), + (Decibels(a), Decibels(b)) => Decibels(a - b), + // {Linear, Decibels} favors the left hand side of the operation by + // first converting the right hand side to the same type as the left + // hand side and then performing the operation. + (Linear(..), Decibels(db)) => self / Linear(decibels_to_linear(db)), + (Decibels(..), Linear(l)) => self / Decibels(linear_to_decibels(l)), + } + } +} + +impl core::ops::DivAssign for Volume { + fn div_assign(&mut self, rhs: Self) { + *self = *self / rhs; + } +} + +#[cfg(test)] +mod tests { + use super::Volume::{self, Decibels, Linear}; + + /// Based on [Wikipedia's Decibel article]. 
+ /// + /// [Wikipedia's Decibel article]: https://web.archive.org/web/20230810185300/https://en.wikipedia.org/wiki/Decibel + const DECIBELS_LINEAR_TABLE: [(f32, f32); 27] = [ + (100., 100000.), + (90., 31623.), + (80., 10000.), + (70., 3162.), + (60., 1000.), + (50., 316.2), + (40., 100.), + (30., 31.62), + (20., 10.), + (10., 3.162), + (5.998, 1.995), + (3.003, 1.413), + (1.002, 1.122), + (0., 1.), + (-1.002, 0.891), + (-3.003, 0.708), + (-5.998, 0.501), + (-10., 0.3162), + (-20., 0.1), + (-30., 0.03162), + (-40., 0.01), + (-50., 0.003162), + (-60., 0.001), + (-70., 0.0003162), + (-80., 0.0001), + (-90., 0.00003162), + (-100., 0.00001), + ]; + + #[test] + fn volume_conversion() { + for (db, linear) in DECIBELS_LINEAR_TABLE { + for volume in [Linear(linear), Decibels(db), Linear(-linear)] { + let db_test = volume.to_decibels(); + let linear_test = volume.to_linear(); + + let db_delta = db_test - db; + let linear_relative_delta = (linear_test - linear) / linear; + + assert!( + db_delta.abs() < 1e-2, + "Expected ~{}dB, got {}dB (delta {})", + db, + db_test, + db_delta + ); + assert!( + linear_relative_delta.abs() < 1e-3, + "Expected ~{}, got {} (relative delta {})", + linear, + linear_test, + linear_relative_delta + ); + } + } + } + + #[test] + fn volume_conversion_special() { + assert!( + Decibels(f32::INFINITY).to_linear().is_infinite(), + "Infinite decibels is equivalent to infinite linear scale" + ); + assert!( + Linear(f32::INFINITY).to_decibels().is_infinite(), + "Infinite linear scale is equivalent to infinite decibels" + ); + + assert!( + Linear(f32::NEG_INFINITY).to_decibels().is_infinite(), + "Negative infinite linear scale is equivalent to infinite decibels" + ); + assert!( + Decibels(f32::NEG_INFINITY).to_linear().abs() == 0.0, + "Negative infinity decibels is equivalent to zero linear scale" + ); + + assert!( + Linear(0.0).to_decibels().is_infinite(), + "Zero linear scale is equivalent to negative infinity decibels" + ); + assert!( + 
Linear(-0.0).to_decibels().is_infinite(), + "Negative zero linear scale is equivalent to negative infinity decibels" + ); + + assert!( + Decibels(f32::NAN).to_linear().is_nan(), + "NaN decibels is equivalent to NaN linear scale" + ); + assert!( + Linear(f32::NAN).to_decibels().is_nan(), + "NaN linear scale is equivalent to NaN decibels" + ); + } + + fn assert_approx_eq(a: Volume, b: Volume) { + const EPSILON: f32 = 0.0001; + + match (a, b) { + (Decibels(a), Decibels(b)) | (Linear(a), Linear(b)) => assert!( + (a - b).abs() < EPSILON, + "Expected {:?} to be approximately equal to {:?}", + a, + b + ), + (a, b) => assert!( + (a.to_decibels() - b.to_decibels()).abs() < EPSILON, + "Expected {:?} to be approximately equal to {:?}", + a, + b + ), + } + } + + #[test] + fn volume_ops_add() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) + Linear(0.5), Linear(1.0)); + assert_approx_eq(Linear(0.5) + Linear(0.1), Linear(0.6)); + assert_approx_eq(Linear(0.5) + Linear(-0.5), Linear(0.0)); + + // Decibels to Decibels. + assert_approx_eq(Decibels(0.0) + Decibels(0.0), Decibels(6.0206003)); + assert_approx_eq(Decibels(6.0) + Decibels(6.0), Decibels(12.020599)); + assert_approx_eq(Decibels(-6.0) + Decibels(-6.0), Decibels(0.020599423)); + + // {Linear, Decibels} favors the left hand side of the operation. + assert_approx_eq(Linear(0.5) + Decibels(0.0), Linear(1.5)); + assert_approx_eq(Decibels(0.0) + Linear(0.5), Decibels(3.521825)); + } + + #[test] + fn volume_ops_add_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume += Linear(0.5); + assert_approx_eq(volume, Linear(1.0)); + } + + #[test] + fn volume_ops_sub() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) - Linear(0.5), Linear(0.0)); + assert_approx_eq(Linear(0.5) - Linear(0.1), Linear(0.4)); + assert_approx_eq(Linear(0.5) - Linear(-0.5), Linear(1.0)); + + // Decibels to Decibels. 
+ assert_eq!(Decibels(0.0) - Decibels(0.0), Decibels(f32::NEG_INFINITY)); + assert_approx_eq(Decibels(6.0) - Decibels(4.0), Decibels(-7.736506)); + assert_eq!(Decibels(-6.0) - Decibels(-6.0), Decibels(f32::NEG_INFINITY)); + } + + #[test] + fn volume_ops_sub_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume -= Linear(0.5); + assert_approx_eq(volume, Linear(0.0)); + } + + #[test] + fn volume_ops_mul() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) * Linear(0.5), Linear(0.25)); + assert_approx_eq(Linear(0.5) * Linear(0.1), Linear(0.05)); + assert_approx_eq(Linear(0.5) * Linear(-0.5), Linear(-0.25)); + + // Decibels to Decibels. + assert_approx_eq(Decibels(0.0) * Decibels(0.0), Decibels(0.0)); + assert_approx_eq(Decibels(6.0) * Decibels(6.0), Decibels(12.0)); + assert_approx_eq(Decibels(-6.0) * Decibels(-6.0), Decibels(-12.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + assert_approx_eq(Linear(0.5) * Decibels(0.0), Linear(0.5)); + assert_approx_eq(Decibels(0.0) * Linear(0.501), Decibels(-6.003246)); + } + + #[test] + fn volume_ops_mul_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume *= Linear(0.5); + assert_approx_eq(volume, Linear(0.25)); + + // Decibels to Decibels. + let mut volume = Decibels(6.0); + volume *= Decibels(6.0); + assert_approx_eq(volume, Decibels(12.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + let mut volume = Linear(0.5); + volume *= Decibels(0.0); + assert_approx_eq(volume, Linear(0.5)); + let mut volume = Decibels(0.0); + volume *= Linear(0.501); + assert_approx_eq(volume, Decibels(-6.003246)); + } + + #[test] + fn volume_ops_div() { + // Linear to Linear. + assert_approx_eq(Linear(0.5) / Linear(0.5), Linear(1.0)); + assert_approx_eq(Linear(0.5) / Linear(0.1), Linear(5.0)); + assert_approx_eq(Linear(0.5) / Linear(-0.5), Linear(-1.0)); + + // Decibels to Decibels. 
+ assert_approx_eq(Decibels(0.0) / Decibels(0.0), Decibels(0.0)); + assert_approx_eq(Decibels(6.0) / Decibels(6.0), Decibels(0.0)); + assert_approx_eq(Decibels(-6.0) / Decibels(-6.0), Decibels(0.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + assert_approx_eq(Linear(0.5) / Decibels(0.0), Linear(0.5)); + assert_approx_eq(Decibels(0.0) / Linear(0.501), Decibels(6.003246)); + } + + #[test] + fn volume_ops_div_assign() { + // Linear to Linear. + let mut volume = Linear(0.5); + volume /= Linear(0.5); + assert_approx_eq(volume, Linear(1.0)); + + // Decibels to Decibels. + let mut volume = Decibels(6.0); + volume /= Decibels(6.0); + assert_approx_eq(volume, Decibels(0.0)); + + // {Linear, Decibels} favors the left hand side of the operation. + let mut volume = Linear(0.5); + volume /= Decibels(0.0); + assert_approx_eq(volume, Linear(0.5)); + let mut volume = Decibels(0.0); + volume /= Linear(0.501); + assert_approx_eq(volume, Decibels(6.003246)); + } } diff --git a/crates/bevy_color/Cargo.toml b/crates/bevy_color/Cargo.toml index cb53472950161..9b6d7d8cf6b8d 100644 --- a/crates/bevy_color/Cargo.toml +++ b/crates/bevy_color/Cargo.toml @@ -1,39 +1,44 @@ [package] name = "bevy_color" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Types for representing and manipulating color values" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy", "color"] -rust-version = "1.82.0" +rust-version = "1.85.0" [dependencies] -bevy_math = { path = "../bevy_math", version = "0.15.0-dev", default-features = false, features = [ +bevy_math = { path = "../bevy_math", version = "0.16.0-dev", default-features = false, features = [ "curve", ] } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", -], optional = true } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = 
false, optional = true } bytemuck = { version = "1", features = ["derive"] } serde = { version = "1.0", features = [ "derive", ], default-features = false, optional = true } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } -wgpu-types = { version = "23", default-features = false, optional = true } +wgpu-types = { version = "24", default-features = false, optional = true } encase = { version = "0.10", default-features = false, optional = true } [features] default = ["std", "bevy_reflect", "encase"] -std = ["alloc", "bevy_math/std", "serde?/std"] +std = [ + "alloc", + "bevy_math/std", + "serde?/std", + "wgpu-types?/std", + "bevy_reflect?/std", +] alloc = ["bevy_math/alloc", "serde?/alloc"] serialize = ["serde", "bevy_math/serialize"] -bevy_reflect = ["dep:bevy_reflect", "std"] -wgpu-types = ["dep:wgpu-types", "std"] +bevy_reflect = ["dep:bevy_reflect"] +wgpu-types = ["dep:wgpu-types"] encase = ["dep:encase", "std"] libm = ["bevy_math/libm"] +critical-section = ["bevy_reflect?/critical-section"] [lints] workspace = true diff --git a/crates/bevy_color/LICENSE-APACHE b/crates/bevy_color/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_color/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_color/LICENSE-MIT b/crates/bevy_color/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_color/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_color/crates/gen_tests/Cargo.toml b/crates/bevy_color/crates/gen_tests/Cargo.toml index 357e7aaba6f06..e0f5940d505a8 100644 --- a/crates/bevy_color/crates/gen_tests/Cargo.toml +++ b/crates/bevy_color/crates/gen_tests/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "gen_tests" version = "0.1.0" -edition = "2021" +edition = "2024" publish = false [workspace] diff --git a/crates/bevy_color/src/color.rs b/crates/bevy_color/src/color.rs index d2e4cb792187c..832394449bc4f 100644 --- a/crates/bevy_color/src/color.rs +++ b/crates/bevy_color/src/color.rs @@ -1,6 +1,6 @@ use crate::{ color_difference::EuclideanDistance, Alpha, Hsla, Hsva, Hue, Hwba, Laba, Lcha, LinearRgba, - Luminance, Mix, Oklaba, Oklcha, Srgba, StandardColor, Xyza, + Luminance, Mix, Oklaba, Oklcha, Saturation, Srgba, StandardColor, Xyza, }; #[cfg(feature = "bevy_reflect")] use bevy_reflect::prelude::*; @@ -42,7 +42,11 @@ use derive_more::derive::From; /// To avoid the cost of repeated conversion, and ensure consistent results where that is desired, /// first convert this [`Color`] into your desired color space. #[derive(Debug, Clone, Copy, PartialEq, From)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -84,13 +88,14 @@ impl Color { (*self).into() } - #[deprecated = "Use `Color::srgba` instead"] - /// Creates a new [`Color`] object storing a [`Srgba`] color. - pub const fn rgba(red: f32, green: f32, blue: f32, alpha: f32) -> Self { - Self::srgba(red, green, blue, alpha) - } - /// Creates a new [`Color`] object storing a [`Srgba`] color. + /// + /// # Arguments + /// + /// * `red` - Red channel. [0.0, 1.0] + /// * `green` - Green channel. [0.0, 1.0] + /// * `blue` - Blue channel. 
[0.0, 1.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn srgba(red: f32, green: f32, blue: f32, alpha: f32) -> Self { Self::Srgba(Srgba { red, @@ -100,13 +105,13 @@ impl Color { }) } - #[deprecated = "Use `Color::srgb` instead"] - /// Creates a new [`Color`] object storing a [`Srgba`] color with an alpha of 1.0. - pub const fn rgb(red: f32, green: f32, blue: f32) -> Self { - Self::srgb(red, green, blue) - } - /// Creates a new [`Color`] object storing a [`Srgba`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `red` - Red channel. [0.0, 1.0] + /// * `green` - Green channel. [0.0, 1.0] + /// * `blue` - Blue channel. [0.0, 1.0] pub const fn srgb(red: f32, green: f32, blue: f32) -> Self { Self::Srgba(Srgba { red, @@ -116,13 +121,10 @@ impl Color { }) } - #[deprecated = "Use `Color::srgb_from_array` instead"] - /// Reads an array of floats to creates a new [`Color`] object storing a [`Srgba`] color with an alpha of 1.0. - pub fn rgb_from_array([r, g, b]: [f32; 3]) -> Self { - Self::Srgba(Srgba::rgb(r, g, b)) - } - /// Reads an array of floats to creates a new [`Color`] object storing a [`Srgba`] color with an alpha of 1.0. + /// + /// # Arguments + /// * `array` - Red, Green and Blue channels. Each channel is in the range [0.0, 1.0] pub const fn srgb_from_array(array: [f32; 3]) -> Self { Self::Srgba(Srgba { red: array[0], @@ -132,17 +134,14 @@ impl Color { }) } - #[deprecated = "Use `Color::srgba_u8` instead"] /// Creates a new [`Color`] object storing a [`Srgba`] color from [`u8`] values. /// - /// A value of 0 is interpreted as 0.0, and a value of 255 is interpreted as 1.0. - pub fn rgba_u8(red: u8, green: u8, blue: u8, alpha: u8) -> Self { - Self::srgba_u8(red, green, blue, alpha) - } - - /// Creates a new [`Color`] object storing a [`Srgba`] color from [`u8`] values. + /// # Arguments /// - /// A value of 0 is interpreted as 0.0, and a value of 255 is interpreted as 1.0. + /// * `red` - Red channel. 
[0, 255] + /// * `green` - Green channel. [0, 255] + /// * `blue` - Blue channel. [0, 255] + /// * `alpha` - Alpha channel. [0, 255] pub const fn srgba_u8(red: u8, green: u8, blue: u8, alpha: u8) -> Self { Self::Srgba(Srgba { red: red as f32 / 255.0, @@ -152,17 +151,13 @@ impl Color { }) } - #[deprecated = "Use `Color::srgb_u8` instead"] /// Creates a new [`Color`] object storing a [`Srgba`] color from [`u8`] values with an alpha of 1.0. /// - /// A value of 0 is interpreted as 0.0, and a value of 255 is interpreted as 1.0. - pub fn rgb_u8(red: u8, green: u8, blue: u8) -> Self { - Self::srgb_u8(red, green, blue) - } - - /// Creates a new [`Color`] object storing a [`Srgba`] color from [`u8`] values with an alpha of 1.0. + /// # Arguments /// - /// A value of 0 is interpreted as 0.0, and a value of 255 is interpreted as 1.0. + /// * `red` - Red channel. [0, 255] + /// * `green` - Green channel. [0, 255] + /// * `blue` - Blue channel. [0, 255] pub const fn srgb_u8(red: u8, green: u8, blue: u8) -> Self { Self::Srgba(Srgba { red: red as f32 / 255.0, @@ -172,13 +167,14 @@ impl Color { }) } - #[deprecated = "Use Color::linear_rgba instead."] - /// Creates a new [`Color`] object storing a [`LinearRgba`] color. - pub const fn rbga_linear(red: f32, green: f32, blue: f32, alpha: f32) -> Self { - Self::linear_rgba(red, green, blue, alpha) - } - /// Creates a new [`Color`] object storing a [`LinearRgba`] color. + /// + /// # Arguments + /// + /// * `red` - Red channel. [0.0, 1.0] + /// * `green` - Green channel. [0.0, 1.0] + /// * `blue` - Blue channel. [0.0, 1.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn linear_rgba(red: f32, green: f32, blue: f32, alpha: f32) -> Self { Self::LinearRgba(LinearRgba { red, @@ -188,13 +184,13 @@ impl Color { }) } - #[deprecated = "Use Color::linear_rgb instead."] - /// Creates a new [`Color`] object storing a [`LinearRgba`] color with an alpha of 1.0. 
- pub const fn rgb_linear(red: f32, green: f32, blue: f32) -> Self { - Self::linear_rgb(red, green, blue) - } - /// Creates a new [`Color`] object storing a [`LinearRgba`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `red` - Red channel. [0.0, 1.0] + /// * `green` - Green channel. [0.0, 1.0] + /// * `blue` - Blue channel. [0.0, 1.0] pub const fn linear_rgb(red: f32, green: f32, blue: f32) -> Self { Self::LinearRgba(LinearRgba { red, @@ -205,6 +201,13 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Hsla`] color. + /// + /// # Arguments + /// + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `saturation` - Saturation channel. [0.0, 1.0] + /// * `lightness` - Lightness channel. [0.0, 1.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn hsla(hue: f32, saturation: f32, lightness: f32, alpha: f32) -> Self { Self::Hsla(Hsla { hue, @@ -215,6 +218,12 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Hsla`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `saturation` - Saturation channel. [0.0, 1.0] + /// * `lightness` - Lightness channel. [0.0, 1.0] pub const fn hsl(hue: f32, saturation: f32, lightness: f32) -> Self { Self::Hsla(Hsla { hue, @@ -225,6 +234,13 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Hsva`] color. + /// + /// # Arguments + /// + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `saturation` - Saturation channel. [0.0, 1.0] + /// * `value` - Value channel. [0.0, 1.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn hsva(hue: f32, saturation: f32, value: f32, alpha: f32) -> Self { Self::Hsva(Hsva { hue, @@ -235,6 +251,12 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Hsva`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `saturation` - Saturation channel. [0.0, 1.0] + /// * `value` - Value channel. 
[0.0, 1.0] pub const fn hsv(hue: f32, saturation: f32, value: f32) -> Self { Self::Hsva(Hsva { hue, @@ -245,6 +267,13 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Hwba`] color. + /// + /// # Arguments + /// + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `whiteness` - Whiteness channel. [0.0, 1.0] + /// * `blackness` - Blackness channel. [0.0, 1.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn hwba(hue: f32, whiteness: f32, blackness: f32, alpha: f32) -> Self { Self::Hwba(Hwba { hue, @@ -255,6 +284,12 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Hwba`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `whiteness` - Whiteness channel. [0.0, 1.0] + /// * `blackness` - Blackness channel. [0.0, 1.0] pub const fn hwb(hue: f32, whiteness: f32, blackness: f32) -> Self { Self::Hwba(Hwba { hue, @@ -265,6 +300,13 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Laba`] color. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.5] + /// * `a` - a axis. [-1.5, 1.5] + /// * `b` - b axis. [-1.5, 1.5] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn laba(lightness: f32, a: f32, b: f32, alpha: f32) -> Self { Self::Laba(Laba { lightness, @@ -275,6 +317,12 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Laba`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.5] + /// * `a` - a axis. [-1.5, 1.5] + /// * `b` - b axis. [-1.5, 1.5] pub const fn lab(lightness: f32, a: f32, b: f32) -> Self { Self::Laba(Laba { lightness, @@ -285,6 +333,13 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Lcha`] color. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.5] + /// * `chroma` - Chroma channel. [0.0, 1.5] + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `alpha` - Alpha channel. 
[0.0, 1.0] pub const fn lcha(lightness: f32, chroma: f32, hue: f32, alpha: f32) -> Self { Self::Lcha(Lcha { lightness, @@ -295,6 +350,12 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Lcha`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.5] + /// * `chroma` - Chroma channel. [0.0, 1.5] + /// * `hue` - Hue channel. [0.0, 360.0] pub const fn lch(lightness: f32, chroma: f32, hue: f32) -> Self { Self::Lcha(Lcha { lightness, @@ -305,6 +366,13 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Oklaba`] color. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.0] + /// * `a` - Green-red channel. [-1.0, 1.0] + /// * `b` - Blue-yellow channel. [-1.0, 1.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn oklaba(lightness: f32, a: f32, b: f32, alpha: f32) -> Self { Self::Oklaba(Oklaba { lightness, @@ -315,6 +383,12 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Oklaba`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.0] + /// * `a` - Green-red channel. [-1.0, 1.0] + /// * `b` - Blue-yellow channel. [-1.0, 1.0] pub const fn oklab(lightness: f32, a: f32, b: f32) -> Self { Self::Oklaba(Oklaba { lightness, @@ -325,6 +399,13 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Oklcha`] color. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.0] + /// * `chroma` - Chroma channel. [0.0, 1.0] + /// * `hue` - Hue channel. [0.0, 360.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn oklcha(lightness: f32, chroma: f32, hue: f32, alpha: f32) -> Self { Self::Oklcha(Oklcha { lightness, @@ -335,6 +416,12 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Oklcha`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `lightness` - Lightness channel. [0.0, 1.0] + /// * `chroma` - Chroma channel. 
[0.0, 1.0] + /// * `hue` - Hue channel. [0.0, 360.0] pub const fn oklch(lightness: f32, chroma: f32, hue: f32) -> Self { Self::Oklcha(Oklcha { lightness, @@ -345,11 +432,24 @@ impl Color { } /// Creates a new [`Color`] object storing a [`Xyza`] color. + /// + /// # Arguments + /// + /// * `x` - x-axis. [0.0, 1.0] + /// * `y` - y-axis. [0.0, 1.0] + /// * `z` - z-axis. [0.0, 1.0] + /// * `alpha` - Alpha channel. [0.0, 1.0] pub const fn xyza(x: f32, y: f32, z: f32, alpha: f32) -> Self { Self::Xyza(Xyza { x, y, z, alpha }) } /// Creates a new [`Color`] object storing a [`Xyza`] color with an alpha of 1.0. + /// + /// # Arguments + /// + /// * `x` - x-axis. [0.0, 1.0] + /// * `y` - y-axis. [0.0, 1.0] + /// * `z` - z-axis. [0.0, 1.0] pub const fn xyz(x: f32, y: f32, z: f32) -> Self { Self::Xyza(Xyza { x, @@ -714,6 +814,44 @@ impl Hue for Color { } } +impl Saturation for Color { + fn with_saturation(&self, saturation: f32) -> Self { + let mut new = *self; + + match &mut new { + Color::Srgba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::LinearRgba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Hsla(x) => x.with_saturation(saturation).into(), + Color::Hsva(x) => x.with_saturation(saturation).into(), + Color::Hwba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Laba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Lcha(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Oklaba(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Oklcha(x) => Hsla::from(*x).with_saturation(saturation).into(), + Color::Xyza(x) => Hsla::from(*x).with_saturation(saturation).into(), + } + } + + fn saturation(&self) -> f32 { + match self { + Color::Srgba(x) => Hsla::from(*x).saturation(), + Color::LinearRgba(x) => Hsla::from(*x).saturation(), + Color::Hsla(x) => x.saturation(), + Color::Hsva(x) => x.saturation(), + Color::Hwba(x) => Hsla::from(*x).saturation(), + Color::Laba(x) => 
Hsla::from(*x).saturation(), + Color::Lcha(x) => Hsla::from(*x).saturation(), + Color::Oklaba(x) => Hsla::from(*x).saturation(), + Color::Oklcha(x) => Hsla::from(*x).saturation(), + Color::Xyza(x) => Hsla::from(*x).saturation(), + } + } + + fn set_saturation(&mut self, saturation: f32) { + *self = self.with_saturation(saturation); + } +} + impl Mix for Color { fn mix(&self, other: &Self, factor: f32) -> Self { let mut new = *self; diff --git a/crates/bevy_color/src/color_gradient.rs b/crates/bevy_color/src/color_gradient.rs index 759b33bf93e77..b087205bb6b49 100644 --- a/crates/bevy_color/src/color_gradient.rs +++ b/crates/bevy_color/src/color_gradient.rs @@ -76,6 +76,7 @@ where mod tests { use super::*; use crate::{palettes::basic, Srgba}; + use bevy_math::curve::{Curve, CurveExt}; #[test] fn test_color_curve() { diff --git a/crates/bevy_color/src/color_ops.rs b/crates/bevy_color/src/color_ops.rs index 235c8c8bf3ae1..776ee906f9fb7 100644 --- a/crates/bevy_color/src/color_ops.rs +++ b/crates/bevy_color/src/color_ops.rs @@ -60,7 +60,7 @@ pub trait Alpha: Sized { /// Return a new version of this color with the given alpha value. fn with_alpha(&self, alpha: f32) -> Self; - /// Return a the alpha component of this color. + /// Return the alpha component of this color. fn alpha(&self) -> f32; /// Sets the alpha component of this color. @@ -95,6 +95,21 @@ pub trait Hue: Sized { } } +/// Trait for manipulating the saturation of a color. +/// +/// When working with color spaces that do not have native saturation components +/// the operations are performed in [`crate::Hsla`]. +pub trait Saturation: Sized { + /// Return a new version of this color with the saturation channel set to the given value. + fn with_saturation(&self, saturation: f32) -> Self; + + /// Return the saturation of this color [0.0, 1.0]. + fn saturation(&self) -> f32; + + /// Sets the saturation of this color. 
+ fn set_saturation(&mut self, saturation: f32); +} + /// Trait with methods for converting colors to non-color types pub trait ColorToComponents { /// Convert to an f32 array diff --git a/crates/bevy_color/src/hsla.rs b/crates/bevy_color/src/hsla.rs index 6b26fbff8de34..b29fce72ac9dc 100644 --- a/crates/bevy_color/src/hsla.rs +++ b/crates/bevy_color/src/hsla.rs @@ -1,6 +1,6 @@ use crate::{ - Alpha, ColorToComponents, Gray, Hsva, Hue, Hwba, Lcha, LinearRgba, Luminance, Mix, Srgba, - StandardColor, Xyza, + Alpha, ColorToComponents, Gray, Hsva, Hue, Hwba, Lcha, LinearRgba, Luminance, Mix, Saturation, + Srgba, StandardColor, Xyza, }; use bevy_math::{Vec3, Vec4}; #[cfg(feature = "bevy_reflect")] @@ -13,7 +13,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -159,6 +163,26 @@ impl Hue for Hsla { } } +impl Saturation for Hsla { + #[inline] + fn with_saturation(&self, saturation: f32) -> Self { + Self { + saturation, + ..*self + } + } + + #[inline] + fn saturation(&self) -> f32 { + self.saturation + } + + #[inline] + fn set_saturation(&mut self, saturation: f32) { + self.saturation = saturation; + } +} + impl Luminance for Hsla { #[inline] fn with_luminance(&self, lightness: f32) -> Self { diff --git a/crates/bevy_color/src/hsva.rs b/crates/bevy_color/src/hsva.rs index e708ccf67e5b0..9e94eb24f672e 100644 --- a/crates/bevy_color/src/hsva.rs +++ b/crates/bevy_color/src/hsva.rs @@ -1,5 +1,6 @@ use crate::{ - Alpha, ColorToComponents, Gray, Hue, Hwba, Lcha, LinearRgba, Mix, Srgba, StandardColor, Xyza, + Alpha, ColorToComponents, Gray, Hue, Hwba, Lcha, 
LinearRgba, Mix, Saturation, Srgba, + StandardColor, Xyza, }; use bevy_math::{Vec3, Vec4}; #[cfg(feature = "bevy_reflect")] @@ -12,7 +13,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -129,6 +134,26 @@ impl Hue for Hsva { } } +impl Saturation for Hsva { + #[inline] + fn with_saturation(&self, saturation: f32) -> Self { + Self { + saturation, + ..*self + } + } + + #[inline] + fn saturation(&self) -> f32 { + self.saturation + } + + #[inline] + fn set_saturation(&mut self, saturation: f32) { + self.saturation = saturation; + } +} + impl From for Hwba { fn from( Hsva { diff --git a/crates/bevy_color/src/hwba.rs b/crates/bevy_color/src/hwba.rs index 459b5d82dc4fd..36d328658d575 100644 --- a/crates/bevy_color/src/hwba.rs +++ b/crates/bevy_color/src/hwba.rs @@ -16,7 +16,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/laba.rs b/crates/bevy_color/src/laba.rs index 39ac37f8ffa1a..010b3df249678 100644 --- a/crates/bevy_color/src/laba.rs +++ b/crates/bevy_color/src/laba.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, 
Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/lcha.rs b/crates/bevy_color/src/lcha.rs index f1437d34969aa..e5f5ecab32ea4 100644 --- a/crates/bevy_color/src/lcha.rs +++ b/crates/bevy_color/src/lcha.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/lib.rs b/crates/bevy_color/src/lib.rs index 4a4a9596d545d..e1ee1fbe38cd0 100644 --- a/crates/bevy_color/src/lib.rs +++ b/crates/bevy_color/src/lib.rs @@ -4,7 +4,7 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] //! Representations of colors in various color spaces. //! @@ -90,6 +90,9 @@ //! println!("Hsla: {:?}", hsla); //! ``` +#[cfg(feature = "std")] +extern crate std; + #[cfg(feature = "alloc")] extern crate alloc; @@ -142,7 +145,14 @@ pub use srgba::*; pub use xyza::*; /// Describes the traits that a color should implement for consistency. 
-#[allow(dead_code)] // This is an internal marker trait used to ensure that our color types impl the required traits +#[expect( + clippy::allow_attributes, + reason = "If the below attribute on `dead_code` is removed, then rustc complains that `StandardColor` is dead code. However, if we `expect` the `dead_code` lint, then rustc complains of an unfulfilled expectation." +)] +#[allow( + dead_code, + reason = "This is an internal marker trait used to ensure that our color types impl the required traits" +)] pub(crate) trait StandardColor where Self: core::fmt::Debug, diff --git a/crates/bevy_color/src/linear_rgba.rs b/crates/bevy_color/src/linear_rgba.rs index d1781bfc4192c..d00d765aaccd0 100644 --- a/crates/bevy_color/src/linear_rgba.rs +++ b/crates/bevy_color/src/linear_rgba.rs @@ -13,7 +13,11 @@ use bytemuck::{Pod, Zeroable}; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq, Pod, Zeroable)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/oklaba.rs b/crates/bevy_color/src/oklaba.rs index 0ffb35ddd33db..0203ca6a695e8 100644 --- a/crates/bevy_color/src/oklaba.rs +++ b/crates/bevy_color/src/oklaba.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -216,7 +220,6 @@ impl 
ColorToComponents for Oklaba { } } -#[allow(clippy::excessive_precision)] impl From for Oklaba { fn from(value: LinearRgba) -> Self { let LinearRgba { @@ -225,21 +228,21 @@ impl From for Oklaba { blue, alpha, } = value; - // From https://github.com/DougLau/pix - let l = 0.4122214708 * red + 0.5363325363 * green + 0.0514459929 * blue; - let m = 0.2119034982 * red + 0.6806995451 * green + 0.1073969566 * blue; - let s = 0.0883024619 * red + 0.2817188376 * green + 0.6299787005 * blue; + // From https://bottosson.github.io/posts/oklab/#converting-from-linear-srgb-to-oklab + // Float literals are truncated to avoid excessive precision. + let l = 0.41222146 * red + 0.53633255 * green + 0.051445995 * blue; + let m = 0.2119035 * red + 0.6806995 * green + 0.10739696 * blue; + let s = 0.08830246 * red + 0.28171885 * green + 0.6299787 * blue; let l_ = ops::cbrt(l); let m_ = ops::cbrt(m); let s_ = ops::cbrt(s); - let l = 0.2104542553 * l_ + 0.7936177850 * m_ - 0.0040720468 * s_; - let a = 1.9779984951 * l_ - 2.4285922050 * m_ + 0.4505937099 * s_; - let b = 0.0259040371 * l_ + 0.7827717662 * m_ - 0.8086757660 * s_; + let l = 0.21045426 * l_ + 0.7936178 * m_ - 0.004072047 * s_; + let a = 1.9779985 * l_ - 2.4285922 * m_ + 0.4505937 * s_; + let b = 0.025904037 * l_ + 0.78277177 * m_ - 0.80867577 * s_; Oklaba::new(l, a, b, alpha) } } -#[allow(clippy::excessive_precision)] impl From for LinearRgba { fn from(value: Oklaba) -> Self { let Oklaba { @@ -249,18 +252,19 @@ impl From for LinearRgba { alpha, } = value; - // From https://github.com/Ogeon/palette/blob/e75eab2fb21af579353f51f6229a510d0d50a311/palette/src/oklab.rs#L312-L332 - let l_ = lightness + 0.3963377774 * a + 0.2158037573 * b; - let m_ = lightness - 0.1055613458 * a - 0.0638541728 * b; - let s_ = lightness - 0.0894841775 * a - 1.2914855480 * b; + // From https://bottosson.github.io/posts/oklab/#converting-from-linear-srgb-to-oklab + // Float literals are truncated to avoid excessive precision. 
+ let l_ = lightness + 0.39633778 * a + 0.21580376 * b; + let m_ = lightness - 0.105561346 * a - 0.06385417 * b; + let s_ = lightness - 0.08948418 * a - 1.2914855 * b; let l = l_ * l_ * l_; let m = m_ * m_ * m_; let s = s_ * s_ * s_; - let red = 4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s; - let green = -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s; - let blue = -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s; + let red = 4.0767417 * l - 3.3077116 * m + 0.23096994 * s; + let green = -1.268438 * l + 2.6097574 * m - 0.34131938 * s; + let blue = -0.0041960863 * l - 0.7034186 * m + 1.7076147 * s; Self { red, diff --git a/crates/bevy_color/src/oklcha.rs b/crates/bevy_color/src/oklcha.rs index 70c150ed0fc97..91ffe422c75c3 100644 --- a/crates/bevy_color/src/oklcha.rs +++ b/crates/bevy_color/src/oklcha.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_color/src/palettes/css.rs b/crates/bevy_color/src/palettes/css.rs index 0c1e073bec4b6..9b0dd7fe7e18f 100644 --- a/crates/bevy_color/src/palettes/css.rs +++ b/crates/bevy_color/src/palettes/css.rs @@ -4,7 +4,6 @@ use crate::Srgba; // The CSS4 colors are a superset of the CSS1 colors, so we can just re-export the CSS1 colors. -#[allow(unused_imports)] pub use crate::palettes::basic::*; ///

diff --git a/crates/bevy_color/src/srgba.rs b/crates/bevy_color/src/srgba.rs index 8f4549df3b563..ead2adf03928f 100644 --- a/crates/bevy_color/src/srgba.rs +++ b/crates/bevy_color/src/srgba.rs @@ -15,7 +15,11 @@ use thiserror::Error; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -141,17 +145,17 @@ impl Srgba { 3 => { let [l, b] = u16::from_str_radix(hex, 16)?.to_be_bytes(); let (r, g, b) = (l & 0x0F, (b & 0xF0) >> 4, b & 0x0F); - Ok(Self::rgb_u8(r << 4 | r, g << 4 | g, b << 4 | b)) + Ok(Self::rgb_u8((r << 4) | r, (g << 4) | g, (b << 4) | b)) } // RGBA 4 => { let [l, b] = u16::from_str_radix(hex, 16)?.to_be_bytes(); let (r, g, b, a) = ((l & 0xF0) >> 4, l & 0xF, (b & 0xF0) >> 4, b & 0x0F); Ok(Self::rgba_u8( - r << 4 | r, - g << 4 | g, - b << 4 | b, - a << 4 | a, + (r << 4) | r, + (g << 4) | g, + (b << 4) | b, + (a << 4) | a, )) } // RRGGBB diff --git a/crates/bevy_color/src/testing.rs b/crates/bevy_color/src/testing.rs index 0c87fe226c749..6c7747e2a540b 100644 --- a/crates/bevy_color/src/testing.rs +++ b/crates/bevy_color/src/testing.rs @@ -4,7 +4,7 @@ macro_rules! 
assert_approx_eq { if ($x - $y).abs() >= $d { panic!( "assertion failed: `(left !== right)` \ - (left: `{:?}`, right: `{:?}`, tolerance: `{:?}`)", + (left: `{}`, right: `{}`, tolerance: `{}`)", $x, $y, $d ); } diff --git a/crates/bevy_color/src/xyza.rs b/crates/bevy_color/src/xyza.rs index a9fb422bef110..c48a868416323 100644 --- a/crates/bevy_color/src/xyza.rs +++ b/crates/bevy_color/src/xyza.rs @@ -12,7 +12,11 @@ use bevy_reflect::prelude::*; #[doc = include_str!("../docs/diagrams/model_graph.svg")] /// #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(PartialEq, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Default) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_core_pipeline/Cargo.toml b/crates/bevy_core_pipeline/Cargo.toml index 3993b031e9e70..304c0071046e0 100644 --- a/crates/bevy_core_pipeline/Cargo.toml +++ b/crates/bevy_core_pipeline/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_core_pipeline" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" authors = [ "Bevy Contributors ", "Carter Anderson ", @@ -13,28 +13,30 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -dds = ["bevy_render/dds", "bevy_image/dds"] trace = [] webgl = [] webgpu = [] tonemapping_luts = ["bevy_render/ktx2", "bevy_image/ktx2", "bevy_image/zstd"] -smaa_luts = ["bevy_render/ktx2", "bevy_image/ktx2", "bevy_image/zstd"] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", 
version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev" } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", + "serialize", +] } serde = { version = "1", features = ["derive"] } bitflags = "2.3" @@ -42,6 +44,8 @@ radsort = "0.1" nonmax = "0.5" smallvec = "1" thiserror = { version = "2", default-features = false } +tracing = { version = "0.1", default-features = false, features = ["std"] } +bytemuck = { version = "1" } [lints] workspace = true diff --git a/crates/bevy_core_pipeline/LICENSE-APACHE b/crates/bevy_core_pipeline/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ 
b/crates/bevy_core_pipeline/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_core_pipeline/LICENSE-MIT b/crates/bevy_core_pipeline/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_core_pipeline/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs b/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs index 84b4011d2c557..38d55bc9ded47 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/buffers.rs @@ -1,11 +1,11 @@ use bevy_ecs::prelude::*; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_render::{ render_resource::{StorageBuffer, UniformBuffer}, renderer::{RenderDevice, RenderQueue}, sync_world::RenderEntity, Extract, }; -use bevy_utils::{Entry, HashMap}; use super::{pipeline::AutoExposureUniform, AutoExposure}; diff --git a/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs b/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs index 25ec27cee4df2..e2ffe1a6c4459 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/compensation_curve.rs @@ -18,7 +18,7 @@ const LUT_SIZE: usize = 256; /// This curve is used to map the average log luminance of a scene to an /// exposure compensation value, to allow for fine control over the final exposure. #[derive(Asset, Reflect, Debug, Clone)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct AutoExposureCompensationCurve { /// The minimum log luminance value in the curve. (the x-axis) min_log_lum: f32, @@ -136,7 +136,10 @@ impl AutoExposureCompensationCurve { let lut_inv_range = 1.0 / (lut_end - lut_begin); // Iterate over all LUT entries whose pixel centers fall within the current segment. - #[allow(clippy::needless_range_loop)] + #[expect( + clippy::needless_range_loop, + reason = "This for-loop also uses `i` to calculate a value `t`." 
+ )] for i in lut_begin.ceil() as usize..=lut_end.floor() as usize { let t = (i as f32 - lut_begin) * lut_inv_range; lut[i] = previous.y.lerp(current.y, t); diff --git a/crates/bevy_core_pipeline/src/auto_exposure/mod.rs b/crates/bevy_core_pipeline/src/auto_exposure/mod.rs index 59f314d12e1ab..f94a61d09be16 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/mod.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/mod.rs @@ -24,8 +24,7 @@ use node::AutoExposureNode; use pipeline::{ AutoExposurePass, AutoExposurePipeline, ViewAutoExposurePipeline, METERING_SHADER_HANDLE, }; -#[allow(deprecated)] -pub use settings::{AutoExposure, AutoExposureSettings}; +pub use settings::AutoExposure; use crate::{ auto_exposure::compensation_curve::GpuAutoExposureCompensationCurve, diff --git a/crates/bevy_core_pipeline/src/auto_exposure/pipeline.rs b/crates/bevy_core_pipeline/src/auto_exposure/pipeline.rs index 87d6abd8cf8c5..06fa118827fe7 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/pipeline.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/pipeline.rs @@ -1,7 +1,7 @@ use super::compensation_curve::{ AutoExposureCompensationCurve, AutoExposureCompensationCurveUniform, }; -use bevy_asset::prelude::*; +use bevy_asset::{prelude::*, weak_handle}; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_render::{ @@ -44,7 +44,8 @@ pub enum AutoExposurePass { Average, } -pub const METERING_SHADER_HANDLE: Handle = Handle::weak_from_u128(12987620402995522466); +pub const METERING_SHADER_HANDLE: Handle = + weak_handle!("05c84384-afa4-41d9-844e-e9cd5e7609af"); pub const HISTOGRAM_BIN_COUNT: u64 = 64; diff --git a/crates/bevy_core_pipeline/src/auto_exposure/settings.rs b/crates/bevy_core_pipeline/src/auto_exposure/settings.rs index 91bdf836eebee..cf6fdd4e24d81 100644 --- a/crates/bevy_core_pipeline/src/auto_exposure/settings.rs +++ b/crates/bevy_core_pipeline/src/auto_exposure/settings.rs @@ -24,7 +24,7 @@ use bevy_utils::default; /// /// **Auto Exposure requires compute 
shaders and is not compatible with WebGL2.** #[derive(Component, Clone, Reflect, ExtractComponent)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct AutoExposure { /// The range of exposure values for the histogram. /// @@ -88,9 +88,6 @@ pub struct AutoExposure { pub compensation_curve: Handle, } -#[deprecated(since = "0.15.0", note = "Renamed to `AutoExposure`")] -pub type AutoExposureSettings = AutoExposure; - impl Default for AutoExposure { fn default() -> Self { Self { diff --git a/crates/bevy_core_pipeline/src/blit/mod.rs b/crates/bevy_core_pipeline/src/blit/mod.rs index 96c0394f3034a..53c54c6d2d7aa 100644 --- a/crates/bevy_core_pipeline/src/blit/mod.rs +++ b/crates/bevy_core_pipeline/src/blit/mod.rs @@ -1,5 +1,5 @@ use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::prelude::*; use bevy_render::{ render_resource::{ @@ -12,7 +12,7 @@ use bevy_render::{ use crate::fullscreen_vertex_shader::fullscreen_shader_vertex_state; -pub const BLIT_SHADER_HANDLE: Handle = Handle::weak_from_u128(2312396983770133547); +pub const BLIT_SHADER_HANDLE: Handle = weak_handle!("59be3075-c34e-43e7-bf24-c8fe21a0192e"); /// Adds support for specialized "blit pipelines", which can be used to write one texture to another. pub struct BlitPlugin; diff --git a/crates/bevy_core_pipeline/src/bloom/bloom.wgsl b/crates/bevy_core_pipeline/src/bloom/bloom.wgsl index 0b10333192382..aa4a2f94c46a2 100644 --- a/crates/bevy_core_pipeline/src/bloom/bloom.wgsl +++ b/crates/bevy_core_pipeline/src/bloom/bloom.wgsl @@ -9,8 +9,8 @@ struct BloomUniforms { threshold_precomputations: vec4, viewport: vec4, + scale: vec2, aspect: f32, - uv_offset: f32 }; @group(0) @binding(0) var input_texture: texture_2d; @@ -51,6 +51,14 @@ fn karis_average(color: vec3) -> f32 { // [COD] slide 153 fn sample_input_13_tap(uv: vec2) -> vec3 { +#ifdef UNIFORM_SCALE + // This is the fast path. 
When the bloom scale is uniform, the 13 tap sampling kernel can be + // expressed with constant offsets. + // + // It's possible that this isn't meaningfully faster than the "slow" path. However, because it + // is hard to test performance on all platforms, and uniform bloom is the most common case, this + // path was retained when adding non-uniform (anamorphic) bloom. This adds a small, but nonzero, + // cost to maintainability, but it does help me sleep at night. let a = textureSample(input_texture, s, uv, vec2(-2, 2)).rgb; let b = textureSample(input_texture, s, uv, vec2(0, 2)).rgb; let c = textureSample(input_texture, s, uv, vec2(2, 2)).rgb; @@ -64,6 +72,35 @@ fn sample_input_13_tap(uv: vec2) -> vec3 { let k = textureSample(input_texture, s, uv, vec2(1, 1)).rgb; let l = textureSample(input_texture, s, uv, vec2(-1, -1)).rgb; let m = textureSample(input_texture, s, uv, vec2(1, -1)).rgb; +#else + // This is the flexible, but potentially slower, path for non-uniform sampling. Because the + // sample is not a constant, and it can fall outside of the limits imposed on constant sample + // offsets (-8..8), we have to compute the pixel offset in uv coordinates using the size of the + // texture. + // + // It isn't clear if this is meaningfully slower than using the offset syntax, the spec doesn't + // mention it anywhere: https://www.w3.org/TR/WGSL/#texturesample, but the fact that the offset + // syntax uses a const-expr implies that it allows some compiler optimizations - maybe more + // impactful on mobile? 
+ let scale = uniforms.scale; + let ps = scale / vec2(textureDimensions(input_texture)); + let pl = 2.0 * ps; + let ns = -1.0 * ps; + let nl = -2.0 * ps; + let a = textureSample(input_texture, s, uv + vec2(nl.x, pl.y)).rgb; + let b = textureSample(input_texture, s, uv + vec2(0.00, pl.y)).rgb; + let c = textureSample(input_texture, s, uv + vec2(pl.x, pl.y)).rgb; + let d = textureSample(input_texture, s, uv + vec2(nl.x, 0.00)).rgb; + let e = textureSample(input_texture, s, uv).rgb; + let f = textureSample(input_texture, s, uv + vec2(pl.x, 0.00)).rgb; + let g = textureSample(input_texture, s, uv + vec2(nl.x, nl.y)).rgb; + let h = textureSample(input_texture, s, uv + vec2(0.00, nl.y)).rgb; + let i = textureSample(input_texture, s, uv + vec2(pl.x, nl.y)).rgb; + let j = textureSample(input_texture, s, uv + vec2(ns.x, ps.y)).rgb; + let k = textureSample(input_texture, s, uv + vec2(ps.x, ps.y)).rgb; + let l = textureSample(input_texture, s, uv + vec2(ns.x, ns.y)).rgb; + let m = textureSample(input_texture, s, uv + vec2(ps.x, ns.y)).rgb; +#endif #ifdef FIRST_DOWNSAMPLE // [COD] slide 168 @@ -95,9 +132,11 @@ fn sample_input_13_tap(uv: vec2) -> vec3 { // [COD] slide 162 fn sample_input_3x3_tent(uv: vec2) -> vec3 { - // UV offsets configured from uniforms. - let x = uniforms.uv_offset / uniforms.aspect; - let y = uniforms.uv_offset; + // While this is probably technically incorrect, it makes nonuniform bloom smoother, without + // having any impact on uniform bloom, which simply evaluates to 1.0 here. 
+ let frag_size = uniforms.scale / vec2(textureDimensions(input_texture)); + let x = frag_size.x; + let y = frag_size.y; let a = textureSample(input_texture, s, vec2(uv.x - x, uv.y + y)).rgb; let b = textureSample(input_texture, s, vec2(uv.x, uv.y + y)).rgb; diff --git a/crates/bevy_core_pipeline/src/bloom/downsampling_pipeline.rs b/crates/bevy_core_pipeline/src/bloom/downsampling_pipeline.rs index e3efe5cad8946..544b420bfdb68 100644 --- a/crates/bevy_core_pipeline/src/bloom/downsampling_pipeline.rs +++ b/crates/bevy_core_pipeline/src/bloom/downsampling_pipeline.rs @@ -2,10 +2,11 @@ use super::{Bloom, BLOOM_SHADER_HANDLE, BLOOM_TEXTURE_FORMAT}; use crate::fullscreen_vertex_shader::fullscreen_shader_vertex_state; use bevy_ecs::{ prelude::{Component, Entity}, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + system::{Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; -use bevy_math::Vec4; +use bevy_math::{Vec2, Vec4}; use bevy_render::{ render_resource::{ binding_types::{sampler, texture_2d, uniform_buffer}, @@ -31,6 +32,7 @@ pub struct BloomDownsamplingPipeline { pub struct BloomDownsamplingPipelineKeys { prefilter: bool, first_downsample: bool, + uniform_scale: bool, } /// The uniform struct extracted from [`Bloom`] attached to a Camera. 
@@ -40,8 +42,8 @@ pub struct BloomUniforms { // Precomputed values used when thresholding, see https://catlikecoding.com/unity/tutorials/advanced-rendering/bloom/#3.4 pub threshold_precomputations: Vec4, pub viewport: Vec4, + pub scale: Vec2, pub aspect: f32, - pub uv_offset: f32, } impl FromWorld for BloomDownsamplingPipeline { @@ -102,6 +104,10 @@ impl SpecializedRenderPipeline for BloomDownsamplingPipeline { shader_defs.push("USE_THRESHOLD".into()); } + if key.uniform_scale { + shader_defs.push("UNIFORM_SCALE".into()); + } + RenderPipelineDescriptor { label: Some( if key.first_downsample { @@ -148,6 +154,7 @@ pub fn prepare_downsampling_pipeline( BloomDownsamplingPipelineKeys { prefilter, first_downsample: false, + uniform_scale: bloom.scale == Vec2::ONE, }, ); @@ -157,6 +164,7 @@ pub fn prepare_downsampling_pipeline( BloomDownsamplingPipelineKeys { prefilter, first_downsample: true, + uniform_scale: bloom.scale == Vec2::ONE, }, ); diff --git a/crates/bevy_core_pipeline/src/bloom/mod.rs b/crates/bevy_core_pipeline/src/bloom/mod.rs index bfd7ee22dbd10..8717b9096e188 100644 --- a/crates/bevy_core_pipeline/src/bloom/mod.rs +++ b/crates/bevy_core_pipeline/src/bloom/mod.rs @@ -2,18 +2,15 @@ mod downsampling_pipeline; mod settings; mod upsampling_pipeline; -use bevy_color::{Gray, LinearRgba}; -#[allow(deprecated)] -pub use settings::{ - Bloom, BloomCompositeMode, BloomPrefilter, BloomPrefilterSettings, BloomSettings, -}; +pub use settings::{Bloom, BloomCompositeMode, BloomPrefilter}; use crate::{ core_2d::graph::{Core2d, Node2d}, core_3d::graph::{Core3d, Node3d}, }; use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_color::{Gray, LinearRgba}; use bevy_ecs::{prelude::*, query::QueryItem}; use bevy_math::{ops, UVec2}; use bevy_render::{ @@ -33,11 +30,13 @@ use downsampling_pipeline::{ prepare_downsampling_pipeline, BloomDownsamplingPipeline, BloomDownsamplingPipelineIds, 
BloomUniforms, }; +#[cfg(feature = "trace")] +use tracing::info_span; use upsampling_pipeline::{ prepare_upsampling_pipeline, BloomUpsamplingPipeline, UpsamplingPipelineIds, }; -const BLOOM_SHADER_HANDLE: Handle = Handle::weak_from_u128(929599476923908); +const BLOOM_SHADER_HANDLE: Handle = weak_handle!("c9190ddc-573b-4472-8b21-573cab502b73"); const BLOOM_TEXTURE_FORMAT: TextureFormat = TextureFormat::Rg11b10Ufloat; @@ -111,10 +110,10 @@ impl ViewNode for BloomNode { // Atypically for a post-processing effect, we do not need to // use a secondary texture normally provided by view_target.post_process_write(), // instead we write into our own bloom texture and then directly back onto main. - fn run( + fn run<'w>( &self, _graph: &mut RenderGraphContext, - render_context: &mut RenderContext, + render_context: &mut RenderContext<'w>, ( camera, view_target, @@ -124,8 +123,8 @@ impl ViewNode for BloomNode { bloom_settings, upsampling_pipeline_ids, downsampling_pipeline_ids, - ): QueryItem, - world: &World, + ): QueryItem<'w, Self::ViewQuery>, + world: &'w World, ) -> Result<(), NodeRunError> { if bloom_settings.intensity == 0.0 { return Ok(()); @@ -152,132 +151,152 @@ impl ViewNode for BloomNode { return Ok(()); }; - render_context.command_encoder().push_debug_group("bloom"); - + let view_texture = view_target.main_texture_view(); + let view_texture_unsampled = view_target.get_unsampled_color_attachment(); let diagnostics = render_context.diagnostic_recorder(); - let time_span = diagnostics.time_span(render_context.command_encoder(), "bloom"); - // First downsample pass - { - let downsampling_first_bind_group = render_context.render_device().create_bind_group( - "bloom_downsampling_first_bind_group", - &downsampling_pipeline_res.bind_group_layout, - &BindGroupEntries::sequential(( - // Read from main texture directly - view_target.main_texture_view(), - &bind_groups.sampler, - uniforms.clone(), - )), - ); + render_context.add_command_buffer_generation_task(move 
|render_device| { + #[cfg(feature = "trace")] + let _bloom_span = info_span!("bloom").entered(); - let view = &bloom_texture.view(0); - let mut downsampling_first_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("bloom_downsampling_first_pass"), - color_attachments: &[Some(RenderPassColorAttachment { - view, - resolve_target: None, - ops: Operations::default(), - })], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, + let mut command_encoder = + render_device.create_command_encoder(&CommandEncoderDescriptor { + label: Some("bloom_command_encoder"), }); - downsampling_first_pass.set_render_pipeline(downsampling_first_pipeline); - downsampling_first_pass.set_bind_group( - 0, - &downsampling_first_bind_group, - &[uniform_index.index()], - ); - downsampling_first_pass.draw(0..3, 0..1); - } + command_encoder.push_debug_group("bloom"); + let time_span = diagnostics.time_span(&mut command_encoder, "bloom"); + + // First downsample pass + { + let downsampling_first_bind_group = render_device.create_bind_group( + "bloom_downsampling_first_bind_group", + &downsampling_pipeline_res.bind_group_layout, + &BindGroupEntries::sequential(( + // Read from main texture directly + view_texture, + &bind_groups.sampler, + uniforms.clone(), + )), + ); + + let view = &bloom_texture.view(0); + let mut downsampling_first_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_downsampling_first_pass"), + color_attachments: &[Some(RenderPassColorAttachment { + view, + resolve_target: None, + ops: Operations::default(), + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + downsampling_first_pass.set_pipeline(downsampling_first_pipeline); + downsampling_first_pass.set_bind_group( + 0, + &downsampling_first_bind_group, + &[uniform_index.index()], + ); + downsampling_first_pass.draw(0..3, 0..1); + } - // Other downsample passes - for mip 
in 1..bloom_texture.mip_count { - let view = &bloom_texture.view(mip); - let mut downsampling_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("bloom_downsampling_pass"), - color_attachments: &[Some(RenderPassColorAttachment { - view, - resolve_target: None, - ops: Operations::default(), - })], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }); - downsampling_pass.set_render_pipeline(downsampling_pipeline); - downsampling_pass.set_bind_group( - 0, - &bind_groups.downsampling_bind_groups[mip as usize - 1], - &[uniform_index.index()], - ); - downsampling_pass.draw(0..3, 0..1); - } + // Other downsample passes + for mip in 1..bloom_texture.mip_count { + let view = &bloom_texture.view(mip); + let mut downsampling_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_downsampling_pass"), + color_attachments: &[Some(RenderPassColorAttachment { + view, + resolve_target: None, + ops: Operations::default(), + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + downsampling_pass.set_pipeline(downsampling_pipeline); + downsampling_pass.set_bind_group( + 0, + &bind_groups.downsampling_bind_groups[mip as usize - 1], + &[uniform_index.index()], + ); + downsampling_pass.draw(0..3, 0..1); + } - // Upsample passes except the final one - for mip in (1..bloom_texture.mip_count).rev() { - let view = &bloom_texture.view(mip - 1); - let mut upsampling_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("bloom_upsampling_pass"), - color_attachments: &[Some(RenderPassColorAttachment { - view, - resolve_target: None, - ops: Operations { - load: LoadOp::Load, - store: StoreOp::Store, - }, - })], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }); - upsampling_pass.set_render_pipeline(upsampling_pipeline); - upsampling_pass.set_bind_group( - 0, - 
&bind_groups.upsampling_bind_groups[(bloom_texture.mip_count - mip - 1) as usize], - &[uniform_index.index()], - ); - let blend = compute_blend_factor( - bloom_settings, - mip as f32, - (bloom_texture.mip_count - 1) as f32, - ); - upsampling_pass.set_blend_constant(LinearRgba::gray(blend)); - upsampling_pass.draw(0..3, 0..1); - } + // Upsample passes except the final one + for mip in (1..bloom_texture.mip_count).rev() { + let view = &bloom_texture.view(mip - 1); + let mut upsampling_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_upsampling_pass"), + color_attachments: &[Some(RenderPassColorAttachment { + view, + resolve_target: None, + ops: Operations { + load: LoadOp::Load, + store: StoreOp::Store, + }, + })], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + upsampling_pass.set_pipeline(upsampling_pipeline); + upsampling_pass.set_bind_group( + 0, + &bind_groups.upsampling_bind_groups + [(bloom_texture.mip_count - mip - 1) as usize], + &[uniform_index.index()], + ); + let blend = compute_blend_factor( + bloom_settings, + mip as f32, + (bloom_texture.mip_count - 1) as f32, + ); + upsampling_pass.set_blend_constant(LinearRgba::gray(blend).into()); + upsampling_pass.draw(0..3, 0..1); + } - // Final upsample pass - // This is very similar to the above upsampling passes with the only difference - // being the pipeline (which itself is barely different) and the color attachment - { - let mut upsampling_final_pass = - render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("bloom_upsampling_final_pass"), - color_attachments: &[Some(view_target.get_unsampled_color_attachment())], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }); - upsampling_final_pass.set_render_pipeline(upsampling_final_pipeline); - upsampling_final_pass.set_bind_group( - 0, - &bind_groups.upsampling_bind_groups[(bloom_texture.mip_count - 1) as 
usize], - &[uniform_index.index()], - ); - if let Some(viewport) = camera.viewport.as_ref() { - upsampling_final_pass.set_camera_viewport(viewport); + // Final upsample pass + // This is very similar to the above upsampling passes with the only difference + // being the pipeline (which itself is barely different) and the color attachment + { + let mut upsampling_final_pass = + command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("bloom_upsampling_final_pass"), + color_attachments: &[Some(view_texture_unsampled)], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + upsampling_final_pass.set_pipeline(upsampling_final_pipeline); + upsampling_final_pass.set_bind_group( + 0, + &bind_groups.upsampling_bind_groups[(bloom_texture.mip_count - 1) as usize], + &[uniform_index.index()], + ); + if let Some(viewport) = camera.viewport.as_ref() { + upsampling_final_pass.set_viewport( + viewport.physical_position.x as f32, + viewport.physical_position.y as f32, + viewport.physical_size.x as f32, + viewport.physical_size.y as f32, + viewport.depth.start, + viewport.depth.end, + ); + } + let blend = + compute_blend_factor(bloom_settings, 0.0, (bloom_texture.mip_count - 1) as f32); + upsampling_final_pass.set_blend_constant(LinearRgba::gray(blend).into()); + upsampling_final_pass.draw(0..3, 0..1); } - let blend = - compute_blend_factor(bloom_settings, 0.0, (bloom_texture.mip_count - 1) as f32); - upsampling_final_pass.set_blend_constant(LinearRgba::gray(blend)); - upsampling_final_pass.draw(0..3, 0..1); - } - time_span.end(render_context.command_encoder()); - render_context.command_encoder().pop_debug_group(); + time_span.end(&mut command_encoder); + command_encoder.pop_debug_group(); + command_encoder.finish() + }); Ok(()) } diff --git a/crates/bevy_core_pipeline/src/bloom/settings.rs b/crates/bevy_core_pipeline/src/bloom/settings.rs index effa135677f3b..f6ee8dbd1e358 100644 --- 
a/crates/bevy_core_pipeline/src/bloom/settings.rs +++ b/crates/bevy_core_pipeline/src/bloom/settings.rs @@ -1,6 +1,6 @@ use super::downsampling_pipeline::BloomUniforms; use bevy_ecs::{prelude::Component, query::QueryItem, reflect::ReflectComponent}; -use bevy_math::{AspectRatio, URect, UVec4, Vec4}; +use bevy_math::{AspectRatio, URect, UVec4, Vec2, Vec4}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{extract_component::ExtractComponent, prelude::Camera}; @@ -25,7 +25,7 @@ use bevy_render::{extract_component::ExtractComponent, prelude::Camera}; /// See for a visualization of the parametric curve /// used in Bevy as well as a visualization of the curve's respective scattering profile. #[derive(Component, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct Bloom { /// Controls the baseline of how much the image is scattered (default: 0.15). /// @@ -113,17 +113,14 @@ pub struct Bloom { /// Only tweak if you are seeing visual artifacts. pub max_mip_dimension: u32, - /// UV offset for bloom shader. Ideally close to 2.0 / `max_mip_dimension`. - /// Only tweak if you are seeing visual artifacts. - pub uv_offset: f32, + /// Amount to stretch the bloom on each axis. Artistic control, can be used to emulate + /// anamorphic blur by using a large x-value. For large values, you may need to increase + /// [`Bloom::max_mip_dimension`] to reduce sampling artifacts. + pub scale: Vec2, } -#[deprecated(since = "0.15.0", note = "Renamed to `Bloom`")] -pub type BloomSettings = Bloom; - impl Bloom { const DEFAULT_MAX_MIP_DIMENSION: u32 = 512; - const DEFAULT_UV_OFFSET: f32 = 0.004; /// The default bloom preset. /// @@ -139,7 +136,15 @@ impl Bloom { }, composite_mode: BloomCompositeMode::EnergyConserving, max_mip_dimension: Self::DEFAULT_MAX_MIP_DIMENSION, - uv_offset: Self::DEFAULT_UV_OFFSET, + scale: Vec2::ONE, + }; + + /// Emulates the look of stylized anamorphic bloom, stretched horizontally. 
+ pub const ANAMORPHIC: Self = Self { + // The larger scale necessitates a larger resolution to reduce artifacts: + max_mip_dimension: Self::DEFAULT_MAX_MIP_DIMENSION * 2, + scale: Vec2::new(4.0, 1.0), + ..Self::NATURAL }; /// A preset that's similar to how older games did bloom. @@ -154,7 +159,7 @@ impl Bloom { }, composite_mode: BloomCompositeMode::Additive, max_mip_dimension: Self::DEFAULT_MAX_MIP_DIMENSION, - uv_offset: Self::DEFAULT_UV_OFFSET, + scale: Vec2::ONE, }; /// A preset that applies a very strong bloom, and blurs the whole screen. @@ -169,7 +174,7 @@ impl Bloom { }, composite_mode: BloomCompositeMode::EnergyConserving, max_mip_dimension: Self::DEFAULT_MAX_MIP_DIMENSION, - uv_offset: Self::DEFAULT_UV_OFFSET, + scale: Vec2::ONE, }; } @@ -188,6 +193,7 @@ impl Default for Bloom { /// * Changing these settings makes it easy to make the final result look worse /// * Non-default prefilter settings should be used in conjunction with [`BloomCompositeMode::Additive`] #[derive(Default, Clone, Reflect)] +#[reflect(Clone, Default)] pub struct BloomPrefilter { /// Baseline of the quadratic threshold curve (default: 0.0). 
/// @@ -203,10 +209,8 @@ pub struct BloomPrefilter { pub threshold_softness: f32, } -#[deprecated(since = "0.15.0", note = "Renamed to `BloomPrefilter`")] -pub type BloomPrefilterSettings = BloomPrefilter; - #[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, Copy)] +#[reflect(Clone, Hash, PartialEq)] pub enum BloomCompositeMode { EnergyConserving, Additive, @@ -246,7 +250,7 @@ impl ExtractComponent for Bloom { aspect: AspectRatio::try_from_pixels(size.x, size.y) .expect("Valid screen size values for Bloom settings") .ratio(), - uv_offset: bloom.uv_offset, + scale: bloom.scale, }; Some((bloom.clone(), uniform)) diff --git a/crates/bevy_core_pipeline/src/bloom/upsampling_pipeline.rs b/crates/bevy_core_pipeline/src/bloom/upsampling_pipeline.rs index b63a3eb633485..e4c4ed4a647f9 100644 --- a/crates/bevy_core_pipeline/src/bloom/upsampling_pipeline.rs +++ b/crates/bevy_core_pipeline/src/bloom/upsampling_pipeline.rs @@ -5,7 +5,8 @@ use super::{ use crate::fullscreen_vertex_shader::fullscreen_shader_vertex_state; use bevy_ecs::{ prelude::{Component, Entity}, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + system::{Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_render::{ diff --git a/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs b/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs index 9f8073e3f51df..d46174192be9f 100644 --- a/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs +++ b/crates/bevy_core_pipeline/src/core_2d/camera_2d.rs @@ -1,113 +1,26 @@ -#![expect(deprecated)] - use crate::{ core_2d::graph::Core2d, tonemapping::{DebandDither, Tonemapping}, }; use bevy_ecs::prelude::*; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -use bevy_render::sync_world::SyncToRenderWorld; use bevy_render::{ - camera::{ - Camera, CameraMainTextureUsages, CameraProjection, CameraRenderGraph, - OrthographicProjection, - }, + camera::{Camera, CameraProjection, CameraRenderGraph, OrthographicProjection, Projection}, 
extract_component::ExtractComponent, - prelude::Msaa, primitives::Frustum, - view::VisibleEntities, }; use bevy_transform::prelude::{GlobalTransform, Transform}; /// A 2D camera component. Enables the 2D render graph for a [`Camera`]. #[derive(Component, Default, Reflect, Clone, ExtractComponent)] #[extract_component_filter(With)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require( Camera, DebandDither, - CameraRenderGraph(|| CameraRenderGraph::new(Core2d)), - OrthographicProjection(OrthographicProjection::default_2d), - Frustum(|| OrthographicProjection::default_2d().compute_frustum(&GlobalTransform::from(Transform::default()))), - Tonemapping(|| Tonemapping::None), + CameraRenderGraph::new(Core2d), + Projection::Orthographic(OrthographicProjection::default_2d()), + Frustum = OrthographicProjection::default_2d().compute_frustum(&GlobalTransform::from(Transform::default())), + Tonemapping::None, )] pub struct Camera2d; - -#[derive(Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `Camera2d` component instead. Inserting it will now also insert the other components required by it automatically." 
-)] -pub struct Camera2dBundle { - pub camera: Camera, - pub camera_render_graph: CameraRenderGraph, - pub projection: OrthographicProjection, - pub visible_entities: VisibleEntities, - pub frustum: Frustum, - pub transform: Transform, - pub global_transform: GlobalTransform, - pub camera_2d: Camera2d, - pub tonemapping: Tonemapping, - pub deband_dither: DebandDither, - pub main_texture_usages: CameraMainTextureUsages, - pub msaa: Msaa, - /// Marker component that indicates that its entity needs to be synchronized to the render world - pub sync: SyncToRenderWorld, -} - -impl Default for Camera2dBundle { - fn default() -> Self { - let projection = OrthographicProjection::default_2d(); - let transform = Transform::default(); - let frustum = projection.compute_frustum(&GlobalTransform::from(transform)); - Self { - camera_render_graph: CameraRenderGraph::new(Core2d), - projection, - visible_entities: VisibleEntities::default(), - frustum, - transform, - global_transform: Default::default(), - camera: Camera::default(), - camera_2d: Camera2d, - tonemapping: Tonemapping::None, - deband_dither: DebandDither::Disabled, - main_texture_usages: Default::default(), - msaa: Default::default(), - sync: Default::default(), - } - } -} - -impl Camera2dBundle { - /// Create an orthographic projection camera with a custom `Z` position. - /// - /// The camera is placed at `Z=far-0.1`, looking toward the world origin `(0,0,0)`. - /// Its orthographic projection extends from `0.0` to `-far` in camera view space, - /// corresponding to `Z=far-0.1` (closest to camera) to `Z=-0.1` (furthest away from - /// camera) in world space. 
- pub fn new_with_far(far: f32) -> Self { - // we want 0 to be "closest" and +far to be "farthest" in 2d, so we offset - // the camera's translation by far and use a right handed coordinate system - let projection = OrthographicProjection { - far, - ..OrthographicProjection::default_2d() - }; - let transform = Transform::from_xyz(0.0, 0.0, far - 0.1); - let frustum = projection.compute_frustum(&GlobalTransform::from(transform)); - Self { - camera_render_graph: CameraRenderGraph::new(Core2d), - projection, - visible_entities: VisibleEntities::default(), - frustum, - transform, - global_transform: Default::default(), - camera: Camera::default(), - camera_2d: Camera2d, - tonemapping: Tonemapping::None, - deband_dither: DebandDither::Disabled, - main_texture_usages: Default::default(), - msaa: Default::default(), - sync: Default::default(), - } - } -} diff --git a/crates/bevy_core_pipeline/src/core_2d/main_opaque_pass_2d_node.rs b/crates/bevy_core_pipeline/src/core_2d/main_opaque_pass_2d_node.rs index 91093d0da5c94..60f355c1153db 100644 --- a/crates/bevy_core_pipeline/src/core_2d/main_opaque_pass_2d_node.rs +++ b/crates/bevy_core_pipeline/src/core_2d/main_opaque_pass_2d_node.rs @@ -7,11 +7,11 @@ use bevy_render::{ render_phase::{TrackedRenderPass, ViewBinnedRenderPhases}, render_resource::{CommandEncoderDescriptor, RenderPassDescriptor, StoreOp}, renderer::RenderContext, - view::{ViewDepthTexture, ViewTarget}, + view::{ExtractedView, ViewDepthTexture, ViewTarget}, }; -use bevy_utils::tracing::error; +use tracing::error; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; +use tracing::info_span; use super::AlphaMask2d; @@ -22,6 +22,7 @@ pub struct MainOpaquePass2dNode; impl ViewNode for MainOpaquePass2dNode { type ViewQuery = ( &'static ExtractedCamera, + &'static ExtractedView, &'static ViewTarget, &'static ViewDepthTexture, ); @@ -30,7 +31,7 @@ impl ViewNode for MainOpaquePass2dNode { &self, graph: &mut RenderGraphContext, render_context: &mut 
RenderContext<'w>, - (camera, target, depth): QueryItem<'w, Self::ViewQuery>, + (camera, view, target, depth): QueryItem<'w, Self::ViewQuery>, world: &'w World, ) -> Result<(), NodeRunError> { let (Some(opaque_phases), Some(alpha_mask_phases)) = ( @@ -47,8 +48,8 @@ impl ViewNode for MainOpaquePass2dNode { let view_entity = graph.view_entity(); let (Some(opaque_phase), Some(alpha_mask_phase)) = ( - opaque_phases.get(&view_entity), - alpha_mask_phases.get(&view_entity), + opaque_phases.get(&view.retained_view_entity), + alpha_mask_phases.get(&view.retained_view_entity), ) else { return Ok(()); }; diff --git a/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs b/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs index e365be954775b..494d4d0f89f7c 100644 --- a/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs +++ b/crates/bevy_core_pipeline/src/core_2d/main_transparent_pass_2d_node.rs @@ -4,14 +4,14 @@ use bevy_render::{ camera::ExtractedCamera, diagnostic::RecordDiagnostics, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, - render_phase::ViewSortedRenderPhases, - render_resource::{RenderPassDescriptor, StoreOp}, + render_phase::{TrackedRenderPass, ViewSortedRenderPhases}, + render_resource::{CommandEncoderDescriptor, RenderPassDescriptor, StoreOp}, renderer::RenderContext, - view::{ViewDepthTexture, ViewTarget}, + view::{ExtractedView, ViewDepthTexture, ViewTarget}, }; -use bevy_utils::tracing::error; +use tracing::error; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; +use tracing::info_span; #[derive(Default)] pub struct MainTransparentPass2dNode {} @@ -19,6 +19,7 @@ pub struct MainTransparentPass2dNode {} impl ViewNode for MainTransparentPass2dNode { type ViewQuery = ( &'static ExtractedCamera, + &'static ExtractedView, &'static ViewTarget, &'static ViewDepthTexture, ); @@ -27,7 +28,7 @@ impl ViewNode for MainTransparentPass2dNode { &self, graph: &mut RenderGraphContext, 
render_context: &mut RenderContext<'w>, - (camera, target, depth): bevy_ecs::query::QueryItem<'w, Self::ViewQuery>, + (camera, view, target, depth): bevy_ecs::query::QueryItem<'w, Self::ViewQuery>, world: &'w World, ) -> Result<(), NodeRunError> { let Some(transparent_phases) = @@ -37,67 +38,82 @@ impl ViewNode for MainTransparentPass2dNode { }; let view_entity = graph.view_entity(); - let Some(transparent_phase) = transparent_phases.get(&view_entity) else { + let Some(transparent_phase) = transparent_phases.get(&view.retained_view_entity) else { return Ok(()); }; - // This needs to run at least once to clear the background color, even if there are no items to render - { - #[cfg(feature = "trace")] - let _main_pass_2d = info_span!("main_transparent_pass_2d").entered(); - - let diagnostics = render_context.diagnostic_recorder(); - - let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { - label: Some("main_transparent_pass_2d"), - color_attachments: &[Some(target.get_color_attachment())], - // NOTE: For the transparent pass we load the depth buffer. There should be no - // need to write to it, but store is set to `true` as a workaround for issue #3776, - // https://github.com/bevyengine/bevy/issues/3776 - // so that wgpu does not clear the depth buffer. - // As the opaque and alpha mask passes run first, opaque meshes can occlude - // transparent ones. - depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)), - timestamp_writes: None, - occlusion_query_set: None, - }); - - let pass_span = diagnostics.pass_span(&mut render_pass, "main_transparent_pass_2d"); - - if let Some(viewport) = camera.viewport.as_ref() { - render_pass.set_camera_viewport(viewport); - } + let diagnostics = render_context.diagnostic_recorder(); + + let color_attachments = [Some(target.get_color_attachment())]; + // NOTE: For the transparent pass we load the depth buffer. 
There should be no + // need to write to it, but store is set to `true` as a workaround for issue #3776, + // https://github.com/bevyengine/bevy/issues/3776 + // so that wgpu does not clear the depth buffer. + // As the opaque and alpha mask passes run first, opaque meshes can occlude + // transparent ones. + let depth_stencil_attachment = Some(depth.get_attachment(StoreOp::Store)); - if !transparent_phase.items.is_empty() { + render_context.add_command_buffer_generation_task(move |render_device| { + // Command encoder setup + let mut command_encoder = + render_device.create_command_encoder(&CommandEncoderDescriptor { + label: Some("main_transparent_pass_2d_command_encoder"), + }); + + // This needs to run at least once to clear the background color, even if there are no items to render + { #[cfg(feature = "trace")] - let _transparent_main_pass_2d_span = - info_span!("transparent_main_pass_2d").entered(); - if let Err(err) = transparent_phase.render(&mut render_pass, world, view_entity) { - error!("Error encountered while rendering the transparent 2D phase {err:?}"); + let _main_pass_2d = info_span!("main_transparent_pass_2d").entered(); + + let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some("main_transparent_pass_2d"), + color_attachments: &color_attachments, + depth_stencil_attachment, + timestamp_writes: None, + occlusion_query_set: None, + }); + let mut render_pass = TrackedRenderPass::new(&render_device, render_pass); + + let pass_span = diagnostics.pass_span(&mut render_pass, "main_transparent_pass_2d"); + + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); } + + if !transparent_phase.items.is_empty() { + #[cfg(feature = "trace")] + let _transparent_main_pass_2d_span = + info_span!("transparent_main_pass_2d").entered(); + if let Err(err) = transparent_phase.render(&mut render_pass, world, view_entity) + { + error!( + "Error encountered while rendering the transparent 2D phase 
{err:?}" + ); + } + } + + pass_span.end(&mut render_pass); + } + + // WebGL2 quirk: if ending with a render pass with a custom viewport, the viewport isn't + // reset for the next render pass so add an empty render pass without a custom viewport + #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + if camera.viewport.is_some() { + #[cfg(feature = "trace")] + let _reset_viewport_pass_2d = info_span!("reset_viewport_pass_2d").entered(); + let pass_descriptor = RenderPassDescriptor { + label: Some("reset_viewport_pass_2d"), + color_attachments: &[Some(target.get_color_attachment())], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }; + + command_encoder.begin_render_pass(&pass_descriptor); } - pass_span.end(&mut render_pass); - } - - // WebGL2 quirk: if ending with a render pass with a custom viewport, the viewport isn't - // reset for the next render pass so add an empty render pass without a custom viewport - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] - if camera.viewport.is_some() { - #[cfg(feature = "trace")] - let _reset_viewport_pass_2d = info_span!("reset_viewport_pass_2d").entered(); - let pass_descriptor = RenderPassDescriptor { - label: Some("reset_viewport_pass_2d"), - color_attachments: &[Some(target.get_color_attachment())], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }; - - render_context - .command_encoder() - .begin_render_pass(&pass_descriptor); - } + command_encoder.finish() + }); Ok(()) } diff --git a/crates/bevy_core_pipeline/src/core_2d/mod.rs b/crates/bevy_core_pipeline/src/core_2d/mod.rs index d57134aa3ec07..0a8ed17f8e15d 100644 --- a/crates/bevy_core_pipeline/src/core_2d/mod.rs +++ b/crates/bevy_core_pipeline/src/core_2d/mod.rs @@ -19,6 +19,7 @@ pub mod graph { MainOpaquePass, MainTransparentPass, EndMainPass, + Wireframe, Bloom, PostProcessing, Tonemapping, @@ -33,17 +34,19 @@ pub mod 
graph { use core::ops::Range; use bevy_asset::UntypedAssetId; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ - batching::gpu_preprocessing::GpuPreprocessingMode, render_phase::PhaseItemBinKey, + batching::gpu_preprocessing::GpuPreprocessingMode, + render_phase::PhaseItemBatchSetKey, + view::{ExtractedView, RetainedViewEntity}, }; -use bevy_utils::HashMap; pub use camera_2d::*; pub use main_opaque_pass_2d_node::*; pub use main_transparent_pass_2d_node::*; use crate::{tonemapping::TonemappingNode, upscaling::UpscalingNode}; use bevy_app::{App, Plugin}; -use bevy_ecs::{entity::EntityHashSet, prelude::*}; +use bevy_ecs::prelude::*; use bevy_math::FloatOrd; use bevy_render::{ camera::{Camera, ExtractedCamera}, @@ -59,7 +62,7 @@ use bevy_render::{ TextureFormat, TextureUsages, }, renderer::RenderDevice, - sync_world::{MainEntity, RenderEntity}, + sync_world::MainEntity, texture::TextureCache, view::{Msaa, ViewDepthTexture}, Extract, ExtractSchedule, Render, RenderApp, RenderSet, @@ -127,8 +130,13 @@ impl Plugin for Core2dPlugin { /// Opaque 2D [`BinnedPhaseItem`]s. pub struct Opaque2d { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: BatchSetKey2d, /// The key, which determines which can be batched. - pub key: Opaque2dBinKey, + pub bin_key: Opaque2dBinKey, /// An entity from which data will be fetched, including the mesh if /// applicable. 
pub representative_entity: (Entity, MainEntity), @@ -155,14 +163,6 @@ pub struct Opaque2dBinKey { pub material_bind_group_id: Option, } -impl PhaseItemBinKey for Opaque2dBinKey { - type BatchSetKey = (); - - fn get_batch_set_key(&self) -> Option { - None - } -} - impl PhaseItem for Opaque2d { #[inline] fn entity(&self) -> Entity { @@ -175,7 +175,7 @@ impl PhaseItem for Opaque2d { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.draw_function + self.bin_key.draw_function } #[inline] @@ -198,16 +198,22 @@ impl PhaseItem for Opaque2d { } impl BinnedPhaseItem for Opaque2d { + // Since 2D meshes presently can't be multidrawn, the batch set key is + // irrelevant. + type BatchSetKey = BatchSetKey2d; + type BinKey = Opaque2dBinKey; fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Opaque2d { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -215,17 +221,36 @@ impl BinnedPhaseItem for Opaque2d { } } +/// 2D meshes aren't currently multi-drawn together, so this batch set key only +/// stores whether the mesh is indexed. +#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)] +pub struct BatchSetKey2d { + /// True if the mesh is indexed. + pub indexed: bool, +} + +impl PhaseItemBatchSetKey for BatchSetKey2d { + fn indexed(&self) -> bool { + self.indexed + } +} + impl CachedRenderPipelinePhaseItem for Opaque2d { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.pipeline + self.bin_key.pipeline } } /// Alpha mask 2D [`BinnedPhaseItem`]s. pub struct AlphaMask2d { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. 
+ pub batch_set_key: BatchSetKey2d, /// The key, which determines which can be batched. - pub key: AlphaMask2dBinKey, + pub bin_key: AlphaMask2dBinKey, /// An entity from which data will be fetched, including the mesh if /// applicable. pub representative_entity: (Entity, MainEntity), @@ -265,7 +290,7 @@ impl PhaseItem for AlphaMask2d { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.draw_function + self.bin_key.draw_function } #[inline] @@ -288,16 +313,22 @@ impl PhaseItem for AlphaMask2d { } impl BinnedPhaseItem for AlphaMask2d { + // Since 2D meshes presently can't be multidrawn, the batch set key is + // irrelevant. + type BatchSetKey = BatchSetKey2d; + type BinKey = AlphaMask2dBinKey; fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { AlphaMask2d { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -305,18 +336,10 @@ impl BinnedPhaseItem for AlphaMask2d { } } -impl PhaseItemBinKey for AlphaMask2dBinKey { - type BatchSetKey = (); - - fn get_batch_set_key(&self) -> Option { - None - } -} - impl CachedRenderPipelinePhaseItem for AlphaMask2d { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.pipeline + self.bin_key.pipeline } } @@ -327,7 +350,11 @@ pub struct Transparent2d { pub pipeline: CachedRenderPipelineId, pub draw_function: DrawFunctionId, pub batch_range: Range, + pub extracted_index: usize, pub extra_index: PhaseItemExtraIndex, + /// Whether the mesh in question is indexed (uses an index buffer in + /// addition to its vertex buffer). + pub indexed: bool, } impl PhaseItem for Transparent2d { @@ -380,6 +407,10 @@ impl SortedPhaseItem for Transparent2d { // radsort is a stable radix sort that performed better than `slice::sort_by_key` or `slice::sort_unstable_by_key`. 
radsort::sort_by_key(items, |item| item.sort_key().0); } + + fn indexed(&self) -> bool { + self.indexed + } } impl CachedRenderPipelinePhaseItem for Transparent2d { @@ -393,20 +424,25 @@ pub fn extract_core_2d_camera_phases( mut transparent_2d_phases: ResMut>, mut opaque_2d_phases: ResMut>, mut alpha_mask_2d_phases: ResMut>, - cameras_2d: Extract>>, - mut live_entities: Local, + cameras_2d: Extract>>, + mut live_entities: Local>, ) { live_entities.clear(); - for (entity, camera) in &cameras_2d { + for (main_entity, camera) in &cameras_2d { if !camera.is_active { continue; } - transparent_2d_phases.insert_or_clear(entity); - opaque_2d_phases.insert_or_clear(entity, GpuPreprocessingMode::None); - alpha_mask_2d_phases.insert_or_clear(entity, GpuPreprocessingMode::None); - live_entities.insert(entity); + // This is the main 2D camera, so we use the first subview index (0). + let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0); + + transparent_2d_phases.insert_or_clear(retained_view_entity); + opaque_2d_phases.prepare_for_new_frame(retained_view_entity, GpuPreprocessingMode::None); + alpha_mask_2d_phases + .prepare_for_new_frame(retained_view_entity, GpuPreprocessingMode::None); + + live_entities.insert(retained_view_entity); } // Clear out all dead views. 
@@ -421,11 +457,13 @@ pub fn prepare_core_2d_depth_textures( render_device: Res, transparent_2d_phases: Res>, opaque_2d_phases: Res>, - views_2d: Query<(Entity, &ExtractedCamera, &Msaa), (With,)>, + views_2d: Query<(Entity, &ExtractedCamera, &ExtractedView, &Msaa), (With,)>, ) { let mut textures = >::default(); - for (view, camera, msaa) in &views_2d { - if !opaque_2d_phases.contains_key(&view) || !transparent_2d_phases.contains_key(&view) { + for (view, camera, extracted_view, msaa) in &views_2d { + if !opaque_2d_phases.contains_key(&extracted_view.retained_view_entity) + || !transparent_2d_phases.contains_key(&extracted_view.retained_view_entity) + { continue; }; diff --git a/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs b/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs index 2053b96882817..9bcb2b4f80919 100644 --- a/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs +++ b/crates/bevy_core_pipeline/src/core_3d/camera_3d.rs @@ -1,5 +1,3 @@ -#![expect(deprecated)] - use crate::{ core_3d::graph::Core3d, tonemapping::{DebandDither, Tonemapping}, @@ -7,14 +5,11 @@ use crate::{ use bevy_ecs::prelude::*; use bevy_reflect::{std_traits::ReflectDefault, Reflect, ReflectDeserialize, ReflectSerialize}; use bevy_render::{ - camera::{Camera, CameraMainTextureUsages, CameraRenderGraph, Exposure, Projection}, + camera::{Camera, CameraRenderGraph, Exposure, Projection}, extract_component::ExtractComponent, - primitives::Frustum, render_resource::{LoadOp, TextureUsages}, - sync_world::SyncToRenderWorld, - view::{ColorGrading, Msaa, VisibleEntities}, + view::ColorGrading, }; -use bevy_transform::prelude::{GlobalTransform, Transform}; use serde::{Deserialize, Serialize}; /// A 3D camera component. Enables the main 3D render graph for a [`Camera`]. @@ -23,11 +18,11 @@ use serde::{Deserialize, Serialize}; /// This means "forward" is -Z. 
#[derive(Component, Reflect, Clone, ExtractComponent)] #[extract_component_filter(With)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require( Camera, - DebandDither(|| DebandDither::Enabled), - CameraRenderGraph(|| CameraRenderGraph::new(Core3d)), + DebandDither::Enabled, + CameraRenderGraph::new(Core3d), Projection, Tonemapping, ColorGrading, @@ -61,7 +56,7 @@ pub struct Camera3d { /// /// Higher qualities are more GPU-intensive. /// - /// **Note:** You can get better-looking results at any quality level by enabling TAA. See: [`TemporalAntiAliasPlugin`](crate::experimental::taa::TemporalAntiAliasPlugin). + /// **Note:** You can get better-looking results at any quality level by enabling TAA. See: `TemporalAntiAliasPlugin` pub screen_space_specular_transmission_quality: ScreenSpaceTransmissionQuality, } @@ -77,7 +72,7 @@ impl Default for Camera3d { } #[derive(Clone, Copy, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone)] pub struct Camera3dDepthTextureUsage(pub u32); impl From for Camera3dDepthTextureUsage { @@ -93,7 +88,7 @@ impl From for TextureUsages { /// The depth clear operation to perform for the main 3d pass. #[derive(Reflect, Serialize, Deserialize, Clone, Debug)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, Default)] pub enum Camera3dDepthLoadOp { /// Clear with a specified value. /// Note that 0.0 is the far plane due to bevy's use of reverse-z projections. @@ -122,9 +117,9 @@ impl From for LoadOp { /// /// Higher qualities are more GPU-intensive. /// -/// **Note:** You can get better-looking results at any quality level by enabling TAA. See: [`TemporalAntiAliasPlugin`](crate::experimental::taa::TemporalAntiAliasPlugin). +/// **Note:** You can get better-looking results at any quality level by enabling TAA. 
See: `TemporalAntiAliasPlugin` #[derive(Resource, Default, Clone, Copy, Reflect, PartialEq, PartialOrd, Debug)] -#[reflect(Resource, Default, Debug, PartialEq)] +#[reflect(Resource, Default, Clone, Debug, PartialEq)] pub enum ScreenSpaceTransmissionQuality { /// Best performance at the cost of quality. Suitable for lower end GPUs. (e.g. Mobile) /// @@ -147,52 +142,3 @@ pub enum ScreenSpaceTransmissionQuality { /// `num_taps` = 32 Ultra, } - -/// The camera coordinate space is right-handed x-right, y-up, z-back. -/// This means "forward" is -Z. -#[derive(Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `Camera3d` component instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct Camera3dBundle { - pub camera: Camera, - pub camera_render_graph: CameraRenderGraph, - pub projection: Projection, - pub visible_entities: VisibleEntities, - pub frustum: Frustum, - pub transform: Transform, - pub global_transform: GlobalTransform, - pub camera_3d: Camera3d, - pub tonemapping: Tonemapping, - pub deband_dither: DebandDither, - pub color_grading: ColorGrading, - pub exposure: Exposure, - pub main_texture_usages: CameraMainTextureUsages, - pub msaa: Msaa, - /// Marker component that indicates that its entity needs to be synchronized to the render world - pub sync: SyncToRenderWorld, -} - -// NOTE: ideally Perspective and Orthographic defaults can share the same impl, but sadly it breaks rust's type inference -impl Default for Camera3dBundle { - fn default() -> Self { - Self { - camera_render_graph: CameraRenderGraph::new(Core3d), - camera: Default::default(), - projection: Default::default(), - visible_entities: Default::default(), - frustum: Default::default(), - transform: Default::default(), - global_transform: Default::default(), - camera_3d: Default::default(), - tonemapping: Default::default(), - color_grading: Default::default(), - exposure: Default::default(), - main_texture_usages: 
Default::default(), - deband_dither: DebandDither::Enabled, - msaa: Default::default(), - sync: Default::default(), - } - } -} diff --git a/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs b/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs index b51f36354340a..3b1bc96c9014d 100644 --- a/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs +++ b/crates/bevy_core_pipeline/src/core_3d/main_opaque_pass_3d_node.rs @@ -2,7 +2,7 @@ use crate::{ core_3d::Opaque3d, skybox::{SkyboxBindGroup, SkyboxPipelineId}, }; -use bevy_ecs::{entity::Entity, prelude::World, query::QueryItem}; +use bevy_ecs::{prelude::World, query::QueryItem}; use bevy_render::{ camera::ExtractedCamera, diagnostic::RecordDiagnostics, @@ -10,11 +10,11 @@ use bevy_render::{ render_phase::{TrackedRenderPass, ViewBinnedRenderPhases}, render_resource::{CommandEncoderDescriptor, PipelineCache, RenderPassDescriptor, StoreOp}, renderer::RenderContext, - view::{ViewDepthTexture, ViewTarget, ViewUniformOffset}, + view::{ExtractedView, ViewDepthTexture, ViewTarget, ViewUniformOffset}, }; -use bevy_utils::tracing::error; +use tracing::error; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; +use tracing::info_span; use super::AlphaMask3d; @@ -24,8 +24,8 @@ use super::AlphaMask3d; pub struct MainOpaquePass3dNode; impl ViewNode for MainOpaquePass3dNode { type ViewQuery = ( - Entity, &'static ExtractedCamera, + &'static ExtractedView, &'static ViewTarget, &'static ViewDepthTexture, Option<&'static SkyboxPipelineId>, @@ -38,8 +38,8 @@ impl ViewNode for MainOpaquePass3dNode { graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, ( - view, camera, + extracted_view, target, depth, skybox_pipeline, @@ -55,9 +55,10 @@ impl ViewNode for MainOpaquePass3dNode { return Ok(()); }; - let (Some(opaque_phase), Some(alpha_mask_phase)) = - (opaque_phases.get(&view), alpha_mask_phases.get(&view)) - else { + let (Some(opaque_phase), Some(alpha_mask_phase)) = ( + 
opaque_phases.get(&extracted_view.retained_view_entity), + alpha_mask_phases.get(&extracted_view.retained_view_entity), + ) else { return Ok(()); }; diff --git a/crates/bevy_core_pipeline/src/core_3d/main_transmissive_pass_3d_node.rs b/crates/bevy_core_pipeline/src/core_3d/main_transmissive_pass_3d_node.rs index 225ce81da6c3a..0a2e98f0bf9ac 100644 --- a/crates/bevy_core_pipeline/src/core_3d/main_transmissive_pass_3d_node.rs +++ b/crates/bevy_core_pipeline/src/core_3d/main_transmissive_pass_3d_node.rs @@ -7,12 +7,12 @@ use bevy_render::{ render_phase::ViewSortedRenderPhases, render_resource::{Extent3d, RenderPassDescriptor, StoreOp}, renderer::RenderContext, - view::{ViewDepthTexture, ViewTarget}, + view::{ExtractedView, ViewDepthTexture, ViewTarget}, }; -use bevy_utils::tracing::error; -#[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; use core::ops::Range; +use tracing::error; +#[cfg(feature = "trace")] +use tracing::info_span; /// A [`bevy_render::render_graph::Node`] that runs the [`Transmissive3d`] /// [`ViewSortedRenderPhases`]. 
@@ -22,6 +22,7 @@ pub struct MainTransmissivePass3dNode; impl ViewNode for MainTransmissivePass3dNode { type ViewQuery = ( &'static ExtractedCamera, + &'static ExtractedView, &'static Camera3d, &'static ViewTarget, Option<&'static ViewTransmissionTexture>, @@ -32,7 +33,7 @@ impl ViewNode for MainTransmissivePass3dNode { &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext, - (camera, camera_3d, target, transmission, depth): QueryItem, + (camera, view, camera_3d, target, transmission, depth): QueryItem, world: &World, ) -> Result<(), NodeRunError> { let view_entity = graph.view_entity(); @@ -43,7 +44,7 @@ impl ViewNode for MainTransmissivePass3dNode { return Ok(()); }; - let Some(transmissive_phase) = transmissive_phases.get(&view_entity) else { + let Some(transmissive_phase) = transmissive_phases.get(&view.retained_view_entity) else { return Ok(()); }; diff --git a/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs b/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs index 4f0d3d0722f0e..36fe8417c4de2 100644 --- a/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs +++ b/crates/bevy_core_pipeline/src/core_3d/main_transparent_pass_3d_node.rs @@ -7,11 +7,11 @@ use bevy_render::{ render_phase::ViewSortedRenderPhases, render_resource::{RenderPassDescriptor, StoreOp}, renderer::RenderContext, - view::{ViewDepthTexture, ViewTarget}, + view::{ExtractedView, ViewDepthTexture, ViewTarget}, }; -use bevy_utils::tracing::error; +use tracing::error; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; +use tracing::info_span; /// A [`bevy_render::render_graph::Node`] that runs the [`Transparent3d`] /// [`ViewSortedRenderPhases`]. 
@@ -21,6 +21,7 @@ pub struct MainTransparentPass3dNode; impl ViewNode for MainTransparentPass3dNode { type ViewQuery = ( &'static ExtractedCamera, + &'static ExtractedView, &'static ViewTarget, &'static ViewDepthTexture, ); @@ -28,7 +29,7 @@ impl ViewNode for MainTransparentPass3dNode { &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext, - (camera, target, depth): QueryItem, + (camera, view, target, depth): QueryItem, world: &World, ) -> Result<(), NodeRunError> { let view_entity = graph.view_entity(); @@ -39,7 +40,7 @@ impl ViewNode for MainTransparentPass3dNode { return Ok(()); }; - let Some(transparent_phase) = transparent_phases.get(&view_entity) else { + let Some(transparent_phase) = transparent_phases.get(&view.retained_view_entity) else { return Ok(()); }; diff --git a/crates/bevy_core_pipeline/src/core_3d/mod.rs b/crates/bevy_core_pipeline/src/core_3d/mod.rs index f70ad1391473f..b9f6955499cf7 100644 --- a/crates/bevy_core_pipeline/src/core_3d/mod.rs +++ b/crates/bevy_core_pipeline/src/core_3d/mod.rs @@ -16,8 +16,11 @@ pub mod graph { #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)] pub enum Node3d { MsaaWriteback, - Prepass, - DeferredPrepass, + EarlyPrepass, + EarlyDownsampleDepth, + LatePrepass, + EarlyDeferredPrepass, + LateDeferredPrepass, CopyDeferredLightingId, EndPrepasses, StartMainPass, @@ -25,6 +28,8 @@ pub mod graph { MainTransmissivePass, MainTransparentPass, EndMainPass, + Wireframe, + LateDownsampleDepth, Taa, MotionBlur, Bloom, @@ -67,9 +72,10 @@ use core::ops::Range; use bevy_render::{ batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport}, + experimental::occlusion_culling::OcclusionCulling, mesh::allocator::SlabId, - render_phase::PhaseItemBinKey, - view::NoIndirectDrawing, + render_phase::PhaseItemBatchSetKey, + view::{prepare_view_targets, NoIndirectDrawing, RetainedViewEntity}, }; pub use camera_3d::*; pub use main_opaque_pass_3d_node::*; @@ -78,9 +84,10 @@ pub use 
main_transparent_pass_3d_node::*; use bevy_app::{App, Plugin, PostUpdate}; use bevy_asset::UntypedAssetId; use bevy_color::LinearRgba; -use bevy_ecs::{entity::EntityHashSet, prelude::*}; +use bevy_ecs::prelude::*; use bevy_image::BevyDefault; use bevy_math::FloatOrd; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ camera::{Camera, ExtractedCamera}, extract_component::ExtractComponentPlugin, @@ -101,21 +108,23 @@ use bevy_render::{ view::{ExtractedView, ViewDepthTexture, ViewTarget}, Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; -use bevy_utils::{tracing::warn, HashMap}; use nonmax::NonMaxU32; +use tracing::warn; use crate::{ core_3d::main_transmissive_pass_3d_node::MainTransmissivePass3dNode, deferred::{ - copy_lighting_id::CopyDeferredLightingIdNode, node::DeferredGBufferPrepassNode, + copy_lighting_id::CopyDeferredLightingIdNode, + node::{EarlyDeferredGBufferPrepassNode, LateDeferredGBufferPrepassNode}, AlphaMask3dDeferred, Opaque3dDeferred, DEFERRED_LIGHTING_PASS_ID_FORMAT, DEFERRED_PREPASS_FORMAT, }, dof::DepthOfFieldNode, prepass::{ - node::PrepassNode, AlphaMask3dPrepass, DeferredPrepass, DepthPrepass, MotionVectorPrepass, - NormalPrepass, Opaque3dPrepass, OpaqueNoLightmap3dBinKey, ViewPrepassTextures, - MOTION_VECTOR_PREPASS_FORMAT, NORMAL_PREPASS_FORMAT, + node::{EarlyPrepassNode, LatePrepassNode}, + AlphaMask3dPrepass, DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass, + Opaque3dPrepass, OpaqueNoLightmap3dBatchSetKey, OpaqueNoLightmap3dBinKey, + ViewPrepassTextures, MOTION_VECTOR_PREPASS_FORMAT, NORMAL_PREPASS_FORMAT, }, skybox::SkyboxPlugin, tonemapping::TonemappingNode, @@ -160,6 +169,9 @@ impl Plugin for Core3dPlugin { ( sort_phase_system::.in_set(RenderSet::PhaseSort), sort_phase_system::.in_set(RenderSet::PhaseSort), + configure_occlusion_culling_view_targets + .after(prepare_view_targets) + .in_set(RenderSet::ManageViews), prepare_core_3d_depth_textures.in_set(RenderSet::PrepareResources), 
prepare_core_3d_transmission_textures.in_set(RenderSet::PrepareResources), prepare_prepass_textures.in_set(RenderSet::PrepareResources), @@ -168,10 +180,15 @@ impl Plugin for Core3dPlugin { render_app .add_render_sub_graph(Core3d) - .add_render_graph_node::>(Core3d, Node3d::Prepass) - .add_render_graph_node::>( + .add_render_graph_node::>(Core3d, Node3d::EarlyPrepass) + .add_render_graph_node::>(Core3d, Node3d::LatePrepass) + .add_render_graph_node::>( Core3d, - Node3d::DeferredPrepass, + Node3d::EarlyDeferredPrepass, + ) + .add_render_graph_node::>( + Core3d, + Node3d::LateDeferredPrepass, ) .add_render_graph_node::>( Core3d, @@ -199,8 +216,10 @@ impl Plugin for Core3dPlugin { .add_render_graph_edges( Core3d, ( - Node3d::Prepass, - Node3d::DeferredPrepass, + Node3d::EarlyPrepass, + Node3d::EarlyDeferredPrepass, + Node3d::LatePrepass, + Node3d::LateDeferredPrepass, Node3d::CopyDeferredLightingId, Node3d::EndPrepasses, Node3d::StartMainPass, @@ -218,8 +237,13 @@ impl Plugin for Core3dPlugin { /// Opaque 3D [`BinnedPhaseItem`]s. pub struct Opaque3d { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: Opaque3dBatchSetKey, /// The key, which determines which can be batched. - pub key: Opaque3dBinKey, + pub bin_key: Opaque3dBinKey, /// An entity from which data will be fetched, including the mesh if /// applicable. pub representative_entity: (Entity, MainEntity), @@ -264,17 +288,17 @@ pub struct Opaque3dBatchSetKey { pub lightmap_slab: Option, } +impl PhaseItemBatchSetKey for Opaque3dBatchSetKey { + fn indexed(&self) -> bool { + self.index_slab.is_some() + } +} + /// Data that must be identical in order to *batch* phase items together. /// /// Note that a *batch set* (if multi-draw is in use) contains multiple batches. 
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Opaque3dBinKey { - /// The key of the *batch set*. - /// - /// As batches belong to a batch set, meshes in a batch must obviously be - /// able to be placed in a single batch set. - pub batch_set_key: Opaque3dBatchSetKey, - /// The asset that this phase item is associated with. /// /// Normally, this is the ID of the mesh, but for non-mesh items it might be @@ -282,14 +306,6 @@ pub struct Opaque3dBinKey { pub asset_id: UntypedAssetId, } -impl PhaseItemBinKey for Opaque3dBinKey { - type BatchSetKey = Opaque3dBatchSetKey; - - fn get_batch_set_key(&self) -> Option { - Some(self.batch_set_key.clone()) - } -} - impl PhaseItem for Opaque3d { #[inline] fn entity(&self) -> Entity { @@ -303,7 +319,7 @@ impl PhaseItem for Opaque3d { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.batch_set_key.draw_function + self.batch_set_key.draw_function } #[inline] @@ -326,17 +342,20 @@ impl PhaseItem for Opaque3d { } impl BinnedPhaseItem for Opaque3d { + type BatchSetKey = Opaque3dBatchSetKey; type BinKey = Opaque3dBinKey; #[inline] fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Opaque3d { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -347,12 +366,18 @@ impl BinnedPhaseItem for Opaque3d { impl CachedRenderPipelinePhaseItem for Opaque3d { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.batch_set_key.pipeline + self.batch_set_key.pipeline } } pub struct AlphaMask3d { - pub key: OpaqueNoLightmap3dBinKey, + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. 
+ pub batch_set_key: OpaqueNoLightmap3dBatchSetKey, + /// The key, which determines which can be batched. + pub bin_key: OpaqueNoLightmap3dBinKey, pub representative_entity: (Entity, MainEntity), pub batch_range: Range, pub extra_index: PhaseItemExtraIndex, @@ -370,7 +395,7 @@ impl PhaseItem for AlphaMask3d { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.batch_set_key.draw_function + self.batch_set_key.draw_function } #[inline] @@ -396,16 +421,19 @@ impl PhaseItem for AlphaMask3d { impl BinnedPhaseItem for AlphaMask3d { type BinKey = OpaqueNoLightmap3dBinKey; + type BatchSetKey = OpaqueNoLightmap3dBatchSetKey; #[inline] fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Self { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -416,7 +444,7 @@ impl BinnedPhaseItem for AlphaMask3d { impl CachedRenderPipelinePhaseItem for AlphaMask3d { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.batch_set_key.pipeline + self.batch_set_key.pipeline } } @@ -427,6 +455,9 @@ pub struct Transmissive3d { pub draw_function: DrawFunctionId, pub batch_range: Range, pub extra_index: PhaseItemExtraIndex, + /// Whether the mesh in question is indexed (uses an index buffer in + /// addition to its vertex buffer). 
+ pub indexed: bool, } impl PhaseItem for Transmissive3d { @@ -490,6 +521,11 @@ impl SortedPhaseItem for Transmissive3d { fn sort(items: &mut [Self]) { radsort::sort_by_key(items, |item| item.distance); } + + #[inline] + fn indexed(&self) -> bool { + self.indexed + } } impl CachedRenderPipelinePhaseItem for Transmissive3d { @@ -506,6 +542,9 @@ pub struct Transparent3d { pub draw_function: DrawFunctionId, pub batch_range: Range, pub extra_index: PhaseItemExtraIndex, + /// Whether the mesh in question is indexed (uses an index buffer in + /// addition to its vertex buffer). + pub indexed: bool, } impl PhaseItem for Transparent3d { @@ -557,6 +596,11 @@ impl SortedPhaseItem for Transparent3d { fn sort(items: &mut [Self]) { radsort::sort_by_key(items, |item| item.distance); } + + #[inline] + fn indexed(&self) -> bool { + self.indexed + } } impl CachedRenderPipelinePhaseItem for Transparent3d { @@ -571,13 +615,13 @@ pub fn extract_core_3d_camera_phases( mut alpha_mask_3d_phases: ResMut>, mut transmissive_3d_phases: ResMut>, mut transparent_3d_phases: ResMut>, - cameras_3d: Extract), With>>, - mut live_entities: Local, + cameras_3d: Extract), With>>, + mut live_entities: Local>, gpu_preprocessing_support: Res, ) { live_entities.clear(); - for (entity, camera, no_indirect_drawing) in &cameras_3d { + for (main_entity, camera, no_indirect_drawing) in &cameras_3d { if !camera.is_active { continue; } @@ -590,23 +634,25 @@ pub fn extract_core_3d_camera_phases( GpuPreprocessingMode::PreprocessingOnly }); - opaque_3d_phases.insert_or_clear(entity, gpu_preprocessing_mode); - alpha_mask_3d_phases.insert_or_clear(entity, gpu_preprocessing_mode); - transmissive_3d_phases.insert_or_clear(entity); - transparent_3d_phases.insert_or_clear(entity); + // This is the main 3D camera, so use the first subview index (0). 
+ let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0); + + opaque_3d_phases.prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + alpha_mask_3d_phases.prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + transmissive_3d_phases.insert_or_clear(retained_view_entity); + transparent_3d_phases.insert_or_clear(retained_view_entity); - live_entities.insert(entity); + live_entities.insert(retained_view_entity); } - opaque_3d_phases.retain(|entity, _| live_entities.contains(entity)); - alpha_mask_3d_phases.retain(|entity, _| live_entities.contains(entity)); - transmissive_3d_phases.retain(|entity, _| live_entities.contains(entity)); - transparent_3d_phases.retain(|entity, _| live_entities.contains(entity)); + opaque_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity)); + alpha_mask_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity)); + transmissive_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity)); + transparent_3d_phases.retain(|view_entity, _| live_entities.contains(view_entity)); } // Extract the render phases for the prepass -#[allow(clippy::too_many_arguments)] pub fn extract_camera_prepass_phase( mut commands: Commands, mut opaque_3d_prepass_phases: ResMut>, @@ -616,6 +662,7 @@ pub fn extract_camera_prepass_phase( cameras_3d: Extract< Query< ( + Entity, RenderEntity, &Camera, Has, @@ -627,12 +674,13 @@ pub fn extract_camera_prepass_phase( With, >, >, - mut live_entities: Local, + mut live_entities: Local>, gpu_preprocessing_support: Res, ) { live_entities.clear(); for ( + main_entity, entity, camera, no_indirect_drawing, @@ -654,39 +702,67 @@ pub fn extract_camera_prepass_phase( GpuPreprocessingMode::PreprocessingOnly }); + // This is the main 3D camera, so we use the first subview index (0). 
+ let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0); + if depth_prepass || normal_prepass || motion_vector_prepass { - opaque_3d_prepass_phases.insert_or_clear(entity, gpu_preprocessing_mode); - alpha_mask_3d_prepass_phases.insert_or_clear(entity, gpu_preprocessing_mode); + opaque_3d_prepass_phases + .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + alpha_mask_3d_prepass_phases + .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); } else { - opaque_3d_prepass_phases.remove(&entity); - alpha_mask_3d_prepass_phases.remove(&entity); + opaque_3d_prepass_phases.remove(&retained_view_entity); + alpha_mask_3d_prepass_phases.remove(&retained_view_entity); } if deferred_prepass { - opaque_3d_deferred_phases.insert_or_clear(entity, gpu_preprocessing_mode); - alpha_mask_3d_deferred_phases.insert_or_clear(entity, gpu_preprocessing_mode); + opaque_3d_deferred_phases + .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + alpha_mask_3d_deferred_phases + .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); } else { - opaque_3d_deferred_phases.remove(&entity); - alpha_mask_3d_deferred_phases.remove(&entity); + opaque_3d_deferred_phases.remove(&retained_view_entity); + alpha_mask_3d_deferred_phases.remove(&retained_view_entity); } - live_entities.insert(entity); + live_entities.insert(retained_view_entity); - commands + // Add or remove prepasses as appropriate. 
+ + let mut camera_commands = commands .get_entity(entity) - .expect("Camera entity wasn't synced.") - .insert_if(DepthPrepass, || depth_prepass) - .insert_if(NormalPrepass, || normal_prepass) - .insert_if(MotionVectorPrepass, || motion_vector_prepass) - .insert_if(DeferredPrepass, || deferred_prepass); + .expect("Camera entity wasn't synced."); + + if depth_prepass { + camera_commands.insert(DepthPrepass); + } else { + camera_commands.remove::(); + } + + if normal_prepass { + camera_commands.insert(NormalPrepass); + } else { + camera_commands.remove::(); + } + + if motion_vector_prepass { + camera_commands.insert(MotionVectorPrepass); + } else { + camera_commands.remove::(); + } + + if deferred_prepass { + camera_commands.insert(DeferredPrepass); + } else { + camera_commands.remove::(); + } } - opaque_3d_prepass_phases.retain(|entity, _| live_entities.contains(entity)); - alpha_mask_3d_prepass_phases.retain(|entity, _| live_entities.contains(entity)); - opaque_3d_deferred_phases.retain(|entity, _| live_entities.contains(entity)); - alpha_mask_3d_deferred_phases.retain(|entity, _| live_entities.contains(entity)); + opaque_3d_prepass_phases.retain(|view_entity, _| live_entities.contains(view_entity)); + alpha_mask_3d_prepass_phases.retain(|view_entity, _| live_entities.contains(view_entity)); + opaque_3d_deferred_phases.retain(|view_entity, _| live_entities.contains(view_entity)); + alpha_mask_3d_deferred_phases.retain(|view_entity, _| live_entities.contains(view_entity)); } -#[allow(clippy::too_many_arguments)] pub fn prepare_core_3d_depth_textures( mut commands: Commands, mut texture_cache: ResMut, @@ -698,17 +774,18 @@ pub fn prepare_core_3d_depth_textures( views_3d: Query<( Entity, &ExtractedCamera, + &ExtractedView, Option<&DepthPrepass>, &Camera3d, &Msaa, )>, ) { let mut render_target_usage = >::default(); - for (view, camera, depth_prepass, camera_3d, _msaa) in &views_3d { - if !opaque_3d_phases.contains_key(&view) - || 
!alpha_mask_3d_phases.contains_key(&view) - || !transmissive_3d_phases.contains_key(&view) - || !transparent_3d_phases.contains_key(&view) + for (_, camera, extracted_view, depth_prepass, camera_3d, _msaa) in &views_3d { + if !opaque_3d_phases.contains_key(&extracted_view.retained_view_entity) + || !alpha_mask_3d_phases.contains_key(&extracted_view.retained_view_entity) + || !transmissive_3d_phases.contains_key(&extracted_view.retained_view_entity) + || !transparent_3d_phases.contains_key(&extracted_view.retained_view_entity) { continue; }; @@ -726,7 +803,7 @@ pub fn prepare_core_3d_depth_textures( } let mut textures = >::default(); - for (entity, camera, _, camera_3d, msaa) in &views_3d { + for (entity, camera, _, _, camera_3d, msaa) in &views_3d { let Some(physical_target_size) = camera.physical_target_size else { continue; }; @@ -777,7 +854,6 @@ pub struct ViewTransmissionTexture { pub sampler: Sampler, } -#[allow(clippy::too_many_arguments)] pub fn prepare_core_3d_transmission_textures( mut commands: Commands, mut texture_cache: ResMut, @@ -790,14 +866,15 @@ pub fn prepare_core_3d_transmission_textures( ) { let mut textures = >::default(); for (entity, camera, camera_3d, view) in &views_3d { - if !opaque_3d_phases.contains_key(&entity) - || !alpha_mask_3d_phases.contains_key(&entity) - || !transparent_3d_phases.contains_key(&entity) + if !opaque_3d_phases.contains_key(&view.retained_view_entity) + || !alpha_mask_3d_phases.contains_key(&view.retained_view_entity) + || !transparent_3d_phases.contains_key(&view.retained_view_entity) { continue; }; - let Some(transmissive_3d_phase) = transmissive_3d_phases.get(&entity) else { + let Some(transmissive_3d_phase) = transmissive_3d_phases.get(&view.retained_view_entity) + else { continue; }; @@ -863,6 +940,27 @@ pub fn prepare_core_3d_transmission_textures( } } +/// Sets the `TEXTURE_BINDING` flag on the depth texture if necessary for +/// occlusion culling. 
+/// +/// We need that flag to be set in order to read from the texture. +fn configure_occlusion_culling_view_targets( + mut view_targets: Query< + &mut Camera3d, + ( + With, + Without, + With, + ), + >, +) { + for mut camera_3d in &mut view_targets { + let mut depth_texture_usages = TextureUsages::from(camera_3d.depth_texture_usages); + depth_texture_usages |= TextureUsages::TEXTURE_BINDING; + camera_3d.depth_texture_usages = depth_texture_usages.into(); + } +} + // Disable MSAA and warn if using deferred rendering pub fn check_msaa(mut deferred_views: Query<&mut Msaa, (With, With)>) { for mut msaa in deferred_views.iter_mut() { @@ -877,7 +975,6 @@ pub fn check_msaa(mut deferred_views: Query<&mut Msaa, (With, With, @@ -889,6 +986,7 @@ pub fn prepare_prepass_textures( views_3d: Query<( Entity, &ExtractedCamera, + &ExtractedView, &Msaa, Has, Has, @@ -904,6 +1002,7 @@ pub fn prepare_prepass_textures( for ( entity, camera, + view, msaa, depth_prepass, normal_prepass, @@ -911,11 +1010,12 @@ pub fn prepare_prepass_textures( deferred_prepass, ) in &views_3d { - if !opaque_3d_prepass_phases.contains_key(&entity) - && !alpha_mask_3d_prepass_phases.contains_key(&entity) - && !opaque_3d_deferred_phases.contains_key(&entity) - && !alpha_mask_3d_deferred_phases.contains_key(&entity) + if !opaque_3d_prepass_phases.contains_key(&view.retained_view_entity) + && !alpha_mask_3d_prepass_phases.contains_key(&view.retained_view_entity) + && !opaque_3d_deferred_phases.contains_key(&view.retained_view_entity) + && !alpha_mask_3d_deferred_phases.contains_key(&view.retained_view_entity) { + commands.entity(entity).remove::(); continue; }; diff --git a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs index f645d22092bfa..966be880c28ce 100644 --- a/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs +++ b/crates/bevy_core_pipeline/src/deferred/copy_lighting_id.rs @@ -3,7 +3,7 @@ use crate::{ 
prepass::{DeferredPrepass, ViewPrepassTextures}, }; use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::prelude::*; use bevy_math::UVec2; use bevy_render::{ @@ -24,7 +24,7 @@ use bevy_render::{ use super::DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT; pub const COPY_DEFERRED_LIGHTING_ID_SHADER_HANDLE: Handle = - Handle::weak_from_u128(5230948520734987); + weak_handle!("70d91342-1c43-4b20-973f-aa6ce93aa617"); pub struct CopyDeferredLightingIdPlugin; impl Plugin for CopyDeferredLightingIdPlugin { diff --git a/crates/bevy_core_pipeline/src/deferred/mod.rs b/crates/bevy_core_pipeline/src/deferred/mod.rs index 1ddc66a285c20..b9f5169b48f32 100644 --- a/crates/bevy_core_pipeline/src/deferred/mod.rs +++ b/crates/bevy_core_pipeline/src/deferred/mod.rs @@ -3,7 +3,7 @@ pub mod node; use core::ops::Range; -use crate::prepass::OpaqueNoLightmap3dBinKey; +use crate::prepass::{OpaqueNoLightmap3dBatchSetKey, OpaqueNoLightmap3dBinKey}; use bevy_ecs::prelude::*; use bevy_render::sync_world::MainEntity; use bevy_render::{ @@ -25,7 +25,13 @@ pub const DEFERRED_LIGHTING_PASS_ID_DEPTH_FORMAT: TextureFormat = TextureFormat: /// Used to render all 3D meshes with materials that have no transparency. #[derive(PartialEq, Eq, Hash)] pub struct Opaque3dDeferred { - pub key: OpaqueNoLightmap3dBinKey, + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: OpaqueNoLightmap3dBatchSetKey, + /// Information that separates items into bins. 
+ pub bin_key: OpaqueNoLightmap3dBinKey, pub representative_entity: (Entity, MainEntity), pub batch_range: Range, pub extra_index: PhaseItemExtraIndex, @@ -43,7 +49,7 @@ impl PhaseItem for Opaque3dDeferred { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.batch_set_key.draw_function + self.batch_set_key.draw_function } #[inline] @@ -68,17 +74,20 @@ impl PhaseItem for Opaque3dDeferred { } impl BinnedPhaseItem for Opaque3dDeferred { + type BatchSetKey = OpaqueNoLightmap3dBatchSetKey; type BinKey = OpaqueNoLightmap3dBinKey; #[inline] fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Self { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -89,7 +98,7 @@ impl BinnedPhaseItem for Opaque3dDeferred { impl CachedRenderPipelinePhaseItem for Opaque3dDeferred { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.batch_set_key.pipeline + self.batch_set_key.pipeline } } @@ -99,7 +108,13 @@ impl CachedRenderPipelinePhaseItem for Opaque3dDeferred { /// /// Used to render all meshes with a material with an alpha mask. pub struct AlphaMask3dDeferred { - pub key: OpaqueNoLightmap3dBinKey, + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: OpaqueNoLightmap3dBatchSetKey, + /// Information that separates items into bins. 
+ pub bin_key: OpaqueNoLightmap3dBinKey, pub representative_entity: (Entity, MainEntity), pub batch_range: Range, pub extra_index: PhaseItemExtraIndex, @@ -118,7 +133,7 @@ impl PhaseItem for AlphaMask3dDeferred { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.batch_set_key.draw_function + self.batch_set_key.draw_function } #[inline] @@ -143,16 +158,19 @@ impl PhaseItem for AlphaMask3dDeferred { } impl BinnedPhaseItem for AlphaMask3dDeferred { + type BatchSetKey = OpaqueNoLightmap3dBatchSetKey; type BinKey = OpaqueNoLightmap3dBinKey; fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Self { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -163,6 +181,6 @@ impl BinnedPhaseItem for AlphaMask3dDeferred { impl CachedRenderPipelinePhaseItem for AlphaMask3dDeferred { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.batch_set_key.pipeline + self.batch_set_key.pipeline } } diff --git a/crates/bevy_core_pipeline/src/deferred/node.rs b/crates/bevy_core_pipeline/src/deferred/node.rs index 5aa89a8e94a0d..ffac1eec6de82 100644 --- a/crates/bevy_core_pipeline/src/deferred/node.rs +++ b/crates/bevy_core_pipeline/src/deferred/node.rs @@ -1,6 +1,8 @@ use bevy_ecs::{prelude::*, query::QueryItem}; +use bevy_render::experimental::occlusion_culling::OcclusionCulling; use bevy_render::render_graph::ViewNode; +use bevy_render::view::{ExtractedView, NoIndirectDrawing}; use bevy_render::{ camera::ExtractedCamera, render_graph::{NodeRunError, RenderGraphContext}, @@ -9,81 +11,159 @@ use bevy_render::{ renderer::RenderContext, view::ViewDepthTexture, }; -use bevy_utils::tracing::error; +use tracing::error; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; +use tracing::info_span; use crate::prepass::ViewPrepassTextures; use super::{AlphaMask3dDeferred, 
Opaque3dDeferred}; -/// Render node used by the prepass. +/// The phase of the deferred prepass that draws meshes that were visible last +/// frame. /// -/// By default, inserted before the main pass in the render graph. +/// If occlusion culling isn't in use, this prepass simply draws all meshes. +/// +/// Like all prepass nodes, this is inserted before the main pass in the render +/// graph. #[derive(Default)] -pub struct DeferredGBufferPrepassNode; +pub struct EarlyDeferredGBufferPrepassNode; + +impl ViewNode for EarlyDeferredGBufferPrepassNode { + type ViewQuery = ::ViewQuery; + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + view_query: QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + run_deferred_prepass( + graph, + render_context, + view_query, + false, + world, + "early deferred prepass", + ) + } +} -impl ViewNode for DeferredGBufferPrepassNode { +/// The phase of the prepass that runs after occlusion culling against the +/// meshes that were visible last frame. +/// +/// If occlusion culling isn't in use, this is a no-op. +/// +/// Like all prepass nodes, this is inserted before the main pass in the render +/// graph. 
+#[derive(Default)] +pub struct LateDeferredGBufferPrepassNode; + +impl ViewNode for LateDeferredGBufferPrepassNode { type ViewQuery = ( - Entity, &'static ExtractedCamera, + &'static ExtractedView, &'static ViewDepthTexture, &'static ViewPrepassTextures, + Has, + Has, ); fn run<'w>( &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, - (view, camera, view_depth_texture, view_prepass_textures): QueryItem<'w, Self::ViewQuery>, + view_query: QueryItem<'w, Self::ViewQuery>, world: &'w World, ) -> Result<(), NodeRunError> { - let (Some(opaque_deferred_phases), Some(alpha_mask_deferred_phases)) = ( - world.get_resource::>(), - world.get_resource::>(), - ) else { + let (_, _, _, _, occlusion_culling, no_indirect_drawing) = view_query; + if !occlusion_culling || no_indirect_drawing { return Ok(()); - }; + } - let (Some(opaque_deferred_phase), Some(alpha_mask_deferred_phase)) = ( - opaque_deferred_phases.get(&view), - alpha_mask_deferred_phases.get(&view), - ) else { - return Ok(()); - }; - - let mut color_attachments = vec![]; - color_attachments.push( - view_prepass_textures - .normal - .as_ref() - .map(|normals_texture| normals_texture.get_attachment()), - ); - color_attachments.push( - view_prepass_textures - .motion_vectors - .as_ref() - .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()), - ); - - // If we clear the deferred texture with LoadOp::Clear(Default::default()) we get these errors: - // Chrome: GL_INVALID_OPERATION: No defined conversion between clear value and attachment format. - // Firefox: WebGL warning: clearBufferu?[fi]v: This attachment is of type FLOAT, but this function is of type UINT. 
- // Appears to be unsupported: https://registry.khronos.org/webgl/specs/latest/2.0/#3.7.9 - // For webgl2 we fallback to manually clearing - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + run_deferred_prepass( + graph, + render_context, + view_query, + true, + world, + "late deferred prepass", + ) + } +} + +/// Runs the deferred prepass that draws all meshes to the depth buffer and +/// G-buffers. +/// +/// If occlusion culling isn't in use, and a prepass is enabled, then there's +/// only one prepass. If occlusion culling is in use, then any prepass is split +/// into two: an *early* prepass and a *late* prepass. The early prepass draws +/// what was visible last frame, and the last prepass performs occlusion culling +/// against a conservative hierarchical Z buffer before drawing unoccluded +/// meshes. +fn run_deferred_prepass<'w>( + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + (camera, extracted_view, view_depth_texture, view_prepass_textures, _, _): QueryItem< + 'w, + ::ViewQuery, + >, + is_late: bool, + world: &'w World, + label: &'static str, +) -> Result<(), NodeRunError> { + let (Some(opaque_deferred_phases), Some(alpha_mask_deferred_phases)) = ( + world.get_resource::>(), + world.get_resource::>(), + ) else { + return Ok(()); + }; + + let (Some(opaque_deferred_phase), Some(alpha_mask_deferred_phase)) = ( + opaque_deferred_phases.get(&extracted_view.retained_view_entity), + alpha_mask_deferred_phases.get(&extracted_view.retained_view_entity), + ) else { + return Ok(()); + }; + + let mut color_attachments = vec![]; + color_attachments.push( + view_prepass_textures + .normal + .as_ref() + .map(|normals_texture| normals_texture.get_attachment()), + ); + color_attachments.push( + view_prepass_textures + .motion_vectors + .as_ref() + .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()), + ); + + // If we clear the deferred texture with LoadOp::Clear(Default::default()) we get 
these errors: + // Chrome: GL_INVALID_OPERATION: No defined conversion between clear value and attachment format. + // Firefox: WebGL warning: clearBufferu?[fi]v: This attachment is of type FLOAT, but this function is of type UINT. + // Appears to be unsupported: https://registry.khronos.org/webgl/specs/latest/2.0/#3.7.9 + // For webgl2 we fallback to manually clearing + #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + if !is_late { if let Some(deferred_texture) = &view_prepass_textures.deferred { render_context.command_encoder().clear_texture( &deferred_texture.texture.texture, &bevy_render::render_resource::ImageSubresourceRange::default(), ); } + } - color_attachments.push( - view_prepass_textures - .deferred - .as_ref() - .map(|deferred_texture| { + color_attachments.push( + view_prepass_textures + .deferred + .as_ref() + .map(|deferred_texture| { + if is_late { + deferred_texture.get_attachment() + } else { #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] { bevy_render::render_resource::RenderPassColorAttachment { @@ -101,86 +181,82 @@ impl ViewNode for DeferredGBufferPrepassNode { feature = "webgpu" ))] deferred_texture.get_attachment() - }), - ); - - color_attachments.push( - view_prepass_textures - .deferred_lighting_pass_id - .as_ref() - .map(|deferred_lighting_pass_id| deferred_lighting_pass_id.get_attachment()), - ); - - // If all color attachments are none: clear the color attachment list so that no fragment shader is required - if color_attachments.iter().all(Option::is_none) { - color_attachments.clear(); - } + } + }), + ); - let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store)); + color_attachments.push( + view_prepass_textures + .deferred_lighting_pass_id + .as_ref() + .map(|deferred_lighting_pass_id| deferred_lighting_pass_id.get_attachment()), + ); + + // If all color attachments are none: clear the color attachment list so that no fragment shader is 
required + if color_attachments.iter().all(Option::is_none) { + color_attachments.clear(); + } + + let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store)); + + let view_entity = graph.view_entity(); + render_context.add_command_buffer_generation_task(move |render_device| { + #[cfg(feature = "trace")] + let _deferred_span = info_span!("deferred_prepass").entered(); + + // Command encoder setup + let mut command_encoder = render_device.create_command_encoder(&CommandEncoderDescriptor { + label: Some("deferred_prepass_command_encoder"), + }); + + // Render pass setup + let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some(label), + color_attachments: &color_attachments, + depth_stencil_attachment, + timestamp_writes: None, + occlusion_query_set: None, + }); + let mut render_pass = TrackedRenderPass::new(&render_device, render_pass); + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); + } - let view_entity = graph.view_entity(); - render_context.add_command_buffer_generation_task(move |render_device| { + // Opaque draws + if !opaque_deferred_phase.multidrawable_meshes.is_empty() + || !opaque_deferred_phase.batchable_meshes.is_empty() + || !opaque_deferred_phase.unbatchable_meshes.is_empty() + { #[cfg(feature = "trace")] - let _deferred_span = info_span!("deferred_prepass").entered(); - - // Command encoder setup - let mut command_encoder = - render_device.create_command_encoder(&CommandEncoderDescriptor { - label: Some("deferred_prepass_command_encoder"), - }); - - // Render pass setup - let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { - label: Some("deferred_prepass"), - color_attachments: &color_attachments, - depth_stencil_attachment, - timestamp_writes: None, - occlusion_query_set: None, - }); - let mut render_pass = TrackedRenderPass::new(&render_device, render_pass); - if let Some(viewport) = camera.viewport.as_ref() { - 
render_pass.set_camera_viewport(viewport); + let _opaque_prepass_span = info_span!("opaque_deferred_prepass").entered(); + if let Err(err) = opaque_deferred_phase.render(&mut render_pass, world, view_entity) { + error!("Error encountered while rendering the opaque deferred phase {err:?}"); } + } - // Opaque draws - if !opaque_deferred_phase.batchable_mesh_keys.is_empty() - || !opaque_deferred_phase.unbatchable_mesh_keys.is_empty() + // Alpha masked draws + if !alpha_mask_deferred_phase.is_empty() { + #[cfg(feature = "trace")] + let _alpha_mask_deferred_span = info_span!("alpha_mask_deferred_prepass").entered(); + if let Err(err) = alpha_mask_deferred_phase.render(&mut render_pass, world, view_entity) { - #[cfg(feature = "trace")] - let _opaque_prepass_span = info_span!("opaque_deferred_prepass").entered(); - if let Err(err) = opaque_deferred_phase.render(&mut render_pass, world, view_entity) - { - error!("Error encountered while rendering the opaque deferred phase {err:?}"); - } - } - - // Alpha masked draws - if !alpha_mask_deferred_phase.is_empty() { - #[cfg(feature = "trace")] - let _alpha_mask_deferred_span = info_span!("alpha_mask_deferred_prepass").entered(); - if let Err(err) = - alpha_mask_deferred_phase.render(&mut render_pass, world, view_entity) - { - error!( - "Error encountered while rendering the alpha mask deferred phase {err:?}" - ); - } + error!("Error encountered while rendering the alpha mask deferred phase {err:?}"); } + } - drop(render_pass); + drop(render_pass); - // After rendering to the view depth texture, copy it to the prepass depth texture - if let Some(prepass_depth_texture) = &view_prepass_textures.depth { - command_encoder.copy_texture_to_texture( - view_depth_texture.texture.as_image_copy(), - prepass_depth_texture.texture.texture.as_image_copy(), - view_prepass_textures.size, - ); - } + // After rendering to the view depth texture, copy it to the prepass depth texture + if let Some(prepass_depth_texture) = 
&view_prepass_textures.depth { + command_encoder.copy_texture_to_texture( + view_depth_texture.texture.as_image_copy(), + prepass_depth_texture.texture.texture.as_image_copy(), + view_prepass_textures.size, + ); + } - command_encoder.finish() - }); + command_encoder.finish() + }); - Ok(()) - } + Ok(()) } diff --git a/crates/bevy_core_pipeline/src/dof/mod.rs b/crates/bevy_core_pipeline/src/dof/mod.rs index 06cbbe3e9d312..87a10313f17a7 100644 --- a/crates/bevy_core_pipeline/src/dof/mod.rs +++ b/crates/bevy_core_pipeline/src/dof/mod.rs @@ -15,15 +15,16 @@ //! [Depth of field]: https://en.wikipedia.org/wiki/Depth_of_field use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ component::Component, entity::Entity, query::{QueryItem, With}, reflect::ReflectComponent, - schedule::IntoSystemConfigs as _, - system::{lifetimeless::Read, Commands, Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::BevyDefault as _; @@ -56,8 +57,9 @@ use bevy_render::{ }, Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; -use bevy_utils::{info_once, prelude::default, warn_once}; +use bevy_utils::{default, once}; use smallvec::SmallVec; +use tracing::{info, warn}; use crate::{ core_3d::{ @@ -67,7 +69,7 @@ use crate::{ fullscreen_vertex_shader::fullscreen_shader_vertex_state, }; -const DOF_SHADER_HANDLE: Handle = Handle::weak_from_u128(2031861180739216043); +const DOF_SHADER_HANDLE: Handle = weak_handle!("c3580ddc-2cbc-4535-a02b-9a2959066b52"); /// A plugin that adds support for the depth of field effect to Bevy. 
pub struct DepthOfFieldPlugin; @@ -77,7 +79,7 @@ pub struct DepthOfFieldPlugin; /// /// [depth of field]: https://en.wikipedia.org/wiki/Depth_of_field #[derive(Component, Clone, Copy, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Clone, Default)] pub struct DepthOfField { /// The appearance of the effect. pub mode: DepthOfFieldMode, @@ -119,12 +121,9 @@ pub struct DepthOfField { pub max_depth: f32, } -#[deprecated(since = "0.15.0", note = "Renamed to `DepthOfField`")] -pub type DepthOfFieldSettings = DepthOfField; - /// Controls the appearance of the effect. #[derive(Clone, Copy, Default, PartialEq, Debug, Reflect)] -#[reflect(Default, PartialEq)] +#[reflect(Default, Clone, PartialEq)] pub enum DepthOfFieldMode { /// A more accurate simulation, in which circles of confusion generate /// "spots" of light. @@ -384,7 +383,9 @@ impl ViewNode for DepthOfFieldNode { auxiliary_dof_texture, view_bind_group_layouts.dual_input.as_ref(), ) else { - warn_once!("Should have created the auxiliary depth of field texture by now"); + once!(warn!( + "Should have created the auxiliary depth of field texture by now" + )); continue; }; render_context.render_device().create_bind_group( @@ -426,7 +427,9 @@ impl ViewNode for DepthOfFieldNode { // `prepare_auxiliary_depth_of_field_textures``. 
if pipeline_render_info.is_dual_output { let Some(auxiliary_dof_texture) = auxiliary_dof_texture else { - warn_once!("Should have created the auxiliary depth of field texture by now"); + once!(warn!( + "Should have created the auxiliary depth of field texture by now" + )); continue; }; color_attachments.push(Some(RenderPassColorAttachment { @@ -818,9 +821,9 @@ fn extract_depth_of_field_settings( mut query: Extract>, ) { if !DEPTH_TEXTURE_SAMPLING_SUPPORTED { - info_once!( + once!(info!( "Disabling depth of field on this platform because depth textures aren't supported correctly" - ); + )); return; } diff --git a/crates/bevy_pbr/src/meshlet/downsample_depth.wgsl b/crates/bevy_core_pipeline/src/experimental/mip_generation/downsample_depth.wgsl similarity index 89% rename from crates/bevy_pbr/src/meshlet/downsample_depth.wgsl rename to crates/bevy_core_pipeline/src/experimental/mip_generation/downsample_depth.wgsl index 80dd7d4baafd4..12a4d2b178bff 100644 --- a/crates/bevy_pbr/src/meshlet/downsample_depth.wgsl +++ b/crates/bevy_core_pipeline/src/experimental/mip_generation/downsample_depth.wgsl @@ -1,8 +1,16 @@ #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT -@group(0) @binding(0) var mip_0: array; // Per pixel +@group(0) @binding(0) var mip_0: texture_storage_2d; #else -@group(0) @binding(0) var mip_0: array; // Per pixel -#endif +#ifdef MESHLET +@group(0) @binding(0) var mip_0: texture_storage_2d; +#else // MESHLET +#ifdef MULTISAMPLE +@group(0) @binding(0) var mip_0: texture_depth_multisampled_2d; +#else // MULTISAMPLE +@group(0) @binding(0) var mip_0: texture_depth_2d; +#endif // MULTISAMPLE +#endif // MESHLET +#endif // MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT @group(0) @binding(1) var mip_1: texture_storage_2d; @group(0) @binding(2) var mip_2: texture_storage_2d; @group(0) @binding(3) var mip_3: texture_storage_2d; @@ -16,7 +24,7 @@ @group(0) @binding(11) var mip_11: texture_storage_2d; @group(0) @binding(12) var mip_12: texture_storage_2d; @group(0) 
@binding(13) var samplr: sampler; -struct Constants { max_mip_level: u32, view_width: u32 } +struct Constants { max_mip_level: u32 } var constants: Constants; /// Generates a hierarchical depth buffer. @@ -31,7 +39,6 @@ var intermediate_memory: array, 16>; @compute @workgroup_size(256, 1, 1) fn downsample_depth_first( - @builtin(num_workgroups) num_workgroups: vec3u, @builtin(workgroup_id) workgroup_id: vec3u, @builtin(local_invocation_index) local_invocation_index: u32, ) { @@ -301,12 +308,29 @@ fn reduce_load_mip_6(tex: vec2u) -> f32 { } fn load_mip_0(x: u32, y: u32) -> f32 { - let i = y * constants.view_width + x; #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - return bitcast(u32(mip_0[i] >> 32u)); -#else - return bitcast(mip_0[i]); -#endif + let visibility = textureLoad(mip_0, vec2(x, y)).r; + return bitcast(u32(visibility >> 32u)); +#else // MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT +#ifdef MESHLET + let visibility = textureLoad(mip_0, vec2(x, y)).r; + return bitcast(visibility); +#else // MESHLET + // Downsample the top level. +#ifdef MULTISAMPLE + // The top level is multisampled, so we need to loop over all the samples + // and reduce them to 1. + var result = textureLoad(mip_0, vec2(x, y), 0); + let sample_count = i32(textureNumSamples(mip_0)); + for (var sample = 1; sample < sample_count; sample += 1) { + result = min(result, textureLoad(mip_0, vec2(x, y), sample)); + } + return result; +#else // MULTISAMPLE + return textureLoad(mip_0, vec2(x, y), 0); +#endif // MULTISAMPLE +#endif // MESHLET +#endif // MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT } fn reduce_4(v: vec4f) -> f32 { diff --git a/crates/bevy_core_pipeline/src/experimental/mip_generation/mod.rs b/crates/bevy_core_pipeline/src/experimental/mip_generation/mod.rs new file mode 100644 index 0000000000000..cd2099e49e23a --- /dev/null +++ b/crates/bevy_core_pipeline/src/experimental/mip_generation/mod.rs @@ -0,0 +1,779 @@ +//! Downsampling of textures to produce mipmap levels. +//! +//! 
Currently, this module only supports generation of hierarchical Z buffers +//! for occlusion culling. It's marked experimental because the shader is +//! designed only for power-of-two texture sizes and is slightly incorrect for +//! non-power-of-two depth buffer sizes. + +use core::array; + +use crate::core_3d::{ + graph::{Core3d, Node3d}, + prepare_core_3d_depth_textures, +}; +use bevy_app::{App, Plugin}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{ + component::Component, + entity::Entity, + prelude::{resource_exists, Without}, + query::{Or, QueryState, With}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{lifetimeless::Read, Commands, Local, Query, Res, ResMut}, + world::{FromWorld, World}, +}; +use bevy_math::{uvec2, UVec2, Vec4Swizzles as _}; +use bevy_render::batching::gpu_preprocessing::GpuPreprocessingSupport; +use bevy_render::{ + experimental::occlusion_culling::{ + OcclusionCulling, OcclusionCullingSubview, OcclusionCullingSubviewEntities, + }, + render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext}, + render_resource::{ + binding_types::{sampler, texture_2d, texture_2d_multisampled, texture_storage_2d}, + BindGroup, BindGroupEntries, BindGroupLayout, BindGroupLayoutEntries, + CachedComputePipelineId, ComputePassDescriptor, ComputePipeline, ComputePipelineDescriptor, + Extent3d, IntoBinding, PipelineCache, PushConstantRange, Sampler, SamplerBindingType, + SamplerDescriptor, Shader, ShaderStages, SpecializedComputePipeline, + SpecializedComputePipelines, StorageTextureAccess, TextureAspect, TextureDescriptor, + TextureDimension, TextureFormat, TextureSampleType, TextureUsages, TextureView, + TextureViewDescriptor, TextureViewDimension, + }, + renderer::{RenderContext, RenderDevice}, + texture::TextureCache, + view::{ExtractedView, NoIndirectDrawing, ViewDepthTexture}, + Render, RenderApp, RenderSet, +}; +use bitflags::bitflags; +use 
tracing::debug; + +/// Identifies the `downsample_depth.wgsl` shader. +pub const DOWNSAMPLE_DEPTH_SHADER_HANDLE: Handle = + weak_handle!("a09a149e-5922-4fa4-9170-3c1a13065364"); + +/// The maximum number of mip levels that we can produce. +/// +/// 2^12 is 4096, so that's the maximum size of the depth buffer that we +/// support. +pub const DEPTH_PYRAMID_MIP_COUNT: usize = 12; + +/// A plugin that allows Bevy to repeatedly downsample textures to create +/// mipmaps. +/// +/// Currently, this is only used for hierarchical Z buffer generation for the +/// purposes of occlusion culling. +pub struct MipGenerationPlugin; + +impl Plugin for MipGenerationPlugin { + fn build(&self, app: &mut App) { + load_internal_asset!( + app, + DOWNSAMPLE_DEPTH_SHADER_HANDLE, + "downsample_depth.wgsl", + Shader::from_wgsl + ); + + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + render_app + .init_resource::>() + .add_render_graph_node::(Core3d, Node3d::EarlyDownsampleDepth) + .add_render_graph_node::(Core3d, Node3d::LateDownsampleDepth) + .add_render_graph_edges( + Core3d, + ( + Node3d::EarlyPrepass, + Node3d::EarlyDeferredPrepass, + Node3d::EarlyDownsampleDepth, + Node3d::LatePrepass, + Node3d::LateDeferredPrepass, + ), + ) + .add_render_graph_edges( + Core3d, + ( + Node3d::EndMainPass, + Node3d::LateDownsampleDepth, + Node3d::EndMainPassPostProcessing, + ), + ) + .add_systems( + Render, + create_downsample_depth_pipelines.in_set(RenderSet::Prepare), + ) + .add_systems( + Render, + ( + prepare_view_depth_pyramids, + prepare_downsample_depth_view_bind_groups, + ) + .chain() + .in_set(RenderSet::PrepareResources) + .run_if(resource_exists::) + .after(prepare_core_3d_depth_textures), + ); + } + + fn finish(&self, app: &mut App) { + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + render_app.init_resource::(); + } +} + +/// The nodes that produce a hierarchical Z-buffer, also known as a depth +/// pyramid. 
+/// +/// This runs the single-pass downsampling (SPD) shader with the *min* filter in +/// order to generate a series of mipmaps for the Z buffer. The resulting +/// hierarchical Z-buffer can be used for occlusion culling. +/// +/// There are two instances of this node. The *early* downsample depth pass is +/// the first hierarchical Z-buffer stage, which runs after the early prepass +/// and before the late prepass. It prepares the Z-buffer for the bounding box +/// tests that the late mesh preprocessing stage will perform. The *late* +/// downsample depth pass runs at the end of the main phase. It prepares the +/// Z-buffer for the occlusion culling that the early mesh preprocessing phase +/// of the *next* frame will perform. +/// +/// This node won't do anything if occlusion culling isn't on. +pub struct DownsampleDepthNode { + /// The query that we use to find views that need occlusion culling for + /// their Z-buffer. + main_view_query: QueryState<( + Read, + Read, + Read, + Option>, + )>, + /// The query that we use to find shadow maps that need occlusion culling. + shadow_view_query: QueryState<( + Read, + Read, + Read, + )>, +} + +impl FromWorld for DownsampleDepthNode { + fn from_world(world: &mut World) -> Self { + Self { + main_view_query: QueryState::new(world), + shadow_view_query: QueryState::new(world), + } + } +} + +impl Node for DownsampleDepthNode { + fn update(&mut self, world: &mut World) { + self.main_view_query.update_archetypes(world); + self.shadow_view_query.update_archetypes(world); + } + + fn run<'w>( + &self, + render_graph_context: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let Ok(( + view_depth_pyramid, + view_downsample_depth_bind_group, + view_depth_texture, + maybe_view_light_entities, + )) = self + .main_view_query + .get_manual(world, render_graph_context.view_entity()) + else { + return Ok(()); + }; + + // Downsample depth for the main Z-buffer. 
+ downsample_depth( + render_graph_context, + render_context, + world, + view_depth_pyramid, + view_downsample_depth_bind_group, + uvec2( + view_depth_texture.texture.width(), + view_depth_texture.texture.height(), + ), + view_depth_texture.texture.sample_count(), + )?; + + // Downsample depth for shadow maps that have occlusion culling enabled. + if let Some(view_light_entities) = maybe_view_light_entities { + for &view_light_entity in &view_light_entities.0 { + let Ok((view_depth_pyramid, view_downsample_depth_bind_group, occlusion_culling)) = + self.shadow_view_query.get_manual(world, view_light_entity) + else { + continue; + }; + downsample_depth( + render_graph_context, + render_context, + world, + view_depth_pyramid, + view_downsample_depth_bind_group, + UVec2::splat(occlusion_culling.depth_texture_size), + 1, + )?; + } + } + + Ok(()) + } +} + +/// Produces a depth pyramid from the current depth buffer for a single view. +/// The resulting depth pyramid can be used for occlusion testing. +fn downsample_depth<'w>( + render_graph_context: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + view_depth_pyramid: &ViewDepthPyramid, + view_downsample_depth_bind_group: &ViewDownsampleDepthBindGroup, + view_size: UVec2, + sample_count: u32, +) -> Result<(), NodeRunError> { + let downsample_depth_pipelines = world.resource::(); + let pipeline_cache = world.resource::(); + + // Despite the name "single-pass downsampling", we actually need two + // passes because of the lack of `coherent` buffers in WGPU/WGSL. + // Between each pass, there's an implicit synchronization barrier. + + // Fetch the appropriate pipeline ID, depending on whether the depth + // buffer is multisampled or not. 
+ let (Some(first_downsample_depth_pipeline_id), Some(second_downsample_depth_pipeline_id)) = + (if sample_count > 1 { + ( + downsample_depth_pipelines.first_multisample.pipeline_id, + downsample_depth_pipelines.second_multisample.pipeline_id, + ) + } else { + ( + downsample_depth_pipelines.first.pipeline_id, + downsample_depth_pipelines.second.pipeline_id, + ) + }) + else { + return Ok(()); + }; + + // Fetch the pipelines for the two passes. + let (Some(first_downsample_depth_pipeline), Some(second_downsample_depth_pipeline)) = ( + pipeline_cache.get_compute_pipeline(first_downsample_depth_pipeline_id), + pipeline_cache.get_compute_pipeline(second_downsample_depth_pipeline_id), + ) else { + return Ok(()); + }; + + // Run the depth downsampling. + view_depth_pyramid.downsample_depth( + &format!("{:?}", render_graph_context.label()), + render_context, + view_size, + view_downsample_depth_bind_group, + first_downsample_depth_pipeline, + second_downsample_depth_pipeline, + ); + Ok(()) +} + +/// A single depth downsample pipeline. +#[derive(Resource)] +pub struct DownsampleDepthPipeline { + /// The bind group layout for this pipeline. + bind_group_layout: BindGroupLayout, + /// A handle that identifies the compiled shader. + pipeline_id: Option, +} + +impl DownsampleDepthPipeline { + /// Creates a new [`DownsampleDepthPipeline`] from a bind group layout. + /// + /// This doesn't actually specialize the pipeline; that must be done + /// afterward. + fn new(bind_group_layout: BindGroupLayout) -> DownsampleDepthPipeline { + DownsampleDepthPipeline { + bind_group_layout, + pipeline_id: None, + } + } +} + +/// Stores all depth buffer downsampling pipelines. +#[derive(Resource)] +pub struct DownsampleDepthPipelines { + /// The first pass of the pipeline, when the depth buffer is *not* + /// multisampled. + first: DownsampleDepthPipeline, + /// The second pass of the pipeline, when the depth buffer is *not* + /// multisampled. 
+ second: DownsampleDepthPipeline, + /// The first pass of the pipeline, when the depth buffer is multisampled. + first_multisample: DownsampleDepthPipeline, + /// The second pass of the pipeline, when the depth buffer is multisampled. + second_multisample: DownsampleDepthPipeline, + /// The sampler that the depth downsampling shader uses to sample the depth + /// buffer. + sampler: Sampler, +} + +/// Creates the [`DownsampleDepthPipelines`] if downsampling is supported on the +/// current platform. +fn create_downsample_depth_pipelines( + mut commands: Commands, + render_device: Res, + pipeline_cache: Res, + mut specialized_compute_pipelines: ResMut>, + gpu_preprocessing_support: Res, + mut has_run: Local, +) { + // Only run once. + // We can't use a `resource_exists` or similar run condition here because + // this function might fail to create downsample depth pipelines if the + // current platform doesn't support compute shaders. + if *has_run { + return; + } + *has_run = true; + + if !gpu_preprocessing_support.is_culling_supported() { + debug!("Downsample depth is not supported on this platform."); + return; + } + + // Create the bind group layouts. The bind group layouts are identical + // between the first and second passes, so the only thing we need to + // treat specially is the type of the first mip level (non-multisampled + // or multisampled). + let standard_bind_group_layout = + create_downsample_depth_bind_group_layout(&render_device, false); + let multisampled_bind_group_layout = + create_downsample_depth_bind_group_layout(&render_device, true); + + // Create the depth pyramid sampler. This is shared among all shaders. + let sampler = render_device.create_sampler(&SamplerDescriptor { + label: Some("depth pyramid sampler"), + ..SamplerDescriptor::default() + }); + + // Initialize the pipelines. 
+ let mut downsample_depth_pipelines = DownsampleDepthPipelines { + first: DownsampleDepthPipeline::new(standard_bind_group_layout.clone()), + second: DownsampleDepthPipeline::new(standard_bind_group_layout.clone()), + first_multisample: DownsampleDepthPipeline::new(multisampled_bind_group_layout.clone()), + second_multisample: DownsampleDepthPipeline::new(multisampled_bind_group_layout.clone()), + sampler, + }; + + // Specialize each pipeline with the appropriate + // `DownsampleDepthPipelineKey`. + downsample_depth_pipelines.first.pipeline_id = Some(specialized_compute_pipelines.specialize( + &pipeline_cache, + &downsample_depth_pipelines.first, + DownsampleDepthPipelineKey::empty(), + )); + downsample_depth_pipelines.second.pipeline_id = Some(specialized_compute_pipelines.specialize( + &pipeline_cache, + &downsample_depth_pipelines.second, + DownsampleDepthPipelineKey::SECOND_PHASE, + )); + downsample_depth_pipelines.first_multisample.pipeline_id = + Some(specialized_compute_pipelines.specialize( + &pipeline_cache, + &downsample_depth_pipelines.first_multisample, + DownsampleDepthPipelineKey::MULTISAMPLE, + )); + downsample_depth_pipelines.second_multisample.pipeline_id = + Some(specialized_compute_pipelines.specialize( + &pipeline_cache, + &downsample_depth_pipelines.second_multisample, + DownsampleDepthPipelineKey::SECOND_PHASE | DownsampleDepthPipelineKey::MULTISAMPLE, + )); + + commands.insert_resource(downsample_depth_pipelines); +} + +/// Creates a single bind group layout for the downsample depth pass. 
+fn create_downsample_depth_bind_group_layout( + render_device: &RenderDevice, + is_multisampled: bool, +) -> BindGroupLayout { + render_device.create_bind_group_layout( + if is_multisampled { + "downsample multisample depth bind group layout" + } else { + "downsample depth bind group layout" + }, + &BindGroupLayoutEntries::sequential( + ShaderStages::COMPUTE, + ( + // We only care about the multisample status of the depth buffer + // for the first mip level. After the first mip level is + // sampled, we drop to a single sample. + if is_multisampled { + texture_2d_multisampled(TextureSampleType::Depth) + } else { + texture_2d(TextureSampleType::Depth) + }, + // All the mip levels follow: + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::ReadWrite), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly), + sampler(SamplerBindingType::NonFiltering), + ), + ), + ) +} + +bitflags! { + /// Uniquely identifies a configuration of the downsample depth shader. 
+ /// + /// Note that meshlets maintain their downsample depth shaders on their own + /// and don't use this infrastructure; thus there's no flag for meshlets in + /// here, even though the shader has defines for it. + #[derive(Clone, Copy, PartialEq, Eq, Hash)] + pub struct DownsampleDepthPipelineKey: u8 { + /// True if the depth buffer is multisampled. + const MULTISAMPLE = 1; + /// True if this shader is the second phase of the downsample depth + /// process; false if this shader is the first phase. + const SECOND_PHASE = 2; + } +} + +impl SpecializedComputePipeline for DownsampleDepthPipeline { + type Key = DownsampleDepthPipelineKey; + + fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor { + let mut shader_defs = vec![]; + if key.contains(DownsampleDepthPipelineKey::MULTISAMPLE) { + shader_defs.push("MULTISAMPLE".into()); + } + + let label = format!( + "downsample depth{}{} pipeline", + if key.contains(DownsampleDepthPipelineKey::MULTISAMPLE) { + " multisample" + } else { + "" + }, + if key.contains(DownsampleDepthPipelineKey::SECOND_PHASE) { + " second phase" + } else { + " first phase" + } + ) + .into(); + + ComputePipelineDescriptor { + label: Some(label), + layout: vec![self.bind_group_layout.clone()], + push_constant_ranges: vec![PushConstantRange { + stages: ShaderStages::COMPUTE, + range: 0..4, + }], + shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, + shader_defs, + entry_point: if key.contains(DownsampleDepthPipelineKey::SECOND_PHASE) { + "downsample_depth_second".into() + } else { + "downsample_depth_first".into() + }, + zero_initialize_workgroup_memory: false, + } + } +} + +/// Stores a placeholder texture that can be bound to a depth pyramid binding if +/// no depth pyramid is needed. 
+#[derive(Resource, Deref, DerefMut)] +pub struct DepthPyramidDummyTexture(TextureView); + +impl FromWorld for DepthPyramidDummyTexture { + fn from_world(world: &mut World) -> Self { + let render_device = world.resource::(); + + DepthPyramidDummyTexture(create_depth_pyramid_dummy_texture( + render_device, + "depth pyramid dummy texture", + "depth pyramid dummy texture view", + )) + } +} + +/// Creates a placeholder texture that can be bound to a depth pyramid binding +/// if no depth pyramid is needed. +pub fn create_depth_pyramid_dummy_texture( + render_device: &RenderDevice, + texture_label: &'static str, + texture_view_label: &'static str, +) -> TextureView { + render_device + .create_texture(&TextureDescriptor { + label: Some(texture_label), + size: Extent3d { + width: 1, + height: 1, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D2, + format: TextureFormat::R32Float, + usage: TextureUsages::STORAGE_BINDING, + view_formats: &[], + }) + .create_view(&TextureViewDescriptor { + label: Some(texture_view_label), + format: Some(TextureFormat::R32Float), + dimension: Some(TextureViewDimension::D2), + usage: None, + aspect: TextureAspect::All, + base_mip_level: 0, + mip_level_count: Some(1), + base_array_layer: 0, + array_layer_count: Some(1), + }) +} + +/// Stores a hierarchical Z-buffer for a view, which is a series of mipmaps +/// useful for efficient occlusion culling. +/// +/// This will only be present on a view when occlusion culling is enabled. +#[derive(Component)] +pub struct ViewDepthPyramid { + /// A texture view containing the entire depth texture. + pub all_mips: TextureView, + /// A series of texture views containing one mip level each. + pub mips: [TextureView; DEPTH_PYRAMID_MIP_COUNT], + /// The total number of mipmap levels. + /// + /// This is the base-2 logarithm of the greatest dimension of the depth + /// buffer, rounded up. 
+ pub mip_count: u32, +} + +impl ViewDepthPyramid { + /// Allocates a new depth pyramid for a depth buffer with the given size. + pub fn new( + render_device: &RenderDevice, + texture_cache: &mut TextureCache, + depth_pyramid_dummy_texture: &TextureView, + size: UVec2, + texture_label: &'static str, + texture_view_label: &'static str, + ) -> ViewDepthPyramid { + // Calculate the size of the depth pyramid. + let depth_pyramid_size = Extent3d { + width: size.x.div_ceil(2), + height: size.y.div_ceil(2), + depth_or_array_layers: 1, + }; + + // Calculate the number of mip levels we need. + let depth_pyramid_mip_count = depth_pyramid_size.max_mips(TextureDimension::D2); + + // Create the depth pyramid. + let depth_pyramid = texture_cache.get( + render_device, + TextureDescriptor { + label: Some(texture_label), + size: depth_pyramid_size, + mip_level_count: depth_pyramid_mip_count, + sample_count: 1, + dimension: TextureDimension::D2, + format: TextureFormat::R32Float, + usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING, + view_formats: &[], + }, + ); + + // Create individual views for each level of the depth pyramid. + let depth_pyramid_mips = array::from_fn(|i| { + if (i as u32) < depth_pyramid_mip_count { + depth_pyramid.texture.create_view(&TextureViewDescriptor { + label: Some(texture_view_label), + format: Some(TextureFormat::R32Float), + dimension: Some(TextureViewDimension::D2), + usage: None, + aspect: TextureAspect::All, + base_mip_level: i as u32, + mip_level_count: Some(1), + base_array_layer: 0, + array_layer_count: Some(1), + }) + } else { + (*depth_pyramid_dummy_texture).clone() + } + }); + + // Create the view for the depth pyramid as a whole. 
+ let depth_pyramid_all_mips = depth_pyramid.default_view.clone(); + + Self { + all_mips: depth_pyramid_all_mips, + mips: depth_pyramid_mips, + mip_count: depth_pyramid_mip_count, + } + } + + /// Creates a bind group that allows the depth buffer to be attached to the + /// `downsample_depth.wgsl` shader. + pub fn create_bind_group<'a, R>( + &'a self, + render_device: &RenderDevice, + label: &'static str, + bind_group_layout: &BindGroupLayout, + source_image: R, + sampler: &'a Sampler, + ) -> BindGroup + where + R: IntoBinding<'a>, + { + render_device.create_bind_group( + label, + bind_group_layout, + &BindGroupEntries::sequential(( + source_image, + &self.mips[0], + &self.mips[1], + &self.mips[2], + &self.mips[3], + &self.mips[4], + &self.mips[5], + &self.mips[6], + &self.mips[7], + &self.mips[8], + &self.mips[9], + &self.mips[10], + &self.mips[11], + sampler, + )), + ) + } + + /// Invokes the shaders to generate the hierarchical Z-buffer. + /// + /// This is intended to be invoked as part of a render node. + pub fn downsample_depth( + &self, + label: &str, + render_context: &mut RenderContext, + view_size: UVec2, + downsample_depth_bind_group: &BindGroup, + downsample_depth_first_pipeline: &ComputePipeline, + downsample_depth_second_pipeline: &ComputePipeline, + ) { + let command_encoder = render_context.command_encoder(); + let mut downsample_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor { + label: Some(label), + timestamp_writes: None, + }); + downsample_pass.set_pipeline(downsample_depth_first_pipeline); + // Pass the mip count as a push constant, for simplicity. 
+ downsample_pass.set_push_constants(0, &self.mip_count.to_le_bytes()); + downsample_pass.set_bind_group(0, downsample_depth_bind_group, &[]); + downsample_pass.dispatch_workgroups(view_size.x.div_ceil(64), view_size.y.div_ceil(64), 1); + + if self.mip_count >= 7 { + downsample_pass.set_pipeline(downsample_depth_second_pipeline); + downsample_pass.dispatch_workgroups(1, 1, 1); + } + } +} + +/// Creates depth pyramids for views that have occlusion culling enabled. +pub fn prepare_view_depth_pyramids( + mut commands: Commands, + render_device: Res, + mut texture_cache: ResMut, + depth_pyramid_dummy_texture: Res, + views: Query<(Entity, &ExtractedView), (With, Without)>, +) { + for (view_entity, view) in &views { + commands.entity(view_entity).insert(ViewDepthPyramid::new( + &render_device, + &mut texture_cache, + &depth_pyramid_dummy_texture, + view.viewport.zw(), + "view depth pyramid texture", + "view depth pyramid texture view", + )); + } +} + +/// The bind group that we use to attach the depth buffer and depth pyramid for +/// a view to the `downsample_depth.wgsl` shader. +/// +/// This will only be present for a view if occlusion culling is enabled. +#[derive(Component, Deref, DerefMut)] +pub struct ViewDownsampleDepthBindGroup(BindGroup); + +/// Creates the [`ViewDownsampleDepthBindGroup`]s for all views with occlusion +/// culling enabled. 
+fn prepare_downsample_depth_view_bind_groups( + mut commands: Commands, + render_device: Res, + downsample_depth_pipelines: Res, + view_depth_textures: Query< + ( + Entity, + &ViewDepthPyramid, + Option<&ViewDepthTexture>, + Option<&OcclusionCullingSubview>, + ), + Or<(With, With)>, + >, +) { + for (view_entity, view_depth_pyramid, view_depth_texture, shadow_occlusion_culling) in + &view_depth_textures + { + let is_multisampled = view_depth_texture + .is_some_and(|view_depth_texture| view_depth_texture.texture.sample_count() > 1); + commands + .entity(view_entity) + .insert(ViewDownsampleDepthBindGroup( + view_depth_pyramid.create_bind_group( + &render_device, + if is_multisampled { + "downsample multisample depth bind group" + } else { + "downsample depth bind group" + }, + if is_multisampled { + &downsample_depth_pipelines + .first_multisample + .bind_group_layout + } else { + &downsample_depth_pipelines.first.bind_group_layout + }, + match (view_depth_texture, shadow_occlusion_culling) { + (Some(view_depth_texture), _) => view_depth_texture.view(), + (None, Some(shadow_occlusion_culling)) => { + &shadow_occlusion_culling.depth_texture_view + } + (None, None) => panic!("Should never happen"), + }, + &downsample_depth_pipelines.sampler, + ), + )); + } +} diff --git a/crates/bevy_core_pipeline/src/experimental/mod.rs b/crates/bevy_core_pipeline/src/experimental/mod.rs new file mode 100644 index 0000000000000..071eb97d86b73 --- /dev/null +++ b/crates/bevy_core_pipeline/src/experimental/mod.rs @@ -0,0 +1,7 @@ +//! Experimental rendering features. +//! +//! Experimental features are features with known problems, missing features, +//! compatibility issues, low performance, and/or future breaking changes, but +//! are included nonetheless for testing purposes. 
+ +pub mod mip_generation; diff --git a/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/mod.rs b/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/mod.rs index d01c34477503d..fee17d1ec6412 100644 --- a/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/mod.rs +++ b/crates/bevy_core_pipeline/src/fullscreen_vertex_shader/mod.rs @@ -1,7 +1,8 @@ -use bevy_asset::Handle; +use bevy_asset::{weak_handle, Handle}; use bevy_render::{prelude::Shader, render_resource::VertexState}; -pub const FULLSCREEN_SHADER_HANDLE: Handle = Handle::weak_from_u128(7837534426033940724); +pub const FULLSCREEN_SHADER_HANDLE: Handle = + weak_handle!("481fb759-d0b1-4175-8319-c439acde30a2"); /// uses the [`FULLSCREEN_SHADER_HANDLE`] to output a /// ```wgsl diff --git a/crates/bevy_core_pipeline/src/lib.rs b/crates/bevy_core_pipeline/src/lib.rs index e94daa90f4bdc..9e046142760a9 100644 --- a/crates/bevy_core_pipeline/src/lib.rs +++ b/crates/bevy_core_pipeline/src/lib.rs @@ -9,66 +9,44 @@ pub mod auto_exposure; pub mod blit; pub mod bloom; -pub mod contrast_adaptive_sharpening; pub mod core_2d; pub mod core_3d; pub mod deferred; pub mod dof; +pub mod experimental; pub mod fullscreen_vertex_shader; -pub mod fxaa; pub mod motion_blur; pub mod msaa_writeback; pub mod oit; pub mod post_process; pub mod prepass; mod skybox; -pub mod smaa; -mod taa; pub mod tonemapping; pub mod upscaling; pub use skybox::Skybox; -/// Experimental features that are not yet finished. Please report any issues you encounter! -/// -/// Expect bugs, missing features, compatibility issues, low performance, and/or future breaking changes. -pub mod experimental { - #[expect(deprecated)] - pub mod taa { - pub use crate::taa::{ - TemporalAntiAliasBundle, TemporalAntiAliasNode, TemporalAntiAliasPlugin, - TemporalAntiAliasSettings, TemporalAntiAliasing, - }; - } -} - /// The core pipeline prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. 
-#[expect(deprecated)] pub mod prelude { #[doc(hidden)] - pub use crate::{ - core_2d::{Camera2d, Camera2dBundle}, - core_3d::{Camera3d, Camera3dBundle}, - }; + pub use crate::{core_2d::Camera2d, core_3d::Camera3d}; } use crate::{ blit::BlitPlugin, bloom::BloomPlugin, - contrast_adaptive_sharpening::CasPlugin, core_2d::Core2dPlugin, core_3d::Core3dPlugin, deferred::copy_lighting_id::CopyDeferredLightingIdPlugin, dof::DepthOfFieldPlugin, + experimental::mip_generation::MipGenerationPlugin, fullscreen_vertex_shader::FULLSCREEN_SHADER_HANDLE, - fxaa::FxaaPlugin, motion_blur::MotionBlurPlugin, msaa_writeback::MsaaWritebackPlugin, post_process::PostProcessingPlugin, prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass}, - smaa::SmaaPlugin, tonemapping::TonemappingPlugin, upscaling::UpscalingPlugin, }; @@ -93,22 +71,18 @@ impl Plugin for CorePipelinePlugin { .register_type::() .register_type::() .register_type::() + .add_plugins((Core2dPlugin, Core3dPlugin, CopyDeferredLightingIdPlugin)) .add_plugins(( - Core2dPlugin, - Core3dPlugin, - CopyDeferredLightingIdPlugin, BlitPlugin, MsaaWritebackPlugin, TonemappingPlugin, UpscalingPlugin, BloomPlugin, - FxaaPlugin, - CasPlugin, MotionBlurPlugin, DepthOfFieldPlugin, - SmaaPlugin, PostProcessingPlugin, OrderIndependentTransparencyPlugin, + MipGenerationPlugin, )); } } diff --git a/crates/bevy_core_pipeline/src/motion_blur/mod.rs b/crates/bevy_core_pipeline/src/motion_blur/mod.rs index c6eb8524ca3ef..5898f1a8c5ce3 100644 --- a/crates/bevy_core_pipeline/src/motion_blur/mod.rs +++ b/crates/bevy_core_pipeline/src/motion_blur/mod.rs @@ -2,20 +2,17 @@ //! //! Add the [`MotionBlur`] component to a camera to enable motion blur. 
-#![expect(deprecated)] - use crate::{ core_3d::graph::{Core3d, Node3d}, prepass::{DepthPrepass, MotionVectorPrepass}, }; use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::{ - bundle::Bundle, - component::{require, Component}, - query::With, + component::Component, + query::{QueryItem, With}, reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ @@ -29,18 +26,6 @@ use bevy_render::{ pub mod node; pub mod pipeline; -/// Adds [`MotionBlur`] and the required depth and motion vector prepasses to a camera entity. -#[derive(Bundle, Default)] -#[deprecated( - since = "0.15.0", - note = "Use the `MotionBlur` component instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct MotionBlurBundle { - pub motion_blur: MotionBlur, - pub depth_prepass: DepthPrepass, - pub motion_vector_prepass: MotionVectorPrepass, -} - /// A component that enables and configures motion blur when added to a camera. /// /// Motion blur is an effect that simulates how moving objects blur as they change position during @@ -71,9 +56,8 @@ pub struct MotionBlurBundle { /// )); /// # } /// ```` -#[derive(Reflect, Component, Clone, ExtractComponent, ShaderType)] -#[reflect(Component, Default)] -#[extract_component_filter(With)] +#[derive(Reflect, Component, Clone)] +#[reflect(Component, Default, Clone)] #[require(DepthPrepass, MotionVectorPrepass)] pub struct MotionBlur { /// The strength of motion blur from `0.0` to `1.0`. @@ -106,9 +90,6 @@ pub struct MotionBlur { /// Setting this to `3` will result in `3 * 2 + 1 = 7` samples. Setting this to `0` is /// equivalent to disabling motion blur. pub samples: u32, - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] - // WebGL2 structs must be 16 byte aligned. 
- pub _webgl2_padding: bevy_math::Vec2, } impl Default for MotionBlur { @@ -116,14 +97,37 @@ impl Default for MotionBlur { Self { shutter_angle: 0.5, samples: 1, + } + } +} + +impl ExtractComponent for MotionBlur { + type QueryData = &'static Self; + type QueryFilter = With; + type Out = MotionBlurUniform; + + fn extract_component(item: QueryItem) -> Option { + Some(MotionBlurUniform { + shutter_angle: item.shutter_angle, + samples: item.samples, #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] _webgl2_padding: Default::default(), - } + }) } } +#[doc(hidden)] +#[derive(Component, ShaderType, Clone)] +pub struct MotionBlurUniform { + shutter_angle: f32, + samples: u32, + #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] + // WebGL2 structs must be 16 byte aligned. + _webgl2_padding: bevy_math::Vec2, +} + pub const MOTION_BLUR_SHADER_HANDLE: Handle = - Handle::weak_from_u128(987457899187986082347921); + weak_handle!("d9ca74af-fa0a-4f11-b0f2-19613b618b93"); /// Adds support for per-object motion blur to the app. See [`MotionBlur`] for details. 
pub struct MotionBlurPlugin; @@ -137,7 +141,7 @@ impl Plugin for MotionBlurPlugin { ); app.add_plugins(( ExtractComponentPlugin::::default(), - UniformComponentPlugin::::default(), + UniformComponentPlugin::::default(), )); let Some(render_app) = app.get_sub_app_mut(RenderApp) else { diff --git a/crates/bevy_core_pipeline/src/motion_blur/node.rs b/crates/bevy_core_pipeline/src/motion_blur/node.rs index 2497bd633deda..ade5f50d77466 100644 --- a/crates/bevy_core_pipeline/src/motion_blur/node.rs +++ b/crates/bevy_core_pipeline/src/motion_blur/node.rs @@ -15,7 +15,7 @@ use crate::prepass::ViewPrepassTextures; use super::{ pipeline::{MotionBlurPipeline, MotionBlurPipelineId}, - MotionBlur, + MotionBlurUniform, }; #[derive(Default)] @@ -26,7 +26,7 @@ impl ViewNode for MotionBlurNode { &'static ViewTarget, &'static MotionBlurPipelineId, &'static ViewPrepassTextures, - &'static MotionBlur, + &'static MotionBlurUniform, &'static Msaa, ); fn run( @@ -42,7 +42,7 @@ impl ViewNode for MotionBlurNode { let motion_blur_pipeline = world.resource::(); let pipeline_cache = world.resource::(); - let settings_uniforms = world.resource::>(); + let settings_uniforms = world.resource::>(); let Some(pipeline) = pipeline_cache.get_render_pipeline(pipeline_id.0) else { return Ok(()); }; diff --git a/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs b/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs index 8109beeb4eb3a..4eab4ff7a617f 100644 --- a/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs +++ b/crates/bevy_core_pipeline/src/motion_blur/pipeline.rs @@ -2,7 +2,8 @@ use bevy_ecs::{ component::Component, entity::Entity, query::With, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + system::{Commands, Query, Res, ResMut}, world::FromWorld, }; use bevy_image::BevyDefault as _; @@ -25,7 +26,7 @@ use bevy_render::{ use crate::fullscreen_vertex_shader::fullscreen_shader_vertex_state; -use super::{MotionBlur, MOTION_BLUR_SHADER_HANDLE}; +use 
super::{MotionBlurUniform, MOTION_BLUR_SHADER_HANDLE}; #[derive(Resource)] pub struct MotionBlurPipeline { @@ -48,7 +49,7 @@ impl MotionBlurPipeline { // Linear Sampler sampler(SamplerBindingType::Filtering), // Motion blur settings uniform input - uniform_buffer_sized(false, Some(MotionBlur::min_size())), + uniform_buffer_sized(false, Some(MotionBlurUniform::min_size())), // Globals uniform input uniform_buffer_sized(false, Some(GlobalsUniform::min_size())), ), @@ -66,7 +67,7 @@ impl MotionBlurPipeline { // Linear Sampler sampler(SamplerBindingType::Filtering), // Motion blur settings uniform input - uniform_buffer_sized(false, Some(MotionBlur::min_size())), + uniform_buffer_sized(false, Some(MotionBlurUniform::min_size())), // Globals uniform input uniform_buffer_sized(false, Some(GlobalsUniform::min_size())), ), @@ -154,7 +155,7 @@ pub(crate) fn prepare_motion_blur_pipelines( pipeline_cache: Res, mut pipelines: ResMut>, pipeline: Res, - views: Query<(Entity, &ExtractedView, &Msaa), With>, + views: Query<(Entity, &ExtractedView, &Msaa), With>, ) { for (entity, view, msaa) in &views { let pipeline_id = pipelines.specialize( diff --git a/crates/bevy_core_pipeline/src/oit/mod.rs b/crates/bevy_core_pipeline/src/oit/mod.rs index 14e8b8d4e36e3..6a15fd126c86b 100644 --- a/crates/bevy_core_pipeline/src/oit/mod.rs +++ b/crates/bevy_core_pipeline/src/oit/mod.rs @@ -1,10 +1,12 @@ //! Order Independent Transparency (OIT) for 3d rendering. See [`OrderIndependentTransparencyPlugin`] for more details. 
use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::{component::*, prelude::*}; use bevy_math::UVec2; -use bevy_reflect::Reflect; +use bevy_platform::collections::HashSet; +use bevy_platform::time::Instant; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ camera::{Camera, ExtractedCamera}, extract_component::{ExtractComponent, ExtractComponentPlugin}, @@ -16,15 +18,12 @@ use bevy_render::{ view::Msaa, Render, RenderApp, RenderSet, }; -use bevy_utils::{ - tracing::{trace, warn}, - HashSet, Instant, -}; use bevy_window::PrimaryWindow; use resolve::{ node::{OitResolveNode, OitResolvePass}, OitResolvePlugin, }; +use tracing::{trace, warn}; use crate::core_3d::{ graph::{Core3d, Node3d}, @@ -35,7 +34,8 @@ use crate::core_3d::{ pub mod resolve; /// Shader handle for the shader that draws the transparent meshes to the OIT layers buffer. -pub const OIT_DRAW_SHADER_HANDLE: Handle = Handle::weak_from_u128(4042527984320512); +pub const OIT_DRAW_SHADER_HANDLE: Handle = + weak_handle!("0cd3c764-39b8-437b-86b4-4e45635fc03d"); /// Used to identify which camera will use OIT to render transparent meshes /// and to configure OIT. @@ -44,6 +44,7 @@ pub const OIT_DRAW_SHADER_HANDLE: Handle = Handle::weak_from_u128(404252 // This should probably be done by adding an enum to this component. // We use the same struct to pass on the settings to the drawing shader. #[derive(Clone, Copy, ExtractComponent, Reflect, ShaderType)] +#[reflect(Clone, Default)] pub struct OrderIndependentTransparencySettings { /// Controls how many layers will be used to compute the blending. /// The more layers you use the more memory it will use but it will also give better results. 
@@ -70,14 +71,17 @@ impl Component for OrderIndependentTransparencySettings { const STORAGE_TYPE: StorageType = StorageType::SparseSet; type Mutability = Mutable; - fn register_component_hooks(hooks: &mut ComponentHooks) { - hooks.on_add(|world, entity, _| { - if let Some(value) = world.get::(entity) { + fn on_add() -> Option { + Some(|world, context| { + if let Some(value) = world.get::(context.entity) { if value.layer_count > 32 { - warn!("OrderIndependentTransparencySettings layer_count set to {} might be too high.", value.layer_count); + warn!("{}OrderIndependentTransparencySettings layer_count set to {} might be too high.", + context.caller.map(|location|format!("{location}: ")).unwrap_or_default(), + value.layer_count + ); } } - }); + }) } } @@ -159,7 +163,7 @@ fn configure_depth_texture_usages( } // Find all the render target that potentially uses OIT - let primary_window = p.get_single().ok(); + let primary_window = p.single().ok(); let mut render_target_has_oit = >::default(); for (camera, has_oit) in &cameras { if has_oit { @@ -235,7 +239,6 @@ pub struct OrderIndependentTransparencySettingsOffset { /// This creates or resizes the oit buffers for each camera. /// It will always create one big buffer that's as big as the biggest buffer needed. /// Cameras with smaller viewports or less layers will simply use the big buffer and ignore the rest. 
-#[allow(clippy::type_complexity)] pub fn prepare_oit_buffers( mut commands: Commands, render_device: Res, diff --git a/crates/bevy_core_pipeline/src/oit/resolve/mod.rs b/crates/bevy_core_pipeline/src/oit/resolve/mod.rs index 101f7b1ed941e..7db98650fd1b7 100644 --- a/crates/bevy_core_pipeline/src/oit/resolve/mod.rs +++ b/crates/bevy_core_pipeline/src/oit/resolve/mod.rs @@ -3,7 +3,7 @@ use crate::{ oit::OrderIndependentTransparencySettings, }; use bevy_app::Plugin; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_derive::Deref; use bevy_ecs::{ entity::{EntityHashMap, EntityHashSet}, @@ -22,16 +22,20 @@ use bevy_render::{ view::{ExtractedView, ViewTarget, ViewUniform, ViewUniforms}, Render, RenderApp, RenderSet, }; -use bevy_utils::tracing::warn; +use tracing::warn; use super::OitBuffers; /// Shader handle for the shader that sorts the OIT layers, blends the colors based on depth and renders them to the screen. -pub const OIT_RESOLVE_SHADER_HANDLE: Handle = Handle::weak_from_u128(7698420424769536); +pub const OIT_RESOLVE_SHADER_HANDLE: Handle = + weak_handle!("562d2917-eb06-444d-9ade-41de76b0f5ae"); /// Contains the render node used to run the resolve pass. pub mod node; +/// Minimum required value of `wgpu::Limits::max_storage_buffers_per_shader_stage`. +pub const OIT_REQUIRED_STORAGE_BUFFERS: u32 = 2; + /// Plugin needed to resolve the Order Independent Transparency (OIT) buffer to the screen. pub struct OitResolvePlugin; impl Plugin for OitResolvePlugin { @@ -49,14 +53,11 @@ impl Plugin for OitResolvePlugin { return; }; - if !render_app - .world() - .resource::() - .get_downlevel_capabilities() - .flags - .contains(DownlevelFlags::FRAGMENT_WRITABLE_STORAGE) - { - warn!("OrderIndependentTransparencyPlugin not loaded. 
GPU lacks support: DownlevelFlags::FRAGMENT_WRITABLE_STORAGE."); + if !is_oit_supported( + render_app.world().resource::(), + render_app.world().resource::(), + true, + ) { return; } @@ -72,6 +73,34 @@ impl Plugin for OitResolvePlugin { } } +pub fn is_oit_supported(adapter: &RenderAdapter, device: &RenderDevice, warn: bool) -> bool { + if !adapter + .get_downlevel_capabilities() + .flags + .contains(DownlevelFlags::FRAGMENT_WRITABLE_STORAGE) + { + if warn { + warn!("OrderIndependentTransparencyPlugin not loaded. GPU lacks support: DownlevelFlags::FRAGMENT_WRITABLE_STORAGE."); + } + return false; + } + + let max_storage_buffers_per_shader_stage = device.limits().max_storage_buffers_per_shader_stage; + + if max_storage_buffers_per_shader_stage < OIT_REQUIRED_STORAGE_BUFFERS { + if warn { + warn!( + max_storage_buffers_per_shader_stage, + OIT_REQUIRED_STORAGE_BUFFERS, + "OrderIndependentTransparencyPlugin not loaded. RenderDevice lacks support: max_storage_buffers_per_shader_stage < OIT_REQUIRED_STORAGE_BUFFERS." + ); + } + return false; + } + + true +} + /// Bind group for the OIT resolve pass. #[derive(Resource, Deref)] pub struct OitResolveBindGroup(pub BindGroup); @@ -124,7 +153,6 @@ pub struct OitResolvePipelineKey { layer_count: i32, } -#[allow(clippy::too_many_arguments)] pub fn queue_oit_resolve_pipeline( mut commands: Commands, pipeline_cache: Res, diff --git a/crates/bevy_core_pipeline/src/post_process/mod.rs b/crates/bevy_core_pipeline/src/post_process/mod.rs index a633134b276d2..2ac03c08c81a8 100644 --- a/crates/bevy_core_pipeline/src/post_process/mod.rs +++ b/crates/bevy_core_pipeline/src/post_process/mod.rs @@ -3,15 +3,16 @@ //! Currently, this consists only of chromatic aberration. 
use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Assets, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Assets, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ component::Component, entity::Entity, query::{QueryItem, With}, reflect::ReflectComponent, - schedule::IntoSystemConfigs as _, - system::{lifetimeless::Read, Commands, Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::{BevyDefault, Image}; @@ -46,17 +47,18 @@ use crate::{ }; /// The handle to the built-in postprocessing shader `post_process.wgsl`. -const POST_PROCESSING_SHADER_HANDLE: Handle = Handle::weak_from_u128(14675654334038973533); +const POST_PROCESSING_SHADER_HANDLE: Handle = + weak_handle!("5e8e627a-7531-484d-a988-9a38acb34e52"); /// The handle to the chromatic aberration shader `chromatic_aberration.wgsl`. const CHROMATIC_ABERRATION_SHADER_HANDLE: Handle = - Handle::weak_from_u128(10969893303667163833); + weak_handle!("e598550e-71c3-4f5a-ba29-aebc3f88c7b5"); /// The handle to the default chromatic aberration lookup texture. /// /// This is just a 3x1 image consisting of one red pixel, one green pixel, and /// one blue pixel, in that order. const DEFAULT_CHROMATIC_ABERRATION_LUT_HANDLE: Handle = - Handle::weak_from_u128(2199972955136579180); + weak_handle!("dc3e3307-40a1-49bb-be6d-e0634e8836b2"); /// The default chromatic aberration intensity amount, in a fraction of the /// window size. @@ -96,7 +98,7 @@ pub struct PostProcessingPlugin; /// /// [Gjøl & Svendsen 2016]: https://github.com/playdeadgames/publications/blob/master/INSIDE/rendering_inside_gdc2016.pdf #[derive(Reflect, Component, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct ChromaticAberration { /// The lookup texture that determines the color gradient. 
/// @@ -112,7 +114,7 @@ pub struct ChromaticAberration { /// The size of the streaks around the edges of objects, as a fraction of /// the window size. /// - /// The default value is 0.2. + /// The default value is 0.02. pub intensity: f32, /// A cap on the number of texture samples that will be performed. diff --git a/crates/bevy_core_pipeline/src/prepass/mod.rs b/crates/bevy_core_pipeline/src/prepass/mod.rs index 78bac66df0f0f..deea2a5fa8380 100644 --- a/crates/bevy_core_pipeline/src/prepass/mod.rs +++ b/crates/bevy_core_pipeline/src/prepass/mod.rs @@ -34,7 +34,8 @@ use bevy_asset::UntypedAssetId; use bevy_ecs::prelude::*; use bevy_math::Mat4; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -use bevy_render::render_phase::PhaseItemBinKey; +use bevy_render::mesh::allocator::SlabId; +use bevy_render::render_phase::PhaseItemBatchSetKey; use bevy_render::sync_world::MainEntity; use bevy_render::{ render_phase::{ @@ -53,18 +54,18 @@ pub const MOTION_VECTOR_PREPASS_FORMAT: TextureFormat = TextureFormat::Rg16Float /// If added to a [`crate::prelude::Camera3d`] then depth values will be copied to a separate texture available to the main pass. #[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct DepthPrepass; /// If added to a [`crate::prelude::Camera3d`] then vertex world normals will be copied to a separate texture available to the main pass. /// Normals will have normal map textures already applied. #[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct NormalPrepass; /// If added to a [`crate::prelude::Camera3d`] then screen space motion vectors will be copied to a separate texture available to the main pass. 
#[derive(Component, Default, Reflect, Clone)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct MotionVectorPrepass; /// If added to a [`crate::prelude::Camera3d`] then deferred materials will be rendered to the deferred gbuffer texture and will be available to subsequent passes. @@ -77,6 +78,7 @@ pub struct DeferredPrepass; pub struct PreviousViewData { pub view_from_world: Mat4, pub clip_from_world: Mat4, + pub clip_from_view: Mat4, } #[derive(Resource, Default)] @@ -139,8 +141,13 @@ impl ViewPrepassTextures { /// /// Used to render all 3D meshes with materials that have no transparency. pub struct Opaque3dPrepass { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: OpaqueNoLightmap3dBatchSetKey, /// Information that separates items into bins. - pub key: OpaqueNoLightmap3dBinKey, + pub bin_key: OpaqueNoLightmap3dBinKey, /// An entity from which Bevy fetches data common to all instances in this /// batch, such as the mesh. @@ -166,30 +173,33 @@ pub struct OpaqueNoLightmap3dBatchSetKey { /// /// In the case of PBR, this is the `MaterialBindGroupIndex`. pub material_bind_group_index: Option, + + /// The ID of the slab of GPU memory that contains vertex data. + /// + /// For non-mesh items, you can fill this with 0 if your items can be + /// multi-drawn, or with a unique value if they can't. + pub vertex_slab: SlabId, + + /// The ID of the slab of GPU memory that contains index data, if present. + /// + /// For non-mesh items, you can safely fill this with `None`. + pub index_slab: Option, +} + +impl PhaseItemBatchSetKey for OpaqueNoLightmap3dBatchSetKey { + fn indexed(&self) -> bool { + self.index_slab.is_some() + } } // TODO: Try interning these. /// The data used to bin each opaque 3D object in the prepass and deferred pass. 
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct OpaqueNoLightmap3dBinKey { - /// The key of the *batch set*. - /// - /// As batches belong to a batch set, meshes in a batch must obviously be - /// able to be placed in a single batch set. - pub batch_set_key: OpaqueNoLightmap3dBatchSetKey, - /// The ID of the asset. pub asset_id: UntypedAssetId, } -impl PhaseItemBinKey for OpaqueNoLightmap3dBinKey { - type BatchSetKey = OpaqueNoLightmap3dBatchSetKey; - - fn get_batch_set_key(&self) -> Option { - Some(self.batch_set_key.clone()) - } -} - impl PhaseItem for Opaque3dPrepass { #[inline] fn entity(&self) -> Entity { @@ -202,7 +212,7 @@ impl PhaseItem for Opaque3dPrepass { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.batch_set_key.draw_function + self.batch_set_key.draw_function } #[inline] @@ -227,17 +237,20 @@ impl PhaseItem for Opaque3dPrepass { } impl BinnedPhaseItem for Opaque3dPrepass { + type BatchSetKey = OpaqueNoLightmap3dBatchSetKey; type BinKey = OpaqueNoLightmap3dBinKey; #[inline] fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Opaque3dPrepass { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -248,7 +261,7 @@ impl BinnedPhaseItem for Opaque3dPrepass { impl CachedRenderPipelinePhaseItem for Opaque3dPrepass { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.batch_set_key.pipeline + self.batch_set_key.pipeline } } @@ -258,7 +271,13 @@ impl CachedRenderPipelinePhaseItem for Opaque3dPrepass { /// /// Used to render all meshes with a material with an alpha mask. pub struct AlphaMask3dPrepass { - pub key: OpaqueNoLightmap3dBinKey, + /// Determines which objects can be placed into a *batch set*. 
+ /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: OpaqueNoLightmap3dBatchSetKey, + /// Information that separates items into bins. + pub bin_key: OpaqueNoLightmap3dBinKey, pub representative_entity: (Entity, MainEntity), pub batch_range: Range, pub extra_index: PhaseItemExtraIndex, @@ -276,7 +295,7 @@ impl PhaseItem for AlphaMask3dPrepass { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.batch_set_key.draw_function + self.batch_set_key.draw_function } #[inline] @@ -301,17 +320,20 @@ impl PhaseItem for AlphaMask3dPrepass { } impl BinnedPhaseItem for AlphaMask3dPrepass { + type BatchSetKey = OpaqueNoLightmap3dBatchSetKey; type BinKey = OpaqueNoLightmap3dBinKey; #[inline] fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Self { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -322,7 +344,7 @@ impl BinnedPhaseItem for AlphaMask3dPrepass { impl CachedRenderPipelinePhaseItem for AlphaMask3dPrepass { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.batch_set_key.pipeline + self.batch_set_key.pipeline } } diff --git a/crates/bevy_core_pipeline/src/prepass/node.rs b/crates/bevy_core_pipeline/src/prepass/node.rs index 17f5dfb2cfe84..04cc1890b0235 100644 --- a/crates/bevy_core_pipeline/src/prepass/node.rs +++ b/crates/bevy_core_pipeline/src/prepass/node.rs @@ -2,15 +2,16 @@ use bevy_ecs::{prelude::*, query::QueryItem}; use bevy_render::{ camera::ExtractedCamera, diagnostic::RecordDiagnostics, + experimental::occlusion_culling::OcclusionCulling, render_graph::{NodeRunError, RenderGraphContext, ViewNode}, render_phase::{TrackedRenderPass, ViewBinnedRenderPhases}, render_resource::{CommandEncoderDescriptor, PipelineCache, 
RenderPassDescriptor, StoreOp}, renderer::RenderContext, - view::{ViewDepthTexture, ViewUniformOffset}, + view::{ExtractedView, NoIndirectDrawing, ViewDepthTexture, ViewUniformOffset}, }; -use bevy_utils::tracing::error; +use tracing::error; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; +use tracing::info_span; use crate::skybox::prepass::{RenderSkyboxPrepassPipeline, SkyboxPrepassBindGroup}; @@ -19,16 +20,43 @@ use super::{ ViewPrepassTextures, }; -/// Render node used by the prepass. +/// The phase of the prepass that draws meshes that were visible last frame. /// -/// By default, inserted before the main pass in the render graph. +/// If occlusion culling isn't in use, this prepass simply draws all meshes. +/// +/// Like all prepass nodes, this is inserted before the main pass in the render +/// graph. +#[derive(Default)] +pub struct EarlyPrepassNode; + +impl ViewNode for EarlyPrepassNode { + type ViewQuery = ::ViewQuery; + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + view_query: QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + run_prepass(graph, render_context, view_query, world, "early prepass") + } +} + +/// The phase of the prepass that runs after occlusion culling against the +/// meshes that were visible last frame. +/// +/// If occlusion culling isn't in use, this is a no-op. +/// +/// Like all prepass nodes, this is inserted before the main pass in the render +/// graph. 
#[derive(Default)] -pub struct PrepassNode; +pub struct LatePrepassNode; -impl ViewNode for PrepassNode { +impl ViewNode for LatePrepassNode { type ViewQuery = ( - Entity, &'static ExtractedCamera, + &'static ExtractedView, &'static ViewDepthTexture, &'static ViewPrepassTextures, &'static ViewUniformOffset, @@ -36,155 +64,185 @@ impl ViewNode for PrepassNode { Option<&'static RenderSkyboxPrepassPipeline>, Option<&'static SkyboxPrepassBindGroup>, Option<&'static PreviousViewUniformOffset>, + Has, + Has, + Has, ); fn run<'w>( &self, graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, - ( - view, - camera, - view_depth_texture, - view_prepass_textures, - view_uniform_offset, - deferred_prepass, - skybox_prepass_pipeline, - skybox_prepass_bind_group, - view_prev_uniform_offset, - ): QueryItem<'w, Self::ViewQuery>, + query: QueryItem<'w, Self::ViewQuery>, world: &'w World, ) -> Result<(), NodeRunError> { - let (Some(opaque_prepass_phases), Some(alpha_mask_prepass_phases)) = ( - world.get_resource::>(), - world.get_resource::>(), - ) else { + // We only need a late prepass if we have occlusion culling and indirect + // drawing. 
+ let (_, _, _, _, _, _, _, _, _, occlusion_culling, no_indirect_drawing, _) = query; + if !occlusion_culling || no_indirect_drawing { return Ok(()); - }; - - let (Some(opaque_prepass_phase), Some(alpha_mask_prepass_phase)) = ( - opaque_prepass_phases.get(&view), - alpha_mask_prepass_phases.get(&view), - ) else { - return Ok(()); - }; - - let diagnostics = render_context.diagnostic_recorder(); - - let mut color_attachments = vec![ - view_prepass_textures - .normal - .as_ref() - .map(|normals_texture| normals_texture.get_attachment()), - view_prepass_textures - .motion_vectors - .as_ref() - .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()), - // Use None in place of deferred attachments - None, - None, - ]; - - // If all color attachments are none: clear the color attachment list so that no fragment shader is required - if color_attachments.iter().all(Option::is_none) { - color_attachments.clear(); } - let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store)); + run_prepass(graph, render_context, query, world, "late prepass") + } +} + +/// Runs a prepass that draws all meshes to the depth buffer, and possibly +/// normal and motion vector buffers as well. +/// +/// If occlusion culling isn't in use, and a prepass is enabled, then there's +/// only one prepass. If occlusion culling is in use, then any prepass is split +/// into two: an *early* prepass and a *late* prepass. The early prepass draws +/// what was visible last frame, and the last prepass performs occlusion culling +/// against a conservative hierarchical Z buffer before drawing unoccluded +/// meshes. 
+fn run_prepass<'w>( + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + ( + camera, + extracted_view, + view_depth_texture, + view_prepass_textures, + view_uniform_offset, + deferred_prepass, + skybox_prepass_pipeline, + skybox_prepass_bind_group, + view_prev_uniform_offset, + _, + _, + has_deferred, + ): QueryItem<'w, ::ViewQuery>, + world: &'w World, + label: &'static str, +) -> Result<(), NodeRunError> { + // If we're using deferred rendering, there will be a deferred prepass + // instead of this one. Just bail out so we don't have to bother looking at + // the empty bins. + if has_deferred { + return Ok(()); + } + + let (Some(opaque_prepass_phases), Some(alpha_mask_prepass_phases)) = ( + world.get_resource::>(), + world.get_resource::>(), + ) else { + return Ok(()); + }; + + let (Some(opaque_prepass_phase), Some(alpha_mask_prepass_phase)) = ( + opaque_prepass_phases.get(&extracted_view.retained_view_entity), + alpha_mask_prepass_phases.get(&extracted_view.retained_view_entity), + ) else { + return Ok(()); + }; + + let diagnostics = render_context.diagnostic_recorder(); + + let mut color_attachments = vec![ + view_prepass_textures + .normal + .as_ref() + .map(|normals_texture| normals_texture.get_attachment()), + view_prepass_textures + .motion_vectors + .as_ref() + .map(|motion_vectors_texture| motion_vectors_texture.get_attachment()), + // Use None in place of deferred attachments + None, + None, + ]; + + // If all color attachments are none: clear the color attachment list so that no fragment shader is required + if color_attachments.iter().all(Option::is_none) { + color_attachments.clear(); + } + + let depth_stencil_attachment = Some(view_depth_texture.get_attachment(StoreOp::Store)); + + let view_entity = graph.view_entity(); + render_context.add_command_buffer_generation_task(move |render_device| { + #[cfg(feature = "trace")] + let _prepass_span = info_span!("prepass").entered(); + + // Command encoder setup + let mut 
command_encoder = render_device.create_command_encoder(&CommandEncoderDescriptor { + label: Some("prepass_command_encoder"), + }); + + // Render pass setup + let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { + label: Some(label), + color_attachments: &color_attachments, + depth_stencil_attachment, + timestamp_writes: None, + occlusion_query_set: None, + }); + + let mut render_pass = TrackedRenderPass::new(&render_device, render_pass); + let pass_span = diagnostics.pass_span(&mut render_pass, label); + + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); + } - let view_entity = graph.view_entity(); - render_context.add_command_buffer_generation_task(move |render_device| { + // Opaque draws + if !opaque_prepass_phase.is_empty() { #[cfg(feature = "trace")] - let _prepass_span = info_span!("prepass").entered(); - - // Command encoder setup - let mut command_encoder = - render_device.create_command_encoder(&CommandEncoderDescriptor { - label: Some("prepass_command_encoder"), - }); - - // Render pass setup - let render_pass = command_encoder.begin_render_pass(&RenderPassDescriptor { - label: Some("prepass"), - color_attachments: &color_attachments, - depth_stencil_attachment, - timestamp_writes: None, - occlusion_query_set: None, - }); - - let mut render_pass = TrackedRenderPass::new(&render_device, render_pass); - let pass_span = diagnostics.pass_span(&mut render_pass, "prepass"); - - if let Some(viewport) = camera.viewport.as_ref() { - render_pass.set_camera_viewport(viewport); + let _opaque_prepass_span = info_span!("opaque_prepass").entered(); + if let Err(err) = opaque_prepass_phase.render(&mut render_pass, world, view_entity) { + error!("Error encountered while rendering the opaque prepass phase {err:?}"); } + } - // Opaque draws - if !opaque_prepass_phase.batchable_mesh_keys.is_empty() - || !opaque_prepass_phase.unbatchable_mesh_keys.is_empty() + // Alpha masked draws + if 
!alpha_mask_prepass_phase.is_empty() { + #[cfg(feature = "trace")] + let _alpha_mask_prepass_span = info_span!("alpha_mask_prepass").entered(); + if let Err(err) = alpha_mask_prepass_phase.render(&mut render_pass, world, view_entity) { - #[cfg(feature = "trace")] - let _opaque_prepass_span = info_span!("opaque_prepass").entered(); - if let Err(err) = opaque_prepass_phase.render(&mut render_pass, world, view_entity) - { - error!("Error encountered while rendering the opaque prepass phase {err:?}"); - } - } - - // Alpha masked draws - if !alpha_mask_prepass_phase.is_empty() { - #[cfg(feature = "trace")] - let _alpha_mask_prepass_span = info_span!("alpha_mask_prepass").entered(); - if let Err(err) = - alpha_mask_prepass_phase.render(&mut render_pass, world, view_entity) - { - error!( - "Error encountered while rendering the alpha mask prepass phase {err:?}" - ); - } + error!("Error encountered while rendering the alpha mask prepass phase {err:?}"); } + } - // Skybox draw using a fullscreen triangle - if let ( - Some(skybox_prepass_pipeline), - Some(skybox_prepass_bind_group), - Some(view_prev_uniform_offset), - ) = ( - skybox_prepass_pipeline, - skybox_prepass_bind_group, - view_prev_uniform_offset, - ) { - let pipeline_cache = world.resource::(); - if let Some(pipeline) = - pipeline_cache.get_render_pipeline(skybox_prepass_pipeline.0) - { - render_pass.set_render_pipeline(pipeline); - render_pass.set_bind_group( - 0, - &skybox_prepass_bind_group.0, - &[view_uniform_offset.offset, view_prev_uniform_offset.offset], - ); - render_pass.draw(0..3, 0..1); - } + // Skybox draw using a fullscreen triangle + if let ( + Some(skybox_prepass_pipeline), + Some(skybox_prepass_bind_group), + Some(view_prev_uniform_offset), + ) = ( + skybox_prepass_pipeline, + skybox_prepass_bind_group, + view_prev_uniform_offset, + ) { + let pipeline_cache = world.resource::(); + if let Some(pipeline) = pipeline_cache.get_render_pipeline(skybox_prepass_pipeline.0) { + 
render_pass.set_render_pipeline(pipeline); + render_pass.set_bind_group( + 0, + &skybox_prepass_bind_group.0, + &[view_uniform_offset.offset, view_prev_uniform_offset.offset], + ); + render_pass.draw(0..3, 0..1); } + } - pass_span.end(&mut render_pass); - drop(render_pass); - - // After rendering to the view depth texture, copy it to the prepass depth texture if deferred isn't going to - if deferred_prepass.is_none() { - if let Some(prepass_depth_texture) = &view_prepass_textures.depth { - command_encoder.copy_texture_to_texture( - view_depth_texture.texture.as_image_copy(), - prepass_depth_texture.texture.texture.as_image_copy(), - view_prepass_textures.size, - ); - } + pass_span.end(&mut render_pass); + drop(render_pass); + + // After rendering to the view depth texture, copy it to the prepass depth texture if deferred isn't going to + if deferred_prepass.is_none() { + if let Some(prepass_depth_texture) = &view_prepass_textures.depth { + command_encoder.copy_texture_to_texture( + view_depth_texture.texture.as_image_copy(), + prepass_depth_texture.texture.texture.as_image_copy(), + view_prepass_textures.size, + ); } + } - command_encoder.finish() - }); + command_encoder.finish() + }); - Ok(()) - } + Ok(()) } diff --git a/crates/bevy_core_pipeline/src/skybox/mod.rs b/crates/bevy_core_pipeline/src/skybox/mod.rs index 5ca7c3fce2d26..7e2dba466ccf5 100644 --- a/crates/bevy_core_pipeline/src/skybox/mod.rs +++ b/crates/bevy_core_pipeline/src/skybox/mod.rs @@ -1,13 +1,16 @@ use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_ecs::{ prelude::{Component, Entity}, query::{QueryItem, With}, - schedule::IntoSystemConfigs, - system::{Commands, Query, Res, ResMut, Resource}, + reflect::ReflectComponent, + resource::Resource, + schedule::IntoScheduleConfigs, + system::{Commands, Query, Res, ResMut}, }; use bevy_image::{BevyDefault, Image}; use bevy_math::{Mat4, Quat}; +use 
bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ camera::Exposure, extract_component::{ @@ -29,7 +32,7 @@ use prepass::{SkyboxPrepassPipeline, SKYBOX_PREPASS_SHADER_HANDLE}; use crate::{core_3d::CORE_3D_DEPTH_FORMAT, prepass::PreviousViewUniforms}; -const SKYBOX_SHADER_HANDLE: Handle = Handle::weak_from_u128(55594763423201); +const SKYBOX_SHADER_HANDLE: Handle = weak_handle!("a66cf9cc-cab8-47f8-ac32-db82fdc4f29b"); pub mod prepass; @@ -45,7 +48,7 @@ impl Plugin for SkyboxPlugin { Shader::from_wgsl ); - app.add_plugins(( + app.register_type::().add_plugins(( ExtractComponentPlugin::::default(), UniformComponentPlugin::::default(), )); @@ -86,7 +89,8 @@ impl Plugin for SkyboxPlugin { /// To do so, use `EnvironmentMapLight` alongside this component. /// /// See also . -#[derive(Component, Clone)] +#[derive(Component, Clone, Reflect)] +#[reflect(Component, Default, Clone)] pub struct Skybox { pub image: Handle, /// Scale factor applied to the skybox image. diff --git a/crates/bevy_core_pipeline/src/skybox/prepass.rs b/crates/bevy_core_pipeline/src/skybox/prepass.rs index c51e707808e93..658660bbc62ff 100644 --- a/crates/bevy_core_pipeline/src/skybox/prepass.rs +++ b/crates/bevy_core_pipeline/src/skybox/prepass.rs @@ -1,11 +1,12 @@ //! Adds motion vector support to skyboxes. See [`SkyboxPrepassPipeline`] for details. -use bevy_asset::Handle; +use bevy_asset::{weak_handle, Handle}; use bevy_ecs::{ component::Component, entity::Entity, query::{Has, With}, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + system::{Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_render::{ @@ -29,7 +30,8 @@ use crate::{ Skybox, }; -pub const SKYBOX_PREPASS_SHADER_HANDLE: Handle = Handle::weak_from_u128(376510055324461154); +pub const SKYBOX_PREPASS_SHADER_HANDLE: Handle = + weak_handle!("7a292435-bfe6-4ed9-8d30-73bf7aa673b0"); /// This pipeline writes motion vectors to the prepass for all [`Skybox`]es. 
/// diff --git a/crates/bevy_core_pipeline/src/skybox/skybox.wgsl b/crates/bevy_core_pipeline/src/skybox/skybox.wgsl index 41ac214050295..7982370a19794 100644 --- a/crates/bevy_core_pipeline/src/skybox/skybox.wgsl +++ b/crates/bevy_core_pipeline/src/skybox/skybox.wgsl @@ -34,13 +34,13 @@ fn coords_to_ray_direction(position: vec2, viewport: vec4) -> vec3 VertexOutput { // See the explanation above for how this works. - let clip_position = vec4( + let clip_position = vec2( f32(vertex_index & 1u), f32((vertex_index >> 1u) & 1u), - 0.25, - 0.5 - ) * 4.0 - vec4(1.0); + ) * 4.0 - vec2(1.0); - return VertexOutput(clip_position); + return VertexOutput(vec4(clip_position, 0.0, 1.0)); } @fragment diff --git a/crates/bevy_core_pipeline/src/tonemapping/mod.rs b/crates/bevy_core_pipeline/src/tonemapping/mod.rs index c6fb3217253f9..9f3964ad171d2 100644 --- a/crates/bevy_core_pipeline/src/tonemapping/mod.rs +++ b/crates/bevy_core_pipeline/src/tonemapping/mod.rs @@ -1,6 +1,6 @@ use crate::fullscreen_vertex_shader::fullscreen_shader_vertex_state; use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, Assets, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Assets, Handle}; use bevy_ecs::prelude::*; use bevy_image::{CompressedImageFormats, Image, ImageSampler, ImageType}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -18,22 +18,23 @@ use bevy_render::{ view::{ExtractedView, ViewTarget, ViewUniform}, Render, RenderApp, RenderSet, }; -#[cfg(not(feature = "tonemapping_luts"))] -use bevy_utils::tracing::error; use bitflags::bitflags; +#[cfg(not(feature = "tonemapping_luts"))] +use tracing::error; mod node; use bevy_utils::default; pub use node::TonemappingNode; -const TONEMAPPING_SHADER_HANDLE: Handle = Handle::weak_from_u128(17015368199668024512); +const TONEMAPPING_SHADER_HANDLE: Handle = + weak_handle!("e239c010-c25c-42a1-b4e8-08818764d667"); const TONEMAPPING_SHARED_SHADER_HANDLE: Handle = - Handle::weak_from_u128(2499430578245347910); + 
weak_handle!("61dbc544-4b30-4ca9-83bd-4751b5cfb1b1"); const TONEMAPPING_LUT_BINDINGS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(8392056472189465073); + weak_handle!("d50e3a70-c85e-4725-a81e-72fc83281145"); /// 3D LUT (look up table) textures used for tonemapping #[derive(Resource, Clone, ExtractResource)] @@ -431,8 +432,11 @@ pub fn get_lut_bind_group_layout_entries() -> [BindGroupLayoutEntryBuilder; 2] { ] } -// allow(dead_code) so it doesn't complain when the tonemapping_luts feature is disabled -#[allow(dead_code)] +#[expect(clippy::allow_attributes, reason = "`dead_code` is not always linted.")] +#[allow( + dead_code, + reason = "There is unused code when the `tonemapping_luts` feature is disabled." +)] fn setup_tonemapping_lut_image(bytes: &[u8], image_type: ImageType) -> Image { let image_sampler = ImageSampler::Descriptor(bevy_image::ImageSamplerDescriptor { label: Some("Tonemapping LUT sampler".to_string()), @@ -445,8 +449,6 @@ fn setup_tonemapping_lut_image(bytes: &[u8], image_type: ImageType) -> Image { ..default() }); Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - "Tonemapping LUT sampler".to_string(), bytes, image_type, CompressedImageFormats::NONE, @@ -461,7 +463,7 @@ pub fn lut_placeholder() -> Image { let format = TextureFormat::Rgba8Unorm; let data = vec![255, 0, 255, 255]; Image { - data, + data: Some(data), texture_descriptor: TextureDescriptor { size: Extent3d { width: 1, diff --git a/crates/bevy_core_pipeline/src/upscaling/mod.rs b/crates/bevy_core_pipeline/src/upscaling/mod.rs index 52369fca59abc..20dd19f4ce691 100644 --- a/crates/bevy_core_pipeline/src/upscaling/mod.rs +++ b/crates/bevy_core_pipeline/src/upscaling/mod.rs @@ -1,13 +1,13 @@ use crate::blit::{BlitPipeline, BlitPipelineKey}; use bevy_app::prelude::*; use bevy_ecs::prelude::*; +use bevy_platform::collections::HashSet; use bevy_render::{ camera::{CameraOutputMode, ExtractedCamera}, render_resource::*, view::ViewTarget, Render, RenderApp, RenderSet, }; 
-use bevy_utils::HashSet; mod node; @@ -55,7 +55,7 @@ fn prepare_view_upscaling_pipelines( match blend_state { None => { - // If we've already seen this output for a camera and it doesn't have a output blend + // If we've already seen this output for a camera and it doesn't have an output blend // mode configured, default to alpha blend so that we don't accidentally overwrite // the output texture if already_seen { diff --git a/crates/bevy_derive/Cargo.toml b/crates/bevy_derive/Cargo.toml index 385ba359ba6d3..1c4cb4adccf98 100644 --- a/crates/bevy_derive/Cargo.toml +++ b/crates/bevy_derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_derive" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides derive implementations for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -12,7 +12,7 @@ keywords = ["bevy"] proc-macro = true [dependencies] -bevy_macro_utils = { path = "../bevy_macro_utils", version = "0.15.0-dev" } +bevy_macro_utils = { path = "../bevy_macro_utils", version = "0.16.0-dev" } quote = "1.0" syn = { version = "2.0", features = ["full"] } diff --git a/crates/bevy_derive/LICENSE-APACHE b/crates/bevy_derive/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_derive/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_derive/LICENSE-MIT b/crates/bevy_derive/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_derive/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_derive/compile_fail/Cargo.toml b/crates/bevy_derive/compile_fail/Cargo.toml index 45dcf8aaafbe7..a9ad3e95e1179 100644 --- a/crates/bevy_derive/compile_fail/Cargo.toml +++ b/crates/bevy_derive/compile_fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bevy_derive_compile_fail" -edition = "2021" +edition = "2024" description = "Compile fail tests for Bevy Engine's various macros" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr b/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr index 0315f2be7434b..46fec78c4304d 100644 --- a/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr +++ b/crates/bevy_derive/compile_fail/tests/deref_mut_derive/missing_deref_fail.stderr @@ -1,14 +1,11 @@ error[E0277]: the trait bound `TupleStruct: Deref` is not satisfied - --> tests/deref_mut_derive/missing_deref_fail.rs:10:8 - | -10 | struct TupleStruct(usize, #[deref] String); - | ^^^^^^^^^^^ the trait `Deref` is not implemented for `TupleStruct` - | + --> tests/deref_mut_derive/missing_deref_fail.rs:9:8 + | +9 | struct TupleStruct(usize, #[deref] String); + | ^^^^^^^^^^^ the trait `Deref` is not implemented for `TupleStruct` + | note: required by a bound in `DerefMut` - --> $RUSTUP_HOME/.rustup/toolchains/stable-x86_64-pc-windows-msvc/lib/rustlib/src/rust/library/core/src/ops/deref.rs:264:21 - | -264 | pub trait DerefMut: Deref { - | ^^^^^ required by this bound in `DerefMut` + --> /rustc/4d91de4e48198da2e33413efdcd9cd2cc0c46688/library/core/src/ops/deref.rs:290:1 error[E0277]: the trait bound `TupleStruct: Deref` is not satisfied --> tests/deref_mut_derive/missing_deref_fail.rs:7:10 @@ -19,21 +16,18 @@ error[E0277]: the trait bound `TupleStruct: Deref` is not satisfied = note: this error originates in the derive macro `DerefMut` (in Nightly builds, run with -Z macro-backtrace 
for more info) error[E0277]: the trait bound `Struct: Deref` is not satisfied - --> tests/deref_mut_derive/missing_deref_fail.rs:15:8 - | -15 | struct Struct { - | ^^^^^^ the trait `Deref` is not implemented for `Struct` - | + --> tests/deref_mut_derive/missing_deref_fail.rs:14:8 + | +14 | struct Struct { + | ^^^^^^ the trait `Deref` is not implemented for `Struct` + | note: required by a bound in `DerefMut` - --> $RUSTUP_HOME/.rustup/toolchains/stable-x86_64-pc-windows-msvc/lib/rustlib/src/rust/library/core/src/ops/deref.rs:264:21 - | -264 | pub trait DerefMut: Deref { - | ^^^^^ required by this bound in `DerefMut` + --> /rustc/4d91de4e48198da2e33413efdcd9cd2cc0c46688/library/core/src/ops/deref.rs:290:1 error[E0277]: the trait bound `Struct: Deref` is not satisfied - --> tests/deref_mut_derive/missing_deref_fail.rs:13:10 + --> tests/deref_mut_derive/missing_deref_fail.rs:12:10 | -13 | #[derive(DerefMut)] +12 | #[derive(DerefMut)] | ^^^^^^^^ the trait `Deref` is not implemented for `Struct` | = note: this error originates in the derive macro `DerefMut` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/crates/bevy_derive/src/bevy_main.rs b/crates/bevy_derive/src/bevy_main.rs index 8111a31338b56..6481823ad474a 100644 --- a/crates/bevy_derive/src/bevy_main.rs +++ b/crates/bevy_derive/src/bevy_main.rs @@ -10,19 +10,16 @@ pub fn bevy_main(_attr: TokenStream, item: TokenStream) -> TokenStream { ); TokenStream::from(quote! { - #[no_mangle] + // SAFETY: `#[bevy_main]` should only be placed on a single `main` function + // TODO: Potentially make `bevy_main` an unsafe attribute as there is a safety + // guarantee required from the caller.
+ #[unsafe(no_mangle)] #[cfg(target_os = "android")] fn android_main(android_app: bevy::window::android_activity::AndroidApp) { let _ = bevy::window::ANDROID_APP.set(android_app); main(); } - #[no_mangle] - #[cfg(target_os = "ios")] - extern "C" fn main_rs() { - main(); - } - #[allow(unused)] #input }) diff --git a/crates/bevy_dev_tools/Cargo.toml b/crates/bevy_dev_tools/Cargo.toml index 1d426853cee80..ad0f2c515ca26 100644 --- a/crates/bevy_dev_tools/Cargo.toml +++ b/crates/bevy_dev_tools/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_dev_tools" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Collection of developer tools for the Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -13,24 +13,26 @@ bevy_ci_testing = ["serde", "ron"] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev" } -bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_input = { path = "../bevy_input", version = "0.15.0-dev" } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_time = { path = "../bevy_time", version = "0.15.0-dev" } -bevy_text = { path = "../bevy_text", version = "0.15.0-dev" } -bevy_ui = { path = "../bevy_ui", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev" } -bevy_state = { path = "../bevy_state", version = "0.15.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_diagnostic = { path 
= "../bevy_diagnostic", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_input = { path = "../bevy_input", version = "0.16.0-dev" } +bevy_picking = { path = "../bevy_picking", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } +bevy_text = { path = "../bevy_text", version = "0.16.0-dev" } +bevy_ui = { path = "../bevy_ui", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } +bevy_state = { path = "../bevy_state", version = "0.16.0-dev" } # other serde = { version = "1.0", features = ["derive"], optional = true } ron = { version = "0.8.0", optional = true } +tracing = { version = "0.1", default-features = false, features = ["std"] } [lints] workspace = true diff --git a/crates/bevy_dev_tools/LICENSE-APACHE b/crates/bevy_dev_tools/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_dev_tools/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_dev_tools/LICENSE-MIT b/crates/bevy_dev_tools/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_dev_tools/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_dev_tools/src/ci_testing/config.rs b/crates/bevy_dev_tools/src/ci_testing/config.rs index 9d2b74947592e..6dc601f1cc32d 100644 --- a/crates/bevy_dev_tools/src/ci_testing/config.rs +++ b/crates/bevy_dev_tools/src/ci_testing/config.rs @@ -6,7 +6,7 @@ use serde::Deserialize; /// It gets used when the `bevy_ci_testing` feature is enabled to automatically /// exit a Bevy app when run through the CI. This is needed because otherwise /// Bevy apps would be stuck in the game loop and wouldn't allow the CI to progress. 
-#[derive(Deserialize, Resource, PartialEq, Debug)] +#[derive(Deserialize, Resource, PartialEq, Debug, Default)] pub struct CiTestingConfig { /// The setup for this test. #[serde(default)] @@ -37,6 +37,9 @@ pub enum CiTestingEvent { /// Takes a screenshot of the entire screen, and saves the results to /// `screenshot-{current_frame}.png`. Screenshot, + /// Takes a screenshot of the entire screen, and saves the results to + /// `screenshot-{name}.png`. + NamedScreenshot(String), /// Stops the program by sending [`AppExit::Success`]. /// /// [`AppExit::Success`]: bevy_app::AppExit::Success diff --git a/crates/bevy_dev_tools/src/ci_testing/mod.rs b/crates/bevy_dev_tools/src/ci_testing/mod.rs index 9f31db2140892..09f16d71a7e53 100644 --- a/crates/bevy_dev_tools/src/ci_testing/mod.rs +++ b/crates/bevy_dev_tools/src/ci_testing/mod.rs @@ -30,11 +30,12 @@ impl Plugin for CiTestingPlugin { let config: CiTestingConfig = { let filename = std::env::var("CI_TESTING_CONFIG") .unwrap_or_else(|_| "ci_testing_config.ron".to_string()); - ron::from_str( - &std::fs::read_to_string(filename) - .expect("error reading CI testing configuration file"), - ) - .expect("error deserializing CI testing configuration file") + std::fs::read_to_string(filename) + .map(|content| { + ron::from_str(&content) + .expect("error deserializing CI testing configuration file") + }) + .unwrap_or_default() }; #[cfg(target_arch = "wasm32")] @@ -62,7 +63,7 @@ impl Plugin for CiTestingPlugin { // The offending system does not exist in the wasm32 target. // As a result, we must conditionally order the two systems using a system set. 
- #[cfg(not(target_arch = "wasm32"))] + #[cfg(any(unix, windows))] app.configure_sets( Update, SendEvents.before(bevy_app::TerminalCtrlCHandlerPlugin::exit_on_flag), diff --git a/crates/bevy_dev_tools/src/ci_testing/systems.rs b/crates/bevy_dev_tools/src/ci_testing/systems.rs index 20c758a91cdfc..f9570133c0bba 100644 --- a/crates/bevy_dev_tools/src/ci_testing/systems.rs +++ b/crates/bevy_dev_tools/src/ci_testing/systems.rs @@ -2,7 +2,7 @@ use super::config::*; use bevy_app::AppExit; use bevy_ecs::prelude::*; use bevy_render::view::screenshot::{save_to_disk, Screenshot}; -use bevy_utils::tracing::{debug, info}; +use tracing::{debug, info}; pub(crate) fn send_events(world: &mut World, mut current_frame: Local) { let mut config = world.resource_mut::(); @@ -28,6 +28,16 @@ pub(crate) fn send_events(world: &mut World, mut current_frame: Local) { .observe(save_to_disk(path)); info!("Took a screenshot at frame {}.", *current_frame); } + CiTestingEvent::NamedScreenshot(name) => { + let path = format!("./screenshot-{}.png", name); + world + .spawn(Screenshot::primary_window()) + .observe(save_to_disk(path)); + info!( + "Took a screenshot at frame {} for {}.", + *current_frame, name + ); + } // Custom events are forwarded to the world. 
CiTestingEvent::Custom(event_string) => { world.send_event(CiTestingCustomEvent(event_string)); diff --git a/crates/bevy_dev_tools/src/fps_overlay.rs b/crates/bevy_dev_tools/src/fps_overlay.rs index f970fc8f434c8..7c29ae3adc096 100644 --- a/crates/bevy_dev_tools/src/fps_overlay.rs +++ b/crates/bevy_dev_tools/src/fps_overlay.rs @@ -8,18 +8,20 @@ use bevy_ecs::{ change_detection::DetectChangesMut, component::Component, entity::Entity, + prelude::Local, query::With, - schedule::{common_conditions::resource_changed, IntoSystemConfigs}, - system::{Commands, Query, Res, Resource}, + resource::Resource, + schedule::{common_conditions::resource_changed, IntoScheduleConfigs}, + system::{Commands, Query, Res}, }; -use bevy_hierarchy::{BuildChildren, ChildBuild}; use bevy_render::view::Visibility; use bevy_text::{Font, TextColor, TextFont, TextSpan}; +use bevy_time::Time; use bevy_ui::{ widget::{Text, TextUiWriter}, GlobalZIndex, Node, PositionType, }; -use bevy_utils::default; +use core::time::Duration; /// [`GlobalZIndex`] used to render the fps overlay. /// @@ -43,7 +45,7 @@ impl Plugin for FpsOverlayPlugin { fn build(&self, app: &mut bevy_app::App) { // TODO: Use plugin dependencies, see https://github.com/bevyengine/bevy/issues/69 if !app.is_plugin_added::() { - app.add_plugins(FrameTimeDiagnosticsPlugin); + app.add_plugins(FrameTimeDiagnosticsPlugin::default()); } app.insert_resource(self.config.clone()) .add_systems(Startup, setup) @@ -66,6 +68,10 @@ pub struct FpsOverlayConfig { pub text_color: Color, /// Displays the FPS overlay if true. pub enabled: bool, + /// The period after which the FPS overlay re-renders. + /// + /// Defaults to once every 100 ms. 
+ pub refresh_interval: Duration, } impl Default for FpsOverlayConfig { @@ -74,10 +80,11 @@ impl Default for FpsOverlayConfig { text_config: TextFont { font: Handle::::default(), font_size: 32.0, - ..default() + ..Default::default() }, text_color: Color::WHITE, enabled: true, + refresh_interval: Duration::from_millis(100), } } } @@ -91,7 +98,7 @@ fn setup(mut commands: Commands, overlay_config: Res) { Node { // We need to make sure the overlay doesn't affect the position of other UI nodes position_type: PositionType::Absolute, - ..default() + ..Default::default() }, // Render overlay on top of everything GlobalZIndex(FPS_OVERLAY_ZINDEX), @@ -111,11 +118,18 @@ fn update_text( diagnostic: Res, query: Query>, mut writer: TextUiWriter, + time: Res() - .on_add(|mut world, _, _| world.resource_mut::().assert_order(0)) - .on_insert(|mut world, _, _| world.resource_mut::().assert_order(1)) - .on_replace(|mut world, _, _| world.resource_mut::().assert_order(2)) - .on_remove(|mut world, _, _| world.resource_mut::().assert_order(3)); + .on_add(|mut world, _| world.resource_mut::().assert_order(0)) + .on_insert(|mut world, _| world.resource_mut::().assert_order(1)) + .on_replace(|mut world, _| world.resource_mut::().assert_order(2)) + .on_remove(|mut world, _| world.resource_mut::().assert_order(3)); let entity = world.spawn(A).id(); world.despawn(entity); @@ -1723,10 +1843,10 @@ mod tests { world.init_resource::(); world .register_component_hooks::() - .on_add(|mut world, _, _| world.resource_mut::().assert_order(0)) - .on_insert(|mut world, _, _| world.resource_mut::().assert_order(1)) - .on_replace(|mut world, _, _| world.resource_mut::().assert_order(2)) - .on_remove(|mut world, _, _| world.resource_mut::().assert_order(3)); + .on_add(|mut world, _| world.resource_mut::().assert_order(0)) + .on_insert(|mut world, _| world.resource_mut::().assert_order(1)) + .on_replace(|mut world, _| world.resource_mut::().assert_order(2)) + .on_remove(|mut world, _| 
world.resource_mut::().assert_order(3)); let mut entity = world.spawn_empty(); entity.insert(A); @@ -1740,8 +1860,8 @@ mod tests { let mut world = World::new(); world .register_component_hooks::() - .on_replace(|mut world, _, _| world.resource_mut::().assert_order(0)) - .on_insert(|mut world, _, _| { + .on_replace(|mut world, _| world.resource_mut::().assert_order(0)) + .on_insert(|mut world, _| { if let Some(mut r) = world.get_resource_mut::() { r.assert_order(1); } @@ -1762,22 +1882,22 @@ mod tests { world.init_resource::(); world .register_component_hooks::() - .on_add(|mut world, entity, _| { + .on_add(|mut world, context| { world.resource_mut::().assert_order(0); - world.commands().entity(entity).insert(B); + world.commands().entity(context.entity).insert(B); }) - .on_remove(|mut world, entity, _| { + .on_remove(|mut world, context| { world.resource_mut::().assert_order(2); - world.commands().entity(entity).remove::(); + world.commands().entity(context.entity).remove::(); }); world .register_component_hooks::() - .on_add(|mut world, entity, _| { + .on_add(|mut world, context| { world.resource_mut::().assert_order(1); - world.commands().entity(entity).remove::(); + world.commands().entity(context.entity).remove::(); }) - .on_remove(|mut world, _, _| { + .on_remove(|mut world, _| { world.resource_mut::().assert_order(3); }); @@ -1794,27 +1914,27 @@ mod tests { world.init_resource::(); world .register_component_hooks::() - .on_add(|mut world, entity, _| { + .on_add(|mut world, context| { world.resource_mut::().assert_order(0); - world.commands().entity(entity).insert(B).insert(C); + world.commands().entity(context.entity).insert(B).insert(C); }); world .register_component_hooks::() - .on_add(|mut world, entity, _| { + .on_add(|mut world, context| { world.resource_mut::().assert_order(1); - world.commands().entity(entity).insert(D); + world.commands().entity(context.entity).insert(D); }); world .register_component_hooks::() - .on_add(|mut world, _, _| { + 
.on_add(|mut world, _| { world.resource_mut::().assert_order(3); }); world .register_component_hooks::() - .on_add(|mut world, _, _| { + .on_add(|mut world, _| { world.resource_mut::().assert_order(2); }); diff --git a/crates/bevy_ecs/src/change_detection.rs b/crates/bevy_ecs/src/change_detection.rs index 72cf8c6784edb..767134fdc69e7 100644 --- a/crates/bevy_ecs/src/change_detection.rs +++ b/crates/bevy_ecs/src/change_detection.rs @@ -3,17 +3,17 @@ use crate::{ component::{Tick, TickCells}, ptr::PtrMut, - system::Resource, + resource::Resource, }; +use alloc::borrow::ToOwned; use bevy_ptr::{Ptr, UnsafeCellDeref}; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::Reflect; use core::{ + marker::PhantomData, mem, ops::{Deref, DerefMut}, -}; -#[cfg(feature = "track_change_detection")] -use { - bevy_ptr::ThinSlicePtr, - core::{cell::UnsafeCell, panic::Location}, + panic::Location, }; /// The (arbitrarily chosen) minimum number of world tick increments between `check_tick` scans. @@ -71,9 +71,11 @@ pub trait DetectChanges { /// [`SystemParam`](crate::system::SystemParam). fn last_changed(&self) -> Tick; + /// Returns the change tick recording the time this data was added. + fn added(&self) -> Tick; + /// The location that last caused this to change. - #[cfg(feature = "track_change_detection")] - fn changed_by(&self) -> &'static Location<'static>; + fn changed_by(&self) -> MaybeLocation; } /// Types that implement reliable change detection. @@ -119,6 +121,15 @@ pub trait DetectChangesMut: DetectChanges { /// **Note**: This operation cannot be undone. fn set_changed(&mut self); + /// Flags this value as having been added. + /// + /// It is not normally necessary to call this method. + /// The 'added' tick is set when the value is first added, + /// and is not normally changed afterwards. + /// + /// **Note**: This operation cannot be undone. + fn set_added(&mut self); + /// Manually sets the change tick recording the time when this data was last mutated. 
/// /// # Warning @@ -127,6 +138,12 @@ pub trait DetectChangesMut: DetectChanges { /// If you want to avoid triggering change detection, use [`bypass_change_detection`](DetectChangesMut::bypass_change_detection) instead. fn set_last_changed(&mut self, last_changed: Tick); + /// Manually sets the added tick recording the time when this data was last added. + /// + /// # Warning + /// The caveats of [`set_last_changed`](DetectChangesMut::set_last_changed) apply. This modifies both the added and changed ticks together. + fn set_last_added(&mut self, last_added: Tick); + /// Manually bypasses change detection, allowing you to mutate the underlying value without updating the change tick. /// /// # Warning @@ -224,7 +241,7 @@ pub trait DetectChangesMut: DetectChanges { /// let new_score = 0; /// if let Some(Score(previous_score)) = score.replace_if_neq(Score(new_score)) { /// // If `score` change, emit a `ScoreChanged` event. - /// score_changed.send(ScoreChanged { + /// score_changed.write(ScoreChanged { /// current: new_score, /// previous: previous_score, /// }); @@ -268,6 +285,55 @@ pub trait DetectChangesMut: DetectChanges { None } } + + /// Overwrites this smart pointer with a clone of the given value, if and only if `*self != value`. + /// Returns `true` if the value was overwritten, and returns `false` if it was not. + /// + /// This method is useful when the caller only has a borrowed form of `Inner`, + /// e.g. when writing a `&str` into a `Mut`. + /// + /// # Examples + /// ``` + /// # extern crate alloc; + /// # use alloc::borrow::ToOwned; + /// # use bevy_ecs::{prelude::*, schedule::common_conditions::resource_changed}; + /// #[derive(Resource)] + /// pub struct Message(String); + /// + /// fn update_message(mut message: ResMut) { + /// // Set the score to zero, unless it is already zero. 
+ /// ResMut::map_unchanged(message, |Message(msg)| msg).clone_from_if_neq("another string"); + /// } + /// # let mut world = World::new(); + /// # world.insert_resource(Message("initial string".into())); + /// # let mut message_changed = IntoSystem::into_system(resource_changed::); + /// # message_changed.initialize(&mut world); + /// # message_changed.run((), &mut world); + /// # + /// # let mut schedule = Schedule::default(); + /// # schedule.add_systems(update_message); + /// # + /// # // first time `reset_score` runs, the score is changed. + /// # schedule.run(&mut world); + /// # assert!(message_changed.run((), &mut world)); + /// # // second time `reset_score` runs, the score is not changed. + /// # schedule.run(&mut world); + /// # assert!(!message_changed.run((), &mut world)); + /// ``` + fn clone_from_if_neq(&mut self, value: &T) -> bool + where + T: ToOwned + ?Sized, + Self::Inner: PartialEq, + { + let old = self.bypass_change_detection(); + if old != value { + value.clone_into(old); + self.set_changed(); + true + } else { + false + } + } } macro_rules! change_detection_impl { @@ -293,9 +359,13 @@ macro_rules! change_detection_impl { } #[inline] - #[cfg(feature = "track_change_detection")] - fn changed_by(&self) -> &'static Location<'static> { - self.changed_by + fn added(&self) -> Tick { + *self.ticks.added + } + + #[inline] + fn changed_by(&self) -> MaybeLocation { + self.changed_by.copied() } } @@ -326,20 +396,30 @@ macro_rules! 
change_detection_mut_impl { #[track_caller] fn set_changed(&mut self) { *self.ticks.changed = self.ticks.this_run; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_added(&mut self) { + *self.ticks.changed = self.ticks.this_run; + *self.ticks.added = self.ticks.this_run; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] #[track_caller] fn set_last_changed(&mut self, last_changed: Tick) { *self.ticks.changed = last_changed; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_last_added(&mut self, last_added: Tick) { + *self.ticks.added = last_added; + *self.ticks.changed = last_added; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] @@ -353,10 +433,7 @@ macro_rules! change_detection_mut_impl { #[track_caller] fn deref_mut(&mut self) -> &mut Self::Target { self.set_changed(); - #[cfg(feature = "track_change_detection")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); self.value } } @@ -394,8 +471,7 @@ macro_rules! impl_methods { last_run: self.ticks.last_run, this_run: self.ticks.this_run, }, - #[cfg(feature = "track_change_detection")] - changed_by: self.changed_by, + changed_by: self.changed_by.as_deref_mut(), } } @@ -425,7 +501,6 @@ macro_rules! impl_methods { Mut { value: f(self.value), ticks: self.ticks, - #[cfg(feature = "track_change_detection")] changed_by: self.changed_by, } } @@ -439,7 +514,19 @@ macro_rules! impl_methods { value.map(|value| Mut { value, ticks: self.ticks, - #[cfg(feature = "track_change_detection")] + changed_by: self.changed_by, + }) + } + + /// Optionally maps to an inner value by applying a function to the contained reference, returns an error on failure. 
+ /// This is useful in a situation where you need to convert a `Mut` to a `Mut`, but only if `T` contains `U`. + /// + /// As with `map_unchanged`, you should never modify the argument passed to the closure. + pub fn try_map_unchanged(self, f: impl FnOnce(&mut $target) -> Result<&mut U, E>) -> Result, E> { + let value = f(self.value); + value.map(|value| Mut { + value, + ticks: self.ticks, changed_by: self.changed_by, }) } @@ -550,8 +637,7 @@ impl<'w> From> for Ticks<'w> { pub struct Res<'w, T: ?Sized + Resource> { pub(crate) value: &'w T, pub(crate) ticks: Ticks<'w>, - #[cfg(feature = "track_change_detection")] - pub(crate) changed_by: &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w &'static Location<'static>>, } impl<'w, T: Resource> Res<'w, T> { @@ -559,12 +645,14 @@ impl<'w, T: Resource> Res<'w, T> { /// /// Note that unless you actually need an instance of `Res`, you should /// prefer to just convert it to `&T` which can be freely copied. - #[allow(clippy::should_implement_trait)] + #[expect( + clippy::should_implement_trait, + reason = "As this struct derefs to the inner resource, a `Clone` trait implementation would interfere with the common case of cloning the inner content. 
(A similar case of this happening can be found with `std::cell::Ref::clone()`.)" + )] pub fn clone(this: &Self) -> Self { Self { value: this.value, ticks: this.ticks.clone(), - #[cfg(feature = "track_change_detection")] changed_by: this.changed_by, } } @@ -582,8 +670,7 @@ impl<'w, T: Resource> From> for Res<'w, T> { Self { value: res.value, ticks: res.ticks.into(), - #[cfg(feature = "track_change_detection")] - changed_by: res.changed_by, + changed_by: res.changed_by.map(|changed_by| &*changed_by), } } } @@ -595,7 +682,6 @@ impl<'w, T: Resource> From> for Ref<'w, T> { Self { value: res.value, ticks: res.ticks, - #[cfg(feature = "track_change_detection")] changed_by: res.changed_by, } } @@ -622,14 +708,13 @@ impl_debug!(Res<'w, T>, Resource); /// If you need a shared borrow, use [`Res`] instead. /// /// This [`SystemParam`](crate::system::SystemParam) fails validation if resource doesn't exist. -/// /// This will cause a panic, but can be configured to do nothing or warn once. +/// This will cause a panic, but can be configured to do nothing or warn once. /// /// Use [`Option>`] instead if the resource might not always exist. pub struct ResMut<'w, T: ?Sized + Resource> { pub(crate) value: &'w mut T, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_change_detection")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } impl<'w, 'a, T: Resource> IntoIterator for &'a ResMut<'w, T> @@ -669,7 +754,6 @@ impl<'w, T: Resource> From> for Mut<'w, T> { Mut { value: other.value, ticks: other.ticks, - #[cfg(feature = "track_change_detection")] changed_by: other.changed_by, } } @@ -683,14 +767,13 @@ impl<'w, T: Resource> From> for Mut<'w, T> { /// over to another thread. /// /// This [`SystemParam`](crate::system::SystemParam) fails validation if non-send resource doesn't exist. -/// /// This will cause a panic, but can be configured to do nothing or warn once. 
+/// This will cause a panic, but can be configured to do nothing or warn once. /// /// Use [`Option>`] instead if the resource might not always exist. pub struct NonSendMut<'w, T: ?Sized + 'static> { pub(crate) value: &'w mut T, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_change_detection")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } change_detection_impl!(NonSendMut<'w, T>, T,); @@ -705,7 +788,6 @@ impl<'w, T: 'static> From> for Mut<'w, T> { Mut { value: other.value, ticks: other.ticks, - #[cfg(feature = "track_change_detection")] changed_by: other.changed_by, } } @@ -738,8 +820,7 @@ impl<'w, T: 'static> From> for Mut<'w, T> { pub struct Ref<'w, T: ?Sized> { pub(crate) value: &'w T, pub(crate) ticks: Ticks<'w>, - #[cfg(feature = "track_change_detection")] - pub(crate) changed_by: &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w &'static Location<'static>>, } impl<'w, T: ?Sized> Ref<'w, T> { @@ -756,7 +837,6 @@ impl<'w, T: ?Sized> Ref<'w, T> { Ref { value: f(self.value), ticks: self.ticks, - #[cfg(feature = "track_change_detection")] changed_by: self.changed_by, } } @@ -770,7 +850,7 @@ impl<'w, T: ?Sized> Ref<'w, T> { /// - `added` - A [`Tick`] that stores the tick when the wrapped value was created. /// - `changed` - A [`Tick`] that stores the last time the wrapped value was changed. /// - `last_run` - A [`Tick`], occurring before `this_run`, which is used - /// as a reference to determine whether the wrapped value is newly added or changed. + /// as a reference to determine whether the wrapped value is newly added or changed. /// - `this_run` - A [`Tick`] corresponding to the current point in time -- "now". 
pub fn new( value: &'w T, @@ -778,7 +858,7 @@ impl<'w, T: ?Sized> Ref<'w, T> { changed: &'w Tick, last_run: Tick, this_run: Tick, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, + caller: MaybeLocation<&'w &'static Location<'static>>, ) -> Ref<'w, T> { Ref { value, @@ -788,10 +868,18 @@ impl<'w, T: ?Sized> Ref<'w, T> { last_run, this_run, }, - #[cfg(feature = "track_change_detection")] changed_by: caller, } } + + /// Overwrite the `last_run` and `this_run` tick that are used for change detection. + /// + /// This is an advanced feature. `Ref`s are usually _created_ by engine-internal code and + /// _consumed_ by end-user code. + pub fn set_ticks(&mut self, last_run: Tick, this_run: Tick) { + self.ticks.last_run = last_run; + self.ticks.this_run = this_run; + } } impl<'w, 'a, T> IntoIterator for &'a Ref<'w, T> @@ -871,8 +959,7 @@ impl_debug!(Ref<'w, T>,); pub struct Mut<'w, T: ?Sized> { pub(crate) value: &'w mut T, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_change_detection")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } impl<'w, T: ?Sized> Mut<'w, T> { @@ -897,7 +984,7 @@ impl<'w, T: ?Sized> Mut<'w, T> { last_changed: &'w mut Tick, last_run: Tick, this_run: Tick, - #[cfg(feature = "track_change_detection")] caller: &'w mut &'static Location<'static>, + caller: MaybeLocation<&'w mut &'static Location<'static>>, ) -> Self { Self { value, @@ -907,10 +994,18 @@ impl<'w, T: ?Sized> Mut<'w, T> { last_run, this_run, }, - #[cfg(feature = "track_change_detection")] changed_by: caller, } } + + /// Overwrite the `last_run` and `this_run` tick that are used for change detection. + /// + /// This is an advanced feature. `Mut`s are usually _created_ by engine-internal code and + /// _consumed_ by end-user code. 
+ pub fn set_ticks(&mut self, last_run: Tick, this_run: Tick) { + self.ticks.last_run = last_run; + self.ticks.this_run = this_run; + } } impl<'w, T: ?Sized> From> for Ref<'w, T> { @@ -918,8 +1013,7 @@ impl<'w, T: ?Sized> From> for Ref<'w, T> { Self { value: mut_ref.value, ticks: mut_ref.ticks.into(), - #[cfg(feature = "track_change_detection")] - changed_by: mut_ref.changed_by, + changed_by: mut_ref.changed_by.map(|changed_by| &*changed_by), } } } @@ -965,8 +1059,7 @@ impl_debug!(Mut<'w, T>,); pub struct MutUntyped<'w> { pub(crate) value: PtrMut<'w>, pub(crate) ticks: TicksMut<'w>, - #[cfg(feature = "track_change_detection")] - pub(crate) changed_by: &'w mut &'static Location<'static>, + pub(crate) changed_by: MaybeLocation<&'w mut &'static Location<'static>>, } impl<'w> MutUntyped<'w> { @@ -991,8 +1084,7 @@ impl<'w> MutUntyped<'w> { last_run: self.ticks.last_run, this_run: self.ticks.this_run, }, - #[cfg(feature = "track_change_detection")] - changed_by: self.changed_by, + changed_by: self.changed_by.as_deref_mut(), } } @@ -1043,7 +1135,6 @@ impl<'w> MutUntyped<'w> { Mut { value: f(self.value), ticks: self.ticks, - #[cfg(feature = "track_change_detection")] changed_by: self.changed_by, } } @@ -1058,7 +1149,6 @@ impl<'w> MutUntyped<'w> { value: unsafe { self.value.deref_mut() }, ticks: self.ticks, // SAFETY: `caller` is `Aligned`. 
- #[cfg(feature = "track_change_detection")] changed_by: self.changed_by, } } @@ -1085,9 +1175,13 @@ impl<'w> DetectChanges for MutUntyped<'w> { } #[inline] - #[cfg(feature = "track_change_detection")] - fn changed_by(&self) -> &'static Location<'static> { - self.changed_by + fn changed_by(&self) -> MaybeLocation { + self.changed_by.copied() + } + + #[inline] + fn added(&self) -> Tick { + *self.ticks.added } } @@ -1098,20 +1192,30 @@ impl<'w> DetectChangesMut for MutUntyped<'w> { #[track_caller] fn set_changed(&mut self) { *self.ticks.changed = self.ticks.this_run; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_added(&mut self) { + *self.ticks.changed = self.ticks.this_run; + *self.ticks.added = self.ticks.this_run; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] #[track_caller] fn set_last_changed(&mut self, last_changed: Tick) { *self.ticks.changed = last_changed; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by = Location::caller(); - } + self.changed_by.assign(MaybeLocation::caller()); + } + + #[inline] + #[track_caller] + fn set_last_added(&mut self, last_added: Tick) { + *self.ticks.added = last_added; + *self.ticks.changed = last_added; + self.changed_by.assign(MaybeLocation::caller()); } #[inline] @@ -1134,62 +1238,294 @@ impl<'w, T> From> for MutUntyped<'w> { MutUntyped { value: value.value.into(), ticks: value.ticks, - #[cfg(feature = "track_change_detection")] changed_by: value.changed_by, } } } -/// A type alias to [`&'static Location<'static>`](std::panic::Location) when the `track_change_detection` feature is -/// enabled, and the unit type `()` when it is not. +/// A value that contains a `T` if the `track_location` feature is enabled, +/// and is a ZST if it is not. 
/// -/// This is primarily used in places where `#[cfg(...)]` attributes are not allowed, such as -/// function return types. Because unit is a zero-sized type, it is the equivalent of not using a -/// `Location` at all. +/// The overall API is similar to [`Option`], but whether the value is `Some` or `None` is set at compile +/// time and is the same for all values. /// -/// Please use this type sparingly: prefer normal `#[cfg(...)]` attributes when possible. -#[cfg(feature = "track_change_detection")] -pub(crate) type MaybeLocation = &'static Location<'static>; - -/// A type alias to [`&'static Location<'static>`](std::panic::Location) when the `track_change_detection` feature is -/// enabled, and the unit type `()` when it is not. -/// -/// This is primarily used in places where `#[cfg(...)]` attributes are not allowed, such as -/// function return types. Because unit is a zero-sized type, it is the equivalent of not using a -/// `Location` at all. +/// If the `track_location` feature is disabled, then all functions on this type that return +/// an `MaybeLocation` will have an empty body and should be removed by the optimizer. /// -/// Please use this type sparingly: prefer normal `#[cfg(...)]` attributes when possible. -#[cfg(not(feature = "track_change_detection"))] -pub(crate) type MaybeLocation = (); +/// This allows code to be written that will be checked by the compiler even when the feature is disabled, +/// but that will be entirely removed during compilation. +#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct MaybeLocation> { + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] + marker: PhantomData, + #[cfg(feature = "track_location")] + value: T, +} -/// A type alias to `&UnsafeCell<&'static Location<'static>>` when the `track_change_detection` -/// feature is enabled, and the unit type `()` when it is not. 
-/// -/// See [`MaybeLocation`] for further information. -#[cfg(feature = "track_change_detection")] -pub(crate) type MaybeUnsafeCellLocation<'a> = &'a UnsafeCell<&'static Location<'static>>; +impl core::fmt::Display for MaybeLocation { + fn fmt(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + #[cfg(feature = "track_location")] + { + self.value.fmt(_f)?; + } + Ok(()) + } +} -/// A type alias to `&UnsafeCell<&'static Location<'static>>` when the `track_change_detection` -/// feature is enabled, and the unit type `()` when it is not. -/// -/// See [`MaybeLocation`] for further information. -#[cfg(not(feature = "track_change_detection"))] -pub(crate) type MaybeUnsafeCellLocation<'a> = (); +impl MaybeLocation { + /// Constructs a new `MaybeLocation` that wraps the given value. + /// + /// This may only accept `Copy` types, + /// since it needs to drop the value if the `track_location` feature is disabled, + /// and non-`Copy` types cannot be dropped in `const` context. + /// Use [`new_with`][Self::new_with] if you need to construct a non-`Copy` value. + /// + /// # See also + /// - [`new_with`][Self::new_with] to initialize using a closure. + /// - [`new_with_flattened`][Self::new_with_flattened] to initialize using a closure that returns an `Option>`. + #[inline] + pub const fn new(_value: T) -> Self + where + T: Copy, + { + Self { + #[cfg(feature = "track_location")] + value: _value, + marker: PhantomData, + } + } -/// A type alias to `ThinSlicePtr<'w, UnsafeCell<&'static Location<'static>>>` when the -/// `track_change_detection` feature is enabled, and the unit type `()` when it is not. -/// -/// See [`MaybeLocation`] for further information. -#[cfg(feature = "track_change_detection")] -pub(crate) type MaybeThinSlicePtrLocation<'w> = - ThinSlicePtr<'w, UnsafeCell<&'static Location<'static>>>; + /// Constructs a new `MaybeLocation` that wraps the result of the given closure. 
+ /// + /// # See also + /// - [`new`][Self::new] to initialize using a value. + /// - [`new_with_flattened`][Self::new_with_flattened] to initialize using a closure that returns an `Option>`. + #[inline] + pub fn new_with(_f: impl FnOnce() -> T) -> Self { + Self { + #[cfg(feature = "track_location")] + value: _f(), + marker: PhantomData, + } + } -/// A type alias to `ThinSlicePtr<'w, UnsafeCell<&'static Location<'static>>>` when the -/// `track_change_detection` feature is enabled, and the unit type `()` when it is not. -/// -/// See [`MaybeLocation`] for further information. -#[cfg(not(feature = "track_change_detection"))] -pub(crate) type MaybeThinSlicePtrLocation<'w> = (); + /// Maps an `MaybeLocation `to `MaybeLocation` by applying a function to a contained value. + #[inline] + pub fn map(self, _f: impl FnOnce(T) -> U) -> MaybeLocation { + MaybeLocation { + #[cfg(feature = "track_location")] + value: _f(self.value), + marker: PhantomData, + } + } + + /// Converts a pair of `MaybeLocation` values to an `MaybeLocation` of a tuple. + #[inline] + pub fn zip(self, _other: MaybeLocation) -> MaybeLocation<(T, U)> { + MaybeLocation { + #[cfg(feature = "track_location")] + value: (self.value, _other.value), + marker: PhantomData, + } + } + + /// Returns the contained value or a default. + /// If the `track_location` feature is enabled, this always returns the contained value. + /// If it is disabled, this always returns `T::Default()`. + #[inline] + pub fn unwrap_or_default(self) -> T + where + T: Default, + { + self.into_option().unwrap_or_default() + } + + /// Converts an `MaybeLocation` to an [`Option`] to allow run-time branching. + /// If the `track_location` feature is enabled, this always returns `Some`. + /// If it is disabled, this always returns `None`. 
+ #[inline] + pub fn into_option(self) -> Option { + #[cfg(feature = "track_location")] + { + Some(self.value) + } + #[cfg(not(feature = "track_location"))] + { + None + } + } +} + +impl MaybeLocation> { + /// Constructs a new `MaybeLocation` that wraps the result of the given closure. + /// If the closure returns `Some`, it unwraps the inner value. + /// + /// # See also + /// - [`new`][Self::new] to initialize using a value. + /// - [`new_with`][Self::new_with] to initialize using a closure. + #[inline] + pub fn new_with_flattened(_f: impl FnOnce() -> Option>) -> Self { + Self { + #[cfg(feature = "track_location")] + value: _f().map(|value| value.value), + marker: PhantomData, + } + } + + /// Transposes a `MaybeLocation` of an [`Option`] into an [`Option`] of a `MaybeLocation`. + /// + /// This can be useful if you want to use the `?` operator to exit early + /// if the `track_location` feature is enabled but the value is not found. + /// + /// If the `track_location` feature is enabled, + /// this returns `Some` if the inner value is `Some` + /// and `None` if the inner value is `None`. + /// + /// If it is disabled, this always returns `Some`. + /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::{change_detection::MaybeLocation, world::World}; + /// # use core::panic::Location; + /// # + /// # fn test() -> Option<()> { + /// let mut world = World::new(); + /// let entity = world.spawn(()).id(); + /// let location: MaybeLocation>> = + /// world.entities().entity_get_spawned_or_despawned_by(entity); + /// let location: MaybeLocation<&'static Location<'static>> = location.transpose()?; + /// # Some(()) + /// # } + /// # test(); + /// ``` + /// + /// # See also + /// + /// - [`into_option`][Self::into_option] to convert to an `Option>`. + /// When used with [`Option::flatten`], this will have a similar effect, + /// but will return `None` when the `track_location` feature is disabled. 
+ #[inline] + pub fn transpose(self) -> Option> { + #[cfg(feature = "track_location")] + { + self.value.map(|value| MaybeLocation { + value, + marker: PhantomData, + }) + } + #[cfg(not(feature = "track_location"))] + { + Some(MaybeLocation { + marker: PhantomData, + }) + } + } +} + +impl MaybeLocation<&T> { + /// Maps an `MaybeLocation<&T>` to an `MaybeLocation` by copying the contents. + #[inline] + pub const fn copied(&self) -> MaybeLocation + where + T: Copy, + { + MaybeLocation { + #[cfg(feature = "track_location")] + value: *self.value, + marker: PhantomData, + } + } +} + +impl MaybeLocation<&mut T> { + /// Maps an `MaybeLocation<&mut T>` to an `MaybeLocation` by copying the contents. + #[inline] + pub const fn copied(&self) -> MaybeLocation + where + T: Copy, + { + MaybeLocation { + #[cfg(feature = "track_location")] + value: *self.value, + marker: PhantomData, + } + } + + /// Assigns the contents of an `MaybeLocation` to an `MaybeLocation<&mut T>`. + #[inline] + pub fn assign(&mut self, _value: MaybeLocation) { + #[cfg(feature = "track_location")] + { + *self.value = _value.value; + } + } +} + +impl MaybeLocation { + /// Converts from `&MaybeLocation` to `MaybeLocation<&T>`. + #[inline] + pub const fn as_ref(&self) -> MaybeLocation<&T> { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &self.value, + marker: PhantomData, + } + } + + /// Converts from `&mut MaybeLocation` to `MaybeLocation<&mut T>`. + #[inline] + pub const fn as_mut(&mut self) -> MaybeLocation<&mut T> { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &mut self.value, + marker: PhantomData, + } + } + + /// Converts from `&MaybeLocation` to `MaybeLocation<&T::Target>`. + #[inline] + pub fn as_deref(&self) -> MaybeLocation<&T::Target> + where + T: Deref, + { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &*self.value, + marker: PhantomData, + } + } + + /// Converts from `&mut MaybeLocation` to `MaybeLocation<&mut T::Target>`. 
+ #[inline] + pub fn as_deref_mut(&mut self) -> MaybeLocation<&mut T::Target> + where + T: DerefMut, + { + MaybeLocation { + #[cfg(feature = "track_location")] + value: &mut *self.value, + marker: PhantomData, + } + } +} + +impl MaybeLocation { + /// Returns the source location of the caller of this function. If that function's caller is + /// annotated then its call location will be returned, and so on up the stack to the first call + /// within a non-tracked function body. + #[inline] + #[track_caller] + pub fn caller() -> Self { + // Note that this cannot use `new_with`, since `FnOnce` invocations cannot be annotated with `#[track_caller]`. + MaybeLocation { + #[cfg(feature = "track_location")] + value: Location::caller(), + marker: PhantomData, + } + } +} #[cfg(test)] mod tests { @@ -1197,13 +1533,11 @@ mod tests { use bevy_ptr::PtrMut; use bevy_reflect::{FromType, ReflectFromPtr}; use core::ops::{Deref, DerefMut}; - #[cfg(feature = "track_change_detection")] - use core::panic::Location; use crate::{ - self as bevy_ecs, change_detection::{ - Mut, NonSendMut, Ref, ResMut, TicksMut, CHECK_TICK_THRESHOLD, MAX_CHANGE_AGE, + MaybeLocation, Mut, NonSendMut, Ref, ResMut, TicksMut, CHECK_TICK_THRESHOLD, + MAX_CHANGE_AGE, }, component::{Component, ComponentTicks, Tick}, system::{IntoSystem, Single, System}, @@ -1283,7 +1617,7 @@ mod tests { // Since the world is always ahead, as long as changes can't get older than `u32::MAX` (which we ensure), // the wrapping difference will always be positive, so wraparound doesn't matter. 
let mut query = world.query::>(); - assert!(query.single(&world).is_changed()); + assert!(query.single(&world).unwrap().is_changed()); } #[test] @@ -1329,14 +1663,12 @@ mod tests { this_run: Tick::new(4), }; let mut res = R {}; - #[cfg(feature = "track_change_detection")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let res_mut = ResMut { value: &mut res, ticks, - #[cfg(feature = "track_change_detection")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let into_mut: Mut = res_mut.into(); @@ -1353,8 +1685,7 @@ mod tests { changed: Tick::new(3), }; let mut res = R {}; - #[cfg(feature = "track_change_detection")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let val = Mut::new( &mut res, @@ -1362,8 +1693,7 @@ mod tests { &mut component_ticks.changed, Tick::new(2), // last_run Tick::new(4), // this_run - #[cfg(feature = "track_change_detection")] - &mut caller, + caller.as_mut(), ); assert!(!val.is_added()); @@ -1383,14 +1713,12 @@ mod tests { this_run: Tick::new(4), }; let mut res = R {}; - #[cfg(feature = "track_change_detection")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let non_send_mut = NonSendMut { value: &mut res, ticks, - #[cfg(feature = "track_change_detection")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let into_mut: Mut = non_send_mut.into(); @@ -1419,14 +1747,12 @@ mod tests { }; let mut outer = Outer(0); - #[cfg(feature = "track_change_detection")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let ptr = Mut { value: &mut outer, ticks, - #[cfg(feature = "track_change_detection")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; assert!(!ptr.is_changed()); @@ -1509,14 +1835,12 @@ mod tests { }; let mut value: i32 = 5; - #[cfg(feature = "track_change_detection")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let value = 
MutUntyped { value: PtrMut::from(&mut value), ticks, - #[cfg(feature = "track_change_detection")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let reflect_from_ptr = >::from_type(); @@ -1547,14 +1871,12 @@ mod tests { this_run: Tick::new(4), }; let mut c = C {}; - #[cfg(feature = "track_change_detection")] - let mut caller = Location::caller(); + let mut caller = MaybeLocation::caller(); let mut_typed = Mut { value: &mut c, ticks, - #[cfg(feature = "track_change_detection")] - changed_by: &mut caller, + changed_by: caller.as_mut(), }; let into_mut: MutUntyped = mut_typed.into(); diff --git a/crates/bevy_ecs/src/component.rs b/crates/bevy_ecs/src/component.rs index f0cd6a6b5bcaf..bfa55804b616a 100644 --- a/crates/bevy_ecs/src/component.rs +++ b/crates/bevy_ecs/src/component.rs @@ -1,26 +1,29 @@ //! Types for declaring and storing [`Component`]s. use crate::{ - self as bevy_ecs, archetype::ArchetypeFlags, bundle::BundleInfo, - change_detection::MAX_CHANGE_AGE, - entity::{ComponentCloneCtx, Entity}, + change_detection::{MaybeLocation, MAX_CHANGE_AGE}, + entity::{ComponentCloneCtx, Entity, EntityMapper, SourceComponent}, query::DebugCheckedUnwrap, - storage::{SparseSetIndex, SparseSets, Storages, Table, TableRow}, - system::{Local, Resource, SystemParam}, + relationship::RelationshipHookMode, + resource::Resource, + storage::{SparseSetIndex, SparseSets, Table, TableRow}, + system::{Local, SystemParam}, world::{DeferredWorld, FromWorld, World}, }; -#[cfg(feature = "bevy_reflect")] use alloc::boxed::Box; use alloc::{borrow::Cow, format, vec::Vec}; pub use bevy_ecs_macros::Component; +use bevy_platform::sync::Arc; +use bevy_platform::{ + collections::{HashMap, HashSet}, + sync::PoisonError, +}; use bevy_ptr::{OwningPtr, UnsafeCellDeref}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -use bevy_utils::{HashMap, HashSet, TypeIdMap}; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; +use bevy_utils::TypeIdMap; use core::{ 
alloc::Layout, any::{Any, TypeId}, @@ -28,18 +31,12 @@ use core::{ fmt::Debug, marker::PhantomData, mem::needs_drop, + ops::{Deref, DerefMut}, }; use disqualified::ShortName; +use smallvec::SmallVec; use thiserror::Error; -#[cfg(feature = "portable-atomic")] -use portable_atomic_util::Arc; - -#[cfg(not(feature = "portable-atomic"))] -use alloc::sync::Arc; - -pub use bevy_ecs_macros::require; - /// A data type that can be used to store data for an [entity]. /// /// `Component` is a [derivable trait]: this means that a data type can implement it by applying a `#[derive(Component)]` attribute to it. @@ -163,16 +160,73 @@ pub use bevy_ecs_macros::require; /// assert_eq!(&C(0), world.entity(id).get::().unwrap()); /// ``` /// -/// You can also define a custom constructor function or closure: +/// You can define inline component values that take the following forms: +/// ``` +/// # use bevy_ecs::prelude::*; +/// #[derive(Component)] +/// #[require( +/// B(1), // tuple structs +/// C { // named-field structs +/// x: 1, +/// ..Default::default() +/// }, +/// D::One, // enum variants +/// E::ONE, // associated consts +/// F::new(1) // constructors +/// )] +/// struct A; +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// struct B(u8); +/// +/// #[derive(Component, PartialEq, Eq, Debug, Default)] +/// struct C { +/// x: u8, +/// y: u8, +/// } +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// enum D { +/// Zero, +/// One, +/// } +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// struct E(u8); +/// +/// impl E { +/// pub const ONE: Self = Self(1); +/// } +/// +/// #[derive(Component, PartialEq, Eq, Debug)] +/// struct F(u8); +/// +/// impl F { +/// fn new(value: u8) -> Self { +/// Self(value) +/// } +/// } +/// +/// # let mut world = World::default(); +/// let id = world.spawn(A).id(); +/// assert_eq!(&B(1), world.entity(id).get::().unwrap()); +/// assert_eq!(&C { x: 1, y: 0 }, world.entity(id).get::().unwrap()); +/// assert_eq!(&D::One, 
world.entity(id).get::().unwrap()); +/// assert_eq!(&E(1), world.entity(id).get::().unwrap()); +/// assert_eq!(&F(1), world.entity(id).get::().unwrap()); +/// ```` +/// +/// +/// You can also define arbitrary expressions by using `=` /// /// ``` /// # use bevy_ecs::prelude::*; /// #[derive(Component)] -/// #[require(C(init_c))] +/// #[require(C = init_c())] /// struct A; /// /// #[derive(Component, PartialEq, Eq, Debug)] -/// #[require(C(|| C(20)))] +/// #[require(C = C(20))] /// struct B; /// /// #[derive(Component, PartialEq, Eq, Debug)] @@ -227,13 +281,13 @@ pub use bevy_ecs_macros::require; /// struct X(usize); /// /// #[derive(Component, Default)] -/// #[require(X(|| X(1)))] +/// #[require(X(1))] /// struct Y; /// /// #[derive(Component)] /// #[require( /// Y, -/// X(|| X(2)), +/// X(2), /// )] /// struct Z; /// @@ -293,6 +347,23 @@ pub use bevy_ecs_macros::require; /// Note that requirements must currently be registered before the requiring component is inserted /// into the world for the first time. Registering requirements after this will lead to a panic. /// +/// # Relationships between Entities +/// +/// Sometimes it is useful to define relationships between entities. A common example is the +/// parent / child relationship. Since Components are how data is stored for Entities, one might +/// naturally think to create a Component which has a field of type [`Entity`]. +/// +/// To facilitate this pattern, Bevy provides the [`Relationship`](`crate::relationship::Relationship`) +/// trait. You can derive the [`Relationship`](`crate::relationship::Relationship`) and +/// [`RelationshipTarget`](`crate::relationship::RelationshipTarget`) traits in addition to the +/// Component trait in order to implement data driven relationships between entities, see the trait +/// docs for more details. 
+/// +/// In addition, Bevy provides canonical implementations of the parent / child relationship via the +/// [`ChildOf`](crate::hierarchy::ChildOf) [`Relationship`](crate::relationship::Relationship) and +/// the [`Children`](crate::hierarchy::Children) +/// [`RelationshipTarget`](crate::relationship::RelationshipTarget). +/// /// # Adding component's hooks /// /// See [`ComponentHooks`] for a detailed explanation of component's hooks. @@ -304,10 +375,11 @@ pub use bevy_ecs_macros::require; /// - `#[component(on_remove = on_remove_function)]` /// /// ``` -/// # use bevy_ecs::component::Component; +/// # use bevy_ecs::component::{Component, HookContext}; /// # use bevy_ecs::world::DeferredWorld; /// # use bevy_ecs::entity::Entity; /// # use bevy_ecs::component::ComponentId; +/// # use core::panic::Location; /// # /// #[derive(Component)] /// #[component(on_add = my_on_add_hook)] @@ -319,16 +391,35 @@ pub use bevy_ecs_macros::require; /// // #[component(on_replace = my_on_replace_hook, on_remove = my_on_remove_hook)] /// struct ComponentA; /// -/// fn my_on_add_hook(world: DeferredWorld, entity: Entity, id: ComponentId) { +/// fn my_on_add_hook(world: DeferredWorld, context: HookContext) { /// // ... /// } /// -/// // You can also omit writing some types using generics. -/// fn my_on_insert_hook(world: DeferredWorld, _: T1, _: T2) { +/// // You can also destructure items directly in the signature +/// fn my_on_insert_hook(world: DeferredWorld, HookContext { caller, .. }: HookContext) { /// // ... 
/// } /// ``` /// +/// This also supports function calls that yield closures +/// +/// ``` +/// # use bevy_ecs::component::{Component, HookContext}; +/// # use bevy_ecs::world::DeferredWorld; +/// # +/// #[derive(Component)] +/// #[component(on_add = my_msg_hook("hello"))] +/// #[component(on_despawn = my_msg_hook("yoink"))] +/// struct ComponentA; +/// +/// // a hook closure generating function +/// fn my_msg_hook(message: &'static str) -> impl Fn(DeferredWorld, HookContext) { +/// move |_world, _ctx| { +/// println!("{message}"); +/// } +/// } +/// ``` +/// /// # Implementing the trait for foreign types /// /// As a consequence of the [orphan rule], it is not possible to separate into two different crates the implementation of `Component` from the definition of a type. @@ -404,13 +495,43 @@ pub trait Component: Send + Sync + 'static { type Mutability: ComponentMutability; /// Called when registering this component, allowing mutable access to its [`ComponentHooks`]. - fn register_component_hooks(_hooks: &mut ComponentHooks) {} + #[deprecated( + since = "0.16.0", + note = "Use the individual hook methods instead (e.g., `Component::on_add`, etc.)" + )] + fn register_component_hooks(hooks: &mut ComponentHooks) { + hooks.update_from_component::(); + } + + /// Gets the `on_add` [`ComponentHook`] for this [`Component`] if one is defined. + fn on_add() -> Option { + None + } + + /// Gets the `on_insert` [`ComponentHook`] for this [`Component`] if one is defined. + fn on_insert() -> Option { + None + } + + /// Gets the `on_replace` [`ComponentHook`] for this [`Component`] if one is defined. + fn on_replace() -> Option { + None + } + + /// Gets the `on_remove` [`ComponentHook`] for this [`Component`] if one is defined. + fn on_remove() -> Option { + None + } + + /// Gets the `on_despawn` [`ComponentHook`] for this [`Component`] if one is defined. + fn on_despawn() -> Option { + None + } /// Registers required components. 
fn register_required_components( _component_id: ComponentId, - _components: &mut Components, - _storages: &mut Storages, + _components: &mut ComponentsRegistrator, _required_components: &mut RequiredComponents, _inheritance_depth: u16, _recursion_check_stack: &mut Vec, @@ -419,10 +540,27 @@ pub trait Component: Send + Sync + 'static { /// Called when registering this component, allowing to override clone function (or disable cloning altogether) for this component. /// - /// See [Handlers section of `EntityCloneBuilder`](crate::entity::EntityCloneBuilder#handlers) to understand how this affects handler priority. - fn get_component_clone_handler() -> ComponentCloneHandler { - ComponentCloneHandler::default_handler() + /// See [Handlers section of `EntityClonerBuilder`](crate::entity::EntityClonerBuilder#handlers) to understand how this affects handler priority. + #[inline] + fn clone_behavior() -> ComponentCloneBehavior { + ComponentCloneBehavior::Default } + + /// Maps the entities on this component using the given [`EntityMapper`]. This is used to remap entities in contexts like scenes and entity cloning. + /// When deriving [`Component`], this is populated by annotating fields containing entities with `#[entities]` + /// + /// ``` + /// # use bevy_ecs::{component::Component, entity::Entity}; + /// #[derive(Component)] + /// struct Inventory { + /// #[entities] + /// items: Vec + /// } + /// ``` + /// + /// Fields with `#[entities]` must implement [`MapEntities`](crate::entity::MapEntities). + #[inline] + fn map_entities(_this: &mut Self, _mapper: &mut E) {} } mod private { @@ -501,8 +639,21 @@ pub enum StorageType { SparseSet, } -/// The type used for [`Component`] lifecycle hooks such as `on_add`, `on_insert` or `on_remove` -pub type ComponentHook = for<'w> fn(DeferredWorld<'w>, Entity, ComponentId); +/// The type used for [`Component`] lifecycle hooks such as `on_add`, `on_insert` or `on_remove`. 
+pub type ComponentHook = for<'w> fn(DeferredWorld<'w>, HookContext); + +/// Context provided to a [`ComponentHook`]. +#[derive(Clone, Copy, Debug)] +pub struct HookContext { + /// The [`Entity`] this hook was invoked for. + pub entity: Entity, + /// The [`ComponentId`] this hook was invoked for. + pub component_id: ComponentId, + /// The caller location is `Some` if the `track_caller` feature is enabled. + pub caller: MaybeLocation, + /// Configures how relationship hooks will run + pub relationship_hook_mode: RelationshipHookMode, +} /// [`World`]-mutating functions that run as part of lifecycle events of a [`Component`]. /// @@ -524,7 +675,7 @@ pub type ComponentHook = for<'w> fn(DeferredWorld<'w>, Entity, ComponentId); /// /// ``` /// use bevy_ecs::prelude::*; -/// use bevy_utils::HashSet; +/// use bevy_platform::collections::HashSet; /// /// #[derive(Component)] /// struct MyTrackedComponent; @@ -539,14 +690,14 @@ pub type ComponentHook = for<'w> fn(DeferredWorld<'w>, Entity, ComponentId); /// let mut tracked_component_query = world.query::<&MyTrackedComponent>(); /// assert!(tracked_component_query.iter(&world).next().is_none()); /// -/// world.register_component_hooks::().on_add(|mut world, entity, _component_id| { +/// world.register_component_hooks::().on_add(|mut world, context| { /// let mut tracked_entities = world.resource_mut::(); -/// tracked_entities.0.insert(entity); +/// tracked_entities.0.insert(context.entity); /// }); /// -/// world.register_component_hooks::().on_remove(|mut world, entity, _component_id| { +/// world.register_component_hooks::().on_remove(|mut world, context| { /// let mut tracked_entities = world.resource_mut::(); -/// tracked_entities.0.remove(&entity); +/// tracked_entities.0.remove(&context.entity); /// }); /// /// let entity = world.spawn(MyTrackedComponent).id(); @@ -563,9 +714,30 @@ pub struct ComponentHooks { pub(crate) on_insert: Option, pub(crate) on_replace: Option, pub(crate) on_remove: Option, + pub(crate) 
on_despawn: Option, } impl ComponentHooks { + pub(crate) fn update_from_component(&mut self) -> &mut Self { + if let Some(hook) = C::on_add() { + self.on_add(hook); + } + if let Some(hook) = C::on_insert() { + self.on_insert(hook); + } + if let Some(hook) = C::on_replace() { + self.on_replace(hook); + } + if let Some(hook) = C::on_remove() { + self.on_remove(hook); + } + if let Some(hook) = C::on_despawn() { + self.on_despawn(hook); + } + + self + } + /// Register a [`ComponentHook`] that will be run when this component is added to an entity. /// An `on_add` hook will always run before `on_insert` hooks. Spawning an entity counts as /// adding all of its components. @@ -629,6 +801,16 @@ impl ComponentHooks { .expect("Component already has an on_remove hook") } + /// Register a [`ComponentHook`] that will be run for each component on an entity when it is despawned. + /// + /// # Panics + /// + /// Will panic if the component already has an `on_despawn` hook + pub fn on_despawn(&mut self, hook: ComponentHook) -> &mut Self { + self.try_on_despawn(hook) + .expect("Component already has an on_despawn hook") + } + /// Attempt to register a [`ComponentHook`] that will be run when this component is added to an entity. /// /// This is a fallible version of [`Self::on_add`]. @@ -680,6 +862,19 @@ impl ComponentHooks { self.on_remove = Some(hook); Some(self) } + + /// Attempt to register a [`ComponentHook`] that will be run for each component on an entity when it is despawned. + /// + /// This is a fallible version of [`Self::on_despawn`]. + /// + /// Returns `None` if the component already has an `on_despawn` hook. + pub fn try_on_despawn(&mut self, hook: ComponentHook) -> Option<&mut Self> { + if self.on_despawn.is_some() { + return None; + } + self.on_despawn = Some(hook); + Some(self) + } } /// Stores metadata for a type of component or resource stored in a specific [`World`]. 
@@ -711,6 +906,12 @@ impl ComponentInfo { self.descriptor.mutable } + /// Returns [`ComponentCloneBehavior`] of the current component. + #[inline] + pub fn clone_behavior(&self) -> &ComponentCloneBehavior { + &self.descriptor.clone_behavior + } + /// Returns the [`TypeId`] of the underlying component type. /// Returns `None` if the component does not correspond to a Rust type. #[inline] @@ -775,6 +976,9 @@ impl ComponentInfo { if self.hooks().on_remove.is_some() { flags.insert(ArchetypeFlags::ON_REMOVE_HOOK); } + if self.hooks().on_despawn.is_some() { + flags.insert(ArchetypeFlags::ON_DESPAWN_HOOK); + } } /// Provides a reference to the collection of hooks associated with this [`Component`] @@ -814,7 +1018,7 @@ impl ComponentInfo { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] pub struct ComponentId(usize); @@ -864,6 +1068,7 @@ pub struct ComponentDescriptor { // None if the underlying type doesn't need to be dropped drop: Option unsafe fn(OwningPtr<'a>)>, mutable: bool, + clone_behavior: ComponentCloneBehavior, } // We need to ignore the `drop` field in our `Debug` impl @@ -876,6 +1081,7 @@ impl Debug for ComponentDescriptor { .field("type_id", &self.type_id) .field("layout", &self.layout) .field("mutable", &self.mutable) + .field("clone_behavior", &self.clone_behavior) .finish() } } @@ -901,6 +1107,7 @@ impl ComponentDescriptor { layout: Layout::new::(), drop: needs_drop::().then_some(Self::drop_ptr:: as _), mutable: T::Mutability::MUTABLE, + clone_behavior: T::clone_behavior(), } } @@ -915,6 +1122,7 @@ impl ComponentDescriptor { layout: Layout, drop: Option unsafe fn(OwningPtr<'a>)>, mutable: bool, + clone_behavior: ComponentCloneBehavior, ) -> Self { Self { name: name.into(), @@ -924,6 +1132,7 @@ impl ComponentDescriptor { layout, drop, mutable, + clone_behavior, } } @@ -941,6 +1150,7 @@ impl ComponentDescriptor { layout: Layout::new::(), drop: 
needs_drop::().then_some(Self::drop_ptr:: as _), mutable: true, + clone_behavior: ComponentCloneBehavior::Default, } } @@ -953,6 +1163,7 @@ impl ComponentDescriptor { layout: Layout::new::(), drop: needs_drop::().then_some(Self::drop_ptr:: as _), mutable: true, + clone_behavior: ComponentCloneBehavior::Default, } } @@ -983,254 +1194,930 @@ impl ComponentDescriptor { } /// Function type that can be used to clone an entity. -pub type ComponentCloneFn = fn(&mut DeferredWorld, &mut ComponentCloneCtx); - -/// A struct instructing which clone handler to use when cloning a component. -#[derive(Debug)] -pub struct ComponentCloneHandler(Option); - -impl ComponentCloneHandler { - /// Use the global default function to clone the component with this handler. - pub fn default_handler() -> Self { - Self(None) - } +pub type ComponentCloneFn = fn(&SourceComponent, &mut ComponentCloneCtx); - /// Do not clone the component. When a command to clone an entity is issued, component with this handler will be skipped. - pub fn ignore() -> Self { - Self(Some(component_clone_ignore)) - } +/// The clone behavior to use when cloning a [`Component`]. +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub enum ComponentCloneBehavior { + /// Uses the default behavior (which is passed to [`ComponentCloneBehavior::resolve`]) + #[default] + Default, + /// Do not clone this component. + Ignore, + /// Uses a custom [`ComponentCloneFn`]. + Custom(ComponentCloneFn), +} +impl ComponentCloneBehavior { /// Set clone handler based on `Clone` trait. /// /// If set as a handler for a component that is not the same as the one used to create this handler, it will panic. - pub fn clone_handler() -> Self { - Self(Some(component_clone_via_clone::)) + pub fn clone() -> Self { + Self::Custom(component_clone_via_clone::) } /// Set clone handler based on `Reflect` trait. 
#[cfg(feature = "bevy_reflect")] - pub fn reflect_handler() -> Self { - Self(Some(component_clone_via_reflect)) + pub fn reflect() -> Self { + Self::Custom(component_clone_via_reflect) } - /// Set a custom handler for the component. - pub fn custom_handler(handler: ComponentCloneFn) -> Self { - Self(Some(handler)) + /// Returns the "global default" + pub fn global_default_fn() -> ComponentCloneFn { + #[cfg(feature = "bevy_reflect")] + return component_clone_via_reflect; + #[cfg(not(feature = "bevy_reflect"))] + return component_clone_ignore; } - /// Get [`ComponentCloneFn`] representing this handler or `None` if set to default handler. - pub fn get_handler(&self) -> Option { - self.0 + /// Resolves the [`ComponentCloneBehavior`] to a [`ComponentCloneFn`]. If [`ComponentCloneBehavior::Default`] is + /// specified, the given `default` function will be used. + pub fn resolve(&self, default: ComponentCloneFn) -> ComponentCloneFn { + match self { + ComponentCloneBehavior::Default => default, + ComponentCloneBehavior::Ignore => component_clone_ignore, + ComponentCloneBehavior::Custom(custom) => *custom, + } } } -/// A registry of component clone handlers. Allows to set global default and per-component clone function for all components in the world. -#[derive(Debug)] -pub struct ComponentCloneHandlers { - handlers: Vec>, - default_handler: ComponentCloneFn, +/// A queued component registration. +struct QueuedRegistration { + registrator: Box, + id: ComponentId, + descriptor: ComponentDescriptor, } -impl ComponentCloneHandlers { - /// Sets the default handler for this registry. All components with [`default`](ComponentCloneHandler::default_handler) handler, as well as any component that does not have an - /// explicitly registered clone function will use this handler. +impl QueuedRegistration { + /// Creates the [`QueuedRegistration`]. 
/// - /// See [Handlers section of `EntityCloneBuilder`](crate::entity::EntityCloneBuilder#handlers) to understand how this affects handler priority. - pub fn set_default_handler(&mut self, handler: ComponentCloneFn) { - self.default_handler = handler; + /// # Safety + /// + /// [`ComponentId`] must be unique. + unsafe fn new( + id: ComponentId, + descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, + ) -> Self { + Self { + registrator: Box::new(func), + id, + descriptor, + } } - /// Returns the currently registered default handler. - pub fn get_default_handler(&self) -> ComponentCloneFn { - self.default_handler + /// Performs the registration, returning the now valid [`ComponentId`]. + fn register(self, registrator: &mut ComponentsRegistrator) -> ComponentId { + (self.registrator)(registrator, self.id, self.descriptor); + self.id } +} - /// Sets a handler for a specific component. - /// - /// See [Handlers section of `EntityCloneBuilder`](crate::entity::EntityCloneBuilder#handlers) to understand how this affects handler priority. - pub fn set_component_handler(&mut self, id: ComponentId, handler: ComponentCloneHandler) { - if id.0 >= self.handlers.len() { - self.handlers.resize(id.0 + 1, None); - } - self.handlers[id.0] = handler.0; +/// Allows queuing components to be registered. 
+#[derive(Default)] +pub struct QueuedComponents { + components: TypeIdMap, + resources: TypeIdMap, + dynamic_registrations: Vec, +} + +impl Debug for QueuedComponents { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + let components = self + .components + .iter() + .map(|(type_id, queued)| (type_id, queued.id)) + .collect::>(); + let resources = self + .resources + .iter() + .map(|(type_id, queued)| (type_id, queued.id)) + .collect::>(); + let dynamic_registrations = self + .dynamic_registrations + .iter() + .map(|queued| queued.id) + .collect::>(); + write!(f, "components: {components:?}, resources: {resources:?}, dynamic_registrations: {dynamic_registrations:?}") } +} - /// Checks if the specified component is registered. If not, the component will use the default global handler. - /// - /// This will return an incorrect result if `id` did not come from the same world as `self`. - pub fn is_handler_registered(&self, id: ComponentId) -> bool { - self.handlers.get(id.0).is_some_and(Option::is_some) +/// Generates [`ComponentId`]s. +#[derive(Debug, Default)] +pub struct ComponentIds { + next: bevy_platform::sync::atomic::AtomicUsize, +} + +impl ComponentIds { + /// Peeks the next [`ComponentId`] to be generated without generating it. + pub fn peek(&self) -> ComponentId { + ComponentId( + self.next + .load(bevy_platform::sync::atomic::Ordering::Relaxed), + ) } - /// Gets a handler to clone a component. This can be one of the following: - /// - Custom clone function for this specific component. - /// - Default global handler. - /// - A [`component_clone_ignore`] (no cloning). - /// - /// This will return an incorrect result if `id` did not come from the same world as `self`. - pub fn get_handler(&self, id: ComponentId) -> ComponentCloneFn { - match self.handlers.get(id.0) { - Some(Some(handler)) => *handler, - Some(None) | None => self.default_handler, - } + /// Generates and returns the next [`ComponentId`]. 
+ pub fn next(&self) -> ComponentId { + ComponentId( + self.next + .fetch_add(1, bevy_platform::sync::atomic::Ordering::Relaxed), + ) } -} -impl Default for ComponentCloneHandlers { - fn default() -> Self { - Self { - handlers: Default::default(), - #[cfg(feature = "bevy_reflect")] - default_handler: component_clone_via_reflect, - #[cfg(not(feature = "bevy_reflect"))] - default_handler: component_clone_ignore, - } + /// Peeks the next [`ComponentId`] to be generated without generating it. + pub fn peek_mut(&mut self) -> ComponentId { + ComponentId(*self.next.get_mut()) + } + + /// Generates and returns the next [`ComponentId`]. + pub fn next_mut(&mut self) -> ComponentId { + let id = self.next.get_mut(); + let result = ComponentId(*id); + *id += 1; + result + } + + /// Returns the number of [`ComponentId`]s generated. + pub fn len(&self) -> usize { + self.peek().0 + } + + /// Returns true if and only if no ids have been generated. + pub fn is_empty(&self) -> bool { + self.len() == 0 } } -/// Stores metadata associated with each kind of [`Component`] in a given [`World`]. -#[derive(Debug, Default)] -pub struct Components { - components: Vec, - indices: TypeIdMap, - resource_indices: TypeIdMap, - component_clone_handlers: ComponentCloneHandlers, +/// A type that enables queuing registration in [`Components`]. +/// +/// # Note +/// +/// These queued registrations return [`ComponentId`]s. +/// These ids are not yet valid, but they will become valid +/// when either [`ComponentsRegistrator::apply_queued_registrations`] is called or the same registration is made directly. +/// In either case, the returned [`ComponentId`]s will be correct, but they are not correct yet. +/// +/// Generally, that means these [`ComponentId`]s can be safely used for read-only purposes. 
+/// Modifying the contents of the world through these [`ComponentId`]s directly without waiting for them to be fully registered +/// and without then confirming that they have been fully registered is not supported. +/// Hence, extra care is needed with these [`ComponentId`]s to ensure all safety rules are followed. +/// +/// As a rule of thumb, if you have mutable access to [`ComponentsRegistrator`], prefer to use that instead. +/// Use this only if you need to know the id of a component but do not need to modify the contents of the world based on that id. +#[derive(Clone, Copy)] +pub struct ComponentsQueuedRegistrator<'w> { + components: &'w Components, + ids: &'w ComponentIds, } -impl Components { - /// Registers a [`Component`] of type `T` with this instance. - /// If a component of this type has already been registered, this will return - /// the ID of the pre-existing component. +impl Deref for ComponentsQueuedRegistrator<'_> { + type Target = Components; + + fn deref(&self) -> &Self::Target { + self.components + } +} + +impl<'w> ComponentsQueuedRegistrator<'w> { + /// Constructs a new [`ComponentsQueuedRegistrator`]. /// - /// # See also + /// # Safety /// - /// * [`Components::component_id()`] - /// * [`Components::register_component_with_descriptor()`] - #[inline] - pub fn register_component(&mut self, storages: &mut Storages) -> ComponentId { - self.register_component_internal::(storages, &mut Vec::new()) + /// The [`Components`] and [`ComponentIds`] must match. + /// For example, they must be from the same world. + pub unsafe fn new(components: &'w Components, ids: &'w ComponentIds) -> Self { + Self { components, ids } } - #[inline] - fn register_component_internal( - &mut self, - storages: &mut Storages, - recursion_check_stack: &mut Vec, + /// Queues this function to run as a component registrator. + /// + /// # Safety + /// + /// The [`TypeId`] must not already be registered or queued as a component. 
+ unsafe fn force_register_arbitrary_component( + &self, + type_id: TypeId, + descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, ) -> ComponentId { - let mut is_new_registration = false; - let id = { - let Components { - indices, - components, - .. - } = self; - let type_id = TypeId::of::(); - *indices.entry(type_id).or_insert_with(|| { - let id = Components::register_component_inner( - components, - storages, - ComponentDescriptor::new::(), - ); - is_new_registration = true; - id - }) - }; - if is_new_registration { - let mut required_components = RequiredComponents::default(); - T::register_required_components( - id, - self, - storages, - &mut required_components, - 0, - recursion_check_stack, + let id = self.ids.next(); + self.components + .queued + .write() + .unwrap_or_else(PoisonError::into_inner) + .components + .insert( + type_id, + // SAFETY: The id was just generated. + unsafe { QueuedRegistration::new(id, descriptor, func) }, ); - let info = &mut self.components[id.index()]; - T::register_component_hooks(&mut info.hooks); - info.required_components = required_components; - let clone_handler = T::get_component_clone_handler(); - self.component_clone_handlers - .set_component_handler(id, clone_handler); - } id } - /// Registers a component described by `descriptor`. - /// - /// # Note - /// - /// If this method is called multiple times with identical descriptors, a distinct [`ComponentId`] - /// will be created for each one. + /// Queues this function to run as a resource registrator. /// - /// # See also + /// # Safety /// - /// * [`Components::component_id()`] - /// * [`Components::register_component()`] - pub fn register_component_with_descriptor( - &mut self, - storages: &mut Storages, + /// The [`TypeId`] must not already be registered or queued as a resource. 
+ unsafe fn force_register_arbitrary_resource( + &self, + type_id: TypeId, descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, ) -> ComponentId { - Components::register_component_inner(&mut self.components, storages, descriptor) + let id = self.ids.next(); + self.components + .queued + .write() + .unwrap_or_else(PoisonError::into_inner) + .resources + .insert( + type_id, + // SAFETY: The id was just generated. + unsafe { QueuedRegistration::new(id, descriptor, func) }, + ); + id } - #[inline] - fn register_component_inner( - components: &mut Vec, - storages: &mut Storages, + /// Queues this function to run as a dynamic registrator. + fn force_register_arbitrary_dynamic( + &self, descriptor: ComponentDescriptor, + func: impl FnOnce(&mut ComponentsRegistrator, ComponentId, ComponentDescriptor) + 'static, ) -> ComponentId { - let component_id = ComponentId(components.len()); - let info = ComponentInfo::new(component_id, descriptor); - if info.descriptor.storage_type == StorageType::SparseSet { - storages.sparse_sets.get_or_insert(&info); - } - components.push(info); - component_id - } - - /// Returns the number of components registered with this instance. - #[inline] - pub fn len(&self) -> usize { - self.components.len() + let id = self.ids.next(); + self.components + .queued + .write() + .unwrap_or_else(PoisonError::into_inner) + .dynamic_registrations + .push( + // SAFETY: The id was just generated. + unsafe { QueuedRegistration::new(id, descriptor, func) }, + ); + id } - /// Returns `true` if there are no components registered with this instance. Otherwise, this returns `false`. + /// This is a queued version of [`ComponentsRegistrator::register_component`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// If this has already been registered or queued, this returns the previous [`ComponentId`]. 
+ /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. #[inline] - pub fn is_empty(&self) -> bool { - self.components.len() == 0 + pub fn queue_register_component(&self) -> ComponentId { + self.component_id::().unwrap_or_else(|| { + // SAFETY: We just checked that this type was not in the queue. + unsafe { + self.force_register_arbitrary_component( + TypeId::of::(), + ComponentDescriptor::new::(), + |registrator, id, _descriptor| { + // SAFETY: We just checked that this is not currently registered or queued, and if it was registered since, this would have been dropped from the queue. + #[expect(unused_unsafe, reason = "More precise to specify.")] + unsafe { + registrator.register_component_unchecked::(&mut Vec::new(), id); + } + }, + ) + } + }) } - /// Gets the metadata associated with the given component. + /// This is a queued version of [`ComponentsRegistrator::register_component_with_descriptor`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. /// - /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. #[inline] - pub fn get_info(&self, id: ComponentId) -> Option<&ComponentInfo> { - self.components.get(id.0) + pub fn queue_register_component_with_descriptor( + &self, + descriptor: ComponentDescriptor, + ) -> ComponentId { + self.force_register_arbitrary_dynamic(descriptor, |registrator, id, descriptor| { + // SAFETY: Id uniqueness handled by caller. + unsafe { + registrator.register_component_inner(id, descriptor); + } + }) } - /// Returns the name associated with the given component. 
+ /// This is a queued version of [`ComponentsRegistrator::register_resource`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. /// - /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. + /// If this has already been registered or queued, this returns the previous [`ComponentId`]. + /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. #[inline] - pub fn get_name(&self, id: ComponentId) -> Option<&str> { - self.get_info(id).map(ComponentInfo::name) + pub fn queue_register_resource(&self) -> ComponentId { + let type_id = TypeId::of::(); + self.get_resource_id(type_id).unwrap_or_else(|| { + // SAFETY: We just checked that this type was not in the queue. + unsafe { + self.force_register_arbitrary_resource( + type_id, + ComponentDescriptor::new_resource::(), + move |registrator, id, descriptor| { + // SAFETY: We just checked that this is not currently registered or queued, and if it was registered since, this would have been dropped from the queue. + // SAFETY: Id uniqueness handled by caller, and the type_id matches descriptor. + #[expect(unused_unsafe, reason = "More precise to specify.")] + unsafe { + registrator.register_resource_unchecked(type_id, id, descriptor); + } + }, + ) + } + }) } - /// Gets the metadata associated with the given component. - /// # Safety + /// This is a queued version of [`ComponentsRegistrator::register_non_send`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// If this has already been registered or queued, this returns the previous [`ComponentId`]. 
+ /// + /// # Note /// - /// `id` must be a valid [`ComponentId`] + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. #[inline] - pub unsafe fn get_info_unchecked(&self, id: ComponentId) -> &ComponentInfo { - debug_assert!(id.index() < self.components.len()); - // SAFETY: The caller ensures `id` is valid. - unsafe { self.components.get_unchecked(id.0) } + pub fn queue_register_non_send(&self) -> ComponentId { + let type_id = TypeId::of::(); + self.get_resource_id(type_id).unwrap_or_else(|| { + // SAFETY: We just checked that this type was not in the queue. + unsafe { + self.force_register_arbitrary_resource( + type_id, + ComponentDescriptor::new_non_send::(StorageType::default()), + move |registrator, id, descriptor| { + // SAFETY: We just checked that this is not currently registered or queued, and if it was registered since, this would have been dropped from the queue. + // SAFETY: Id uniqueness handled by caller, and the type_id matches descriptor. + #[expect(unused_unsafe, reason = "More precise to specify.")] + unsafe { + registrator.register_resource_unchecked(type_id, id, descriptor); + } + }, + ) + } + }) + } + + /// This is a queued version of [`ComponentsRegistrator::register_resource_with_descriptor`]. + /// This will reserve an id and queue the registration. + /// These registrations will be carried out at the next opportunity. + /// + /// # Note + /// + /// Technically speaking, the returned [`ComponentId`] is not valid, but it will become valid later. + /// See type level docs for details. + #[inline] + pub fn queue_register_resource_with_descriptor( + &self, + descriptor: ComponentDescriptor, + ) -> ComponentId { + self.force_register_arbitrary_dynamic(descriptor, |registrator, id, descriptor| { + // SAFETY: Id uniqueness handled by caller. 
+ unsafe { + registrator.register_component_inner(id, descriptor); + } + }) + } +} + +/// A [`Components`] wrapper that enables additional features, like registration. +pub struct ComponentsRegistrator<'w> { + components: &'w mut Components, + ids: &'w mut ComponentIds, +} + +impl Deref for ComponentsRegistrator<'_> { + type Target = Components; + + fn deref(&self) -> &Self::Target { + self.components + } +} + +impl DerefMut for ComponentsRegistrator<'_> { + fn deref_mut(&mut self) -> &mut Self::Target { + self.components + } +} + +impl<'w> ComponentsRegistrator<'w> { + /// Constructs a new [`ComponentsRegistrator`]. + /// + /// # Safety + /// + /// The [`Components`] and [`ComponentIds`] must match. + /// For example, they must be from the same world. + pub unsafe fn new(components: &'w mut Components, ids: &'w mut ComponentIds) -> Self { + Self { components, ids } + } + + /// Converts this [`ComponentsRegistrator`] into a [`ComponentsQueuedRegistrator`]. + /// This is intended for use to pass this value to a function that requires [`ComponentsQueuedRegistrator`]. + /// It is generally not a good idea to queue a registration when you can instead register directly on this type. + pub fn as_queued(&self) -> ComponentsQueuedRegistrator<'_> { + // SAFETY: ensured by the caller that created self. + unsafe { ComponentsQueuedRegistrator::new(self.components, self.ids) } + } + + /// Applies every queued registration. + /// This ensures that every valid [`ComponentId`] is registered, + /// enabling retrieving [`ComponentInfo`], etc. + pub fn apply_queued_registrations(&mut self) { + if !self.any_queued_mut() { + return; + } + + // Note: + // + // This is not just draining the queue. We need to empty the queue without removing the information from `Components`. + // If we drained directly, we could break invariance. + // + // For example, say `ComponentA` and `ComponentB` are queued, and `ComponentA` requires `ComponentB`. 
+ // If we drain directly, and `ComponentA` was the first to be registered, then, when `ComponentA` + // registers `ComponentB` in `Component::register_required_components`, + // `Components` will not know that `ComponentB` was queued + // (since it will have been drained from the queue.) + // If that happened, `Components` would assign a new `ComponentId` to `ComponentB` + // which would be *different* than the id it was assigned in the queue. + // Then, when the drain iterator gets to `ComponentB`, + // it would be unsafely registering `ComponentB`, which is already registered. + // + // As a result, we need to pop from each queue one by one instead of draining. + + // components + while let Some(registrator) = { + let queued = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + queued.components.keys().next().copied().map(|type_id| { + // SAFETY: the id just came from a valid iterator. + unsafe { queued.components.remove(&type_id).debug_checked_unwrap() } + }) + } { + registrator.register(self); + } + + // resources + while let Some(registrator) = { + let queued = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + queued.resources.keys().next().copied().map(|type_id| { + // SAFETY: the id just came from a valid iterator. + unsafe { queued.resources.remove(&type_id).debug_checked_unwrap() } + }) + } { + registrator.register(self); + } + + // dynamic + let queued = &mut self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + if !queued.dynamic_registrations.is_empty() { + for registrator in core::mem::take(&mut queued.dynamic_registrations) { + registrator.register(self); + } + } + } + + /// Registers a [`Component`] of type `T` with this instance. + /// If a component of this type has already been registered, this will return + /// the ID of the pre-existing component. 
+ /// + /// # See also + /// + /// * [`Components::component_id()`] + /// * [`ComponentsRegistrator::register_component_with_descriptor()`] + #[inline] + pub fn register_component(&mut self) -> ComponentId { + self.register_component_checked::(&mut Vec::new()) + } + + /// Same as [`Self::register_component_unchecked`] but keeps a checks for safety. + #[inline] + fn register_component_checked( + &mut self, + recursion_check_stack: &mut Vec, + ) -> ComponentId { + let type_id = TypeId::of::(); + if let Some(id) = self.indices.get(&type_id) { + return *id; + } + + if let Some(registrator) = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner) + .components + .remove(&type_id) + { + // If we are trying to register something that has already been queued, we respect the queue. + // Just like if we are trying to register something that already is, we respect the first registration. + return registrator.register(self); + } + + let id = self.ids.next_mut(); + // SAFETY: The component is not currently registered, and the id is fresh. + unsafe { + self.register_component_unchecked::(recursion_check_stack, id); + } + id + } + + /// # Safety + /// + /// Neither this component, nor its id may be registered or queued. This must be a new registration. + #[inline] + unsafe fn register_component_unchecked( + &mut self, + recursion_check_stack: &mut Vec, + id: ComponentId, + ) { + // SAFETY: ensured by caller. 
+ unsafe { + self.register_component_inner(id, ComponentDescriptor::new::()); + } + let type_id = TypeId::of::(); + let prev = self.indices.insert(type_id, id); + debug_assert!(prev.is_none()); + + let mut required_components = RequiredComponents::default(); + T::register_required_components( + id, + self, + &mut required_components, + 0, + recursion_check_stack, + ); + // SAFETY: we just inserted it in `register_component_inner` + let info = unsafe { + &mut self + .components + .components + .get_mut(id.0) + .debug_checked_unwrap() + .as_mut() + .debug_checked_unwrap() + }; + + #[expect( + deprecated, + reason = "need to use this method until it is removed to ensure user defined components register hooks correctly" + )] + // TODO: Replace with `info.hooks.update_from_component::();` once `Component::register_component_hooks` is removed + T::register_component_hooks(&mut info.hooks); + + info.required_components = required_components; + } + + /// Registers a component described by `descriptor`. + /// + /// # Note + /// + /// If this method is called multiple times with identical descriptors, a distinct [`ComponentId`] + /// will be created for each one. + /// + /// # See also + /// + /// * [`Components::component_id()`] + /// * [`ComponentsRegistrator::register_component()`] + #[inline] + pub fn register_component_with_descriptor( + &mut self, + descriptor: ComponentDescriptor, + ) -> ComponentId { + let id = self.ids.next_mut(); + // SAFETY: The id is fresh. + unsafe { + self.register_component_inner(id, descriptor); + } + id + } + + // NOTE: This should maybe be private, but it is currently public so that `bevy_ecs_macros` can use it. + // We can't directly move this there either, because this uses `Components::get_required_by_mut`, + // which is private, and could be equally risky to expose to users. + /// Registers the given component `R` and [required components] inherited from it as required by `T`, + /// and adds `T` to their lists of requirees. 
+ /// + /// The given `inheritance_depth` determines how many levels of inheritance deep the requirement is. + /// A direct requirement has a depth of `0`, and each level of inheritance increases the depth by `1`. + /// Lower depths are more specific requirements, and can override existing less specific registrations. + /// + /// The `recursion_check_stack` allows checking whether this component tried to register itself as its + /// own (indirect) required component. + /// + /// This method does *not* register any components as required by components that require `T`. + /// + /// Only use this method if you know what you are doing. In most cases, you should instead use [`World::register_required_components`], + /// or the equivalent method in `bevy_app::App`. + /// + /// [required component]: Component#required-components + #[doc(hidden)] + pub fn register_required_components_manual( + &mut self, + required_components: &mut RequiredComponents, + constructor: fn() -> R, + inheritance_depth: u16, + recursion_check_stack: &mut Vec, + ) { + let requiree = self.register_component_checked::(recursion_check_stack); + let required = self.register_component_checked::(recursion_check_stack); + + // SAFETY: We just created the components. + unsafe { + self.register_required_components_manual_unchecked::( + requiree, + required, + required_components, + constructor, + inheritance_depth, + ); + } + } + + /// Registers a [`Resource`] of type `T` with this instance. + /// If a resource of this type has already been registered, this will return + /// the ID of the pre-existing resource. 
+ /// + /// # See also + /// + /// * [`Components::resource_id()`] + /// * [`ComponentsRegistrator::register_resource_with_descriptor()`] + #[inline] + pub fn register_resource(&mut self) -> ComponentId { + // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] + unsafe { + self.register_resource_with(TypeId::of::(), || { + ComponentDescriptor::new_resource::() + }) + } + } + + /// Registers a [non-send resource](crate::system::NonSend) of type `T` with this instance. + /// If a resource of this type has already been registered, this will return + /// the ID of the pre-existing resource. + #[inline] + pub fn register_non_send(&mut self) -> ComponentId { + // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] + unsafe { + self.register_resource_with(TypeId::of::(), || { + ComponentDescriptor::new_non_send::(StorageType::default()) + }) + } + } + + /// Same as [`Components::register_resource_unchecked`] but handles safety. + /// + /// # Safety + /// + /// The [`ComponentDescriptor`] must match the [`TypeId`]. + #[inline] + unsafe fn register_resource_with( + &mut self, + type_id: TypeId, + descriptor: impl FnOnce() -> ComponentDescriptor, + ) -> ComponentId { + if let Some(id) = self.resource_indices.get(&type_id) { + return *id; + } + + if let Some(registrator) = self + .components + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner) + .resources + .remove(&type_id) + { + // If we are trying to register something that has already been queued, we respect the queue. + // Just like if we are trying to register something that already is, we respect the first registration. + return registrator.register(self); + } + + let id = self.ids.next_mut(); + // SAFETY: The resource is not currently registered, the id is fresh, and the [`ComponentDescriptor`] matches the [`TypeId`] + unsafe { + self.register_resource_unchecked(type_id, id, descriptor()); + } + id + } + + /// Registers a [`Resource`] described by `descriptor`. 
+ /// + /// # Note + /// + /// If this method is called multiple times with identical descriptors, a distinct [`ComponentId`] + /// will be created for each one. + /// + /// # See also + /// + /// * [`Components::resource_id()`] + /// * [`ComponentsRegistrator::register_resource()`] + #[inline] + pub fn register_resource_with_descriptor( + &mut self, + descriptor: ComponentDescriptor, + ) -> ComponentId { + let id = self.ids.next_mut(); + // SAFETY: The id is fresh. + unsafe { + self.register_component_inner(id, descriptor); + } + id + } +} + +/// Stores metadata associated with each kind of [`Component`] in a given [`World`]. +#[derive(Debug, Default)] +pub struct Components { + components: Vec>, + indices: TypeIdMap, + resource_indices: TypeIdMap, + // This is kept internal and local to verify that no deadlocks can occor. + queued: bevy_platform::sync::RwLock, +} + +impl Components { + /// This registers any descriptor, component or resource. + /// + /// # Safety + /// + /// The id must have never been registered before. This must be a fresh registration. + #[inline] + unsafe fn register_component_inner( + &mut self, + id: ComponentId, + descriptor: ComponentDescriptor, + ) { + let info = ComponentInfo::new(id, descriptor); + let least_len = id.0 + 1; + if self.components.len() < least_len { + self.components.resize_with(least_len, || None); + } + // SAFETY: We just extended the vec to make this index valid. + let slot = unsafe { self.components.get_mut(id.0).debug_checked_unwrap() }; + // Caller ensures id is unique + debug_assert!(slot.is_none()); + *slot = Some(info); + } + + /// Returns the number of components registered or queued with this instance. + #[inline] + pub fn len(&self) -> usize { + self.num_queued() + self.num_registered() + } + + /// Returns `true` if there are no components registered or queued with this instance. Otherwise, this returns `false`. 
+ #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of components registered with this instance. + #[inline] + pub fn num_queued(&self) -> usize { + let queued = self.queued.read().unwrap_or_else(PoisonError::into_inner); + queued.components.len() + queued.dynamic_registrations.len() + queued.resources.len() + } + + /// Returns `true` if there are any components registered with this instance. Otherwise, this returns `false`. + #[inline] + pub fn any_queued(&self) -> bool { + self.num_queued() > 0 + } + + /// A faster version of [`Self::num_queued`]. + #[inline] + pub fn num_queued_mut(&mut self) -> usize { + let queued = self + .queued + .get_mut() + .unwrap_or_else(PoisonError::into_inner); + queued.components.len() + queued.dynamic_registrations.len() + queued.resources.len() + } + + /// A faster version of [`Self::any_queued`]. + #[inline] + pub fn any_queued_mut(&mut self) -> bool { + self.num_queued_mut() > 0 + } + + /// Returns the number of components registered with this instance. + #[inline] + pub fn num_registered(&self) -> usize { + self.components.len() + } + + /// Returns `true` if there are any components registered with this instance. Otherwise, this returns `false`. + #[inline] + pub fn any_registered(&self) -> bool { + self.num_registered() > 0 + } + + /// Gets the metadata associated with the given component, if it is registered. + /// This will return `None` if the id is not regiserted or is queued. + /// + /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. + #[inline] + pub fn get_info(&self, id: ComponentId) -> Option<&ComponentInfo> { + self.components.get(id.0).and_then(|info| info.as_ref()) + } + + /// Gets the [`ComponentDescriptor`] of the component with this [`ComponentId`] if it is present. + /// This will return `None` only if the id is neither regisered nor queued to be registered. 
+ /// + /// Currently, the [`Cow`] will be [`Cow::Owned`] if and only if the component is queued. It will be [`Cow::Borrowed`] otherwise. + /// + /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. + #[inline] + pub fn get_descriptor<'a>(&'a self, id: ComponentId) -> Option> { + self.components + .get(id.0) + .and_then(|info| info.as_ref().map(|info| Cow::Borrowed(&info.descriptor))) + .or_else(|| { + let queued = self.queued.read().unwrap_or_else(PoisonError::into_inner); + // first check components, then resources, then dynamic + queued + .components + .values() + .chain(queued.resources.values()) + .chain(queued.dynamic_registrations.iter()) + .find(|queued| queued.id == id) + .map(|queued| Cow::Owned(queued.descriptor.clone())) + }) + } + + /// Gets the name of the component with this [`ComponentId`] if it is present. + /// This will return `None` only if the id is neither regisered nor queued to be registered. + /// + /// This will return an incorrect result if `id` did not come from the same world as `self`. It may return `None` or a garbage value. + #[inline] + pub fn get_name<'a>(&'a self, id: ComponentId) -> Option> { + self.components + .get(id.0) + .and_then(|info| { + info.as_ref() + .map(|info| Cow::Borrowed(info.descriptor.name())) + }) + .or_else(|| { + let queued = self.queued.read().unwrap_or_else(PoisonError::into_inner); + // first check components, then resources, then dynamic + queued + .components + .values() + .chain(queued.resources.values()) + .chain(queued.dynamic_registrations.iter()) + .find(|queued| queued.id == id) + .map(|queued| queued.descriptor.name.clone()) + }) + } + + /// Gets the metadata associated with the given component. + /// # Safety + /// + /// `id` must be a valid and fully registered [`ComponentId`]. + #[inline] + pub unsafe fn get_info_unchecked(&self, id: ComponentId) -> &ComponentInfo { + // SAFETY: The caller ensures `id` is valid. 
+ unsafe { + self.components + .get(id.0) + .debug_checked_unwrap() + .as_ref() + .debug_checked_unwrap() + } } #[inline] pub(crate) fn get_hooks_mut(&mut self, id: ComponentId) -> Option<&mut ComponentHooks> { - self.components.get_mut(id.0).map(|info| &mut info.hooks) + self.components + .get_mut(id.0) + .and_then(|info| info.as_mut().map(|info| &mut info.hooks)) } #[inline] @@ -1240,7 +2127,7 @@ impl Components { ) -> Option<&mut RequiredComponents> { self.components .get_mut(id.0) - .map(|info| &mut info.required_components) + .and_then(|info| info.as_mut().map(|info| &mut info.required_components)) } /// Registers the given component `R` and [required components] inherited from it as required by `T`. @@ -1292,12 +2179,28 @@ impl Components { let required_by = unsafe { self.get_required_by_mut(required).debug_checked_unwrap() }; required_by.insert(requiree); + let mut required_components_tmp = RequiredComponents::default(); // SAFETY: The caller ensures that the `requiree` and `required` components are valid. - let inherited_requirements = - unsafe { self.register_inherited_required_components(requiree, required) }; + let inherited_requirements = unsafe { + self.register_inherited_required_components( + requiree, + required, + &mut required_components_tmp, + ) + }; + + // SAFETY: The caller ensures that the `requiree` is valid. + let required_components = unsafe { + self.get_required_components_mut(requiree) + .debug_checked_unwrap() + }; + required_components.0.extend(required_components_tmp.0); // Propagate the new required components up the chain to all components that require the requiree. - if let Some(required_by) = self.get_required_by(requiree).cloned() { + if let Some(required_by) = self + .get_required_by(requiree) + .map(|set| set.iter().copied().collect::>()) + { // `required` is now required by anything that `requiree` was required by. 
self.get_required_by_mut(required) .unwrap() @@ -1323,10 +2226,10 @@ impl Components { // SAFETY: Component ID and constructor match the ones on the original requiree. // The original requiree is responsible for making sure the registration is safe. unsafe { - required_components.register_dynamic( + required_components.register_dynamic_with( *component_id, - component.constructor.clone(), component.inheritance_depth + depth + 1, + || component.constructor.clone(), ); }; } @@ -1346,6 +2249,7 @@ impl Components { &mut self, requiree: ComponentId, required: ComponentId, + required_components: &mut RequiredComponents, ) -> Vec<(ComponentId, RequiredComponent)> { // Get required components inherited from the `required` component. // SAFETY: The caller ensures that the `required` component is valid. @@ -1368,27 +2272,21 @@ impl Components { .collect(); // Register the new required components. - for (component_id, component) in inherited_requirements.iter().cloned() { - // SAFETY: The caller ensures that the `requiree` is valid. - let required_components = unsafe { - self.get_required_components_mut(requiree) - .debug_checked_unwrap() - }; - + for (component_id, component) in inherited_requirements.iter() { // Register the required component for the requiree. // SAFETY: Component ID and constructor match the ones on the original requiree. unsafe { - required_components.register_dynamic( - component_id, - component.constructor, + required_components.register_dynamic_with( + *component_id, component.inheritance_depth, + || component.constructor.clone(), ); }; // Add the requiree to the list of components that require the required component. // SAFETY: The caller ensures that the required components are valid. 
let required_by = unsafe { - self.get_required_by_mut(component_id) + self.get_required_by_mut(*component_id) .debug_checked_unwrap() }; required_by.insert(requiree); @@ -1397,49 +2295,6 @@ impl Components { inherited_requirements } - // NOTE: This should maybe be private, but it is currently public so that `bevy_ecs_macros` can use it. - // We can't directly move this there either, because this uses `Components::get_required_by_mut`, - // which is private, and could be equally risky to expose to users. - /// Registers the given component `R` and [required components] inherited from it as required by `T`, - /// and adds `T` to their lists of requirees. - /// - /// The given `inheritance_depth` determines how many levels of inheritance deep the requirement is. - /// A direct requirement has a depth of `0`, and each level of inheritance increases the depth by `1`. - /// Lower depths are more specific requirements, and can override existing less specific registrations. - /// - /// The `recursion_check_stack` allows checking whether this component tried to register itself as its - /// own (indirect) required component. - /// - /// This method does *not* register any components as required by components that require `T`. - /// - /// Only use this method if you know what you are doing. In most cases, you should instead use [`World::register_required_components`], - /// or the equivalent method in `bevy_app::App`. - /// - /// [required component]: Component#required-components - #[doc(hidden)] - pub fn register_required_components_manual( - &mut self, - storages: &mut Storages, - required_components: &mut RequiredComponents, - constructor: fn() -> R, - inheritance_depth: u16, - recursion_check_stack: &mut Vec, - ) { - let requiree = self.register_component_internal::(storages, recursion_check_stack); - let required = self.register_component_internal::(storages, recursion_check_stack); - - // SAFETY: We just created the components. 
- unsafe { - self.register_required_components_manual_unchecked::( - requiree, - required, - required_components, - constructor, - inheritance_depth, - ); - } - } - /// Registers the given component `R` and [required components] inherited from it as required by `T`, /// and adds `T` to their lists of requirees. /// @@ -1476,31 +2331,14 @@ impl Components { let required_by = unsafe { self.get_required_by_mut(required).debug_checked_unwrap() }; required_by.insert(requiree); - // Register the inherited required components for the requiree. - let required: Vec<(ComponentId, RequiredComponent)> = self - .get_info(required) - .unwrap() - .required_components() - .0 - .iter() - .map(|(id, component)| (*id, component.clone())) - .collect(); - - for (id, component) in required { - // Register the inherited required components for the requiree. - // The inheritance depth is increased by `1` since this is a component required by the original required component. - required_components.register_dynamic( - id, - component.constructor.clone(), - component.inheritance_depth + 1, - ); - self.get_required_by_mut(id).unwrap().insert(requiree); - } + self.register_inherited_required_components(requiree, required, required_components); } #[inline] pub(crate) fn get_required_by(&self, id: ComponentId) -> Option<&HashSet> { - self.components.get(id.0).map(|info| &info.required_by) + self.components + .get(id.0) + .and_then(|info| info.as_ref().map(|info| &info.required_by)) } #[inline] @@ -1510,23 +2348,91 @@ impl Components { ) -> Option<&mut HashSet> { self.components .get_mut(id.0) - .map(|info| &mut info.required_by) + .and_then(|info| info.as_mut().map(|info| &mut info.required_by)) + } + + /// Returns true if the [`ComponentId`] is fully registered and valid. + /// Ids may be invalid if they are still queued to be registered. + /// Those ids are still correct, but they are not usable in every context yet. 
+ #[inline] + pub fn is_id_valid(&self, id: ComponentId) -> bool { + self.components.get(id.0).is_some_and(Option::is_some) } - /// Retrieves the [`ComponentCloneHandlers`]. Can be used to get clone functions for components. - pub fn get_component_clone_handlers(&self) -> &ComponentCloneHandlers { - &self.component_clone_handlers + /// Type-erased equivalent of [`Components::valid_component_id()`]. + #[inline] + pub fn get_valid_id(&self, type_id: TypeId) -> Option { + self.indices.get(&type_id).copied() } - /// Retrieves a mutable reference to the [`ComponentCloneHandlers`]. Can be used to set and update clone functions for components. - pub fn get_component_clone_handlers_mut(&mut self) -> &mut ComponentCloneHandlers { - &mut self.component_clone_handlers + /// Returns the [`ComponentId`] of the given [`Component`] type `T` if it is fully registered. + /// If you want to include queued registration, see [`Components::component_id()`]. + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// + /// let mut world = World::new(); + /// + /// #[derive(Component)] + /// struct ComponentA; + /// + /// let component_a_id = world.register_component::(); + /// + /// assert_eq!(component_a_id, world.components().valid_component_id::().unwrap()) + /// ``` + /// + /// # See also + /// + /// * [`Components::get_valid_id()`] + /// * [`Components::valid_resource_id()`] + /// * [`World::component_id()`] + #[inline] + pub fn valid_component_id(&self) -> Option { + self.get_id(TypeId::of::()) + } + + /// Type-erased equivalent of [`Components::valid_resource_id()`]. + #[inline] + pub fn get_valid_resource_id(&self, type_id: TypeId) -> Option { + self.resource_indices.get(&type_id).copied() + } + + /// Returns the [`ComponentId`] of the given [`Resource`] type `T` if it is fully registered. + /// If you want to include queued registration, see [`Components::resource_id()`]. 
+ /// + /// ``` + /// use bevy_ecs::prelude::*; + /// + /// let mut world = World::new(); + /// + /// #[derive(Resource, Default)] + /// struct ResourceA; + /// + /// let resource_a_id = world.init_resource::(); + /// + /// assert_eq!(resource_a_id, world.components().valid_resource_id::().unwrap()) + /// ``` + /// + /// # See also + /// + /// * [`Components::valid_component_id()`] + /// * [`Components::get_resource_id()`] + #[inline] + pub fn valid_resource_id(&self) -> Option { + self.get_resource_id(TypeId::of::()) } /// Type-erased equivalent of [`Components::component_id()`]. #[inline] pub fn get_id(&self, type_id: TypeId) -> Option { - self.indices.get(&type_id).copied() + self.indices.get(&type_id).copied().or_else(|| { + self.queued + .read() + .unwrap_or_else(PoisonError::into_inner) + .components + .get(&type_id) + .map(|queued| queued.id) + }) } /// Returns the [`ComponentId`] of the given [`Component`] type `T`. @@ -1536,7 +2442,7 @@ impl Components { /// instance. /// /// Returns [`None`] if the `Component` type has not - /// yet been initialized using [`Components::register_component()`]. + /// yet been initialized using [`ComponentsRegistrator::register_component()`] or [`ComponentsQueuedRegistrator::queue_register_component()`]. /// /// ``` /// use bevy_ecs::prelude::*; @@ -1564,7 +2470,14 @@ impl Components { /// Type-erased equivalent of [`Components::resource_id()`]. #[inline] pub fn get_resource_id(&self, type_id: TypeId) -> Option { - self.resource_indices.get(&type_id).copied() + self.resource_indices.get(&type_id).copied().or_else(|| { + self.queued + .read() + .unwrap_or_else(PoisonError::into_inner) + .resources + .get(&type_id) + .map(|queued| queued.id) + }) } /// Returns the [`ComponentId`] of the given [`Resource`] type `T`. @@ -1574,7 +2487,7 @@ impl Components { /// instance. /// /// Returns [`None`] if the `Resource` type has not - /// yet been initialized using [`Components::register_resource()`]. 
+ /// yet been initialized using [`ComponentsRegistrator::register_resource()`] or [`ComponentsQueuedRegistrator::queue_register_resource()`]. /// /// ``` /// use bevy_ecs::prelude::*; @@ -1598,84 +2511,29 @@ impl Components { self.get_resource_id(TypeId::of::()) } - /// Registers a [`Resource`] of type `T` with this instance. - /// If a resource of this type has already been registered, this will return - /// the ID of the pre-existing resource. - /// - /// # See also - /// - /// * [`Components::resource_id()`] - /// * [`Components::register_resource_with_descriptor()`] - #[inline] - pub fn register_resource(&mut self) -> ComponentId { - // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] - unsafe { - self.get_or_register_resource_with(TypeId::of::(), || { - ComponentDescriptor::new_resource::() - }) - } - } - - /// Registers a [`Resource`] described by `descriptor`. - /// - /// # Note - /// - /// If this method is called multiple times with identical descriptors, a distinct [`ComponentId`] - /// will be created for each one. - /// - /// # See also - /// - /// * [`Components::resource_id()`] - /// * [`Components::register_resource()`] - pub fn register_resource_with_descriptor( - &mut self, - descriptor: ComponentDescriptor, - ) -> ComponentId { - Components::register_resource_inner(&mut self.components, descriptor) - } - - /// Registers a [non-send resource](crate::system::NonSend) of type `T` with this instance. - /// If a resource of this type has already been registered, this will return - /// the ID of the pre-existing resource. - #[inline] - pub fn register_non_send(&mut self) -> ComponentId { - // SAFETY: The [`ComponentDescriptor`] matches the [`TypeId`] - unsafe { - self.get_or_register_resource_with(TypeId::of::(), || { - ComponentDescriptor::new_non_send::(StorageType::default()) - }) - } - } - /// # Safety /// - /// The [`ComponentDescriptor`] must match the [`TypeId`] + /// The [`ComponentDescriptor`] must match the [`TypeId`]. 
+ /// The [`ComponentId`] must be unique. + /// The [`TypeId`] and [`ComponentId`] must not be registered or queued. #[inline] - unsafe fn get_or_register_resource_with( + unsafe fn register_resource_unchecked( &mut self, type_id: TypeId, - func: impl FnOnce() -> ComponentDescriptor, - ) -> ComponentId { - let components = &mut self.components; - *self.resource_indices.entry(type_id).or_insert_with(|| { - let descriptor = func(); - Components::register_resource_inner(components, descriptor) - }) - } - - #[inline] - fn register_resource_inner( - components: &mut Vec, + component_id: ComponentId, descriptor: ComponentDescriptor, - ) -> ComponentId { - let component_id = ComponentId(components.len()); - components.push(ComponentInfo::new(component_id, descriptor)); - component_id + ) { + // SAFETY: ensured by caller + unsafe { + self.register_component_inner(component_id, descriptor); + } + let prev = self.resource_indices.insert(type_id, component_id); + debug_assert!(prev.is_none()); } - /// Gets an iterator over all components registered with this instance. - pub fn iter(&self) -> impl Iterator + '_ { - self.components.iter() + /// Gets an iterator over all components fully registered with this instance. + pub fn iter_registered(&self) -> impl Iterator + '_ { + self.components.iter().filter_map(Option::as_ref) } } @@ -1687,7 +2545,7 @@ impl Components { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] pub struct Tick { tick: u32, @@ -1784,7 +2642,7 @@ impl<'a> TickCells<'a> { /// Records when a component or resource was added and when it was last mutably dereferenced (or added). #[derive(Copy, Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct ComponentTicks { /// Tick recording the time this component or resource was added. 
pub added: Tick, @@ -1818,7 +2676,7 @@ impl ComponentTicks { /// Manually sets the change tick. /// - /// This is normally done automatically via the [`DerefMut`](std::ops::DerefMut) implementation + /// This is normally done automatically via the [`DerefMut`] implementation /// on [`Mut`](crate::change_detection::Mut), [`ResMut`](crate::change_detection::ResMut), etc. /// However, components and resources that make use of interior mutability might require manual updates. /// @@ -1859,7 +2717,7 @@ impl ComponentIdFor<'_, T> { } } -impl core::ops::Deref for ComponentIdFor<'_, T> { +impl Deref for ComponentIdFor<'_, T> { type Target = ComponentId; fn deref(&self) -> &Self::Target { &self.0.component_id @@ -1901,17 +2759,9 @@ pub enum RequiredComponentsError { } /// A Required Component constructor. See [`Component`] for details. -#[cfg(feature = "track_change_detection")] -#[derive(Clone)] -pub struct RequiredComponentConstructor( - pub Arc)>, -); - -/// A Required Component constructor. See [`Component`] for details. -#[cfg(not(feature = "track_change_detection"))] #[derive(Clone)] pub struct RequiredComponentConstructor( - pub Arc, + pub Arc, ); impl RequiredComponentConstructor { @@ -1931,17 +2781,9 @@ impl RequiredComponentConstructor { change_tick: Tick, table_row: TableRow, entity: Entity, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { - (self.0)( - table, - sparse_sets, - change_tick, - table_row, - entity, - #[cfg(feature = "track_change_detection")] - caller, - ); + (self.0)(table, sparse_sets, change_tick, table_row, entity, caller); } } @@ -1990,25 +2832,30 @@ impl RequiredComponents { /// `constructor` _must_ initialize a component for `component_id` in such a way that /// matches the storage type of the component. It must only use the given `table_row` or `Entity` to /// initialize the storage for `component_id` corresponding to the given entity. 
- pub unsafe fn register_dynamic( + pub unsafe fn register_dynamic_with( &mut self, component_id: ComponentId, - constructor: RequiredComponentConstructor, inheritance_depth: u16, + constructor: impl FnOnce() -> RequiredComponentConstructor, ) { - self.0 - .entry(component_id) - .and_modify(|component| { - if component.inheritance_depth > inheritance_depth { - // New registration is more specific than existing requirement - component.constructor = constructor.clone(); - component.inheritance_depth = inheritance_depth; + let entry = self.0.entry(component_id); + match entry { + bevy_platform::collections::hash_map::Entry::Occupied(mut occupied) => { + let current = occupied.get_mut(); + if current.inheritance_depth > inheritance_depth { + *current = RequiredComponent { + constructor: constructor(), + inheritance_depth, + } } - }) - .or_insert(RequiredComponent { - constructor, - inheritance_depth, - }); + } + bevy_platform::collections::hash_map::Entry::Vacant(vacant) => { + vacant.insert(RequiredComponent { + constructor: constructor(), + inheritance_depth, + }); + } + } } /// Registers a required component. @@ -2017,12 +2864,11 @@ impl RequiredComponents { /// is smaller than the depth of the existing registration. Otherwise, the new registration will be ignored. pub fn register( &mut self, - components: &mut Components, - storages: &mut Storages, + components: &mut ComponentsRegistrator, constructor: fn() -> C, inheritance_depth: u16, ) { - let component_id = components.register_component::(storages); + let component_id = components.register_component::(); self.register_by_id(component_id, constructor, inheritance_depth); } @@ -2036,75 +2882,66 @@ impl RequiredComponents { constructor: fn() -> C, inheritance_depth: u16, ) { - let erased: RequiredComponentConstructor = RequiredComponentConstructor({ - // `portable-atomic-util` `Arc` is not able to coerce an unsized - // type like `std::sync::Arc` can. Creating a `Box` first does the - // coercion. 
- // - // This would be resolved by https://github.com/rust-lang/rust/issues/123430 - - #[cfg(feature = "portable-atomic")] - use alloc::boxed::Box; - - #[cfg(feature = "track_change_detection")] - type Constructor = dyn for<'a, 'b> Fn( - &'a mut Table, - &'b mut SparseSets, - Tick, - TableRow, - Entity, - &'static Location<'static>, - ); + let erased = || { + RequiredComponentConstructor({ + // `portable-atomic-util` `Arc` is not able to coerce an unsized + // type like `std::sync::Arc` can. Creating a `Box` first does the + // coercion. + // + // This would be resolved by https://github.com/rust-lang/rust/issues/123430 + + #[cfg(not(target_has_atomic = "ptr"))] + use alloc::boxed::Box; + + type Constructor = dyn for<'a, 'b> Fn( + &'a mut Table, + &'b mut SparseSets, + Tick, + TableRow, + Entity, + MaybeLocation, + ); - #[cfg(not(feature = "track_change_detection"))] - type Constructor = - dyn for<'a, 'b> Fn(&'a mut Table, &'b mut SparseSets, Tick, TableRow, Entity); - - #[cfg(feature = "portable-atomic")] - type Intermediate = Box; - - #[cfg(not(feature = "portable-atomic"))] - type Intermediate = Arc; - - let boxed: Intermediate = Intermediate::new( - move |table, - sparse_sets, - change_tick, - table_row, - entity, - #[cfg(feature = "track_change_detection")] caller| { - OwningPtr::make(constructor(), |ptr| { - // SAFETY: This will only be called in the context of `BundleInfo::write_components`, which will - // pass in a valid table_row and entity requiring a C constructor - // C::STORAGE_TYPE is the storage type associated with `component_id` / `C` - // `ptr` points to valid `C` data, which matches the type associated with `component_id` - unsafe { - BundleInfo::initialize_required_component( - table, - sparse_sets, - change_tick, - table_row, - entity, - component_id, - C::STORAGE_TYPE, - ptr, - #[cfg(feature = "track_change_detection")] - caller, - ); - } - }); - }, - ); + #[cfg(not(target_has_atomic = "ptr"))] + type Intermediate = Box; + + 
#[cfg(target_has_atomic = "ptr")] + type Intermediate = Arc; + + let boxed: Intermediate = Intermediate::new( + move |table, sparse_sets, change_tick, table_row, entity, caller| { + OwningPtr::make(constructor(), |ptr| { + // SAFETY: This will only be called in the context of `BundleInfo::write_components`, which will + // pass in a valid table_row and entity requiring a C constructor + // C::STORAGE_TYPE is the storage type associated with `component_id` / `C` + // `ptr` points to valid `C` data, which matches the type associated with `component_id` + unsafe { + BundleInfo::initialize_required_component( + table, + sparse_sets, + change_tick, + table_row, + entity, + component_id, + C::STORAGE_TYPE, + ptr, + caller, + ); + } + }); + }, + ); - Arc::from(boxed) - }); + Arc::from(boxed) + }) + }; // SAFETY: // `component_id` matches the type initialized by the `erased` constructor above. // `erased` initializes a component for `component_id` in such a way that // matches the storage type of the component. It only uses the given `table_row` or `Entity` to // initialize the storage corresponding to the given entity. - unsafe { self.register_dynamic(component_id, erased, inheritance_depth) }; + unsafe { self.register_dynamic_with(component_id, inheritance_depth, erased) }; } /// Iterates the ids of all required components. This includes recursive required components. @@ -2122,11 +2959,26 @@ impl RequiredComponents { } } - // Merges `required_components` into this collection. This only inserts a required component - // if it _did not already exist_. + /// Merges `required_components` into this collection. This only inserts a required component + /// if it _did not already exist_ *or* if the required component is more specific than the existing one + /// (in other words, if the inheritance depth is smaller). + /// + /// See [`register_dynamic_with`](Self::register_dynamic_with) for details. 
pub(crate) fn merge(&mut self, required_components: &RequiredComponents) { - for (id, constructor) in &required_components.0 { - self.0.entry(*id).or_insert_with(|| constructor.clone()); + for ( + component_id, + RequiredComponent { + constructor, + inheritance_depth, + }, + ) in required_components.0.iter() + { + // SAFETY: This exact registration must have been done on `required_components`, so safety is ensured by that caller. + unsafe { + self.register_dynamic_with(*component_id, *inheritance_depth, || { + constructor.clone() + }); + } } } } @@ -2149,13 +3001,13 @@ pub fn enforce_no_required_components_recursion( "Recursive required components detected: {}\nhelp: {}", recursion_check_stack .iter() - .map(|id| format!("{}", ShortName(components.get_name(*id).unwrap()))) + .map(|id| format!("{}", ShortName(&components.get_name(*id).unwrap()))) .collect::>() .join(" → "), if direct_recursion { format!( - "Remove require({})", - ShortName(components.get_name(requiree).unwrap()) + "Remove require({}).", + ShortName(&components.get_name(requiree).unwrap()) ) } else { "If this is intentional, consider merging the components.".into() @@ -2166,21 +3018,20 @@ pub fn enforce_no_required_components_recursion( } /// Component [clone handler function](ComponentCloneFn) implemented using the [`Clone`] trait. -/// Can be [set](ComponentCloneHandlers::set_component_handler) as clone handler for the specific component it is implemented for. +/// Can be [set](Component::clone_behavior) as clone handler for the specific component it is implemented for. /// It will panic if set as handler for any other component. /// -/// See [`ComponentCloneHandlers`] for more details. 
pub fn component_clone_via_clone( - _world: &mut DeferredWorld, + source: &SourceComponent, ctx: &mut ComponentCloneCtx, ) { - if let Some(component) = ctx.read_source_component::() { + if let Some(component) = source.read::() { ctx.write_target_component(component.clone()); } } /// Component [clone handler function](ComponentCloneFn) implemented using reflect. -/// Can be [set](ComponentCloneHandlers::set_component_handler) as clone handler for any registered component, +/// Can be [set](Component::clone_behavior) as clone handler for any registered component, /// but only reflected components will be cloned. /// /// To clone a component using this handler, the following must be true: @@ -2188,33 +3039,54 @@ pub fn component_clone_via_clone( /// - Component has [`TypeId`] /// - Component is registered /// - Component has [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr) registered -/// - Component has one of the following registered: [`ReflectFromReflect`](bevy_reflect::ReflectFromReflect), +/// - Component can be cloned via [`PartialReflect::reflect_clone`] _or_ has one of the following registered: [`ReflectFromReflect`](bevy_reflect::ReflectFromReflect), /// [`ReflectDefault`](bevy_reflect::std_traits::ReflectDefault), [`ReflectFromWorld`](crate::reflect::ReflectFromWorld) -/// +/// /// If any of the conditions is not satisfied, the component will be skipped. /// -/// See [`EntityCloneBuilder`](crate::entity::EntityCloneBuilder) for details. +/// See [`EntityClonerBuilder`](crate::entity::EntityClonerBuilder) for details. 
+/// +/// [`PartialReflect::reflect_clone`]: bevy_reflect::PartialReflect::reflect_clone #[cfg(feature = "bevy_reflect")] -pub fn component_clone_via_reflect(world: &mut DeferredWorld, ctx: &mut ComponentCloneCtx) { - let Some(registry) = ctx.type_registry() else { +pub fn component_clone_via_reflect(source: &SourceComponent, ctx: &mut ComponentCloneCtx) { + let Some(app_registry) = ctx.type_registry().cloned() else { return; }; - let Some(source_component_reflect) = ctx.read_source_component_reflect() else { + let registry = app_registry.read(); + let Some(source_component_reflect) = source.read_reflect(®istry) else { return; }; let component_info = ctx.component_info(); // checked in read_source_component_reflect let type_id = component_info.type_id().unwrap(); - let registry = registry.read(); + + // Try to clone using `reflect_clone` + if let Ok(mut component) = source_component_reflect.reflect_clone() { + if let Some(reflect_component) = + registry.get_type_data::(type_id) + { + reflect_component.map_entities(&mut *component, ctx.entity_mapper()); + } + drop(registry); + + ctx.write_target_component_reflect(component); + return; + } // Try to clone using ReflectFromReflect if let Some(reflect_from_reflect) = registry.get_type_data::(type_id) { - if let Some(component) = + if let Some(mut component) = reflect_from_reflect.from_reflect(source_component_reflect.as_partial_reflect()) { + if let Some(reflect_component) = + registry.get_type_data::(type_id) + { + reflect_component.map_entities(&mut *component, ctx.entity_mapper()); + } drop(registry); + ctx.write_target_component_reflect(component); return; } @@ -2234,14 +3106,21 @@ pub fn component_clone_via_reflect(world: &mut DeferredWorld, ctx: &mut Componen registry.get_type_data::(type_id) { let reflect_from_world = reflect_from_world.clone(); - let source_component_cloned = source_component_reflect.clone_value(); + let source_component_cloned = source_component_reflect.to_dynamic(); let component_layout = 
component_info.layout(); let target = ctx.target(); let component_id = ctx.component_id(); - world.commands().queue(move |world: &mut World| { + drop(registry); + ctx.queue_deferred(move |world: &mut World, mapper: &mut dyn EntityMapper| { let mut component = reflect_from_world.from_world(world); assert_eq!(type_id, (*component).type_id()); component.apply(source_component_cloned.as_partial_reflect()); + if let Some(reflect_component) = app_registry + .read() + .get_type_data::(type_id) + { + reflect_component.map_entities(&mut *component, mapper); + } // SAFETY: // - component_id is from the same world as target entity // - component is a valid value represented by component_id @@ -2251,7 +3130,11 @@ pub fn component_clone_via_reflect(world: &mut DeferredWorld, ctx: &mut Componen world .entity_mut(target) .insert_by_id(component_id, OwningPtr::new(raw_component_ptr)); - alloc::alloc::dealloc(raw_component_ptr.as_ptr(), component_layout); + + if component_layout.size() > 0 { + // Ensure we don't attempt to deallocate zero-sized components + alloc::alloc::dealloc(raw_component_ptr.as_ptr(), component_layout); + } } }); } @@ -2259,14 +3142,14 @@ pub fn component_clone_via_reflect(world: &mut DeferredWorld, ctx: &mut Componen /// Noop implementation of component clone handler function. /// -/// See [`EntityCloneBuilder`](crate::entity::EntityCloneBuilder) for details. -pub fn component_clone_ignore(_world: &mut DeferredWorld, _ctx: &mut ComponentCloneCtx) {} +/// See [`EntityClonerBuilder`](crate::entity::EntityClonerBuilder) for details. +pub fn component_clone_ignore(_source: &SourceComponent, _ctx: &mut ComponentCloneCtx) {} /// Wrapper for components clone specialization using autoderef. 
#[doc(hidden)] -pub struct ComponentCloneSpecializationWrapper(PhantomData); +pub struct DefaultCloneBehaviorSpecialization(PhantomData); -impl Default for ComponentCloneSpecializationWrapper { +impl Default for DefaultCloneBehaviorSpecialization { fn default() -> Self { Self(PhantomData) } @@ -2274,22 +3157,22 @@ impl Default for ComponentCloneSpecializationWrapper { /// Base trait for components clone specialization using autoderef. #[doc(hidden)] -pub trait ComponentCloneBase { - fn get_component_clone_handler(&self) -> ComponentCloneHandler; +pub trait DefaultCloneBehaviorBase { + fn default_clone_behavior(&self) -> ComponentCloneBehavior; } -impl ComponentCloneBase for ComponentCloneSpecializationWrapper { - fn get_component_clone_handler(&self) -> ComponentCloneHandler { - ComponentCloneHandler::default_handler() +impl DefaultCloneBehaviorBase for DefaultCloneBehaviorSpecialization { + fn default_clone_behavior(&self) -> ComponentCloneBehavior { + ComponentCloneBehavior::Default } } /// Specialized trait for components clone specialization using autoderef. 
#[doc(hidden)] -pub trait ComponentCloneViaClone { - fn get_component_clone_handler(&self) -> ComponentCloneHandler; +pub trait DefaultCloneBehaviorViaClone { + fn default_clone_behavior(&self) -> ComponentCloneBehavior; } -impl ComponentCloneViaClone for &ComponentCloneSpecializationWrapper { - fn get_component_clone_handler(&self) -> ComponentCloneHandler { - ComponentCloneHandler::clone_handler::() +impl DefaultCloneBehaviorViaClone for &DefaultCloneBehaviorSpecialization { + fn default_clone_behavior(&self) -> ComponentCloneBehavior { + ComponentCloneBehavior::clone::() } } diff --git a/crates/bevy_ecs/src/entity/clone_entities.rs b/crates/bevy_ecs/src/entity/clone_entities.rs index b15723aa57419..bd8eb2b4bd71b 100644 --- a/crates/bevy_ecs/src/entity/clone_entities.rs +++ b/crates/bevy_ecs/src/entity/clone_entities.rs @@ -1,45 +1,91 @@ -use alloc::{borrow::ToOwned, vec::Vec}; +use alloc::{borrow::ToOwned, boxed::Box, collections::VecDeque, vec::Vec}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_ptr::{Ptr, PtrMut}; use bumpalo::Bump; -use core::{any::TypeId, ptr::NonNull}; - -use bevy_utils::{HashMap, HashSet}; - -#[cfg(feature = "bevy_reflect")] -use alloc::boxed::Box; - -#[cfg(feature = "portable-atomic")] -use portable_atomic_util::Arc; - -#[cfg(not(feature = "portable-atomic"))] -use alloc::sync::Arc; +use core::any::TypeId; use crate::{ bundle::Bundle, - component::{Component, ComponentCloneHandler, ComponentId, ComponentInfo, Components}, - entity::Entity, + component::{Component, ComponentCloneBehavior, ComponentCloneFn, ComponentId, ComponentInfo}, + entity::{hash_map::EntityHashMap, Entities, Entity, EntityMapper}, query::DebugCheckedUnwrap, + relationship::RelationshipHookMode, world::World, }; +/// Provides read access to the source component (the component being cloned) in a [`ComponentCloneFn`]. 
+pub struct SourceComponent<'a> { + ptr: Ptr<'a>, + info: &'a ComponentInfo, +} + +impl<'a> SourceComponent<'a> { + /// Returns a reference to the component on the source entity. + /// + /// Will return `None` if `ComponentId` of requested component does not match `ComponentId` of source component + pub fn read(&self) -> Option<&C> { + if self + .info + .type_id() + .is_some_and(|id| id == TypeId::of::()) + { + // SAFETY: + // - Components and ComponentId are from the same world + // - source_component_ptr holds valid data of the type referenced by ComponentId + unsafe { Some(self.ptr.deref::()) } + } else { + None + } + } + + /// Returns the "raw" pointer to the source component. + pub fn ptr(&self) -> Ptr<'a> { + self.ptr + } + + /// Returns a reference to the component on the source entity as [`&dyn Reflect`](bevy_reflect::Reflect). + /// + /// Will return `None` if: + /// - World does not have [`AppTypeRegistry`](`crate::reflect::AppTypeRegistry`). + /// - Component does not implement [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr). + /// - Component is not registered. + /// - Component does not have [`TypeId`] + /// - Registered [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr)'s [`TypeId`] does not match component's [`TypeId`] + #[cfg(feature = "bevy_reflect")] + pub fn read_reflect( + &self, + registry: &bevy_reflect::TypeRegistry, + ) -> Option<&dyn bevy_reflect::Reflect> { + let type_id = self.info.type_id()?; + let reflect_from_ptr = registry.get_type_data::(type_id)?; + if reflect_from_ptr.type_id() != type_id { + return None; + } + // SAFETY: `source_component_ptr` stores data represented by `component_id`, which we used to get `ReflectFromPtr`. + unsafe { Some(reflect_from_ptr.as_reflect(self.ptr)) } + } +} + /// Context for component clone handlers. /// /// Provides fast access to useful resources like [`AppTypeRegistry`](crate::reflect::AppTypeRegistry) /// and allows component clone handler to get information about component being cloned. 
pub struct ComponentCloneCtx<'a, 'b> { component_id: ComponentId, - source_component_ptr: Ptr<'a>, target_component_written: bool, - target_components_ptrs: &'a mut Vec>, - target_components_buffer: &'b Bump, - components: &'a Components, + bundle_scratch: &'a mut BundleScratch<'b>, + bundle_scratch_allocator: &'b Bump, + entities: &'a Entities, + source: Entity, + target: Entity, component_info: &'a ComponentInfo, - entity_cloner: &'a EntityCloner, + entity_cloner: &'a mut EntityCloner, + mapper: &'a mut dyn EntityMapper, #[cfg(feature = "bevy_reflect")] type_registry: Option<&'a crate::reflect::AppTypeRegistry>, #[cfg(not(feature = "bevy_reflect"))] - #[expect(dead_code)] - type_registry: Option<()>, + #[expect(dead_code, reason = "type_registry is only used with bevy_reflect")] + type_registry: Option<&'a ()>, } impl<'a, 'b> ComponentCloneCtx<'a, 'b> { @@ -47,26 +93,31 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { /// /// # Safety /// Caller must ensure that: - /// - `components` and `component_id` are from the same world. + /// - `component_info` corresponds to the `component_id` in the same world,. /// - `source_component_ptr` points to a valid component of type represented by `component_id`. 
unsafe fn new( component_id: ComponentId, - source_component_ptr: Ptr<'a>, - target_components_ptrs: &'a mut Vec>, - target_components_buffer: &'b Bump, - components: &'a Components, - entity_cloner: &'a EntityCloner, + source: Entity, + target: Entity, + bundle_scratch_allocator: &'b Bump, + bundle_scratch: &'a mut BundleScratch<'b>, + entities: &'a Entities, + component_info: &'a ComponentInfo, + entity_cloner: &'a mut EntityCloner, + mapper: &'a mut dyn EntityMapper, #[cfg(feature = "bevy_reflect")] type_registry: Option<&'a crate::reflect::AppTypeRegistry>, - #[cfg(not(feature = "bevy_reflect"))] type_registry: Option<()>, + #[cfg(not(feature = "bevy_reflect"))] type_registry: Option<&'a ()>, ) -> Self { Self { component_id, - source_component_ptr, - target_components_ptrs, + source, + target, + bundle_scratch, target_component_written: false, - target_components_buffer, - components, - component_info: components.get_info_unchecked(component_id), + bundle_scratch_allocator, + entities, + mapper, + component_info, entity_cloner, type_registry, } @@ -79,12 +130,12 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { /// Returns the current source entity. pub fn source(&self) -> Entity { - self.entity_cloner.source + self.source } /// Returns the current target entity. pub fn target(&self) -> Entity { - self.entity_cloner.target + self.target } /// Returns the [`ComponentId`] of the component being cloned. @@ -97,42 +148,17 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { self.component_info } - /// Returns a reference to the component on the source entity. 
- /// - /// Will return `None` if `ComponentId` of requested component does not match `ComponentId` of source component - pub fn read_source_component(&self) -> Option<&T> { - if self - .component_info - .type_id() - .is_some_and(|id| id == TypeId::of::()) - { - // SAFETY: - // - Components and ComponentId are from the same world - // - source_component_ptr holds valid data of the type referenced by ComponentId - unsafe { Some(self.source_component_ptr.deref::()) } - } else { - None - } + /// Returns true if the [`EntityCloner`] is configured to recursively clone entities. When this is enabled, + /// entities stored in a cloned entity's [`RelationshipTarget`](crate::relationship::RelationshipTarget) component with + /// [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN) will also be cloned. + #[inline] + pub fn linked_cloning(&self) -> bool { + self.entity_cloner.linked_cloning } - /// Returns a reference to the component on the source entity as [`&dyn Reflect`](bevy_reflect::Reflect). - /// - /// Will return `None` if: - /// - World does not have [`AppTypeRegistry`](`crate::reflect::AppTypeRegistry`). - /// - Component does not implement [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr). - /// - Component is not registered. - /// - Component does not have [`TypeId`] - /// - Registered [`ReflectFromPtr`](bevy_reflect::ReflectFromPtr)'s [`TypeId`] does not match component's [`TypeId`] - #[cfg(feature = "bevy_reflect")] - pub fn read_source_component_reflect(&self) -> Option<&dyn bevy_reflect::Reflect> { - let registry = self.type_registry?.read(); - let type_id = self.component_info.type_id()?; - let reflect_from_ptr = registry.get_type_data::(type_id)?; - if reflect_from_ptr.type_id() != type_id { - return None; - } - // SAFETY: `source_component_ptr` stores data represented by `component_id`, which we used to get `ReflectFromPtr`. 
- unsafe { Some(reflect_from_ptr.as_reflect(self.source_component_ptr)) } + /// Returns this context's [`EntityMapper`]. + pub fn entity_mapper(&mut self) -> &mut dyn EntityMapper { + self.mapper } /// Writes component data to target entity. @@ -142,51 +168,45 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { /// - Component has already been written once. /// - Component being written is not registered in the world. /// - `ComponentId` of component being written does not match expected `ComponentId`. - pub fn write_target_component(&mut self, component: T) { - let short_name = disqualified::ShortName::of::(); + pub fn write_target_component(&mut self, mut component: C) { + C::map_entities(&mut component, &mut self.mapper); + let short_name = disqualified::ShortName::of::(); if self.target_component_written { panic!("Trying to write component '{short_name}' multiple times") } - if !self + if self .component_info .type_id() - .is_some_and(|id| id == TypeId::of::()) + .is_none_or(|id| id != TypeId::of::()) { panic!("TypeId of component '{short_name}' does not match source component TypeId") }; - let component_ref = self.target_components_buffer.alloc(component); - self.target_components_ptrs - .push(PtrMut::from(component_ref)); + // SAFETY: the TypeId of self.component_id has been checked to ensure it matches `C` + unsafe { + self.bundle_scratch + .push(self.bundle_scratch_allocator, self.component_id, component); + }; self.target_component_written = true; } - /// Writes component data to target entity by providing a pointer to source component data and a pointer to uninitialized target component data. - /// - /// This method allows caller to provide a function (`clone_fn`) to clone component using untyped pointers. - /// First argument to `clone_fn` points to source component data ([`Ptr`]), second argument points to uninitialized buffer ([`NonNull`]) allocated with layout - /// described by [`ComponentInfo`] stored in this [`ComponentCloneCtx`]. 
If cloning is successful and uninitialized buffer contains a valid clone of - /// source component, `clone_fn` should return `true`, otherwise it should return `false`. + /// Writes component data to target entity by providing a pointer to source component data. /// /// # Safety - /// Caller must ensure that if `clone_fn` is called and returns `true`, the second argument ([`NonNull`] pointer) points to a valid component data - /// described by [`ComponentInfo`] stored in this [`ComponentCloneCtx`]. + /// Caller must ensure that the passed in `ptr` references data that corresponds to the type of the source / target [`ComponentId`]. + /// `ptr` must also contain data that the written component can "own" (for example, this should not directly copy non-Copy data). + /// /// # Panics /// This will panic if component has already been written once. - pub unsafe fn write_target_component_ptr( - &mut self, - clone_fn: impl FnOnce(Ptr, NonNull) -> bool, - ) { + pub unsafe fn write_target_component_ptr(&mut self, ptr: Ptr) { if self.target_component_written { panic!("Trying to write component multiple times") } let layout = self.component_info.layout(); - let target_component_data_ptr = self.target_components_buffer.alloc_layout(layout); - - if clone_fn(self.source_component_ptr, target_component_data_ptr) { - self.target_components_ptrs - .push(PtrMut::new(target_component_data_ptr)); - self.target_component_written = true; - } + let target_ptr = self.bundle_scratch_allocator.alloc_layout(layout); + core::ptr::copy_nonoverlapping(ptr.as_ptr(), target_ptr.as_ptr(), layout.size()); + self.bundle_scratch + .push_ptr(self.component_id, PtrMut::new(target_ptr)); + self.target_component_written = true; } /// Writes component data to target entity. 
@@ -215,7 +235,7 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { let component_data_ptr = Box::into_raw(component).cast::(); let target_component_data_ptr = - self.target_components_buffer.alloc_layout(component_layout); + self.bundle_scratch_allocator.alloc_layout(component_layout); // SAFETY: // - target_component_data_ptr and component_data have the same data type. // - component_data_ptr has layout of component_layout @@ -225,39 +245,18 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { target_component_data_ptr.as_ptr(), component_layout.size(), ); - self.target_components_ptrs - .push(PtrMut::new(target_component_data_ptr)); - alloc::alloc::dealloc(component_data_ptr, component_layout); + self.bundle_scratch + .push_ptr(self.component_id, PtrMut::new(target_component_data_ptr)); + + if component_layout.size() > 0 { + // Ensure we don't attempt to deallocate zero-sized components + alloc::alloc::dealloc(component_data_ptr, component_layout); + } } self.target_component_written = true; } - /// Return a reference to this context's `EntityCloner` instance. - /// - /// This can be used to issue clone commands using the same cloning configuration: - /// ``` - /// # use bevy_ecs::world::{DeferredWorld, World}; - /// # use bevy_ecs::entity::ComponentCloneCtx; - /// fn clone_handler(world: &mut DeferredWorld, ctx: &mut ComponentCloneCtx) { - /// let another_target = world.commands().spawn_empty().id(); - /// let mut entity_cloner = ctx - /// .entity_cloner() - /// .with_source_and_target(ctx.source(), another_target); - /// world.commands().queue(move |world: &mut World| { - /// entity_cloner.clone_entity(world); - /// }); - /// } - /// ``` - pub fn entity_cloner(&self) -> &EntityCloner { - self.entity_cloner - } - - /// Returns instance of [`Components`]. - pub fn components(&self) -> &Components { - self.components - } - /// Returns [`AppTypeRegistry`](`crate::reflect::AppTypeRegistry`) if it exists in the world. 
/// /// NOTE: Prefer this method instead of manually reading the resource from the world. @@ -265,137 +264,34 @@ impl<'a, 'b> ComponentCloneCtx<'a, 'b> { pub fn type_registry(&self) -> Option<&crate::reflect::AppTypeRegistry> { self.type_registry } -} - -/// A helper struct to clone an entity. Used internally by [`EntityCloneBuilder::clone_entity`]. -pub struct EntityCloner { - source: Entity, - target: Entity, - filter_allows_components: bool, - filter: Arc>, - clone_handlers_overrides: Arc>, - move_components: bool, -} - -impl EntityCloner { - /// Clones and inserts components from the `source` entity into `target` entity using the stored configuration. - pub fn clone_entity(&mut self, world: &mut World) { - // SAFETY: - // - `source_entity` is read-only. - // - `type_registry` is read-only. - // - `components` is read-only. - // - `deferred_world` disallows structural ecs changes, which means all read-only resources above a not affected. - let (type_registry, source_entity, components, mut deferred_world) = unsafe { - let world = world.as_unsafe_world_cell(); - let source_entity = world - .get_entity(self.source) - .expect("Source entity must exist"); - - #[cfg(feature = "bevy_reflect")] - let app_registry = world.get_resource::(); - #[cfg(not(feature = "bevy_reflect"))] - let app_registry = Option::<()>::None; - ( - app_registry, - source_entity, - world.components(), - world.into_deferred(), - ) - }; - let archetype = source_entity.archetype(); - - let component_data = Bump::new(); - let mut component_ids: Vec = Vec::with_capacity(archetype.component_count()); - let mut component_data_ptrs: Vec = Vec::with_capacity(archetype.component_count()); - - for component in archetype.components() { - if !self.is_cloning_allowed(&component) { - continue; - } - - let global_handlers = components.get_component_clone_handlers(); - let handler = match self.clone_handlers_overrides.get(&component) { - Some(handler) => handler - .get_handler() - .unwrap_or_else(|| 
global_handlers.get_default_handler()), - None => global_handlers.get_handler(component), - }; - - // SAFETY: - // - There are no other mutable references to source entity. - // - `component` is from `source_entity`'s archetype - let source_component_ptr = - unsafe { source_entity.get_by_id(component).debug_checked_unwrap() }; - - // SAFETY: - // - `components` and `component` are from the same world - // - `source_component_ptr` is valid and points to the same type as represented by `component` - let mut ctx = unsafe { - ComponentCloneCtx::new( - component, - source_component_ptr, - &mut component_data_ptrs, - &component_data, - components, - self, - type_registry, - ) - }; - - (handler)(&mut deferred_world, &mut ctx); - - if ctx.target_component_written { - component_ids.push(component); - } - } - - world.flush(); - - if !world.entities.contains(self.target) { - panic!("Target entity does not exist"); - } - - debug_assert_eq!(component_data_ptrs.len(), component_ids.len()); - - // SAFETY: - // - All `component_ids` are from the same world as `target` entity - // - All `component_data_ptrs` are valid types represented by `component_ids` - unsafe { - world.entity_mut(self.target).insert_by_ids( - &component_ids, - component_data_ptrs.into_iter().map(|ptr| ptr.promote()), - ); - } - - if self.move_components { - world.entity_mut(self.source).remove_by_ids(&component_ids); - } + /// Queues the `entity` to be cloned by the current [`EntityCloner`] + pub fn queue_entity_clone(&mut self, entity: Entity) { + let target = self.entities.reserve_entity(); + self.mapper.set_mapped(entity, target); + self.entity_cloner.clone_queue.push_back(entity); } - fn is_cloning_allowed(&self, component: &ComponentId) -> bool { - (self.filter_allows_components && self.filter.contains(component)) - || (!self.filter_allows_components && !self.filter.contains(component)) - } - - /// Reuse existing [`EntityCloner`] configuration with new source and target. 
- pub fn with_source_and_target(&self, source: Entity, target: Entity) -> EntityCloner { - EntityCloner { - source, - target, - filter: self.filter.clone(), - clone_handlers_overrides: self.clone_handlers_overrides.clone(), - ..*self - } + /// Queues a deferred clone operation, which will run with exclusive [`World`] access immediately after calling the clone handler for each component on an entity. + /// This exists, despite its similarity to [`Commands`](crate::system::Commands), to provide access to the entity mapper in the current context. + pub fn queue_deferred( + &mut self, + deferred: impl FnOnce(&mut World, &mut dyn EntityMapper) + 'static, + ) { + self.entity_cloner + .deferred_commands + .push_back(Box::new(deferred)); } } -/// Builder struct to clone an entity. Allows configuring which components to clone, as well as how to clone them. +/// A configuration determining how to clone entities. This can be built using [`EntityCloner::build`], which +/// returns an [`EntityClonerBuilder`]. +/// /// After configuration is complete an entity can be cloned using [`Self::clone_entity`]. 
/// ///``` /// use bevy_ecs::prelude::*; -/// use bevy_ecs::entity::EntityCloneBuilder; +/// use bevy_ecs::entity::EntityCloner; /// /// #[derive(Component, Clone, PartialEq, Eq)] /// struct A { @@ -409,7 +305,7 @@ impl EntityCloner { /// let entity = world.spawn(component.clone()).id(); /// let entity_clone = world.spawn_empty().id(); /// -/// EntityCloneBuilder::new(&mut world).clone_entity(entity, entity_clone); +/// EntityCloner::build(&mut world).clone_entity(entity, entity_clone); /// /// assert!(world.get::(entity_clone).is_some_and(|c| *c == component)); ///``` @@ -420,77 +316,317 @@ impl EntityCloner { /// /// It should be noted that if `Component` is implemented manually or if `Clone` implementation is conditional /// (like when deriving `Clone` for a type with a generic parameter without `Clone` bound), -/// the component will be cloned using the [default cloning strategy](crate::component::ComponentCloneHandlers::get_default_handler). -/// To use `Clone`-based handler ([`ComponentCloneHandler::clone_handler`]) in this case it should be set manually using one -/// of the methods mentioned in the [Handlers](#handlers) section +/// the component will be cloned using the [default cloning strategy](crate::component::ComponentCloneBehavior::global_default_fn). 
+/// To use `Clone`-based handler ([`ComponentCloneBehavior::clone`]) in this case it should be set manually using one +/// of the methods mentioned in the [Clone Behaviors](#Clone-Behaviors) section /// -/// Here's an example of how to do it using [`get_component_clone_handler`](Component::get_component_clone_handler): +/// Here's an example of how to do it using [`clone_behavior`](Component::clone_behavior): /// ``` /// # use bevy_ecs::prelude::*; -/// # use bevy_ecs::component::{StorageType, component_clone_via_clone, ComponentCloneHandler, Mutable}; +/// # use bevy_ecs::component::{StorageType, ComponentCloneBehavior, Mutable}; /// #[derive(Clone)] /// struct SomeComponent; /// /// impl Component for SomeComponent { /// const STORAGE_TYPE: StorageType = StorageType::Table; /// type Mutability = Mutable; -/// fn get_component_clone_handler() -> ComponentCloneHandler { -/// ComponentCloneHandler::clone_handler::() +/// fn clone_behavior() -> ComponentCloneBehavior { +/// ComponentCloneBehavior::clone::() /// } /// } /// ``` /// -/// # Handlers -/// `EntityCloneBuilder` clones entities by cloning components using [`handlers`](ComponentCloneHandler), and there are multiple layers +/// # Clone Behaviors +/// [`EntityCloner`] clones entities by cloning components using [`ComponentCloneBehavior`], and there are multiple layers /// to decide which handler to use for which component. The overall hierarchy looks like this (priority from most to least): -/// 1. local overrides using [`override_component_clone_handler`](Self::override_component_clone_handler) -/// 2. global overrides using [`set_component_handler`](crate::component::ComponentCloneHandlers::set_component_handler) -/// 3. component-defined handler using [`get_component_clone_handler`](Component::get_component_clone_handler) -/// 4. default handler override using [`set_default_handler`](crate::component::ComponentCloneHandlers::set_default_handler) -/// 5. 
reflect-based or noop default clone handler depending on if `bevy_reflect` feature is enabled or not. -#[derive(Debug)] -pub struct EntityCloneBuilder<'w> { - world: &'w mut World, +/// 1. local overrides using [`EntityClonerBuilder::override_clone_behavior`] +/// 2. component-defined handler using [`Component::clone_behavior`] +/// 3. default handler override using [`EntityClonerBuilder::with_default_clone_fn`]. +/// 4. reflect-based or noop default clone handler depending on if `bevy_reflect` feature is enabled or not. +pub struct EntityCloner { filter_allows_components: bool, filter: HashSet, - clone_handlers_overrides: HashMap, - attach_required_components: bool, + clone_behavior_overrides: HashMap, move_components: bool, + linked_cloning: bool, + default_clone_fn: ComponentCloneFn, + clone_queue: VecDeque, + deferred_commands: VecDeque>, } -impl<'w> EntityCloneBuilder<'w> { - /// Creates a new [`EntityCloneBuilder`] for world. - pub fn new(world: &'w mut World) -> Self { +impl Default for EntityCloner { + fn default() -> Self { Self { - world, filter_allows_components: false, - filter: Default::default(), - clone_handlers_overrides: Default::default(), - attach_required_components: true, move_components: false, + linked_cloning: false, + default_clone_fn: ComponentCloneBehavior::global_default_fn(), + filter: Default::default(), + clone_behavior_overrides: Default::default(), + clone_queue: Default::default(), + deferred_commands: Default::default(), + } + } +} + +/// An expandable scratch space for defining a dynamic bundle. +struct BundleScratch<'a> { + component_ids: Vec, + component_ptrs: Vec>, +} + +impl<'a> BundleScratch<'a> { + pub(crate) fn with_capacity(capacity: usize) -> Self { + Self { + component_ids: Vec::with_capacity(capacity), + component_ptrs: Vec::with_capacity(capacity), + } + } + + /// Pushes the `ptr` component onto this storage with the given `id` [`ComponentId`]. 
+ /// + /// # Safety + /// The `id` [`ComponentId`] must match the component `ptr` for whatever [`World`] this scratch will + /// be written to. `ptr` must contain valid uniquely-owned data that matches the type of component referenced + /// in `id`. + pub(crate) unsafe fn push_ptr(&mut self, id: ComponentId, ptr: PtrMut<'a>) { + self.component_ids.push(id); + self.component_ptrs.push(ptr); + } + + /// Pushes the `C` component onto this storage with the given `id` [`ComponentId`], using the given `bump` allocator. + /// + /// # Safety + /// The `id` [`ComponentId`] must match the component `C` for whatever [`World`] this scratch will + /// be written to. + pub(crate) unsafe fn push( + &mut self, + allocator: &'a Bump, + id: ComponentId, + component: C, + ) { + let component_ref = allocator.alloc(component); + self.component_ids.push(id); + self.component_ptrs.push(PtrMut::from(component_ref)); + } + + /// Writes the scratch components to the given entity in the given world. + /// + /// # Safety + /// All [`ComponentId`] values in this instance must come from `world`. + pub(crate) unsafe fn write( + self, + world: &mut World, + entity: Entity, + relationship_hook_insert_mode: RelationshipHookMode, + ) { + // SAFETY: + // - All `component_ids` are from the same world as `target` entity + // - All `component_data_ptrs` are valid types represented by `component_ids` + unsafe { + world.entity_mut(entity).insert_by_ids_internal( + &self.component_ids, + self.component_ptrs.into_iter().map(|ptr| ptr.promote()), + relationship_hook_insert_mode, + ); } } +} - /// Finishes configuring the builder and clones `source` entity to `target`. - pub fn clone_entity(self, source: Entity, target: Entity) { - let EntityCloneBuilder { +impl EntityCloner { + /// Returns a new [`EntityClonerBuilder`] using the given `world`. 
+ pub fn build(world: &mut World) -> EntityClonerBuilder { + EntityClonerBuilder { world, - filter_allows_components, - filter, - clone_handlers_overrides, - move_components, - .. - } = self; - - EntityCloner { - source, - target, - filter_allows_components, - filter: Arc::new(filter), - clone_handlers_overrides: Arc::new(clone_handlers_overrides), - move_components, + attach_required_components: true, + entity_cloner: EntityCloner::default(), + } + } + + /// Returns `true` if this cloner is configured to clone entities referenced in cloned components via [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN). + /// This will produce "deep" / recursive clones of relationship trees that have "linked spawn". + #[inline] + pub fn linked_cloning(&self) -> bool { + self.linked_cloning + } + + /// Clones and inserts components from the `source` entity into the entity mapped by `mapper` from `source` using the stored configuration. + fn clone_entity_internal( + &mut self, + world: &mut World, + source: Entity, + mapper: &mut dyn EntityMapper, + relationship_hook_insert_mode: RelationshipHookMode, + ) -> Entity { + let target = mapper.get_mapped(source); + // PERF: reusing allocated space across clones would be more efficient. Consider an allocation model similar to `Commands`. + let bundle_scratch_allocator = Bump::new(); + let mut bundle_scratch: BundleScratch; + { + let world = world.as_unsafe_world_cell(); + let source_entity = world.get_entity(source).expect("Source entity must exist"); + + #[cfg(feature = "bevy_reflect")] + // SAFETY: we have unique access to `world`, nothing else accesses the registry at this moment, and we clone + // the registry, which prevents future conflicts. 
+ let app_registry = unsafe { + world + .get_resource::() + .cloned() + }; + #[cfg(not(feature = "bevy_reflect"))] + let app_registry = Option::<()>::None; + + let archetype = source_entity.archetype(); + bundle_scratch = BundleScratch::with_capacity(archetype.component_count()); + + for component in archetype.components() { + if !self.is_cloning_allowed(&component) { + continue; + } + + let handler = match self.clone_behavior_overrides.get(&component) { + Some(clone_behavior) => clone_behavior.resolve(self.default_clone_fn), + None => world + .components() + .get_info(component) + .map(|info| info.clone_behavior().resolve(self.default_clone_fn)) + .unwrap_or(self.default_clone_fn), + }; + + // SAFETY: This component exists because it is present on the archetype. + let info = unsafe { world.components().get_info_unchecked(component) }; + + // SAFETY: + // - There are no other mutable references to source entity. + // - `component` is from `source_entity`'s archetype + let source_component_ptr = + unsafe { source_entity.get_by_id(component).debug_checked_unwrap() }; + + let source_component = SourceComponent { + info, + ptr: source_component_ptr, + }; + + // SAFETY: + // - `components` and `component` are from the same world + // - `source_component_ptr` is valid and points to the same type as represented by `component` + let mut ctx = unsafe { + ComponentCloneCtx::new( + component, + source, + target, + &bundle_scratch_allocator, + &mut bundle_scratch, + world.entities(), + info, + self, + mapper, + app_registry.as_ref(), + ) + }; + + (handler)(&source_component, &mut ctx); + } + } + + world.flush(); + + for deferred in self.deferred_commands.drain(..) 
{ + (deferred)(world, mapper); + } + + if !world.entities.contains(target) { + panic!("Target entity does not exist"); + } + + if self.move_components { + world + .entity_mut(source) + .remove_by_ids(&bundle_scratch.component_ids); + } + + // SAFETY: + // - All `component_ids` are from the same world as `target` entity + // - All `component_data_ptrs` are valid types represented by `component_ids` + unsafe { bundle_scratch.write(world, target, relationship_hook_insert_mode) }; + target + } + + /// Clones and inserts components from the `source` entity into `target` entity using the stored configuration. + /// If this [`EntityCloner`] has [`EntityCloner::linked_cloning`], then it will recursively spawn entities as defined + /// by [`RelationshipTarget`](crate::relationship::RelationshipTarget) components with + /// [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN) + #[track_caller] + pub fn clone_entity(&mut self, world: &mut World, source: Entity, target: Entity) { + let mut map = EntityHashMap::::new(); + map.set_mapped(source, target); + self.clone_entity_mapped(world, source, &mut map); + } + + /// Clones and inserts components from the `source` entity into a newly spawned entity using the stored configuration. + /// If this [`EntityCloner`] has [`EntityCloner::linked_cloning`], then it will recursively spawn entities as defined + /// by [`RelationshipTarget`](crate::relationship::RelationshipTarget) components with + /// [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN) + #[track_caller] + pub fn spawn_clone(&mut self, world: &mut World, source: Entity) -> Entity { + let target = world.spawn_empty().id(); + self.clone_entity(world, source, target); + target + } + + /// Clones the entity into whatever entity `mapper` chooses for it. 
+ #[track_caller] + pub fn clone_entity_mapped( + &mut self, + world: &mut World, + source: Entity, + mapper: &mut dyn EntityMapper, + ) -> Entity { + // All relationships on the root should have their hooks run + let target = self.clone_entity_internal(world, source, mapper, RelationshipHookMode::Run); + let child_hook_insert_mode = if self.linked_cloning { + // When spawning "linked relationships", we want to ignore hooks for relationships we are spawning, while + // still registering with original relationship targets that are "not linked" to the current recursive spawn. + RelationshipHookMode::RunIfNotLinked + } else { + // If we are not cloning "linked relationships" recursively, then we want any cloned relationship components to + // register themselves with their original relationship target. + RelationshipHookMode::Run + }; + loop { + let queued = self.clone_queue.pop_front(); + if let Some(queued) = queued { + self.clone_entity_internal(world, queued, mapper, child_hook_insert_mode); + } else { + break; + } } - .clone_entity(world); + target + } + + fn is_cloning_allowed(&self, component: &ComponentId) -> bool { + (self.filter_allows_components && self.filter.contains(component)) + || (!self.filter_allows_components && !self.filter.contains(component)) + } +} + +/// A builder for configuring [`EntityCloner`]. See [`EntityCloner`] for more information. +pub struct EntityClonerBuilder<'w> { + world: &'w mut World, + entity_cloner: EntityCloner, + attach_required_components: bool, +} + +impl<'w> EntityClonerBuilder<'w> { + /// Internally calls [`EntityCloner::clone_entity`] on the builder's [`World`]. + pub fn clone_entity(&mut self, source: Entity, target: Entity) -> &mut Self { + self.entity_cloner.clone_entity(self.world, source, target); + self + } + /// Finishes configuring [`EntityCloner`] returns it. 
+ pub fn finish(self) -> EntityCloner { + self.entity_cloner } /// By default, any components allowed/denied through the filter will automatically @@ -500,7 +636,7 @@ impl<'w> EntityCloneBuilder<'w> { /// will not involve required components. pub fn without_required_components( &mut self, - builder: impl FnOnce(&mut EntityCloneBuilder) + Send + Sync + 'static, + builder: impl FnOnce(&mut EntityClonerBuilder), ) -> &mut Self { self.attach_required_components = false; builder(self); @@ -508,15 +644,21 @@ impl<'w> EntityCloneBuilder<'w> { self } + /// Sets the default clone function to use. + pub fn with_default_clone_fn(&mut self, clone_fn: ComponentCloneFn) -> &mut Self { + self.entity_cloner.default_clone_fn = clone_fn; + self + } + /// Sets whether the cloner should remove any components that were cloned, /// effectively moving them from the source entity to the target. /// /// This is disabled by default. /// /// The setting only applies to components that are allowed through the filter - /// at the time [`EntityCloneBuilder::clone_entity`] is called. + /// at the time [`EntityClonerBuilder::clone_entity`] is called. pub fn move_components(&mut self, enable: bool) -> &mut Self { - self.move_components = enable; + self.entity_cloner.move_components = enable; self } @@ -559,8 +701,8 @@ impl<'w> EntityCloneBuilder<'w> { /// Resets the filter to allow all components to be cloned. pub fn allow_all(&mut self) -> &mut Self { - self.filter_allows_components = false; - self.filter.clear(); + self.entity_cloner.filter_allows_components = false; + self.entity_cloner.filter.clear(); self } @@ -594,47 +736,82 @@ impl<'w> EntityCloneBuilder<'w> { /// Sets the filter to deny all components. pub fn deny_all(&mut self) -> &mut Self { - self.filter_allows_components = true; - self.filter.clear(); + self.entity_cloner.filter_allows_components = true; + self.entity_cloner.filter.clear(); self } - /// Overrides the [`ComponentCloneHandler`] for a component in this builder. 
- /// This handler will be used to clone the component instead of the global one defined by [`ComponentCloneHandlers`](crate::component::ComponentCloneHandlers) + /// Overrides the [`ComponentCloneBehavior`] for a component in this builder. + /// This handler will be used to clone the component instead of the global one defined by the [`EntityCloner`]. /// - /// See [Handlers section of `EntityCloneBuilder`](EntityCloneBuilder#handlers) to understand how this affects handler priority. - pub fn override_component_clone_handler( + /// See [Handlers section of `EntityClonerBuilder`](EntityClonerBuilder#handlers) to understand how this affects handler priority. + pub fn override_clone_behavior( &mut self, - handler: ComponentCloneHandler, + clone_behavior: ComponentCloneBehavior, ) -> &mut Self { if let Some(id) = self.world.components().component_id::() { - self.clone_handlers_overrides.insert(id, handler); + self.entity_cloner + .clone_behavior_overrides + .insert(id, clone_behavior); } self } - /// Removes a previously set override of [`ComponentCloneHandler`] for a component in this builder. - pub fn remove_component_clone_handler_override(&mut self) -> &mut Self { + /// Overrides the [`ComponentCloneBehavior`] for a component with the given `component_id` in this builder. + /// This handler will be used to clone the component instead of the global one defined by the [`EntityCloner`]. + /// + /// See [Handlers section of `EntityClonerBuilder`](EntityClonerBuilder#handlers) to understand how this affects handler priority. + pub fn override_clone_behavior_with_id( + &mut self, + component_id: ComponentId, + clone_behavior: ComponentCloneBehavior, + ) -> &mut Self { + self.entity_cloner + .clone_behavior_overrides + .insert(component_id, clone_behavior); + self + } + + /// Removes a previously set override of [`ComponentCloneBehavior`] for a component in this builder. 
+ pub fn remove_clone_behavior_override(&mut self) -> &mut Self { if let Some(id) = self.world.components().component_id::() { - self.clone_handlers_overrides.remove(&id); + self.entity_cloner.clone_behavior_overrides.remove(&id); } self } + /// Removes a previously set override of [`ComponentCloneBehavior`] for a given `component_id` in this builder. + pub fn remove_clone_behavior_override_with_id( + &mut self, + component_id: ComponentId, + ) -> &mut Self { + self.entity_cloner + .clone_behavior_overrides + .remove(&component_id); + self + } + + /// When true this cloner will be configured to clone entities referenced in cloned components via [`RelationshipTarget::LINKED_SPAWN`](crate::relationship::RelationshipTarget::LINKED_SPAWN). + /// This will produce "deep" / recursive clones of relationship trees that have "linked spawn". + pub fn linked_cloning(&mut self, linked_cloning: bool) -> &mut Self { + self.entity_cloner.linked_cloning = linked_cloning; + self + } + /// Helper function that allows a component through the filter. fn filter_allow(&mut self, id: ComponentId) { - if self.filter_allows_components { - self.filter.insert(id); + if self.entity_cloner.filter_allows_components { + self.entity_cloner.filter.insert(id); } else { - self.filter.remove(&id); + self.entity_cloner.filter.remove(&id); } if self.attach_required_components { if let Some(info) = self.world.components().get_info(id) { for required_id in info.required_components().iter_ids() { - if self.filter_allows_components { - self.filter.insert(required_id); + if self.entity_cloner.filter_allows_components { + self.entity_cloner.filter.insert(required_id); } else { - self.filter.remove(&required_id); + self.entity_cloner.filter.remove(&required_id); } } } @@ -643,18 +820,18 @@ impl<'w> EntityCloneBuilder<'w> { /// Helper function that disallows a component through the filter. 
fn filter_deny(&mut self, id: ComponentId) { - if self.filter_allows_components { - self.filter.remove(&id); + if self.entity_cloner.filter_allows_components { + self.entity_cloner.filter.remove(&id); } else { - self.filter.insert(id); + self.entity_cloner.filter.insert(id); } if self.attach_required_components { if let Some(info) = self.world.components().get_info(id) { for required_id in info.required_components().iter_ids() { - if self.filter_allows_components { - self.filter.remove(&required_id); + if self.entity_cloner.filter_allows_components { + self.entity_cloner.filter.remove(&required_id); } else { - self.filter.insert(required_id); + self.entity_cloner.filter.insert(required_id); } } } @@ -666,19 +843,27 @@ impl<'w> EntityCloneBuilder<'w> { mod tests { use super::ComponentCloneCtx; use crate::{ - self as bevy_ecs, - component::{Component, ComponentCloneHandler, ComponentDescriptor, StorageType}, - entity::EntityCloneBuilder, - world::{DeferredWorld, World}, + component::{Component, ComponentCloneBehavior, ComponentDescriptor, StorageType}, + entity::{Entity, EntityCloner, EntityHashMap, SourceComponent}, + prelude::{ChildOf, Children, Resource}, + reflect::{AppTypeRegistry, ReflectComponent, ReflectFromWorld}, + world::{FromWorld, World}, }; - use bevy_ecs_macros::require; + use alloc::vec::Vec; use bevy_ptr::OwningPtr; - use core::alloc::Layout; + use bevy_reflect::Reflect; + use core::marker::PhantomData; + use core::{alloc::Layout, ops::Deref}; #[cfg(feature = "bevy_reflect")] mod reflect { use super::*; - use crate::reflect::{AppTypeRegistry, ReflectComponent, ReflectFromWorld}; + use crate::{ + component::{Component, ComponentCloneBehavior}, + entity::{EntityCloner, SourceComponent}, + reflect::{AppTypeRegistry, ReflectComponent, ReflectFromWorld}, + }; + use alloc::vec; use bevy_reflect::{std_traits::ReflectDefault, FromType, Reflect, ReflectFromPtr}; #[test] @@ -695,83 +880,107 @@ mod tests { registry.write().register::(); 
world.register_component::(); - let id = world.component_id::().unwrap(); - world - .get_component_clone_handlers_mut() - .set_component_handler(id, ComponentCloneHandler::reflect_handler()); - let component = A { field: 5 }; let e = world.spawn(component.clone()).id(); let e_clone = world.spawn_empty().id(); - EntityCloneBuilder::new(&mut world).clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .override_clone_behavior::(ComponentCloneBehavior::reflect()) + .clone_entity(e, e_clone); assert!(world.get::(e_clone).is_some_and(|c| *c == component)); } - // TODO: remove this when https://github.com/bevyengine/bevy/pull/13432 lands #[test] fn clone_entity_using_reflect_all_paths() { + #[derive(PartialEq, Eq, Default, Debug)] + struct NotClone; + + // `reflect_clone`-based fast path + #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] + #[reflect(from_reflect = false)] + struct A { + field: usize, + field2: Vec, + } + // `ReflectDefault`-based fast path #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] #[reflect(Default)] #[reflect(from_reflect = false)] - struct A { + struct B { field: usize, field2: Vec, + #[reflect(ignore)] + ignored: NotClone, } // `ReflectFromReflect`-based fast path #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] - struct B { + struct C { field: usize, field2: Vec, + #[reflect(ignore)] + ignored: NotClone, } // `ReflectFromWorld`-based fast path #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] #[reflect(FromWorld)] #[reflect(from_reflect = false)] - struct C { + struct D { field: usize, field2: Vec, + #[reflect(ignore)] + ignored: NotClone, } let mut world = World::default(); world.init_resource::(); let registry = world.get_resource::().unwrap(); - registry.write().register::<(A, B, C)>(); + registry.write().register::<(A, B, C, D)>(); let a_id = world.register_component::(); let b_id = world.register_component::(); let c_id = world.register_component::(); - let handlers = 
world.get_component_clone_handlers_mut(); - handlers.set_component_handler(a_id, ComponentCloneHandler::reflect_handler()); - handlers.set_component_handler(b_id, ComponentCloneHandler::reflect_handler()); - handlers.set_component_handler(c_id, ComponentCloneHandler::reflect_handler()); - + let d_id = world.register_component::(); let component_a = A { field: 5, field2: vec![1, 2, 3, 4, 5], }; let component_b = B { - field: 6, + field: 5, field2: vec![1, 2, 3, 4, 5], + ignored: NotClone, }; let component_c = C { + field: 6, + field2: vec![1, 2, 3, 4, 5], + ignored: NotClone, + }; + let component_d = D { field: 7, field2: vec![1, 2, 3, 4, 5], + ignored: NotClone, }; - let e = world.spawn((component_a, component_b, component_c)).id(); + let e = world + .spawn((component_a, component_b, component_c, component_d)) + .id(); let e_clone = world.spawn_empty().id(); - EntityCloneBuilder::new(&mut world).clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .override_clone_behavior_with_id(a_id, ComponentCloneBehavior::reflect()) + .override_clone_behavior_with_id(b_id, ComponentCloneBehavior::reflect()) + .override_clone_behavior_with_id(c_id, ComponentCloneBehavior::reflect()) + .override_clone_behavior_with_id(d_id, ComponentCloneBehavior::reflect()) + .clone_entity(e, e_clone); assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); + assert_eq!(world.get::(e_clone), Some(world.get::(e).unwrap())); } #[test] @@ -782,8 +991,9 @@ mod tests { #[derive(Component, Reflect)] struct B; - fn test_handler(_world: &mut DeferredWorld, ctx: &mut ComponentCloneCtx) { - assert!(ctx.read_source_component_reflect().is_none()); + fn test_handler(source: &SourceComponent, ctx: &mut ComponentCloneCtx) { + let registry = ctx.type_registry().unwrap(); + assert!(source.read_reflect(®istry.read()).is_none()); } let mut world = World::default(); @@ 
-798,15 +1008,12 @@ mod tests { .insert(>::from_type()); } - let a_id = world.register_component::(); - let handlers = world.get_component_clone_handlers_mut(); - handlers - .set_component_handler(a_id, ComponentCloneHandler::custom_handler(test_handler)); - let e = world.spawn(A).id(); let e_clone = world.spawn_empty().id(); - EntityCloneBuilder::new(&mut world).clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .override_clone_behavior::(ComponentCloneBehavior::Custom(test_handler)) + .clone_entity(e, e_clone); } #[test] @@ -833,7 +1040,7 @@ mod tests { let e = world.spawn(component.clone()).id(); let e_clone = world.spawn_empty().id(); - EntityCloneBuilder::new(&mut world).clone_entity(e, e_clone); + EntityCloner::build(&mut world).clone_entity(e, e_clone); assert!(world .get::(e_clone) @@ -846,23 +1053,21 @@ mod tests { #[derive(Component, PartialEq, Eq, Default, Debug)] struct A; - // No valid type data + // No valid type data and not `reflect_clone`-able #[derive(Component, Reflect, PartialEq, Eq, Default, Debug)] #[reflect(Component)] #[reflect(from_reflect = false)] - struct B; + struct B(#[reflect(ignore)] PhantomData<()>); let mut world = World::default(); - let a_id = world.register_component::(); - let b_id = world.register_component::(); - let handlers = world.get_component_clone_handlers_mut(); - handlers.set_component_handler(a_id, ComponentCloneHandler::reflect_handler()); - handlers.set_component_handler(b_id, ComponentCloneHandler::reflect_handler()); // No AppTypeRegistry - let e = world.spawn((A, B)).id(); + let e = world.spawn((A, B(Default::default()))).id(); let e_clone = world.spawn_empty().id(); - EntityCloneBuilder::new(&mut world).clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .override_clone_behavior::(ComponentCloneBehavior::reflect()) + .override_clone_behavior::(ComponentCloneBehavior::reflect()) + .clone_entity(e, e_clone); assert_eq!(world.get::(e_clone), None); assert_eq!(world.get::(e_clone), None); @@ 
-871,9 +1076,9 @@ mod tests { let registry = world.get_resource::().unwrap(); registry.write().register::(); - let e = world.spawn((A, B)).id(); + let e = world.spawn((A, B(Default::default()))).id(); let e_clone = world.spawn_empty().id(); - EntityCloneBuilder::new(&mut world).clone_entity(e, e_clone); + EntityCloner::build(&mut world).clone_entity(e, e_clone); assert_eq!(world.get::(e_clone), None); assert_eq!(world.get::(e_clone), None); } @@ -893,7 +1098,7 @@ mod tests { let e = world.spawn(component.clone()).id(); let e_clone = world.spawn_empty().id(); - EntityCloneBuilder::new(&mut world).clone_entity(e, e_clone); + EntityCloner::build(&mut world).clone_entity(e, e_clone); assert!(world.get::(e_clone).is_some_and(|c| *c == component)); } @@ -915,10 +1120,10 @@ mod tests { let e = world.spawn((component.clone(), B)).id(); let e_clone = world.spawn_empty().id(); - let mut builder = EntityCloneBuilder::new(&mut world); - builder.deny_all(); - builder.allow::(); - builder.clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .deny_all() + .allow::() + .clone_entity(e, e_clone); assert!(world.get::(e_clone).is_some_and(|c| *c == component)); assert!(world.get::(e_clone).is_none()); @@ -944,9 +1149,9 @@ mod tests { let e = world.spawn((component.clone(), B, C)).id(); let e_clone = world.spawn_empty().id(); - let mut builder = EntityCloneBuilder::new(&mut world); - builder.deny::(); - builder.clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .deny::() + .clone_entity(e, e_clone); assert!(world.get::(e_clone).is_some_and(|c| *c == component)); assert!(world.get::(e_clone).is_none()); @@ -973,13 +1178,13 @@ mod tests { let e = world.spawn((component.clone(), B, C)).id(); let e_clone = world.spawn_empty().id(); - let mut builder = EntityCloneBuilder::new(&mut world); - builder.deny_all(); - builder.allow::(); - builder.allow::(); - builder.allow::(); - builder.deny::(); - builder.clone_entity(e, e_clone); + EntityCloner::build(&mut world) + 
.deny_all() + .allow::() + .allow::() + .allow::() + .deny::() + .clone_entity(e, e_clone); assert!(world.get::(e_clone).is_some_and(|c| *c == component)); assert!(world.get::(e_clone).is_none()); @@ -1006,11 +1211,11 @@ mod tests { let e = world.spawn((component.clone(), B, C)).id(); let e_clone = world.spawn_empty().id(); - let mut builder = EntityCloneBuilder::new(&mut world); - builder.deny_all(); - builder.allow::<(A, B, C)>(); - builder.deny::<(B, C)>(); - builder.clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .deny_all() + .allow::<(A, B, C)>() + .deny::<(B, C)>() + .clone_entity(e, e_clone); assert!(world.get::(e_clone).is_some_and(|c| *c == component)); assert!(world.get::(e_clone).is_none()); @@ -1024,7 +1229,7 @@ mod tests { struct A; #[derive(Component, Clone, PartialEq, Debug, Default)] - #[require(C(|| C(5)))] + #[require(C(5))] struct B; #[derive(Component, Clone, PartialEq, Debug)] @@ -1035,32 +1240,53 @@ mod tests { let e = world.spawn(A).id(); let e_clone = world.spawn_empty().id(); - let mut builder = EntityCloneBuilder::new(&mut world); - builder.deny_all(); - builder.without_required_components(|builder| { - builder.allow::(); - }); - builder.clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .deny_all() + .allow::() + .clone_entity(e, e_clone); assert_eq!(world.entity(e_clone).get::(), None); assert_eq!(world.entity(e_clone).get::(), Some(&B)); assert_eq!(world.entity(e_clone).get::(), Some(&C(5))); } + #[test] + fn clone_entity_with_default_required_components() { + #[derive(Component, Clone, PartialEq, Debug)] + #[require(B)] + struct A; + + #[derive(Component, Clone, PartialEq, Debug, Default)] + #[require(C(5))] + struct B; + + #[derive(Component, Clone, PartialEq, Debug)] + struct C(u32); + + let mut world = World::default(); + + let e = world.spawn((A, C(0))).id(); + let e_clone = world.spawn_empty().id(); + + EntityCloner::build(&mut world) + .deny_all() + .without_required_components(|builder| { + 
builder.allow::(); + }) + .clone_entity(e, e_clone); + + assert_eq!(world.entity(e_clone).get::(), Some(&A)); + assert_eq!(world.entity(e_clone).get::(), Some(&B)); + assert_eq!(world.entity(e_clone).get::(), Some(&C(5))); + } + #[test] fn clone_entity_with_dynamic_components() { const COMPONENT_SIZE: usize = 10; - fn test_handler(_world: &mut DeferredWorld, ctx: &mut ComponentCloneCtx) { - // SAFETY: this handler is only going to be used with a component represented by [u8; COMPONENT_SIZE] + fn test_handler(source: &SourceComponent, ctx: &mut ComponentCloneCtx) { + // SAFETY: the passed in ptr corresponds to copy-able data that matches the type of the source / target component unsafe { - ctx.write_target_component_ptr(move |source_ptr, target_ptr| { - core::ptr::copy_nonoverlapping( - source_ptr.as_ptr(), - target_ptr.as_ptr(), - COMPONENT_SIZE, - ); - true - }); + ctx.write_target_component_ptr(source.ptr()); } } @@ -1077,16 +1303,11 @@ mod tests { layout, None, true, + ComponentCloneBehavior::Custom(test_handler), ) }; let component_id = world.register_component_with_descriptor(descriptor); - let handlers = world.get_component_clone_handlers_mut(); - handlers.set_component_handler( - component_id, - ComponentCloneHandler::custom_handler(test_handler), - ); - let mut entity = world.spawn_empty(); let data = [5u8; COMPONENT_SIZE]; @@ -1099,8 +1320,7 @@ mod tests { let entity = entity.id(); let entity_clone = world.spawn_empty().id(); - let builder = EntityCloneBuilder::new(&mut world); - builder.clone_entity(entity, entity_clone); + EntityCloner::build(&mut world).clone_entity(entity, entity_clone); let ptr = world.get_by_id(entity, component_id).unwrap(); let clone_ptr = world.get_by_id(entity_clone, component_id).unwrap(); @@ -1112,4 +1332,78 @@ mod tests { ); } } + + #[test] + fn recursive_clone() { + let mut world = World::new(); + let root = world.spawn_empty().id(); + let child1 = world.spawn(ChildOf(root)).id(); + let grandchild = 
world.spawn(ChildOf(child1)).id(); + let child2 = world.spawn(ChildOf(root)).id(); + + let clone_root = world.spawn_empty().id(); + EntityCloner::build(&mut world) + .linked_cloning(true) + .clone_entity(root, clone_root); + + let root_children = world + .entity(clone_root) + .get::() + .unwrap() + .iter() + .cloned() + .collect::>(); + + assert!(root_children.iter().all(|e| *e != child1 && *e != child2)); + assert_eq!(root_children.len(), 2); + let child1_children = world.entity(root_children[0]).get::().unwrap(); + assert_eq!(child1_children.len(), 1); + assert_ne!(child1_children[0], grandchild); + assert!(world.entity(root_children[1]).get::().is_none()); + + assert_eq!( + world.entity(root).get::().unwrap().deref(), + &[child1, child2] + ); + } + + #[test] + fn clone_with_reflect_from_world() { + #[derive(Component, Reflect, PartialEq, Eq, Debug)] + #[reflect(Component, FromWorld, from_reflect = false)] + struct SomeRef( + #[entities] Entity, + // We add an ignored field here to ensure `reflect_clone` fails and `FromWorld` is used + #[reflect(ignore)] PhantomData<()>, + ); + + #[derive(Resource)] + struct FromWorldCalled(bool); + + impl FromWorld for SomeRef { + fn from_world(world: &mut World) -> Self { + world.insert_resource(FromWorldCalled(true)); + SomeRef(Entity::PLACEHOLDER, Default::default()) + } + } + let mut world = World::new(); + let registry = AppTypeRegistry::default(); + registry.write().register::(); + world.insert_resource(registry); + + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + let c = world.spawn(SomeRef(a, Default::default())).id(); + let d = world.spawn_empty().id(); + let mut map = EntityHashMap::::new(); + map.insert(a, b); + map.insert(c, d); + + let cloned = EntityCloner::default().clone_entity_mapped(&mut world, c, &mut map); + assert_eq!( + *world.entity(cloned).get::().unwrap(), + SomeRef(b, Default::default()) + ); + assert!(world.resource::().0); + } } diff --git 
a/crates/bevy_ecs/src/entity/entity_set.rs b/crates/bevy_ecs/src/entity/entity_set.rs index 34e48551fde7b..e4860685fe07f 100644 --- a/crates/bevy_ecs/src/entity/entity_set.rs +++ b/crates/bevy_ecs/src/entity/entity_set.rs @@ -3,52 +3,71 @@ use alloc::{ collections::{btree_map, btree_set}, rc::Rc, }; +use bevy_platform::collections::HashSet; use core::{ array, fmt::{Debug, Formatter}, + hash::{BuildHasher, Hash}, iter::{self, FusedIterator}, option, result, }; -use super::Entity; +use super::{Entity, UniqueEntityEquivalentSlice}; -#[cfg(feature = "portable-atomic")] -use portable_atomic_util::Arc; +use bevy_platform::sync::Arc; -#[cfg(not(feature = "portable-atomic"))] -use alloc::sync::Arc; - -/// A trait for entity borrows. +/// A trait for types that contain an [`Entity`]. +/// +/// This trait behaves similarly to `Borrow`, but yielding `Entity` directly. /// -/// This trait can be thought of as `Borrow`, but yielding `Entity` directly. -pub trait EntityBorrow { - /// Returns the borrowed entity. +/// It should only be implemented when: +/// - Retrieving the [`Entity`] is a simple operation. +/// - The [`Entity`] contained by the type is unambiguous. +pub trait ContainsEntity { + /// Returns the contained entity. fn entity(&self) -> Entity; } -/// A trait for [`Entity`] borrows with trustworthy comparison behavior. +/// A trait for types that represent an [`Entity`]. /// -/// Comparison trait behavior between a [`TrustedEntityBorrow`] type and its underlying entity will match. +/// Comparison trait behavior between an [`EntityEquivalent`] type and its underlying entity will match. /// This property includes [`PartialEq`], [`Eq`], [`PartialOrd`], [`Ord`] and [`Hash`], /// and remains even after [`Clone`] and/or [`Borrow`] calls. /// /// # Safety -/// Any [`PartialEq`], [`Eq`], [`PartialOrd`], [`Ord`], and [`Hash`] impls must be -/// equivalent for `Self` and its underlying entity: -/// `x.entity() == y.entity()` should give the same result as `x == y`. 
-/// The above equivalence must also hold through and between calls to any [`Clone`] -/// and [`Borrow`]/[`BorrowMut`] impls in place of [`entity()`]. +/// Any [`PartialEq`], [`Eq`], [`PartialOrd`], and [`Ord`] impls must evaluate the same for `Self` and +/// its underlying entity. +/// `x.entity() == y.entity()` must be equivalent to `x == y`. +/// +/// The above equivalence must also hold through and between calls to any [`Clone`] and +/// [`Borrow`]/[`BorrowMut`] impls in place of [`entity()`]. /// /// The result of [`entity()`] must be unaffected by any interior mutability. /// +/// The aforementioned properties imply determinism in both [`entity()`] calls +/// and comparison trait behavior. +/// +/// All [`Hash`] impls except that for [`Entity`] must delegate to the [`Hash`] impl of +/// another [`EntityEquivalent`] type. All conversions to the delegatee within the [`Hash`] impl must +/// follow [`entity()`] equivalence. +/// +/// It should be noted that [`Hash`] is *not* a comparison trait, and with [`Hash::hash`] being forcibly +/// generic over all [`Hasher`]s, **cannot** guarantee determinism or uniqueness of any final hash values +/// on its own. +/// To obtain hash values forming the same total order as [`Entity`], any [`Hasher`] used must be +/// deterministic and concerning [`Entity`], collisionless. +/// Standard library hash collections handle collisions with an [`Eq`] fallback, but do not account for +/// determinism when [`BuildHasher`] is unspecified,. 
+/// /// [`Hash`]: core::hash::Hash +/// [`Hasher`]: core::hash::Hasher /// [`Borrow`]: core::borrow::Borrow /// [`BorrowMut`]: core::borrow::BorrowMut -/// [`entity()`]: EntityBorrow::entity -pub unsafe trait TrustedEntityBorrow: EntityBorrow + Eq {} +/// [`entity()`]: ContainsEntity::entity +pub unsafe trait EntityEquivalent: ContainsEntity + Eq {} -impl EntityBorrow for Entity { +impl ContainsEntity for Entity { fn entity(&self) -> Entity { *self } @@ -56,9 +75,9 @@ impl EntityBorrow for Entity { // SAFETY: // The trait implementations of Entity are correct and deterministic. -unsafe impl TrustedEntityBorrow for Entity {} +unsafe impl EntityEquivalent for Entity {} -impl EntityBorrow for &T { +impl ContainsEntity for &T { fn entity(&self) -> Entity { (**self).entity() } @@ -68,9 +87,9 @@ impl EntityBorrow for &T { // `&T` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone` and `Borrow` maintain equality. // `&T` is `Freeze`. -unsafe impl TrustedEntityBorrow for &T {} +unsafe impl EntityEquivalent for &T {} -impl EntityBorrow for &mut T { +impl ContainsEntity for &mut T { fn entity(&self) -> Entity { (**self).entity() } @@ -80,9 +99,9 @@ impl EntityBorrow for &mut T { // `&mut T` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Borrow` and `BorrowMut` maintain equality. // `&mut T` is `Freeze`. -unsafe impl TrustedEntityBorrow for &mut T {} +unsafe impl EntityEquivalent for &mut T {} -impl EntityBorrow for Box { +impl ContainsEntity for Box { fn entity(&self) -> Entity { (**self).entity() } @@ -92,9 +111,9 @@ impl EntityBorrow for Box { // `Box` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone`, `Borrow` and `BorrowMut` maintain equality. // `Box` is `Freeze`. 
-unsafe impl TrustedEntityBorrow for Box {} +unsafe impl EntityEquivalent for Box {} -impl EntityBorrow for Rc { +impl ContainsEntity for Rc { fn entity(&self) -> Entity { (**self).entity() } @@ -104,9 +123,9 @@ impl EntityBorrow for Rc { // `Rc` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone`, `Borrow` and `BorrowMut` maintain equality. // `Rc` is `Freeze`. -unsafe impl TrustedEntityBorrow for Rc {} +unsafe impl EntityEquivalent for Rc {} -impl EntityBorrow for Arc { +impl ContainsEntity for Arc { fn entity(&self) -> Entity { (**self).entity() } @@ -116,7 +135,7 @@ impl EntityBorrow for Arc { // `Arc` delegates `PartialEq`, `Eq`, `PartialOrd`, `Ord`, and `Hash` to T. // `Clone`, `Borrow` and `BorrowMut` maintain equality. // `Arc` is `Freeze`. -unsafe impl TrustedEntityBorrow for Arc {} +unsafe impl EntityEquivalent for Arc {} /// A set of unique entities. /// @@ -135,6 +154,7 @@ unsafe impl TrustedEntityBorrow for Arc {} /// [`into_iter()`]: IntoIterator::into_iter /// [`iter_many_unique`]: crate::system::Query::iter_many_unique /// [`iter_many_unique_mut`]: crate::system::Query::iter_many_unique_mut +/// [`Vec`]: alloc::vec::Vec pub trait EntitySet: IntoIterator {} impl> EntitySet for T {} @@ -147,93 +167,104 @@ impl> EntitySet for T {} /// /// `x != y` must hold for any 2 elements returned by the iterator. /// This is always true for iterators that cannot return more than one element. -pub unsafe trait EntitySetIterator: Iterator {} +pub unsafe trait EntitySetIterator: Iterator { + /// Transforms an `EntitySetIterator` into a collection. + /// + /// This is a specialized form of [`collect`], for collections which benefit from the uniqueness guarantee. + /// When present, this should always be preferred over [`collect`]. 
+ /// + /// [`collect`]: Iterator::collect + // FIXME: When subtrait item shadowing stabilizes, this should be renamed and shadow `Iterator::collect` + fn collect_set>(self) -> B + where + Self: Sized, + { + FromEntitySetIterator::from_entity_set_iter(self) + } +} // SAFETY: // A correct `BTreeMap` contains only unique keys. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. -unsafe impl EntitySetIterator for btree_map::Keys<'_, K, V> {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. +unsafe impl EntitySetIterator for btree_map::Keys<'_, K, V> {} // SAFETY: // A correct `BTreeMap` contains only unique keys. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. -unsafe impl EntitySetIterator for btree_map::IntoKeys {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeMap`. +unsafe impl EntitySetIterator for btree_map::IntoKeys {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The sub-range maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Range<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Range<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "intersection" operation maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Intersection<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Intersection<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. 
-// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "union" operation maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Union<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Union<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "difference" operation maintains uniqueness. -unsafe impl EntitySetIterator for btree_set::Difference<'_, T> {} +unsafe impl EntitySetIterator for btree_set::Difference<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. // The "symmetric difference" operation maintains uniqueness. -unsafe impl EntitySetIterator - for btree_set::SymmetricDifference<'_, T> -{ -} +unsafe impl EntitySetIterator for btree_set::SymmetricDifference<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. -unsafe impl EntitySetIterator for btree_set::Iter<'_, T> {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. +unsafe impl EntitySetIterator for btree_set::Iter<'_, T> {} // SAFETY: // A correct `BTreeSet` contains only unique elements. -// TrustedEntityBorrow guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. -unsafe impl EntitySetIterator for btree_set::IntoIter {} +// EntityEquivalent guarantees a trustworthy Ord impl for T, and thus a correct `BTreeSet`. 
+unsafe impl EntitySetIterator for btree_set::IntoIter {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for option::Iter<'_, T> {} +unsafe impl EntitySetIterator for option::Iter<'_, T> {} // SAFETY: This iterator only returns one element. -// unsafe impl EntitySetIterator for option::IterMut<'_, T> {} +// unsafe impl EntitySetIterator for option::IterMut<'_, T> {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for option::IntoIter {} +unsafe impl EntitySetIterator for option::IntoIter {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for result::Iter<'_, T> {} +unsafe impl EntitySetIterator for result::Iter<'_, T> {} // SAFETY: This iterator only returns one element. -// unsafe impl EntitySetIterator for result::IterMut<'_, T> {} +// unsafe impl EntitySetIterator for result::IterMut<'_, T> {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for result::IntoIter {} +unsafe impl EntitySetIterator for result::IntoIter {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for array::IntoIter {} +unsafe impl EntitySetIterator for array::IntoIter {} // SAFETY: This iterator does not return any elements. -unsafe impl EntitySetIterator for array::IntoIter {} +unsafe impl EntitySetIterator for array::IntoIter {} // SAFETY: This iterator only returns one element. -unsafe impl T> EntitySetIterator for iter::OnceWith {} +unsafe impl T> EntitySetIterator for iter::OnceWith {} // SAFETY: This iterator only returns one element. -unsafe impl EntitySetIterator for iter::Once {} +unsafe impl EntitySetIterator for iter::Once {} // SAFETY: This iterator does not return any elements. -unsafe impl EntitySetIterator for iter::Empty {} +unsafe impl EntitySetIterator for iter::Empty {} // SAFETY: Taking a mutable reference of an iterator has no effect on its elements. 
unsafe impl EntitySetIterator for &mut I {} @@ -241,14 +272,14 @@ unsafe impl EntitySetIterator for &mut I {} // SAFETY: Boxing an iterator has no effect on its elements. unsafe impl EntitySetIterator for Box {} -// SAFETY: TrustedEntityBorrow ensures that Copy does not affect equality, via its restrictions on Clone. -unsafe impl<'a, T: 'a + TrustedEntityBorrow + Copy, I: EntitySetIterator> +// SAFETY: EntityEquivalent ensures that Copy does not affect equality, via its restrictions on Clone. +unsafe impl<'a, T: 'a + EntityEquivalent + Copy, I: EntitySetIterator> EntitySetIterator for iter::Copied { } -// SAFETY: TrustedEntityBorrow ensures that Clone does not affect equality. -unsafe impl<'a, T: 'a + TrustedEntityBorrow + Clone, I: EntitySetIterator> +// SAFETY: EntityEquivalent ensures that Clone does not affect equality. +unsafe impl<'a, T: 'a + EntityEquivalent + Clone, I: EntitySetIterator> EntitySetIterator for iter::Cloned { } @@ -264,7 +295,7 @@ unsafe impl EntitySetIterator for iter::Fuse {} // SAFETY: // Obtaining immutable references the elements of an iterator does not affect uniqueness. -// TrustedEntityBorrow ensures the lack of interior mutability. +// EntityEquivalent ensures the lack of interior mutability. unsafe impl::Item)> EntitySetIterator for iter::Inspect { @@ -294,10 +325,40 @@ unsafe impl::Item) -> bool> Enti // SAFETY: Discarding elements maintains uniqueness. unsafe impl EntitySetIterator for iter::StepBy {} +/// Conversion from an `EntitySetIterator`. +/// +/// Some collections, while they can be constructed from plain iterators, +/// benefit strongly from the additional uniqueness guarantee [`EntitySetIterator`] offers. +/// Mirroring [`Iterator::collect`]/[`FromIterator::from_iter`], [`EntitySetIterator::collect_set`] and +/// `FromEntitySetIterator::from_entity_set_iter` can be used for construction. +/// +/// See also: [`EntitySet`]. 
+// FIXME: When subtrait item shadowing stabilizes, this should be renamed and shadow `FromIterator::from_iter` +pub trait FromEntitySetIterator: FromIterator { + /// Creates a value from an [`EntitySetIterator`]. + fn from_entity_set_iter>(set_iter: T) -> Self; +} + +impl FromEntitySetIterator + for HashSet +{ + fn from_entity_set_iter>(set_iter: I) -> Self { + let iter = set_iter.into_iter(); + let set = HashSet::::with_capacity_and_hasher(iter.size_hint().0, S::default()); + iter.fold(set, |mut set, e| { + // SAFETY: Every element in self is unique. + unsafe { + set.insert_unique_unchecked(e); + } + set + }) + } +} + /// An iterator that yields unique entities. /// /// This wrapper can provide an [`EntitySetIterator`] implementation when an instance of `I` is known to uphold uniqueness. -pub struct UniqueEntityIter> { +pub struct UniqueEntityIter> { iter: I, } @@ -307,7 +368,8 @@ impl UniqueEntityIter { Self { iter } } } -impl> UniqueEntityIter { + +impl> UniqueEntityIter { /// Constructs a [`UniqueEntityIter`] from an iterator unsafely. /// /// # Safety @@ -316,9 +378,29 @@ impl> UniqueEntityIter { pub unsafe fn from_iterator_unchecked(iter: I) -> Self { Self { iter } } + + /// Returns the inner `I`. + pub fn into_inner(self) -> I { + self.iter + } + + /// Returns a reference to the inner `I`. + pub fn as_inner(&self) -> &I { + &self.iter + } + + /// Returns a mutable reference to the inner `I`. + /// + /// # Safety + /// + /// `self` must always contain an iterator that yields unique elements, + /// even while this reference is live. 
+ pub unsafe fn as_mut_inner(&mut self) -> &mut I { + &mut self.iter + } } -impl> Iterator for UniqueEntityIter { +impl> Iterator for UniqueEntityIter { type Item = I::Item; fn next(&mut self) -> Option { @@ -330,27 +412,43 @@ impl> Iterator for UniqueEntityIter { } } -impl> ExactSizeIterator for UniqueEntityIter {} +impl> ExactSizeIterator for UniqueEntityIter {} -impl> DoubleEndedIterator - for UniqueEntityIter -{ +impl> DoubleEndedIterator for UniqueEntityIter { fn next_back(&mut self) -> Option { self.iter.next_back() } } -impl> FusedIterator for UniqueEntityIter {} +impl> FusedIterator for UniqueEntityIter {} // SAFETY: The underlying iterator is ensured to only return unique elements by its construction. -unsafe impl> EntitySetIterator for UniqueEntityIter {} +unsafe impl> EntitySetIterator for UniqueEntityIter {} -impl + AsRef<[T]>> AsRef<[T]> for UniqueEntityIter { +impl + AsRef<[T]>> AsRef<[T]> for UniqueEntityIter { fn as_ref(&self) -> &[T] { self.iter.as_ref() } } +impl + AsRef<[T]>> + AsRef> for UniqueEntityIter +{ + fn as_ref(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.iter.as_ref()) } + } +} + +impl + AsMut<[T]>> + AsMut> for UniqueEntityIter +{ + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.iter.as_mut()) } + } +} + // Default does not guarantee uniqueness, meaning `I` needs to be EntitySetIterator. 
impl Default for UniqueEntityIter { fn default() -> Self { @@ -369,7 +467,7 @@ impl Clone for UniqueEntityIter { } } -impl + Debug> Debug for UniqueEntityIter { +impl + Debug> Debug for UniqueEntityIter { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { f.debug_struct("UniqueEntityIter") .field("iter", &self.iter) @@ -379,25 +477,25 @@ impl + Debug> Debug for UniqueEntityIter< #[cfg(test)] mod tests { - #[allow(unused_imports)] + use alloc::{vec, vec::Vec}; + use crate::prelude::{Schedule, World}; - #[allow(unused_imports)] use crate::component::Component; + use crate::entity::Entity; use crate::query::{QueryState, With}; use crate::system::Query; use crate::world::Mut; - #[allow(unused_imports)] - use crate::{self as bevy_ecs}; - #[allow(unused_imports)] - use crate::{entity::Entity, world::unsafe_world_cell}; use super::UniqueEntityIter; #[derive(Component, Clone)] pub struct Thing; - #[allow(clippy::iter_skip_zero)] + #[expect( + clippy::iter_skip_zero, + reason = "The `skip(0)` is used to ensure that the `Skip` iterator implements `EntitySet`, which is needed to pass the iterator as the `entities` parameter." + )] #[test] fn preserving_uniqueness() { let mut world = World::new(); diff --git a/crates/bevy_ecs/src/entity/hash.rs b/crates/bevy_ecs/src/entity/hash.rs index b7d4dcae54586..a53847343952a 100644 --- a/crates/bevy_ecs/src/entity/hash.rs +++ b/crates/bevy_ecs/src/entity/hash.rs @@ -1,11 +1,11 @@ use core::hash::{BuildHasher, Hasher}; #[cfg(feature = "bevy_reflect")] -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// A [`BuildHasher`] that results in a [`EntityHasher`]. 
#[derive(Debug, Default, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Default, Clone))] pub struct EntityHash; impl BuildHasher for EntityHash { @@ -25,7 +25,7 @@ impl BuildHasher for EntityHash { /// /// If you have an unusual case -- say all your indices are multiples of 256 /// or most of the entities are dead generations -- then you might want also to -/// try [`DefaultHasher`](bevy_utils::DefaultHasher) for a slower hash +/// try [`DefaultHasher`](bevy_platform::hash::DefaultHasher) for a slower hash /// computation but fewer lookup conflicts. #[derive(Debug, Default)] pub struct EntityHasher { diff --git a/crates/bevy_ecs/src/entity/hash_map.rs b/crates/bevy_ecs/src/entity/hash_map.rs index 20ec6767baa46..d83ea7bae1dcc 100644 --- a/crates/bevy_ecs/src/entity/hash_map.rs +++ b/crates/bevy_ecs/src/entity/hash_map.rs @@ -1,3 +1,7 @@ +//! Contains the [`EntityHashMap`] type, a [`HashMap`] pre-configured to use [`EntityHash`] hashing. +//! +//! This module is a lightweight wrapper around Bevy's [`HashMap`] that is more performant for [`Entity`] keys. + use core::{ fmt::{self, Debug, Formatter}, iter::FusedIterator, @@ -5,14 +9,15 @@ use core::{ ops::{Deref, DerefMut, Index}, }; +use bevy_platform::collections::hash_map::{self, HashMap}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -use bevy_utils::hashbrown::hash_map::{self, HashMap}; -use super::{Entity, EntityHash, EntitySetIterator, TrustedEntityBorrow}; +use super::{Entity, EntityEquivalent, EntityHash, EntitySetIterator}; /// A [`HashMap`] pre-configured to use [`EntityHash`] hashing. #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "serialize", derive(serde::Deserialize, serde::Serialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct EntityHashMap(pub(crate) HashMap); @@ -22,7 +27,7 @@ impl EntityHashMap { /// Equivalent to [`HashMap::with_hasher(EntityHash)`]. 
/// /// [`HashMap::with_hasher(EntityHash)`]: HashMap::with_hasher - pub fn new() -> Self { + pub const fn new() -> Self { Self(HashMap::with_hasher(EntityHash)) } @@ -108,7 +113,7 @@ impl FromIterator<(Entity, V)> for EntityHashMap { } } -impl Index<&Q> for EntityHashMap { +impl Index<&Q> for EntityHashMap { type Output = V; fn index(&self, key: &Q) -> &V { self.0.index(&key.entity()) @@ -145,7 +150,7 @@ impl IntoIterator for EntityHashMap { /// An iterator over the keys of a [`EntityHashMap`] in arbitrary order. /// The iterator element type is `&'a Entity`. /// -/// /// This struct is created by the [`keys`] method on [`EntityHashMap`]. See its documentation for more. +/// This struct is created by the [`keys`] method on [`EntityHashMap`]. See its documentation for more. /// /// [`keys`]: EntityHashMap::keys pub struct Keys<'a, V, S = EntityHash>(hash_map::Keys<'a, Entity, V>, PhantomData); @@ -165,12 +170,6 @@ impl<'a, V> Deref for Keys<'a, V> { } } -impl DerefMut for Keys<'_, V> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - impl<'a, V> Iterator for Keys<'a, V> { type Item = &'a Entity; @@ -229,12 +228,6 @@ impl Deref for IntoKeys { } } -impl DerefMut for IntoKeys { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - impl Iterator for IntoKeys { type Item = Entity; diff --git a/crates/bevy_ecs/src/entity/hash_set.rs b/crates/bevy_ecs/src/entity/hash_set.rs index 12538d873b5c9..7fd1ae9011273 100644 --- a/crates/bevy_ecs/src/entity/hash_set.rs +++ b/crates/bevy_ecs/src/entity/hash_set.rs @@ -1,3 +1,7 @@ +//! Contains the [`EntityHashSet`] type, a [`HashSet`] pre-configured to use [`EntityHash`] hashing. +//! +//! This module is a lightweight wrapper around Bevy's [`HashSet`] that is more performant for [`Entity`] keys. 
+ use core::{ fmt::{self, Debug, Formatter}, iter::FusedIterator, @@ -8,14 +12,15 @@ use core::{ }, }; +use bevy_platform::collections::hash_set::{self, HashSet}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -use bevy_utils::hashbrown::hash_set::{self, HashSet}; -use super::{Entity, EntityHash, EntitySetIterator}; +use super::{Entity, EntityHash, EntitySet, EntitySetIterator, FromEntitySetIterator}; /// A [`HashSet`] pre-configured to use [`EntityHash`] hashing. #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "serialize", derive(serde::Deserialize, serde::Serialize))] #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct EntityHashSet(pub(crate) HashSet); @@ -25,7 +30,7 @@ impl EntityHashSet { /// Equivalent to [`HashSet::with_hasher(EntityHash)`]. /// /// [`HashSet::with_hasher(EntityHash)`]: HashSet::with_hasher - pub fn new() -> Self { + pub const fn new() -> Self { Self(HashSet::with_hasher(EntityHash)) } @@ -38,6 +43,16 @@ impl EntityHashSet { Self(HashSet::with_capacity_and_hasher(n, EntityHash)) } + /// Returns the number of elements in the set. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the set contains no elements. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + /// Returns the inner [`HashSet`]. pub fn into_inner(self) -> HashSet { self.0 @@ -181,6 +196,20 @@ impl FromIterator for EntityHashSet { } } +impl FromEntitySetIterator for EntityHashSet { + fn from_entity_set_iter>(set_iter: I) -> Self { + let iter = set_iter.into_iter(); + let set = EntityHashSet::with_capacity(iter.size_hint().0); + iter.fold(set, |mut set, e| { + // SAFETY: Every element in self is unique. + unsafe { + set.insert_unique_unchecked(e); + } + set + }) + } +} + /// An iterator over the items of an [`EntityHashSet`]. /// /// This struct is created by the [`iter`] method on [`EntityHashSet`]. See its documentation for more. 
@@ -203,12 +232,6 @@ impl<'a> Deref for Iter<'a> { } } -impl DerefMut for Iter<'_> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - impl<'a> Iterator for Iter<'a> { type Item = &'a Entity; @@ -264,12 +287,6 @@ impl Deref for IntoIter { } } -impl DerefMut for IntoIter { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - impl Iterator for IntoIter { type Item = Entity; @@ -322,12 +339,6 @@ impl<'a> Deref for Drain<'a> { } } -impl DerefMut for Drain<'_> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - impl<'a> Iterator for Drain<'a> { type Item = Entity; @@ -377,12 +388,6 @@ impl<'a, F: FnMut(&Entity) -> bool> Deref for ExtractIf<'a, F> { } } -impl bool> DerefMut for ExtractIf<'_, F> { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - impl<'a, F: FnMut(&Entity) -> bool> Iterator for ExtractIf<'a, F> { type Item = Entity; diff --git a/crates/bevy_ecs/src/entity/index_map.rs b/crates/bevy_ecs/src/entity/index_map.rs new file mode 100644 index 0000000000000..6f6cd1bb47af3 --- /dev/null +++ b/crates/bevy_ecs/src/entity/index_map.rs @@ -0,0 +1,1218 @@ +//! Contains the [`EntityIndexMap`] type, an [`IndexMap`] pre-configured to use [`EntityHash`] hashing. +//! +//! This module is a lightweight wrapper around `indexmap`'s [`IndexMap`] that is more performant for [`Entity`] keys. + +use core::{ + cmp::Ordering, + fmt::{self, Debug, Formatter}, + hash::{BuildHasher, Hash, Hasher}, + iter::FusedIterator, + marker::PhantomData, + ops::{ + Bound, Deref, DerefMut, Index, IndexMut, Range, RangeBounds, RangeFrom, RangeFull, + RangeInclusive, RangeTo, RangeToInclusive, + }, + ptr, +}; + +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::Reflect; +use indexmap::map::{self, IndexMap, IntoValues, ValuesMut}; + +use super::{Entity, EntityEquivalent, EntityHash, EntitySetIterator}; + +use bevy_platform::prelude::Box; + +/// A [`IndexMap`] pre-configured to use [`EntityHash`] hashing. 
+#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "serialize", derive(serde::Deserialize, serde::Serialize))] +#[derive(Debug, Clone)] +pub struct EntityIndexMap(pub(crate) IndexMap); + +impl EntityIndexMap { + /// Creates an empty `EntityIndexMap`. + /// + /// Equivalent to [`IndexMap::with_hasher(EntityHash)`]. + /// + /// [`IndexMap::with_hasher(EntityHash)`]: IndexMap::with_hasher + pub const fn new() -> Self { + Self(IndexMap::with_hasher(EntityHash)) + } + + /// Creates an empty `EntityIndexMap` with the specified capacity. + /// + /// Equivalent to [`IndexMap::with_capacity_and_hasher(n, EntityHash)`]. + /// + /// [`IndexMap:with_capacity_and_hasher(n, EntityHash)`]: IndexMap::with_capacity_and_hasher + pub fn with_capacity(n: usize) -> Self { + Self(IndexMap::with_capacity_and_hasher(n, EntityHash)) + } + + /// Returns the inner [`IndexMap`]. + pub fn into_inner(self) -> IndexMap { + self.0 + } + + /// Returns a slice of all the key-value pairs in the map. + /// + /// Equivalent to [`IndexMap::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice of all the key-value pairs in the map. + /// + /// Equivalent to [`IndexMap::as_mut_slice`]. + pub fn as_mut_slice(&mut self) -> &mut Slice { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Slice::from_slice_unchecked_mut(self.0.as_mut_slice()) } + } + + /// Converts into a boxed slice of all the key-value pairs in the map. + /// + /// Equivalent to [`IndexMap::into_boxed_slice`]. + pub fn into_boxed_slice(self) -> Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Slice::from_boxed_slice_unchecked(self.0.into_boxed_slice()) } + } + + /// Returns a slice of key-value pairs in the given range of indices. 
+ /// + /// Equivalent to [`IndexMap::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Slice> { + self.0.get_range(range).map(|slice| + // SAFETY: EntityIndexSetSlice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_slice_unchecked(slice) }) + } + + /// Returns a mutable slice of key-value pairs in the given range of indices. + /// + /// Equivalent to [`IndexMap::get_range_mut`]. + pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Slice> { + self.0.get_range_mut(range).map(|slice| + // SAFETY: EntityIndexSetSlice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_slice_unchecked_mut(slice) }) + } + + /// Return an iterator over the key-value pairs of the map, in their order. + /// + /// Equivalent to [`IndexMap::iter`]. + pub fn iter(&self) -> Iter<'_, V> { + Iter(self.0.iter(), PhantomData) + } + + /// Return a mutable iterator over the key-value pairs of the map, in their order. + /// + /// Equivalent to [`IndexMap::iter_mut`]. + pub fn iter_mut(&mut self) -> IterMut<'_, V> { + IterMut(self.0.iter_mut(), PhantomData) + } + + /// Clears the `IndexMap` in the given index range, returning those + /// key-value pairs as a drain iterator. + /// + /// Equivalent to [`IndexMap::drain`]. + pub fn drain>(&mut self, range: R) -> Drain<'_, V> { + Drain(self.0.drain(range), PhantomData) + } + + /// Return an iterator over the keys of the map, in their order. + /// + /// Equivalent to [`IndexMap::keys`]. + pub fn keys(&self) -> Keys<'_, V> { + Keys(self.0.keys(), PhantomData) + } + + /// Return an owning iterator over the keys of the map, in their order. + /// + /// Equivalent to [`IndexMap::into_keys`]. 
+ pub fn into_keys(self) -> IntoKeys { + IntoKeys(self.0.into_keys(), PhantomData) + } +} + +impl Default for EntityIndexMap { + fn default() -> Self { + Self(Default::default()) + } +} + +impl Deref for EntityIndexMap { + type Target = IndexMap; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for EntityIndexMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<'a, V: Copy> Extend<(&'a Entity, &'a V)> for EntityIndexMap { + fn extend>(&mut self, iter: T) { + self.0.extend(iter); + } +} + +impl Extend<(Entity, V)> for EntityIndexMap { + fn extend>(&mut self, iter: T) { + self.0.extend(iter); + } +} + +impl From<[(Entity, V); N]> for EntityIndexMap { + fn from(value: [(Entity, V); N]) -> Self { + Self(IndexMap::from_iter(value)) + } +} + +impl FromIterator<(Entity, V)> for EntityIndexMap { + fn from_iter>(iterable: I) -> Self { + Self(IndexMap::from_iter(iterable)) + } +} + +impl Index<&Q> for EntityIndexMap { + type Output = V; + fn index(&self, key: &Q) -> &V { + self.0.index(&key.entity()) + } +} + +impl Index<(Bound, Bound)> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: (Bound, Bound)) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: Range) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeFrom) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeFull) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. 
+ unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeInclusive) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeTo) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexMap { + type Output = Slice; + fn index(&self, key: RangeToInclusive) -> &Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for EntityIndexMap { + type Output = V; + fn index(&self, key: usize) -> &V { + self.0.index(key) + } +} + +impl IndexMut<&Q> for EntityIndexMap { + fn index_mut(&mut self, key: &Q) -> &mut V { + self.0.index_mut(&key.entity()) + } +} + +impl IndexMut<(Bound, Bound)> for EntityIndexMap { + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: Range) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeFrom) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut for EntityIndexMap { + fn index_mut(&mut self, key: RangeFull) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. 
+ unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeTo) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for EntityIndexMap { + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self::Output { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut for EntityIndexMap { + fn index_mut(&mut self, key: usize) -> &mut V { + self.0.index_mut(key) + } +} + +impl<'a, V> IntoIterator for &'a EntityIndexMap { + type Item = (&'a Entity, &'a V); + type IntoIter = Iter<'a, V>; + + fn into_iter(self) -> Self::IntoIter { + Iter(self.0.iter(), PhantomData) + } +} + +impl<'a, V> IntoIterator for &'a mut EntityIndexMap { + type Item = (&'a Entity, &'a mut V); + type IntoIter = IterMut<'a, V>; + + fn into_iter(self) -> Self::IntoIter { + IterMut(self.0.iter_mut(), PhantomData) + } +} + +impl IntoIterator for EntityIndexMap { + type Item = (Entity, V); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter(self.0.into_iter(), PhantomData) + } +} + +impl PartialEq> for EntityIndexMap +where + V1: PartialEq, + S2: BuildHasher, +{ + fn eq(&self, other: &IndexMap) -> bool { + self.0.eq(other) + } +} + +impl PartialEq> for EntityIndexMap +where + V1: PartialEq, +{ + fn eq(&self, other: &EntityIndexMap) -> bool { + self.0.eq(other) + } +} + +impl Eq for EntityIndexMap {} + +/// A dynamically-sized slice of key-value pairs in an [`EntityIndexMap`]. 
+/// +/// Equivalent to an [`indexmap::map::Slice`] whose source [`IndexMap`] +/// uses [`EntityHash`]. +#[repr(transparent)] +pub struct Slice(PhantomData, map::Slice); + +impl Slice { + /// Returns an empty slice. + /// + /// Equivalent to [`map::Slice::new`]. + pub const fn new<'a>() -> &'a Self { + // SAFETY: The source slice is empty. + unsafe { Self::from_slice_unchecked(map::Slice::new()) } + } + + /// Returns an empty mutable slice. + /// + /// Equivalent to [`map::Slice::new_mut`]. + pub fn new_mut<'a>() -> &'a mut Self { + // SAFETY: The source slice is empty. + unsafe { Self::from_slice_unchecked_mut(map::Slice::new_mut()) } + } + + /// Constructs a [`entity::index_map::Slice`] from a [`indexmap::map::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexMap`] using [`EntityHash`]. + /// + /// [`entity::index_map::Slice`]: `crate::entity::index_map::Slice` + pub const unsafe fn from_slice_unchecked(slice: &map::Slice) -> &Self { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { &*(ptr::from_ref(slice) as *const Self) } + } + + /// Constructs a [`entity::index_map::Slice`] from a [`indexmap::map::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexMap`] using [`EntityHash`]. + /// + /// [`entity::index_map::Slice`]: `crate::entity::index_map::Slice` + pub const unsafe fn from_slice_unchecked_mut(slice: &mut map::Slice) -> &mut Self { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { &mut *(ptr::from_mut(slice) as *mut Self) } + } + + /// Casts `self` to the inner slice. + pub const fn as_inner(&self) -> &map::Slice { + &self.1 + } + + /// Constructs a boxed [`entity::index_map::Slice`] from a boxed [`indexmap::map::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexMap`] using [`EntityHash`]. 
+ /// + /// [`entity::index_map::Slice`]: `crate::entity::index_map::Slice` + pub unsafe fn from_boxed_slice_unchecked(slice: Box>) -> Box { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Box::from_raw(Box::into_raw(slice) as *mut Self) } + } + + /// Casts a reference to `self` to the inner slice. + #[expect( + clippy::borrowed_box, + reason = "We wish to access the Box API of the inner type, without consuming it." + )] + pub fn as_boxed_inner(self: &Box) -> &Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { &*(ptr::from_ref(self).cast::>>()) } + } + + /// Casts `self` to the inner slice. + pub fn into_boxed_inner(self: Box) -> Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::map::Slice. + unsafe { Box::from_raw(Box::into_raw(self) as *mut map::Slice) } + } + + /// Get a key-value pair by index, with mutable access to the value. + /// + /// Equivalent to [`map::Slice::get_index_mut`]. + pub fn get_index_mut(&mut self, index: usize) -> Option<(&Entity, &mut V)> { + self.1.get_index_mut(index) + } + + /// Returns a slice of key-value pairs in the given range of indices. + /// + /// Equivalent to [`map::Slice::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Self> { + self.1.get_range(range).map(|slice| + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(slice) }) + } + + /// Returns a mutable slice of key-value pairs in the given range of indices. + /// + /// Equivalent to [`map::Slice::get_range_mut`]. + pub fn get_range_mut>(&mut self, range: R) -> Option<&mut Self> { + self.1.get_range_mut(range).map(|slice| + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(slice) }) + } + + /// Get the first key-value pair, with mutable access to the value. + /// + /// Equivalent to [`map::Slice::first_mut`]. 
+ pub fn first_mut(&mut self) -> Option<(&Entity, &mut V)> { + self.1.first_mut() + } + + /// Get the last key-value pair, with mutable access to the value. + /// + /// Equivalent to [`map::Slice::last_mut`]. + pub fn last_mut(&mut self) -> Option<(&Entity, &mut V)> { + self.1.last_mut() + } + + /// Divides one slice into two at an index. + /// + /// Equivalent to [`map::Slice::split_at`]. + pub fn split_at(&self, index: usize) -> (&Self, &Self) { + let (slice_1, slice_2) = self.1.split_at(index); + // SAFETY: These are subslices of a valid slice. + unsafe { + ( + Self::from_slice_unchecked(slice_1), + Self::from_slice_unchecked(slice_2), + ) + } + } + + /// Divides one mutable slice into two at an index. + /// + /// Equivalent to [`map::Slice::split_at_mut`]. + pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) { + let (slice_1, slice_2) = self.1.split_at_mut(index); + // SAFETY: These are subslices of a valid slice. + unsafe { + ( + Self::from_slice_unchecked_mut(slice_1), + Self::from_slice_unchecked_mut(slice_2), + ) + } + } + + /// Returns the first key-value pair and the rest of the slice, + /// or `None` if it is empty. + /// + /// Equivalent to [`map::Slice::split_first`]. + pub fn split_first(&self) -> Option<((&Entity, &V), &Self)> { + self.1.split_first().map(|(first, rest)| { + ( + first, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Returns the first key-value pair and the rest of the slice, + /// with mutable access to the value, or `None` if it is empty. + /// + /// Equivalent to [`map::Slice::split_first_mut`]. + pub fn split_first_mut(&mut self) -> Option<((&Entity, &mut V), &mut Self)> { + self.1.split_first_mut().map(|(first, rest)| { + ( + first, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(rest) }, + ) + }) + } + + /// Returns the last key-value pair and the rest of the slice, + /// or `None` if it is empty. 
+ /// + /// Equivalent to [`map::Slice::split_last`]. + pub fn split_last(&self) -> Option<((&Entity, &V), &Self)> { + self.1.split_last().map(|(last, rest)| { + ( + last, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Returns the last key-value pair and the rest of the slice, + /// with mutable access to the value, or `None` if it is empty. + /// + /// Equivalent to [`map::Slice::split_last_mut`]. + pub fn split_last_mut(&mut self) -> Option<((&Entity, &mut V), &mut Self)> { + self.1.split_last_mut().map(|(last, rest)| { + ( + last, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(rest) }, + ) + }) + } + + /// Return an iterator over the key-value pairs of the map slice. + /// + /// Equivalent to [`map::Slice::iter`]. + pub fn iter(&self) -> Iter<'_, V> { + Iter(self.1.iter(), PhantomData) + } + + /// Return an iterator over the key-value pairs of the map slice. + /// + /// Equivalent to [`map::Slice::iter_mut`]. + pub fn iter_mut(&mut self) -> IterMut<'_, V> { + IterMut(self.1.iter_mut(), PhantomData) + } + + /// Return an iterator over the keys of the map slice. + /// + /// Equivalent to [`map::Slice::keys`]. + pub fn keys(&self) -> Keys<'_, V> { + Keys(self.1.keys(), PhantomData) + } + + /// Return an owning iterator over the keys of the map slice. + /// + /// Equivalent to [`map::Slice::into_keys`]. + pub fn into_keys(self: Box) -> IntoKeys { + IntoKeys(self.into_boxed_inner().into_keys(), PhantomData) + } + + /// Return an iterator over mutable references to the the values of the map slice. + /// + /// Equivalent to [`map::Slice::values_mut`]. + pub fn values_mut(&mut self) -> ValuesMut<'_, Entity, V> { + self.1.values_mut() + } + + /// Return an owning iterator over the values of the map slice. + /// + /// Equivalent to [`map::Slice::into_values`]. 
+ pub fn into_values(self: Box) -> IntoValues { + self.into_boxed_inner().into_values() + } +} + +impl Deref for Slice { + type Target = map::Slice; + + fn deref(&self) -> &Self::Target { + &self.1 + } +} + +impl Debug for Slice { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Slice") + .field(&self.0) + .field(&&self.1) + .finish() + } +} + +impl Clone for Box> { + fn clone(&self) -> Self { + // SAFETY: This a clone of a valid slice. + unsafe { Slice::from_boxed_slice_unchecked(self.as_boxed_inner().clone()) } + } +} + +impl Default for &Slice { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_slice_unchecked(<&map::Slice>::default()) } + } +} + +impl Default for &mut Slice { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_slice_unchecked_mut(<&mut map::Slice>::default()) } + } +} + +impl Default for Box> { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_boxed_slice_unchecked(>>::default()) } + } +} + +impl From<&Slice> for Box> { + fn from(value: &Slice) -> Self { + // SAFETY: This slice is a copy of a valid slice. 
+ unsafe { Slice::from_boxed_slice_unchecked(value.1.into()) } + } +} + +impl Hash for Slice { + fn hash(&self, state: &mut H) { + self.1.hash(state); + } +} + +impl<'a, V> IntoIterator for &'a Slice { + type Item = (&'a Entity, &'a V); + type IntoIter = Iter<'a, V>; + + fn into_iter(self) -> Self::IntoIter { + Iter(self.1.iter(), PhantomData) + } +} + +impl<'a, V> IntoIterator for &'a mut Slice { + type Item = (&'a Entity, &'a mut V); + type IntoIter = IterMut<'a, V>; + + fn into_iter(self) -> Self::IntoIter { + IterMut(self.1.iter_mut(), PhantomData) + } +} + +impl IntoIterator for Box> { + type Item = (Entity, V); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter(self.into_boxed_inner().into_iter(), PhantomData) + } +} + +impl PartialOrd for Slice { + fn partial_cmp(&self, other: &Self) -> Option { + self.1.partial_cmp(&other.1) + } +} + +impl Ord for Slice { + fn cmp(&self, other: &Self) -> Ordering { + self.1.cmp(other) + } +} + +impl PartialEq for Slice { + fn eq(&self, other: &Self) -> bool { + self.1 == other.1 + } +} + +impl Eq for Slice {} + +impl Index<(Bound, Bound)> for Slice { + type Output = Self; + fn index(&self, key: (Bound, Bound)) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: Range) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeFrom) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = Self; + fn index(&self, key: RangeFull) -> &Self { + // SAFETY: This a subslice of a valid slice. 
+ unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeTo) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeToInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = V; + fn index(&self, key: usize) -> &V { + self.1.index(key) + } +} + +impl IndexMut<(Bound, Bound)> for Slice { + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: Range) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeFrom) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut for Slice { + fn index_mut(&mut self, key: RangeFull) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeTo) -> &mut Self { + // SAFETY: This a subslice of a valid slice. 
+ unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut> for Slice { + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked_mut(self.1.index_mut(key)) } + } +} + +impl IndexMut for Slice { + fn index_mut(&mut self, key: usize) -> &mut V { + self.1.index_mut(key) + } +} + +/// An iterator over the entries of an [`EntityIndexMap`]. +/// +/// This `struct` is created by the [`EntityIndexMap::iter`] method. +/// See its documentation for more. +pub struct Iter<'a, V, S = EntityHash>(map::Iter<'a, Entity, V>, PhantomData); + +impl<'a, V> Iter<'a, V> { + /// Returns the inner [`Iter`](map::Iter). + pub fn into_inner(self) -> map::Iter<'a, Entity, V> { + self.0 + } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::Iter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } +} + +impl<'a, V> Deref for Iter<'a, V> { + type Target = map::Iter<'a, Entity, V>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a, V> Iterator for Iter<'a, V> { + type Item = (&'a Entity, &'a V); + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for Iter<'_, V> { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for Iter<'_, V> {} + +impl FusedIterator for Iter<'_, V> {} + +impl Clone for Iter<'_, V> { + fn clone(&self) -> Self { + Self(self.0.clone(), PhantomData) + } +} + +impl Debug for Iter<'_, V> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Iter").field(&self.0).field(&self.1).finish() + } +} + +impl Default for Iter<'_, V> { + fn default() -> Self { + Self(Default::default(), PhantomData) + } +} + +/// A mutable iterator over the entries of an [`EntityIndexMap`]. 
+/// +/// This `struct` is created by the [`EntityIndexMap::iter_mut`] method. +/// See its documentation for more. +pub struct IterMut<'a, V, S = EntityHash>(map::IterMut<'a, Entity, V>, PhantomData); + +impl<'a, V> IterMut<'a, V> { + /// Returns the inner [`IterMut`](map::IterMut). + pub fn into_inner(self) -> map::IterMut<'a, Entity, V> { + self.0 + } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IterMut::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IterMut::into_slice`]. + pub fn into_slice(self) -> &'a mut Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.into_slice()) } + } +} + +impl<'a, V> Deref for IterMut<'a, V> { + type Target = map::IterMut<'a, Entity, V>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a, V> Iterator for IterMut<'a, V> { + type Item = (&'a Entity, &'a mut V); + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for IterMut<'_, V> { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for IterMut<'_, V> {} + +impl FusedIterator for IterMut<'_, V> {} + +impl Debug for IterMut<'_, V> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("IterMut") + .field(&self.0) + .field(&self.1) + .finish() + } +} + +impl Default for IterMut<'_, V> { + fn default() -> Self { + Self(Default::default(), PhantomData) + } +} + +/// An owning iterator over the entries of an [`IndexMap`]. +/// +/// This `struct` is created by the [`IndexMap::into_iter`] method +/// (provided by the [`IntoIterator`] trait). See its documentation for more. 
+pub struct IntoIter(map::IntoIter, PhantomData); + +impl IntoIter { + /// Returns the inner [`IntoIter`](map::IntoIter). + pub fn into_inner(self) -> map::IntoIter { + self.0 + } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IntoIter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::IntoIter::as_mut_slice`]. + pub fn as_mut_slice(&mut self) -> &mut Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked_mut(self.0.as_mut_slice()) } + } +} + +impl Deref for IntoIter { + type Target = map::IntoIter; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Iterator for IntoIter { + type Item = (Entity, V); + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for IntoIter { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for IntoIter {} + +impl FusedIterator for IntoIter {} + +impl Clone for IntoIter { + fn clone(&self) -> Self { + Self(self.0.clone(), PhantomData) + } +} + +impl Debug for IntoIter { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("IntoIter") + .field(&self.0) + .field(&self.1) + .finish() + } +} + +impl Default for IntoIter { + fn default() -> Self { + Self(Default::default(), PhantomData) + } +} + +/// A draining iterator over the entries of an [`EntityIndexMap`]. +/// +/// This `struct` is created by the [`EntityIndexMap::drain`] method. +/// See its documentation for more. +pub struct Drain<'a, V, S = EntityHash>(map::Drain<'a, Entity, V>, PhantomData); + +impl<'a, V> Drain<'a, V> { + /// Returns the inner [`Drain`](map::Drain). 
+ pub fn into_inner(self) -> map::Drain<'a, Entity, V> { + self.0 + } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`map::Drain::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexMap uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } +} + +impl<'a, V> Deref for Drain<'a, V> { + type Target = map::Drain<'a, Entity, V>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Iterator for Drain<'_, V> { + type Item = (Entity, V); + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for Drain<'_, V> { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for Drain<'_, V> {} + +impl FusedIterator for Drain<'_, V> {} + +impl Debug for Drain<'_, V> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Drain") + .field(&self.0) + .field(&self.1) + .finish() + } +} + +/// An iterator over the keys of an [`EntityIndexMap`]. +/// +/// This `struct` is created by the [`EntityIndexMap::keys`] method. +/// See its documentation for more. +pub struct Keys<'a, V, S = EntityHash>(map::Keys<'a, Entity, V>, PhantomData); + +impl<'a, V> Keys<'a, V> { + /// Returns the inner [`Keys`](map::Keys). 
+ pub fn into_inner(self) -> map::Keys<'a, Entity, V> { + self.0 + } +} + +impl<'a, V, S> Deref for Keys<'a, V, S> { + type Target = map::Keys<'a, Entity, V>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a, V> Iterator for Keys<'a, V> { + type Item = &'a Entity; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for Keys<'_, V> { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for Keys<'_, V> {} + +impl FusedIterator for Keys<'_, V> {} + +impl Index for Keys<'_, V> { + type Output = Entity; + + fn index(&self, index: usize) -> &Entity { + self.0.index(index) + } +} + +impl Clone for Keys<'_, V> { + fn clone(&self) -> Self { + Self(self.0.clone(), PhantomData) + } +} + +impl Debug for Keys<'_, V> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Keys").field(&self.0).field(&self.1).finish() + } +} + +impl Default for Keys<'_, V> { + fn default() -> Self { + Self(Default::default(), PhantomData) + } +} + +// SAFETY: Keys stems from a correctly behaving `IndexMap`. +unsafe impl EntitySetIterator for Keys<'_, V> {} + +/// An owning iterator over the keys of an [`EntityIndexMap`]. +/// +/// This `struct` is created by the [`EntityIndexMap::into_keys`] method. +/// See its documentation for more. +pub struct IntoKeys(map::IntoKeys, PhantomData); + +impl IntoKeys { + /// Returns the inner [`IntoKeys`](map::IntoKeys). 
+ pub fn into_inner(self) -> map::IntoKeys { + self.0 + } +} + +impl Deref for IntoKeys { + type Target = map::IntoKeys; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Iterator for IntoKeys { + type Item = Entity; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for IntoKeys { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for IntoKeys {} + +impl FusedIterator for IntoKeys {} + +impl Debug for IntoKeys { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("IntoKeys") + .field(&self.0) + .field(&self.1) + .finish() + } +} + +impl Default for IntoKeys { + fn default() -> Self { + Self(Default::default(), PhantomData) + } +} + +// SAFETY: IntoKeys stems from a correctly behaving `IndexMap`. +unsafe impl EntitySetIterator for IntoKeys {} diff --git a/crates/bevy_ecs/src/entity/index_set.rs b/crates/bevy_ecs/src/entity/index_set.rs new file mode 100644 index 0000000000000..42f420a211a23 --- /dev/null +++ b/crates/bevy_ecs/src/entity/index_set.rs @@ -0,0 +1,766 @@ +//! Contains the [`EntityIndexSet`] type, a [`IndexSet`] pre-configured to use [`EntityHash`] hashing. +//! +//! This module is a lightweight wrapper around `indexmap`'ss [`IndexSet`] that is more performant for [`Entity`] keys. + +use core::{ + cmp::Ordering, + fmt::{self, Debug, Formatter}, + hash::BuildHasher, + hash::{Hash, Hasher}, + iter::FusedIterator, + marker::PhantomData, + ops::{ + BitAnd, BitOr, BitXor, Bound, Deref, DerefMut, Index, Range, RangeBounds, RangeFrom, + RangeFull, RangeInclusive, RangeTo, RangeToInclusive, Sub, + }, + ptr, +}; + +use indexmap::set::{self, IndexSet}; + +use super::{Entity, EntityHash, EntitySetIterator}; + +use bevy_platform::prelude::Box; + +/// An [`IndexSet`] pre-configured to use [`EntityHash`] hashing. 
+#[cfg_attr(feature = "serialize", derive(serde::Deserialize, serde::Serialize))] +#[derive(Debug, Clone, Default)] +pub struct EntityIndexSet(pub(crate) IndexSet); + +impl EntityIndexSet { + /// Creates an empty `EntityIndexSet`. + /// + /// Equivalent to [`IndexSet::with_hasher(EntityHash)`]. + /// + /// [`IndexSet::with_hasher(EntityHash)`]: IndexSet::with_hasher + pub const fn new() -> Self { + Self(IndexSet::with_hasher(EntityHash)) + } + + /// Creates an empty `EntityIndexSet` with the specified capacity. + /// + /// Equivalent to [`IndexSet::with_capacity_and_hasher(n, EntityHash)`]. + /// + /// [`IndexSet::with_capacity_and_hasher(n, EntityHash)`]: IndexSet::with_capacity_and_hasher + pub fn with_capacity(n: usize) -> Self { + Self(IndexSet::with_capacity_and_hasher(n, EntityHash)) + } + + /// Returns the inner [`IndexSet`]. + pub fn into_inner(self) -> IndexSet { + self.0 + } + + /// Returns a slice of all the values in the set. + /// + /// Equivalent to [`IndexSet::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Clears the `IndexSet` in the given index range, returning those values + /// as a drain iterator. + /// + /// Equivalent to [`IndexSet::drain`]. + pub fn drain>(&mut self, range: R) -> Drain<'_> { + Drain(self.0.drain(range), PhantomData) + } + + /// Returns a slice of values in the given range of indices. + /// + /// Equivalent to [`IndexSet::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Slice> { + self.0.get_range(range).map(|slice| + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(slice) }) + } + + /// Return an iterator over the values of the set, in their order. + /// + /// Equivalent to [`IndexSet::iter`]. + pub fn iter(&self) -> Iter<'_> { + Iter(self.0.iter(), PhantomData) + } + + /// Converts into a boxed slice of all the values in the set. 
+ /// + /// Equivalent to [`IndexSet::into_boxed_slice`]. + pub fn into_boxed_slice(self) -> Box { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Slice::from_boxed_slice_unchecked(self.0.into_boxed_slice()) } + } +} + +impl Deref for EntityIndexSet { + type Target = IndexSet; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for EntityIndexSet { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl<'a> IntoIterator for &'a EntityIndexSet { + type Item = &'a Entity; + + type IntoIter = Iter<'a>; + + fn into_iter(self) -> Self::IntoIter { + Iter((&self.0).into_iter(), PhantomData) + } +} + +impl IntoIterator for EntityIndexSet { + type Item = Entity; + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter(self.0.into_iter(), PhantomData) + } +} + +impl BitAnd for &EntityIndexSet { + type Output = EntityIndexSet; + + fn bitand(self, rhs: Self) -> Self::Output { + EntityIndexSet(self.0.bitand(&rhs.0)) + } +} + +impl BitOr for &EntityIndexSet { + type Output = EntityIndexSet; + + fn bitor(self, rhs: Self) -> Self::Output { + EntityIndexSet(self.0.bitor(&rhs.0)) + } +} + +impl BitXor for &EntityIndexSet { + type Output = EntityIndexSet; + + fn bitxor(self, rhs: Self) -> Self::Output { + EntityIndexSet(self.0.bitxor(&rhs.0)) + } +} + +impl Sub for &EntityIndexSet { + type Output = EntityIndexSet; + + fn sub(self, rhs: Self) -> Self::Output { + EntityIndexSet(self.0.sub(&rhs.0)) + } +} + +impl<'a> Extend<&'a Entity> for EntityIndexSet { + fn extend>(&mut self, iter: T) { + self.0.extend(iter); + } +} + +impl Extend for EntityIndexSet { + fn extend>(&mut self, iter: T) { + self.0.extend(iter); + } +} + +impl From<[Entity; N]> for EntityIndexSet { + fn from(value: [Entity; N]) -> Self { + Self(IndexSet::from_iter(value)) + } +} + +impl FromIterator for EntityIndexSet { + fn from_iter>(iterable: I) -> Self { + Self(IndexSet::from_iter(iterable)) + } +} + +impl 
PartialEq> for EntityIndexSet +where + S2: BuildHasher, +{ + fn eq(&self, other: &IndexSet) -> bool { + self.0.eq(other) + } +} + +impl PartialEq for EntityIndexSet { + fn eq(&self, other: &EntityIndexSet) -> bool { + self.0.eq(other) + } +} + +impl Eq for EntityIndexSet {} + +impl Index<(Bound, Bound)> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: (Bound, Bound)) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: Range) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeFrom) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeFull) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeInclusive) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeTo) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for EntityIndexSet { + type Output = Slice; + fn index(&self, key: RangeToInclusive) -> &Self::Output { + // SAFETY: The source IndexSet uses EntityHash. 
+ unsafe { Slice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for EntityIndexSet { + type Output = Entity; + fn index(&self, key: usize) -> &Entity { + self.0.index(key) + } +} + +/// A dynamically-sized slice of values in an [`EntityIndexSet`]. +/// +/// Equivalent to an [`indexmap::set::Slice`] whose source [`IndexSet`] +/// uses [`EntityHash`]. +#[repr(transparent)] +pub struct Slice(PhantomData, set::Slice); + +impl Slice { + /// Returns an empty slice. + /// + /// Equivalent to [`set::Slice::new`]. + pub const fn new<'a>() -> &'a Self { + // SAFETY: The source slice is empty. + unsafe { Self::from_slice_unchecked(set::Slice::new()) } + } + + /// Constructs a [`entity::index_set::Slice`] from a [`indexmap::set::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexSet`] using [`EntityHash`]. + /// + /// [`entity::index_set::Slice`]: `crate::entity::index_set::Slice` + pub const unsafe fn from_slice_unchecked(slice: &set::Slice) -> &Self { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { &*(ptr::from_ref(slice) as *const Self) } + } + + /// Constructs a [`entity::index_set::Slice`] from a [`indexmap::set::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexSet`] using [`EntityHash`]. + /// + /// [`entity::index_set::Slice`]: `crate::entity::index_set::Slice` + pub const unsafe fn from_slice_unchecked_mut(slice: &mut set::Slice) -> &mut Self { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { &mut *(ptr::from_mut(slice) as *mut Self) } + } + + /// Casts `self` to the inner slice. + pub const fn as_inner(&self) -> &set::Slice { + &self.1 + } + + /// Constructs a boxed [`entity::index_set::Slice`] from a boxed [`indexmap::set::Slice`] unsafely. + /// + /// # Safety + /// + /// `slice` must stem from an [`IndexSet`] using [`EntityHash`]. 
+ /// + /// [`entity::index_set::Slice`]: `crate::entity::index_set::Slice` + pub unsafe fn from_boxed_slice_unchecked(slice: Box>) -> Box { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Box::from_raw(Box::into_raw(slice) as *mut Self) } + } + + /// Casts a reference to `self` to the inner slice. + #[expect( + clippy::borrowed_box, + reason = "We wish to access the Box API of the inner type, without consuming it." + )] + pub fn as_boxed_inner(self: &Box) -> &Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { &*(ptr::from_ref(self).cast::>>()) } + } + + /// Casts `self` to the inner slice. + pub fn into_boxed_inner(self: Box) -> Box> { + // SAFETY: Slice is a transparent wrapper around indexmap::set::Slice. + unsafe { Box::from_raw(Box::into_raw(self) as *mut set::Slice) } + } + + /// Returns a slice of values in the given range of indices. + /// + /// Equivalent to [`set::Slice::get_range`]. + pub fn get_range>(&self, range: R) -> Option<&Self> { + self.1.get_range(range).map(|slice| + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(slice) }) + } + + /// Divides one slice into two at an index. + /// + /// Equivalent to [`set::Slice::split_at`]. + pub fn split_at(&self, index: usize) -> (&Self, &Self) { + let (slice_1, slice_2) = self.1.split_at(index); + // SAFETY: These are subslices of a valid slice. + unsafe { + ( + Self::from_slice_unchecked(slice_1), + Self::from_slice_unchecked(slice_2), + ) + } + } + + /// Returns the first value and the rest of the slice, + /// or `None` if it is empty. + /// + /// Equivalent to [`set::Slice::split_first`]. + pub fn split_first(&self) -> Option<(&Entity, &Self)> { + self.1.split_first().map(|(first, rest)| { + ( + first, + // SAFETY: This a subslice of a valid slice. 
+ unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Returns the last value and the rest of the slice, + /// or `None` if it is empty. + /// + /// Equivalent to [`set::Slice::split_last`]. + pub fn split_last(&self) -> Option<(&Entity, &Self)> { + self.1.split_last().map(|(last, rest)| { + ( + last, + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(rest) }, + ) + }) + } + + /// Return an iterator over the values of the set slice. + /// + /// Equivalent to [`set::Slice::iter`]. + pub fn iter(&self) -> Iter<'_> { + Iter(self.1.iter(), PhantomData) + } +} + +impl Deref for Slice { + type Target = set::Slice; + + fn deref(&self) -> &Self::Target { + &self.1 + } +} + +impl<'a> IntoIterator for &'a Slice { + type IntoIter = Iter<'a>; + type Item = &'a Entity; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for Box { + type IntoIter = IntoIter; + type Item = Entity; + + fn into_iter(self) -> Self::IntoIter { + IntoIter(self.into_boxed_inner().into_iter(), PhantomData) + } +} + +impl Clone for Box { + fn clone(&self) -> Self { + // SAFETY: This is a clone of a valid slice. + unsafe { Slice::from_boxed_slice_unchecked(self.as_boxed_inner().clone()) } + } +} + +impl Default for &Slice { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_slice_unchecked(<&set::Slice>::default()) } + } +} + +impl Default for Box { + fn default() -> Self { + // SAFETY: The source slice is empty. + unsafe { Slice::from_boxed_slice_unchecked(>>::default()) } + } +} + +impl Debug for Slice { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Slice") + .field(&self.0) + .field(&&self.1) + .finish() + } +} + +impl From<&Slice> for Box { + fn from(value: &Slice) -> Self { + // SAFETY: This slice is a copy of a valid slice. 
+ unsafe { Slice::from_boxed_slice_unchecked(value.1.into()) } + } +} + +impl Hash for Slice { + fn hash(&self, state: &mut H) { + self.1.hash(state); + } +} + +impl PartialOrd for Slice { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Slice { + fn cmp(&self, other: &Self) -> Ordering { + self.1.cmp(other) + } +} + +impl PartialEq for Slice { + fn eq(&self, other: &Self) -> bool { + self.1 == other.1 + } +} + +impl Eq for Slice {} + +impl Index<(Bound, Bound)> for Slice { + type Output = Self; + fn index(&self, key: (Bound, Bound)) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: Range) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Slice; + fn index(&self, key: RangeFrom) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = Self; + fn index(&self, key: RangeFull) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeTo) -> &Self { + // SAFETY: This a subslice of a valid slice. + unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index> for Slice { + type Output = Self; + fn index(&self, key: RangeToInclusive) -> &Self { + // SAFETY: This a subslice of a valid slice. 
+ unsafe { Self::from_slice_unchecked(self.1.index(key)) } + } +} + +impl Index for Slice { + type Output = Entity; + fn index(&self, key: usize) -> &Entity { + self.1.index(key) + } +} + +/// An iterator over the items of an [`EntityIndexSet`]. +/// +/// This struct is created by the [`iter`] method on [`EntityIndexSet`]. See its documentation for more. +/// +/// [`iter`]: EntityIndexSet::iter +pub struct Iter<'a, S = EntityHash>(set::Iter<'a, Entity>, PhantomData); + +impl<'a> Iter<'a> { + /// Returns the inner [`Iter`](set::Iter). + pub fn into_inner(self) -> set::Iter<'a, Entity> { + self.0 + } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`set::Iter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } +} + +impl<'a> Deref for Iter<'a> { + type Target = set::Iter<'a, Entity>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a> Iterator for Iter<'a> { + type Item = &'a Entity; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for Iter<'_> { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for Iter<'_> {} + +impl FusedIterator for Iter<'_> {} + +impl Clone for Iter<'_> { + fn clone(&self) -> Self { + Self(self.0.clone(), PhantomData) + } +} + +impl Debug for Iter<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Iter").field(&self.0).field(&self.1).finish() + } +} + +impl Default for Iter<'_> { + fn default() -> Self { + Self(Default::default(), PhantomData) + } +} + +// SAFETY: Iter stems from a correctly behaving `IndexSet`. +unsafe impl EntitySetIterator for Iter<'_> {} + +/// Owning iterator over the items of an [`EntityIndexSet`]. +/// +/// This struct is created by the [`into_iter`] method on [`EntityIndexSet`] (provided by the [`IntoIterator`] trait). 
See its documentation for more. +/// +/// [`into_iter`]: EntityIndexSet::into_iter +pub struct IntoIter(set::IntoIter, PhantomData); + +impl IntoIter { + /// Returns the inner [`IntoIter`](set::IntoIter). + pub fn into_inner(self) -> set::IntoIter { + self.0 + } + + /// Returns a slice of the remaining entries in the iterator. + /// + /// Equivalent to [`set::IntoIter::as_slice`]. + pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } +} + +impl Deref for IntoIter { + type Target = set::IntoIter; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Iterator for IntoIter { + type Item = Entity; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for IntoIter { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for IntoIter {} + +impl FusedIterator for IntoIter {} + +impl Clone for IntoIter { + fn clone(&self) -> Self { + Self(self.0.clone(), PhantomData) + } +} + +impl Debug for IntoIter { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("IntoIter") + .field(&self.0) + .field(&self.1) + .finish() + } +} + +impl Default for IntoIter { + fn default() -> Self { + Self(Default::default(), PhantomData) + } +} + +// SAFETY: IntoIter stems from a correctly behaving `IndexSet`. +unsafe impl EntitySetIterator for IntoIter {} + +/// A draining iterator over the items of an [`EntityIndexSet`]. +/// +/// This struct is created by the [`drain`] method on [`EntityIndexSet`]. See its documentation for more. +/// +/// [`drain`]: EntityIndexSet::drain +pub struct Drain<'a, S = EntityHash>(set::Drain<'a, Entity>, PhantomData); + +impl<'a> Drain<'a> { + /// Returns the inner [`Drain`](set::Drain). + pub fn into_inner(self) -> set::Drain<'a, Entity> { + self.0 + } + + /// Returns a slice of the remaining entries in the iterator.$ + /// + /// Equivalent to [`set::Drain::as_slice`]. 
+ pub fn as_slice(&self) -> &Slice { + // SAFETY: The source IndexSet uses EntityHash. + unsafe { Slice::from_slice_unchecked(self.0.as_slice()) } + } +} + +impl<'a> Deref for Drain<'a> { + type Target = set::Drain<'a, Entity>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl<'a> Iterator for Drain<'a> { + type Item = Entity; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl DoubleEndedIterator for Drain<'_> { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for Drain<'_> {} + +impl FusedIterator for Drain<'_> {} + +impl Debug for Drain<'_> { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_tuple("Drain") + .field(&self.0) + .field(&self.1) + .finish() + } +} + +// SAFETY: Drain stems from a correctly behaving `IndexSet`. +unsafe impl EntitySetIterator for Drain<'_> {} + +// SAFETY: Difference stems from two correctly behaving `IndexSet`s. +unsafe impl EntitySetIterator for set::Difference<'_, Entity, EntityHash> {} + +// SAFETY: Intersection stems from two correctly behaving `IndexSet`s. +unsafe impl EntitySetIterator for set::Intersection<'_, Entity, EntityHash> {} + +// SAFETY: SymmetricDifference stems from two correctly behaving `IndexSet`s. +unsafe impl EntitySetIterator for set::SymmetricDifference<'_, Entity, EntityHash, EntityHash> {} + +// SAFETY: Union stems from two correctly behaving `IndexSet`s. +unsafe impl EntitySetIterator for set::Union<'_, Entity, EntityHash> {} + +// SAFETY: Splice stems from a correctly behaving `IndexSet`s. 
+unsafe impl> EntitySetIterator + for set::Splice<'_, I, Entity, EntityHash> +{ +} diff --git a/crates/bevy_ecs/src/entity/map_entities.rs b/crates/bevy_ecs/src/entity/map_entities.rs index 5b0de2359d6b8..c79853f979f7e 100644 --- a/crates/bevy_ecs/src/entity/map_entities.rs +++ b/crates/bevy_ecs/src/entity/map_entities.rs @@ -1,10 +1,15 @@ +pub use bevy_ecs_macros::MapEntities; + use crate::{ - entity::Entity, + entity::{hash_map::EntityHashMap, Entity}, identifier::masks::{IdentifierMask, HIGH_MASK}, world::World, }; -use super::{EntityHashMap, VisitEntitiesMut}; +use alloc::{collections::VecDeque, vec::Vec}; +use bevy_platform::collections::HashSet; +use core::hash::BuildHasher; +use smallvec::SmallVec; /// Operation to map all contained [`Entity`] fields in a type to new values. /// @@ -15,15 +20,11 @@ use super::{EntityHashMap, VisitEntitiesMut}; /// (usually by using an [`EntityHashMap`] between source entities and entities in the /// current world). /// -/// This trait is similar to [`VisitEntitiesMut`]. They differ in that [`VisitEntitiesMut`] operates -/// on `&mut Entity` and allows for in-place modification, while this trait makes no assumption that -/// such in-place modification is occurring, which is impossible for types such as [`HashSet`] -/// and [`EntityHashMap`] which must be rebuilt when their contained [`Entity`]s are remapped. -/// -/// Implementing this trait correctly is required for properly loading components -/// with entity references from scenes. +/// Components use [`Component::map_entities`](crate::component::Component::map_entities) to map +/// entities in the context of scenes and entity cloning, which generally uses [`MapEntities`] internally +/// to map each field (see those docs for usage). 
/// -/// [`HashSet`]: bevy_utils::HashSet +/// [`HashSet`]: bevy_platform::collections::HashSet /// /// ## Example /// @@ -39,8 +40,8 @@ use super::{EntityHashMap, VisitEntitiesMut}; /// /// impl MapEntities for Spring { /// fn map_entities(&mut self, entity_mapper: &mut M) { -/// self.a = entity_mapper.map_entity(self.a); -/// self.b = entity_mapper.map_entity(self.b); +/// self.a = entity_mapper.get_mapped(self.a); +/// self.b = entity_mapper.get_mapped(self.b); /// } /// } /// ``` @@ -49,17 +50,51 @@ pub trait MapEntities { /// /// Implementors should look up any and all [`Entity`] values stored within `self` and /// update them to the mapped values via `entity_mapper`. - fn map_entities(&mut self, entity_mapper: &mut M); + fn map_entities(&mut self, entity_mapper: &mut E); } -impl MapEntities for T { - fn map_entities(&mut self, entity_mapper: &mut M) { - self.visit_entities_mut(|entity| { - *entity = entity_mapper.map_entity(*entity); - }); +impl MapEntities for Entity { + fn map_entities(&mut self, entity_mapper: &mut E) { + *self = entity_mapper.get_mapped(*self); } } +impl MapEntities for Option { + fn map_entities(&mut self, entity_mapper: &mut E) { + if let Some(entity) = self { + *entity = entity_mapper.get_mapped(*entity); + } + } +} + +impl MapEntities for HashSet { + fn map_entities(&mut self, entity_mapper: &mut E) { + *self = self.drain().map(|e| entity_mapper.get_mapped(e)).collect(); + } +} +impl MapEntities for Vec { + fn map_entities(&mut self, entity_mapper: &mut E) { + for entity in self.iter_mut() { + *entity = entity_mapper.get_mapped(*entity); + } + } +} + +impl MapEntities for VecDeque { + fn map_entities(&mut self, entity_mapper: &mut E) { + for entity in self.iter_mut() { + *entity = entity_mapper.get_mapped(*entity); + } + } +} + +impl> MapEntities for SmallVec { + fn map_entities(&mut self, entity_mapper: &mut E) { + for entity in self.iter_mut() { + *entity = entity_mapper.get_mapped(*entity); + } + } +} /// An implementor of this 
trait knows how to map an [`Entity`] into another [`Entity`]. /// /// Usually this is done by using an [`EntityHashMap`] to map source entities @@ -67,6 +102,8 @@ impl MapEntities for T { /// /// More generally, this can be used to map [`Entity`] references between any two [`Worlds`](World). /// +/// This is used by [`MapEntities`] implementors. +/// /// ## Example /// /// ``` @@ -80,26 +117,61 @@ impl MapEntities for T { /// // Example implementation of EntityMapper where we map an entity to another entity if it exists /// // in the underlying `EntityHashMap`, otherwise we just return the original entity. /// impl EntityMapper for SimpleEntityMapper { -/// fn map_entity(&mut self, entity: Entity) -> Entity { +/// fn get_mapped(&mut self, entity: Entity) -> Entity { /// self.map.get(&entity).copied().unwrap_or(entity) /// } +/// +/// fn set_mapped(&mut self, source: Entity, target: Entity) { +/// self.map.insert(source, target); +/// } /// } /// ``` pub trait EntityMapper { - /// Map an entity to another entity - fn map_entity(&mut self, entity: Entity) -> Entity; + /// Returns the "target" entity that maps to the given `source`. + fn get_mapped(&mut self, source: Entity) -> Entity; + + /// Maps the `target` entity to the given `source`. For some implementations this might not actually determine the result + /// of [`EntityMapper::get_mapped`]. 
+ fn set_mapped(&mut self, source: Entity, target: Entity); +} + +impl EntityMapper for () { + #[inline] + fn get_mapped(&mut self, source: Entity) -> Entity { + source + } + + #[inline] + fn set_mapped(&mut self, _source: Entity, _target: Entity) {} +} + +impl EntityMapper for (Entity, Entity) { + #[inline] + fn get_mapped(&mut self, source: Entity) -> Entity { + if source == self.0 { + self.1 + } else { + source + } + } + + fn set_mapped(&mut self, _source: Entity, _target: Entity) {} } impl EntityMapper for &mut dyn EntityMapper { - fn map_entity(&mut self, entity: Entity) -> Entity { - (*self).map_entity(entity) + fn get_mapped(&mut self, source: Entity) -> Entity { + (*self).get_mapped(source) + } + + fn set_mapped(&mut self, source: Entity, target: Entity) { + (*self).set_mapped(source, target); } } impl EntityMapper for SceneEntityMapper<'_> { /// Returns the corresponding mapped entity or reserves a new dead entity ID in the current world if it is absent. - fn map_entity(&mut self, entity: Entity) -> Entity { - if let Some(&mapped) = self.map.get(&entity) { + fn get_mapped(&mut self, source: Entity) -> Entity { + if let Some(&mapped) = self.map.get(&source) { return mapped; } @@ -112,10 +184,25 @@ impl EntityMapper for SceneEntityMapper<'_> { // Prevent generations counter from being a greater value than HIGH_MASK. 
self.generations = (self.generations + 1) & HIGH_MASK; - self.map.insert(entity, new); + self.map.insert(source, new); new } + + fn set_mapped(&mut self, source: Entity, target: Entity) { + self.map.insert(source, target); + } +} + +impl EntityMapper for EntityHashMap { + /// Returns the corresponding mapped entity or returns `entity` if there is no mapped entity + fn get_mapped(&mut self, source: Entity) -> Entity { + self.get(&source).cloned().unwrap_or(source) + } + + fn set_mapped(&mut self, source: Entity, target: Entity) { + self.insert(source, target); + } } /// A wrapper for [`EntityHashMap`], augmenting it with the ability to allocate new [`Entity`] references in a destination @@ -208,15 +295,15 @@ mod tests { let mut mapper = SceneEntityMapper::new(&mut map, &mut world); let mapped_ent = Entity::from_raw(FIRST_IDX); - let dead_ref = mapper.map_entity(mapped_ent); + let dead_ref = mapper.get_mapped(mapped_ent); assert_eq!( dead_ref, - mapper.map_entity(mapped_ent), + mapper.get_mapped(mapped_ent), "should persist the allocated mapping from the previous line" ); assert_eq!( - mapper.map_entity(Entity::from_raw(SECOND_IDX)).index(), + mapper.get_mapped(Entity::from_raw(SECOND_IDX)).index(), dead_ref.index(), "should re-use the same index for further dead refs" ); @@ -234,7 +321,7 @@ mod tests { let mut world = World::new(); let dead_ref = SceneEntityMapper::world_scope(&mut map, &mut world, |_, mapper| { - mapper.map_entity(Entity::from_raw(0)) + mapper.get_mapped(Entity::from_raw(0)) }); // Next allocated entity should be a further generation on the same index @@ -253,7 +340,7 @@ mod tests { // Create and exercise a SceneEntityMapper - should not panic because it flushes // `Entities` first. SceneEntityMapper::world_scope(&mut Default::default(), &mut world, |_, m| { - m.map_entity(Entity::PLACEHOLDER); + m.get_mapped(Entity::PLACEHOLDER); }); // The SceneEntityMapper should leave `Entities` in a flushed state. 
diff --git a/crates/bevy_ecs/src/entity/mod.rs b/crates/bevy_ecs/src/entity/mod.rs index cf2370d1ca440..7bba07aac6017 100644 --- a/crates/bevy_ecs/src/entity/mod.rs +++ b/crates/bevy_ecs/src/entity/mod.rs @@ -39,7 +39,6 @@ mod clone_entities; mod entity_set; mod map_entities; -mod visit_entities; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; #[cfg(all(feature = "bevy_reflect", feature = "serialize"))] @@ -48,19 +47,33 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; pub use clone_entities::*; pub use entity_set::*; pub use map_entities::*; -pub use visit_entities::*; mod hash; pub use hash::*; -mod hash_map; -mod hash_set; +pub mod hash_map; +pub mod hash_set; pub use hash_map::EntityHashMap; pub use hash_set::EntityHashSet; +pub mod index_map; +pub mod index_set; + +pub use index_map::EntityIndexMap; +pub use index_set::EntityIndexSet; + +pub mod unique_array; +pub mod unique_slice; +pub mod unique_vec; + +pub use unique_array::{UniqueEntityArray, UniqueEntityEquivalentArray}; +pub use unique_slice::{UniqueEntityEquivalentSlice, UniqueEntitySlice}; +pub use unique_vec::{UniqueEntityEquivalentVec, UniqueEntityVec}; + use crate::{ archetype::{ArchetypeId, ArchetypeRow}, + change_detection::MaybeLocation, identifier::{ error::IdentifierError, kinds::IdKind, @@ -69,36 +82,24 @@ use crate::{ }, storage::{SparseSetIndex, TableId, TableRow}, }; -use alloc::{borrow::ToOwned, string::String, vec::Vec}; -use core::{fmt, hash::Hash, mem, num::NonZero}; +use alloc::vec::Vec; +use bevy_platform::sync::atomic::Ordering; +use core::{fmt, hash::Hash, mem, num::NonZero, panic::Location}; use log::warn; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; - #[cfg(feature = "serialize")] use serde::{Deserialize, Serialize}; -#[cfg(not(feature = "portable-atomic"))] -use core::sync::atomic::Ordering; - -#[cfg(feature = "portable-atomic")] -use portable_atomic::Ordering; - -#[cfg(all(target_has_atomic = "64", not(feature = 
"portable-atomic")))] -use core::sync::atomic::AtomicI64 as AtomicIdCursor; -#[cfg(all(target_has_atomic = "64", feature = "portable-atomic"))] -use portable_atomic::AtomicI64 as AtomicIdCursor; +#[cfg(target_has_atomic = "64")] +use bevy_platform::sync::atomic::AtomicI64 as AtomicIdCursor; #[cfg(target_has_atomic = "64")] type IdCursor = i64; /// Most modern platforms support 64-bit atomics, but some less-common platforms /// do not. This fallback allows compilation using a 32-bit cursor instead, with /// the caveat that some conversions may fail (and panic) at runtime. -#[cfg(all(not(target_has_atomic = "64"), not(feature = "portable-atomic")))] -use core::sync::atomic::AtomicIsize as AtomicIdCursor; -#[cfg(all(not(target_has_atomic = "64"), feature = "portable-atomic"))] -use portable_atomic::AtomicIsize as AtomicIdCursor; +#[cfg(not(target_has_atomic = "64"))] +use bevy_platform::sync::atomic::AtomicIsize as AtomicIdCursor; #[cfg(not(target_has_atomic = "64"))] type IdCursor = isize; @@ -173,7 +174,7 @@ type IdCursor = isize; #[derive(Clone, Copy)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", reflect(opaque))] -#[cfg_attr(feature = "bevy_reflect", reflect(Hash, PartialEq, Debug))] +#[cfg_attr(feature = "bevy_reflect", reflect(Hash, PartialEq, Debug, Clone))] #[cfg_attr( all(feature = "bevy_reflect", feature = "serialize"), reflect(Serialize, Deserialize) @@ -242,6 +243,10 @@ impl Hash for Entity { } } +#[deprecated( + since = "0.16.0", + note = "This is exclusively used with the now deprecated `Entities::alloc_at_without_replacement`." 
+)] pub(crate) enum AllocAtWithoutReplacement { Exists(EntityLocation), DidNotExist, @@ -575,8 +580,6 @@ pub struct Entities { /// [`flush`]: Entities::flush pending: Vec, free_cursor: AtomicIdCursor, - /// Stores the number of free entities for [`len`](Entities::len) - len: u32, } impl Entities { @@ -585,14 +588,20 @@ impl Entities { meta: Vec::new(), pending: Vec::new(), free_cursor: AtomicIdCursor::new(0), - len: 0, } } /// Reserve entity IDs concurrently. /// /// Storage for entity generation and location is lazily allocated by calling [`flush`](Entities::flush). - #[allow(clippy::unnecessary_fallible_conversions)] // Because `IdCursor::try_from` may fail on 32-bit platforms. + #[expect( + clippy::allow_attributes, + reason = "`clippy::unnecessary_fallible_conversions` may not always lint." + )] + #[allow( + clippy::unnecessary_fallible_conversions, + reason = "`IdCursor::try_from` may fail on 32-bit platforms." + )] pub fn reserve_entities(&self, count: u32) -> ReserveEntitiesIterator { // Use one atomic subtract to grab a range of new IDs. The range might be // entirely nonnegative, meaning all IDs come from the freelist, or entirely @@ -670,7 +679,6 @@ impl Entities { /// Allocate an entity ID directly. pub fn alloc(&mut self) -> Entity { self.verify_flushed(); - self.len += 1; if let Some(index) = self.pending.pop() { let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; @@ -686,6 +694,10 @@ impl Entities { /// /// Returns the location of the entity currently using the given ID, if any. Location should be /// written immediately. + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used after freeing a large number of entities and requesting an arbitrary entity. See #18054 on GitHub." 
+ )] pub fn alloc_at(&mut self, entity: Entity) -> Option { self.verify_flushed(); @@ -696,13 +708,11 @@ impl Entities { *self.free_cursor.get_mut() = new_free_cursor; self.meta .resize(entity.index() as usize + 1, EntityMeta::EMPTY); - self.len += 1; None } else if let Some(index) = self.pending.iter().position(|item| *item == entity.index()) { self.pending.swap_remove(index); let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; - self.len += 1; None } else { Some(mem::replace( @@ -719,6 +729,14 @@ impl Entities { /// Allocate a specific entity ID, overwriting its generation. /// /// Returns the location of the entity currently using the given ID, if any. + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used after freeing a large number of entities and requesting an arbitrary entity. See #18054 on GitHub." + )] + #[expect( + deprecated, + reason = "We need to support `AllocAtWithoutReplacement` for now." + )] pub(crate) fn alloc_at_without_replacement( &mut self, entity: Entity, @@ -732,13 +750,11 @@ impl Entities { *self.free_cursor.get_mut() = new_free_cursor; self.meta .resize(entity.index() as usize + 1, EntityMeta::EMPTY); - self.len += 1; AllocAtWithoutReplacement::DidNotExist } else if let Some(index) = self.pending.iter().position(|item| *item == entity.index()) { self.pending.swap_remove(index); let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; - self.len += 1; AllocAtWithoutReplacement::DidNotExist } else { let current_meta = &self.meta[entity.index() as usize]; @@ -781,12 +797,18 @@ impl Entities { let new_free_cursor = self.pending.len() as IdCursor; *self.free_cursor.get_mut() = new_free_cursor; - self.len -= 1; Some(loc) } /// Ensure at least `n` allocations can succeed without reallocating. - #[allow(clippy::unnecessary_fallible_conversions)] // Because `IdCursor::try_from` may fail on 32-bit platforms. 
+ #[expect( + clippy::allow_attributes, + reason = "`clippy::unnecessary_fallible_conversions` may not always lint." + )] + #[allow( + clippy::unnecessary_fallible_conversions, + reason = "`IdCursor::try_from` may fail on 32-bit platforms." + )] pub fn reserve(&mut self, additional: u32) { self.verify_flushed(); @@ -804,7 +826,7 @@ impl Entities { // not reallocated since the generation is incremented in `free` pub fn contains(&self, entity: Entity) -> bool { self.resolve_from_id(entity.index()) - .map_or(false, |e| e.generation() == entity.generation()) + .is_some_and(|e| e.generation() == entity.generation()) } /// Clears all [`Entity`] from the World. @@ -812,11 +834,10 @@ impl Entities { self.meta.clear(); self.pending.clear(); *self.free_cursor.get_mut() = 0; - self.len = 0; } /// Returns the location of an [`Entity`]. - /// Note: for pending entities, returns `Some(EntityLocation::INVALID)`. + /// Note: for pending entities, returns `None`. #[inline] pub fn get(&self, entity: Entity) -> Option { if let Some(meta) = self.meta.get(entity.index() as usize) { @@ -908,7 +929,6 @@ impl Entities { let old_meta_len = self.meta.len(); let new_meta_len = old_meta_len + -current_free_cursor as usize; self.meta.resize(new_meta_len, EntityMeta::EMPTY); - self.len += -current_free_cursor as u32; for (index, meta) in self.meta.iter_mut().enumerate().skip(old_meta_len) { init( Entity::from_raw_and_generation(index as u32, meta.generation), @@ -920,7 +940,6 @@ impl Entities { 0 }; - self.len += (self.pending.len() - new_free_cursor) as u32; for index in self.pending.drain(new_free_cursor..) { let meta = &mut self.meta[index as usize]; init( @@ -954,55 +973,119 @@ impl Entities { self.meta.len() } + /// The count of all entities in the [`World`] that are used, + /// including both those allocated and those reserved, but not those freed. 
+ /// + /// [`World`]: crate::world::World + #[inline] + pub fn used_count(&self) -> usize { + (self.meta.len() as isize - self.free_cursor.load(Ordering::Relaxed) as isize) as usize + } + + /// The count of all entities in the [`World`] that have ever been allocated or reserved, including those that are freed. + /// This is the value that [`Self::total_count()`] would return if [`Self::flush()`] were called right now. + /// + /// [`World`]: crate::world::World + #[inline] + pub fn total_prospective_count(&self) -> usize { + self.meta.len() + (-self.free_cursor.load(Ordering::Relaxed)).min(0) as usize + } + /// The count of currently allocated entities. #[inline] pub fn len(&self) -> u32 { - self.len + // `pending`, by definition, can't be bigger than `meta`. + (self.meta.len() - self.pending.len()) as u32 } /// Checks if any entity is currently active. #[inline] pub fn is_empty(&self) -> bool { - self.len == 0 + self.len() == 0 } /// Sets the source code location from which this entity has last been spawned /// or despawned. - #[cfg(feature = "track_change_detection")] #[inline] - pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: &'static Location) { - let meta = self - .meta - .get_mut(index as usize) - .expect("Entity index invalid"); - meta.spawned_or_despawned_by = Some(caller); + pub(crate) fn set_spawned_or_despawned_by(&mut self, index: u32, caller: MaybeLocation) { + caller.map(|caller| { + let meta = self + .meta + .get_mut(index as usize) + .expect("Entity index invalid"); + meta.spawned_or_despawned_by = MaybeLocation::new(Some(caller)); + }); } /// Returns the source code location from which this entity has last been spawned - /// or despawned. Returns `None` if this entity has never existed. - #[cfg(feature = "track_change_detection")] + /// or despawned. Returns `None` if its index has been reused by another entity + /// or if this entity has never existed. 
pub fn entity_get_spawned_or_despawned_by( &self, entity: Entity, - ) -> Option<&'static Location<'static>> { - self.meta - .get(entity.index() as usize) - .and_then(|meta| meta.spawned_or_despawned_by) + ) -> MaybeLocation>> { + MaybeLocation::new_with_flattened(|| { + self.meta + .get(entity.index() as usize) + .filter(|meta| + // Generation is incremented immediately upon despawn + (meta.generation == entity.generation) + || (meta.location.archetype_id == ArchetypeId::INVALID) + && (meta.generation == IdentifierMask::inc_masked_high_by(entity.generation, 1))) + .map(|meta| meta.spawned_or_despawned_by) + }) + .map(Option::flatten) } - /// Constructs a message explaining why an entity does not exists, if known. - pub(crate) fn entity_does_not_exist_error_details_message(&self, _entity: Entity) -> String { - #[cfg(feature = "track_change_detection")] - { - if let Some(location) = self.entity_get_spawned_or_despawned_by(_entity) { - format!("was despawned by {location}",) - } else { - "was never spawned".to_owned() - } + /// Constructs a message explaining why an entity does not exist, if known. + pub(crate) fn entity_does_not_exist_error_details( + &self, + entity: Entity, + ) -> EntityDoesNotExistDetails { + EntityDoesNotExistDetails { + location: self.entity_get_spawned_or_despawned_by(entity), } - #[cfg(not(feature = "track_change_detection"))] - { - "does not exist (enable `track_change_detection` feature for more details)".to_owned() + } +} + +/// An error that occurs when a specified [`Entity`] does not exist. +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] +#[error("The entity with ID {entity} {details}")] +pub struct EntityDoesNotExistError { + /// The entity's ID. + pub entity: Entity, + /// Details on why the entity does not exist, if available. 
+ pub details: EntityDoesNotExistDetails, +} + +impl EntityDoesNotExistError { + pub(crate) fn new(entity: Entity, entities: &Entities) -> Self { + Self { + entity, + details: entities.entity_does_not_exist_error_details(entity), + } + } +} + +/// Helper struct that, when printed, will write the appropriate details +/// regarding an entity that did not exist. +#[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct EntityDoesNotExistDetails { + location: MaybeLocation>>, +} + +impl fmt::Display for EntityDoesNotExistDetails { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.location.into_option() { + Some(Some(location)) => write!(f, "was despawned by {location}"), + Some(None) => write!( + f, + "does not exist (index has been reused or was never spawned)" + ), + None => write!( + f, + "does not exist (enable `track_location` feature for more details)" + ), } } } @@ -1014,8 +1097,7 @@ struct EntityMeta { /// The current location of the [`Entity`] pub location: EntityLocation, /// Location of the last spawn or despawn of this entity - #[cfg(feature = "track_change_detection")] - spawned_or_despawned_by: Option<&'static Location<'static>>, + spawned_or_despawned_by: MaybeLocation>>, } impl EntityMeta { @@ -1023,8 +1105,7 @@ impl EntityMeta { const EMPTY: EntityMeta = EntityMeta { generation: NonZero::::MIN, location: EntityLocation::INVALID, - #[cfg(feature = "track_change_detection")] - spawned_or_despawned_by: None, + spawned_or_despawned_by: MaybeLocation::new(None), }; } @@ -1065,6 +1146,7 @@ impl EntityLocation { #[cfg(test)] mod tests { use super::*; + use alloc::format; #[test] fn entity_niche_optimization() { @@ -1149,7 +1231,10 @@ mod tests { } #[test] - #[allow(clippy::nonminimal_bool)] // This is intentionally testing `lt` and `ge` as separate functions. 
+ #[expect( + clippy::nonminimal_bool, + reason = "This intentionally tests all possible comparison operators as separate functions; thus, we don't want to rewrite these comparisons to use different operators." + )] fn entity_comparison() { assert_eq!( Entity::from_raw_and_generation(123, NonZero::::new(456).unwrap()), diff --git a/crates/bevy_ecs/src/entity/unique_array.rs b/crates/bevy_ecs/src/entity/unique_array.rs new file mode 100644 index 0000000000000..ce31e55448f35 --- /dev/null +++ b/crates/bevy_ecs/src/entity/unique_array.rs @@ -0,0 +1,587 @@ +//! A wrapper around entity arrays with a uniqueness invariant. + +use core::{ + array, + borrow::{Borrow, BorrowMut}, + fmt::Debug, + ops::{ + Bound, Deref, DerefMut, Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, + RangeTo, RangeToInclusive, + }, + ptr, +}; + +use alloc::{ + boxed::Box, + collections::{BTreeSet, BinaryHeap, LinkedList, VecDeque}, + rc::Rc, + vec::Vec, +}; + +use bevy_platform::sync::Arc; + +use super::{ + unique_slice::{self, UniqueEntityEquivalentSlice}, + Entity, EntityEquivalent, UniqueEntityIter, +}; + +/// An array that contains only unique entities. +/// +/// It can be obtained through certain methods on [`UniqueEntityEquivalentSlice`], +/// and some [`TryFrom`] implementations. +/// +/// When `T` is [`Entity`], use [`UniqueEntityArray`]. +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct UniqueEntityEquivalentArray([T; N]); + +/// An array that contains only unique [`Entity`]. +/// +/// This is the default case of a [`UniqueEntityEquivalentArray`]. +pub type UniqueEntityArray = UniqueEntityEquivalentArray; + +impl UniqueEntityEquivalentArray { + /// Constructs a `UniqueEntityEquivalentArray` from a [`[T; N]`] unsafely. + /// + /// # Safety + /// + /// `array` must contain only unique elements. 
+ pub const unsafe fn from_array_unchecked(array: [T; N]) -> Self { + Self(array) + } + + /// Constructs a `&UniqueEntityEquivalentArray` from a [`&[T; N]`] unsafely. + /// + /// # Safety + /// + /// `array` must contain only unique elements. + pub const unsafe fn from_array_ref_unchecked(array: &[T; N]) -> &Self { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { &*(ptr::from_ref(array).cast()) } + } + + /// Constructs a `Box` from a [`Box<[T; N]>`] unsafely. + /// + /// # Safety + /// + /// `array` must contain only unique elements. + pub unsafe fn from_boxed_array_unchecked(array: Box<[T; N]>) -> Box { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Box::from_raw(Box::into_raw(array).cast()) } + } + + /// Casts `self` into the inner array. + pub fn into_boxed_inner(self: Box) -> Box<[T; N]> { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Box::from_raw(Box::into_raw(self).cast()) } + } + + /// Constructs a `Arc` from a [`Arc<[T; N]>`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub unsafe fn from_arc_array_unchecked(slice: Arc<[T; N]>) -> Arc { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Arc::from_raw(Arc::into_raw(slice).cast()) } + } + + /// Casts `self` to the inner array. + pub fn into_arc_inner(this: Arc) -> Arc<[T; N]> { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Arc::from_raw(Arc::into_raw(this).cast()) } + } + + // Constructs a `Rc` from a [`Rc<[T; N]>`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub unsafe fn from_rc_array_unchecked(slice: Rc<[T; N]>) -> Rc { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Rc::from_raw(Rc::into_raw(slice).cast()) } + } + + /// Casts `self` to the inner array. 
+ pub fn into_rc_inner(self: Rc) -> Rc<[T; N]> { + // SAFETY: UniqueEntityEquivalentArray is a transparent wrapper around [T; N]. + unsafe { Rc::from_raw(Rc::into_raw(self).cast()) } + } + + /// Return the inner array. + pub fn into_inner(self) -> [T; N] { + self.0 + } + + /// Returns a reference to the inner array. + pub fn as_inner(&self) -> &[T; N] { + &self.0 + } + + /// Returns a slice containing the entire array. Equivalent to `&s[..]`. + pub const fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.as_slice()) } + } + + /// Returns a mutable slice containing the entire array. Equivalent to + /// `&mut s[..]`. + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.as_mut_slice()) } + } + + /// Borrows each element and returns an array of references with the same + /// size as `self`. + /// + /// Equivalent to [`[T; N]::as_ref`](array::each_ref). + pub fn each_ref(&self) -> UniqueEntityEquivalentArray<&T, N> { + UniqueEntityEquivalentArray(self.0.each_ref()) + } +} + +impl Deref for UniqueEntityEquivalentArray { + type Target = UniqueEntityEquivalentSlice; + + fn deref(&self) -> &Self::Target { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(&self.0) } + } +} + +impl DerefMut for UniqueEntityEquivalentArray { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: All elements in the original array are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(&mut self.0) } + } +} +impl Default for UniqueEntityEquivalentArray { + fn default() -> Self { + Self(Default::default()) + } +} + +impl<'a, T: EntityEquivalent, const N: usize> IntoIterator + for &'a UniqueEntityEquivalentArray +{ + type Item = &'a T; + + type IntoIter = unique_slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.iter()) } + } +} + +impl IntoIterator for UniqueEntityEquivalentArray { + type Item = T; + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + // SAFETY: All elements in the original array are unique. + unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.into_iter()) } + } +} + +impl AsRef> + for UniqueEntityEquivalentArray +{ + fn as_ref(&self) -> &UniqueEntityEquivalentSlice { + self + } +} + +impl AsMut> + for UniqueEntityEquivalentArray +{ + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { + self + } +} + +impl Borrow> + for UniqueEntityEquivalentArray +{ + fn borrow(&self) -> &UniqueEntityEquivalentSlice { + self + } +} + +impl BorrowMut> + for UniqueEntityEquivalentArray +{ + fn borrow_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { + self + } +} + +impl Index<(Bound, Bound)> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: (Bound, Bound)) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: Range) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeFrom) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentArray { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeFull) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeInclusive) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeTo) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> + for UniqueEntityEquivalentArray +{ + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeToInclusive) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentArray { + type Output = T; + fn index(&self, key: usize) -> &T { + self.0.index(key) + } +} + +impl IndexMut<(Bound, Bound)> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: Range) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeFrom) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeFull) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeTo) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> + for UniqueEntityEquivalentArray +{ + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl From<&[T; 1]> for UniqueEntityEquivalentArray { + fn from(value: &[T; 1]) -> Self { + Self(value.clone()) + } +} + +impl From<&[T; 0]> for UniqueEntityEquivalentArray { + fn from(value: &[T; 0]) -> Self { + Self(value.clone()) + } +} + +impl From<&mut [T; 1]> for UniqueEntityEquivalentArray { + fn from(value: &mut [T; 1]) -> Self { + Self(value.clone()) + } +} + +impl From<&mut [T; 0]> for UniqueEntityEquivalentArray { + fn from(value: &mut [T; 0]) -> Self { + Self(value.clone()) + } +} + +impl From<[T; 1]> for UniqueEntityEquivalentArray { + fn from(value: [T; 1]) -> Self { + Self(value) + } +} + +impl From<[T; 0]> for UniqueEntityEquivalentArray { + fn from(value: [T; 0]) -> Self { + Self(value) + } +} + +impl From> for (T,) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> for (T, T, T, T, T, T, T, T, T) { + fn from(array: UniqueEntityEquivalentArray) -> Self { + 
Self::from(array.into_inner()) + } +} + +impl From> + for (T, T, T, T, T, T, T, T, T, T) +{ + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> + for (T, T, T, T, T, T, T, T, T, T, T) +{ + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> + for (T, T, T, T, T, T, T, T, T, T, T, T) +{ + fn from(array: UniqueEntityEquivalentArray) -> Self { + Self::from(array.into_inner()) + } +} + +impl From> + for BTreeSet +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + BTreeSet::from(value.0) + } +} + +impl From> + for BinaryHeap +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + BinaryHeap::from(value.0) + } +} + +impl From> + for LinkedList +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + LinkedList::from(value.0) + } +} + +impl From> for Vec { + fn from(value: UniqueEntityEquivalentArray) -> Self { + Vec::from(value.0) + } +} + +impl From> for VecDeque { + fn from(value: UniqueEntityEquivalentArray) -> Self { + VecDeque::from(value.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentSlice> for UniqueEntityEquivalentArray +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { + self.0.eq(&other.as_inner()) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for UniqueEntityEquivalentArray +{ + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { + self.0.eq(other.as_inner()) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentArray> for Vec +{ + fn eq(&self, other: &&UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentArray> for VecDeque +{ + fn eq(&self, other: &&UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&mut UniqueEntityEquivalentArray> for VecDeque +{ 
+ fn eq(&self, other: &&mut UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for Vec +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} +impl, U: EntityEquivalent, const N: usize> + PartialEq> for VecDeque +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.eq(&other.0) + } +} + +/// A by-value array iterator. +/// +/// Equivalent to [`array::IntoIter`]. +pub type IntoIter = UniqueEntityIter>; + +impl UniqueEntityIter> { + /// Returns an immutable slice of all elements that have not been yielded + /// yet. + /// + /// Equivalent to [`array::IntoIter::as_slice`]. + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } + } + + /// Returns a mutable slice of all elements that have not been yielded yet. + /// + /// Equivalent to [`array::IntoIter::as_mut_slice`]. + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut( + self.as_mut_inner().as_mut_slice(), + ) + } + } +} diff --git a/crates/bevy_ecs/src/entity/unique_slice.rs b/crates/bevy_ecs/src/entity/unique_slice.rs new file mode 100644 index 0000000000000..e45c3a21c06bf --- /dev/null +++ b/crates/bevy_ecs/src/entity/unique_slice.rs @@ -0,0 +1,1895 @@ +//! A wrapper around entity slices with a uniqueness invariant. 
+ +use core::{ + array::TryFromSliceError, + borrow::Borrow, + cmp::Ordering, + fmt::Debug, + iter::FusedIterator, + ops::{ + Bound, Deref, Index, IndexMut, Range, RangeFrom, RangeFull, RangeInclusive, RangeTo, + RangeToInclusive, + }, + ptr, + slice::{self, SliceIndex}, +}; + +use alloc::{ + borrow::{Cow, ToOwned}, + boxed::Box, + collections::VecDeque, + rc::Rc, + vec::Vec, +}; + +use bevy_platform::sync::Arc; + +use super::{ + unique_vec::{self, UniqueEntityEquivalentVec}, + Entity, EntityEquivalent, EntitySet, EntitySetIterator, FromEntitySetIterator, + UniqueEntityEquivalentArray, UniqueEntityIter, +}; + +/// A slice that contains only unique entities. +/// +/// This can be obtained by slicing [`UniqueEntityEquivalentVec`]. +/// +/// When `T` is [`Entity`], use [`UniqueEntitySlice`]. +#[repr(transparent)] +#[derive(Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct UniqueEntityEquivalentSlice([T]); + +/// A slice that contains only unique [`Entity`]. +/// +/// This is the default case of a [`UniqueEntityEquivalentSlice`]. +pub type UniqueEntitySlice = UniqueEntityEquivalentSlice; + +impl UniqueEntityEquivalentSlice { + /// Constructs a `UniqueEntityEquivalentSlice` from a [`&[T]`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub const unsafe fn from_slice_unchecked(slice: &[T]) -> &Self { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { &*(ptr::from_ref(slice) as *const Self) } + } + + /// Constructs a `UniqueEntityEquivalentSlice` from a [`&mut [T]`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub const unsafe fn from_slice_unchecked_mut(slice: &mut [T]) -> &mut Self { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { &mut *(ptr::from_mut(slice) as *mut Self) } + } + + /// Casts to `self` to a standard slice. 
+ pub const fn as_inner(&self) -> &[T] { + &self.0 + } + + /// Constructs a `UniqueEntityEquivalentSlice` from a [`Box<[T]>`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub unsafe fn from_boxed_slice_unchecked(slice: Box<[T]>) -> Box { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { Box::from_raw(Box::into_raw(slice) as *mut Self) } + } + + /// Casts `self` to the inner slice. + pub fn into_boxed_inner(self: Box) -> Box<[T]> { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { Box::from_raw(Box::into_raw(self) as *mut [T]) } + } + + /// Constructs a `UniqueEntityEquivalentSlice` from a [`Arc<[T]>`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub unsafe fn from_arc_slice_unchecked(slice: Arc<[T]>) -> Arc { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { Arc::from_raw(Arc::into_raw(slice) as *mut Self) } + } + + /// Casts `self` to the inner slice. + pub fn into_arc_inner(this: Arc) -> Arc<[T]> { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { Arc::from_raw(Arc::into_raw(this) as *mut [T]) } + } + + // Constructs a `UniqueEntityEquivalentSlice` from a [`Rc<[T]>`] unsafely. + /// + /// # Safety + /// + /// `slice` must contain only unique elements. + pub unsafe fn from_rc_slice_unchecked(slice: Rc<[T]>) -> Rc { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { Rc::from_raw(Rc::into_raw(slice) as *mut Self) } + } + + /// Casts `self` to the inner slice. + pub fn into_rc_inner(self: Rc) -> Rc<[T]> { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { Rc::from_raw(Rc::into_raw(self) as *mut [T]) } + } + + /// Returns the first and all the rest of the elements of the slice, or `None` if it is empty. 
+ /// + /// Equivalent to [`[T]::split_first`](slice::split_first). + pub const fn split_first(&self) -> Option<(&T, &Self)> { + let Some((first, rest)) = self.0.split_first() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + Some((first, unsafe { Self::from_slice_unchecked(rest) })) + } + + /// Returns the last and all the rest of the elements of the slice, or `None` if it is empty. + /// + /// Equivalent to [`[T]::split_last`](slice::split_last). + pub const fn split_last(&self) -> Option<(&T, &Self)> { + let Some((last, rest)) = self.0.split_last() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + Some((last, unsafe { Self::from_slice_unchecked(rest) })) + } + + /// Returns an array reference to the first `N` items in the slice. + /// + /// Equivalent to [`[T]::first_chunk`](slice::first_chunk). + pub const fn first_chunk(&self) -> Option<&UniqueEntityEquivalentArray> { + let Some(chunk) = self.0.first_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + Some(unsafe { UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk) }) + } + + /// Returns an array reference to the first `N` items in the slice and the remaining slice. + /// + /// Equivalent to [`[T]::split_first_chunk`](slice::split_first_chunk). + pub const fn split_first_chunk( + &self, + ) -> Option<( + &UniqueEntityEquivalentArray, + &UniqueEntityEquivalentSlice, + )> { + let Some((chunk, rest)) = self.0.split_first_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + unsafe { + Some(( + UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk), + Self::from_slice_unchecked(rest), + )) + } + } + + /// Returns an array reference to the last `N` items in the slice and the remaining slice. + /// + /// Equivalent to [`[T]::split_last_chunk`](slice::split_last_chunk). 
+ pub const fn split_last_chunk( + &self, + ) -> Option<( + &UniqueEntityEquivalentSlice, + &UniqueEntityEquivalentArray, + )> { + let Some((rest, chunk)) = self.0.split_last_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + unsafe { + Some(( + Self::from_slice_unchecked(rest), + UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk), + )) + } + } + + /// Returns an array reference to the last `N` items in the slice. + /// + /// Equivalent to [`[T]::last_chunk`](slice::last_chunk). + pub const fn last_chunk(&self) -> Option<&UniqueEntityEquivalentArray> { + let Some(chunk) = self.0.last_chunk() else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + Some(unsafe { UniqueEntityEquivalentArray::from_array_ref_unchecked(chunk) }) + } + + /// Returns a reference to a subslice. + /// + /// Equivalent to the range functionality of [`[T]::get`]. + /// + /// Note that only the inner [`[T]::get`] supports indexing with a [`usize`]. + /// + /// [`[T]::get`]: `slice::get` + pub fn get(&self, index: I) -> Option<&Self> + where + Self: Index, + I: SliceIndex<[T], Output = [T]>, + { + self.0.get(index).map(|slice| + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(slice) }) + } + + /// Returns a mutable reference to a subslice. + /// + /// Equivalent to the range functionality of [`[T]::get_mut`]. + /// + /// Note that `UniqueEntityEquivalentSlice::get_mut` cannot be called with a [`usize`]. + /// + /// [`[T]::get_mut`]: `slice::get_mut`s + pub fn get_mut(&mut self, index: I) -> Option<&mut Self> + where + Self: Index, + I: SliceIndex<[T], Output = [T]>, + { + self.0.get_mut(index).map(|slice| + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(slice) }) + } + + /// Returns a reference to a subslice, without doing bounds checking. 
+ /// + /// Equivalent to the range functionality of [`[T]::get_unchecked`]. + /// + /// Note that only the inner [`[T]::get_unchecked`] supports indexing with a [`usize`]. + /// + /// # Safety + /// + /// `index` must be safe to use with [`[T]::get_unchecked`] + /// + /// [`[T]::get_unchecked`]: `slice::get_unchecked` + pub unsafe fn get_unchecked(&self, index: I) -> &Self + where + Self: Index, + I: SliceIndex<[T], Output = [T]>, + { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(self.0.get_unchecked(index)) } + } + /// Returns a mutable reference to a subslice, without doing bounds checking. + /// + /// Equivalent to the range functionality of [`[T]::get_unchecked_mut`]. + /// + /// Note that `UniqueEntityEquivalentSlice::get_unchecked_mut` cannot be called with an index. + /// + /// # Safety + /// + /// `index` must be safe to use with [`[T]::get_unchecked_mut`] + /// + /// [`[T]::get_unchecked_mut`]: `slice::get_unchecked_mut` + pub unsafe fn get_unchecked_mut(&mut self, index: I) -> &mut Self + where + Self: Index, + I: SliceIndex<[T], Output = [T]>, + { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(self.0.get_unchecked_mut(index)) } + } + + /// Returns an unsafe mutable pointer to the slice's buffer. + pub const fn as_mut_ptr(&mut self) -> *mut T { + self.0.as_mut_ptr() + } + + /// Returns the two unsafe mutable pointers spanning the slice. + pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> { + self.0.as_mut_ptr_range() + } + + /// Swaps two elements in the slice. + pub fn swap(&mut self, a: usize, b: usize) { + self.0.swap(a, b); + } + + /// Reverses the order of elements in the slice, in place. + pub fn reverse(&mut self) { + self.0.reverse(); + } + + /// Returns an iterator over the slice. + pub fn iter(&self) -> Iter<'_, T> { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.iter()) } + } + + /// Returns an iterator over all contiguous windows of length + /// `size`. + /// + /// Equivalent to [`[T]::windows`]. + /// + /// [`[T]::windows`]: `slice::windows` + pub fn windows(&self, size: usize) -> Windows<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.windows(size)) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// Equivalent to [`[T]::chunks`]. + /// + /// [`[T]::chunks`]: `slice::chunks` + pub fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.chunks(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// Equivalent to [`[T]::chunks_mut`]. + /// + /// [`[T]::chunks_mut`]: `slice::chunks_mut` + pub fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.chunks_mut(chunk_size), + ) + } + } + + /// + /// + /// Equivalent to [`[T]::chunks_exact`]. + /// + /// [`[T]::chunks_exact`]: `slice::chunks_exact` + pub fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.chunks_exact(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// beginning of the slice. + /// + /// Equivalent to [`[T]::chunks_exact_mut`]. 
+ /// + /// [`[T]::chunks_exact_mut`]: `slice::chunks_exact_mut` + pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.chunks_exact_mut(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// Equivalent to [`[T]::rchunks`]. + /// + /// [`[T]::rchunks`]: `slice::rchunks` + pub fn rchunks(&self, chunk_size: usize) -> RChunks<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.rchunks(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// Equivalent to [`[T]::rchunks_mut`]. + /// + /// [`[T]::rchunks_mut`]: `slice::rchunks_mut` + pub fn rchunks_mut(&mut self, chunk_size: usize) -> RChunksMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rchunks_mut(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the + /// end of the slice. + /// + /// Equivalent to [`[T]::rchunks_exact`]. + /// + /// [`[T]::rchunks_exact`]: `slice::rchunks_exact` + pub fn rchunks_exact(&self, chunk_size: usize) -> RChunksExact<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.rchunks_exact(chunk_size), + ) + } + } + + /// Returns an iterator over `chunk_size` elements of the slice at a time, starting at the end + /// of the slice. + /// + /// Equivalent to [`[T]::rchunks_exact_mut`]. 
+ /// + /// [`[T]::rchunks_exact_mut`]: `slice::rchunks_exact_mut` + pub fn rchunks_exact_mut(&mut self, chunk_size: usize) -> RChunksExactMut<'_, T> { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rchunks_exact_mut(chunk_size), + ) + } + } + + /// Returns an iterator over the slice producing non-overlapping runs + /// of elements using the predicate to separate them. + /// + /// Equivalent to [`[T]::chunk_by`]. + /// + /// [`[T]::chunk_by`]: `slice::chunk_by` + pub fn chunk_by(&self, pred: F) -> ChunkBy<'_, F, T> + where + F: FnMut(&T, &T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.chunk_by(pred)) + } + } + + /// Returns an iterator over the slice producing non-overlapping mutable + /// runs of elements using the predicate to separate them. + /// + /// Equivalent to [`[T]::chunk_by_mut`]. + /// + /// [`[T]::chunk_by_mut`]: `slice::chunk_by_mut` + pub fn chunk_by_mut(&mut self, pred: F) -> ChunkByMut<'_, F, T> + where + F: FnMut(&T, &T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.chunk_by_mut(pred), + ) + } + } + + /// Divides one slice into two at an index. + /// + /// Equivalent to [`[T]::split_at`](slice::split_at). + pub const fn split_at(&self, mid: usize) -> (&Self, &Self) { + let (left, right) = self.0.split_at(mid); + // SAFETY: All elements in the original slice are unique. + unsafe { + ( + Self::from_slice_unchecked(left), + Self::from_slice_unchecked(right), + ) + } + } + + /// Divides one mutable slice into two at an index. + /// + /// Equivalent to [`[T]::split_at_mut`](slice::split_at_mut). 
+ pub const fn split_at_mut(&mut self, mid: usize) -> (&mut Self, &mut Self) { + let (left, right) = self.0.split_at_mut(mid); + // SAFETY: All elements in the original slice are unique. + unsafe { + ( + Self::from_slice_unchecked_mut(left), + Self::from_slice_unchecked_mut(right), + ) + } + } + + /// Divides one slice into two at an index, without doing bounds checking. + /// + /// Equivalent to [`[T]::split_at_unchecked`](slice::split_at_unchecked). + /// + /// # Safety + /// + /// `mid` must be safe to use in [`[T]::split_at_unchecked`]. + /// + /// [`[T]::split_at_unchecked`]: `slice::split_at_unchecked` + pub const unsafe fn split_at_unchecked(&self, mid: usize) -> (&Self, &Self) { + // SAFETY: The safety contract is upheld by the caller. + let (left, right) = unsafe { self.0.split_at_unchecked(mid) }; + // SAFETY: All elements in the original slice are unique. + unsafe { + ( + Self::from_slice_unchecked(left), + Self::from_slice_unchecked(right), + ) + } + } + + /// Divides one mutable slice into two at an index, without doing bounds checking. + /// + /// Equivalent to [`[T]::split_at_mut_unchecked`](slice::split_at_mut_unchecked). + /// + /// # Safety + /// + /// `mid` must be safe to use in [`[T]::split_at_mut_unchecked`]. + /// + /// [`[T]::split_at_mut_unchecked`]: `slice::split_at_mut_unchecked` + pub const unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut Self, &mut Self) { + // SAFETY: The safety contract is upheld by the caller. + let (left, right) = unsafe { self.0.split_at_mut_unchecked(mid) }; + // SAFETY: All elements in the original slice are unique. + unsafe { + ( + Self::from_slice_unchecked_mut(left), + Self::from_slice_unchecked_mut(right), + ) + } + } + + /// Divides one slice into two at an index, returning `None` if the slice is + /// too short. + /// + /// Equivalent to [`[T]::split_at_checked`](slice::split_at_checked). 
+ pub const fn split_at_checked(&self, mid: usize) -> Option<(&Self, &Self)> { + let Some((left, right)) = self.0.split_at_checked(mid) else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + unsafe { + Some(( + Self::from_slice_unchecked(left), + Self::from_slice_unchecked(right), + )) + } + } + + /// Divides one mutable slice into two at an index, returning `None` if the + /// slice is too short. + /// + /// Equivalent to [`[T]::split_at_mut_checked`](slice::split_at_mut_checked). + pub const fn split_at_mut_checked(&mut self, mid: usize) -> Option<(&mut Self, &mut Self)> { + let Some((left, right)) = self.0.split_at_mut_checked(mid) else { + return None; + }; + // SAFETY: All elements in the original slice are unique. + unsafe { + Some(( + Self::from_slice_unchecked_mut(left), + Self::from_slice_unchecked_mut(right), + )) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`. + /// + /// Equivalent to [`[T]::split`]. + /// + /// [`[T]::split`]: `slice::split` + pub fn split(&self, pred: F) -> Split<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.split(pred)) + } + } + + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`. + /// + /// Equivalent to [`[T]::split_mut`]. + /// + /// [`[T]::split_mut`]: `slice::split_mut` + pub fn split_mut(&mut self, pred: F) -> SplitMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.split_mut(pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`. + /// + /// Equivalent to [`[T]::split_inclusive`]. 
+ /// + /// [`[T]::split_inclusive`]: `slice::split_inclusive` + pub fn split_inclusive(&self, pred: F) -> SplitInclusive<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked( + self.0.split_inclusive(pred), + ) + } + } + + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`. + /// + /// Equivalent to [`[T]::split_inclusive_mut`]. + /// + /// [`[T]::split_inclusive_mut`]: `slice::split_inclusive_mut` + pub fn split_inclusive_mut(&mut self, pred: F) -> SplitInclusiveMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.split_inclusive_mut(pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`, starting at the end of the slice and working backwards. + /// + /// Equivalent to [`[T]::rsplit`]. + /// + /// [`[T]::rsplit`]: `slice::rsplit` + pub fn rsplit(&self, pred: F) -> RSplit<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.rsplit(pred)) + } + } + + /// Returns an iterator over mutable subslices separated by elements that + /// match `pred`, starting at the end of the slice and working + /// backwards. + /// + /// Equivalent to [`[T]::rsplit_mut`]. + /// + /// [`[T]::rsplit_mut`]: `slice::rsplit_mut` + pub fn rsplit_mut(&mut self, pred: F) -> RSplitMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. 
+ unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rsplit_mut(pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred`, limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::splitn`]. + /// + /// [`[T]::splitn`]: `slice::splitn` + pub fn splitn(&self, n: usize, pred: F) -> SplitN<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.splitn(n, pred)) + } + } + + /// Returns an iterator over mutable subslices separated by elements that match + /// `pred`, limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::splitn_mut`]. + /// + /// [`[T]::splitn_mut`]: `slice::splitn_mut` + pub fn splitn_mut(&mut self, n: usize, pred: F) -> SplitNMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.splitn_mut(n, pred), + ) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred` limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::rsplitn`]. + /// + /// [`[T]::rsplitn`]: `slice::rsplitn` + pub fn rsplitn(&self, n: usize, pred: F) -> RSplitN<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIter::from_slice_iterator_unchecked(self.0.rsplitn(n, pred)) + } + } + + /// Returns an iterator over subslices separated by elements that match + /// `pred` limited to returning at most `n` items. + /// + /// Equivalent to [`[T]::rsplitn_mut`]. 
+ /// + /// [`[T]::rsplitn_mut`]: `slice::rsplitn_mut` + pub fn rsplitn_mut(&mut self, n: usize, pred: F) -> RSplitNMut<'_, F, T> + where + F: FnMut(&T) -> bool, + { + // SAFETY: Any subslice of a unique slice is also unique. + unsafe { + UniqueEntityEquivalentSliceIterMut::from_mut_slice_iterator_unchecked( + self.0.rsplitn_mut(n, pred), + ) + } + } + + /// Sorts the slice **without** preserving the initial order of equal elements. + /// + /// Equivalent to [`[T]::sort_unstable`](slice::sort_unstable). + pub fn sort_unstable(&mut self) + where + T: Ord, + { + self.0.sort_unstable(); + } + + /// Sorts the slice with a comparison function, **without** preserving the initial order of + /// equal elements. + /// + /// Equivalent to [`[T]::sort_unstable_by`](slice::sort_unstable_by). + pub fn sort_unstable_by(&mut self, compare: F) + where + F: FnMut(&T, &T) -> Ordering, + { + self.0.sort_unstable_by(compare); + } + + /// Sorts the slice with a key extraction function, **without** preserving the initial order of + /// equal elements. + /// + /// Equivalent to [`[T]::sort_unstable_by_key`](slice::sort_unstable_by_key). + pub fn sort_unstable_by_key(&mut self, f: F) + where + F: FnMut(&T) -> K, + K: Ord, + { + self.0.sort_unstable_by_key(f); + } + + /// Rotates the slice in-place such that the first `mid` elements of the + /// slice move to the end while the last `self.len() - mid` elements move to + /// the front. + /// + /// Equivalent to [`[T]::rotate_left`](slice::rotate_left). + pub fn rotate_left(&mut self, mid: usize) { + self.0.rotate_left(mid); + } + + /// Rotates the slice in-place such that the first `self.len() - k` + /// elements of the slice move to the end while the last `k` elements move + /// to the front. + /// + /// Equivalent to [`[T]::rotate_right`](slice::rotate_right). + pub fn rotate_right(&mut self, mid: usize) { + self.0.rotate_right(mid); + } + + /// Sorts the slice, preserving initial order of equal elements. 
+ /// + /// Equivalent to [`[T]::sort`](slice::sort()). + pub fn sort(&mut self) + where + T: Ord, + { + self.0.sort(); + } + + /// Sorts the slice with a comparison function, preserving initial order of equal elements. + /// + /// Equivalent to [`[T]::sort_by`](slice::sort_by). + pub fn sort_by(&mut self, compare: F) + where + F: FnMut(&T, &T) -> Ordering, + { + self.0.sort_by(compare); + } + + /// Sorts the slice with a key extraction function, preserving initial order of equal elements. + /// + /// Equivalent to [`[T]::sort_by_key`](slice::sort_by_key). + pub fn sort_by_key(&mut self, f: F) + where + F: FnMut(&T) -> K, + K: Ord, + { + self.0.sort_by_key(f); + } + + // Sorts the slice with a key extraction function, preserving initial order of equal elements. + /// + /// Equivalent to [`[T]::sort_by_cached_key`](slice::sort_by_cached_key). + pub fn sort_by_cached_key(&mut self, f: F) + where + F: FnMut(&T) -> K, + K: Ord, + { + self.0.sort_by_cached_key(f); + } + + /// Copies self into a new `UniqueEntityEquivalentVec`. + pub fn to_vec(&self) -> UniqueEntityEquivalentVec + where + T: Clone, + { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentVec::from_vec_unchecked(self.0.to_vec()) } + } + + /// Converts `self` into a vector without clones or allocation. + /// + /// Equivalent to [`[T]::into_vec`](slice::into_vec). + pub fn into_vec(self: Box) -> UniqueEntityEquivalentVec { + // SAFETY: + // This matches the implementation of `slice::into_vec`. + // All elements in the original slice are unique. + unsafe { + let len = self.len(); + let vec = Vec::from_raw_parts(Box::into_raw(self).cast::(), len, len); + UniqueEntityEquivalentVec::from_vec_unchecked(vec) + } + } +} + +/// Converts a reference to T into a slice of length 1 (without copying). +pub const fn from_ref(s: &T) -> &UniqueEntityEquivalentSlice { + // SAFETY: A slice with a length of 1 is always unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice::from_ref(s)) } +} + +/// Converts a reference to T into a slice of length 1 (without copying). +pub const fn from_mut(s: &mut T) -> &mut UniqueEntityEquivalentSlice { + // SAFETY: A slice with a length of 1 is always unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice::from_mut(s)) } +} + +/// Forms a slice from a pointer and a length. +/// +/// Equivalent to [`slice::from_raw_parts`]. +/// +/// # Safety +/// +/// [`slice::from_raw_parts`] must be safe to call with `data` and `len`. +/// Additionally, all elements in the resulting slice must be unique. +pub const unsafe fn from_raw_parts<'a, T: EntityEquivalent>( + data: *const T, + len: usize, +) -> &'a UniqueEntityEquivalentSlice { + // SAFETY: The safety contract is upheld by the caller. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice::from_raw_parts(data, len)) } +} + +/// Performs the same functionality as [`from_raw_parts`], except that a mutable slice is returned. +/// +/// Equivalent to [`slice::from_raw_parts_mut`]. +/// +/// # Safety +/// +/// [`slice::from_raw_parts_mut`] must be safe to call with `data` and `len`. +/// Additionally, all elements in the resulting slice must be unique. +pub const unsafe fn from_raw_parts_mut<'a, T: EntityEquivalent>( + data: *mut T, + len: usize, +) -> &'a mut UniqueEntityEquivalentSlice { + // SAFETY: The safety contract is upheld by the caller. + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice::from_raw_parts_mut(data, len)) + } +} + +/// Casts a slice of entity slices to a slice of [`UniqueEntityEquivalentSlice`]s. +/// +/// # Safety +/// +/// All elements in each of the casted slices must be unique. +pub unsafe fn cast_slice_of_unique_entity_slice<'a, 'b, T: EntityEquivalent + 'a>( + slice: &'b [&'a [T]], +) -> &'b [&'a UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. 
+ unsafe { &*(ptr::from_ref(slice) as *const [&UniqueEntityEquivalentSlice]) } +} + +/// Casts a mutable slice of entity slices to a slice of [`UniqueEntityEquivalentSlice`]s. +/// +/// # Safety +/// +/// All elements in each of the casted slices must be unique. +pub unsafe fn cast_slice_of_unique_entity_slice_mut<'a, 'b, T: EntityEquivalent + 'a>( + slice: &'b mut [&'a [T]], +) -> &'b mut [&'a UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { &mut *(ptr::from_mut(slice) as *mut [&UniqueEntityEquivalentSlice]) } +} + +/// Casts a mutable slice of mutable entity slices to a slice of mutable [`UniqueEntityEquivalentSlice`]s. +/// +/// # Safety +/// +/// All elements in each of the casted slices must be unique. +pub unsafe fn cast_slice_of_mut_unique_entity_slice_mut<'a, 'b, T: EntityEquivalent + 'a>( + slice: &'b mut [&'a mut [T]], +) -> &'b mut [&'a mut UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. 
+ unsafe { &mut *(ptr::from_mut(slice) as *mut [&mut UniqueEntityEquivalentSlice]) } +} + +impl<'a, T: EntityEquivalent> IntoIterator for &'a UniqueEntityEquivalentSlice { + type Item = &'a T; + + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, T: EntityEquivalent> IntoIterator for &'a Box> { + type Item = &'a T; + + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl IntoIterator for Box> { + type Item = T; + + type IntoIter = unique_vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.into_vec().into_iter() + } +} + +impl Deref for UniqueEntityEquivalentSlice { + type Target = [T]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl AsRef<[T]> for UniqueEntityEquivalentSlice { + fn as_ref(&self) -> &[T] { + self + } +} + +impl AsRef for UniqueEntityEquivalentSlice { + fn as_ref(&self) -> &Self { + self + } +} + +impl AsMut for UniqueEntityEquivalentSlice { + fn as_mut(&mut self) -> &mut Self { + self + } +} + +impl Borrow<[T]> for UniqueEntityEquivalentSlice { + fn borrow(&self) -> &[T] { + self + } +} + +impl Clone for Box> { + fn clone(&self) -> Self { + self.to_vec().into_boxed_slice() + } +} + +impl Default for &UniqueEntityEquivalentSlice { + fn default() -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(Default::default()) } + } +} + +impl Default for &mut UniqueEntityEquivalentSlice { + fn default() -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(Default::default()) } + } +} + +impl Default for Box> { + fn default() -> Self { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(Default::default()) } + } +} + +impl From<&UniqueEntityEquivalentSlice> + for Box> +{ + fn from(value: &UniqueEntityEquivalentSlice) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(value.0.into()) } + } +} + +impl From<&UniqueEntityEquivalentSlice> + for Arc> +{ + fn from(value: &UniqueEntityEquivalentSlice) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_arc_slice_unchecked(value.0.into()) } + } +} + +impl From<&UniqueEntityEquivalentSlice> + for Rc> +{ + fn from(value: &UniqueEntityEquivalentSlice) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_rc_slice_unchecked(value.0.into()) } + } +} + +impl<'a, T: EntityEquivalent + Clone> From<&'a UniqueEntityEquivalentSlice> + for Cow<'a, UniqueEntityEquivalentSlice> +{ + fn from(value: &'a UniqueEntityEquivalentSlice) -> Self { + Cow::Borrowed(value) + } +} + +impl From> + for Box> +{ + fn from(value: UniqueEntityEquivalentArray) -> Self { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { + UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(Box::new(value.into_inner())) + } + } +} + +impl<'a, T: EntityEquivalent + Clone> From>> + for Box> +{ + fn from(value: Cow<'a, UniqueEntityEquivalentSlice>) -> Self { + match value { + Cow::Borrowed(slice) => Box::from(slice), + Cow::Owned(slice) => Box::from(slice), + } + } +} + +impl From> + for Box> +{ + fn from(value: UniqueEntityEquivalentVec) -> Self { + value.into_boxed_slice() + } +} + +impl FromIterator for Box> { + fn from_iter>(iter: I) -> Self { + iter.into_iter() + .collect::>() + .into_boxed_slice() + } +} + +impl FromEntitySetIterator for Box> { + fn from_entity_set_iter>(iter: I) -> Self { + iter.into_iter() + .collect_set::>() + .into_boxed_slice() + } +} + +impl, U: EntityEquivalent> + PartialEq> for &UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.0.eq(other.as_vec()) + } +} + +impl, U: EntityEquivalent> + PartialEq> for &mut UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.0.eq(other.as_vec()) + } +} + +impl, U: EntityEquivalent> + PartialEq> for UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.0.eq(other.as_vec()) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentSlice> for [T; N] +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { + self.eq(&other.0) + } +} + +impl + Clone, U: EntityEquivalent> PartialEq<&UniqueEntityEquivalentSlice> + for Cow<'_, [T]> +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { + self.eq(&&other.0) + } +} + +impl + Clone, U: EntityEquivalent> + PartialEq<&UniqueEntityEquivalentSlice> for Cow<'_, UniqueEntityEquivalentSlice> +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { + self.0.eq(&other.0) + } +} + +impl, U: EntityEquivalent> PartialEq<&UniqueEntityEquivalentSlice> for Vec { + fn eq(&self, other: 
&&UniqueEntityEquivalentSlice) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> PartialEq<&UniqueEntityEquivalentSlice> + for VecDeque +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { + self.eq(&&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&mut UniqueEntityEquivalentSlice> for [T; N] +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { + self.eq(&other.0) + } +} + +impl + Clone, U: EntityEquivalent> PartialEq<&mut UniqueEntityEquivalentSlice> + for Cow<'_, [T]> +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { + self.eq(&&**other) + } +} + +impl + Clone, U: EntityEquivalent> + PartialEq<&mut UniqueEntityEquivalentSlice> for Cow<'_, UniqueEntityEquivalentSlice> +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { + self.0.eq(&other.0) + } +} + +impl + Clone, U: EntityEquivalent> + PartialEq> for Cow<'_, UniqueEntityEquivalentSlice> +{ + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.0.eq(other.as_vec()) + } +} + +impl, U: EntityEquivalent> PartialEq<&mut UniqueEntityEquivalentSlice> + for Vec +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> PartialEq<&mut UniqueEntityEquivalentSlice> + for VecDeque +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { + self.eq(&&other.0) + } +} + +impl, U: EntityEquivalent> + PartialEq> for [T] +{ + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> PartialEq> + for [T; N] +{ + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> + PartialEq> for Vec +{ + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { + self.eq(&other.0) + } +} + +impl, U, const N: usize> PartialEq<[U; N]> + for &UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &[U; N]) -> bool 
{ + self.0.eq(other) + } +} + +impl, U, const N: usize> PartialEq<[U; N]> + for &mut UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &[U; N]) -> bool { + self.0.eq(other) + } +} + +impl, U, const N: usize> PartialEq<[U; N]> + for UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &[U; N]) -> bool { + self.0.eq(other) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for &UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for &mut UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.0) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.0) + } +} + +impl, U> PartialEq> for &UniqueEntityEquivalentSlice { + fn eq(&self, other: &Vec) -> bool { + self.0.eq(other) + } +} + +impl, U> PartialEq> + for &mut UniqueEntityEquivalentSlice +{ + fn eq(&self, other: &Vec) -> bool { + self.0.eq(other) + } +} + +impl, U> PartialEq> for UniqueEntityEquivalentSlice { + fn eq(&self, other: &Vec) -> bool { + self.0.eq(other) + } +} + +impl ToOwned for UniqueEntityEquivalentSlice { + type Owned = UniqueEntityEquivalentVec; + + fn to_owned(&self) -> Self::Owned { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentVec::from_vec_unchecked(self.0.to_owned()) } + } +} + +impl<'a, T: EntityEquivalent + Copy, const N: usize> TryFrom<&'a UniqueEntityEquivalentSlice> + for &'a UniqueEntityEquivalentArray +{ + type Error = TryFromSliceError; + + fn try_from(value: &'a UniqueEntityEquivalentSlice) -> Result { + <&[T; N]>::try_from(&value.0).map(|array| + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentArray::from_array_ref_unchecked(array) }) + } +} + +impl TryFrom<&UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentArray +{ + type Error = TryFromSliceError; + + fn try_from(value: &UniqueEntityEquivalentSlice) -> Result { + <&Self>::try_from(value).copied() + } +} + +impl TryFrom<&mut UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentArray +{ + type Error = TryFromSliceError; + + fn try_from(value: &mut UniqueEntityEquivalentSlice) -> Result { + ::try_from(&*value) + } +} + +impl Index<(Bound, Bound)> for UniqueEntityEquivalentSlice { + type Output = Self; + fn index(&self, key: (Bound, Bound)) -> &Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentSlice { + type Output = Self; + fn index(&self, key: Range) -> &Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentSlice { + type Output = Self; + fn index(&self, key: RangeFrom) -> &Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentSlice { + type Output = Self; + fn index(&self, key: RangeFull) -> &Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentSlice { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeInclusive) -> &Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentSlice { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeTo) -> &Self { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { Self::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentSlice { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeToInclusive) -> &Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentSlice { + type Output = T; + + fn index(&self, index: usize) -> &T { + &self.0[index] + } +} + +impl IndexMut<(Bound, Bound)> + for UniqueEntityEquivalentSlice +{ + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentSlice { + fn index_mut(&mut self, key: Range) -> &mut Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentSlice { + fn index_mut(&mut self, key: RangeFrom) -> &mut Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut for UniqueEntityEquivalentSlice { + fn index_mut(&mut self, key: RangeFull) -> &mut Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentSlice { + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentSlice { + fn index_mut(&mut self, key: RangeTo) -> &mut Self { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentSlice { + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self { + // SAFETY: All elements in the original slice are unique. + unsafe { Self::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +/// Immutable slice iterator. +/// +/// This struct is created by [`iter`] method on [`UniqueEntityEquivalentSlice`] and +/// the [`IntoIterator`] impls on it and [`UniqueEntityEquivalentVec`]. +/// +/// [`iter`]: `UniqueEntityEquivalentSlice::iter` +pub type Iter<'a, T> = UniqueEntityIter>; + +impl<'a, T: EntityEquivalent> UniqueEntityIter> { + /// Views the underlying data as a subslice of the original data. + /// + /// Equivalent to [`slice::Iter::as_slice`]. + pub fn as_slice(&self) -> &'a UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } + } +} + +/// Mutable slice iterator. +pub type IterMut<'a, T> = UniqueEntityIter>; + +impl<'a, T: EntityEquivalent> UniqueEntityIter> { + /// Views the underlying data as a mutable subslice of the original data. + /// + /// Equivalent to [`slice::IterMut::into_slice`]. + pub fn into_slice(self) -> &'a mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.into_inner().into_slice()) + } + } + + /// Views the underlying data as a subslice of the original data. + /// + /// Equivalent to [`slice::IterMut::as_slice`]. + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } + } +} + +/// An iterator that yields `&UniqueEntityEquivalentSlice`. 
Note that an entity may appear +/// in multiple slices, depending on the wrapped iterator. +#[derive(Debug)] +pub struct UniqueEntityEquivalentSliceIter< + 'a, + T: EntityEquivalent + 'a, + I: Iterator, +> { + pub(crate) iter: I, +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> + UniqueEntityEquivalentSliceIter<'a, T, I> +{ + /// Constructs a [`UniqueEntityEquivalentSliceIter`] from a slice iterator unsafely. + /// + /// # Safety + /// + /// All elements in each of the slices must be unique. + pub unsafe fn from_slice_iterator_unchecked(iter: I) -> Self { + Self { iter } + } + + /// Returns the inner `I`. + pub fn into_inner(self) -> I { + self.iter + } + + /// Returns a reference to the inner `I`. + pub fn as_inner(&self) -> &I { + &self.iter + } + + /// Returns a mutable reference to the inner `I`. + /// + /// # Safety + /// + /// `self` must always contain an iterator that yields unique elements, + /// even while this reference is live. + pub unsafe fn as_mut_inner(&mut self) -> &mut I { + &mut self.iter + } +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> Iterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ + type Item = &'a UniqueEntityEquivalentSlice; + + fn next(&mut self) -> Option { + self.iter.next().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice) }) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, T: EntityEquivalent + 'a, I: ExactSizeIterator> ExactSizeIterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: DoubleEndedIterator> DoubleEndedIterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(slice) }) + } +} + +impl<'a, T: EntityEquivalent + 'a, I: FusedIterator> FusedIterator + for UniqueEntityEquivalentSliceIter<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator + AsRef<[&'a [T]]>> + AsRef<[&'a UniqueEntityEquivalentSlice]> for UniqueEntityEquivalentSliceIter<'a, T, I> +{ + fn as_ref(&self) -> &[&'a UniqueEntityEquivalentSlice] { + // SAFETY: + unsafe { cast_slice_of_unique_entity_slice(self.iter.as_ref()) } + } +} + +/// An iterator over overlapping subslices of length `size`. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::windows`]. +pub type Windows<'a, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::Windows<'a, T>>; + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks`]. +pub type Chunks<'a, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::Chunks<'a, T>>; + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks_exact`]. +pub type ChunksExact<'a, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::ChunksExact<'a, T>>; + +impl<'a, T: EntityEquivalent> UniqueEntityEquivalentSliceIter<'a, T, slice::ChunksExact<'a, T>> { + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::ChunksExact::remainder`]. + pub fn remainder(&self) -> &'a UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.iter.remainder()) } + } +} + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks`]. +pub type RChunks<'a, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::RChunks<'a, T>>; + +/// An iterator over a slice in (non-overlapping) chunks (`chunk_size` elements at a +/// time), starting at the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks_exact`]. +pub type RChunksExact<'a, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::RChunksExact<'a, T>>; + +impl<'a, T: EntityEquivalent> UniqueEntityEquivalentSliceIter<'a, T, slice::RChunksExact<'a, T>> { + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::RChunksExact::remainder`]. + pub fn remainder(&self) -> &'a UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.iter.remainder()) } + } +} + +/// An iterator over slice in (non-overlapping) chunks separated by a predicate. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunk_by`]. +pub type ChunkBy<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::ChunkBy<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split`]. +pub type Split<'a, P, T = Entity> = UniqueEntityEquivalentSliceIter<'a, T, slice::Split<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split_inclusive`]. 
+pub type SplitInclusive<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::SplitInclusive<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function, starting from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplit`]. +pub type RSplit<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::RSplit<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function, limited to a given number of splits. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::splitn`]. +pub type SplitN<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::SplitN<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a +/// predicate function, limited to a given number of splits, starting +/// from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplitn`]. +pub type RSplitN<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIter<'a, T, slice::RSplitN<'a, T, P>>; + +/// An iterator that yields `&mut UniqueEntityEquivalentSlice`. Note that an entity may appear +/// in multiple slices, depending on the wrapped iterator. +#[derive(Debug)] +pub struct UniqueEntityEquivalentSliceIterMut< + 'a, + T: EntityEquivalent + 'a, + I: Iterator, +> { + pub(crate) iter: I, +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> + UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + /// Constructs a [`UniqueEntityEquivalentSliceIterMut`] from a mutable slice iterator unsafely. + /// + /// # Safety + /// + /// All elements in each of the slices must be unique. + pub unsafe fn from_mut_slice_iterator_unchecked(iter: I) -> Self { + Self { iter } + } + + /// Returns the inner `I`. + pub fn into_inner(self) -> I { + self.iter + } + + /// Returns a reference to the inner `I`. 
+ pub fn as_inner(&self) -> &I { + &self.iter + } + + /// Returns a mutable reference to the inner `I`. + /// + /// # Safety + /// + /// `self` must always contain an iterator that yields unique elements, + /// even while this reference is live. + pub unsafe fn as_mut_inner(&mut self) -> &mut I { + &mut self.iter + } +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator> Iterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + type Item = &'a mut UniqueEntityEquivalentSlice; + + fn next(&mut self) -> Option { + self.iter.next().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice) }) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl<'a, T: EntityEquivalent + 'a, I: ExactSizeIterator> ExactSizeIterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: DoubleEndedIterator> DoubleEndedIterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + fn next_back(&mut self) -> Option { + self.iter.next_back().map(|slice| + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(slice) }) + } +} + +impl<'a, T: EntityEquivalent + 'a, I: FusedIterator> FusedIterator + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator + AsRef<[&'a [T]]>> + AsRef<[&'a UniqueEntityEquivalentSlice]> for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + fn as_ref(&self) -> &[&'a UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. 
+ unsafe { cast_slice_of_unique_entity_slice(self.iter.as_ref()) } + } +} + +impl<'a, T: EntityEquivalent + 'a, I: Iterator + AsMut<[&'a mut [T]]>> + AsMut<[&'a mut UniqueEntityEquivalentSlice]> + for UniqueEntityEquivalentSliceIterMut<'a, T, I> +{ + fn as_mut(&mut self) -> &mut [&'a mut UniqueEntityEquivalentSlice] { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { cast_slice_of_mut_unique_entity_slice_mut(self.iter.as_mut()) } + } +} + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks_mut`]. +pub type ChunksMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunksMut<'a, T>>; + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the beginning of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunks_exact_mut`]. +pub type ChunksExactMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunksExactMut<'a, T>>; + +impl<'a, T: EntityEquivalent> + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunksExactMut<'a, T>> +{ + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::ChunksExactMut::into_remainder`]. + pub fn into_remainder(self) -> &'a mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.iter.into_remainder()) } + } +} + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks_mut`]. 
+pub type RChunksMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RChunksMut<'a, T>>; + +/// An iterator over a slice in (non-overlapping) mutable chunks (`chunk_size` +/// elements at a time), starting at the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rchunks_exact_mut`]. +pub type RChunksExactMut<'a, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RChunksExactMut<'a, T>>; + +impl<'a, T: EntityEquivalent> + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RChunksExactMut<'a, T>> +{ + /// Returns the remainder of the original slice that is not going to be + /// returned by the iterator. + /// + /// Equivalent to [`slice::RChunksExactMut::into_remainder`]. + pub fn into_remainder(self) -> &'a mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original iterator are unique slices. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.iter.into_remainder()) } + } +} + +/// An iterator over slice in (non-overlapping) mutable chunks separated +/// by a predicate. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::chunk_by_mut`]. +pub type ChunkByMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::ChunkByMut<'a, T, P>>; + +/// An iterator over the mutable subslices of the vector which are separated +/// by elements that match `pred`. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split_mut`]. +pub type SplitMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::SplitMut<'a, T, P>>; + +/// An iterator over the mutable subslices of the vector which are separated +/// by elements that match `pred`. Unlike `SplitMut`, it contains the matched +/// parts in the ends of the subslices. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::split_inclusive_mut`]. 
+pub type SplitInclusiveMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::SplitInclusiveMut<'a, T, P>>; + +/// An iterator over the subslices of the vector which are separated +/// by elements that match `pred`, starting from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplit_mut`]. +pub type RSplitMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RSplitMut<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a predicate +/// function, limited to a given number of splits. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::splitn_mut`]. +pub type SplitNMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::SplitNMut<'a, T, P>>; + +/// An iterator over subslices separated by elements that match a +/// predicate function, limited to a given number of splits, starting +/// from the end of the slice. +/// +/// This struct is created by [`UniqueEntityEquivalentSlice::rsplitn_mut`]. +pub type RSplitNMut<'a, P, T = Entity> = + UniqueEntityEquivalentSliceIterMut<'a, T, slice::RSplitNMut<'a, T, P>>; diff --git a/crates/bevy_ecs/src/entity/unique_vec.rs b/crates/bevy_ecs/src/entity/unique_vec.rs new file mode 100644 index 0000000000000..30f9984e70be7 --- /dev/null +++ b/crates/bevy_ecs/src/entity/unique_vec.rs @@ -0,0 +1,1114 @@ +//! A wrapper around entity [`Vec`]s with a uniqueness invariant. 
+ +use core::{ + borrow::{Borrow, BorrowMut}, + mem::MaybeUninit, + ops::{ + Bound, Deref, DerefMut, Index, IndexMut, Range, RangeBounds, RangeFrom, RangeFull, + RangeInclusive, RangeTo, RangeToInclusive, + }, +}; + +use alloc::{ + borrow::{Cow, ToOwned}, + boxed::Box, + collections::{BTreeSet, BinaryHeap, TryReserveError, VecDeque}, + rc::Rc, + vec::{self, Vec}, +}; + +use bevy_platform::sync::Arc; + +use super::{ + unique_slice::{self, UniqueEntityEquivalentSlice}, + Entity, EntityEquivalent, EntitySet, FromEntitySetIterator, UniqueEntityEquivalentArray, + UniqueEntityIter, +}; + +/// A `Vec` that contains only unique entities. +/// +/// "Unique" means that `x != y` holds for any 2 entities in this collection. +/// This is always true when less than 2 entities are present. +/// +/// This type is best obtained by its `FromEntitySetIterator` impl, via either +/// `EntityIterator::collect_set` or `UniqueEntityEquivalentVec::from_entity_iter`. +/// +/// While this type can be constructed via `Iterator::collect`, doing so is inefficient, +/// and not recommended. +/// +/// When `T` is [`Entity`], use the [`UniqueEntityVec`] alias. +#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +pub struct UniqueEntityEquivalentVec(Vec); + +/// A `Vec` that contains only unique [`Entity`]. +/// +/// This is the default case of a [`UniqueEntityEquivalentVec`]. +pub type UniqueEntityVec = UniqueEntityEquivalentVec; + +impl UniqueEntityEquivalentVec { + /// Constructs a new, empty `UniqueEntityEquivalentVec`. + /// + /// Equivalent to [`Vec::new`]. + pub const fn new() -> Self { + Self(Vec::new()) + } + + /// Constructs a new, empty `UniqueEntityEquivalentVec` with at least the specified capacity. + /// + /// Equivalent to [`Vec::with_capacity`] + pub fn with_capacity(capacity: usize) -> Self { + Self(Vec::with_capacity(capacity)) + } + + /// Creates a `UniqueEntityEquivalentVec` directly from a pointer, a length, and a capacity. 
+ /// + /// Equivalent to [`Vec::from_raw_parts`]. + /// + /// # Safety + /// + /// It must be safe to call [`Vec::from_raw_parts`] with these inputs, + /// and the resulting [`Vec`] must only contain unique elements. + pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self { + // SAFETY: Caller ensures it's safe to call `Vec::from_raw_parts` + Self(unsafe { Vec::from_raw_parts(ptr, length, capacity) }) + } + + /// Constructs a `UniqueEntityEquivalentVec` from a [`Vec`] unsafely. + /// + /// # Safety + /// + /// `vec` must contain only unique elements. + pub unsafe fn from_vec_unchecked(vec: Vec) -> Self { + Self(vec) + } + + /// Returns the inner [`Vec`]. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Returns a reference to the inner [`Vec`]. + pub fn as_vec(&self) -> &Vec { + &self.0 + } + + /// Returns a mutable reference to the inner [`Vec`]. + /// + /// # Safety + /// + /// The elements of this `Vec` must always remain unique, even while + /// this mutable reference is live. + pub unsafe fn as_mut_vec(&mut self) -> &mut Vec { + &mut self.0 + } + + /// Returns the total number of elements the vector can hold without + /// reallocating. + /// + /// Equivalent to [`Vec::capacity`]. + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the given `Vec`. + /// + /// Equivalent to [`Vec::reserve`]. + pub fn reserve(&mut self, additional: usize) { + self.0.reserve(additional); + } + + /// Reserves the minimum capacity for at least `additional` more elements to + /// be inserted in the given `UniqueEntityEquivalentVec`. + /// + /// Equivalent to [`Vec::reserve_exact`]. + pub fn reserve_exact(&mut self, additional: usize) { + self.0.reserve_exact(additional); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `Vec`. + /// + /// Equivalent to [`Vec::try_reserve`]. 
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.0.try_reserve(additional) + } + + /// Tries to reserve the minimum capacity for at least `additional` + /// elements to be inserted in the given `Vec`. + /// + /// Equivalent to [`Vec::try_reserve_exact`]. + pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.0.try_reserve_exact(additional) + } + + /// Shrinks the capacity of the vector as much as possible. + /// + /// Equivalent to [`Vec::shrink_to_fit`]. + pub fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit(); + } + + /// Shrinks the capacity of the vector with a lower bound. + /// + /// Equivalent to [`Vec::shrink_to`]. + pub fn shrink_to(&mut self, min_capacity: usize) { + self.0.shrink_to(min_capacity); + } + + /// Converts the vector into `Box>`. + pub fn into_boxed_slice(self) -> Box> { + // SAFETY: UniqueEntityEquivalentSlice is a transparent wrapper around [T]. + unsafe { + UniqueEntityEquivalentSlice::from_boxed_slice_unchecked(self.0.into_boxed_slice()) + } + } + + /// Extracts a slice containing the entire vector. + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + self + } + + /// Extracts a mutable slice of the entire vector. + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { + self + } + + /// Shortens the vector, keeping the first `len` elements and dropping + /// the rest. + /// + /// Equivalent to [`Vec::truncate`]. + pub fn truncate(&mut self, len: usize) { + self.0.truncate(len); + } + + /// Returns a raw pointer to the vector's buffer, or a dangling raw pointer + /// valid for zero sized reads if the vector didn't allocate. + /// + /// Equivalent to [`Vec::as_ptr`]. + pub fn as_ptr(&self) -> *const T { + self.0.as_ptr() + } + /// Returns a raw mutable pointer to the vector's buffer, or a dangling + /// raw pointer valid for zero sized reads if the vector didn't allocate. + /// + /// Equivalent to [`Vec::as_mut_ptr`]. 
+ pub fn as_mut_ptr(&mut self) -> *mut T { + self.0.as_mut_ptr() + } + + /// Forces the length of the vector to `new_len`. + /// + /// Equivalent to [`Vec::set_len`]. + /// + /// # Safety + /// + /// It must be safe to call [`Vec::set_len`] with these inputs, + /// and the resulting [`Vec`] must only contain unique elements. + pub unsafe fn set_len(&mut self, new_len: usize) { + // SAFETY: Caller ensures it's safe to call `Vec::set_len` + unsafe { self.0.set_len(new_len) }; + } + + /// Removes an element from the vector and returns it. + /// + /// Equivalent to [`Vec::swap_remove`]. + pub fn swap_remove(&mut self, index: usize) -> T { + self.0.swap_remove(index) + } + + /// Inserts an element at position `index` within the vector, shifting all + /// elements after it to the right. + /// + /// Equivalent to [`Vec::insert`]. + /// + /// # Safety + /// + /// No `T` contained by `self` may equal `element`. + pub unsafe fn insert(&mut self, index: usize, element: T) { + self.0.insert(index, element); + } + + /// Removes and returns the element at position `index` within the vector, + /// shifting all elements after it to the left. + /// + /// Equivalent to [`Vec::remove`]. + pub fn remove(&mut self, index: usize) -> T { + self.0.remove(index) + } + + /// Retains only the elements specified by the predicate. + /// + /// Equivalent to [`Vec::retain`]. + pub fn retain(&mut self, f: F) + where + F: FnMut(&T) -> bool, + { + self.0.retain(f); + } + + /// Retains only the elements specified by the predicate, passing a mutable reference to it. + /// + /// Equivalent to [`Vec::retain_mut`]. + /// + /// # Safety + /// + /// `self` must only contain unique elements after each individual execution of `f`. + pub unsafe fn retain_mut(&mut self, f: F) + where + F: FnMut(&mut T) -> bool, + { + self.0.retain_mut(f); + } + + /// Removes all but the first of consecutive elements in the vector that resolve to the same + /// key. + /// + /// Equivalent to [`Vec::dedup_by_key`]. 
+ /// + /// # Safety + /// + /// `self` must only contain unique elements after each individual execution of `key`. + pub unsafe fn dedup_by_key(&mut self, key: F) + where + F: FnMut(&mut T) -> K, + K: PartialEq, + { + self.0.dedup_by_key(key); + } + + /// Removes all but the first of consecutive elements in the vector satisfying a given equality + /// relation. + /// + /// Equivalent to [`Vec::dedup_by`]. + /// + /// # Safety + /// + /// `self` must only contain unique elements after each individual execution of `same_bucket`. + pub unsafe fn dedup_by(&mut self, same_bucket: F) + where + F: FnMut(&mut T, &mut T) -> bool, + { + self.0.dedup_by(same_bucket); + } + + /// Appends an element to the back of a collection. + /// + /// Equivalent to [`Vec::push`]. + /// + /// # Safety + /// + /// No `T` contained by `self` may equal `element`. + pub unsafe fn push(&mut self, value: T) { + self.0.push(value); + } + + /// Moves all the elements of `other` into `self`, leaving `other` empty. + /// + /// Equivalent to [`Vec::append`]. + /// + /// # Safety + /// + /// `other` must contain no elements that equal any element in `self`. + pub unsafe fn append(&mut self, other: &mut UniqueEntityEquivalentVec) { + self.0.append(&mut other.0); + } + + /// Removes the last element from a vector and returns it, or [`None`] if it + /// is empty. + /// + /// Equivalent to [`Vec::pop`]. + pub fn pop(&mut self) -> Option { + self.0.pop() + } + + /// Removes the specified range from the vector in bulk, returning all + /// removed elements as an iterator. + /// + /// Equivalent to [`Vec::drain`]. + pub fn drain(&mut self, range: R) -> Drain<'_, T> + where + R: RangeBounds, + { + // SAFETY: `self` and thus `range` contains only unique elements. + unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.drain(range)) } + } + + /// Clears the vector, removing all values. + /// + /// Equivalent to [`Vec::clear`]. 
+ pub fn clear(&mut self) { + self.0.clear(); + } + + /// Returns the number of elements in the vector, also referred to + /// as its 'length'. + /// + /// Equivalent to [`Vec::len`]. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the vector contains no elements. + /// + /// Equivalent to [`Vec::is_empty`]. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Splits the collection into two at the given index. + /// + /// Equivalent to [`Vec::split_off`]. + pub fn split_off(&mut self, at: usize) -> Self { + Self(self.0.split_off(at)) + } + + /// Resizes the `Vec` in-place so that `len` is equal to `new_len`. + /// + /// Equivalent to [`Vec::resize_with`]. + /// + /// # Safety + /// + /// `f` must only produce unique `T`, and none of these may equal any `T` in `self`. + pub unsafe fn resize_with(&mut self, new_len: usize, f: F) + where + F: FnMut() -> T, + { + self.0.resize_with(new_len, f); + } + + /// Consumes and leaks the Vec, returning a mutable reference to the contents, `&'a mut UniqueEntityEquivalentSlice`. + pub fn leak<'a>(self) -> &'a mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.leak()) } + } + + /// Returns the remaining spare capacity of the vector as a slice of + /// [`MaybeUninit`]. + /// + /// Equivalent to [`Vec::spare_capacity_mut`]. + pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit] { + self.0.spare_capacity_mut() + } + + /// Creates a splicing iterator that replaces the specified range in the vector + /// with the given `replace_with` iterator and yields the removed items. + /// + /// Equivalent to [`Vec::splice`]. + /// + /// # Safety + /// + /// `replace_with` must not yield any elements that equal any elements in `self`, + /// except for those in `range`. 
+ pub unsafe fn splice( + &mut self, + range: R, + replace_with: I, + ) -> Splice<'_, ::IntoIter> + where + R: RangeBounds, + I: EntitySet, + { + // SAFETY: `self` and thus `range` contains only unique elements. + unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.splice(range, replace_with)) } + } +} + +impl Default for UniqueEntityEquivalentVec { + fn default() -> Self { + Self(Vec::default()) + } +} + +impl Deref for UniqueEntityEquivalentVec { + type Target = UniqueEntityEquivalentSlice; + + fn deref(&self) -> &Self::Target { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(&self.0) } + } +} + +impl DerefMut for UniqueEntityEquivalentVec { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(&mut self.0) } + } +} + +impl<'a, T: EntityEquivalent> IntoIterator for &'a UniqueEntityEquivalentVec +where + &'a T: EntityEquivalent, +{ + type Item = &'a T; + + type IntoIter = unique_slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + // SAFETY: `self` contains only unique elements. + unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.iter()) } + } +} + +impl IntoIterator for UniqueEntityEquivalentVec { + type Item = T; + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + // SAFETY: `self` contains only unique elements. 
+ unsafe { UniqueEntityIter::from_iterator_unchecked(self.0.into_iter()) } + } +} + +impl AsMut for UniqueEntityEquivalentVec { + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentVec { + self + } +} + +impl AsMut> for UniqueEntityEquivalentVec { + fn as_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { + self + } +} + +impl AsRef for UniqueEntityEquivalentVec { + fn as_ref(&self) -> &Self { + self + } +} + +impl AsRef> for UniqueEntityEquivalentVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +impl Borrow> for UniqueEntityEquivalentVec { + fn borrow(&self) -> &Vec { + &self.0 + } +} + +impl AsRef<[T]> for UniqueEntityEquivalentVec { + fn as_ref(&self) -> &[T] { + &self.0 + } +} + +impl AsRef> for UniqueEntityEquivalentVec { + fn as_ref(&self) -> &UniqueEntityEquivalentSlice { + self + } +} + +impl Borrow<[T]> for UniqueEntityEquivalentVec { + fn borrow(&self) -> &[T] { + &self.0 + } +} + +impl Borrow> for UniqueEntityEquivalentVec { + fn borrow(&self) -> &UniqueEntityEquivalentSlice { + self + } +} + +impl BorrowMut> + for UniqueEntityEquivalentVec +{ + fn borrow_mut(&mut self) -> &mut UniqueEntityEquivalentSlice { + self + } +} + +impl, U> PartialEq> for UniqueEntityEquivalentVec { + fn eq(&self, other: &Vec) -> bool { + self.0.eq(other) + } +} + +impl, U> PartialEq<&[U]> for UniqueEntityEquivalentVec { + fn eq(&self, other: &&[U]) -> bool { + self.0.eq(other) + } +} + +impl, U: EntityEquivalent> + PartialEq<&UniqueEntityEquivalentSlice> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&UniqueEntityEquivalentSlice) -> bool { + self.0.eq(other) + } +} + +impl, U> PartialEq<&mut [U]> for UniqueEntityEquivalentVec { + fn eq(&self, other: &&mut [U]) -> bool { + self.0.eq(other) + } +} + +impl, U: EntityEquivalent> + PartialEq<&mut UniqueEntityEquivalentSlice> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentSlice) -> bool { + self.0.eq(other) + } +} + +impl, U, const N: usize> PartialEq<&[U; N]> + for 
UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&[U; N]) -> bool { + self.0.eq(other) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&UniqueEntityEquivalentArray> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&UniqueEntityEquivalentArray) -> bool { + self.0.eq(&other.as_inner()) + } +} + +impl, U, const N: usize> PartialEq<&mut [U; N]> + for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&mut [U; N]) -> bool { + self.0.eq(&**other) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq<&mut UniqueEntityEquivalentArray> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &&mut UniqueEntityEquivalentArray) -> bool { + self.0.eq(other.as_inner()) + } +} + +impl, U> PartialEq<[U]> for UniqueEntityEquivalentVec { + fn eq(&self, other: &[U]) -> bool { + self.0.eq(other) + } +} + +impl, U: EntityEquivalent> + PartialEq> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &UniqueEntityEquivalentSlice) -> bool { + self.0.eq(&**other) + } +} + +impl, U, const N: usize> PartialEq<[U; N]> + for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &[U; N]) -> bool { + self.0.eq(other) + } +} + +impl, U: EntityEquivalent, const N: usize> + PartialEq> for UniqueEntityEquivalentVec +{ + fn eq(&self, other: &UniqueEntityEquivalentArray) -> bool { + self.0.eq(other.as_inner()) + } +} + +impl, U: EntityEquivalent> PartialEq> for Vec { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> PartialEq> for &[T] { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> PartialEq> for &mut [T] { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> + PartialEq> for [T] +{ + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl + Clone, U: EntityEquivalent> PartialEq> + for Cow<'_, [T]> +{ + fn eq(&self, other: 
&UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl, U: EntityEquivalent> PartialEq> for VecDeque { + fn eq(&self, other: &UniqueEntityEquivalentVec) -> bool { + self.eq(&other.0) + } +} + +impl From<&UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentVec +{ + fn from(value: &UniqueEntityEquivalentSlice) -> Self { + value.to_vec() + } +} + +impl From<&mut UniqueEntityEquivalentSlice> + for UniqueEntityEquivalentVec +{ + fn from(value: &mut UniqueEntityEquivalentSlice) -> Self { + value.to_vec() + } +} + +impl From>> + for UniqueEntityEquivalentVec +{ + fn from(value: Box>) -> Self { + value.into_vec() + } +} + +impl From>> + for UniqueEntityEquivalentVec +where + UniqueEntityEquivalentSlice: ToOwned>, +{ + fn from(value: Cow>) -> Self { + value.into_owned() + } +} + +impl From<&[T; 1]> for UniqueEntityEquivalentVec { + fn from(value: &[T; 1]) -> Self { + Self(Vec::from(value)) + } +} + +impl From<&[T; 0]> for UniqueEntityEquivalentVec { + fn from(value: &[T; 0]) -> Self { + Self(Vec::from(value)) + } +} + +impl From<&mut [T; 1]> for UniqueEntityEquivalentVec { + fn from(value: &mut [T; 1]) -> Self { + Self(Vec::from(value)) + } +} + +impl From<&mut [T; 0]> for UniqueEntityEquivalentVec { + fn from(value: &mut [T; 0]) -> Self { + Self(Vec::from(value)) + } +} + +impl From<[T; 1]> for UniqueEntityEquivalentVec { + fn from(value: [T; 1]) -> Self { + Self(Vec::from(value)) + } +} + +impl From<[T; 0]> for UniqueEntityEquivalentVec { + fn from(value: [T; 0]) -> Self { + Self(Vec::from(value)) + } +} + +impl From<&UniqueEntityEquivalentArray> + for UniqueEntityEquivalentVec +{ + fn from(value: &UniqueEntityEquivalentArray) -> Self { + Self(Vec::from(value.as_inner().clone())) + } +} + +impl From<&mut UniqueEntityEquivalentArray> + for UniqueEntityEquivalentVec +{ + fn from(value: &mut UniqueEntityEquivalentArray) -> Self { + Self(Vec::from(value.as_inner().clone())) + } +} + +impl From> + for UniqueEntityEquivalentVec +{ + fn from(value: 
UniqueEntityEquivalentArray) -> Self { + Self(Vec::from(value.into_inner())) + } +} + +impl From> for Vec { + fn from(value: UniqueEntityEquivalentVec) -> Self { + value.0 + } +} + +impl<'a, T: EntityEquivalent + Clone> From> for Cow<'a, [T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { + Cow::from(value.0) + } +} + +impl<'a, T: EntityEquivalent + Clone> From> + for Cow<'a, UniqueEntityEquivalentSlice> +{ + fn from(value: UniqueEntityEquivalentVec) -> Self { + Cow::Owned(value) + } +} + +impl From> for Arc<[T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { + Arc::from(value.0) + } +} + +impl From> + for Arc> +{ + fn from(value: UniqueEntityEquivalentVec) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_arc_slice_unchecked(Arc::from(value.0)) } + } +} + +impl From> for BinaryHeap { + fn from(value: UniqueEntityEquivalentVec) -> Self { + BinaryHeap::from(value.0) + } +} + +impl From> for Box<[T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { + Box::from(value.0) + } +} + +impl From> for Rc<[T]> { + fn from(value: UniqueEntityEquivalentVec) -> Self { + Rc::from(value.0) + } +} + +impl From> + for Rc> +{ + fn from(value: UniqueEntityEquivalentVec) -> Self { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_rc_slice_unchecked(Rc::from(value.0)) } + } +} + +impl From> for VecDeque { + fn from(value: UniqueEntityEquivalentVec) -> Self { + VecDeque::from(value.0) + } +} + +impl TryFrom> for Box<[T; N]> { + type Error = UniqueEntityEquivalentVec; + + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + Box::try_from(value.0).map_err(UniqueEntityEquivalentVec) + } +} + +impl TryFrom> + for Box> +{ + type Error = UniqueEntityEquivalentVec; + + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + Box::try_from(value.0) + .map(|v| + // SAFETY: All elements in the original Vec are unique. 
+ unsafe { UniqueEntityEquivalentArray::from_boxed_array_unchecked(v) }) + .map_err(UniqueEntityEquivalentVec) + } +} + +impl TryFrom> for [T; N] { + type Error = UniqueEntityEquivalentVec; + + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + <[T; N] as TryFrom>>::try_from(value.0).map_err(UniqueEntityEquivalentVec) + } +} + +impl TryFrom> + for UniqueEntityEquivalentArray +{ + type Error = UniqueEntityEquivalentVec; + + fn try_from(value: UniqueEntityEquivalentVec) -> Result { + <[T; N] as TryFrom>>::try_from(value.0) + .map(|v| + // SAFETY: All elements in the original Vec are unique. + unsafe { UniqueEntityEquivalentArray::from_array_unchecked(v) }) + .map_err(UniqueEntityEquivalentVec) + } +} + +impl From> for UniqueEntityEquivalentVec { + fn from(value: BTreeSet) -> Self { + Self(value.into_iter().collect::>()) + } +} + +impl FromIterator for UniqueEntityEquivalentVec { + /// This impl only uses `Eq` to validate uniqueness, resulting in O(n^2) complexity. + /// It can make sense for very low N, or if `T` implements neither `Ord` nor `Hash`. + /// When possible, use `FromEntitySetIterator::from_entity_iter` instead. + fn from_iter>(iter: I) -> Self { + // Matches the `HashSet::from_iter` reservation logic. + let iter = iter.into_iter(); + let unique_vec = Self::with_capacity(iter.size_hint().0); + // Internal iteration (fold/for_each) is known to result in better code generation + // over a for loop. + iter.fold(unique_vec, |mut unique_vec, item| { + if !unique_vec.0.contains(&item) { + unique_vec.0.push(item); + } + unique_vec + }) + } +} + +impl FromEntitySetIterator for UniqueEntityEquivalentVec { + fn from_entity_set_iter>(iter: I) -> Self { + // SAFETY: `iter` is an `EntitySet`. + unsafe { Self::from_vec_unchecked(Vec::from_iter(iter)) } + } +} + +impl Extend for UniqueEntityEquivalentVec { + /// Use with caution, because this impl only uses `Eq` to validate uniqueness, + /// resulting in O(n^2) complexity. 
+ /// It can make sense for very low N, or if `T` implements neither `Ord` nor `Hash`. + fn extend>(&mut self, iter: I) { + // Matches the `HashSet::extend` reservation logic. Their reasoning: + // "Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case." + let iter = iter.into_iter(); + let reserve = if self.is_empty() { + iter.size_hint().0 + } else { + iter.size_hint().0.div_ceil(2) + }; + self.reserve(reserve); + // Internal iteration (fold/for_each) is known to result in better code generation + // over a for loop. + iter.for_each(move |item| { + if !self.0.contains(&item) { + self.0.push(item); + } + }); + } +} + +impl<'a, T: EntityEquivalent + Copy + 'a> Extend<&'a T> for UniqueEntityEquivalentVec { + /// Use with caution, because this impl only uses `Eq` to validate uniqueness, + /// resulting in O(n^2) complexity. + /// It can make sense for very low N, or if `T` implements neither `Ord` nor `Hash`. + fn extend>(&mut self, iter: I) { + // Matches the `HashSet::extend` reservation logic. Their reasoning: + // "Keys may be already present or show multiple times in the iterator. + // Reserve the entire hint lower bound if the map is empty. + // Otherwise reserve half the hint (rounded up), so the map + // will only resize twice in the worst case." + let iter = iter.into_iter(); + let reserve = if self.is_empty() { + iter.size_hint().0 + } else { + iter.size_hint().0.div_ceil(2) + }; + self.reserve(reserve); + // Internal iteration (fold/for_each) is known to result in better code generation + // over a for loop. 
+ iter.for_each(move |item| { + if !self.0.contains(item) { + self.0.push(*item); + } + }); + } +} + +impl Index<(Bound, Bound)> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: (Bound, Bound)) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: Range) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeFrom) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeFull) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeInclusive) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeTo) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index> for UniqueEntityEquivalentVec { + type Output = UniqueEntityEquivalentSlice; + fn index(&self, key: RangeToInclusive) -> &Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.0.index(key)) } + } +} + +impl Index for UniqueEntityEquivalentVec { + type Output = T; + fn index(&self, key: usize) -> &T { + self.0.index(key) + } +} + +impl IndexMut<(Bound, Bound)> for UniqueEntityEquivalentVec { + fn index_mut(&mut self, key: (Bound, Bound)) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentVec { + fn index_mut(&mut self, key: Range) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentVec { + fn index_mut(&mut self, key: RangeFrom) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut for UniqueEntityEquivalentVec { + fn index_mut(&mut self, key: RangeFull) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentVec { + fn index_mut(&mut self, key: RangeInclusive) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. 
+ unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentVec { + fn index_mut(&mut self, key: RangeTo) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +impl IndexMut> for UniqueEntityEquivalentVec { + fn index_mut(&mut self, key: RangeToInclusive) -> &mut Self::Output { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked_mut(self.0.index_mut(key)) } + } +} + +/// An iterator that moves out of a vector. +/// +/// This `struct` is created by the [`IntoIterator::into_iter`] trait +/// method on [`UniqueEntityEquivalentVec`]. +pub type IntoIter = UniqueEntityIter>; + +impl UniqueEntityIter> { + /// Returns the remaining items of this iterator as a slice. + /// + /// Equivalent to [`vec::IntoIter::as_slice`]. + pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } + } + + /// Returns the remaining items of this iterator as a mutable slice. + /// + /// Equivalent to [`vec::IntoIter::as_mut_slice`]. + pub fn as_mut_slice(&mut self) -> &mut UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { + UniqueEntityEquivalentSlice::from_slice_unchecked_mut( + self.as_mut_inner().as_mut_slice(), + ) + } + } +} + +/// A draining iterator for [`UniqueEntityEquivalentVec`]. +/// +/// This struct is created by [`UniqueEntityEquivalentVec::drain`]. +/// See its documentation for more. +pub type Drain<'a, T = Entity> = UniqueEntityIter>; + +impl<'a, T: EntityEquivalent> UniqueEntityIter> { + /// Returns the remaining items of this iterator as a slice. + /// + /// Equivalent to [`vec::Drain::as_slice`]. 
+ pub fn as_slice(&self) -> &UniqueEntityEquivalentSlice { + // SAFETY: All elements in the original slice are unique. + unsafe { UniqueEntityEquivalentSlice::from_slice_unchecked(self.as_inner().as_slice()) } + } +} + +/// A splicing iterator for [`UniqueEntityEquivalentVec`]. +/// +/// This struct is created by [`UniqueEntityEquivalentVec::splice`]. +/// See its documentation for more. +pub type Splice<'a, I> = UniqueEntityIter>; diff --git a/crates/bevy_ecs/src/entity/visit_entities.rs b/crates/bevy_ecs/src/entity/visit_entities.rs deleted file mode 100644 index abce76853d403..0000000000000 --- a/crates/bevy_ecs/src/entity/visit_entities.rs +++ /dev/null @@ -1,150 +0,0 @@ -pub use bevy_ecs_macros::{VisitEntities, VisitEntitiesMut}; - -use crate::entity::Entity; - -/// Apply an operation to all entities in a container. -/// -/// This is implemented by default for types that implement [`IntoIterator`]. -/// -/// It may be useful to implement directly for types that can't produce an -/// iterator for lifetime reasons, such as those involving internal mutexes. -pub trait VisitEntities { - /// Apply an operation to all contained entities. - fn visit_entities(&self, f: F); -} - -impl VisitEntities for T -where - for<'a> &'a T: IntoIterator, -{ - fn visit_entities(&self, f: F) { - self.into_iter().copied().for_each(f); - } -} - -impl VisitEntities for Entity { - fn visit_entities(&self, mut f: F) { - f(*self); - } -} - -/// Apply an operation to mutable references to all entities in a container. -/// -/// This is implemented by default for types that implement [`IntoIterator`]. -/// -/// It may be useful to implement directly for types that can't produce an -/// iterator for lifetime reasons, such as those involving internal mutexes. -pub trait VisitEntitiesMut: VisitEntities { - /// Apply an operation to mutable references to all contained entities. 
- fn visit_entities_mut(&mut self, f: F); -} - -impl VisitEntitiesMut for T -where - for<'a> &'a mut T: IntoIterator, -{ - fn visit_entities_mut(&mut self, f: F) { - self.into_iter().for_each(f); - } -} - -impl VisitEntitiesMut for Entity { - fn visit_entities_mut(&mut self, mut f: F) { - f(self); - } -} - -#[cfg(test)] -mod tests { - use crate::{ - self as bevy_ecs, - entity::{EntityHashMap, MapEntities, SceneEntityMapper}, - world::World, - }; - use bevy_utils::HashSet; - - use super::*; - - #[derive(VisitEntities, Debug, PartialEq)] - struct Foo { - ordered: Vec, - unordered: HashSet, - single: Entity, - #[allow(dead_code)] - #[visit_entities(ignore)] - not_an_entity: String, - } - - // Need a manual impl since VisitEntitiesMut isn't implemented for `HashSet`. - // We don't expect users to actually do this - it's only for test purposes - // to prove out the automatic `MapEntities` impl we get with `VisitEntitiesMut`. - impl VisitEntitiesMut for Foo { - fn visit_entities_mut(&mut self, mut f: F) { - self.ordered.visit_entities_mut(&mut f); - self.unordered = self - .unordered - .drain() - .map(|mut entity| { - f(&mut entity); - entity - }) - .collect(); - f(&mut self.single); - } - } - - #[test] - fn visit_entities() { - let mut world = World::new(); - let entities = world.entities(); - let mut foo = Foo { - ordered: vec![entities.reserve_entity(), entities.reserve_entity()], - unordered: [ - entities.reserve_entity(), - entities.reserve_entity(), - entities.reserve_entity(), - ] - .into_iter() - .collect(), - single: entities.reserve_entity(), - not_an_entity: "Bar".into(), - }; - - let mut entity_map = EntityHashMap::::default(); - let mut remapped = Foo { - ordered: vec![], - unordered: HashSet::default(), - single: Entity::PLACEHOLDER, - not_an_entity: foo.not_an_entity.clone(), - }; - - // Note: this assumes that the VisitEntities derive is field-ordered, - // which isn't explicitly stated/guaranteed. 
- // If that changes, this test will fail, but that might be OK if - // we're intentionally breaking that assumption. - let mut i = 0; - foo.visit_entities(|entity| { - let new_entity = entities.reserve_entity(); - if i < foo.ordered.len() { - assert_eq!(entity, foo.ordered[i]); - remapped.ordered.push(new_entity); - } else if i < foo.ordered.len() + foo.unordered.len() { - assert!(foo.unordered.contains(&entity)); - remapped.unordered.insert(new_entity); - } else { - assert_eq!(entity, foo.single); - remapped.single = new_entity; - } - - entity_map.insert(entity, new_entity); - - i += 1; - }); - - SceneEntityMapper::world_scope(&mut entity_map, &mut world, |_, mapper| { - foo.map_entities(mapper); - }); - - assert_eq!(foo, remapped); - } -} diff --git a/crates/bevy_ecs/src/entity_disabling.rs b/crates/bevy_ecs/src/entity_disabling.rs new file mode 100644 index 0000000000000..5d62011174dac --- /dev/null +++ b/crates/bevy_ecs/src/entity_disabling.rs @@ -0,0 +1,307 @@ +//! Disabled entities do not show up in queries unless the query explicitly mentions them. +//! +//! Entities which are disabled in this way are not removed from the [`World`], +//! and their relationships remain intact. +//! In many cases, you may want to disable entire trees of entities at once, +//! using [`EntityCommands::insert_recursive`](crate::prelude::EntityCommands::insert_recursive). +//! +//! While Bevy ships with a built-in [`Disabled`] component, you can also create your own +//! disabling components, which will operate in the same way but can have distinct semantics. +//! +//! ``` +//! use bevy_ecs::prelude::*; +//! +//! // Our custom disabling component! +//! #[derive(Component, Clone)] +//! struct Prefab; +//! +//! #[derive(Component)] +//! struct A; +//! +//! let mut world = World::new(); +//! world.register_disabling_component::(); +//! world.spawn((A, Prefab)); +//! world.spawn((A,)); +//! world.spawn((A,)); +//! +//! let mut normal_query = world.query::<&A>(); +//! 
assert_eq!(2, normal_query.iter(&world).count()); +//! +//! let mut prefab_query = world.query_filtered::<&A, With>(); +//! assert_eq!(1, prefab_query.iter(&world).count()); +//! +//! let mut maybe_prefab_query = world.query::<(&A, Has)>(); +//! assert_eq!(3, maybe_prefab_query.iter(&world).count()); +//! ``` +//! +//! ## Default query filters +//! +//! In Bevy, entity disabling is implemented through the construction of a global "default query filter". +//! Queries which do not explicitly mention the disabled component will not include entities with that component. +//! If an entity has multiple disabling components, it will only be included in queries that mention all of them. +//! +//! For example, `Query<&Position>` will not include entities with the [`Disabled`] component, +//! even if they have a `Position` component, +//! but `Query<&Position, With>` or `Query<(&Position, Has)>` will see them. +//! +//! Entities with disabling components are still present in the [`World`] and can be accessed directly, +//! using methods on [`World`] or [`Commands`](crate::prelude::Commands). +//! +//! ### Warnings +//! +//! Currently, only queries for which the cache is built after enabling a default query filter will have entities +//! with those components filtered. As a result, they should generally only be modified before the +//! app starts. +//! +//! Because filters are applied to all queries, they can have performance implications for +//! the entire [`World`], especially when they cause queries to mix sparse and table components. +//! See [`Query` performance] for more info. +//! +//! Custom disabling components can cause significant interoperability issues within the ecosystem, +//! as users must be aware of each disabling component in use. +//! Libraries should think carefully about whether they need to use a new disabling component, +//! and clearly communicate their presence to their users to avoid the need for library compatibility flags. +//! +//!
[`With`]: crate::prelude::With +//! [`Has`]: crate::prelude::Has +//! [`World`]: crate::prelude::World +//! [`Query` performance]: crate::prelude::Query#performance + +use crate::{ + component::{ComponentId, Components, StorageType}, + query::FilteredAccess, + world::{FromWorld, World}, +}; +use bevy_ecs_macros::{Component, Resource}; +use smallvec::SmallVec; + +#[cfg(feature = "bevy_reflect")] +use { + crate::reflect::ReflectComponent, bevy_reflect::std_traits::ReflectDefault, + bevy_reflect::Reflect, +}; + +/// A marker component for disabled entities. +/// +/// Semantically, this component is used to mark entities that are temporarily disabled (typically for gameplay reasons), +/// but will likely be re-enabled at some point. +/// +/// Like all disabling components, this only disables the entity itself, +/// not its children or other entities that reference it. +/// To disable an entire tree of entities, use [`EntityCommands::insert_recursive`](crate::prelude::EntityCommands::insert_recursive). +/// +/// Every [`World`] has a default query filter that excludes entities with this component, +/// registered in the [`DefaultQueryFilters`] resource. +/// See [the module docs] for more info. +/// +/// [the module docs]: crate::entity_disabling +#[derive(Component, Clone, Debug, Default)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Component), + reflect(Debug, Clone, Default) +)] +// This component is registered as a disabling component during World::bootstrap +pub struct Disabled; + +/// Default query filters work by excluding entities with certain components from most queries. +/// +/// If a query does not explicitly mention a given disabling component, it will not include entities with that component. +/// To be more precise, this checks if the query's [`FilteredAccess`] contains the component, +/// and if it does not, adds a [`Without`](crate::prelude::Without) filter for that component to the query. 
+/// +/// This resource is initialized in the [`World`] whenever a new world is created, +/// with the [`Disabled`] component as a disabling component. +/// +/// Note that you can remove default query filters by overwriting the [`DefaultQueryFilters`] resource. +/// This can be useful as a last resort escape hatch, but is liable to break compatibility with other libraries. +/// +/// See the [module docs](crate::entity_disabling) for more info. +/// +/// +/// # Warning +/// +/// Default query filters are a global setting that affects all queries in the [`World`], +/// and incur a small performance cost for each query. +/// +/// They can cause significant interoperability issues within the ecosystem, +/// as users must be aware of each disabling component in use. +/// +/// Think carefully about whether you need to use a new disabling component, +/// and clearly communicate their presence in any libraries you publish. +#[derive(Resource, Debug)] +#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] +pub struct DefaultQueryFilters { + // We only expect a few components per application to act as disabling components, so we use a SmallVec here + // to avoid heap allocation in most cases. + disabling: SmallVec<[ComponentId; 4]>, +} + +impl FromWorld for DefaultQueryFilters { + fn from_world(world: &mut World) -> Self { + let mut filters = DefaultQueryFilters::empty(); + let disabled_component_id = world.register_component::(); + filters.register_disabling_component(disabled_component_id); + filters + } +} + +impl DefaultQueryFilters { + /// Creates a new, completely empty [`DefaultQueryFilters`]. + /// + /// This is provided as an escape hatch; in most cases you should initialize this using [`FromWorld`], + /// which is automatically called when creating a new [`World`]. 
+ #[must_use] + pub fn empty() -> Self { + DefaultQueryFilters { + disabling: SmallVec::new(), + } + } + + /// Adds this [`ComponentId`] to the set of [`DefaultQueryFilters`], + /// causing entities with this component to be excluded from queries. + /// + /// This method is idempotent, and will not add the same component multiple times. + /// + /// # Warning + /// + /// This method should only be called before the app starts, as it will not affect queries + /// initialized before it is called. + /// + /// As discussed in the [module docs](crate::entity_disabling), this can have performance implications, + /// as well as create interoperability issues, and should be used with caution. + pub fn register_disabling_component(&mut self, component_id: ComponentId) { + if !self.disabling.contains(&component_id) { + self.disabling.push(component_id); + } + } + + /// Get an iterator over all of the components which disable entities when present. + pub fn disabling_ids(&self) -> impl Iterator + use<'_> { + self.disabling.iter().copied() + } + + /// Modifies the provided [`FilteredAccess`] to include the filters from this [`DefaultQueryFilters`]. 
+ pub(super) fn modify_access(&self, component_access: &mut FilteredAccess) { + for component_id in self.disabling_ids() { + if !component_access.contains(component_id) { + component_access.and_without(component_id); + } + } + } + + pub(super) fn is_dense(&self, components: &Components) -> bool { + self.disabling_ids().all(|component_id| { + components + .get_info(component_id) + .is_some_and(|info| info.storage_type() == StorageType::Table) + }) + } +} + +#[cfg(test)] +mod tests { + + use super::*; + use crate::{ + prelude::World, + query::{Has, With}, + }; + use alloc::{vec, vec::Vec}; + + #[test] + fn filters_modify_access() { + let mut filters = DefaultQueryFilters::empty(); + filters.register_disabling_component(ComponentId::new(1)); + + // A component access with an unrelated component + let mut component_access = FilteredAccess::::default(); + component_access + .access_mut() + .add_component_read(ComponentId::new(2)); + + let mut applied_access = component_access.clone(); + filters.modify_access(&mut applied_access); + assert_eq!(0, applied_access.with_filters().count()); + assert_eq!( + vec![ComponentId::new(1)], + applied_access.without_filters().collect::>() + ); + + // We add a with filter, now we expect to see both filters + component_access.and_with(ComponentId::new(4)); + + let mut applied_access = component_access.clone(); + filters.modify_access(&mut applied_access); + assert_eq!( + vec![ComponentId::new(4)], + applied_access.with_filters().collect::>() + ); + assert_eq!( + vec![ComponentId::new(1)], + applied_access.without_filters().collect::>() + ); + + let copy = component_access.clone(); + // We add a rule targeting a default component, that filter should no longer be added + component_access.and_with(ComponentId::new(1)); + + let mut applied_access = component_access.clone(); + filters.modify_access(&mut applied_access); + assert_eq!( + vec![ComponentId::new(1), ComponentId::new(4)], + applied_access.with_filters().collect::>() + ); + 
assert_eq!(0, applied_access.without_filters().count()); + + // Archetypal access should also filter rules + component_access = copy.clone(); + component_access + .access_mut() + .add_archetypal(ComponentId::new(1)); + + let mut applied_access = component_access.clone(); + filters.modify_access(&mut applied_access); + assert_eq!( + vec![ComponentId::new(4)], + applied_access.with_filters().collect::>() + ); + assert_eq!(0, applied_access.without_filters().count()); + } + + #[derive(Component)] + struct CustomDisabled; + + #[test] + fn multiple_disabling_components() { + let mut world = World::new(); + world.register_disabling_component::(); + + world.spawn_empty(); + world.spawn(Disabled); + world.spawn(CustomDisabled); + world.spawn((Disabled, CustomDisabled)); + + let mut query = world.query::<()>(); + assert_eq!(1, query.iter(&world).count()); + + let mut query = world.query_filtered::<(), With>(); + assert_eq!(1, query.iter(&world).count()); + + let mut query = world.query::>(); + assert_eq!(2, query.iter(&world).count()); + + let mut query = world.query_filtered::<(), With>(); + assert_eq!(1, query.iter(&world).count()); + + let mut query = world.query::>(); + assert_eq!(2, query.iter(&world).count()); + + let mut query = world.query_filtered::<(), (With, With)>(); + assert_eq!(1, query.iter(&world).count()); + + let mut query = world.query::<(Has, Has)>(); + assert_eq!(4, query.iter(&world).count()); + } +} diff --git a/crates/bevy_ecs/src/error/bevy_error.rs b/crates/bevy_ecs/src/error/bevy_error.rs new file mode 100644 index 0000000000000..0686e68f1db69 --- /dev/null +++ b/crates/bevy_ecs/src/error/bevy_error.rs @@ -0,0 +1,249 @@ +use alloc::boxed::Box; +use core::{ + error::Error, + fmt::{Debug, Display}, +}; + +/// The built in "universal" Bevy error type. This has a blanket [`From`] impl for any type that implements Rust's [`Error`], +/// meaning it can be used as a "catch all" error. 
+/// +/// # Backtraces +/// +/// When used with the `backtrace` Cargo feature, it will capture a backtrace when the error is constructed (generally in the [`From`] impl]). +/// When printed, the backtrace will be displayed. By default, the backtrace will be trimmed down to filter out noise. To see the full backtrace, +/// set the `BEVY_BACKTRACE=full` environment variable. +/// +/// # Usage +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// +/// fn fallible_system() -> Result<(), BevyError> { +/// // This will result in Rust's built-in ParseIntError, which will automatically +/// // be converted into a BevyError. +/// let parsed: usize = "I am not a number".parse()?; +/// Ok(()) +/// } +/// ``` +pub struct BevyError { + inner: Box, +} + +impl BevyError { + /// Attempts to downcast the internal error to the given type. + pub fn downcast_ref(&self) -> Option<&E> { + self.inner.error.downcast_ref::() + } + + fn format_backtrace(&self, _f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + #[cfg(feature = "backtrace")] + { + let f = _f; + let backtrace = &self.inner.backtrace; + if let std::backtrace::BacktraceStatus::Captured = backtrace.status() { + let full_backtrace = std::env::var("BEVY_BACKTRACE").is_ok_and(|val| val == "full"); + + let backtrace_str = alloc::string::ToString::to_string(backtrace); + let mut skip_next_location_line = false; + for line in backtrace_str.split('\n') { + if !full_backtrace { + if skip_next_location_line { + if line.starts_with(" at") { + continue; + } + skip_next_location_line = false; + } + if line.contains("std::backtrace_rs::backtrace::") { + skip_next_location_line = true; + continue; + } + if line.contains("std::backtrace::Backtrace::") { + skip_next_location_line = true; + continue; + } + if line.contains(">::from") { + skip_next_location_line = true; + continue; + } + if line.contains(" as core::ops::try_trait::FromResidual>>::from_residual") { + skip_next_location_line = true; + continue; + } + if 
line.contains("__rust_begin_short_backtrace") { + break; + } + if line.contains("bevy_ecs::observer::Observers::invoke::{{closure}}") { + break; + } + } + writeln!(f, "{}", line)?; + } + if !full_backtrace { + if std::thread::panicking() { + SKIP_NORMAL_BACKTRACE.set(true); + } + writeln!(f, "{FILTER_MESSAGE}")?; + } + } + } + Ok(()) + } +} + +/// This type exists (rather than having a `BevyError(Box, + #[cfg(feature = "backtrace")] + backtrace: std::backtrace::Backtrace, +} + +// NOTE: writing the impl this way gives us From<&str> ... nice! +impl From for BevyError +where + Box: From, +{ + #[cold] + fn from(error: E) -> Self { + BevyError { + inner: Box::new(InnerBevyError { + error: error.into(), + #[cfg(feature = "backtrace")] + backtrace: std::backtrace::Backtrace::capture(), + }), + } + } +} + +impl Display for BevyError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "{}", self.inner.error)?; + self.format_backtrace(f)?; + Ok(()) + } +} + +impl Debug for BevyError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + writeln!(f, "{:?}", self.inner.error)?; + self.format_backtrace(f)?; + Ok(()) + } +} + +#[cfg(feature = "backtrace")] +const FILTER_MESSAGE: &str = "note: Some \"noisy\" backtrace lines have been filtered out. Run with `BEVY_BACKTRACE=full` for a verbose backtrace."; + +#[cfg(feature = "backtrace")] +std::thread_local! { + static SKIP_NORMAL_BACKTRACE: core::cell::Cell = + const { core::cell::Cell::new(false) }; +} + +/// When called, this will skip the currently configured panic hook when a [`BevyError`] backtrace has already been printed. 
+#[cfg(feature = "backtrace")] +#[expect(clippy::print_stdout, reason = "Allowed behind `std` feature gate.")] +pub fn bevy_error_panic_hook( + current_hook: impl Fn(&std::panic::PanicHookInfo), +) -> impl Fn(&std::panic::PanicHookInfo) { + move |info| { + if SKIP_NORMAL_BACKTRACE.replace(false) { + if let Some(payload) = info.payload().downcast_ref::<&str>() { + std::println!("{payload}"); + } else if let Some(payload) = info.payload().downcast_ref::() { + std::println!("{payload}"); + } + return; + } + + current_hook(info); + } +} + +#[cfg(test)] +mod tests { + + #[test] + #[cfg(not(miri))] // miri backtraces are weird + #[cfg(not(windows))] // the windows backtrace in this context is ... unhelpful and not worth testing + fn filtered_backtrace_test() { + fn i_fail() -> crate::error::Result { + let _: usize = "I am not a number".parse()?; + Ok(()) + } + + // SAFETY: this is not safe ... this test could run in parallel with another test + // that writes the environment variable. We either accept that so we can write this test, + // or we don't. + + unsafe { std::env::set_var("RUST_BACKTRACE", "1") }; + + let error = i_fail().err().unwrap(); + let debug_message = alloc::format!("{error:?}"); + let mut lines = debug_message.lines().peekable(); + assert_eq!( + "ParseIntError { kind: InvalidDigit }", + lines.next().unwrap() + ); + + // On mac backtraces can start with Backtrace::create + let mut skip = false; + if let Some(line) = lines.peek() { + if &line[6..] 
== "std::backtrace::Backtrace::create" { + skip = true; + } + } + + if skip { + lines.next().unwrap(); + } + + let expected_lines = alloc::vec![ + "bevy_ecs::error::bevy_error::tests::filtered_backtrace_test::i_fail", + "bevy_ecs::error::bevy_error::tests::filtered_backtrace_test", + "bevy_ecs::error::bevy_error::tests::filtered_backtrace_test::{{closure}}", + "core::ops::function::FnOnce::call_once", + ]; + + for expected in expected_lines { + let line = lines.next().unwrap(); + assert_eq!(&line[6..], expected); + let mut skip = false; + if let Some(line) = lines.peek() { + if line.starts_with(" at") { + skip = true; + } + } + + if skip { + lines.next().unwrap(); + } + } + + // on linux there is a second call_once + let mut skip = false; + if let Some(line) = lines.peek() { + if &line[6..] == "core::ops::function::FnOnce::call_once" { + skip = true; + } + } + if skip { + lines.next().unwrap(); + } + let mut skip = false; + if let Some(line) = lines.peek() { + if line.starts_with(" at") { + skip = true; + } + } + + if skip { + lines.next().unwrap(); + } + assert_eq!(super::FILTER_MESSAGE, lines.next().unwrap()); + assert!(lines.next().is_none()); + } +} diff --git a/crates/bevy_ecs/src/error/command_handling.rs b/crates/bevy_ecs/src/error/command_handling.rs new file mode 100644 index 0000000000000..d85ad4a87e551 --- /dev/null +++ b/crates/bevy_ecs/src/error/command_handling.rs @@ -0,0 +1,120 @@ +use core::{any::type_name, fmt}; + +use crate::{ + entity::Entity, + never::Never, + system::{entity_command::EntityCommandError, Command, EntityCommand}, + world::{error::EntityMutableFetchError, World}, +}; + +use super::{default_error_handler, BevyError, ErrorContext}; + +/// Takes a [`Command`] that returns a Result and uses a given error handler function to convert it into +/// a [`Command`] that internally handles an error if it occurs and returns `()`. 
+pub trait HandleError { + /// Takes a [`Command`] that returns a Result and uses a given error handler function to convert it into + /// a [`Command`] that internally handles an error if it occurs and returns `()`. + fn handle_error_with(self, error_handler: fn(BevyError, ErrorContext)) -> impl Command; + /// Takes a [`Command`] that returns a Result and uses the default error handler function to convert it into + /// a [`Command`] that internally handles an error if it occurs and returns `()`. + fn handle_error(self) -> impl Command + where + Self: Sized, + { + self.handle_error_with(default_error_handler()) + } +} + +impl HandleError> for C +where + C: Command>, + E: Into, +{ + fn handle_error_with(self, error_handler: fn(BevyError, ErrorContext)) -> impl Command { + move |world: &mut World| match self.apply(world) { + Ok(_) => {} + Err(err) => (error_handler)( + err.into(), + ErrorContext::Command { + name: type_name::().into(), + }, + ), + } + } +} + +impl HandleError for C +where + C: Command, +{ + fn handle_error_with(self, _error_handler: fn(BevyError, ErrorContext)) -> impl Command { + move |world: &mut World| { + self.apply(world); + } + } +} + +impl HandleError for C +where + C: Command, +{ + #[inline] + fn handle_error_with(self, _error_handler: fn(BevyError, ErrorContext)) -> impl Command { + self + } + #[inline] + fn handle_error(self) -> impl Command + where + Self: Sized, + { + self + } +} + +/// Passes in a specific entity to an [`EntityCommand`], resulting in a [`Command`] that +/// internally runs the [`EntityCommand`] on that entity. +/// +// NOTE: This is a separate trait from `EntityCommand` because "result-returning entity commands" and +// "non-result returning entity commands" require different implementations, so they cannot be automatically +// implemented. And this isn't the type of implementation that we want to thrust on people implementing +// EntityCommand. 
+pub trait CommandWithEntity { + /// Passes in a specific entity to an [`EntityCommand`], resulting in a [`Command`] that + /// internally runs the [`EntityCommand`] on that entity. + fn with_entity(self, entity: Entity) -> impl Command + HandleError; +} + +impl CommandWithEntity> for C +where + C: EntityCommand, +{ + fn with_entity( + self, + entity: Entity, + ) -> impl Command> + + HandleError> { + move |world: &mut World| -> Result<(), EntityMutableFetchError> { + let entity = world.get_entity_mut(entity)?; + self.apply(entity); + Ok(()) + } + } +} + +impl CommandWithEntity>> for C +where + C: EntityCommand>, + Err: fmt::Debug + fmt::Display + Send + Sync + 'static, +{ + fn with_entity( + self, + entity: Entity, + ) -> impl Command>> + HandleError>> + { + move |world: &mut World| { + let entity = world.get_entity_mut(entity)?; + self.apply(entity) + .map_err(EntityCommandError::CommandFailed) + } + } +} diff --git a/crates/bevy_ecs/src/error/handler.rs b/crates/bevy_ecs/src/error/handler.rs new file mode 100644 index 0000000000000..688b599473ab9 --- /dev/null +++ b/crates/bevy_ecs/src/error/handler.rs @@ -0,0 +1,183 @@ +#[cfg(feature = "configurable_error_handler")] +use bevy_platform::sync::OnceLock; +use core::fmt::Display; + +use crate::{component::Tick, error::BevyError}; +use alloc::borrow::Cow; + +/// Context for a [`BevyError`] to aid in debugging. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum ErrorContext { + /// The error occurred in a system. + System { + /// The name of the system that failed. + name: Cow<'static, str>, + /// The last tick that the system was run. + last_run: Tick, + }, + /// The error occurred in a run condition. + RunCondition { + /// The name of the run condition that failed. + name: Cow<'static, str>, + /// The last tick that the run condition was evaluated. + last_run: Tick, + }, + /// The error occurred in a command. + Command { + /// The name of the command that failed. 
+ name: Cow<'static, str>, + }, + /// The error occurred in an observer. + Observer { + /// The name of the observer that failed. + name: Cow<'static, str>, + /// The last tick that the observer was run. + last_run: Tick, + }, +} + +impl Display for ErrorContext { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::System { name, .. } => { + write!(f, "System `{}` failed", name) + } + Self::Command { name } => write!(f, "Command `{}` failed", name), + Self::Observer { name, .. } => { + write!(f, "Observer `{}` failed", name) + } + Self::RunCondition { name, .. } => { + write!(f, "Run condition `{}` failed", name) + } + } + } +} + +impl ErrorContext { + /// The name of the ECS construct that failed. + pub fn name(&self) -> &str { + match self { + Self::System { name, .. } + | Self::Command { name, .. } + | Self::Observer { name, .. } + | Self::RunCondition { name, .. } => name, + } + } + + /// A string representation of the kind of ECS construct that failed. + /// + /// This is a simpler helper used for logging. + pub fn kind(&self) -> &str { + match self { + Self::System { .. } => "system", + Self::Command { .. } => "command", + Self::Observer { .. } => "observer", + Self::RunCondition { .. } => "run condition", + } + } +} + +/// A global error handler. This can be set at startup, as long as it is set before +/// any uses. This should generally be configured _before_ initializing the app. +/// +/// This should be set inside of your `main` function, before initializing the Bevy app. +/// The value of this error handler can be accessed using the [`default_error_handler`] function, +/// which calls [`OnceLock::get_or_init`] to get the value. +/// +/// **Note:** this is only available when the `configurable_error_handler` feature of `bevy_ecs` (or `bevy`) is enabled! 
+/// +/// # Example +/// +/// ``` +/// # use bevy_ecs::error::{GLOBAL_ERROR_HANDLER, warn}; +/// GLOBAL_ERROR_HANDLER.set(warn).expect("The error handler can only be set once, globally."); +/// // initialize Bevy App here +/// ``` +/// +/// To use this error handler in your app for custom error handling logic: +/// +/// ```rust +/// use bevy_ecs::error::{default_error_handler, GLOBAL_ERROR_HANDLER, BevyError, ErrorContext, panic}; +/// +/// fn handle_errors(error: BevyError, ctx: ErrorContext) { +/// let error_handler = default_error_handler(); +/// error_handler(error, ctx); +/// } +/// ``` +/// +/// # Warning +/// +/// As this can *never* be overwritten, library code should never set this value. +#[cfg(feature = "configurable_error_handler")] +pub static GLOBAL_ERROR_HANDLER: OnceLock = OnceLock::new(); + +/// The default error handler. This defaults to [`panic()`], +/// but if set, the [`GLOBAL_ERROR_HANDLER`] will be used instead, enabling error handler customization. +/// The `configurable_error_handler` feature must be enabled to change this from the panicking default behavior, +/// as there may be runtime overhead. +#[inline] +pub fn default_error_handler() -> fn(BevyError, ErrorContext) { + #[cfg(not(feature = "configurable_error_handler"))] + return panic; + + #[cfg(feature = "configurable_error_handler")] + return *GLOBAL_ERROR_HANDLER.get_or_init(|| panic); +} + +macro_rules! inner { + ($call:path, $e:ident, $c:ident) => { + $call!( + "Encountered an error in {} `{}`: {}", + $c.kind(), + $c.name(), + $e + ); + }; +} + +/// Error handler that panics with the system error. +#[track_caller] +#[inline] +pub fn panic(error: BevyError, ctx: ErrorContext) { + inner!(panic, error, ctx); +} + +/// Error handler that logs the system error at the `error` level. +#[track_caller] +#[inline] +pub fn error(error: BevyError, ctx: ErrorContext) { + inner!(log::error, error, ctx); +} + +/// Error handler that logs the system error at the `warn` level. 
+#[track_caller] +#[inline] +pub fn warn(error: BevyError, ctx: ErrorContext) { + inner!(log::warn, error, ctx); +} + +/// Error handler that logs the system error at the `info` level. +#[track_caller] +#[inline] +pub fn info(error: BevyError, ctx: ErrorContext) { + inner!(log::info, error, ctx); +} + +/// Error handler that logs the system error at the `debug` level. +#[track_caller] +#[inline] +pub fn debug(error: BevyError, ctx: ErrorContext) { + inner!(log::debug, error, ctx); +} + +/// Error handler that logs the system error at the `trace` level. +#[track_caller] +#[inline] +pub fn trace(error: BevyError, ctx: ErrorContext) { + inner!(log::trace, error, ctx); +} + +/// Error handler that ignores the system error. +#[track_caller] +#[inline] +pub fn ignore(_: BevyError, _: ErrorContext) {} diff --git a/crates/bevy_ecs/src/error/mod.rs b/crates/bevy_ecs/src/error/mod.rs new file mode 100644 index 0000000000000..950deee3ecf97 --- /dev/null +++ b/crates/bevy_ecs/src/error/mod.rs @@ -0,0 +1,81 @@ +//! Error handling for Bevy systems, commands, and observers. +//! +//! When a system is added to a [`Schedule`], and its return type is that of [`Result`], then Bevy +//! considers those systems to be "fallible", and the ECS scheduler will special-case the [`Err`] +//! variant of the returned `Result`. +//! +//! All [`BevyError`]s returned by a system, observer or command are handled by an "error handler". By default, the +//! [`panic`] error handler function is used, resulting in a panic with the error message attached. +//! +//! You can change the default behavior by registering a custom error handler. +//! Modify the [`GLOBAL_ERROR_HANDLER`] value to set a custom error handler function for your entire app. +//! In practice, this is generally feature-flagged: panicking or loudly logging errors in development, +//! and quietly logging or ignoring them in production to avoid crashing the app. +//! +//! 
Bevy provides a number of pre-built error-handlers for you to use: +//! +//! - [`panic`] – panics with the system error +//! - [`error`] – logs the system error at the `error` level +//! - [`warn`] – logs the system error at the `warn` level +//! - [`info`] – logs the system error at the `info` level +//! - [`debug`] – logs the system error at the `debug` level +//! - [`trace`] – logs the system error at the `trace` level +//! - [`ignore`] – ignores the system error +//! +//! However, you can use any custom error handler logic by providing your own function (or +//! non-capturing closure that coerces to the function signature) as long as it matches the +//! signature: +//! +//! ```rust,ignore +//! fn(BevyError, ErrorContext) +//! ``` +//! +//! The [`ErrorContext`] allows you to access additional details relevant to providing +//! context surrounding the error – such as the system's [`name`] – in your error messages. +//! +//! Remember to turn on the `configurable_error_handler` feature to set a global error handler! +//! +//! ```rust, ignore +//! use bevy_ecs::error::{GLOBAL_ERROR_HANDLER, BevyError, ErrorContext}; +//! use log::trace; +//! +//! fn my_error_handler(error: BevyError, ctx: ErrorContext) { +//! if ctx.name().ends_with("plz_ignore") { +//! trace!("Nothing to see here, move along."); +//! return; +//! } +//! bevy_ecs::error::error(error, ctx); +//! } +//! +//! fn main() { +//! // This requires the "configurable_error_handler" feature to be enabled to be in scope. +//! GLOBAL_ERROR_HANDLER.set(my_error_handler).expect("The error handler can only be set once."); +//! +//! // Initialize your Bevy App here +//! } +//! ``` +//! +//! If you need special handling of individual fallible systems, you can use Bevy's [`system piping +//! feature`] to capture the [`Result`] output of the system and handle it accordingly. +//! +//! When working with commands, you can handle the result of each command separately using the [`HandleError::handle_error_with`] method. 
+//! +//! [`Schedule`]: crate::schedule::Schedule +//! [`panic`]: panic() +//! [`World`]: crate::world::World +//! [`System`]: crate::system::System +//! [`name`]: crate::system::System::name +//! [`system piping feature`]: crate::system::In + +mod bevy_error; +mod command_handling; +mod handler; + +pub use bevy_error::*; +pub use command_handling::*; +pub use handler::*; + +/// A result type for use in fallible systems, commands and observers. +/// +/// The [`BevyError`] type is a type-erased error type with optional Bevy-specific diagnostics. +pub type Result = core::result::Result; diff --git a/crates/bevy_ecs/src/event/base.rs b/crates/bevy_ecs/src/event/base.rs index 3106009d6be3b..d525ba2e57695 100644 --- a/crates/bevy_ecs/src/event/base.rs +++ b/crates/bevy_ecs/src/event/base.rs @@ -1,8 +1,9 @@ +use crate::change_detection::MaybeLocation; +use crate::component::ComponentId; +use crate::world::World; use crate::{component::Component, traversal::Traversal}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; use core::{ cmp::Ordering, fmt, @@ -17,13 +18,21 @@ use core::{ /// /// Events can also be "triggered" on a [`World`], which will then cause any [`Observer`] of that trigger to run. /// +/// Events must be thread-safe. +/// +/// ## Derive /// This trait can be derived. +/// Adding `auto_propagate` sets [`Self::AUTO_PROPAGATE`] to true. +/// Adding `traversal = "X"` sets [`Self::Traversal`] to be of type "X". /// -/// Events implement the [`Component`] type (and they automatically do when they are derived). Events are (generally) -/// not directly inserted as components. More often, the [`ComponentId`] is used to identify the event type within the -/// context of the ECS. +/// ``` +/// use bevy_ecs::prelude::*; +/// +/// #[derive(Event)] +/// #[event(auto_propagate)] +/// struct MyEvent; +/// ``` /// -/// Events must be thread-safe. 
/// /// [`World`]: crate::world::World /// [`ComponentId`]: crate::component::ComponentId @@ -36,7 +45,7 @@ use core::{ label = "invalid `Event`", note = "consider annotating `{Self}` with `#[derive(Event)]`" )] -pub trait Event: Component { +pub trait Event: Send + Sync + 'static { /// The component that describes which Entity to propagate this event to next, when [propagation] is enabled. /// /// [propagation]: crate::observer::Trigger::propagate @@ -48,23 +57,71 @@ pub trait Event: Component { /// [triggered]: crate::system::Commands::trigger_targets /// [`Trigger::propagate`]: crate::observer::Trigger::propagate const AUTO_PROPAGATE: bool = false; + + /// Generates the [`ComponentId`] for this event type. + /// + /// If this type has already been registered, + /// this will return the existing [`ComponentId`]. + /// + /// This is used by various dynamically typed observer APIs, + /// such as [`World::trigger_targets_dynamic`]. + /// + /// # Warning + /// + /// This method should not be overridden by implementors, + /// and should always correspond to the implementation of [`component_id`](Event::component_id). + fn register_component_id(world: &mut World) -> ComponentId { + world.register_component::>() + } + + /// Fetches the [`ComponentId`] for this event type, + /// if it has already been generated. + /// + /// This is used by various dynamically typed observer APIs, + /// such as [`World::trigger_targets_dynamic`]. + /// + /// # Warning + /// + /// This method should not be overridden by implementors, + /// and should always correspond to the implementation of [`register_component_id`](Event::register_component_id). + fn component_id(world: &World) -> Option { + world.component_id::>() + } } +/// An internal type that implements [`Component`] for a given [`Event`] type. +/// +/// This exists so we can easily get access to a unique [`ComponentId`] for each [`Event`] type, +/// without requiring that [`Event`] types implement [`Component`] directly. 
+/// [`ComponentId`] is used internally as a unique identifier for events because they are: +/// +/// - Unique to each event type. +/// - Can be quickly generated and looked up. +/// - Are compatible with dynamic event types, which aren't backed by a Rust type. +/// +/// This type is an implementation detail and should never be made public. +// TODO: refactor events to store their metadata on distinct entities, rather than using `ComponentId` +#[derive(Component)] +struct EventWrapperComponent(PhantomData); + /// An `EventId` uniquely identifies an event stored in a specific [`World`]. /// /// An `EventId` can among other things be used to trace the flow of an event from the point it was /// sent to the point it was processed. `EventId`s increase monotonically by send order. /// /// [`World`]: crate::world::World -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, Debug, PartialEq, Hash) +)] pub struct EventId { /// Uniquely identifies the event associated with this ID. // This value corresponds to the order in which each event was added to the world. pub id: usize, /// The source code location that triggered this event. 
- #[cfg(feature = "track_change_detection")] - pub caller: &'static Location<'static>, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + pub caller: MaybeLocation, + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(super) _marker: PhantomData, } diff --git a/crates/bevy_ecs/src/event/collections.rs b/crates/bevy_ecs/src/event/collections.rs index e5c3e43452d56..66447b7de4268 100644 --- a/crates/bevy_ecs/src/event/collections.rs +++ b/crates/bevy_ecs/src/event/collections.rs @@ -1,11 +1,9 @@ -use crate as bevy_ecs; use alloc::vec::Vec; use bevy_ecs::{ + change_detection::MaybeLocation, event::{Event, EventCursor, EventId, EventInstance}, - system::Resource, + resource::Resource, }; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; use core::{ marker::PhantomData, ops::{Deref, DerefMut}, @@ -75,7 +73,7 @@ use { /// - [`EventReader`]s that read at least once per update will never drop events. /// - [`EventReader`]s that read once within two updates might still receive some events /// - [`EventReader`]s that read after two updates are guaranteed to drop all events that occurred -/// before those updates. +/// before those updates. /// /// The buffers in [`Events`] will grow indefinitely if [`update`](Events::update) is never called. /// @@ -124,21 +122,12 @@ impl Events { /// This method returns the [ID](`EventId`) of the sent `event`. 
#[track_caller] pub fn send(&mut self, event: E) -> EventId { - self.send_with_caller( - event, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ) + self.send_with_caller(event, MaybeLocation::caller()) } - pub(crate) fn send_with_caller( - &mut self, - event: E, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, - ) -> EventId { + pub(crate) fn send_with_caller(&mut self, event: E, caller: MaybeLocation) -> EventId { let event_id = EventId { id: self.event_count, - #[cfg(feature = "track_change_detection")] caller, _marker: PhantomData, }; @@ -308,8 +297,7 @@ impl Extend for Events { let events = iter.into_iter().map(|event| { let event_id = EventId { id: event_count, - #[cfg(feature = "track_change_detection")] - caller: Location::caller(), + caller: MaybeLocation::caller(), _marker: PhantomData, }; event_count += 1; @@ -332,7 +320,7 @@ impl Extend for Events { } #[derive(Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Default))] pub(crate) struct EventSequence { pub(crate) events: Vec>, pub(crate) start_event_count: usize, @@ -379,8 +367,7 @@ impl Iterator for SendBatchIds { let result = Some(EventId { id: self.last_count, - #[cfg(feature = "track_change_detection")] - caller: Location::caller(), + caller: MaybeLocation::caller(), _marker: PhantomData, }); @@ -398,7 +385,7 @@ impl ExactSizeIterator for SendBatchIds { #[cfg(test)] mod tests { - use crate::{self as bevy_ecs, event::Events}; + use crate::event::Events; use bevy_ecs_macros::Event; #[test] diff --git a/crates/bevy_ecs/src/event/event_cursor.rs b/crates/bevy_ecs/src/event/event_cursor.rs index ca1be152e5caa..ff15ef4931581 100644 --- a/crates/bevy_ecs/src/event/event_cursor.rs +++ b/crates/bevy_ecs/src/event/event_cursor.rs @@ -1,4 +1,3 @@ -use crate as bevy_ecs; use bevy_ecs::event::{ Event, EventIterator, EventIteratorWithId, EventMutIterator, EventMutIteratorWithId, 
Events, }; @@ -74,7 +73,6 @@ impl Clone for EventCursor { } } -#[allow(clippy::len_without_is_empty)] // Check fails since the is_empty implementation has a signature other than `(&self) -> bool` impl EventCursor { /// See [`EventReader::read`](super::EventReader::read) pub fn read<'a>(&'a mut self, events: &'a Events) -> EventIterator<'a, E> { diff --git a/crates/bevy_ecs/src/event/iterators.rs b/crates/bevy_ecs/src/event/iterators.rs index 956072715c74a..f9ee74b8b08d9 100644 --- a/crates/bevy_ecs/src/event/iterators.rs +++ b/crates/bevy_ecs/src/event/iterators.rs @@ -1,4 +1,3 @@ -use crate as bevy_ecs; #[cfg(feature = "multi_threaded")] use bevy_ecs::batching::BatchingStrategy; use bevy_ecs::event::{Event, EventCursor, EventId, EventInstance, Events}; @@ -145,6 +144,7 @@ pub struct EventParIter<'a, E: Event> { reader: &'a mut EventCursor, slices: [&'a [EventInstance]; 2], batching_strategy: BatchingStrategy, + #[cfg(not(target_arch = "wasm32"))] unread: usize, } @@ -170,6 +170,7 @@ impl<'a, E: Event> EventParIter<'a, E> { reader, slices: [a, b], batching_strategy: BatchingStrategy::default(), + #[cfg(not(target_arch = "wasm32"))] unread: unread_count, } } @@ -206,6 +207,10 @@ impl<'a, E: Event> EventParIter<'a, E> { /// initialized and run from the ECS scheduler, this should never panic. 
/// /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[cfg_attr( + target_arch = "wasm32", + expect(unused_mut, reason = "not mutated on this target") + )] pub fn for_each_with_id) + Send + Sync + Clone>(mut self, func: FN) { #[cfg(target_arch = "wasm32")] { diff --git a/crates/bevy_ecs/src/event/mod.rs b/crates/bevy_ecs/src/event/mod.rs index f7f46af258a3c..9c19dc16895e7 100644 --- a/crates/bevy_ecs/src/event/mod.rs +++ b/crates/bevy_ecs/src/event/mod.rs @@ -7,7 +7,6 @@ mod mut_iterators; mod mutator; mod reader; mod registry; -mod send_event; mod update; mod writer; @@ -25,7 +24,6 @@ pub use mut_iterators::{EventMutIterator, EventMutIteratorWithId}; pub use mutator::EventMutator; pub use reader::EventReader; pub use registry::{EventRegistry, ShouldUpdateEvents}; -pub use send_event::SendEvent; pub use update::{ event_update_condition, event_update_system, signal_event_update_system, EventUpdates, }; @@ -33,7 +31,7 @@ pub use writer::EventWriter; #[cfg(test)] mod tests { - use crate as bevy_ecs; + use alloc::{vec, vec::Vec}; use bevy_ecs::{event::*, system::assert_is_read_only_system}; use bevy_ecs_macros::Event; @@ -569,7 +567,6 @@ mod tests { assert!(last.is_none(), "EventMutator should be empty"); } - #[allow(clippy::iter_nth_zero)] #[test] fn test_event_reader_iter_nth() { use bevy_ecs::prelude::*; @@ -596,7 +593,6 @@ mod tests { schedule.run(&mut world); } - #[allow(clippy::iter_nth_zero)] #[test] fn test_event_mutator_iter_nth() { use bevy_ecs::prelude::*; diff --git a/crates/bevy_ecs/src/event/mut_iterators.rs b/crates/bevy_ecs/src/event/mut_iterators.rs index f8f32236ea8e6..3cb531ce78829 100644 --- a/crates/bevy_ecs/src/event/mut_iterators.rs +++ b/crates/bevy_ecs/src/event/mut_iterators.rs @@ -1,4 +1,3 @@ -use crate as bevy_ecs; #[cfg(feature = "multi_threaded")] use bevy_ecs::batching::BatchingStrategy; use bevy_ecs::event::{Event, EventCursor, EventId, EventInstance, Events}; @@ -148,6 +147,7 @@ pub struct EventMutParIter<'a, E: Event> { mutator: 
&'a mut EventCursor, slices: [&'a mut [EventInstance]; 2], batching_strategy: BatchingStrategy, + #[cfg(not(target_arch = "wasm32"))] unread: usize, } @@ -171,6 +171,7 @@ impl<'a, E: Event> EventMutParIter<'a, E> { mutator, slices: [a, b], batching_strategy: BatchingStrategy::default(), + #[cfg(not(target_arch = "wasm32"))] unread: unread_count, } } @@ -207,6 +208,10 @@ impl<'a, E: Event> EventMutParIter<'a, E> { /// initialized and run from the ECS scheduler, this should never panic. /// /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[cfg_attr( + target_arch = "wasm32", + expect(unused_mut, reason = "not mutated on this target") + )] pub fn for_each_with_id) + Send + Sync + Clone>( mut self, func: FN, diff --git a/crates/bevy_ecs/src/event/mutator.rs b/crates/bevy_ecs/src/event/mutator.rs index ee77f9961a3b4..e95037af5ba6d 100644 --- a/crates/bevy_ecs/src/event/mutator.rs +++ b/crates/bevy_ecs/src/event/mutator.rs @@ -1,4 +1,3 @@ -use crate as bevy_ecs; #[cfg(feature = "multi_threaded")] use bevy_ecs::event::EventMutParIter; use bevy_ecs::{ @@ -45,6 +44,7 @@ use bevy_ecs::{ #[derive(SystemParam, Debug)] pub struct EventMutator<'w, 's, E: Event> { pub(super) reader: Local<'s, EventCursor>, + #[system_param(validation_message = "Event not initialized")] events: ResMut<'w, Events>, } diff --git a/crates/bevy_ecs/src/event/reader.rs b/crates/bevy_ecs/src/event/reader.rs index 1611fa6ba5ffc..995e2ca9e9043 100644 --- a/crates/bevy_ecs/src/event/reader.rs +++ b/crates/bevy_ecs/src/event/reader.rs @@ -1,4 +1,3 @@ -use crate as bevy_ecs; #[cfg(feature = "multi_threaded")] use bevy_ecs::event::EventParIter; use bevy_ecs::{ @@ -17,6 +16,7 @@ use bevy_ecs::{ #[derive(SystemParam, Debug)] pub struct EventReader<'w, 's, E: Event> { pub(super) reader: Local<'s, EventCursor>, + #[system_param(validation_message = "Event not initialized")] events: Res<'w, Events>, } diff --git a/crates/bevy_ecs/src/event/registry.rs b/crates/bevy_ecs/src/event/registry.rs index 
3f92c1134a4cf..231f792f68392 100644 --- a/crates/bevy_ecs/src/event/registry.rs +++ b/crates/bevy_ecs/src/event/registry.rs @@ -1,10 +1,9 @@ -use crate as bevy_ecs; use alloc::vec::Vec; use bevy_ecs::{ change_detection::{DetectChangesMut, MutUntyped}, component::{ComponentId, Tick}, event::{Event, Events}, - system::Resource, + resource::Resource, world::World, }; diff --git a/crates/bevy_ecs/src/event/send_event.rs b/crates/bevy_ecs/src/event/send_event.rs deleted file mode 100644 index 0d5f61cadcc4f..0000000000000 --- a/crates/bevy_ecs/src/event/send_event.rs +++ /dev/null @@ -1,37 +0,0 @@ -#[cfg(feature = "track_change_detection")] -use core::panic::Location; - -use super::{Event, Events}; -use crate::world::{Command, World}; - -/// A command to send an arbitrary [`Event`], used by [`Commands::send_event`](crate::system::Commands::send_event). -pub struct SendEvent { - /// The event to send. - pub event: E, - /// The source code location that triggered this command. - #[cfg(feature = "track_change_detection")] - pub caller: &'static Location<'static>, -} - -// This does not use `From`, as the resulting `Into` is not track_caller -impl SendEvent { - /// Constructs a new `SendEvent` tracking the caller. 
- pub fn new(event: E) -> Self { - Self { - event, - #[cfg(feature = "track_change_detection")] - caller: Location::caller(), - } - } -} - -impl Command for SendEvent { - fn apply(self, world: &mut World) { - let mut events = world.resource_mut::>(); - events.send_with_caller( - self.event, - #[cfg(feature = "track_change_detection")] - self.caller, - ); - } -} diff --git a/crates/bevy_ecs/src/event/update.rs b/crates/bevy_ecs/src/event/update.rs index bf3c07de4d3c8..c7b43aef00689 100644 --- a/crates/bevy_ecs/src/event/update.rs +++ b/crates/bevy_ecs/src/event/update.rs @@ -1,4 +1,3 @@ -use crate as bevy_ecs; use bevy_ecs::{ change_detection::Mut, component::Tick, diff --git a/crates/bevy_ecs/src/event/writer.rs b/crates/bevy_ecs/src/event/writer.rs index f391e7c3449da..a1c42f8b60aaf 100644 --- a/crates/bevy_ecs/src/event/writer.rs +++ b/crates/bevy_ecs/src/event/writer.rs @@ -1,4 +1,3 @@ -use crate as bevy_ecs; use bevy_ecs::{ event::{Event, EventId, Events, SendBatchIds}, system::{ResMut, SystemParam}, @@ -15,14 +14,14 @@ use bevy_ecs::{ /// #[derive(Event)] /// pub struct MyEvent; // Custom event type. /// fn my_system(mut writer: EventWriter) { -/// writer.send(MyEvent); +/// writer.write(MyEvent); /// } /// /// # bevy_ecs::system::assert_is_system(my_system); /// ``` /// # Observers /// -/// "Buffered" Events, such as those sent directly in [`Events`] or sent using [`EventWriter`], do _not_ automatically +/// "Buffered" Events, such as those sent directly in [`Events`] or written using [`EventWriter`], do _not_ automatically /// trigger any [`Observer`]s watching for that event, as each [`Event`] has different requirements regarding _if_ it will /// be triggered, and if so, _when_ it will be triggered in the schedule. /// @@ -33,7 +32,7 @@ use bevy_ecs::{ /// /// # Untyped events /// -/// `EventWriter` can only send events of one specific type, which must be known at compile-time. 
+/// `EventWriter` can only write events of one specific type, which must be known at compile-time. /// This is not a problem most of the time, but you may find a situation where you cannot know /// ahead of time every kind of event you'll need to send. In this case, you can use the "type-erased event" pattern. /// @@ -61,17 +60,53 @@ use bevy_ecs::{ /// [`Observer`]: crate::observer::Observer #[derive(SystemParam)] pub struct EventWriter<'w, E: Event> { + #[system_param(validation_message = "Event not initialized")] events: ResMut<'w, Events>, } impl<'w, E: Event> EventWriter<'w, E> { + /// Writes an `event`, which can later be read by [`EventReader`](super::EventReader)s. + /// This method returns the [ID](`EventId`) of the written `event`. + /// + /// See [`Events`] for details. + #[doc(alias = "send")] + #[track_caller] + pub fn write(&mut self, event: E) -> EventId { + self.events.send(event) + } + + /// Sends a list of `events` all at once, which can later be read by [`EventReader`](super::EventReader)s. + /// This is more efficient than sending each event individually. + /// This method returns the [IDs](`EventId`) of the written `events`. + /// + /// See [`Events`] for details. + #[doc(alias = "send_batch")] + #[track_caller] + pub fn write_batch(&mut self, events: impl IntoIterator) -> SendBatchIds { + self.events.send_batch(events) + } + + /// Writes the default value of the event. Useful when the event is an empty struct. + /// This method returns the [ID](`EventId`) of the written `event`. + /// + /// See [`Events`] for details. + #[doc(alias = "send_default")] + #[track_caller] + pub fn write_default(&mut self) -> EventId + where + E: Default, + { + self.events.send_default() + } + /// Sends an `event`, which can later be read by [`EventReader`](super::EventReader)s. /// This method returns the [ID](`EventId`) of the sent `event`. /// /// See [`Events`] for details. 
+ #[deprecated(since = "0.16.0", note = "Use `EventWriter::write` instead.")] #[track_caller] pub fn send(&mut self, event: E) -> EventId { - self.events.send(event) + self.write(event) } /// Sends a list of `events` all at once, which can later be read by [`EventReader`](super::EventReader)s. @@ -79,20 +114,22 @@ impl<'w, E: Event> EventWriter<'w, E> { /// This method returns the [IDs](`EventId`) of the sent `events`. /// /// See [`Events`] for details. + #[deprecated(since = "0.16.0", note = "Use `EventWriter::write_batch` instead.")] #[track_caller] pub fn send_batch(&mut self, events: impl IntoIterator) -> SendBatchIds { - self.events.send_batch(events) + self.write_batch(events) } /// Sends the default value of the event. Useful when the event is an empty struct. /// This method returns the [ID](`EventId`) of the sent `event`. /// /// See [`Events`] for details. + #[deprecated(since = "0.16.0", note = "Use `EventWriter::write_default` instead.")] #[track_caller] pub fn send_default(&mut self) -> EventId where E: Default, { - self.events.send_default() + self.write_default() } } diff --git a/crates/bevy_ecs/src/hierarchy.rs b/crates/bevy_ecs/src/hierarchy.rs new file mode 100644 index 0000000000000..9f4b0d0f8f8da --- /dev/null +++ b/crates/bevy_ecs/src/hierarchy.rs @@ -0,0 +1,1029 @@ +//! The canonical "parent-child" [`Relationship`] for entities, driven by +//! the [`ChildOf`] [`Relationship`] and the [`Children`] [`RelationshipTarget`]. +//! +//! See [`ChildOf`] for a full description of the relationship and how to use it. +//! +//! [`Relationship`]: crate::relationship::Relationship +//! 
[`RelationshipTarget`]: crate::relationship::RelationshipTarget + +#[cfg(feature = "bevy_reflect")] +use crate::reflect::{ReflectComponent, ReflectFromWorld}; +use crate::{ + bundle::Bundle, + component::{Component, HookContext}, + entity::Entity, + relationship::{RelatedSpawner, RelatedSpawnerCommands}, + system::EntityCommands, + world::{DeferredWorld, EntityWorldMut, FromWorld, World}, +}; +use alloc::{format, string::String, vec::Vec}; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::std_traits::ReflectDefault; +use core::ops::Deref; +use core::slice; +use disqualified::ShortName; +use log::warn; + +/// Stores the parent entity of this child entity with this component. +/// +/// This is a [`Relationship`] component, and creates the canonical +/// "parent / child" hierarchy. This is the "source of truth" component, and it pairs with +/// the [`Children`] [`RelationshipTarget`](crate::relationship::RelationshipTarget). +/// +/// This relationship should be used for things like: +/// +/// 1. Organizing entities in a scene +/// 2. Propagating configuration or data inherited from a parent, such as "visibility" or "world-space global transforms". +/// 3. Ensuring a hierarchy is despawned when an entity is despawned. +/// +/// [`ChildOf`] contains a single "target" [`Entity`]. When [`ChildOf`] is inserted on a "source" entity, +/// the "target" entity will automatically (and immediately, via a component hook) have a [`Children`] +/// component inserted, and the "source" entity will be added to that [`Children`] instance. +/// +/// If the [`ChildOf`] component is replaced with a different "target" entity, the old target's [`Children`] +/// will automatically (and immediately, via a component hook) be updated to reflect that change. +/// +/// Likewise, when the [`ChildOf`] component is removed, the "source" entity will be removed from the old +/// target's [`Children`]. If this results in [`Children`] being empty, [`Children`] will be automatically removed.
+/// +/// When a parent is despawned, all children (and their descendants) will _also_ be despawned. +/// +/// You can create parent-child relationships in a variety of ways. The most direct way is to insert a [`ChildOf`] component: +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # let mut world = World::new(); +/// let root = world.spawn_empty().id(); +/// let child1 = world.spawn(ChildOf(root)).id(); +/// let child2 = world.spawn(ChildOf(root)).id(); +/// let grandchild = world.spawn(ChildOf(child1)).id(); +/// +/// assert_eq!(&**world.entity(root).get::().unwrap(), &[child1, child2]); +/// assert_eq!(&**world.entity(child1).get::().unwrap(), &[grandchild]); +/// +/// world.entity_mut(child2).remove::(); +/// assert_eq!(&**world.entity(root).get::().unwrap(), &[child1]); +/// +/// world.entity_mut(root).despawn(); +/// assert!(world.get_entity(root).is_err()); +/// assert!(world.get_entity(child1).is_err()); +/// assert!(world.get_entity(grandchild).is_err()); +/// ``` +/// +/// However if you are spawning many children, you might want to use the [`EntityWorldMut::with_children`] helper instead: +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # let mut world = World::new(); +/// let mut child1 = Entity::PLACEHOLDER; +/// let mut child2 = Entity::PLACEHOLDER; +/// let mut grandchild = Entity::PLACEHOLDER; +/// let root = world.spawn_empty().with_children(|p| { +/// child1 = p.spawn_empty().with_children(|p| { +/// grandchild = p.spawn_empty().id(); +/// }).id(); +/// child2 = p.spawn_empty().id(); +/// }).id(); +/// +/// assert_eq!(&**world.entity(root).get::().unwrap(), &[child1, child2]); +/// assert_eq!(&**world.entity(child1).get::().unwrap(), &[grandchild]); +/// ``` +/// +/// [`Relationship`]: crate::relationship::Relationship +#[derive(Component, Clone, PartialEq, Eq, Debug)] +#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] +#[cfg_attr( + feature = "bevy_reflect", + reflect(Component, PartialEq, Debug, FromWorld, Clone) +)] 
+#[relationship(relationship_target = Children)] +#[doc(alias = "IsChild", alias = "Parent")] +pub struct ChildOf(pub Entity); + +impl ChildOf { + /// The parent entity of this child entity. + #[inline] + pub fn parent(&self) -> Entity { + self.0 + } + + /// The parent entity of this child entity. + #[deprecated(since = "0.16.0", note = "Use child_of.parent() instead")] + #[inline] + pub fn get(&self) -> Entity { + self.0 + } +} + +// TODO: We need to impl either FromWorld or Default so ChildOf can be registered as Reflect. +// This is because Reflect deserializes by creating an instance and applying a patch on top. +// However ChildOf should only ever be set with a real user-defined entity. It's worth looking into +// better ways to handle cases like this. +impl FromWorld for ChildOf { + #[inline(always)] + fn from_world(_world: &mut World) -> Self { + ChildOf(Entity::PLACEHOLDER) + } +} + +/// Tracks which entities are children of this parent entity. +/// +/// A [`RelationshipTarget`] collection component that is populated +/// with entities that "target" this entity with the [`ChildOf`] [`Relationship`] component. +/// +/// Together, these components form the "canonical parent-child hierarchy". See the [`ChildOf`] component for the full +/// description of this relationship and instructions on how to use it. +/// +/// # Usage +/// +/// Like all [`RelationshipTarget`] components, this data should not be directly manipulated to avoid desynchronization. +/// Instead, modify the [`ChildOf`] components on the "source" entities. +/// +/// To access the children of an entity, you can iterate over the [`Children`] component, +/// using the [`IntoIterator`] trait. +/// For more complex access patterns, see the [`RelationshipTarget`] trait.
+/// +/// [`Relationship`]: crate::relationship::Relationship +/// [`RelationshipTarget`]: crate::relationship::RelationshipTarget +#[derive(Component, Default, Debug, PartialEq, Eq)] +#[relationship_target(relationship = ChildOf, linked_spawn)] +#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] +#[cfg_attr(feature = "bevy_reflect", reflect(Component, FromWorld, Default))] +#[doc(alias = "IsParent")] +pub struct Children(Vec); + +impl Children { + /// Swaps the child at `a_index` with the child at `b_index`. + #[inline] + pub fn swap(&mut self, a_index: usize, b_index: usize) { + self.0.swap(a_index, b_index); + } + + /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided comparator function. + /// + /// For the underlying implementation, see [`slice::sort_by`]. + /// + /// For the unstable version, see [`sort_unstable_by`](Children::sort_unstable_by). + /// + /// See also [`sort_by_key`](Children::sort_by_key), [`sort_by_cached_key`](Children::sort_by_cached_key). + #[inline] + pub fn sort_by(&mut self, compare: F) + where + F: FnMut(&Entity, &Entity) -> core::cmp::Ordering, + { + self.0.sort_by(compare); + } + + /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided key extraction function. + /// + /// For the underlying implementation, see [`slice::sort_by_key`]. + /// + /// For the unstable version, see [`sort_unstable_by_key`](Children::sort_unstable_by_key). + /// + /// See also [`sort_by`](Children::sort_by), [`sort_by_cached_key`](Children::sort_by_cached_key). + #[inline] + pub fn sort_by_key(&mut self, compare: F) + where + F: FnMut(&Entity) -> K, + K: Ord, + { + self.0.sort_by_key(compare); + } + + /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided key extraction function. 
Only evaluates each key at most + /// once per sort, caching the intermediate results in memory. + /// + /// For the underlying implementation, see [`slice::sort_by_cached_key`]. + /// + /// See also [`sort_by`](Children::sort_by), [`sort_by_key`](Children::sort_by_key). + #[inline] + pub fn sort_by_cached_key(&mut self, compare: F) + where + F: FnMut(&Entity) -> K, + K: Ord, + { + self.0.sort_by_cached_key(compare); + } + + /// Sorts children [unstably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided comparator function. + /// + /// For the underlying implementation, see [`slice::sort_unstable_by`]. + /// + /// For the stable version, see [`sort_by`](Children::sort_by). + /// + /// See also [`sort_unstable_by_key`](Children::sort_unstable_by_key). + #[inline] + pub fn sort_unstable_by(&mut self, compare: F) + where + F: FnMut(&Entity, &Entity) -> core::cmp::Ordering, + { + self.0.sort_unstable_by(compare); + } + + /// Sorts children [unstably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) + /// in place using the provided key extraction function. + /// + /// For the underlying implementation, see [`slice::sort_unstable_by_key`]. + /// + /// For the stable version, see [`sort_by_key`](Children::sort_by_key). + /// + /// See also [`sort_unstable_by`](Children::sort_unstable_by). + #[inline] + pub fn sort_unstable_by_key(&mut self, compare: F) + where + F: FnMut(&Entity) -> K, + K: Ord, + { + self.0.sort_unstable_by_key(compare); + } +} + +impl<'a> IntoIterator for &'a Children { + type Item = ::Item; + + type IntoIter = slice::Iter<'a, Entity>; + + #[inline(always)] + fn into_iter(self) -> Self::IntoIter { + self.0.iter() + } +} + +impl Deref for Children { + type Target = [Entity]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// A type alias over [`RelatedSpawner`] used to spawn child entities containing a [`ChildOf`] relationship. 
+pub type ChildSpawner<'w> = RelatedSpawner<'w, ChildOf>; + +/// A type alias over [`RelatedSpawnerCommands`] used to spawn child entities containing a [`ChildOf`] relationship. +pub type ChildSpawnerCommands<'w> = RelatedSpawnerCommands<'w, ChildOf>; + +impl<'w> EntityWorldMut<'w> { + /// Spawns children of this entity (with a [`ChildOf`] relationship) by taking a function that operates on a [`ChildSpawner`]. + /// See also [`with_related`](Self::with_related). + pub fn with_children(&mut self, func: impl FnOnce(&mut ChildSpawner)) -> &mut Self { + self.with_related_entities(func); + self + } + + /// Adds the given children to this entity + /// See also [`add_related`](Self::add_related). + pub fn add_children(&mut self, children: &[Entity]) -> &mut Self { + self.add_related::(children) + } + + /// Insert children at specific index. + /// See also [`insert_related`](Self::insert_related). + pub fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self { + self.insert_related::(index, children) + } + + /// Adds the given child to this entity + /// See also [`add_related`](Self::add_related). + pub fn add_child(&mut self, child: Entity) -> &mut Self { + self.add_related::(&[child]) + } + + /// Removes the relationship between this entity and the given entities. + pub fn remove_children(&mut self, children: &[Entity]) -> &mut Self { + self.remove_related::(children) + } + + /// Replaces all the related children with a new set of children. + pub fn replace_children(&mut self, children: &[Entity]) -> &mut Self { + self.replace_related::(children) + } + + /// Replaces all the related children with a new set of children. + /// + /// # Warning + /// + /// Failing to maintain the function's invariants may lead to erratic engine behavior including random crashes. + /// Refer to [`Self::replace_related_with_difference`] for a list of these invariants.
+ /// + /// # Panics + /// + /// Panics when debug assertions are enabled if an invariant is broken and the command is executed. + pub fn replace_children_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + self.replace_related_with_difference::( + entities_to_unrelate, + entities_to_relate, + newly_related_entities, + ) + } + + /// Spawns the passed bundle and adds it to this entity as a child. + /// + /// For efficient spawning of multiple children, use [`with_children`]. + /// + /// [`with_children`]: EntityWorldMut::with_children + pub fn with_child(&mut self, bundle: impl Bundle) -> &mut Self { + let parent = self.id(); + self.world_scope(|world| { + world.spawn((bundle, ChildOf(parent))); + }); + self + } + + /// Removes the [`ChildOf`] component, if it exists. + #[deprecated(since = "0.16.0", note = "Use entity_mut.remove::()")] + pub fn remove_parent(&mut self) -> &mut Self { + self.remove::(); + self + } + + /// Inserts the [`ChildOf`] component with the given `parent` entity, if it exists. + #[deprecated(since = "0.16.0", note = "Use entity_mut.insert(ChildOf(entity))")] + pub fn set_parent(&mut self, parent: Entity) -> &mut Self { + self.insert(ChildOf(parent)); + self + } +} + +impl<'a> EntityCommands<'a> { + /// Spawns children of this entity (with a [`ChildOf`] relationship) by taking a function that operates on a [`ChildSpawner`]. + pub fn with_children( + &mut self, + func: impl FnOnce(&mut RelatedSpawnerCommands), + ) -> &mut Self { + self.with_related_entities(func); + self + } + + /// Adds the given children to this entity + pub fn add_children(&mut self, children: &[Entity]) -> &mut Self { + self.add_related::(children) + } + + /// Insert children at specific index. + /// See also [`insert_related`](Self::insert_related).
+ pub fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self { + self.insert_related::(index, children) + } + + /// Adds the given child to this entity + pub fn add_child(&mut self, child: Entity) -> &mut Self { + self.add_related::(&[child]) + } + + /// Removes the relationship between this entity and the given entities. + pub fn remove_children(&mut self, children: &[Entity]) -> &mut Self { + self.remove_related::(children) + } + + /// Replaces the children on this entity with a new list of children. + pub fn replace_children(&mut self, children: &[Entity]) -> &mut Self { + self.replace_related::(children) + } + + /// Replaces all the related entities with a new set of entities. + /// + /// # Warning + /// + /// Failing to maintain the function's invariants may lead to erratic engine behavior including random crashes. + /// Refer to [`EntityWorldMut::replace_related_with_difference`] for a list of these invariants. + /// + /// # Panics + /// + /// Panics when debug assertions are enabled if an invariant is broken and the command is executed. + pub fn replace_children_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + self.replace_related_with_difference::( + entities_to_unrelate, + entities_to_relate, + newly_related_entities, + ) + } + + /// Spawns the passed bundle and adds it to this entity as a child. + /// + /// For efficient spawning of multiple children, use [`with_children`]. + /// + /// [`with_children`]: EntityCommands::with_children + pub fn with_child(&mut self, bundle: impl Bundle) -> &mut Self { + self.with_related::(bundle); + self + } + + /// Removes the [`ChildOf`] component, if it exists.
+ #[deprecated(since = "0.16.0", note = "Use entity_commands.remove::()")] + pub fn remove_parent(&mut self) -> &mut Self { + self.remove::(); + self + } + + /// Inserts the [`ChildOf`] component with the given `parent` entity, if it exists. + #[deprecated(since = "0.16.0", note = "Use entity_commands.insert(ChildOf(entity))")] + pub fn set_parent(&mut self, parent: Entity) -> &mut Self { + self.insert(ChildOf(parent)); + self + } +} + +/// An `on_insert` component hook that when run, will validate that the parent of a given entity +/// contains component `C`. This will print a warning if the parent does not contain `C`. +pub fn validate_parent_has_component( + world: DeferredWorld, + HookContext { entity, caller, .. }: HookContext, +) { + let entity_ref = world.entity(entity); + let Some(child_of) = entity_ref.get::() else { + return; + }; + if !world + .get_entity(child_of.parent()) + .is_ok_and(|e| e.contains::()) + { + // TODO: print name here once Name lives in bevy_ecs + let name: Option = None; + warn!( + "warning[B0004]: {}{name} with the {ty_name} component has a parent without {ty_name}.\n\ + This will cause inconsistent behaviors! See: https://bevyengine.org/learn/errors/b0004", + caller.map(|c| format!("{c}: ")).unwrap_or_default(), + ty_name = ShortName::of::(), + name = name.map_or_else( + || format!("Entity {}", entity), + |s| format!("The {s} entity") + ), + ); + } +} + +/// Returns a [`SpawnRelatedBundle`] that will insert the [`Children`] component, spawn a [`SpawnableList`] of entities with given bundles that +/// relate to the [`Children`] entity via the [`ChildOf`] component, and reserve space in the [`Children`] for each spawned entity. +/// +/// Any additional arguments will be interpreted as bundles to be spawned. +/// +/// Also see [`related`](crate::related) for a version of this that works with any [`RelationshipTarget`] type. 
+/// +/// ``` +/// # use bevy_ecs::hierarchy::Children; +/// # use bevy_ecs::name::Name; +/// # use bevy_ecs::world::World; +/// # use bevy_ecs::children; +/// # use bevy_ecs::spawn::{Spawn, SpawnRelated}; +/// let mut world = World::new(); +/// world.spawn(( +/// Name::new("Root"), +/// children![ +/// Name::new("Child1"), +/// ( +/// Name::new("Child2"), +/// children![Name::new("Grandchild")] +/// ) +/// ] +/// )); +/// ``` +/// +/// [`RelationshipTarget`]: crate::relationship::RelationshipTarget +/// [`SpawnRelatedBundle`]: crate::spawn::SpawnRelatedBundle +/// [`SpawnableList`]: crate::spawn::SpawnableList +#[macro_export] +macro_rules! children { + [$($child:expr),*$(,)?] => { + $crate::hierarchy::Children::spawn(($($crate::spawn::Spawn($child)),*)) + }; +} + +#[cfg(test)] +mod tests { + use crate::{ + entity::Entity, + hierarchy::{ChildOf, Children}, + relationship::{RelationshipHookMode, RelationshipTarget}, + spawn::{Spawn, SpawnRelated}, + world::World, + }; + use alloc::{vec, vec::Vec}; + + #[derive(PartialEq, Eq, Debug)] + struct Node { + entity: Entity, + children: Vec, + } + + impl Node { + fn new(entity: Entity) -> Self { + Self { + entity, + children: Vec::new(), + } + } + + fn new_with(entity: Entity, children: Vec) -> Self { + Self { entity, children } + } + } + + fn get_hierarchy(world: &World, entity: Entity) -> Node { + Node { + entity, + children: world + .entity(entity) + .get::() + .map_or_else(Default::default, |c| { + c.iter().map(|e| get_hierarchy(world, e)).collect() + }), + } + } + + #[test] + fn hierarchy() { + let mut world = World::new(); + let root = world.spawn_empty().id(); + let child1 = world.spawn(ChildOf(root)).id(); + let grandchild = world.spawn(ChildOf(child1)).id(); + let child2 = world.spawn(ChildOf(root)).id(); + + // Spawn + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, + Node::new_with( + root, + vec![ + Node::new_with(child1, vec![Node::new(grandchild)]), + Node::new(child2) + ] + ) + ); + + 
// Removal + world.entity_mut(child1).remove::(); + let hierarchy = get_hierarchy(&world, root); + assert_eq!(hierarchy, Node::new_with(root, vec![Node::new(child2)])); + + // Insert + world.entity_mut(child1).insert(ChildOf(root)); + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, + Node::new_with( + root, + vec![ + Node::new(child2), + Node::new_with(child1, vec![Node::new(grandchild)]) + ] + ) + ); + + // Recursive Despawn + world.entity_mut(root).despawn(); + assert!(world.get_entity(root).is_err()); + assert!(world.get_entity(child1).is_err()); + assert!(world.get_entity(child2).is_err()); + assert!(world.get_entity(grandchild).is_err()); + } + + #[test] + fn with_children() { + let mut world = World::new(); + let mut child1 = Entity::PLACEHOLDER; + let mut child2 = Entity::PLACEHOLDER; + let root = world + .spawn_empty() + .with_children(|p| { + child1 = p.spawn_empty().id(); + child2 = p.spawn_empty().id(); + }) + .id(); + + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, + Node::new_with(root, vec![Node::new(child1), Node::new(child2)]) + ); + } + + #[test] + fn add_children() { + let mut world = World::new(); + let child1 = world.spawn_empty().id(); + let child2 = world.spawn_empty().id(); + let root = world.spawn_empty().add_children(&[child1, child2]).id(); + + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, + Node::new_with(root, vec![Node::new(child1), Node::new(child2)]) + ); + } + + #[test] + fn insert_children() { + let mut world = World::new(); + let child1 = world.spawn_empty().id(); + let child2 = world.spawn_empty().id(); + let child3 = world.spawn_empty().id(); + let child4 = world.spawn_empty().id(); + + let mut entity_world_mut = world.spawn_empty(); + + let first_children = entity_world_mut.add_children(&[child1, child2]); + + let root = first_children.insert_children(1, &[child3, child4]).id(); + + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, 
+ Node::new_with( + root, + vec![ + Node::new(child1), + Node::new(child3), + Node::new(child4), + Node::new(child2) + ] + ) + ); + } + + #[test] + fn remove_children() { + let mut world = World::new(); + let child1 = world.spawn_empty().id(); + let child2 = world.spawn_empty().id(); + let child3 = world.spawn_empty().id(); + let child4 = world.spawn_empty().id(); + + let mut root = world.spawn_empty(); + root.add_children(&[child1, child2, child3, child4]); + root.remove_children(&[child2, child3]); + let root = root.id(); + + let hierarchy = get_hierarchy(&world, root); + assert_eq!( + hierarchy, + Node::new_with(root, vec![Node::new(child1), Node::new(child4)]) + ); + } + + #[test] + fn self_parenting_invalid() { + let mut world = World::new(); + let id = world.spawn_empty().id(); + world.entity_mut(id).insert(ChildOf(id)); + assert!( + world.entity(id).get::().is_none(), + "invalid ChildOf relationships should self-remove" + ); + } + + #[test] + fn missing_parent_invalid() { + let mut world = World::new(); + let parent = world.spawn_empty().id(); + world.entity_mut(parent).despawn(); + let id = world.spawn(ChildOf(parent)).id(); + assert!( + world.entity(id).get::().is_none(), + "invalid ChildOf relationships should self-remove" + ); + } + + #[test] + fn reinsert_same_parent() { + let mut world = World::new(); + let parent = world.spawn_empty().id(); + let id = world.spawn(ChildOf(parent)).id(); + world.entity_mut(id).insert(ChildOf(parent)); + assert_eq!( + Some(&ChildOf(parent)), + world.entity(id).get::(), + "ChildOf should still be there" + ); + } + + #[test] + fn spawn_children() { + let mut world = World::new(); + let id = world.spawn(Children::spawn((Spawn(()), Spawn(())))).id(); + assert_eq!(world.entity(id).get::().unwrap().len(), 2,); + } + + #[test] + fn replace_children() { + let mut world = World::new(); + let parent = world.spawn(Children::spawn((Spawn(()), Spawn(())))).id(); + let &[child_a, child_b] = &world.entity(parent).get::().unwrap().0[..] 
else { + panic!("Tried to spawn 2 children on an entity and didn't get 2 children"); + }; + + let child_c = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children(&[child_a, child_c]); + + let children = world.entity(parent).get::().unwrap(); + + assert!(children.contains(&child_a)); + assert!(children.contains(&child_c)); + assert!(!children.contains(&child_b)); + + assert_eq!( + world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_c).get::().unwrap(), + &ChildOf(parent) + ); + assert!(world.entity(child_b).get::().is_none()); + } + + #[test] + fn replace_children_with_nothing() { + let mut world = World::new(); + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + + world.entity_mut(parent).add_children(&[child_a, child_b]); + + assert_eq!(world.entity(parent).get::().unwrap().len(), 2); + + world.entity_mut(parent).replace_children(&[]); + + assert!(world.entity(child_a).get::().is_none()); + assert!(world.entity(child_b).get::().is_none()); + } + + #[test] + fn insert_same_child_twice() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child = world.spawn_empty().id(); + + world.entity_mut(parent).add_child(child); + world.entity_mut(parent).add_child(child); + + let children = world.get::(parent).unwrap(); + assert_eq!(children.0, [child]); + assert_eq!( + world.entity(child).get::().unwrap(), + &ChildOf(parent) + ); + } + + #[test] + fn replace_with_difference() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + let child_c = world.spawn_empty().id(); + let child_d = world.spawn_empty().id(); + + // Test inserting new relations + world.entity_mut(parent).replace_children_with_difference( + &[], + &[child_a, child_b], + &[child_a, child_b], + ); + + assert_eq!( + 
world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_b).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_a, child_b] + ); + + // Test replacing relations and changing order + world.entity_mut(parent).replace_children_with_difference( + &[child_b], + &[child_d, child_c, child_a], + &[child_c, child_d], + ); + assert_eq!( + world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_c).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_d).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_d, child_c, child_a] + ); + assert!(!world.entity(child_b).contains::()); + + // Test removing relationships + world.entity_mut(parent).replace_children_with_difference( + &[child_a, child_d, child_c], + &[], + &[], + ); + assert!(!world.entity(parent).contains::()); + assert!(!world.entity(child_a).contains::()); + assert!(!world.entity(child_b).contains::()); + assert!(!world.entity(child_c).contains::()); + assert!(!world.entity(child_d).contains::()); + } + + #[test] + fn replace_with_difference_on_empty() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children_with_difference(&[child_a], &[], &[]); + + assert!(!world.entity(parent).contains::()); + assert!(!world.entity(child_a).contains::()); + } + + #[test] + fn replace_with_difference_totally_new_children() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + let child_c = world.spawn_empty().id(); + let child_d = world.spawn_empty().id(); + + // Test inserting new relations + world.entity_mut(parent).replace_children_with_difference( + &[], + &[child_a, child_b], + &[child_a, 
child_b], + ); + + assert_eq!( + world.entity(child_a).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_b).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_a, child_b] + ); + + // Test replacing relations and changing order + world.entity_mut(parent).replace_children_with_difference( + &[child_b, child_a], + &[child_d, child_c], + &[child_c, child_d], + ); + assert_eq!( + world.entity(child_c).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(child_d).get::().unwrap(), + &ChildOf(parent) + ); + assert_eq!( + world.entity(parent).get::().unwrap().0, + [child_d, child_c] + ); + assert!(!world.entity(child_a).contains::()); + assert!(!world.entity(child_b).contains::()); + } + + #[test] + fn replace_children_order() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + let child_c = world.spawn_empty().id(); + let child_d = world.spawn_empty().id(); + + let initial_order = [child_a, child_b, child_c, child_d]; + world.entity_mut(parent).add_children(&initial_order); + + assert_eq!( + world.entity_mut(parent).get::().unwrap().0, + initial_order + ); + + let new_order = [child_d, child_b, child_a, child_c]; + world.entity_mut(parent).replace_children(&new_order); + + assert_eq!(world.entity(parent).get::().unwrap().0, new_order); + } + + #[test] + #[should_panic] + #[cfg_attr( + not(debug_assertions), + ignore = "we don't check invariants if debug assertions are off" + )] + fn replace_diff_invariant_overlapping_unrelate_with_relate() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children_with_difference(&[], &[child_a], &[child_a]); + + // This should panic + world + .entity_mut(parent) + .replace_children_with_difference(&[child_a], 
&[child_a], &[]); + } + + #[test] + #[should_panic] + #[cfg_attr( + not(debug_assertions), + ignore = "we don't check invariants if debug assertions are off" + )] + fn replace_diff_invariant_overlapping_unrelate_with_newly() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + + world + .entity_mut(parent) + .replace_children_with_difference(&[], &[child_a], &[child_a]); + + // This should panic + world.entity_mut(parent).replace_children_with_difference( + &[child_b], + &[child_a, child_b], + &[child_b], + ); + } + + #[test] + #[should_panic] + #[cfg_attr( + not(debug_assertions), + ignore = "we don't check invariants if debug assertions are off" + )] + fn replace_diff_invariant_newly_not_subset() { + let mut world = World::new(); + + let parent = world.spawn_empty().id(); + let child_a = world.spawn_empty().id(); + let child_b = world.spawn_empty().id(); + + // This should panic + world.entity_mut(parent).replace_children_with_difference( + &[], + &[child_a, child_b], + &[child_a], + ); + } + + #[test] + fn child_replace_hook_skip() { + let mut world = World::new(); + let parent = world.spawn_empty().id(); + let other = world.spawn_empty().id(); + let child = world.spawn(ChildOf(parent)).id(); + world + .entity_mut(child) + .insert_with_relationship_hook_mode(ChildOf(other), RelationshipHookMode::Skip); + assert_eq!( + &**world.entity(parent).get::().unwrap(), + &[child], + "Children should still have the old value, as on_insert/on_replace didn't run" + ); + } +} diff --git a/crates/bevy_ecs/src/identifier/mod.rs b/crates/bevy_ecs/src/identifier/mod.rs index 6134e472427e2..c08ea7b4aa241 100644 --- a/crates/bevy_ecs/src/identifier/mod.rs +++ b/crates/bevy_ecs/src/identifier/mod.rs @@ -21,7 +21,7 @@ pub(crate) mod masks; #[derive(Debug, Clone, Copy)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", 
reflect(opaque))] -#[cfg_attr(feature = "bevy_reflect", reflect(Debug, Hash, PartialEq))] +#[cfg_attr(feature = "bevy_reflect", reflect(Debug, Hash, PartialEq, Clone))] // Alignment repr necessary to allow LLVM to better output // optimized codegen for `to_bits`, `PartialEq` and `Ord`. #[repr(C, align(8))] @@ -201,7 +201,7 @@ mod tests { // and also Entity flag. let high = 0x7FFFFFFF; let low = 0xC; - let bits: u64 = high << u32::BITS | low; + let bits: u64 = (high << u32::BITS) | low; let id = Identifier::try_from_bits(bits).unwrap(); @@ -216,7 +216,10 @@ mod tests { #[rustfmt::skip] #[test] - #[allow(clippy::nonminimal_bool)] // This is intentionally testing `lt` and `ge` as separate functions. + #[expect( + clippy::nonminimal_bool, + reason = "This intentionally tests all possible comparison operators as separate functions; thus, we don't want to rewrite these comparisons to use different operators." + )] fn id_comparison() { assert!(Identifier::new(123, 456, IdKind::Entity).unwrap() == Identifier::new(123, 456, IdKind::Entity).unwrap()); assert!(Identifier::new(123, 456, IdKind::Placeholder).unwrap() == Identifier::new(123, 456, IdKind::Placeholder).unwrap()); diff --git a/crates/bevy_ecs/src/intern.rs b/crates/bevy_ecs/src/intern.rs index e606b0d546315..b10e6a2ac691b 100644 --- a/crates/bevy_ecs/src/intern.rs +++ b/crates/bevy_ecs/src/intern.rs @@ -5,16 +5,13 @@ //! and make comparisons for any type as fast as integers. use alloc::{borrow::ToOwned, boxed::Box}; +use bevy_platform::{ + collections::HashSet, + hash::FixedHasher, + sync::{PoisonError, RwLock}, +}; use core::{fmt::Debug, hash::Hash, ops::Deref}; -#[cfg(feature = "std")] -use std::sync::{PoisonError, RwLock}; - -#[cfg(not(feature = "std"))] -use spin::rwlock::RwLock; - -use bevy_utils::{FixedHasher, HashSet}; - /// An interned value. Will stay valid until the end of the program and will not drop. /// /// For details on interning, see [the module level docs](self). 
@@ -143,24 +140,16 @@ impl Interner { /// will return [`Interned`] using the same static reference. pub fn intern(&self, value: &T) -> Interned { { - #[cfg(feature = "std")] let set = self.0.read().unwrap_or_else(PoisonError::into_inner); - #[cfg(not(feature = "std"))] - let set = self.0.read(); - if let Some(value) = set.get(value) { return Interned(*value); } } { - #[cfg(feature = "std")] let mut set = self.0.write().unwrap_or_else(PoisonError::into_inner); - #[cfg(not(feature = "std"))] - let mut set = self.0.write(); - if let Some(value) = set.get(value) { Interned(*value) } else { @@ -180,7 +169,8 @@ impl Default for Interner { #[cfg(test)] mod tests { - use bevy_utils::FixedHasher; + use alloc::{boxed::Box, string::ToString}; + use bevy_platform::hash::FixedHasher; use core::hash::{BuildHasher, Hash, Hasher}; use crate::intern::{Internable, Interned, Interner}; diff --git a/crates/bevy_ecs/src/label.rs b/crates/bevy_ecs/src/label.rs index e3f5078b22f71..c404c563bdbec 100644 --- a/crates/bevy_ecs/src/label.rs +++ b/crates/bevy_ecs/src/label.rs @@ -22,6 +22,9 @@ pub trait DynEq: Any { fn dyn_eq(&self, other: &dyn DynEq) -> bool; } +// Tests that this trait is dyn-compatible +const _: Option> = None; + impl DynEq for T where T: Any + Eq, @@ -48,6 +51,9 @@ pub trait DynHash: DynEq { fn dyn_hash(&self, state: &mut dyn Hasher); } +// Tests that this trait is dyn-compatible +const _: Option> = None; + impl DynHash for T where T: DynEq + Hash, @@ -136,6 +142,7 @@ macro_rules! define_label { } } + #[diagnostic::do_not_recommend] impl $label_trait_name for $crate::intern::Interned { $($interned_extra_methods_impl)* @@ -174,7 +181,7 @@ macro_rules! define_label { impl $crate::intern::Internable for dyn $label_trait_name { fn leak(&self) -> &'static Self { - Box::leak(self.dyn_clone()) + $crate::label::Box::leak(self.dyn_clone()) } fn ref_eq(&self, other: &Self) -> bool { @@ -201,19 +208,3 @@ macro_rules! 
define_label { $crate::intern::Interner::new(); }; } - -#[cfg(test)] -mod tests { - use super::{DynEq, DynHash}; - use bevy_utils::assert_object_safe; - - #[test] - fn dyn_eq_object_safe() { - assert_object_safe::(); - } - - #[test] - fn dyn_hash_object_safe() { - assert_object_safe::(); - } -} diff --git a/crates/bevy_ecs/src/lib.rs b/crates/bevy_ecs/src/lib.rs index ab949045ef5b6..99f95763d572a 100644 --- a/crates/bevy_ecs/src/lib.rs +++ b/crates/bevy_ecs/src/lib.rs @@ -1,7 +1,7 @@ -// FIXME(11590): remove this once the lint is fixed -#![allow(unsafe_op_in_unsafe_fn)] -// TODO: remove once Edition 2024 is released -#![allow(dependency_on_unit_never_type_fallback)] +#![expect( + unsafe_op_in_unsafe_fn, + reason = "See #11590. To be removed once all applicable unsafe code has an unsafe block with a safety comment." +)] #![doc = include_str!("../README.md")] #![cfg_attr( any(docsrs, docsrs_dep), @@ -11,36 +11,48 @@ ) )] #![cfg_attr(any(docsrs, docsrs_dep), feature(doc_auto_cfg, rustdoc_internals))] -#![allow(unsafe_code)] +#![expect(unsafe_code, reason = "Unsafe code is used to improve performance.")] #![doc( html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] + +#[cfg(feature = "std")] +extern crate std; #[cfg(target_pointer_width = "16")] compile_error!("bevy_ecs cannot safely compile for a 16-bit platform."); extern crate alloc; +// Required to make proc macros work in bevy itself. 
+extern crate self as bevy_ecs; + pub mod archetype; pub mod batching; pub mod bundle; pub mod change_detection; pub mod component; pub mod entity; +pub mod entity_disabling; +pub mod error; pub mod event; +pub mod hierarchy; pub mod identifier; pub mod intern; pub mod label; pub mod name; +pub mod never; pub mod observer; pub mod query; #[cfg(feature = "bevy_reflect")] pub mod reflect; +pub mod relationship; pub mod removal_detection; -pub mod result; +pub mod resource; pub mod schedule; +pub mod spawn; pub mod storage; pub mod system; pub mod traversal; @@ -52,31 +64,40 @@ pub use bevy_ptr as ptr; /// /// This includes the most common types in this crate, re-exported for your convenience. pub mod prelude { - #[expect(deprecated)] + #[expect( + deprecated, + reason = "`crate::schedule::apply_deferred` is considered deprecated; however, it may still be used by crates which consume `bevy_ecs`, so its removal here may cause confusion. It is intended to be removed in the Bevy 0.17 cycle." + )] #[doc(hidden)] pub use crate::{ bundle::Bundle, change_detection::{DetectChanges, DetectChangesMut, Mut, Ref}, - component::{require, Component}, - entity::{Entity, EntityBorrow, EntityMapper}, + children, + component::Component, + entity::{ContainsEntity, Entity, EntityMapper}, + error::{BevyError, Result}, event::{Event, EventMutator, EventReader, EventWriter, Events}, + hierarchy::{ChildOf, ChildSpawner, ChildSpawnerCommands, Children}, name::{Name, NameOrEntity}, - observer::{CloneEntityWithObserversExt, Observer, Trigger}, + observer::{Observer, Trigger}, query::{Added, AnyOf, Changed, Has, Or, QueryBuilder, QueryState, With, Without}, + related, + relationship::RelationshipTarget, removal_detection::RemovedComponents, - result::{Error, Result}, + resource::Resource, schedule::{ - apply_deferred, common_conditions::*, ApplyDeferred, Condition, IntoSystemConfigs, - IntoSystemSet, IntoSystemSetConfigs, Schedule, Schedules, SystemSet, + apply_deferred, common_conditions::*, 
ApplyDeferred, Condition, IntoScheduleConfigs, + IntoSystemSet, Schedule, Schedules, SystemSet, }, + spawn::{Spawn, SpawnRelated}, system::{ - Commands, Deferred, EntityCommand, EntityCommands, In, InMut, InRef, IntoSystem, Local, - NonSend, NonSendMut, ParamSet, Populated, Query, ReadOnlySystem, Res, ResMut, Resource, - Single, System, SystemIn, SystemInput, SystemParamBuilder, SystemParamFunction, - WithParamWarnPolicy, + Command, Commands, Deferred, EntityCommand, EntityCommands, In, InMut, InRef, + IntoSystem, Local, NonSend, NonSendMut, ParamSet, Populated, Query, ReadOnlySystem, + Res, ResMut, Single, System, SystemIn, SystemInput, SystemParamBuilder, + SystemParamFunction, }, world::{ - Command, EntityMut, EntityRef, EntityWorldMut, FilteredResources, FilteredResourcesMut, + EntityMut, EntityRef, EntityWorldMut, FilteredResources, FilteredResourcesMut, FromWorld, OnAdd, OnInsert, OnRemove, OnReplace, World, }, }; @@ -109,21 +130,25 @@ pub mod __macro_exports { #[cfg(test)] mod tests { - use crate as bevy_ecs; use crate::{ bundle::Bundle, change_detection::Ref, - component::{require, Component, ComponentId, RequiredComponents, RequiredComponentsError}, - entity::Entity, + component::{Component, ComponentId, RequiredComponents, RequiredComponentsError}, + entity::{Entity, EntityMapper}, + entity_disabling::DefaultQueryFilters, prelude::Or, query::{Added, Changed, FilteredAccess, QueryFilter, With, Without}, - system::Resource, + resource::Resource, world::{EntityMut, EntityRef, Mut, World}, }; - use alloc::{sync::Arc, vec}; - use bevy_ecs_macros::{VisitEntities, VisitEntitiesMut}; + use alloc::{ + string::{String, ToString}, + sync::Arc, + vec, + vec::Vec, + }; + use bevy_platform::collections::HashSet; use bevy_tasks::{ComputeTaskPool, TaskPool}; - use bevy_utils::HashSet; use core::{ any::TypeId, marker::PhantomData, @@ -139,9 +164,8 @@ mod tests { #[derive(Component, Debug, PartialEq, Eq, Clone, Copy)] struct C; - #[allow(dead_code)] #[derive(Default)] - 
struct NonSendA(usize, PhantomData<*mut ()>); + struct NonSendA(PhantomData<*mut ()>); #[derive(Component, Clone, Debug)] struct DropCk(Arc); @@ -158,8 +182,10 @@ mod tests { } } - // TODO: The compiler says the Debug and Clone are removed during dead code analysis. Investigate. - #[allow(dead_code)] + #[expect( + dead_code, + reason = "This struct is used to test how `Drop` behavior works in regards to SparseSet storage, and as such is solely a wrapper around `DropCk` to make it use the SparseSet storage. Because of this, the inner field is intentionally never read." + )] #[derive(Component, Clone, Debug)] #[component(storage = "SparseSet")] struct DropCkSparse(DropCk); @@ -203,13 +229,9 @@ mod tests { y: SparseStored, } let mut ids = Vec::new(); - ::component_ids( - &mut world.components, - &mut world.storages, - &mut |id| { - ids.push(id); - }, - ); + ::component_ids(&mut world.components_registrator(), &mut |id| { + ids.push(id); + }); assert_eq!( ids, @@ -257,13 +279,9 @@ mod tests { } let mut ids = Vec::new(); - ::component_ids( - &mut world.components, - &mut world.storages, - &mut |id| { - ids.push(id); - }, - ); + ::component_ids(&mut world.components_registrator(), &mut |id| { + ids.push(id); + }); assert_eq!( ids, @@ -314,8 +332,7 @@ mod tests { let mut ids = Vec::new(); ::component_ids( - &mut world.components, - &mut world.storages, + &mut world.components_registrator(), &mut |id| { ids.push(id); }, @@ -1200,7 +1217,7 @@ mod tests { #[test] fn resource() { - use crate::system::Resource; + use crate::resource::Resource; #[derive(Resource, PartialEq, Debug)] struct Num(i32); @@ -1505,6 +1522,8 @@ mod tests { #[test] fn filtered_query_access() { let mut world = World::new(); + // We remove entity disabling so it doesn't affect our query filters + world.remove_resource::(); let query = world.query_filtered::<&mut A, Changed>(); let mut expected = FilteredAccess::::default(); @@ -1686,6 +1705,10 @@ mod tests { let values = vec![(e0, (B(0), C)), (e1, (B(1), 
C))]; + #[expect( + deprecated, + reason = "This needs to be supported for now, and therefore still needs the test." + )] world.insert_or_spawn_batch(values).unwrap(); assert_eq!( @@ -1726,6 +1749,10 @@ mod tests { let values = vec![(e0, (B(0), C)), (e1, (B(1), C)), (invalid_e2, (B(2), C))]; + #[expect( + deprecated, + reason = "This needs to be supported for now, and therefore still needs the test." + )] let result = world.insert_or_spawn_batch(values); assert_eq!( @@ -1853,7 +1880,9 @@ mod tests { let values = vec![(e0, (A(1), B(0))), (e1, (A(0), B(1)))]; - world.try_insert_batch(values); + let error = world.try_insert_batch(values).unwrap_err(); + + assert_eq!(e1, error.entities[0]); assert_eq!( world.get::(e0), @@ -1875,7 +1904,9 @@ mod tests { let values = vec![(e0, (A(1), B(0))), (e1, (A(0), B(1)))]; - world.try_insert_batch_if_new(values); + let error = world.try_insert_batch_if_new(values).unwrap_err(); + + assert_eq!(e1, error.entities[0]); assert_eq!( world.get::(e0), @@ -1896,7 +1927,7 @@ mod tests { struct X; #[derive(Component)] - #[require(Z(new_z))] + #[require(Z = new_z())] struct Y { value: String, } @@ -2001,8 +2032,8 @@ mod tests { world.insert_resource(I(0)); world .register_component_hooks::() - .on_add(|mut world, _, _| world.resource_mut::().0 += 1) - .on_insert(|mut world, _, _| world.resource_mut::().0 += 1); + .on_add(|mut world, _| world.resource_mut::().0 += 1) + .on_insert(|mut world, _| world.resource_mut::().0 += 1); // Spawn entity and ensure Y was added assert!(world.spawn(X).contains::()); @@ -2031,8 +2062,8 @@ mod tests { world.insert_resource(I(0)); world .register_component_hooks::() - .on_add(|mut world, _, _| world.resource_mut::().0 += 1) - .on_insert(|mut world, _, _| world.resource_mut::().0 += 1); + .on_add(|mut world, _| world.resource_mut::().0 += 1) + .on_insert(|mut world, _| world.resource_mut::().0 += 1); // Spawn entity and ensure Y was added assert!(world.spawn_empty().insert(X).contains::()); @@ -2615,6 +2646,37 
@@ mod tests { assert_eq!(to_vec(required_z), vec![(b, 0), (c, 1)]); } + #[test] + fn required_components_inheritance_depth_bias() { + #[derive(Component, PartialEq, Eq, Clone, Copy, Debug)] + struct MyRequired(bool); + + #[derive(Component, Default)] + #[require(MyRequired(false))] + struct MiddleMan; + + #[derive(Component, Default)] + #[require(MiddleMan)] + struct ConflictingRequire; + + #[derive(Component, Default)] + #[require(MyRequired(true))] + struct MyComponent; + + let mut world = World::new(); + let order_a = world + .spawn((ConflictingRequire, MyComponent)) + .get::() + .cloned(); + let order_b = world + .spawn((MyComponent, ConflictingRequire)) + .get::() + .cloned(); + + assert_eq!(order_a, Some(MyRequired(true))); + assert_eq!(order_b, Some(MyRequired(true))); + } + #[test] #[should_panic = "Recursive required components detected: A → B → C → B\nhelp: If this is intentional, consider merging the components."] fn required_components_recursion_errors() { @@ -2633,42 +2695,152 @@ mod tests { World::new().register_component::(); } - // These structs are primarily compilation tests to test the derive macros. Because they are - // never constructed, we have to manually silence the `dead_code` lint. 
- #[allow(dead_code)] + #[test] + #[should_panic = "Recursive required components detected: A → A\nhelp: Remove require(A)."] + fn required_components_self_errors() { + #[derive(Component, Default)] + #[require(A)] + struct A; + + World::new().register_component::(); + } + + #[derive(Default)] + struct CaptureMapper(Vec); + impl EntityMapper for CaptureMapper { + fn get_mapped(&mut self, source: Entity) -> Entity { + self.0.push(source); + source + } + + fn set_mapped(&mut self, _source: Entity, _target: Entity) {} + } + + #[test] + fn map_struct_entities() { + #[derive(Component)] + #[expect( + unused, + reason = "extra fields are used to ensure the derive works properly" + )] + struct Foo(usize, #[entities] Entity); + + #[derive(Component)] + #[expect( + unused, + reason = "extra fields are used to ensure the derive works properly" + )] + struct Bar { + #[entities] + a: Entity, + b: usize, + #[entities] + c: Vec, + } + + let mut world = World::new(); + let e1 = world.spawn_empty().id(); + let e2 = world.spawn_empty().id(); + let e3 = world.spawn_empty().id(); + + let mut foo = Foo(1, e1); + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut foo, &mut mapper); + assert_eq!(&mapper.0, &[e1]); + + let mut bar = Bar { + a: e1, + b: 1, + c: vec![e2, e3], + }; + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut bar, &mut mapper); + assert_eq!(&mapper.0, &[e1, e2, e3]); + } + + #[test] + fn map_enum_entities() { + #[derive(Component)] + #[expect( + unused, + reason = "extra fields are used to ensure the derive works properly" + )] + enum Foo { + Bar(usize, #[entities] Entity), + Baz { + #[entities] + a: Entity, + b: usize, + #[entities] + c: Vec, + }, + } + + let mut world = World::new(); + let e1 = world.spawn_empty().id(); + let e2 = world.spawn_empty().id(); + let e3 = world.spawn_empty().id(); + + let mut foo = Foo::Bar(1, e1); + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut foo, &mut 
mapper); + assert_eq!(&mapper.0, &[e1]); + + let mut foo = Foo::Baz { + a: e1, + b: 1, + c: vec![e2, e3], + }; + let mut mapper = CaptureMapper::default(); + Component::map_entities(&mut foo, &mut mapper); + assert_eq!(&mapper.0, &[e1, e2, e3]); + } + + #[expect( + dead_code, + reason = "This struct is used as a compilation test to test the derive macros, and as such is intentionally never constructed." + )] #[derive(Component)] struct ComponentA(u32); - #[allow(dead_code)] + #[expect( + dead_code, + reason = "This struct is used as a compilation test to test the derive macros, and as such is intentionally never constructed." + )] #[derive(Component)] struct ComponentB(u32); - #[allow(dead_code)] #[derive(Bundle)] struct Simple(ComponentA); - #[allow(dead_code)] #[derive(Bundle)] struct Tuple(Simple, ComponentB); - #[allow(dead_code)] #[derive(Bundle)] struct Record { field0: Simple, field1: ComponentB, } - #[allow(dead_code)] - #[derive(Component, VisitEntities, VisitEntitiesMut)] + #[derive(Component)] struct MyEntities { + #[entities] entities: Vec, + #[entities] another_one: Entity, + #[entities] maybe_entity: Option, - #[visit_entities(ignore)] + #[expect( + dead_code, + reason = "This struct is used as a compilation test to test the derive macros, and as such this field is intentionally never used." + )] something_else: String, } - #[allow(dead_code)] - #[derive(Component, VisitEntities, VisitEntitiesMut)] - struct MyEntitiesTuple(Vec, Entity, #[visit_entities(ignore)] usize); + #[expect( + dead_code, + reason = "This struct is used as a compilation test to test the derive macros, and as such is intentionally never constructed." + )] + #[derive(Component)] + struct MyEntitiesTuple(#[entities] Vec, #[entities] Entity, usize); } diff --git a/crates/bevy_ecs/src/name.rs b/crates/bevy_ecs/src/name.rs index 3ae68798a17bf..dd34f5578a6b3 100644 --- a/crates/bevy_ecs/src/name.rs +++ b/crates/bevy_ecs/src/name.rs @@ -1,21 +1,24 @@ //! 
Provides the [`Name`] [`Component`], used for identifying an [`Entity`]. -use crate::{self as bevy_ecs, component::Component, entity::Entity, query::QueryData}; +use crate::{component::Component, entity::Entity, query::QueryData}; use alloc::{ borrow::{Cow, ToOwned}, string::String, }; -use bevy_utils::FixedHasher; +use bevy_platform::hash::FixedHasher; use core::{ hash::{BuildHasher, Hash, Hasher}, ops::Deref, }; #[cfg(feature = "serialize")] -use serde::{ - de::{Error, Visitor}, - Deserialize, Deserializer, Serialize, Serializer, +use { + alloc::string::ToString, + serde::{ + de::{Error, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, + }, }; #[cfg(feature = "bevy_reflect")] @@ -38,7 +41,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Component, Default, Debug) + reflect(Component, Default, Debug, Clone, Hash, PartialEq) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -261,6 +264,7 @@ impl<'de> Visitor<'de> for NameVisitor { mod tests { use super::*; use crate::world::World; + use alloc::string::ToString; #[test] fn test_display_of_debug_name() { diff --git a/crates/bevy_ecs/src/never.rs b/crates/bevy_ecs/src/never.rs new file mode 100644 index 0000000000000..ba814c7006dec --- /dev/null +++ b/crates/bevy_ecs/src/never.rs @@ -0,0 +1,39 @@ +//! A workaround for the `!` type in stable Rust. +//! +//! This approach is taken from the [`never_say_never`] crate, +//! reimplemented here to avoid adding a new dependency. +//! +//! This module exists due to a change in [never type fallback inference] in the Rust 2024 edition. +//! This caused failures in `bevy_ecs`'s traits which are implemented for functions +//! (like [`System`](crate::system::System)) when working with panicking closures. +//! +//! Note that using this hack is not recommended in general; +//! by doing so you are knowingly opting out of rustc's stability guarantees. +//! 
Code that compiles due to this hack may break in future versions of Rust. +//! +//! Please read [issue #18778](https://github.com/bevyengine/bevy/issues/18778) for an explanation of why +//! Bevy has chosen to use this workaround. +//! +//! [`never_say_never`]: https://crates.io/crates/never_say_never +//! [never type fallback inference]: https://doc.rust-lang.org/edition-guide/rust-2024/never-type-fallback.html + +mod fn_ret { + /// A helper trait for naming the ! type. + #[doc(hidden)] + pub trait FnRet { + /// The return type of the function. + type Output; + } + + /// This blanket implementation allows us to name the never type, + /// by using the associated type of this trait for `fn() -> !`. + impl FnRet for fn() -> R { + type Output = R; + } +} + +/// A hacky type alias for the `!` (never) type. +/// +/// This knowingly opts out of rustc's stability guarantees. +/// Read the module documentation carefully before using this! +pub type Never = ! as fn_ret::FnRet>::Output; diff --git a/crates/bevy_ecs/src/observer/entity_observer.rs b/crates/bevy_ecs/src/observer/entity_observer.rs index ee94cfa62a73e..d69f7764fe489 100644 --- a/crates/bevy_ecs/src/observer/entity_observer.rs +++ b/crates/bevy_ecs/src/observer/entity_observer.rs @@ -1,21 +1,23 @@ use crate::{ - component::{Component, ComponentCloneHandler, ComponentHooks, Mutable, StorageType}, - entity::{ComponentCloneCtx, Entity, EntityCloneBuilder}, + component::{ + Component, ComponentCloneBehavior, ComponentHook, HookContext, Mutable, StorageType, + }, + entity::{ComponentCloneCtx, Entity, EntityClonerBuilder, EntityMapper, SourceComponent}, observer::ObserverState, - world::{DeferredWorld, World}, + world::World, }; use alloc::vec::Vec; /// Tracks a list of entity observers for the [`Entity`] [`ObservedBy`] is added to. 
#[derive(Default)] -pub(crate) struct ObservedBy(pub(crate) Vec); +pub struct ObservedBy(pub(crate) Vec); impl Component for ObservedBy { const STORAGE_TYPE: StorageType = StorageType::SparseSet; type Mutability = Mutable; - fn register_component_hooks(hooks: &mut ComponentHooks) { - hooks.on_remove(|mut world, entity, _| { + fn on_remove() -> Option { + Some(|mut world, HookContext { entity, .. }| { let observed_by = { let mut component = world.get_mut::(entity).unwrap(); core::mem::take(&mut component.0) @@ -40,37 +42,32 @@ impl Component for ObservedBy { world.commands().entity(e).despawn(); } } - }); + }) } - fn get_component_clone_handler() -> ComponentCloneHandler { - ComponentCloneHandler::ignore() + fn clone_behavior() -> ComponentCloneBehavior { + ComponentCloneBehavior::Ignore } } -/// Trait that holds functions for configuring interaction with observers during entity cloning. -pub trait CloneEntityWithObserversExt { - /// Sets the option to automatically add cloned entities to the obsevers targeting source entity. - fn add_observers(&mut self, add_observers: bool) -> &mut Self; -} - -impl CloneEntityWithObserversExt for EntityCloneBuilder<'_> { - fn add_observers(&mut self, add_observers: bool) -> &mut Self { +impl EntityClonerBuilder<'_> { + /// Sets the option to automatically add cloned entities to the observers targeting source entity. 
+ pub fn add_observers(&mut self, add_observers: bool) -> &mut Self { if add_observers { - self.override_component_clone_handler::( - ComponentCloneHandler::custom_handler(component_clone_observed_by), - ) + self.override_clone_behavior::(ComponentCloneBehavior::Custom( + component_clone_observed_by, + )) } else { - self.remove_component_clone_handler_override::() + self.remove_clone_behavior_override::() } } } -fn component_clone_observed_by(world: &mut DeferredWorld, ctx: &mut ComponentCloneCtx) { +fn component_clone_observed_by(_source: &SourceComponent, ctx: &mut ComponentCloneCtx) { let target = ctx.target(); let source = ctx.source(); - world.commands().queue(move |world: &mut World| { + ctx.queue_deferred(move |world: &mut World, _mapper: &mut dyn EntityMapper| { let observed_by = world .get::(source) .map(|observed_by| observed_by.0.clone()) @@ -112,11 +109,7 @@ fn component_clone_observed_by(world: &mut DeferredWorld, ctx: &mut ComponentClo #[cfg(test)] mod tests { use crate::{ - self as bevy_ecs, - entity::EntityCloneBuilder, - event::Event, - observer::{CloneEntityWithObserversExt, Trigger}, - system::{ResMut, Resource}, + entity::EntityCloner, event::Event, observer::Trigger, resource::Resource, system::ResMut, world::World, }; @@ -140,9 +133,9 @@ mod tests { world.trigger_targets(E, e); let e_clone = world.spawn_empty().id(); - let mut builder = EntityCloneBuilder::new(&mut world); - builder.add_observers(true); - builder.clone_entity(e, e_clone); + EntityCloner::build(&mut world) + .add_observers(true) + .clone_entity(e, e_clone); world.trigger_targets(E, [e, e_clone]); diff --git a/crates/bevy_ecs/src/observer/mod.rs b/crates/bevy_ecs/src/observer/mod.rs index c78484c6a6ff5..78569bc4ec73b 100644 --- a/crates/bevy_ecs/src/observer/mod.rs +++ b/crates/bevy_ecs/src/observer/mod.rs @@ -2,24 +2,23 @@ mod entity_observer; mod runner; -mod trigger_event; -pub use entity_observer::CloneEntityWithObserversExt; +pub use entity_observer::ObservedBy; pub use 
runner::*; -pub use trigger_event::*; +use variadics_please::all_tuples; use crate::{ archetype::ArchetypeFlags, + change_detection::MaybeLocation, component::ComponentId, entity::EntityHashMap, - observer::entity_observer::ObservedBy, prelude::*, system::IntoObserverSystem, world::{DeferredWorld, *}, }; use alloc::vec::Vec; +use bevy_platform::collections::HashMap; use bevy_ptr::Ptr; -use bevy_utils::HashMap; use core::{ fmt::Debug, marker::PhantomData, @@ -141,6 +140,11 @@ impl<'w, E, B: Bundle> Trigger<'w, E, B> { pub fn get_propagate(&self) -> bool { *self.propagate } + + /// Returns the source code location that triggered this observer. + pub fn caller(&self) -> MaybeLocation { + self.trigger.caller + } } impl<'w, E: Debug, B: Bundle> Debug for Trigger<'w, E, B> { @@ -168,6 +172,114 @@ impl<'w, E, B: Bundle> DerefMut for Trigger<'w, E, B> { } } +/// Represents a collection of targets for a specific [`Trigger`] of an [`Event`]. Targets can be of type [`Entity`] or [`ComponentId`]. +/// +/// When a trigger occurs for a given event and [`TriggerTargets`], any [`Observer`] that watches for that specific event-target combination +/// will run. +pub trait TriggerTargets { + /// The components the trigger should target. + fn components(&self) -> impl Iterator + Clone + '_; + + /// The entities the trigger should target. 
+ fn entities(&self) -> impl Iterator + Clone + '_; +} + +impl TriggerTargets for &T { + fn components(&self) -> impl Iterator + Clone + '_ { + (**self).components() + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + (**self).entities() + } +} + +impl TriggerTargets for Entity { + fn components(&self) -> impl Iterator + Clone + '_ { + [].into_iter() + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + core::iter::once(*self) + } +} + +impl TriggerTargets for ComponentId { + fn components(&self) -> impl Iterator + Clone + '_ { + core::iter::once(*self) + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + [].into_iter() + } +} + +impl TriggerTargets for Vec { + fn components(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::components) + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::entities) + } +} + +impl TriggerTargets for [T; N] { + fn components(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::components) + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::entities) + } +} + +impl TriggerTargets for [T] { + fn components(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::components) + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + self.iter().flat_map(T::entities) + } +} + +macro_rules! 
impl_trigger_targets_tuples { + ($(#[$meta:meta])* $($trigger_targets: ident),*) => { + #[expect(clippy::allow_attributes, reason = "can't guarantee violation of non_snake_case")] + #[allow(non_snake_case, reason = "`all_tuples!()` generates non-snake-case variable names.")] + $(#[$meta])* + impl<$($trigger_targets: TriggerTargets),*> TriggerTargets for ($($trigger_targets,)*) + { + fn components(&self) -> impl Iterator + Clone + '_ { + let iter = [].into_iter(); + let ($($trigger_targets,)*) = self; + $( + let iter = iter.chain($trigger_targets.components()); + )* + iter + } + + fn entities(&self) -> impl Iterator + Clone + '_ { + let iter = [].into_iter(); + let ($($trigger_targets,)*) = self; + $( + let iter = iter.chain($trigger_targets.entities()); + )* + iter + } + } + } +} + +all_tuples!( + #[doc(fake_variadic)] + impl_trigger_targets_tuples, + 0, + 15, + T +); + /// A description of what an [`Observer`] observes. #[derive(Default, Clone)] pub struct ObserverDescriptor { @@ -209,6 +321,21 @@ impl ObserverDescriptor { .extend(descriptor.components.iter().copied()); self.entities.extend(descriptor.entities.iter().copied()); } + + /// Returns the `events` that the observer is watching. + pub fn events(&self) -> &[ComponentId] { + &self.events + } + + /// Returns the `components` that the observer is watching. + pub fn components(&self) -> &[ComponentId] { + &self.components + } + + /// Returns the `entities` that the observer is watching. + pub fn entities(&self) -> &[Entity] { + &self.entities + } } /// Event trigger metadata for a given [`Observer`], @@ -222,6 +349,8 @@ pub struct ObserverTrigger { components: SmallVec<[ComponentId; 2]>, /// The entity the trigger targeted. pub target: Entity, + /// The location of the source code that triggered the observer.
+ pub caller: MaybeLocation, } impl ObserverTrigger { @@ -262,6 +391,7 @@ pub struct Observers { on_insert: CachedObservers, on_replace: CachedObservers, on_remove: CachedObservers, + on_despawn: CachedObservers, // Map from trigger type to set of observers cache: HashMap, } @@ -273,6 +403,7 @@ impl Observers { ON_INSERT => &mut self.on_insert, ON_REPLACE => &mut self.on_replace, ON_REMOVE => &mut self.on_remove, + ON_DESPAWN => &mut self.on_despawn, _ => self.cache.entry(event_type).or_default(), } } @@ -283,6 +414,7 @@ impl Observers { ON_INSERT => Some(&self.on_insert), ON_REPLACE => Some(&self.on_replace), ON_REMOVE => Some(&self.on_remove), + ON_DESPAWN => Some(&self.on_despawn), _ => self.cache.get(&event_type), } } @@ -295,6 +427,7 @@ impl Observers { components: impl Iterator + Clone, data: &mut T, propagate: &mut bool, + caller: MaybeLocation, ) { // SAFETY: You cannot get a mutable reference to `observers` from `DeferredWorld` let (mut world, observers) = unsafe { @@ -319,6 +452,7 @@ impl Observers { event_type, components: components.clone().collect(), target, + caller, }, data.into(), propagate, @@ -357,6 +491,7 @@ impl Observers { ON_INSERT => Some(ArchetypeFlags::ON_INSERT_OBSERVER), ON_REPLACE => Some(ArchetypeFlags::ON_REPLACE_OBSERVER), ON_REMOVE => Some(ArchetypeFlags::ON_REMOVE_OBSERVER), + ON_DESPAWN => Some(ArchetypeFlags::ON_DESPAWN_OBSERVER), _ => None, } } @@ -393,6 +528,14 @@ impl Observers { { flags.insert(ArchetypeFlags::ON_REMOVE_OBSERVER); } + + if self + .on_despawn + .component_observers + .contains_key(&component_id) + { + flags.insert(ArchetypeFlags::ON_DESPAWN_OBSERVER); + } } } @@ -431,16 +574,28 @@ impl World { /// While event types commonly implement [`Copy`], /// those that don't will be consumed and will no longer be accessible. /// If you need to use the event after triggering it, use [`World::trigger_ref`] instead. 
- pub fn trigger(&mut self, event: impl Event) { - TriggerEvent { event, targets: () }.trigger(self); + #[track_caller] + pub fn trigger(&mut self, event: E) { + self.trigger_with_caller(event, MaybeLocation::caller()); + } + + pub(crate) fn trigger_with_caller(&mut self, mut event: E, caller: MaybeLocation) { + let event_id = E::register_component_id(self); + // SAFETY: We just registered `event_id` with the type of `event` + unsafe { + self.trigger_targets_dynamic_ref_with_caller(event_id, &mut event, (), caller); + } } /// Triggers the given [`Event`] as a mutable reference, which will run any [`Observer`]s watching for it. /// /// Compared to [`World::trigger`], this method is most useful when it's necessary to check /// or use the event after it has been modified by observers. - pub fn trigger_ref(&mut self, event: &mut impl Event) { - TriggerEvent { event, targets: () }.trigger_ref(self); + #[track_caller] + pub fn trigger_ref(&mut self, event: &mut E) { + let event_id = E::register_component_id(self); + // SAFETY: We just registered `event_id` with the type of `event` + unsafe { self.trigger_targets_dynamic_ref(event_id, event, ()) }; } /// Triggers the given [`Event`] for the given `targets`, which will run any [`Observer`]s watching for it. @@ -448,8 +603,22 @@ impl World { /// While event types commonly implement [`Copy`], /// those that don't will be consumed and will no longer be accessible. /// If you need to use the event after triggering it, use [`World::trigger_targets_ref`] instead. 
- pub fn trigger_targets(&mut self, event: impl Event, targets: impl TriggerTargets) { - TriggerEvent { event, targets }.trigger(self); + #[track_caller] + pub fn trigger_targets(&mut self, event: E, targets: impl TriggerTargets) { + self.trigger_targets_with_caller(event, targets, MaybeLocation::caller()); + } + + pub(crate) fn trigger_targets_with_caller( + &mut self, + mut event: E, + targets: impl TriggerTargets, + caller: MaybeLocation, + ) { + let event_id = E::register_component_id(self); + // SAFETY: We just registered `event_id` with the type of `event` + unsafe { + self.trigger_targets_dynamic_ref_with_caller(event_id, &mut event, targets, caller); + } } /// Triggers the given [`Event`] as a mutable reference for the given `targets`, @@ -457,8 +626,98 @@ impl World { /// /// Compared to [`World::trigger_targets`], this method is most useful when it's necessary to check /// or use the event after it has been modified by observers. - pub fn trigger_targets_ref(&mut self, event: &mut impl Event, targets: impl TriggerTargets) { - TriggerEvent { event, targets }.trigger_ref(self); + #[track_caller] + pub fn trigger_targets_ref(&mut self, event: &mut E, targets: impl TriggerTargets) { + let event_id = E::register_component_id(self); + // SAFETY: We just registered `event_id` with the type of `event` + unsafe { self.trigger_targets_dynamic_ref(event_id, event, targets) }; + } + + /// Triggers the given [`Event`] for the given `targets`, which will run any [`Observer`]s watching for it. + /// + /// While event types commonly implement [`Copy`], + /// those that don't will be consumed and will no longer be accessible. + /// If you need to use the event after triggering it, use [`World::trigger_targets_dynamic_ref`] instead. + /// + /// # Safety + /// + /// Caller must ensure that `event_data` is accessible as the type represented by `event_id`. 
+ #[track_caller] + pub unsafe fn trigger_targets_dynamic( + &mut self, + event_id: ComponentId, + mut event_data: E, + targets: Targets, + ) { + // SAFETY: `event_data` is accessible as the type represented by `event_id` + unsafe { + self.trigger_targets_dynamic_ref(event_id, &mut event_data, targets); + }; + } + + /// Triggers the given [`Event`] as a mutable reference for the given `targets`, + /// which will run any [`Observer`]s watching for it. + /// + /// Compared to [`World::trigger_targets_dynamic`], this method is most useful when it's necessary to check + /// or use the event after it has been modified by observers. + /// + /// # Safety + /// + /// Caller must ensure that `event_data` is accessible as the type represented by `event_id`. + #[track_caller] + pub unsafe fn trigger_targets_dynamic_ref( + &mut self, + event_id: ComponentId, + event_data: &mut E, + targets: Targets, + ) { + self.trigger_targets_dynamic_ref_with_caller( + event_id, + event_data, + targets, + MaybeLocation::caller(), + ); + } + + /// # Safety + /// + /// See `trigger_targets_dynamic_ref` + unsafe fn trigger_targets_dynamic_ref_with_caller( + &mut self, + event_id: ComponentId, + event_data: &mut E, + targets: Targets, + caller: MaybeLocation, + ) { + let mut world = DeferredWorld::from(self); + let mut entity_targets = targets.entities().peekable(); + if entity_targets.peek().is_none() { + // SAFETY: `event_data` is accessible as the type represented by `event_id` + unsafe { + world.trigger_observers_with_data::<_, E::Traversal>( + event_id, + Entity::PLACEHOLDER, + targets.components(), + event_data, + false, + caller, + ); + }; + } else { + for target_entity in entity_targets { + // SAFETY: `event_data` is accessible as the type represented by `event_id` + unsafe { + world.trigger_observers_with_data::<_, E::Traversal>( + event_id, + target_entity, + targets.components(), + event_data, + E::AUTO_PROPAGATE, + caller, + ); + }; + } + } } /// Register an observer to the cache, 
called when an observer is created @@ -582,15 +841,15 @@ impl World { #[cfg(test)] mod tests { - use alloc::vec; + use alloc::{vec, vec::Vec}; + use bevy_platform::collections::HashMap; use bevy_ptr::OwningPtr; - use bevy_utils::HashMap; - use crate as bevy_ecs; use crate::component::ComponentId; use crate::{ - observer::{EmitDynamicTrigger, Observer, ObserverDescriptor, ObserverState, OnReplace}, + change_detection::MaybeLocation, + observer::{Observer, ObserverDescriptor, ObserverState, OnReplace}, prelude::*, traversal::Traversal, }; @@ -627,23 +886,18 @@ mod tests { } #[derive(Component)] - struct Parent(Entity); + struct ChildOf(Entity); - impl Traversal for &'_ Parent { + impl Traversal for &'_ ChildOf { fn traverse(item: Self::Item<'_>, _: &D) -> Option { Some(item.0) } } - #[derive(Component)] + #[derive(Component, Event)] + #[event(traversal = &'static ChildOf, auto_propagate)] struct EventPropagating; - impl Event for EventPropagating { - type Traversal = &'static Parent; - - const AUTO_PROPAGATE: bool = true; - } - #[test] fn observer_order_spawn_despawn() { let mut world = World::new(); @@ -834,7 +1088,7 @@ mod tests { fn observer_multiple_events() { let mut world = World::new(); world.init_resource::(); - let on_remove = world.register_component::(); + let on_remove = OnRemove::register_component_id(&mut world); world.spawn( // SAFETY: OnAdd and OnRemove are both unit types, so this is safe unsafe { @@ -874,11 +1128,10 @@ mod tests { fn observer_despawn() { let mut world = World::new(); - let observer = world - .add_observer(|_: Trigger| { - panic!("Observer triggered after being despawned.") - }) - .id(); + let system: fn(Trigger) = |_| { + panic!("Observer triggered after being despawned."); + }; + let observer = world.add_observer(system).id(); world.despawn(observer); world.spawn(A).flush(); } @@ -895,11 +1148,11 @@ mod tests { res.observed("remove_a"); }); - let observer = world - .add_observer(|_: Trigger| { - panic!("Observer triggered after 
being despawned.") - }) - .flush(); + let system: fn(Trigger) = |_: Trigger| { + panic!("Observer triggered after being despawned."); + }; + + let observer = world.add_observer(system).flush(); world.despawn(observer); world.despawn(entity); @@ -925,9 +1178,10 @@ mod tests { let mut world = World::new(); world.init_resource::(); - world - .spawn_empty() - .observe(|_: Trigger| panic!("Trigger routed to non-targeted entity.")); + let system: fn(Trigger) = |_| { + panic!("Trigger routed to non-targeted entity."); + }; + world.spawn_empty().observe(system); world.add_observer(move |obs: Trigger, mut res: ResMut| { assert_eq!(obs.target(), Entity::PLACEHOLDER); res.observed("event_a"); @@ -946,9 +1200,11 @@ mod tests { let mut world = World::new(); world.init_resource::(); - world - .spawn_empty() - .observe(|_: Trigger| panic!("Trigger routed to non-targeted entity.")); + let system: fn(Trigger) = |_| { + panic!("Trigger routed to non-targeted entity."); + }; + + world.spawn_empty().observe(system); let entity = world .spawn_empty() .observe(|_: Trigger, mut res: ResMut| res.observed("a_1")) @@ -966,6 +1222,119 @@ mod tests { assert_eq!(vec!["a_2", "a_1"], world.resource::().0); } + #[test] + fn observer_multiple_targets() { + #[derive(Resource, Default)] + struct R(i32); + + let mut world = World::new(); + let component_a = world.register_component::(); + let component_b = world.register_component::(); + world.init_resource::(); + + // targets (entity_1, A) + let entity_1 = world + .spawn_empty() + .observe(|_: Trigger, mut res: ResMut| res.0 += 1) + .id(); + // targets (entity_2, B) + let entity_2 = world + .spawn_empty() + .observe(|_: Trigger, mut res: ResMut| res.0 += 10) + .id(); + // targets any entity or component + world.add_observer(|_: Trigger, mut res: ResMut| res.0 += 100); + // targets any entity, and components A or B + world.add_observer(|_: Trigger, mut res: ResMut| res.0 += 1000); + // test all tuples + world.add_observer(|_: Trigger, mut res: 
ResMut| res.0 += 10000); + world.add_observer( + |_: Trigger, mut res: ResMut| { + res.0 += 100000; + }, + ); + world.add_observer( + |_: Trigger, + mut res: ResMut| res.0 += 1000000, + ); + + // WorldEntityMut does not automatically flush. + world.flush(); + + // trigger for an entity and a component + world.trigger_targets(EventA, (entity_1, component_a)); + world.flush(); + // only observer that doesn't trigger is the one only watching entity_2 + assert_eq!(1111101, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger for both entities, but no components: trigger once per entity target + world.trigger_targets(EventA, (entity_1, entity_2)); + world.flush(); + // only the observer that doesn't require components triggers - once per entity + assert_eq!(200, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger for both components, but no entities: trigger once + world.trigger_targets(EventA, (component_a, component_b)); + world.flush(); + // all component observers trigger, entities are not observed + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger for both entities and both components: trigger once per entity target + // we only get 2222211 because a given observer can trigger only once per entity target + world.trigger_targets(EventA, ((component_a, component_b), (entity_1, entity_2))); + world.flush(); + assert_eq!(2222211, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger to test complex tuples: (A, B, (A, B)) + world.trigger_targets( + EventA, + (component_a, component_b, (component_a, component_b)), + ); + world.flush(); + // the duplicate components in the tuple don't cause multiple triggers + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger to test complex tuples: (A, B, (A, B), ((A, B), (A, B))) + world.trigger_targets( + EventA, + ( + component_a, + component_b, + (component_a, component_b), + ((component_a, component_b), 
(component_a, component_b)), + ), + ); + world.flush(); + // the duplicate components in the tuple don't cause multiple triggers + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + + // trigger to test the most complex tuple: (A, B, (A, B), (B, A), (A, B, ((A, B), (B, A)))) + world.trigger_targets( + EventA, + ( + component_a, + component_b, + (component_a, component_b), + (component_b, component_a), + ( + component_a, + component_b, + ((component_a, component_b), (component_b, component_a)), + ), + ), + ); + world.flush(); + // the duplicate components in the tuple don't cause multiple triggers + assert_eq!(1111100, world.resource::().0); + world.resource_mut::().0 = 0; + } + #[test] fn observer_dynamic_component() { let mut world = World::new(); @@ -993,10 +1362,10 @@ mod tests { fn observer_dynamic_trigger() { let mut world = World::new(); world.init_resource::(); - let event_a = world.register_component::(); + let event_a = OnRemove::register_component_id(&mut world); world.spawn(ObserverState { - // SAFETY: we registered `event_a` above and it matches the type of TriggerA + // SAFETY: we registered `event_a` above and it matches the type of EventA descriptor: unsafe { ObserverDescriptor::default().with_events(vec![event_a]) }, runner: |mut world, _trigger, _ptr, _propagate| { world.resource_mut::().observed("event_a"); @@ -1004,10 +1373,10 @@ mod tests { ..Default::default() }); - world.commands().queue( - // SAFETY: we registered `event_a` above and it matches the type of TriggerA - unsafe { EmitDynamicTrigger::new_with_id(event_a, EventA, ()) }, - ); + world.commands().queue(move |world: &mut World| { + // SAFETY: we registered `event_a` above and it matches the type of EventA + unsafe { world.trigger_targets_dynamic(event_a, EventA, ()) }; + }); world.flush(); assert_eq!(vec!["event_a"], world.resource::().0); } @@ -1025,7 +1394,7 @@ mod tests { .id(); let child = world - .spawn(Parent(parent)) + .spawn(ChildOf(parent)) 
.observe(|_: Trigger, mut res: ResMut| { res.observed("child"); }) @@ -1052,7 +1421,7 @@ mod tests { .id(); let child = world - .spawn(Parent(parent)) + .spawn(ChildOf(parent)) .observe(|_: Trigger, mut res: ResMut| { res.observed("child"); }) @@ -1082,7 +1451,7 @@ mod tests { .id(); let child = world - .spawn(Parent(parent)) + .spawn(ChildOf(parent)) .observe(|_: Trigger, mut res: ResMut| { res.observed("child"); }) @@ -1112,7 +1481,7 @@ mod tests { .id(); let child = world - .spawn(Parent(parent)) + .spawn(ChildOf(parent)) .observe( |mut trigger: Trigger, mut res: ResMut| { res.observed("child"); @@ -1142,14 +1511,14 @@ mod tests { .id(); let child_a = world - .spawn(Parent(parent)) + .spawn(ChildOf(parent)) .observe(|_: Trigger, mut res: ResMut| { res.observed("child_a"); }) .id(); let child_b = world - .spawn(Parent(parent)) + .spawn(ChildOf(parent)) .observe(|_: Trigger, mut res: ResMut| { res.observed("child_b"); }) @@ -1199,7 +1568,7 @@ mod tests { .id(); let child_a = world - .spawn(Parent(parent_a)) + .spawn(ChildOf(parent_a)) .observe( |mut trigger: Trigger, mut res: ResMut| { res.observed("child_a"); @@ -1216,7 +1585,7 @@ mod tests { .id(); let child_b = world - .spawn(Parent(parent_b)) + .spawn(ChildOf(parent_b)) .observe(|_: Trigger, mut res: ResMut| { res.observed("child_b"); }) @@ -1243,8 +1612,8 @@ mod tests { }); let grandparent = world.spawn_empty().id(); - let parent = world.spawn(Parent(grandparent)).id(); - let child = world.spawn(Parent(parent)).id(); + let parent = world.spawn(ChildOf(grandparent)).id(); + let child = world.spawn(ChildOf(parent)).id(); // TODO: ideally this flush is not necessary, but right now observe() returns WorldEntityMut // and therefore does not automatically flush. 
@@ -1268,8 +1637,8 @@ mod tests { ); let grandparent = world.spawn(A).id(); - let parent = world.spawn(Parent(grandparent)).id(); - let child = world.spawn((A, Parent(parent))).id(); + let parent = world.spawn(ChildOf(grandparent)).id(); + let child = world.spawn((A, ChildOf(parent))).id(); // TODO: ideally this flush is not necessary, but right now observe() returns WorldEntityMut // and therefore does not automatically flush. @@ -1279,6 +1648,23 @@ mod tests { assert_eq!(vec!["event", "event"], world.resource::().0); } + // Originally for https://github.com/bevyengine/bevy/issues/18452 + #[test] + fn observer_modifies_relationship() { + fn on_add(trigger: Trigger, mut commands: Commands) { + commands + .entity(trigger.target()) + .with_related_entities::(|rsc| { + rsc.spawn_empty(); + }); + } + + let mut world = World::new(); + world.add_observer(on_add); + world.spawn(A); + world.flush(); + } + // Regression test for https://github.com/bevyengine/bevy/issues/14467 // Fails prior to https://github.com/bevyengine/bevy/pull/15398 #[test] @@ -1302,6 +1688,7 @@ mod tests { } #[test] + #[should_panic] fn observer_invalid_params() { #[derive(Resource)] struct ResA; @@ -1315,8 +1702,6 @@ mod tests { commands.insert_resource(ResB); }); world.trigger(EventA); - - assert!(world.get_resource::().is_none()); } #[test] @@ -1339,6 +1724,38 @@ mod tests { assert!(world.get_resource::().is_some()); } + #[test] + #[track_caller] + fn observer_caller_location_event() { + #[derive(Event)] + struct EventA; + + let caller = MaybeLocation::caller(); + let mut world = World::new(); + world.add_observer(move |trigger: Trigger| { + assert_eq!(trigger.caller(), caller); + }); + world.trigger(EventA); + } + + #[test] + #[track_caller] + fn observer_caller_location_command_archetype_move() { + #[derive(Component)] + struct Component; + + let caller = MaybeLocation::caller(); + let mut world = World::new(); + world.add_observer(move |trigger: Trigger| { + assert_eq!(trigger.caller(), 
caller); + }); + world.add_observer(move |trigger: Trigger| { + assert_eq!(trigger.caller(), caller); + }); + world.commands().spawn(Component).clear(); + world.flush(); + } + #[test] fn observer_triggered_components() { #[derive(Resource, Default)] diff --git a/crates/bevy_ecs/src/observer/runner.rs b/crates/bevy_ecs/src/observer/runner.rs index b63f4f34a5d00..d68c495dabf55 100644 --- a/crates/bevy_ecs/src/observer/runner.rs +++ b/crates/bevy_ecs/src/observer/runner.rs @@ -2,7 +2,8 @@ use alloc::{boxed::Box, vec, vec::Vec}; use core::any::Any; use crate::{ - component::{ComponentHook, ComponentHooks, ComponentId, Mutable, StorageType}, + component::{ComponentHook, ComponentId, HookContext, Mutable, StorageType}, + error::{default_error_handler, ErrorContext}, observer::{ObserverDescriptor, ObserverTrigger}, prelude::*, query::DebugCheckedUnwrap, @@ -65,13 +66,16 @@ impl Component for ObserverState { const STORAGE_TYPE: StorageType = StorageType::SparseSet; type Mutability = Mutable; - fn register_component_hooks(hooks: &mut ComponentHooks) { - hooks.on_add(|mut world, entity, _| { + fn on_add() -> Option { + Some(|mut world, HookContext { entity, .. }| { world.commands().queue(move |world: &mut World| { world.register_observer(entity); }); - }); - hooks.on_remove(|mut world, entity, _| { + }) + } + + fn on_remove() -> Option { + Some(|mut world, HookContext { entity, .. 
}| { let descriptor = core::mem::take( &mut world .entity_mut(entity) @@ -83,7 +87,7 @@ impl Component for ObserverState { world.commands().queue(move |world: &mut World| { world.unregister_observer(entity, descriptor); }); - }); + }) } } @@ -198,7 +202,7 @@ pub type ObserverRunner = fn(DeferredWorld, ObserverTrigger, PtrMut, propagate: /// struct Explode; /// /// world.add_observer(|trigger: Trigger, mut commands: Commands| { -/// println!("Entity {:?} goes BOOM!", trigger.target()); +/// println!("Entity {} goes BOOM!", trigger.target()); /// commands.entity(trigger.target()).despawn(); /// }); /// @@ -269,6 +273,7 @@ pub struct Observer { system: Box, descriptor: ObserverDescriptor, hook_on_add: ComponentHook, + error_handler: Option, } impl Observer { @@ -279,6 +284,7 @@ impl Observer { system: Box::new(IntoObserverSystem::into_system(system)), descriptor: Default::default(), hook_on_add: hook_on_add::, + error_handler: None, } } @@ -312,19 +318,32 @@ impl Observer { self.descriptor.events.push(event); self } + + /// Set the error handler to use for this observer. + /// + /// See the [`error` module-level documentation](crate::error) for more information. + pub fn with_error_handler(mut self, error_handler: fn(BevyError, ErrorContext)) -> Self { + self.error_handler = Some(error_handler); + self + } + + /// Returns the [`ObserverDescriptor`] for this [`Observer`]. 
+ pub fn descriptor(&self) -> &ObserverDescriptor { + &self.descriptor + } } impl Component for Observer { const STORAGE_TYPE: StorageType = StorageType::SparseSet; type Mutability = Mutable; - fn register_component_hooks(hooks: &mut ComponentHooks) { - hooks.on_add(|world, entity, _id| { - let Some(observe) = world.get::(entity) else { + fn on_add() -> Option { + Some(|world, context| { + let Some(observe) = world.get::(context.entity) else { return; }; let hook = observe.hook_on_add; - hook(world, entity, _id); - }); + hook(world, context); + }) } } @@ -355,6 +374,15 @@ fn observer_system_runner>( } state.last_trigger_id = last_trigger; + // SAFETY: Observer was triggered so must have an `Observer` component. + let error_handler = unsafe { + observer_cell + .get::() + .debug_checked_unwrap() + .error_handler + .debug_checked_unwrap() + }; + let trigger: Trigger = Trigger::new( // SAFETY: Caller ensures `ptr` is castable to `&mut T` unsafe { ptr.deref_mut() }, @@ -374,17 +402,39 @@ fn observer_system_runner>( // - `update_archetype_component_access` is called first // - there are no outstanding references to world except a private component // - system is an `ObserverSystem` so won't mutate world beyond the access of a `DeferredWorld` + // and is never exclusive // - system is the same type erased system from above unsafe { (*system).update_archetype_component_access(world); - if (*system).validate_param_unsafe(world) { - (*system).run_unsafe(trigger, world); - (*system).queue_deferred(world.into_deferred()); + match (*system).validate_param_unsafe(world) { + Ok(()) => { + if let Err(err) = (*system).run_unsafe(trigger, world) { + error_handler( + err, + ErrorContext::Observer { + name: (*system).name(), + last_run: (*system).get_last_run(), + }, + ); + }; + (*system).queue_deferred(world.into_deferred()); + } + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::Observer { + name: (*system).name(), + last_run: (*system).get_last_run(), + }, 
+ ); + } + } } } } -/// A [`ComponentHook`] used by [`Observer`] to handle its [`on-add`](`ComponentHooks::on_add`). +/// A [`ComponentHook`] used by [`Observer`] to handle its [`on-add`](`crate::component::ComponentHooks::on_add`). /// /// This function exists separate from [`Observer`] to allow [`Observer`] to have its type parameters /// erased. @@ -394,25 +444,29 @@ fn observer_system_runner>( /// ensure type parameters match. fn hook_on_add>( mut world: DeferredWorld<'_>, - entity: Entity, - _: ComponentId, + HookContext { entity, .. }: HookContext, ) { world.commands().queue(move |world: &mut World| { - let event_type = world.register_component::(); + let event_id = E::register_component_id(world); let mut components = Vec::new(); - B::component_ids(&mut world.components, &mut world.storages, &mut |id| { + B::component_ids(&mut world.components_registrator(), &mut |id| { components.push(id); }); let mut descriptor = ObserverDescriptor { - events: vec![event_type], + events: vec![event_id], components, ..Default::default() }; + let error_handler = default_error_handler(); + // Initialize System let system: *mut dyn ObserverSystem = if let Some(mut observe) = world.get_mut::(entity) { descriptor.merge(&observe.descriptor); + if observe.error_handler.is_none() { + observe.error_handler = Some(error_handler); + } let system = observe.system.downcast_mut::().unwrap(); &mut *system } else { @@ -435,3 +489,44 @@ fn hook_on_add>( } }); } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{event::Event, observer::Trigger}; + + #[derive(Event)] + struct TriggerEvent; + + #[test] + #[should_panic(expected = "I failed!")] + fn test_fallible_observer() { + fn system(_: Trigger) -> Result { + Err("I failed!".into()) + } + + let mut world = World::default(); + world.add_observer(system); + Schedule::default().run(&mut world); + world.trigger(TriggerEvent); + } + + #[test] + fn test_fallible_observer_ignored_errors() { + #[derive(Resource, Default)] + struct 
Ran(bool); + + fn system(_: Trigger, mut ran: ResMut) -> Result { + ran.0 = true; + Err("I failed!".into()) + } + + let mut world = World::default(); + world.init_resource::(); + let observer = Observer::new(system).with_error_handler(crate::error::ignore); + world.spawn(observer); + Schedule::default().run(&mut world); + world.trigger(TriggerEvent); + assert!(world.resource::().0); + } +} diff --git a/crates/bevy_ecs/src/observer/trigger_event.rs b/crates/bevy_ecs/src/observer/trigger_event.rs deleted file mode 100644 index bf84e57bae301..0000000000000 --- a/crates/bevy_ecs/src/observer/trigger_event.rs +++ /dev/null @@ -1,196 +0,0 @@ -use crate::{ - component::ComponentId, - entity::Entity, - event::Event, - world::{Command, DeferredWorld, World}, -}; -use alloc::vec::Vec; - -/// A [`Command`] that emits a given trigger for a given set of targets. -pub struct TriggerEvent { - /// The event to trigger. - pub event: E, - - /// The targets to trigger the event for. - pub targets: Targets, -} - -impl TriggerEvent { - pub(super) fn trigger(mut self, world: &mut World) { - let event_type = world.register_component::(); - trigger_event(world, event_type, &mut self.event, self.targets); - } -} - -impl TriggerEvent<&mut E, Targets> { - pub(super) fn trigger_ref(self, world: &mut World) { - let event_type = world.register_component::(); - trigger_event(world, event_type, self.event, self.targets); - } -} - -impl Command - for TriggerEvent -{ - fn apply(self, world: &mut World) { - self.trigger(world); - } -} - -/// Emit a trigger for a dynamic component id. This is unsafe and must be verified manually. 
-pub struct EmitDynamicTrigger { - event_type: ComponentId, - event_data: T, - targets: Targets, -} - -impl EmitDynamicTrigger { - /// Sets the event type of the resulting trigger, used for dynamic triggers - /// # Safety - /// Caller must ensure that the component associated with `event_type` is accessible as E - pub unsafe fn new_with_id(event_type: ComponentId, event_data: E, targets: Targets) -> Self { - Self { - event_type, - event_data, - targets, - } - } -} - -impl Command - for EmitDynamicTrigger -{ - fn apply(mut self, world: &mut World) { - trigger_event(world, self.event_type, &mut self.event_data, self.targets); - } -} - -#[inline] -fn trigger_event( - world: &mut World, - event_type: ComponentId, - event_data: &mut E, - targets: Targets, -) { - let mut world = DeferredWorld::from(world); - if targets.entities().is_empty() { - // SAFETY: T is accessible as the type represented by self.trigger, ensured in `Self::new` - unsafe { - world.trigger_observers_with_data::<_, E::Traversal>( - event_type, - Entity::PLACEHOLDER, - targets.components(), - event_data, - false, - ); - }; - } else { - for target in targets.entities() { - // SAFETY: T is accessible as the type represented by self.trigger, ensured in `Self::new` - unsafe { - world.trigger_observers_with_data::<_, E::Traversal>( - event_type, - *target, - targets.components(), - event_data, - E::AUTO_PROPAGATE, - ); - }; - } - } -} - -/// Represents a collection of targets for a specific [`Trigger`] of an [`Event`]. Targets can be of type [`Entity`] or [`ComponentId`]. -/// -/// When a trigger occurs for a given event and [`TriggerTargets`], any [`Observer`] that watches for that specific event-target combination -/// will run. -/// -/// [`Trigger`]: crate::observer::Trigger -/// [`Observer`]: crate::observer::Observer -pub trait TriggerTargets { - /// The components the trigger should target. - fn components(&self) -> &[ComponentId]; - - /// The entities the trigger should target. 
- fn entities(&self) -> &[Entity]; -} - -impl TriggerTargets for () { - fn components(&self) -> &[ComponentId] { - &[] - } - - fn entities(&self) -> &[Entity] { - &[] - } -} - -impl TriggerTargets for Entity { - fn components(&self) -> &[ComponentId] { - &[] - } - - fn entities(&self) -> &[Entity] { - core::slice::from_ref(self) - } -} - -impl TriggerTargets for Vec { - fn components(&self) -> &[ComponentId] { - &[] - } - - fn entities(&self) -> &[Entity] { - self.as_slice() - } -} - -impl TriggerTargets for [Entity; N] { - fn components(&self) -> &[ComponentId] { - &[] - } - - fn entities(&self) -> &[Entity] { - self.as_slice() - } -} - -impl TriggerTargets for ComponentId { - fn components(&self) -> &[ComponentId] { - core::slice::from_ref(self) - } - - fn entities(&self) -> &[Entity] { - &[] - } -} - -impl TriggerTargets for Vec { - fn components(&self) -> &[ComponentId] { - self.as_slice() - } - - fn entities(&self) -> &[Entity] { - &[] - } -} - -impl TriggerTargets for [ComponentId; N] { - fn components(&self) -> &[ComponentId] { - self.as_slice() - } - - fn entities(&self) -> &[Entity] { - &[] - } -} - -impl TriggerTargets for &Vec { - fn components(&self) -> &[ComponentId] { - &[] - } - - fn entities(&self) -> &[Entity] { - self.as_slice() - } -} diff --git a/crates/bevy_ecs/src/query/access.rs b/crates/bevy_ecs/src/query/access.rs index 1ee7c188775c1..158bb148ce967 100644 --- a/crates/bevy_ecs/src/query/access.rs +++ b/crates/bevy_ecs/src/query/access.rs @@ -3,9 +3,10 @@ use crate::storage::SparseSetIndex; use crate::world::World; use alloc::{format, string::String, vec, vec::Vec}; use core::{fmt, fmt::Debug, marker::PhantomData}; -use derive_more::derive::From; +use derive_more::From; use disqualified::ShortName; use fixedbitset::FixedBitSet; +use thiserror::Error; /// A wrapper struct to make Debug representations of [`FixedBitSet`] easier /// to read, when used to store [`SparseSetIndex`]. 
@@ -309,6 +310,16 @@ impl Access { self.writes_all_resources || !self.resource_writes.is_clear() } + /// Returns `true` if this accesses any resources or components. + pub fn has_any_read(&self) -> bool { + self.has_any_component_read() || self.has_any_resource_read() + } + + /// Returns `true` if this accesses any resources or components mutably. + pub fn has_any_write(&self) -> bool { + self.has_any_component_write() || self.has_any_resource_write() + } + /// Returns true if this has an archetypal (indirect) access to the component given by `index`. /// /// This is a component whose value is not accessed (and thus will never cause conflicts), @@ -763,38 +774,99 @@ impl Access { self.archetypal.ones().map(T::get_sparse_set_index) } - /// Returns an iterator over the component IDs that this `Access` either - /// reads and writes or can't read or write. + /// Returns an iterator over the component IDs and their [`ComponentAccessKind`]. /// - /// The returned flag specifies whether the list consists of the components - /// that the access *can* read or write (false) or whether the list consists - /// of the components that the access *can't* read or write (true). + /// Returns `Err(UnboundedAccess)` if the access is unbounded. + /// This typically occurs when an [`Access`] is marked as accessing all + /// components, and then adding exceptions. /// - /// Because this method depends on internal implementation details of - /// `Access`, it's not recommended. Prefer to manage your own lists of - /// accessible components if your application needs to do that. 
- #[doc(hidden)] - // TODO: this should be deprecated and removed, see https://github.com/bevyengine/bevy/issues/16339 - pub fn component_reads_and_writes(&self) -> (impl Iterator + '_, bool) { - ( - self.component_read_and_writes - .ones() - .map(T::get_sparse_set_index), - self.component_read_and_writes_inverted, - ) + /// # Examples + /// + /// ```rust + /// # use bevy_ecs::query::{Access, ComponentAccessKind}; + /// let mut access = Access::::default(); + /// + /// access.add_component_read(1); + /// access.add_component_write(2); + /// access.add_archetypal(3); + /// + /// let result = access + /// .try_iter_component_access() + /// .map(Iterator::collect::>); + /// + /// assert_eq!( + /// result, + /// Ok(vec![ + /// ComponentAccessKind::Shared(1), + /// ComponentAccessKind::Exclusive(2), + /// ComponentAccessKind::Archetypal(3), + /// ]), + /// ); + /// ``` + pub fn try_iter_component_access( + &self, + ) -> Result> + '_, UnboundedAccessError> { + // component_writes_inverted is only ever true when component_read_and_writes_inverted is + // also true. Therefore it is sufficient to check just component_read_and_writes_inverted. 
+ if self.component_read_and_writes_inverted { + return Err(UnboundedAccessError { + writes_inverted: self.component_writes_inverted, + read_and_writes_inverted: self.component_read_and_writes_inverted, + }); + } + + let reads_and_writes = self.component_read_and_writes.ones().map(|index| { + let sparse_index = T::get_sparse_set_index(index); + + if self.component_writes.contains(index) { + ComponentAccessKind::Exclusive(sparse_index) + } else { + ComponentAccessKind::Shared(sparse_index) + } + }); + + let archetypal = self + .archetypal + .ones() + .filter(|&index| { + !self.component_writes.contains(index) + && !self.component_read_and_writes.contains(index) + }) + .map(|index| ComponentAccessKind::Archetypal(T::get_sparse_set_index(index))); + + Ok(reads_and_writes.chain(archetypal)) } +} - /// Returns an iterator over the component IDs that this `Access` either - /// writes or can't write. - /// - /// The returned flag specifies whether the list consists of the components - /// that the access *can* write (false) or whether the list consists of the - /// components that the access *can't* write (true). - pub(crate) fn component_writes(&self) -> (impl Iterator + '_, bool) { - ( - self.component_writes.ones().map(T::get_sparse_set_index), - self.component_writes_inverted, - ) +/// Error returned when attempting to iterate over items included in an [`Access`] +/// if the access excludes items rather than including them. +#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] +#[error("Access is unbounded")] +pub struct UnboundedAccessError { + /// [`Access`] is defined in terms of _excluding_ [exclusive](ComponentAccessKind::Exclusive) + /// access. + pub writes_inverted: bool, + /// [`Access`] is defined in terms of _excluding_ [shared](ComponentAccessKind::Shared) and + /// [exclusive](ComponentAccessKind::Exclusive) access. + pub read_and_writes_inverted: bool, +} + +/// Describes the level of access for a particular component as defined in an [`Access`]. 
+#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy)] +pub enum ComponentAccessKind { + /// Archetypical access, such as `Has`. + Archetypal(T), + /// Shared access, such as `&Foo`. + Shared(T), + /// Exclusive access, such as `&mut Foo`. + Exclusive(T), +} + +impl ComponentAccessKind { + /// Gets the index of this `ComponentAccessKind`. + pub fn index(&self) -> &T { + let (Self::Archetypal(value) | Self::Shared(value) | Self::Exclusive(value)) = self; + value } } @@ -809,7 +881,7 @@ impl Access { /// otherwise would allow for queries to be considered disjoint when they shouldn't: /// - `Query<(&mut T, Option<&U>)>` read/write `T`, read `U`, with `U` /// - `Query<&mut T, Without>` read/write `T`, without `U` -/// from this we could reasonably conclude that the queries are disjoint but they aren't. +/// from this we could reasonably conclude that the queries are disjoint but they aren't. /// /// In order to solve this the actual access that `Query<(&mut T, Option<&U>)>` has /// is read/write `T`, read `U`. It must still have a read `U` access otherwise the following @@ -896,11 +968,10 @@ impl AccessConflicts { format!( "{}", ShortName( - world + &world .components - .get_info(ComponentId::get_sparse_set_index(index)) + .get_name(ComponentId::get_sparse_set_index(index)) .unwrap() - .name() ) ) }) @@ -1123,6 +1194,16 @@ impl FilteredAccess { .iter() .flat_map(|f| f.without.ones().map(T::get_sparse_set_index)) } + + /// Returns true if the index is used by this `FilteredAccess` in any way + pub fn contains(&self, index: T) -> bool { + self.access().has_component_read(index.clone()) + || self.access().has_archetypal(index.clone()) + || self.filter_sets.iter().any(|f| { + f.with.contains(index.sparse_set_index()) + || f.without.contains(index.sparse_set_index()) + }) + } } #[derive(Eq, PartialEq)] @@ -1207,6 +1288,14 @@ impl Clone for FilteredAccessSet { } impl FilteredAccessSet { + /// Creates an empty [`FilteredAccessSet`]. 
+ pub const fn new() -> Self { + Self { + combined_access: Access::new(), + filtered_accesses: Vec::new(), + } + } + /// Returns a reference to the unfiltered access of the entire set. #[inline] pub fn combined_access(&self) -> &Access { @@ -1272,28 +1361,28 @@ impl FilteredAccessSet { } /// Adds a read access to a resource to the set. - pub(crate) fn add_unfiltered_resource_read(&mut self, index: T) { + pub fn add_unfiltered_resource_read(&mut self, index: T) { let mut filter = FilteredAccess::default(); filter.add_resource_read(index); self.add(filter); } /// Adds a write access to a resource to the set. - pub(crate) fn add_unfiltered_resource_write(&mut self, index: T) { + pub fn add_unfiltered_resource_write(&mut self, index: T) { let mut filter = FilteredAccess::default(); filter.add_resource_write(index); self.add(filter); } /// Adds read access to all resources to the set. - pub(crate) fn add_unfiltered_read_all_resources(&mut self) { + pub fn add_unfiltered_read_all_resources(&mut self) { let mut filter = FilteredAccess::default(); filter.access.read_all_resources(); self.add(filter); } /// Adds write access to all resources to the set. - pub(crate) fn add_unfiltered_write_all_resources(&mut self) { + pub fn add_unfiltered_write_all_resources(&mut self) { let mut filter = FilteredAccess::default(); filter.access.write_all_resources(); self.add(filter); @@ -1309,12 +1398,16 @@ impl FilteredAccessSet { /// Marks the set as reading all possible indices of type T. pub fn read_all(&mut self) { - self.combined_access.read_all(); + let mut filter = FilteredAccess::matches_everything(); + filter.read_all(); + self.add(filter); } /// Marks the set as writing all T. pub fn write_all(&mut self) { - self.combined_access.write_all(); + let mut filter = FilteredAccess::matches_everything(); + filter.write_all(); + self.add(filter); } /// Removes all accesses stored in this set. 
@@ -1326,18 +1419,17 @@ impl FilteredAccessSet { impl Default for FilteredAccessSet { fn default() -> Self { - Self { - combined_access: Default::default(), - filtered_accesses: Vec::new(), - } + Self::new() } } #[cfg(test)] mod tests { use crate::query::{ - access::AccessFilters, Access, AccessConflicts, FilteredAccess, FilteredAccessSet, + access::AccessFilters, Access, AccessConflicts, ComponentAccessKind, FilteredAccess, + FilteredAccessSet, UnboundedAccessError, }; + use alloc::{vec, vec::Vec}; use core::marker::PhantomData; use fixedbitset::FixedBitSet; @@ -1609,4 +1701,70 @@ mod tests { assert_eq!(access_a, expected); } + + #[test] + fn try_iter_component_access_simple() { + let mut access = Access::::default(); + + access.add_component_read(1); + access.add_component_read(2); + access.add_component_write(3); + access.add_archetypal(5); + + let result = access + .try_iter_component_access() + .map(Iterator::collect::>); + + assert_eq!( + result, + Ok(vec![ + ComponentAccessKind::Shared(1), + ComponentAccessKind::Shared(2), + ComponentAccessKind::Exclusive(3), + ComponentAccessKind::Archetypal(5), + ]), + ); + } + + #[test] + fn try_iter_component_access_unbounded_write_all() { + let mut access = Access::::default(); + + access.add_component_read(1); + access.add_component_read(2); + access.write_all(); + + let result = access + .try_iter_component_access() + .map(Iterator::collect::>); + + assert_eq!( + result, + Err(UnboundedAccessError { + writes_inverted: true, + read_and_writes_inverted: true + }), + ); + } + + #[test] + fn try_iter_component_access_unbounded_read_all() { + let mut access = Access::::default(); + + access.add_component_read(1); + access.add_component_read(2); + access.read_all(); + + let result = access + .try_iter_component_access() + .map(Iterator::collect::>); + + assert_eq!( + result, + Err(UnboundedAccessError { + writes_inverted: false, + read_and_writes_inverted: true + }), + ); + } } diff --git 
a/crates/bevy_ecs/src/query/builder.rs b/crates/bevy_ecs/src/query/builder.rs index af1af7749e89b..81819cb9ac824 100644 --- a/crates/bevy_ecs/src/query/builder.rs +++ b/crates/bevy_ecs/src/query/builder.rs @@ -33,7 +33,7 @@ use super::{FilteredAccess, QueryData, QueryFilter}; /// .build(); /// /// // Consume the QueryState -/// let (entity, b) = query.single(&world); +/// let (entity, b) = query.single(&world).unwrap(); /// ``` pub struct QueryBuilder<'w, D: QueryData = (), F: QueryFilter = ()> { access: FilteredAccess, @@ -78,18 +78,17 @@ impl<'w, D: QueryData, F: QueryFilter> QueryBuilder<'w, D, F> { self.world() .components() .get_info(component_id) - .map_or(false, |info| info.storage_type() == StorageType::Table) + .is_some_and(|info| info.storage_type() == StorageType::Table) }; - #[allow(deprecated)] - let (mut component_reads_and_writes, component_reads_and_writes_inverted) = - self.access.access().component_reads_and_writes(); - if component_reads_and_writes_inverted { + let Ok(component_accesses) = self.access.access().try_iter_component_access() else { + // Access is unbounded, pessimistically assume it's sparse. 
return false; - } + }; - component_reads_and_writes.all(is_dense) - && self.access.access().archetypal().all(is_dense) + component_accesses + .map(|access| *access.index()) + .all(is_dense) && !self.access.access().has_read_all_components() && self.access.with_filters().all(is_dense) && self.access.without_filters().all(is_dense) @@ -276,8 +275,8 @@ impl<'w, D: QueryData, F: QueryFilter> QueryBuilder<'w, D, F> { #[cfg(test)] mod tests { - use crate as bevy_ecs; use crate::{prelude::*, world::FilteredEntityRef}; + use std::dbg; #[derive(Component, PartialEq, Debug)] struct A(usize); @@ -298,13 +297,13 @@ mod tests { .with::() .without::() .build(); - assert_eq!(entity_a, query_a.single(&world)); + assert_eq!(entity_a, query_a.single(&world).unwrap()); let mut query_b = QueryBuilder::::new(&mut world) .with::() .without::() .build(); - assert_eq!(entity_b, query_b.single(&world)); + assert_eq!(entity_b, query_b.single(&world).unwrap()); } #[test] @@ -320,13 +319,13 @@ mod tests { .with_id(component_id_a) .without_id(component_id_c) .build(); - assert_eq!(entity_a, query_a.single(&world)); + assert_eq!(entity_a, query_a.single(&world).unwrap()); let mut query_b = QueryBuilder::::new(&mut world) .with_id(component_id_a) .without_id(component_id_b) .build(); - assert_eq!(entity_b, query_b.single(&world)); + assert_eq!(entity_b, query_b.single(&world).unwrap()); } #[test] @@ -386,7 +385,7 @@ mod tests { .data::<&B>() .build(); - let entity_ref = query.single(&world); + let entity_ref = query.single(&world).unwrap(); assert_eq!(entity, entity_ref.id()); @@ -409,7 +408,7 @@ mod tests { .ref_id(component_id_b) .build(); - let entity_ref = query.single(&world); + let entity_ref = query.single(&world).unwrap(); assert_eq!(entity, entity_ref.id()); diff --git a/crates/bevy_ecs/src/query/error.rs b/crates/bevy_ecs/src/query/error.rs index 9746471c66852..6d0b149b86058 100644 --- a/crates/bevy_ecs/src/query/error.rs +++ b/crates/bevy_ecs/src/query/error.rs @@ -1,110 +1,55 @@ use 
thiserror::Error; -use crate::{entity::Entity, world::unsafe_world_cell::UnsafeWorldCell}; +use crate::{ + archetype::ArchetypeId, + entity::{Entity, EntityDoesNotExistError}, +}; /// An error that occurs when retrieving a specific [`Entity`]'s query result from [`Query`](crate::system::Query) or [`QueryState`](crate::query::QueryState). // TODO: return the type_name as part of this error -#[derive(Clone, Copy)] -pub enum QueryEntityError<'w> { +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum QueryEntityError { /// The given [`Entity`]'s components do not match the query. /// /// Either it does not have a requested component, or it has a component which the query filters out. - QueryDoesNotMatch(Entity, UnsafeWorldCell<'w>), + QueryDoesNotMatch(Entity, ArchetypeId), /// The given [`Entity`] does not exist. - NoSuchEntity(Entity, UnsafeWorldCell<'w>), + EntityDoesNotExist(EntityDoesNotExistError), /// The [`Entity`] was requested mutably more than once. /// - /// See [`QueryState::get_many_mut`](crate::query::QueryState::get_many_mut) for an example. + /// See [`Query::get_many_mut`](crate::system::Query::get_many_mut) for an example. 
AliasedMutability(Entity), } -impl<'w> core::error::Error for QueryEntityError<'w> {} - -impl<'w> core::fmt::Display for QueryEntityError<'w> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - Self::QueryDoesNotMatch(entity, world) => { - write!( - f, - "The query does not match entity {entity}, which has components " - )?; - format_archetype(f, world, entity) - } - Self::NoSuchEntity(entity, world) => { - write!( - f, - "Entity {entity} {}", - world - .entities() - .entity_does_not_exist_error_details_message(entity) - ) - } - Self::AliasedMutability(entity) => { - write!(f, "Entity {entity} was requested mutably more than once") - } - } +impl From for QueryEntityError { + fn from(error: EntityDoesNotExistError) -> Self { + QueryEntityError::EntityDoesNotExist(error) } } -impl<'w> core::fmt::Debug for QueryEntityError<'w> { +impl core::error::Error for QueryEntityError {} + +impl core::fmt::Display for QueryEntityError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { match *self { - Self::QueryDoesNotMatch(entity, world) => { - write!(f, "QueryDoesNotMatch({entity} with components ")?; - format_archetype(f, world, entity)?; - write!(f, ")") + Self::QueryDoesNotMatch(entity, _) => { + write!(f, "The query does not match entity {entity}") + } + Self::EntityDoesNotExist(error) => { + write!(f, "{error}") } - Self::NoSuchEntity(entity, world) => { + Self::AliasedMutability(entity) => { write!( f, - "NoSuchEntity({entity} {})", - world - .entities() - .entity_does_not_exist_error_details_message(entity) + "The entity with ID {entity} was requested mutably more than once" ) } - Self::AliasedMutability(entity) => write!(f, "AliasedMutability({entity})"), - } - } -} - -fn format_archetype( - f: &mut core::fmt::Formatter<'_>, - world: UnsafeWorldCell<'_>, - entity: Entity, -) -> core::fmt::Result { - // We know entity is still alive - let entity = world - .get_entity(entity) - .expect("entity does not belong 
to world"); - for (i, component_id) in entity.archetype().components().enumerate() { - if i > 0 { - write!(f, ", ")?; } - let name = world - .components() - .get_name(component_id) - .expect("entity does not belong to world"); - write!(f, "{}", disqualified::ShortName(name))?; } - Ok(()) } -impl<'w> PartialEq for QueryEntityError<'w> { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::QueryDoesNotMatch(e1, _), Self::QueryDoesNotMatch(e2, _)) if e1 == e2 => true, - (Self::NoSuchEntity(e1, _), Self::NoSuchEntity(e2, _)) if e1 == e2 => true, - (Self::AliasedMutability(e1), Self::AliasedMutability(e2)) if e1 == e2 => true, - _ => false, - } - } -} - -impl<'w> Eq for QueryEntityError<'w> {} - /// An error that occurs when evaluating a [`Query`](crate::system::Query) or [`QueryState`](crate::query::QueryState) as a single expected result via -/// [`get_single`](crate::system::Query::get_single) or [`get_single_mut`](crate::system::Query::get_single_mut). +/// [`single`](crate::system::Query::single) or [`single_mut`](crate::system::Query::single_mut). #[derive(Debug, Error)] pub enum QuerySingleError { /// No entity fits the query. 
@@ -117,8 +62,7 @@ pub enum QuerySingleError { #[cfg(test)] mod test { - use crate as bevy_ecs; - use crate::prelude::World; + use crate::{prelude::World, query::QueryEntityError}; use bevy_ecs_macros::Component; #[test] @@ -129,19 +73,18 @@ mod test { struct Present1; #[derive(Component)] struct Present2; - #[derive(Component, Debug)] + #[derive(Component, Debug, PartialEq)] struct NotPresent; - let entity = world.spawn((Present1, Present2)).id(); + let entity = world.spawn((Present1, Present2)); + + let (entity, archetype_id) = (entity.id(), entity.archetype().id()); - let err = world - .query::<&NotPresent>() - .get(&world, entity) - .unwrap_err(); + let result = world.query::<&NotPresent>().get(&world, entity); assert_eq!( - format!("{err:?}"), - "QueryDoesNotMatch(0v1 with components Present1, Present2)" + result, + Err(QueryEntityError::QueryDoesNotMatch(entity, archetype_id)) ); } } diff --git a/crates/bevy_ecs/src/query/fetch.rs b/crates/bevy_ecs/src/query/fetch.rs index 13ad4655b895d..cd632f7b14f22 100644 --- a/crates/bevy_ecs/src/query/fetch.rs +++ b/crates/bevy_ecs/src/query/fetch.rs @@ -1,7 +1,7 @@ use crate::{ archetype::{Archetype, Archetypes}, bundle::Bundle, - change_detection::{MaybeThinSlicePtrLocation, Ticks, TicksMut}, + change_detection::{MaybeLocation, Ticks, TicksMut}, component::{Component, ComponentId, Components, Mutable, StorageType, Tick}, entity::{Entities, Entity, EntityLocation}, query::{Access, DebugCheckedUnwrap, FilteredAccess, WorldQuery}, @@ -12,7 +12,7 @@ use crate::{ }, }; use bevy_ptr::{ThinSlicePtr, UnsafeCellDeref}; -use core::{cell::UnsafeCell, marker::PhantomData}; +use core::{cell::UnsafeCell, marker::PhantomData, panic::Location}; use smallvec::SmallVec; use variadics_please::all_tuples; @@ -265,8 +265,9 @@ use variadics_please::all_tuples; /// /// # Safety /// -/// Component access of `Self::ReadOnly` must be a subset of `Self` -/// and `Self::ReadOnly` must match exactly the same archetypes/tables as `Self` +/// - 
Component access of `Self::ReadOnly` must be a subset of `Self` +/// and `Self::ReadOnly` must match exactly the same archetypes/tables as `Self` +/// - `IS_READ_ONLY` must be `true` if and only if `Self: ReadOnlyQueryData` /// /// [`Query`]: crate::system::Query /// [`ReadOnly`]: Self::ReadOnly @@ -276,8 +277,36 @@ use variadics_please::all_tuples; note = "if `{Self}` is a component type, try using `&{Self}` or `&mut {Self}`" )] pub unsafe trait QueryData: WorldQuery { + /// True if this query is read-only and may not perform mutable access. + const IS_READ_ONLY: bool; + /// The read-only variant of this [`QueryData`], which satisfies the [`ReadOnlyQueryData`] trait. type ReadOnly: ReadOnlyQueryData::State>; + + /// The item returned by this [`WorldQuery`] + /// This will be the data retrieved by the query, + /// and is visible to the end user when calling e.g. `Query::get`. + type Item<'a>; + + /// This function manually implements subtyping for the query items. + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort>; + + /// Fetch [`Self::Item`](`QueryData::Item`) for either the given `entity` in the current [`Table`], + /// or for the given `entity` in the current [`Archetype`]. This must always be called after + /// [`WorldQuery::set_table`] with a `table_row` in the range of the current [`Table`] or after + /// [`WorldQuery::set_archetype`] with an `entity` in the current archetype. + /// Accesses components registered in [`WorldQuery::update_component_access`]. + /// + /// # Safety + /// + /// - Must always be called _after_ [`WorldQuery::set_table`] or [`WorldQuery::set_archetype`]. `entity` and + /// `table_row` must be in the range of the current table and archetype. + /// - There must not be simultaneous conflicting component access registered in `update_component_access`. 
+ unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + table_row: TableRow, + ) -> Self::Item<'w>; } /// A [`QueryData`] that is read only. @@ -288,7 +317,7 @@ pub unsafe trait QueryData: WorldQuery { pub unsafe trait ReadOnlyQueryData: QueryData {} /// The item type returned when a [`WorldQuery`] is iterated over -pub type QueryItem<'w, Q> = ::Item<'w>; +pub type QueryItem<'w, Q> = ::Item<'w>; /// The read-only variant of the item type returned when a [`QueryData`] is iterated over immutably pub type ROQueryItem<'w, D> = QueryItem<'w, ::ReadOnly>; @@ -296,14 +325,9 @@ pub type ROQueryItem<'w, D> = QueryItem<'w, ::ReadOnly>; /// `update_component_access` and `update_archetype_component_access` do nothing. /// This is sound because `fetch` does not access components. unsafe impl WorldQuery for Entity { - type Item<'w> = Entity; type Fetch<'w> = (); type State = (); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(_: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> {} unsafe fn init_fetch<'w>( @@ -329,15 +353,6 @@ unsafe impl WorldQuery for Entity { unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { } - #[inline(always)] - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - entity - } - fn update_component_access(_state: &Self::State, _access: &mut FilteredAccess) {} fn init_state(_world: &mut World) {} @@ -356,7 +371,23 @@ unsafe impl WorldQuery for Entity { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for Entity { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + + type Item<'w> = Entity; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + _fetch: &mut Self::Fetch<'w>, + entity: Entity, + _table_row: TableRow, + 
) -> Self::Item<'w> { + entity + } } /// SAFETY: access is read only @@ -366,14 +397,9 @@ unsafe impl ReadOnlyQueryData for Entity {} /// `update_component_access` and `update_archetype_component_access` do nothing. /// This is sound because `fetch` does not access components. unsafe impl WorldQuery for EntityLocation { - type Item<'w> = EntityLocation; type Fetch<'w> = &'w Entities; type State = (); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -404,16 +430,6 @@ unsafe impl WorldQuery for EntityLocation { unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - // SAFETY: `fetch` must be called with an entity that exists in the world - unsafe { fetch.get(entity).debug_checked_unwrap() } - } - fn update_component_access(_state: &Self::State, _access: &mut FilteredAccess) {} fn init_state(_world: &mut World) {} @@ -432,7 +448,23 @@ unsafe impl WorldQuery for EntityLocation { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for EntityLocation { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'w> = EntityLocation; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + // SAFETY: `fetch` must be called with an entity that exists in the world + unsafe { fetch.get(entity).debug_checked_unwrap() } + } } /// SAFETY: access is read only @@ -443,14 +475,9 @@ unsafe impl ReadOnlyQueryData for EntityLocation {} /// This is sound because `update_component_access` and `update_archetype_component_access` set 
read access for all components and panic when appropriate. /// Filters are unchanged. unsafe impl<'a> WorldQuery for EntityRef<'a> { - type Item<'w> = EntityRef<'w>; type Fetch<'w> = UnsafeWorldCell<'w>; type State = (); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -479,18 +506,6 @@ unsafe impl<'a> WorldQuery for EntityRef<'a> { unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { } - #[inline(always)] - unsafe fn fetch<'w>( - world: &mut Self::Fetch<'w>, - entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - // SAFETY: `fetch` must be called with an entity that exists in the world - let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; - // SAFETY: Read-only access to every component has been registered. - unsafe { EntityRef::new(cell) } - } - fn update_component_access(_state: &Self::State, access: &mut FilteredAccess) { assert!( !access.access().has_any_component_write(), @@ -515,7 +530,25 @@ unsafe impl<'a> WorldQuery for EntityRef<'a> { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl<'a> QueryData for EntityRef<'a> { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'w> = EntityRef<'w>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + world: &mut Self::Fetch<'w>, + entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + // SAFETY: `fetch` must be called with an entity that exists in the world + let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; + // SAFETY: Read-only access to every component has been registered. 
+ unsafe { EntityRef::new(cell) } + } } /// SAFETY: access is read only @@ -523,14 +556,9 @@ unsafe impl ReadOnlyQueryData for EntityRef<'_> {} /// SAFETY: The accesses of `Self::ReadOnly` are a subset of the accesses of `Self` unsafe impl<'a> WorldQuery for EntityMut<'a> { - type Item<'w> = EntityMut<'w>; type Fetch<'w> = UnsafeWorldCell<'w>; type State = (); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -559,18 +587,6 @@ unsafe impl<'a> WorldQuery for EntityMut<'a> { unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { } - #[inline(always)] - unsafe fn fetch<'w>( - world: &mut Self::Fetch<'w>, - entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - // SAFETY: `fetch` must be called with an entity that exists in the world - let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; - // SAFETY: mutable access to every component has been registered. 
- unsafe { EntityMut::new(cell) } - } - fn update_component_access(_state: &Self::State, access: &mut FilteredAccess) { assert!( !access.access().has_any_component_read(), @@ -595,19 +611,32 @@ unsafe impl<'a> WorldQuery for EntityMut<'a> { /// SAFETY: access of `EntityRef` is a subset of `EntityMut` unsafe impl<'a> QueryData for EntityMut<'a> { + const IS_READ_ONLY: bool = false; type ReadOnly = EntityRef<'a>; + type Item<'w> = EntityMut<'w>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + world: &mut Self::Fetch<'w>, + entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + // SAFETY: `fetch` must be called with an entity that exists in the world + let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; + // SAFETY: mutable access to every component has been registered. + unsafe { EntityMut::new(cell) } + } } /// SAFETY: The accesses of `Self::ReadOnly` are a subset of the accesses of `Self` unsafe impl<'a> WorldQuery for FilteredEntityRef<'a> { type Fetch<'w> = (UnsafeWorldCell<'w>, Access); - type Item<'w> = FilteredEntityRef<'w>; type State = FilteredAccess; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -646,18 +675,6 @@ unsafe impl<'a> WorldQuery for FilteredEntityRef<'a> { state.access_mut().clear_writes(); } - #[inline(always)] - unsafe fn fetch<'w>( - (world, access): &mut Self::Fetch<'w>, - entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - // SAFETY: `fetch` must be called with an entity that exists in the world - let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; - // SAFETY: mutable access to every component has been registered. 
- unsafe { FilteredEntityRef::new(cell, access.clone()) } - } - fn update_component_access( state: &Self::State, filtered_access: &mut FilteredAccess, @@ -687,7 +704,25 @@ unsafe impl<'a> WorldQuery for FilteredEntityRef<'a> { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl<'a> QueryData for FilteredEntityRef<'a> { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'w> = FilteredEntityRef<'w>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + (world, access): &mut Self::Fetch<'w>, + entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + // SAFETY: `fetch` must be called with an entity that exists in the world + let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; + // SAFETY: mutable access to every component has been registered. + unsafe { FilteredEntityRef::new(cell, access.clone()) } + } } /// SAFETY: Access is read-only. @@ -696,13 +731,8 @@ unsafe impl ReadOnlyQueryData for FilteredEntityRef<'_> {} /// SAFETY: The accesses of `Self::ReadOnly` are a subset of the accesses of `Self` unsafe impl<'a> WorldQuery for FilteredEntityMut<'a> { type Fetch<'w> = (UnsafeWorldCell<'w>, Access); - type Item<'w> = FilteredEntityMut<'w>; type State = FilteredAccess; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -740,18 +770,6 @@ unsafe impl<'a> WorldQuery for FilteredEntityMut<'a> { state.clone_from(access); } - #[inline(always)] - unsafe fn fetch<'w>( - (world, access): &mut Self::Fetch<'w>, - entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - // SAFETY: `fetch` must be called with an entity that exists in the world - let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; - // SAFETY: mutable access to every component has been 
registered. - unsafe { FilteredEntityMut::new(cell, access.clone()) } - } - fn update_component_access( state: &Self::State, filtered_access: &mut FilteredAccess, @@ -781,7 +799,25 @@ unsafe impl<'a> WorldQuery for FilteredEntityMut<'a> { /// SAFETY: access of `FilteredEntityRef` is a subset of `FilteredEntityMut` unsafe impl<'a> QueryData for FilteredEntityMut<'a> { + const IS_READ_ONLY: bool = false; type ReadOnly = FilteredEntityRef<'a>; + type Item<'w> = FilteredEntityMut<'w>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + (world, access): &mut Self::Fetch<'w>, + entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + // SAFETY: `fetch` must be called with an entity that exists in the world + let cell = unsafe { world.get_entity(entity).debug_checked_unwrap() }; + // SAFETY: mutable access to every component has been registered. + unsafe { FilteredEntityMut::new(cell, access.clone()) } + } } /// SAFETY: `EntityRefExcept` guards access to all components in the bundle `B` @@ -792,13 +828,8 @@ where B: Bundle, { type Fetch<'w> = UnsafeWorldCell<'w>; - type Item<'w> = EntityRefExcept<'w, B>; type State = SmallVec<[ComponentId; 4]>; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -824,15 +855,6 @@ where unsafe fn set_table<'w>(_: &mut Self::Fetch<'w>, _: &Self::State, _: &'w Table) {} - unsafe fn fetch<'w>( - world: &mut Self::Fetch<'w>, - entity: Entity, - _: TableRow, - ) -> Self::Item<'w> { - let cell = world.get_entity(entity).unwrap(); - EntityRefExcept::new(cell) - } - fn update_component_access( state: &Self::State, filtered_access: &mut FilteredAccess, @@ -876,7 +898,22 @@ unsafe impl<'a, B> QueryData for EntityRefExcept<'a, B> where B: Bundle, { + const IS_READ_ONLY: bool = true; type 
ReadOnly = Self; + type Item<'w> = EntityRefExcept<'w, B>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + unsafe fn fetch<'w>( + world: &mut Self::Fetch<'w>, + entity: Entity, + _: TableRow, + ) -> Self::Item<'w> { + let cell = world.get_entity(entity).unwrap(); + EntityRefExcept::new(cell) + } } /// SAFETY: `EntityRefExcept` enforces read-only access to its contained @@ -891,13 +928,8 @@ where B: Bundle, { type Fetch<'w> = UnsafeWorldCell<'w>; - type Item<'w> = EntityMutExcept<'w, B>; type State = SmallVec<[ComponentId; 4]>; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -923,15 +955,6 @@ where unsafe fn set_table<'w>(_: &mut Self::Fetch<'w>, _: &Self::State, _: &'w Table) {} - unsafe fn fetch<'w>( - world: &mut Self::Fetch<'w>, - entity: Entity, - _: TableRow, - ) -> Self::Item<'w> { - let cell = world.get_entity(entity).unwrap(); - EntityMutExcept::new(cell) - } - fn update_component_access( state: &Self::State, filtered_access: &mut FilteredAccess, @@ -976,21 +999,31 @@ unsafe impl<'a, B> QueryData for EntityMutExcept<'a, B> where B: Bundle, { + const IS_READ_ONLY: bool = false; type ReadOnly = EntityRefExcept<'a, B>; + type Item<'w> = EntityMutExcept<'w, B>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + unsafe fn fetch<'w>( + world: &mut Self::Fetch<'w>, + entity: Entity, + _: TableRow, + ) -> Self::Item<'w> { + let cell = world.get_entity(entity).unwrap(); + EntityMutExcept::new(cell) + } } /// SAFETY: /// `update_component_access` and `update_archetype_component_access` do nothing. /// This is sound because `fetch` does not access components. 
unsafe impl WorldQuery for &Archetype { - type Item<'w> = &'w Archetype; type Fetch<'w> = (&'w Entities, &'w Archetypes); type State = (); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -1021,19 +1054,6 @@ unsafe impl WorldQuery for &Archetype { unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - let (entities, archetypes) = *fetch; - // SAFETY: `fetch` must be called with an entity that exists in the world - let location = unsafe { entities.get(entity).debug_checked_unwrap() }; - // SAFETY: The assigned archetype for a living entity must always be valid. - unsafe { archetypes.get(location.archetype_id).debug_checked_unwrap() } - } - fn update_component_access(_state: &Self::State, _access: &mut FilteredAccess) {} fn init_state(_world: &mut World) {} @@ -1052,7 +1072,26 @@ unsafe impl WorldQuery for &Archetype { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for &Archetype { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'w> = &'w Archetype; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + let (entities, archetypes) = *fetch; + // SAFETY: `fetch` must be called with an entity that exists in the world + let location = unsafe { entities.get(entity).debug_checked_unwrap() }; + // SAFETY: The assigned archetype for a living entity must always be valid. 
+ unsafe { archetypes.get(location.archetype_id).debug_checked_unwrap() } + } } /// SAFETY: access is read only @@ -1065,7 +1104,7 @@ pub struct ReadFetch<'w, T: Component> { // T::STORAGE_TYPE = StorageType::Table Option>>, // T::STORAGE_TYPE = StorageType::SparseSet - &'w ComponentSparseSet, + Option<&'w ComponentSparseSet>, >, } @@ -1082,14 +1121,9 @@ impl Copy for ReadFetch<'_, T> {} /// `update_component_access` adds a `With` filter for a component. /// This is sound because `matches_component_set` returns whether the set contains that component. unsafe impl WorldQuery for &T { - type Item<'w> = &'w T; type Fetch<'w> = ReadFetch<'w, T>; type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(item: &'wlong T) -> &'wshort T { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -1109,13 +1143,7 @@ unsafe impl WorldQuery for &T { // which we are allowed to access since we registered it in `update_archetype_component_access`. // Note that we do not actually access any components in this function, we just get a shared // reference to the sparse set, which is used to access the components in `Self::fetch`. - unsafe { - world - .storages() - .sparse_sets - .get(component_id) - .debug_checked_unwrap() - } + unsafe { world.storages().sparse_sets.get(component_id) } }, ), } @@ -1159,28 +1187,6 @@ unsafe impl WorldQuery for &T { unsafe { fetch.components.set_table(table_data) }; } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Self::Item<'w> { - fetch.components.extract( - |table| { - // SAFETY: set_table was previously called - let table = unsafe { table.debug_checked_unwrap() }; - // SAFETY: Caller ensures `table_row` is in range. - let item = unsafe { table.get(table_row.as_usize()) }; - item.deref() - }, - |sparse_set| { - // SAFETY: Caller ensures `entity` is in range. 
- let item = unsafe { sparse_set.get(entity).debug_checked_unwrap() }; - item.deref() - }, - ) - } - fn update_component_access( &component_id: &ComponentId, access: &mut FilteredAccess, @@ -1211,7 +1217,40 @@ unsafe impl WorldQuery for &T { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for &T { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'w> = &'w T; + + fn shrink<'wlong: 'wshort, 'wshort>(item: &'wlong T) -> &'wshort T { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + table_row: TableRow, + ) -> Self::Item<'w> { + fetch.components.extract( + |table| { + // SAFETY: set_table was previously called + let table = unsafe { table.debug_checked_unwrap() }; + // SAFETY: Caller ensures `table_row` is in range. + let item = unsafe { table.get(table_row.as_usize()) }; + item.deref() + }, + |sparse_set| { + // SAFETY: Caller ensures `entity` is in range. + let item = unsafe { + sparse_set + .debug_checked_unwrap() + .get(entity) + .debug_checked_unwrap() + }; + item.deref() + }, + ) + } } /// SAFETY: access is read only @@ -1226,10 +1265,11 @@ pub struct RefFetch<'w, T: Component> { ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, - MaybeThinSlicePtrLocation<'w>, + MaybeLocation>>>, )>, // T::STORAGE_TYPE = StorageType::SparseSet - &'w ComponentSparseSet, + // Can be `None` when the component has never been inserted + Option<&'w ComponentSparseSet>, >, last_run: Tick, this_run: Tick, @@ -1248,14 +1288,9 @@ impl Copy for RefFetch<'_, T> {} /// `update_component_access` adds a `With` filter for a component. /// This is sound because `matches_component_set` returns whether the set contains that component. 
unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { - type Item<'w> = Ref<'w, T>; type Fetch<'w> = RefFetch<'w, T>; type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(item: Ref<'wlong, T>) -> Ref<'wshort, T> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -1275,13 +1310,7 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { // which we are allowed to access since we registered it in `update_archetype_component_access`. // Note that we do not actually access any components in this function, we just get a shared // reference to the sparse set, which is used to access the components in `Self::fetch`. - unsafe { - world - .storages() - .sparse_sets - .get(component_id) - .debug_checked_unwrap() - } + unsafe { world.storages().sparse_sets.get(component_id) } }, ), last_run, @@ -1322,15 +1351,52 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { column.get_data_slice(table.entity_count()).into(), column.get_added_ticks_slice(table.entity_count()).into(), column.get_changed_ticks_slice(table.entity_count()).into(), - #[cfg(feature = "track_change_detection")] - column.get_changed_by_slice(table.entity_count()).into(), - #[cfg(not(feature = "track_change_detection"))] - (), + column + .get_changed_by_slice(table.entity_count()) + .map(Into::into), )); // SAFETY: set_table is only called when T::STORAGE_TYPE = StorageType::Table unsafe { fetch.components.set_table(table_data) }; } + fn update_component_access( + &component_id: &ComponentId, + access: &mut FilteredAccess, + ) { + assert!( + !access.access().has_component_write(component_id), + "&{} conflicts with a previous access in this query. 
Shared access cannot coincide with exclusive access.", + core::any::type_name::(), + ); + access.add_component_read(component_id); + } + + fn init_state(world: &mut World) -> ComponentId { + world.register_component::() + } + + fn get_state(components: &Components) -> Option { + components.component_id::() + } + + fn matches_component_set( + &state: &ComponentId, + set_contains_id: &impl Fn(ComponentId) -> bool, + ) -> bool { + set_contains_id(state) + } +} + +/// SAFETY: `Self` is the same as `Self::ReadOnly` +unsafe impl<'__w, T: Component> QueryData for Ref<'__w, T> { + const IS_READ_ONLY: bool = true; + type ReadOnly = Self; + type Item<'w> = Ref<'w, T>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Ref<'wlong, T>) -> Ref<'wshort, T> { + item + } + #[inline(always)] unsafe fn fetch<'w>( fetch: &mut Self::Fetch<'w>, @@ -1340,7 +1406,7 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { fetch.components.extract( |table| { // SAFETY: set_table was previously called - let (table_components, added_ticks, changed_ticks, _callers) = + let (table_components, added_ticks, changed_ticks, callers) = unsafe { table.debug_checked_unwrap() }; // SAFETY: The caller ensures `table_row` is in range. @@ -1350,8 +1416,7 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { // SAFETY: The caller ensures `table_row` is in range. let changed = unsafe { changed_ticks.get(table_row.as_usize()) }; // SAFETY: The caller ensures `table_row` is in range. 
- #[cfg(feature = "track_change_detection")] - let caller = unsafe { _callers.get(table_row.as_usize()) }; + let caller = callers.map(|callers| unsafe { callers.get(table_row.as_usize()) }); Ref { value: component.deref(), @@ -1361,56 +1426,26 @@ unsafe impl<'__w, T: Component> WorldQuery for Ref<'__w, T> { this_run: fetch.this_run, last_run: fetch.last_run, }, - #[cfg(feature = "track_change_detection")] - changed_by: caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } }, |sparse_set| { - // SAFETY: The caller ensures `entity` is in range. - let (component, ticks, _caller) = - unsafe { sparse_set.get_with_ticks(entity).debug_checked_unwrap() }; + // SAFETY: The caller ensures `entity` is in range and has the component. + let (component, ticks, caller) = unsafe { + sparse_set + .debug_checked_unwrap() + .get_with_ticks(entity) + .debug_checked_unwrap() + }; Ref { value: component.deref(), ticks: Ticks::from_tick_cells(ticks, fetch.last_run, fetch.this_run), - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } }, ) } - - fn update_component_access( - &component_id: &ComponentId, - access: &mut FilteredAccess, - ) { - assert!( - !access.access().has_component_write(component_id), - "&{} conflicts with a previous access in this query. 
Shared access cannot coincide with exclusive access.", - core::any::type_name::(), - ); - access.add_component_read(component_id); - } - - fn init_state(world: &mut World) -> ComponentId { - world.register_component::() - } - - fn get_state(components: &Components) -> Option { - components.component_id::() - } - - fn matches_component_set( - &state: &ComponentId, - set_contains_id: &impl Fn(ComponentId) -> bool, - ) -> bool { - set_contains_id(state) - } -} - -/// SAFETY: `Self` is the same as `Self::ReadOnly` -unsafe impl<'__w, T: Component> QueryData for Ref<'__w, T> { - type ReadOnly = Self; } /// SAFETY: access is read only @@ -1425,10 +1460,11 @@ pub struct WriteFetch<'w, T: Component> { ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, ThinSlicePtr<'w, UnsafeCell>, - MaybeThinSlicePtrLocation<'w>, + MaybeLocation>>>, )>, // T::STORAGE_TYPE = StorageType::SparseSet - &'w ComponentSparseSet, + // Can be `None` when the component has never been inserted + Option<&'w ComponentSparseSet>, >, last_run: Tick, this_run: Tick, @@ -1447,14 +1483,9 @@ impl Copy for WriteFetch<'_, T> {} /// `update_component_access` adds a `With` filter for a component. /// This is sound because `matches_component_set` returns whether the set contains that component. unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { - type Item<'w> = Mut<'w, T>; type Fetch<'w> = WriteFetch<'w, T>; type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(item: Mut<'wlong, T>) -> Mut<'wshort, T> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -1474,13 +1505,7 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { // which we are allowed to access since we registered it in `update_archetype_component_access`. // Note that we do not actually access any components in this function, we just get a shared // reference to the sparse set, which is used to access the components in `Self::fetch`. 
- unsafe { - world - .storages() - .sparse_sets - .get(component_id) - .debug_checked_unwrap() - } + unsafe { world.storages().sparse_sets.get(component_id) } }, ), last_run, @@ -1510,24 +1535,61 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { } } - #[inline] - unsafe fn set_table<'w>( - fetch: &mut WriteFetch<'w, T>, - &component_id: &ComponentId, - table: &'w Table, - ) { - let column = table.get_column(component_id).debug_checked_unwrap(); - let table_data = Some(( - column.get_data_slice(table.entity_count()).into(), - column.get_added_ticks_slice(table.entity_count()).into(), - column.get_changed_ticks_slice(table.entity_count()).into(), - #[cfg(feature = "track_change_detection")] - column.get_changed_by_slice(table.entity_count()).into(), - #[cfg(not(feature = "track_change_detection"))] - (), - )); - // SAFETY: set_table is only called when T::STORAGE_TYPE = StorageType::Table - unsafe { fetch.components.set_table(table_data) }; + #[inline] + unsafe fn set_table<'w>( + fetch: &mut WriteFetch<'w, T>, + &component_id: &ComponentId, + table: &'w Table, + ) { + let column = table.get_column(component_id).debug_checked_unwrap(); + let table_data = Some(( + column.get_data_slice(table.entity_count()).into(), + column.get_added_ticks_slice(table.entity_count()).into(), + column.get_changed_ticks_slice(table.entity_count()).into(), + column + .get_changed_by_slice(table.entity_count()) + .map(Into::into), + )); + // SAFETY: set_table is only called when T::STORAGE_TYPE = StorageType::Table + unsafe { fetch.components.set_table(table_data) }; + } + + fn update_component_access( + &component_id: &ComponentId, + access: &mut FilteredAccess, + ) { + assert!( + !access.access().has_component_read(component_id), + "&mut {} conflicts with a previous access in this query. 
Mutable component access must be unique.", + core::any::type_name::(), + ); + access.add_component_write(component_id); + } + + fn init_state(world: &mut World) -> ComponentId { + world.register_component::() + } + + fn get_state(components: &Components) -> Option { + components.component_id::() + } + + fn matches_component_set( + &state: &ComponentId, + set_contains_id: &impl Fn(ComponentId) -> bool, + ) -> bool { + set_contains_id(state) + } +} + +/// SAFETY: access of `&T` is a subset of `&mut T` +unsafe impl<'__w, T: Component> QueryData for &'__w mut T { + const IS_READ_ONLY: bool = false; + type ReadOnly = &'__w T; + type Item<'w> = Mut<'w, T>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Mut<'wlong, T>) -> Mut<'wshort, T> { + item } #[inline(always)] @@ -1539,7 +1601,7 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { fetch.components.extract( |table| { // SAFETY: set_table was previously called - let (table_components, added_ticks, changed_ticks, _callers) = + let (table_components, added_ticks, changed_ticks, callers) = unsafe { table.debug_checked_unwrap() }; // SAFETY: The caller ensures `table_row` is in range. @@ -1549,8 +1611,7 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { // SAFETY: The caller ensures `table_row` is in range. let changed = unsafe { changed_ticks.get(table_row.as_usize()) }; // SAFETY: The caller ensures `table_row` is in range. - #[cfg(feature = "track_change_detection")] - let caller = unsafe { _callers.get(table_row.as_usize()) }; + let caller = callers.map(|callers| unsafe { callers.get(table_row.as_usize()) }); Mut { value: component.deref_mut(), @@ -1560,56 +1621,26 @@ unsafe impl<'__w, T: Component> WorldQuery for &'__w mut T { this_run: fetch.this_run, last_run: fetch.last_run, }, - #[cfg(feature = "track_change_detection")] - changed_by: caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), } }, |sparse_set| { - // SAFETY: The caller ensures `entity` is in range. 
- let (component, ticks, _caller) = - unsafe { sparse_set.get_with_ticks(entity).debug_checked_unwrap() }; + // SAFETY: The caller ensures `entity` is in range and has the component. + let (component, ticks, caller) = unsafe { + sparse_set + .debug_checked_unwrap() + .get_with_ticks(entity) + .debug_checked_unwrap() + }; Mut { value: component.assert_unique().deref_mut(), ticks: TicksMut::from_tick_cells(ticks, fetch.last_run, fetch.this_run), - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), } }, ) } - - fn update_component_access( - &component_id: &ComponentId, - access: &mut FilteredAccess, - ) { - assert!( - !access.access().has_component_read(component_id), - "&mut {} conflicts with a previous access in this query. Mutable component access must be unique.", - core::any::type_name::(), - ); - access.add_component_write(component_id); - } - - fn init_state(world: &mut World) -> ComponentId { - world.register_component::() - } - - fn get_state(components: &Components) -> Option { - components.component_id::() - } - - fn matches_component_set( - &state: &ComponentId, - set_contains_id: &impl Fn(ComponentId) -> bool, - ) -> bool { - set_contains_id(state) - } -} - -/// SAFETY: access of `&T` is a subset of `&mut T` -unsafe impl<'__w, T: Component> QueryData for &'__w mut T { - type ReadOnly = &'__w T; } /// When `Mut` is used in a query, it will be converted to `Ref` when transformed into its read-only form, providing access to change detection methods. @@ -1622,15 +1653,9 @@ unsafe impl<'__w, T: Component> QueryData for &'__w mut T /// `update_component_access` adds a `With` filter for a component. /// This is sound because `matches_component_set` returns whether the set contains that component. 
unsafe impl<'__w, T: Component> WorldQuery for Mut<'__w, T> { - type Item<'w> = Mut<'w, T>; type Fetch<'w> = WriteFetch<'w, T>; type State = ComponentId; - // Forwarded to `&mut T` - fn shrink<'wlong: 'wshort, 'wshort>(item: Mut<'wlong, T>) -> Mut<'wshort, T> { - <&mut T as WorldQuery>::shrink(item) - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -1666,18 +1691,6 @@ unsafe impl<'__w, T: Component> WorldQuery for Mut<'__w, T> { <&mut T as WorldQuery>::set_table(fetch, state, table); } - #[inline(always)] - // Forwarded to `&mut T` - unsafe fn fetch<'w>( - // Rust complains about lifetime bounds not matching the trait if I directly use `WriteFetch<'w, T>` right here. - // But it complains nowhere else in the entire trait implementation. - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Mut<'w, T> { - <&mut T as WorldQuery>::fetch(fetch, entity, table_row) - } - // NOT forwarded to `&mut T` fn update_component_access( &component_id: &ComponentId, @@ -1713,8 +1726,27 @@ unsafe impl<'__w, T: Component> WorldQuery for Mut<'__w, T> { } // SAFETY: access of `Ref` is a subset of `Mut` -unsafe impl<'__w, T: Component> QueryData for Mut<'__w, T> { +unsafe impl<'__w, T: Component> QueryData for Mut<'__w, T> { + const IS_READ_ONLY: bool = false; type ReadOnly = Ref<'__w, T>; + type Item<'w> = Mut<'w, T>; + + // Forwarded to `&mut T` + fn shrink<'wlong: 'wshort, 'wshort>(item: Mut<'wlong, T>) -> Mut<'wshort, T> { + <&mut T as QueryData>::shrink(item) + } + + #[inline(always)] + // Forwarded to `&mut T` + unsafe fn fetch<'w>( + // Rust complains about lifetime bounds not matching the trait if I directly use `WriteFetch<'w, T>` right here. + // But it complains nowhere else in the entire trait implementation. 
+ fetch: &mut Self::Fetch<'w>, + entity: Entity, + table_row: TableRow, + ) -> Mut<'w, T> { + <&mut T as QueryData>::fetch(fetch, entity, table_row) + } } #[doc(hidden)] @@ -1737,14 +1769,9 @@ impl Clone for OptionFetch<'_, T> { /// This is sound because `update_component_access` and `update_archetype_component_access` add the same accesses as `T`. /// Filters are unchanged. unsafe impl WorldQuery for Option { - type Item<'w> = Option>; type Fetch<'w> = OptionFetch<'w, T>; type State = T::State; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item.map(T::shrink) - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { OptionFetch { fetch: T::shrink_fetch(fetch.fetch), @@ -1760,7 +1787,7 @@ unsafe impl WorldQuery for Option { this_run: Tick, ) -> OptionFetch<'w, T> { OptionFetch { - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. fetch: unsafe { T::init_fetch(world, state, last_run, this_run) }, matches: false, } @@ -1777,7 +1804,7 @@ unsafe impl WorldQuery for Option { ) { fetch.matches = T::matches_component_set(state, &|id| archetype.contains(id)); if fetch.matches { - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. unsafe { T::set_archetype(&mut fetch.fetch, state, archetype, table); } @@ -1788,25 +1815,13 @@ unsafe impl WorldQuery for Option { unsafe fn set_table<'w>(fetch: &mut OptionFetch<'w, T>, state: &T::State, table: &'w Table) { fetch.matches = T::matches_component_set(state, &|id| table.has_column(id)); if fetch.matches { - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. 
unsafe { T::set_table(&mut fetch.fetch, state, table); } } } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Self::Item<'w> { - fetch - .matches - // SAFETY: The invariants are uphold by the caller. - .then(|| unsafe { T::fetch(&mut fetch.fetch, entity, table_row) }) - } - fn update_component_access(state: &T::State, access: &mut FilteredAccess) { // FilteredAccess::add_[write,read] adds the component to the `with` filter. // Those methods are called on `access` in `T::update_component_access`. @@ -1840,7 +1855,25 @@ unsafe impl WorldQuery for Option { // SAFETY: defers to soundness of `T: WorldQuery` impl unsafe impl QueryData for Option { + const IS_READ_ONLY: bool = T::IS_READ_ONLY; type ReadOnly = Option; + type Item<'w> = Option>; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item.map(T::shrink) + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + table_row: TableRow, + ) -> Self::Item<'w> { + fetch + .matches + // SAFETY: The invariants are upheld by the caller. + .then(|| unsafe { T::fetch(&mut fetch.fetch, entity, table_row) }) + } } /// SAFETY: [`OptionFetch`] is read only because `T` is read only @@ -1921,14 +1954,9 @@ impl core::fmt::Debug for Has { /// `update_component_access` and `update_archetype_component_access` do nothing. /// This is sound because `fetch` does not access components. 
unsafe impl WorldQuery for Has { - type Item<'w> = bool; type Fetch<'w> = bool; type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -1965,15 +1993,6 @@ unsafe impl WorldQuery for Has { *fetch = table.has_column(*state); } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - *fetch - } - fn update_component_access( &component_id: &Self::State, access: &mut FilteredAccess, @@ -2000,7 +2019,22 @@ unsafe impl WorldQuery for Has { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for Has { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'w> = bool; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + _entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + *fetch + } } /// SAFETY: [`Has`] is read only @@ -2015,12 +2049,46 @@ pub struct AnyOf(PhantomData); macro_rules! impl_tuple_query_data { ($(#[$meta:meta])* $(($name: ident, $state: ident)),*) => { - #[allow(non_snake_case)] - #[allow(clippy::unused_unit)] + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such the lints below may not always apply." + )] + #[allow( + non_snake_case, + reason = "The names of some variables are provided by the macro's caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples will generate some function bodies equivalent to `()`; however, this macro is meant for all applicable tuples, and as such it makes no sense to rewrite it just for that case." 
+ )] $(#[$meta])* // SAFETY: defers to soundness `$name: WorldQuery` impl unsafe impl<$($name: QueryData),*> QueryData for ($($name,)*) { + const IS_READ_ONLY: bool = true $(&& $name::IS_READ_ONLY)*; type ReadOnly = ($($name::ReadOnly,)*); + type Item<'w> = ($($name::Item<'w>,)*); + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + let ($($name,)*) = item; + ($( + $name::shrink($name), + )*) + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + table_row: TableRow + ) -> Self::Item<'w> { + let ($($name,)*) = fetch; + // SAFETY: The invariants are upheld by the caller. + ($(unsafe { $name::fetch($name, entity, table_row) },)*) + } } $(#[$meta])* @@ -2033,8 +2101,22 @@ macro_rules! impl_tuple_query_data { macro_rules! impl_anytuple_fetch { ($(#[$meta:meta])* $(($name: ident, $state: ident)),*) => { $(#[$meta])* - #[allow(non_snake_case)] - #[allow(clippy::unused_unit)] + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such the lints below may not always apply." + )] + #[allow( + non_snake_case, + reason = "The names of some variables are provided by the macro's caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples will generate some function bodies equivalent to `()`; however, this macro is meant for all applicable tuples, and as such it makes no sense to rewrite it just for that case." + )] /// SAFETY: /// `fetch` accesses are a subset of the subqueries' accesses /// This is sound because `update_component_access` and `update_archetype_component_access` adds accesses according to the implementations of all the subqueries. @@ -2042,15 +2124,8 @@ macro_rules! impl_anytuple_fetch { /// This is sound because `matches_component_set` returns a disjunction of the results of the subqueries' implementations. 
unsafe impl<$($name: WorldQuery),*> WorldQuery for AnyOf<($($name,)*)> { type Fetch<'w> = ($(($name::Fetch<'w>, bool),)*); - type Item<'w> = ($(Option<$name::Item<'w>>,)*); type State = ($($name::State,)*); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - let ($($name,)*) = item; - ($( - $name.map($name::shrink), - )*) - } fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { let ($($name,)*) = fetch; ($( @@ -2059,10 +2134,9 @@ macro_rules! impl_anytuple_fetch { } #[inline] - #[allow(clippy::unused_unit)] unsafe fn init_fetch<'w>(_world: UnsafeWorldCell<'w>, state: &Self::State, _last_run: Tick, _this_run: Tick) -> Self::Fetch<'w> { let ($($name,)*) = state; - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. ($(( unsafe { $name::init_fetch(_world, $name, _last_run, _this_run) }, false),)*) } @@ -2080,7 +2154,7 @@ macro_rules! impl_anytuple_fetch { $( $name.1 = $name::matches_component_set($state, &|id| _archetype.contains(id)); if $name.1 { - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. unsafe { $name::set_archetype(&mut $name.0, $state, _archetype, _table); } } )* @@ -2093,26 +2167,12 @@ macro_rules! impl_anytuple_fetch { $( $name.1 = $name::matches_component_set($state, &|id| _table.has_column(id)); if $name.1 { - // SAFETY: The invariants are required to be upheld by the caller. + // SAFETY: The invariants are required to be upheld by the caller. unsafe { $name::set_table(&mut $name.0, $state, _table); } } )* } - #[inline(always)] - #[allow(clippy::unused_unit)] - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow - ) -> Self::Item<'w> { - let ($($name,)*) = _fetch; - ($( - // SAFETY: The invariants are required to be upheld by the caller. 
- $name.1.then(|| unsafe { $name::fetch(&mut $name.0, _entity, _table_row) }), - )*) - } - fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { // update the filters (Or<(With<$name>,)>) let ($($name,)*) = state; @@ -2139,11 +2199,9 @@ macro_rules! impl_anytuple_fetch { <($(Option<$name>,)*)>::update_component_access(state, access); } - #[allow(unused_variables)] fn init_state(world: &mut World) -> Self::State { ($($name::init_state(world),)*) } - #[allow(unused_variables)] fn get_state(components: &Components) -> Option { Some(($($name::get_state(components)?,)*)) } @@ -2154,12 +2212,48 @@ macro_rules! impl_anytuple_fetch { } } + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such the lints below may not always apply." + )] + #[allow( + non_snake_case, + reason = "The names of some variables are provided by the macro's caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples will generate some function bodies equivalent to `()`; however, this macro is meant for all applicable tuples, and as such it makes no sense to rewrite it just for that case." 
+ )] $(#[$meta])* - #[allow(non_snake_case)] - #[allow(clippy::unused_unit)] // SAFETY: defers to soundness of `$name: WorldQuery` impl unsafe impl<$($name: QueryData),*> QueryData for AnyOf<($($name,)*)> { + const IS_READ_ONLY: bool = true $(&& $name::IS_READ_ONLY)*; type ReadOnly = AnyOf<($($name::ReadOnly,)*)>; + type Item<'w> = ($(Option<$name::Item<'w>>,)*); + + fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { + let ($($name,)*) = item; + ($( + $name.map($name::shrink), + )*) + } + + #[inline(always)] + unsafe fn fetch<'w>( + _fetch: &mut Self::Fetch<'w>, + _entity: Entity, + _table_row: TableRow + ) -> Self::Item<'w> { + let ($($name,)*) = _fetch; + ($( + // SAFETY: The invariants are required to be upheld by the caller. + $name.1.then(|| unsafe { $name::fetch(&mut $name.0, _entity, _table_row) }), + )*) + } } $(#[$meta])* @@ -2194,12 +2288,9 @@ pub(crate) struct NopWorldQuery(PhantomData); /// `update_component_access` and `update_archetype_component_access` do nothing. /// This is sound because `fetch` does not access components. 
unsafe impl WorldQuery for NopWorldQuery { - type Item<'w> = (); type Fetch<'w> = (); type State = D::State; - fn shrink<'wlong: 'wshort, 'wshort>(_: ()) {} - fn shrink_fetch<'wlong: 'wshort, 'wshort>(_: ()) {} #[inline(always)] @@ -2225,14 +2316,6 @@ unsafe impl WorldQuery for NopWorldQuery { #[inline(always)] unsafe fn set_table<'w>(_fetch: &mut (), _state: &D::State, _table: &Table) {} - #[inline(always)] - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - } - fn update_component_access(_state: &D::State, _access: &mut FilteredAccess) {} fn init_state(world: &mut World) -> Self::State { @@ -2253,7 +2336,19 @@ unsafe impl WorldQuery for NopWorldQuery { /// SAFETY: `Self::ReadOnly` is `Self` unsafe impl QueryData for NopWorldQuery { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'w> = (); + + fn shrink<'wlong: 'wshort, 'wshort>(_: ()) {} + + #[inline(always)] + unsafe fn fetch<'w>( + _fetch: &mut Self::Fetch<'w>, + _entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + } } /// SAFETY: `NopFetch` never accesses any data @@ -2263,13 +2358,10 @@ unsafe impl ReadOnlyQueryData for NopWorldQuery {} /// `update_component_access` and `update_archetype_component_access` do nothing. /// This is sound because `fetch` does not access components. 
unsafe impl WorldQuery for PhantomData { - type Item<'a> = (); type Fetch<'a> = (); type State = (); - fn shrink<'wlong: 'wshort, 'wshort>(_item: Self::Item<'wlong>) -> Self::Item<'wshort> {} - fn shrink_fetch<'wlong: 'wshort, 'wshort>(_fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { } @@ -2296,13 +2388,6 @@ unsafe impl WorldQuery for PhantomData { unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { } - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - } - fn update_component_access(_state: &Self::State, _access: &mut FilteredAccess) {} fn init_state(_world: &mut World) -> Self::State {} @@ -2321,7 +2406,18 @@ unsafe impl WorldQuery for PhantomData { /// SAFETY: `Self::ReadOnly` is `Self` unsafe impl QueryData for PhantomData { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; + type Item<'a> = (); + + fn shrink<'wlong: 'wshort, 'wshort>(_item: Self::Item<'wlong>) -> Self::Item<'wshort> {} + + unsafe fn fetch<'w>( + _fetch: &mut Self::Fetch<'w>, + _entity: Entity, + _table_row: TableRow, + ) -> Self::Item<'w> { + } } /// SAFETY: `PhantomData` never accesses any world data. 
@@ -2400,10 +2496,7 @@ mod tests { use bevy_ecs_macros::QueryData; use super::*; - use crate::{ - self as bevy_ecs, - system::{assert_is_system, Query}, - }; + use crate::system::{assert_is_system, Query}; #[derive(Component)] pub struct A; diff --git a/crates/bevy_ecs/src/query/filter.rs b/crates/bevy_ecs/src/query/filter.rs index b096e801f4cc2..e4e1f0fd668d3 100644 --- a/crates/bevy_ecs/src/query/filter.rs +++ b/crates/bevy_ecs/src/query/filter.rs @@ -66,10 +66,7 @@ use variadics_please::all_tuples; /// # bevy_ecs::system::assert_is_system(my_system); /// ``` /// -/// [`fetch`]: Self::fetch -/// [`matches_component_set`]: Self::matches_component_set /// [`Query`]: crate::system::Query -/// [`State`]: Self::State /// /// # Safety /// @@ -86,6 +83,8 @@ pub unsafe trait QueryFilter: WorldQuery { /// /// This enables optimizations for [`crate::query::QueryIter`] that rely on knowing exactly how /// many elements are being iterated (such as `Iterator::collect()`). + /// + /// If this is `true`, then [`QueryFilter::filter_fetch`] must always return true. const IS_ARCHETYPAL: bool; /// Returns true if the provided [`Entity`] and [`TableRow`] should be included in the query results. @@ -94,11 +93,13 @@ pub unsafe trait QueryFilter: WorldQuery { /// Note that this is called after already restricting the matched [`Table`]s and [`Archetype`]s to the /// ones that are compatible with the Filter's access. /// + /// Implementors of this method will generally either have a trivial `true` body (required for archetypal filters), + /// or access the necessary data within this function to make the final decision on filter inclusion. + /// /// # Safety /// /// Must always be called _after_ [`WorldQuery::set_table`] or [`WorldQuery::set_archetype`]. `entity` and /// `table_row` must be in the range of the current table and archetype. 
- #[allow(unused_variables)] unsafe fn filter_fetch( fetch: &mut Self::Fetch<'_>, entity: Entity, @@ -137,16 +138,13 @@ pub struct With(PhantomData); /// SAFETY: /// `update_component_access` does not add any accesses. -/// This is sound because `fetch` does not access any components. +/// This is sound because [`QueryFilter::filter_fetch`] does not access any components. /// `update_component_access` adds a `With` filter for `T`. /// This is sound because `matches_component_set` returns whether the set contains the component. unsafe impl WorldQuery for With { - type Item<'w> = (); type Fetch<'w> = (); type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(_: Self::Item<'wlong>) -> Self::Item<'wshort> {} - fn shrink_fetch<'wlong: 'wshort, 'wshort>(_: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> {} #[inline] @@ -177,14 +175,6 @@ unsafe impl WorldQuery for With { #[inline] unsafe fn set_table(_fetch: &mut (), _state: &ComponentId, _table: &Table) {} - #[inline(always)] - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - } - #[inline] fn update_component_access(&id: &ComponentId, access: &mut FilteredAccess) { access.and_with(id); @@ -248,16 +238,13 @@ pub struct Without(PhantomData); /// SAFETY: /// `update_component_access` does not add any accesses. -/// This is sound because `fetch` does not access any components. +/// This is sound because [`QueryFilter::filter_fetch`] does not access any components. /// `update_component_access` adds a `Without` filter for `T`. /// This is sound because `matches_component_set` returns whether the set does not contain the component. 
unsafe impl WorldQuery for Without { - type Item<'w> = (); type Fetch<'w> = (); type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(_: Self::Item<'wlong>) -> Self::Item<'wshort> {} - fn shrink_fetch<'wlong: 'wshort, 'wshort>(_: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> {} #[inline] @@ -288,14 +275,6 @@ unsafe impl WorldQuery for Without { #[inline] unsafe fn set_table(_fetch: &mut (), _state: &Self::State, _table: &Table) {} - #[inline(always)] - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - } - #[inline] fn update_component_access(&id: &ComponentId, access: &mut FilteredAccess) { access.and_without(id); @@ -356,7 +335,7 @@ unsafe impl QueryFilter for Without { /// # /// fn print_cool_entity_system(query: Query, Changed)>>) { /// for entity in &query { -/// println!("Entity {:?} got a new style or color", entity); +/// println!("Entity {} got a new style or color", entity); /// } /// } /// # bevy_ecs::system::assert_is_system(print_cool_entity_system); @@ -381,23 +360,31 @@ impl Clone for OrFetch<'_, T> { macro_rules! impl_or_query_filter { ($(#[$meta:meta])* $(($filter: ident, $state: ident)),*) => { $(#[$meta])* - #[allow(unused_variables)] - #[allow(non_snake_case)] - #[allow(clippy::unused_unit)] + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such the lints below may not always apply." + )] + #[allow( + non_snake_case, + reason = "The names of some variables are provided by the macro's caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples will generate some function bodies equivalent to `()`; however, this macro is meant for all applicable tuples, and as such it makes no sense to rewrite it just for that case." 
+ )] /// SAFETY: - /// `fetch` accesses are a subset of the subqueries' accesses + /// [`QueryFilter::filter_fetch`] accesses are a subset of the subqueries' accesses /// This is sound because `update_component_access` adds accesses according to the implementations of all the subqueries. /// `update_component_access` replace the filters with a disjunction where every element is a conjunction of the previous filters and the filters of one of the subqueries. /// This is sound because `matches_component_set` returns a disjunction of the results of the subqueries' implementations. unsafe impl<$($filter: QueryFilter),*> WorldQuery for Or<($($filter,)*)> { type Fetch<'w> = ($(OrFetch<'w, $filter>,)*); - type Item<'w> = bool; type State = ($($filter::State,)*); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { let ($($filter,)*) = fetch; ($( @@ -414,7 +401,7 @@ macro_rules! impl_or_query_filter { unsafe fn init_fetch<'w>(world: UnsafeWorldCell<'w>, state: &Self::State, last_run: Tick, this_run: Tick) -> Self::Fetch<'w> { let ($($filter,)*) = state; ($(OrFetch { - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. fetch: unsafe { $filter::init_fetch(world, $filter, last_run, this_run) }, matches: false, },)*) @@ -427,7 +414,7 @@ macro_rules! impl_or_query_filter { $( $filter.matches = $filter::matches_component_set($state, &|id| table.has_column(id)); if $filter.matches { - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. unsafe { $filter::set_table(&mut $filter.fetch, $state, table); } } )* @@ -445,43 +432,32 @@ macro_rules! impl_or_query_filter { $( $filter.matches = $filter::matches_component_set($state, &|id| archetype.contains(id)); if $filter.matches { - // SAFETY: The invariants are uphold by the caller. 
+ // SAFETY: The invariants are upheld by the caller. unsafe { $filter::set_archetype(&mut $filter.fetch, $state, archetype, table); } } )* } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow - ) -> Self::Item<'w> { - let ($($filter,)*) = fetch; - // SAFETY: The invariants are uphold by the caller. - false $(|| ($filter.matches && unsafe { $filter::filter_fetch(&mut $filter.fetch, _entity, _table_row) }))* - } - fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { let ($($filter,)*) = state; - let mut _new_access = FilteredAccess::matches_nothing(); + let mut new_access = FilteredAccess::matches_nothing(); $( // Create an intermediate because `access`'s value needs to be preserved // for the next filter, and `_new_access` has to be modified only by `append_or` to it. let mut intermediate = access.clone(); $filter::update_component_access($filter, &mut intermediate); - _new_access.append_or(&intermediate); + new_access.append_or(&intermediate); // Also extend the accesses required to compute the filter. This is required because // otherwise a `Query<(), Or<(Changed,)>` won't conflict with `Query<&mut Foo>`. - _new_access.extend_access(&intermediate); + new_access.extend_access(&intermediate); )* // The required components remain the same as the original `access`. - _new_access.required = core::mem::take(&mut access.required); + new_access.required = core::mem::take(&mut access.required); - *access = _new_access; + *access = new_access; } fn init_state(world: &mut World) -> Self::State { @@ -492,15 +468,27 @@ macro_rules! 
impl_or_query_filter { Some(($($filter::get_state(components)?,)*)) } - fn matches_component_set(_state: &Self::State, _set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { - let ($($filter,)*) = _state; - false $(|| $filter::matches_component_set($filter, _set_contains_id))* + fn matches_component_set(state: &Self::State, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { + let ($($filter,)*) = state; + false $(|| $filter::matches_component_set($filter, set_contains_id))* } } - $(#[$meta])* - // SAFETY: This only performs access that subqueries perform, and they impl `QueryFilter` and so perform no mutable access. - unsafe impl<$($filter: QueryFilter),*> QueryFilter for Or<($($filter,)*)> { + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such the lints below may not always apply." + )] + #[allow( + non_snake_case, + reason = "The names of some variables are provided by the macro's caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + $(#[$meta])* + // SAFETY: This only performs access that subqueries perform, and they impl `QueryFilter` and so perform no mutable access. + unsafe impl<$($filter: QueryFilter),*> QueryFilter for Or<($($filter,)*)> { const IS_ARCHETYPAL: bool = true $(&& $filter::IS_ARCHETYPAL)*; #[inline(always)] @@ -509,8 +497,9 @@ macro_rules! impl_or_query_filter { entity: Entity, table_row: TableRow ) -> bool { - // SAFETY: The invariants are uphold by the caller. - unsafe { Self::fetch(fetch, entity, table_row) } + let ($($filter,)*) = fetch; + // SAFETY: The invariants are upheld by the caller. + false $(|| ($filter.matches && unsafe { $filter::filter_fetch(&mut $filter.fetch, entity, table_row) }))* } } }; @@ -518,9 +507,18 @@ macro_rules! impl_or_query_filter { macro_rules! 
impl_tuple_query_filter { ($(#[$meta:meta])* $($name: ident),*) => { - #[allow(unused_variables)] - #[allow(non_snake_case)] - #[allow(clippy::unused_unit)] + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such the lints below may not always apply." + )] + #[allow( + non_snake_case, + reason = "The names of some variables are provided by the macro's caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] $(#[$meta])* // SAFETY: This only performs access that subqueries perform, and they impl `QueryFilter` and so perform no mutable access. unsafe impl<$($name: QueryFilter),*> QueryFilter for ($($name,)*) { @@ -529,12 +527,12 @@ macro_rules! impl_tuple_query_filter { #[inline(always)] unsafe fn filter_fetch( fetch: &mut Self::Fetch<'_>, - _entity: Entity, - _table_row: TableRow + entity: Entity, + table_row: TableRow ) -> bool { let ($($name,)*) = fetch; - // SAFETY: The invariants are uphold by the caller. - true $(&& unsafe { $name::filter_fetch($name, _entity, _table_row) })* + // SAFETY: The invariants are upheld by the caller. + true $(&& unsafe { $name::filter_fetch($name, entity, table_row) })* } } @@ -630,7 +628,8 @@ pub struct AddedFetch<'w, T: Component> { // T::STORAGE_TYPE = StorageType::Table Option>>, // T::STORAGE_TYPE = StorageType::SparseSet - &'w ComponentSparseSet, + // Can be `None` when the component has never been inserted + Option<&'w ComponentSparseSet>, >, last_run: Tick, this_run: Tick, @@ -647,19 +646,14 @@ impl Clone for AddedFetch<'_, T> { } /// SAFETY: -/// `fetch` accesses a single component in a readonly way. +/// [`QueryFilter::filter_fetch`] accesses a single component in a readonly way. /// This is sound because `update_component_access` adds read access for that component and panics when appropriate. /// `update_component_access` adds a `With` filter for a component. 
/// This is sound because `matches_component_set` returns whether the set contains that component. unsafe impl WorldQuery for Added { - type Item<'w> = bool; type Fetch<'w> = AddedFetch<'w, T>; type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -679,7 +673,7 @@ unsafe impl WorldQuery for Added { // which we are allowed to access since we registered it in `update_archetype_component_access`. // Note that we do not actually access any components' ticks in this function, we just get a shared // reference to the sparse set, which is used to access the components' ticks in `Self::fetch`. - unsafe { world.storages().sparse_sets.get(id).debug_checked_unwrap() } + unsafe { world.storages().sparse_sets.get(id) } }, ), last_run, @@ -725,32 +719,6 @@ unsafe impl WorldQuery for Added { unsafe { fetch.ticks.set_table(table_ticks) }; } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Self::Item<'w> { - fetch.ticks.extract( - |table| { - // SAFETY: set_table was previously called - let table = unsafe { table.debug_checked_unwrap() }; - // SAFETY: The caller ensures `table_row` is in range. - let tick = unsafe { table.get(table_row.as_usize()) }; - - tick.deref().is_newer_than(fetch.last_run, fetch.this_run) - }, - |sparse_set| { - // SAFETY: The caller ensures `entity` is in range. 
- let tick = unsafe { - ComponentSparseSet::get_added_tick(sparse_set, entity).debug_checked_unwrap() - }; - - tick.deref().is_newer_than(fetch.last_run, fetch.this_run) - }, - ) - } - #[inline] fn update_component_access(&id: &ComponentId, access: &mut FilteredAccess) { if access.access().has_component_write(id) { @@ -784,8 +752,28 @@ unsafe impl QueryFilter for Added { entity: Entity, table_row: TableRow, ) -> bool { - // SAFETY: The invariants are uphold by the caller. - unsafe { Self::fetch(fetch, entity, table_row) } + // SAFETY: The invariants are upheld by the caller. + fetch.ticks.extract( + |table| { + // SAFETY: set_table was previously called + let table = unsafe { table.debug_checked_unwrap() }; + // SAFETY: The caller ensures `table_row` is in range. + let tick = unsafe { table.get(table_row.as_usize()) }; + + tick.deref().is_newer_than(fetch.last_run, fetch.this_run) + }, + |sparse_set| { + // SAFETY: The caller ensures `entity` is in range. + let tick = unsafe { + sparse_set + .debug_checked_unwrap() + .get_added_tick(entity) + .debug_checked_unwrap() + }; + + tick.deref().is_newer_than(fetch.last_run, fetch.this_run) + }, + ) } } @@ -864,7 +852,12 @@ pub struct Changed(PhantomData); #[doc(hidden)] pub struct ChangedFetch<'w, T: Component> { - ticks: StorageSwitch>>, &'w ComponentSparseSet>, + ticks: StorageSwitch< + T, + Option>>, + // Can be `None` when the component has never been inserted + Option<&'w ComponentSparseSet>, + >, last_run: Tick, this_run: Tick, } @@ -885,14 +878,9 @@ impl Clone for ChangedFetch<'_, T> { /// `update_component_access` adds a `With` filter for a component. /// This is sound because `matches_component_set` returns whether the set contains that component. 
unsafe impl WorldQuery for Changed { - type Item<'w> = bool; type Fetch<'w> = ChangedFetch<'w, T>; type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { fetch } @@ -912,7 +900,7 @@ unsafe impl WorldQuery for Changed { // which we are allowed to access since we registered it in `update_archetype_component_access`. // Note that we do not actually access any components' ticks in this function, we just get a shared // reference to the sparse set, which is used to access the components' ticks in `Self::fetch`. - unsafe { world.storages().sparse_sets.get(id).debug_checked_unwrap() } + unsafe { world.storages().sparse_sets.get(id) } }, ), last_run, @@ -958,32 +946,6 @@ unsafe impl WorldQuery for Changed { unsafe { fetch.ticks.set_table(table_ticks) }; } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Self::Item<'w> { - fetch.ticks.extract( - |table| { - // SAFETY: set_table was previously called - let table = unsafe { table.debug_checked_unwrap() }; - // SAFETY: The caller ensures `table_row` is in range. - let tick = unsafe { table.get(table_row.as_usize()) }; - - tick.deref().is_newer_than(fetch.last_run, fetch.this_run) - }, - |sparse_set| { - // SAFETY: The caller ensures `entity` is in range. - let tick = unsafe { - ComponentSparseSet::get_changed_tick(sparse_set, entity).debug_checked_unwrap() - }; - - tick.deref().is_newer_than(fetch.last_run, fetch.this_run) - }, - ) - } - #[inline] fn update_component_access(&id: &ComponentId, access: &mut FilteredAccess) { if access.access().has_component_write(id) { @@ -1018,8 +980,28 @@ unsafe impl QueryFilter for Changed { entity: Entity, table_row: TableRow, ) -> bool { - // SAFETY: The invariants are uphold by the caller. 
- unsafe { Self::fetch(fetch, entity, table_row) } + // SAFETY: The invariants are upheld by the caller. + fetch.ticks.extract( + |table| { + // SAFETY: set_table was previously called + let table = unsafe { table.debug_checked_unwrap() }; + // SAFETY: The caller ensures `table_row` is in range. + let tick = unsafe { table.get(table_row.as_usize()) }; + + tick.deref().is_newer_than(fetch.last_run, fetch.this_run) + }, + |sparse_set| { + // SAFETY: The caller ensures `entity` is in range. + let tick = unsafe { + sparse_set + .debug_checked_unwrap() + .get_changed_tick(entity) + .debug_checked_unwrap() + }; + + tick.deref().is_newer_than(fetch.last_run, fetch.this_run) + }, + ) } } diff --git a/crates/bevy_ecs/src/query/iter.rs b/crates/bevy_ecs/src/query/iter.rs index c7303382f68d8..fc89843493a03 100644 --- a/crates/bevy_ecs/src/query/iter.rs +++ b/crates/bevy_ecs/src/query/iter.rs @@ -3,7 +3,7 @@ use crate::{ archetype::{Archetype, ArchetypeEntity, Archetypes}, bundle::Bundle, component::Tick, - entity::{Entities, Entity, EntityBorrow, EntitySet, EntitySetIterator}, + entity::{ContainsEntity, Entities, Entity, EntityEquivalent, EntitySet, EntitySetIterator}, query::{ArchetypeFilter, DebugCheckedUnwrap, QueryState, StorageId}, storage::{Table, TableRow, Tables}, world::{ @@ -48,7 +48,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { // SAFETY: We only access table data that has been registered in `query_state`. tables: unsafe { &world.storages().tables }, archetypes: world.archetypes(), - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. cursor: unsafe { QueryIterationCursor::init(world, query_state, last_run, this_run) }, } } @@ -126,7 +126,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { } /// Executes the equivalent of [`Iterator::fold`] over a contiguous segment - /// from an storage. + /// from a storage. 
/// /// # Safety /// - `range` must be in `[0, storage::entity_count)` or None. @@ -187,7 +187,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { } /// Executes the equivalent of [`Iterator::fold`] over a contiguous segment - /// from an table. + /// from a table. /// /// # Safety /// - all `rows` must be in `[0, table.entity_count)`. @@ -487,7 +487,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// # schedule.add_systems((system_1, system_2, system_3)); /// # schedule.run(&mut world); /// ``` - pub fn sort: Ord> + 'w>( + pub fn sort( self, ) -> QuerySortedIter< 'w, @@ -495,46 +495,11 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { D, F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, - > { - // On the first successful iteration of `QueryIterationCursor`, `archetype_entities` or `table_entities` - // will be set to a non-zero value. The correctness of this method relies on this. - // I.e. this sort method will execute if and only if `next` on `QueryIterationCursor` of a - // non-empty `QueryIter` has not yet been called. When empty, this sort method will not panic. - if !self.cursor.archetype_entities.is_empty() || !self.cursor.table_entities.is_empty() { - panic!("it is not valid to call sort() after next()") - } - - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. 
- let query_lens = unsafe { - query_lens_state.iter_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens - .map(|(key, entity)| (key, NeutralOrd(entity))) - .collect(); - keyed_query.sort(); - let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity.0); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + > + where + for<'lw> L::Item<'lw>: Ord, + { + self.sort_impl::(|keyed_query| keyed_query.sort()) } /// Sorts all query items into a new iterator, using the query lens as a key. @@ -579,7 +544,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// # schedule.add_systems((system_1)); /// # schedule.run(&mut world); /// ``` - pub fn sort_unstable: Ord> + 'w>( + pub fn sort_unstable( self, ) -> QuerySortedIter< 'w, @@ -587,46 +552,11 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { D, F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, - > { - // On the first successful iteration of `QueryIterationCursor`, `archetype_entities` or `table_entities` - // will be set to a non-zero value. The correctness of this method relies on this. - // I.e. this sort method will execute if and only if `next` on `QueryIterationCursor` of a - // non-empty `QueryIter` has not yet been called. When empty, this sort method will not panic. - if !self.cursor.archetype_entities.is_empty() || !self.cursor.table_entities.is_empty() { - panic!("it is not valid to call sort() after next()") - } - - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. 
- // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens - .map(|(key, entity)| (key, NeutralOrd(entity))) - .collect(); - keyed_query.sort_unstable(); - let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity.0); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + > + where + for<'lw> L::Item<'lw>: Ord, + { + self.sort_impl::(|keyed_query| keyed_query.sort_unstable()) } /// Sorts all query items into a new iterator with a comparator function over the query lens. @@ -680,7 +610,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// ``` pub fn sort_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedIter< 'w, 's, @@ -688,43 +618,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > { - // On the first successful iteration of `QueryIterationCursor`, `archetype_entities` or `table_entities` - // will be set to a non-zero value. The correctness of this method relies on this. - // I.e. this sort method will execute if and only if `next` on `QueryIterationCursor` of a - // non-empty `QueryIter` has not yet been called. When empty, this sort method will not panic. 
- if !self.cursor.archetype_entities.is_empty() || !self.cursor.table_entities.is_empty() { - panic!("it is not valid to call sort() after next()") - } - - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); - let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| { + keyed_query.sort_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); + }) } /// Sorts all query items into a new iterator with a comparator function over the query lens. @@ -746,7 +642,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. pub fn sort_unstable_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedIter< 'w, 's, @@ -754,43 +650,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > { - // On the first successful iteration of `QueryIterationCursor`, `archetype_entities` or `table_entities` - // will be set to a non-zero value. 
The correctness of this method relies on this. - // I.e. this sort method will execute if and only if `next` on `QueryIterationCursor` of a - // non-empty `QueryIter` has not yet been called. When empty, this sort method will not panic. - if !self.cursor.archetype_entities.is_empty() || !self.cursor.table_entities.is_empty() { - panic!("it is not valid to call sort() after next()") - } - - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_unstable_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); - let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| { + keyed_query.sort_unstable_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); + }) } /// Sorts all query items into a new iterator with a key extraction function over the query lens. 
@@ -832,7 +694,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// #[derive(Component)] /// struct AvailableMarker; /// - /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord)] + /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] /// enum Rarity { /// Common, /// Rare, @@ -860,7 +722,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// .sort_by_key::(|entity_ref| { /// ( /// entity_ref.contains::(), - /// entity_ref.get::() + /// entity_ref.get::().copied() /// ) /// }) /// .rev() @@ -872,7 +734,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// ``` pub fn sort_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedIter< 'w, 's, @@ -883,43 +745,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { where K: Ord, { - // On the first successful iteration of `QueryIterationCursor`, `archetype_entities` or `table_entities` - // will be set to a non-zero value. The correctness of this method relies on this. - // I.e. this sort method will execute if and only if `next` on `QueryIterationCursor` of a - // non-empty `QueryIter` has not yet been called. When empty, this sort method will not panic. - if !self.cursor.archetype_entities.is_empty() || !self.cursor.table_entities.is_empty() { - panic!("it is not valid to call sort() after next()") - } - - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. 
- let query_lens = unsafe { - query_lens_state.iter_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_by_key(|(lens, _)| f(lens)); - let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| keyed_query.sort_by_key(|(lens, _)| f(lens))) } /// Sorts all query items into a new iterator with a key extraction function over the query lens. @@ -941,7 +767,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. pub fn sort_unstable_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedIter< 'w, 's, @@ -952,43 +778,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { where K: Ord, { - // On the first successful iteration of `QueryIterationCursor`, `archetype_entities` or `table_entities` - // will be set to a non-zero value. The correctness of this method relies on this. - // I.e. this sort method will execute if and only if `next` on `QueryIterationCursor` of a - // non-empty `QueryIter` has not yet been called. When empty, this sort method will not panic. - if !self.cursor.archetype_entities.is_empty() || !self.cursor.table_entities.is_empty() { - panic!("it is not valid to call sort() after next()") - } - - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. 
- // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_unstable_by_key(|(lens, _)| f(lens)); - let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| { + keyed_query.sort_unstable_by_key(|(lens, _)| f(lens)); + }) } /// Sort all query items into a new iterator with a key extraction function over the query lens. @@ -1010,7 +802,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. pub fn sort_by_cached_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedIter< 'w, 's, @@ -1021,6 +813,33 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { where K: Ord, { + self.sort_impl::(move |keyed_query| keyed_query.sort_by_cached_key(|(lens, _)| f(lens))) + } + + /// Shared implementation for the various `sort` methods. + /// This uses the lens to collect the items for sorting, but delegates the actual sorting to the provided closure. + /// + /// Defining the lens works like [`transmute_lens`](crate::system::Query::transmute_lens). + /// This includes the allowed parameter type changes listed under [allowed transmutes]. + /// However, the lens uses the filter of the original query when present. + /// + /// The sort is not cached across system runs. 
+ /// + /// [allowed transmutes]: crate::system::Query#allowed-transmutes + /// + /// # Panics + /// + /// This will panic if `next` has been called on `QueryIter` before, unless the underlying `Query` is empty. + fn sort_impl( + self, + f: impl FnOnce(&mut Vec<(L::Item<'_>, NeutralOrd)>), + ) -> QuerySortedIter< + 'w, + 's, + D, + F, + impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, + > { // On the first successful iteration of `QueryIterationCursor`, `archetype_entities` or `table_entities` // will be set to a non-zero value. The correctness of this method relies on this. // I.e. this sort method will execute if and only if `next` on `QueryIterationCursor` of a @@ -1036,16 +855,13 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIter<'w, 's, D, F> { // SAFETY: // `self.world` has permission to access the required components. // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_by_cached_key(|(lens, _)| f(lens)); - let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity); + // `QueryIter::new` ensures `world` is the same one used to initialize `query_state`. + let query_lens = unsafe { query_lens_state.query_unchecked_manual(world) }.into_iter(); + let mut keyed_query: Vec<_> = query_lens + .map(|(key, entity)| (key, NeutralOrd(entity))) + .collect(); + f(&mut keyed_query); + let entity_iter = keyed_query.into_iter().map(|(.., entity)| entity.0); // SAFETY: // `self.world` has permission to access the required components. // Each lens query item is dropped before the respective actual query item is accessed. 
@@ -1145,6 +961,12 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Debug for QueryIter<'w, 's, D, F> { } } +impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter> Clone for QueryIter<'w, 's, D, F> { + fn clone(&self) -> Self { + self.remaining() + } +} + /// An [`Iterator`] over sorted query results of a [`Query`](crate::system::Query). /// /// This struct is created by the [`QueryIter::sort`], [`QueryIter::sort_unstable`], @@ -1295,7 +1117,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> Debug /// Entities that don't match the query are skipped. /// /// This struct is created by the [`Query::iter_many`](crate::system::Query::iter_many) and [`Query::iter_many_mut`](crate::system::Query::iter_many_mut) methods. -pub struct QueryManyIter<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> { +pub struct QueryManyIter<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> +{ world: UnsafeWorldCell<'w>, entity_iter: I, entities: &'w Entities, @@ -1306,7 +1129,7 @@ pub struct QueryManyIter<'w, 's, D: QueryData, F: QueryFilter, I: Iterator, } -impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> +impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> QueryManyIter<'w, 's, D, F, I> { /// # Safety @@ -1345,7 +1168,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// It is always safe for shared access. 
#[inline(always)] unsafe fn fetch_next_aliased_unchecked( - entity_iter: impl Iterator, + entity_iter: impl Iterator, entities: &'w Entities, tables: &'w Tables, archetypes: &'w Archetypes, @@ -1494,7 +1317,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// // We need to collect the internal iterator before iterating mutably /// let mut parent_query_iter = query.iter_many_mut(entity_list) /// .sort::(); - /// + /// /// let mut scratch_value = 0; /// while let Some(mut part_value) = parent_query_iter.fetch_next_back() /// { @@ -1518,47 +1341,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > where - L::Item<'w>: Ord, + for<'lw> L::Item<'lw>: Ord, { - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_many_unchecked_manual( - self.entity_iter, - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens - .map(|(key, entity)| (key, NeutralOrd(entity))) - .collect(); - keyed_query.sort(); - // Re-collect into a `Vec` to eagerly drop the lens items. - // They must be dropped before `fetch_next` is called since they may alias - // with other data items if there are duplicate entities in `entity_iter`. - let entity_iter = keyed_query - .into_iter() - .map(|(.., entity)| entity.0) - .collect::>() - .into_iter(); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. 
- unsafe { - QuerySortedManyIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(|keyed_query| keyed_query.sort()) } /// Sorts all query items into a new iterator, using the query lens as a key. @@ -1614,47 +1399,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > where - L::Item<'w>: Ord, + for<'lw> L::Item<'lw>: Ord, { - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_many_unchecked_manual( - self.entity_iter, - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens - .map(|(key, entity)| (key, NeutralOrd(entity))) - .collect(); - keyed_query.sort_unstable(); - // Re-collect into a `Vec` to eagerly drop the lens items. - // They must be dropped before `fetch_next` is called since they may alias - // with other data items if there are duplicate entities in `entity_iter`. - let entity_iter = keyed_query - .into_iter() - .map(|(.., entity)| entity.0) - .collect::>() - .into_iter(); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedManyIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(|keyed_query| keyed_query.sort_unstable()) } /// Sorts all query items into a new iterator with a comparator function over the query lens. 
@@ -1709,7 +1456,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// ``` pub fn sort_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedManyIter< 'w, 's, @@ -1717,43 +1464,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > { - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_many_unchecked_manual( - self.entity_iter, - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); - // Re-collect into a `Vec` to eagerly drop the lens items. - // They must be dropped before `fetch_next` is called since they may alias - // with other data items if there are duplicate entities in `entity_iter`. - let entity_iter = keyed_query - .into_iter() - .map(|(.., entity)| entity) - .collect::>() - .into_iter(); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedManyIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| { + keyed_query.sort_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); + }) } /// Sorts all query items into a new iterator with a comparator function over the query lens. @@ -1774,7 +1487,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// called on [`QueryManyIter`] before. 
pub fn sort_unstable_by( self, - mut compare: impl FnMut(&L::Item<'w>, &L::Item<'w>) -> Ordering, + mut compare: impl FnMut(&L::Item<'_>, &L::Item<'_>) -> Ordering, ) -> QuerySortedManyIter< 'w, 's, @@ -1782,43 +1495,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> F, impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, > { - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_many_unchecked_manual( - self.entity_iter, - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_unstable_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); - // Re-collect into a `Vec` to eagerly drop the lens items. - // They must be dropped before `fetch_next` is called since they may alias - // with other data items if there are duplicate entities in `entity_iter`. - let entity_iter = keyed_query - .into_iter() - .map(|(.., entity)| entity) - .collect::>() - .into_iter(); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. - unsafe { - QuerySortedManyIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| { + keyed_query.sort_unstable_by(|(key_1, _), (key_2, _)| compare(key_1, key_2)); + }) } /// Sorts all query items into a new iterator with a key extraction function over the query lens. 
@@ -1860,7 +1539,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// #[derive(Component)] /// struct AvailableMarker; /// - /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord)] + /// #[derive(Component, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] /// enum Rarity { /// Common, /// Rare, @@ -1890,7 +1569,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// .sort_by_key::(|entity_ref| { /// ( /// entity_ref.contains::(), - /// entity_ref.get::() + // entity_ref.get::().copied() /// ) /// }) /// .rev() @@ -1902,7 +1581,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// ``` pub fn sort_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedManyIter< 'w, 's, @@ -1913,43 +1592,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> where K: Ord, { - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_many_unchecked_manual( - self.entity_iter, - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_by_key(|(lens, _)| f(lens)); - // Re-collect into a `Vec` to eagerly drop the lens items. - // They must be dropped before `fetch_next` is called since they may alias - // with other data items if there are duplicate entities in `entity_iter`. - let entity_iter = keyed_query - .into_iter() - .map(|(.., entity)| entity) - .collect::>() - .into_iter(); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. 
- unsafe { - QuerySortedManyIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| keyed_query.sort_by_key(|(lens, _)| f(lens))) } /// Sorts all query items into a new iterator with a key extraction function over the query lens. @@ -1970,7 +1613,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// called on [`QueryManyIter`] before. pub fn sort_unstable_by_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedManyIter< 'w, 's, @@ -1981,43 +1624,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> where K: Ord, { - let world = self.world; - - let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); - - // SAFETY: - // `self.world` has permission to access the required components. - // The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_many_unchecked_manual( - self.entity_iter, - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_unstable_by_key(|(lens, _)| f(lens)); - // Re-collect into a `Vec` to eagerly drop the lens items. - // They must be dropped before `fetch_next` is called since they may alias - // with other data items if there are duplicate entities in `entity_iter`. - let entity_iter = keyed_query - .into_iter() - .map(|(.., entity)| entity) - .collect::>() - .into_iter(); - // SAFETY: - // `self.world` has permission to access the required components. - // Each lens query item is dropped before the respective actual query item is accessed. 
- unsafe { - QuerySortedManyIter::new( - world, - self.query_state, - entity_iter, - world.last_change_tick(), - world.change_tick(), - ) - } + self.sort_impl::(move |keyed_query| { + keyed_query.sort_unstable_by_key(|(lens, _)| f(lens)); + }) } /// Sort all query items into a new iterator with a key extraction function over the query lens. @@ -2038,7 +1647,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> /// called on [`QueryManyIter`] before. pub fn sort_by_cached_key( self, - mut f: impl FnMut(&L::Item<'w>) -> K, + mut f: impl FnMut(&L::Item<'_>) -> K, ) -> QuerySortedManyIter< 'w, 's, @@ -2049,6 +1658,32 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> where K: Ord, { + self.sort_impl::(move |keyed_query| keyed_query.sort_by_cached_key(|(lens, _)| f(lens))) + } + + /// Shared implementation for the various `sort` methods. + /// This uses the lens to collect the items for sorting, but delegates the actual sorting to the provided closure. + /// + /// Defining the lens works like [`transmute_lens`](crate::system::Query::transmute_lens). + /// This includes the allowed parameter type changes listed under [allowed transmutes]. + /// However, the lens uses the filter of the original query when present. + /// + /// The sort is not cached across system runs. + /// + /// [allowed transmutes]: crate::system::Query#allowed-transmutes + /// + /// Unlike the sort methods on [`QueryIter`], this does NOT panic if `next`/`fetch_next` has been + /// called on [`QueryManyIter`] before. + fn sort_impl( + self, + f: impl FnOnce(&mut Vec<(L::Item<'_>, NeutralOrd)>), + ) -> QuerySortedManyIter< + 'w, + 's, + D, + F, + impl ExactSizeIterator + DoubleEndedIterator + FusedIterator + 'w, + > { let world = self.world; let query_lens_state = self.query_state.transmute_filtered::<(L, Entity), F>(world); @@ -2056,22 +1691,19 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> // SAFETY: // `self.world` has permission to access the required components. 
// The original query iter has not been iterated on, so no items are aliased from it. - let query_lens = unsafe { - query_lens_state.iter_many_unchecked_manual( - self.entity_iter, - world, - world.last_change_tick(), - world.change_tick(), - ) - }; - let mut keyed_query: Vec<_> = query_lens.collect(); - keyed_query.sort_by_cached_key(|(lens, _)| f(lens)); + // `QueryIter::new` ensures `world` is the same one used to initialize `query_state`. + let query_lens = unsafe { query_lens_state.query_unchecked_manual(world) } + .iter_many_inner(self.entity_iter); + let mut keyed_query: Vec<_> = query_lens + .map(|(key, entity)| (key, NeutralOrd(entity))) + .collect(); + f(&mut keyed_query); // Re-collect into a `Vec` to eagerly drop the lens items. // They must be dropped before `fetch_next` is called since they may alias // with other data items if there are duplicate entities in `entity_iter`. let entity_iter = keyed_query .into_iter() - .map(|(.., entity)| entity) + .map(|(.., entity)| entity.0) .collect::>() .into_iter(); // SAFETY: @@ -2089,7 +1721,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> } } -impl<'w, 's, D: QueryData, F: QueryFilter, I: DoubleEndedIterator> +impl<'w, 's, D: QueryData, F: QueryFilter, I: DoubleEndedIterator> QueryManyIter<'w, 's, D, F, I> { /// Get next result from the back of the query @@ -2115,7 +1747,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter, I: DoubleEndedIterator> Iterator +impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: Iterator> Iterator for QueryManyIter<'w, 's, D, F, I> { type Item = D::Item<'w>; @@ -2144,8 +1776,13 @@ impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: Iterator> - DoubleEndedIterator for QueryManyIter<'w, 's, D, F, I> +impl< + 'w, + 's, + D: ReadOnlyQueryData, + F: QueryFilter, + I: DoubleEndedIterator, + > DoubleEndedIterator for QueryManyIter<'w, 's, D, F, I> { #[inline(always)] fn next_back(&mut self) -> Option { @@ -2167,8 +1804,8 @@ impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: 
DoubleEndedIterator> FusedIterator - for QueryManyIter<'w, 's, D, F, I> +impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, I: Iterator> + FusedIterator for QueryManyIter<'w, 's, D, F, I> { } @@ -2178,7 +1815,7 @@ unsafe impl<'w, 's, F: QueryFilter, I: EntitySetIterator> EntitySetIterator { } -impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> Debug +impl<'w, 's, D: QueryData, F: QueryFilter, I: Iterator> Debug for QueryManyIter<'w, 's, D, F, I> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { @@ -2810,7 +2447,9 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryIterationCursor<'w, 's, D, F> { } // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QuerySortedIter, QueryManyIter, QuerySortedManyIter, QueryCombinationIter, QueryState::par_fold_init_unchecked_manual + // QueryIter, QueryIterationCursor, QuerySortedIter, QueryManyIter, QuerySortedManyIter, QueryCombinationIter, + // QueryState::par_fold_init_unchecked_manual, QueryState::par_many_fold_init_unchecked_manual, + // QueryState::par_many_unique_fold_init_unchecked_manual /// # Safety /// `tables` and `archetypes` must belong to the same world that the [`QueryIterationCursor`] /// was initialized for. 
@@ -2935,7 +2574,10 @@ impl PartialEq for NeutralOrd { impl Eq for NeutralOrd {} -#[allow(clippy::non_canonical_partial_ord_impl)] +#[expect( + clippy::non_canonical_partial_ord_impl, + reason = "`PartialOrd` and `Ord` on this struct must only ever return `Ordering::Equal`, so we prefer clarity" +)] impl PartialOrd for NeutralOrd { fn partial_cmp(&self, _other: &Self) -> Option { Some(Ordering::Equal) @@ -2949,15 +2591,14 @@ impl Ord for NeutralOrd { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { - #[allow(unused_imports)] + use alloc::vec::Vec; + use std::println; + use crate::component::Component; - #[allow(unused_imports)] use crate::entity::Entity; - #[allow(unused_imports)] use crate::prelude::World; - #[allow(unused_imports)] - use crate::{self as bevy_ecs}; #[derive(Component, Debug, PartialEq, PartialOrd, Clone, Copy)] struct A(f32); @@ -2965,14 +2606,19 @@ mod tests { #[component(storage = "SparseSet")] struct Sparse(usize); - #[allow(clippy::unnecessary_sort_by)] #[test] fn query_iter_sorts() { let mut world = World::new(); + for i in 0..100 { + world.spawn(A(i as f32)); + world.spawn((A(i as f32), Sparse(i))); + world.spawn(Sparse(i)); + } let mut query = world.query::(); let sort = query.iter(&world).sort::().collect::>(); + assert_eq!(sort.len(), 300); let sort_unstable = query .iter(&world) @@ -3153,7 +2799,6 @@ mod tests { } } - #[allow(clippy::unnecessary_sort_by)] #[test] fn query_iter_many_sorts() { let mut world = World::new(); @@ -3276,13 +2921,13 @@ mod tests { { let mut query = query_state .iter_many_mut(&mut world, [id, id]) - .sort_by::<&C>(Ord::cmp); + .sort_by::<&C>(|l, r| Ord::cmp(l, r)); while query.fetch_next().is_some() {} } { let mut query = query_state .iter_many_mut(&mut world, [id, id]) - .sort_unstable_by::<&C>(Ord::cmp); + .sort_unstable_by::<&C>(|l, r| Ord::cmp(l, r)); while query.fetch_next().is_some() {} } { diff --git a/crates/bevy_ecs/src/query/mod.rs 
b/crates/bevy_ecs/src/query/mod.rs index c6c1383ceb7b9..c1744cbf24211 100644 --- a/crates/bevy_ecs/src/query/mod.rs +++ b/crates/bevy_ecs/src/query/mod.rs @@ -102,9 +102,9 @@ impl DebugCheckedUnwrap for Option { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use crate::{ - self as bevy_ecs, archetype::Archetype, component::{Component, ComponentId, Components, Tick}, prelude::{AnyOf, Changed, Entity, Or, QueryState, Res, ResMut, Resource, With, Without}, @@ -112,14 +112,15 @@ mod tests { ArchetypeFilter, FilteredAccess, Has, QueryCombinationIter, QueryData, ReadOnlyQueryData, WorldQuery, }, - schedule::{IntoSystemConfigs, Schedule}, + schedule::{IntoScheduleConfigs, Schedule}, storage::{Table, TableRow}, system::{assert_is_system, IntoSystem, Query, System, SystemState}, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; + use alloc::{vec, vec::Vec}; use bevy_ecs_macros::QueryFilter; use core::{any::type_name, fmt::Debug, hash::Hash}; - use std::collections::HashSet; + use std::{collections::HashSet, println}; #[derive(Component, Debug, Hash, Eq, PartialEq, Clone, Copy, PartialOrd, Ord)] struct A(usize); @@ -437,6 +438,18 @@ mod tests { ); } + #[test] + fn get_many_only_mut_checks_duplicates() { + let mut world = World::new(); + let id = world.spawn(A(10)).id(); + let mut query_state = world.query::<&mut A>(); + let mut query = query_state.query_mut(&mut world); + let result = query.get_many([id, id]); + assert_eq!(result, Ok([&A(10), &A(10)])); + let mut_result = query.get_many_mut([id, id]); + assert!(mut_result.is_err()); + } + #[test] fn multi_storage_query() { let mut world = World::new(); @@ -751,8 +764,8 @@ mod tests { let _: Option<&Foo> = q.get(&world, e).ok(); let _: Option<&Foo> = q.get_manual(&world, e).ok(); let _: Option<[&Foo; 1]> = q.get_many(&world, [e]).ok(); - let _: Option<&Foo> = q.get_single(&world).ok(); - let _: &Foo = q.single(&world); + let _: Option<&Foo> = q.single(&world).ok(); + let _: &Foo 
= q.single(&world).unwrap(); // system param let mut q = SystemState::>::new(&mut world); @@ -764,9 +777,8 @@ mod tests { let _: Option<&Foo> = q.get(e).ok(); let _: Option<[&Foo; 1]> = q.get_many([e]).ok(); - let _: Option<&Foo> = q.get_single().ok(); - let _: [&Foo; 1] = q.many([e]); - let _: &Foo = q.single(); + let _: Option<&Foo> = q.single().ok(); + let _: &Foo = q.single().unwrap(); } // regression test for https://github.com/bevyengine/bevy/pull/8029 @@ -804,19 +816,13 @@ mod tests { /// `QueryData` that performs read access on R to test that resource access is tracked struct ReadsRData; - /// `QueryData` that performs write access on R to test that resource access is tracked - struct WritesRData; - /// SAFETY: /// `update_component_access` adds resource read access for `R`. /// `update_archetype_component_access` does nothing, as this accesses no components. unsafe impl WorldQuery for ReadsRData { - type Item<'w> = (); type Fetch<'w> = (); type State = ComponentId; - fn shrink<'wlong: 'wshort, 'wshort>(_item: Self::Item<'wlong>) -> Self::Item<'wshort> {} - fn shrink_fetch<'wlong: 'wshort, 'wshort>(_: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> {} unsafe fn init_fetch<'w>( @@ -846,14 +852,6 @@ mod tests { ) { } - #[inline(always)] - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow, - ) -> Self::Item<'w> { - } - fn update_component_access( &component_id: &Self::State, access: &mut FilteredAccess, @@ -866,7 +864,7 @@ mod tests { } fn init_state(world: &mut World) -> Self::State { - world.components.register_resource::() + world.components_registrator().register_resource::() } fn get_state(components: &Components) -> Option { @@ -883,51 +881,12 @@ mod tests { /// SAFETY: `Self` is the same as `Self::ReadOnly` unsafe impl QueryData for ReadsRData { + const IS_READ_ONLY: bool = true; type ReadOnly = Self; - } - - /// SAFETY: access is read only - unsafe impl ReadOnlyQueryData for ReadsRData {} - - /// SAFETY: - /// 
`update_component_access` adds resource read access for `R`. - /// `update_archetype_component_access` does nothing, as this accesses no components. - unsafe impl WorldQuery for WritesRData { type Item<'w> = (); - type Fetch<'w> = (); - type State = ComponentId; fn shrink<'wlong: 'wshort, 'wshort>(_item: Self::Item<'wlong>) -> Self::Item<'wshort> {} - fn shrink_fetch<'wlong: 'wshort, 'wshort>(_: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> {} - - unsafe fn init_fetch<'w>( - _world: UnsafeWorldCell<'w>, - _state: &Self::State, - _last_run: Tick, - _this_run: Tick, - ) -> Self::Fetch<'w> { - } - - const IS_DENSE: bool = true; - - #[inline] - unsafe fn set_archetype<'w>( - _fetch: &mut Self::Fetch<'w>, - _state: &Self::State, - _archetype: &'w Archetype, - _table: &Table, - ) { - } - - #[inline] - unsafe fn set_table<'w>( - _fetch: &mut Self::Fetch<'w>, - _state: &Self::State, - _table: &'w Table, - ) { - } - #[inline(always)] unsafe fn fetch<'w>( _fetch: &mut Self::Fetch<'w>, @@ -935,38 +894,10 @@ mod tests { _table_row: TableRow, ) -> Self::Item<'w> { } - - fn update_component_access( - &component_id: &Self::State, - access: &mut FilteredAccess, - ) { - assert!( - !access.access().has_resource_read(component_id), - "WritesRData conflicts with a previous access in this query. 
Shared access cannot coincide with exclusive access.", - ); - access.add_resource_write(component_id); - } - - fn init_state(world: &mut World) -> Self::State { - world.components.register_resource::() - } - - fn get_state(components: &Components) -> Option { - components.resource_id::() - } - - fn matches_component_set( - _state: &Self::State, - _set_contains_id: &impl Fn(ComponentId) -> bool, - ) -> bool { - true - } } - /// SAFETY: `Self` is the same as `Self::ReadOnly` - unsafe impl QueryData for WritesRData { - type ReadOnly = ReadsRData; - } + /// SAFETY: access is read only + unsafe impl ReadOnlyQueryData for ReadsRData {} #[test] fn read_res_read_res_no_conflict() { @@ -975,38 +906,13 @@ mod tests { } #[test] - #[should_panic] - fn read_res_write_res_conflict() { - fn system(_q1: Query>, _q2: Query>) {} - assert_is_system(system); - } - - #[test] - #[should_panic] - fn write_res_read_res_conflict() { - fn system(_q1: Query>, _q2: Query>) {} - assert_is_system(system); - } - - #[test] - #[should_panic] - fn write_res_write_res_conflict() { - fn system(_q1: Query>, _q2: Query>) {} - assert_is_system(system); - } - - #[test] - fn read_write_res_sets_archetype_component_access() { + fn read_res_sets_archetype_component_access() { let mut world = World::new(); fn read_query(_q: Query>) {} let mut read_query = IntoSystem::into_system(read_query); read_query.initialize(&mut world); - fn write_query(_q: Query>) {} - let mut write_query = IntoSystem::into_system(write_query); - write_query.initialize(&mut world); - fn read_res(_r: Res) {} let mut read_res = IntoSystem::into_system(read_res); read_res.initialize(&mut world); @@ -1018,14 +924,8 @@ mod tests { assert!(read_query .archetype_component_access() .is_compatible(read_res.archetype_component_access())); - assert!(!write_query - .archetype_component_access() - .is_compatible(read_res.archetype_component_access())); assert!(!read_query .archetype_component_access() 
.is_compatible(write_res.archetype_component_access())); - assert!(!write_query - .archetype_component_access() - .is_compatible(write_res.archetype_component_access())); } } diff --git a/crates/bevy_ecs/src/query/par_iter.rs b/crates/bevy_ecs/src/query/par_iter.rs index b3ea93fbbc514..6683238aa36ee 100644 --- a/crates/bevy_ecs/src/query/par_iter.rs +++ b/crates/bevy_ecs/src/query/par_iter.rs @@ -1,8 +1,13 @@ use crate::{ - batching::BatchingStrategy, component::Tick, world::unsafe_world_cell::UnsafeWorldCell, + batching::BatchingStrategy, + component::Tick, + entity::{EntityEquivalent, UniqueEntityEquivalentVec}, + world::unsafe_world_cell::UnsafeWorldCell, }; -use super::{QueryData, QueryFilter, QueryItem, QueryState}; +use super::{QueryData, QueryFilter, QueryItem, QueryState, ReadOnlyQueryData}; + +use alloc::vec::Vec; /// A parallel iterator over query results of a [`Query`](crate::system::Query). /// @@ -54,7 +59,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { /// fn system(query: Query<&T>){ /// let mut queue: Parallel = Parallel::default(); /// // queue.borrow_local_mut() will get or create a thread_local queue for each task/thread; - /// query.par_iter().for_each_init(|| queue.borrow_local_mut(),|local_queue,item| { + /// query.par_iter().for_each_init(|| queue.borrow_local_mut(),|local_queue, item| { /// **local_queue += 1; /// }); /// @@ -89,7 +94,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { // at the same time. unsafe { self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .into_iter() .fold(init, func); } } @@ -101,7 +107,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { // SAFETY: See the safety comment above. 
unsafe { self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .into_iter() .fold(init, func); } } else { @@ -146,3 +153,311 @@ impl<'w, 's, D: QueryData, F: QueryFilter> QueryParIter<'w, 's, D, F> { .calc_batch_size(max_items, thread_count) } } + +/// A parallel iterator over the unique query items generated from an [`Entity`] list. +/// +/// This struct is created by the [`Query::par_iter_many`] method. +/// +/// [`Entity`]: crate::entity::Entity +/// [`Query::par_iter_many`]: crate::system::Query::par_iter_many +pub struct QueryParManyIter<'w, 's, D: QueryData, F: QueryFilter, E: EntityEquivalent> { + pub(crate) world: UnsafeWorldCell<'w>, + pub(crate) state: &'s QueryState, + pub(crate) entity_list: Vec, + pub(crate) last_run: Tick, + pub(crate) this_run: Tick, + pub(crate) batching_strategy: BatchingStrategy, +} + +impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter, E: EntityEquivalent + Sync> + QueryParManyIter<'w, 's, D, F, E> +{ + /// Changes the batching strategy used when iterating. + /// + /// For more information on how this affects the resultant iteration, see + /// [`BatchingStrategy`]. + pub fn batching_strategy(mut self, strategy: BatchingStrategy) -> Self { + self.batching_strategy = strategy; + self + } + + /// Runs `func` on each query result in parallel. + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each) + Send + Sync + Clone>(self, func: FN) { + self.for_each_init(|| {}, |_, item| func(item)); + } + + /// Runs `func` on each query result in parallel on a value returned by `init`. + /// + /// `init` may be called multiple times per thread, and the values returned may be discarded between tasks on any given thread. 
+ /// Callers should avoid using this function as if it were a parallel version + /// of [`Iterator::fold`]. + /// + /// # Example + /// + /// ``` + /// use bevy_utils::Parallel; + /// use crate::{bevy_ecs::prelude::{Component, Res, Resource, Entity}, bevy_ecs::system::Query}; + /// # use core::slice; + /// use bevy_platform::prelude::Vec; + /// # fn some_expensive_operation(_item: &T) -> usize { + /// # 0 + /// # } + /// + /// #[derive(Component)] + /// struct T; + /// + /// #[derive(Resource)] + /// struct V(Vec); + /// + /// impl<'a> IntoIterator for &'a V { + /// // ... + /// # type Item = &'a Entity; + /// # type IntoIter = slice::Iter<'a, Entity>; + /// # + /// # fn into_iter(self) -> Self::IntoIter { + /// # self.0.iter() + /// # } + /// } + /// + /// fn system(query: Query<&T>, entities: Res){ + /// let mut queue: Parallel = Parallel::default(); + /// // queue.borrow_local_mut() will get or create a thread_local queue for each task/thread; + /// query.par_iter_many(&entities).for_each_init(|| queue.borrow_local_mut(),|local_queue, item| { + /// **local_queue += some_expensive_operation(item); + /// }); + /// + /// // collect value from every thread + /// let final_value: usize = queue.iter_mut().map(|v| *v).sum(); + /// } + /// ``` + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. 
+ /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each_init(self, init: INIT, func: FN) + where + FN: Fn(&mut T, QueryItem<'w, D>) + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + { + let func = |mut init, item| { + func(&mut init, item); + init + }; + #[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] + { + let init = init(); + // SAFETY: + // This method can only be called once per instance of QueryParManyIter, + // which ensures that mutable queries cannot be executed multiple times at once. + // Mutable instances of QueryParManyUniqueIter can only be created via an exclusive borrow of a + // Query or a World, which ensures that multiple aliasing QueryParManyIters cannot exist + // at the same time. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_inner(&self.entity_list) + .fold(init, func); + } + } + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + { + let thread_count = bevy_tasks::ComputeTaskPool::get().thread_num(); + if thread_count <= 1 { + let init = init(); + // SAFETY: See the safety comment above. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_inner(&self.entity_list) + .fold(init, func); + } + } else { + // Need a batch size of at least 1. + let batch_size = self.get_batch_size(thread_count).max(1); + // SAFETY: See the safety comment above. + unsafe { + self.state.par_many_fold_init_unchecked_manual( + init, + self.world, + &self.entity_list, + batch_size, + func, + self.last_run, + self.this_run, + ); + } + } + } + } + + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + fn get_batch_size(&self, thread_count: usize) -> usize { + self.batching_strategy + .calc_batch_size(|| self.entity_list.len(), thread_count) + } +} + +/// A parallel iterator over the unique query items generated from an [`EntitySet`]. 
+/// +/// This struct is created by the [`Query::par_iter_many_unique`] and [`Query::par_iter_many_unique_mut`] methods. +/// +/// [`EntitySet`]: crate::entity::EntitySet +/// [`Query::par_iter_many_unique`]: crate::system::Query::par_iter_many_unique +/// [`Query::par_iter_many_unique_mut`]: crate::system::Query::par_iter_many_unique_mut +pub struct QueryParManyUniqueIter<'w, 's, D: QueryData, F: QueryFilter, E: EntityEquivalent + Sync> +{ + pub(crate) world: UnsafeWorldCell<'w>, + pub(crate) state: &'s QueryState, + pub(crate) entity_list: UniqueEntityEquivalentVec, + pub(crate) last_run: Tick, + pub(crate) this_run: Tick, + pub(crate) batching_strategy: BatchingStrategy, +} + +impl<'w, 's, D: QueryData, F: QueryFilter, E: EntityEquivalent + Sync> + QueryParManyUniqueIter<'w, 's, D, F, E> +{ + /// Changes the batching strategy used when iterating. + /// + /// For more information on how this affects the resultant iteration, see + /// [`BatchingStrategy`]. + pub fn batching_strategy(mut self, strategy: BatchingStrategy) -> Self { + self.batching_strategy = strategy; + self + } + + /// Runs `func` on each query result in parallel. + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each) + Send + Sync + Clone>(self, func: FN) { + self.for_each_init(|| {}, |_, item| func(item)); + } + + /// Runs `func` on each query result in parallel on a value returned by `init`. + /// + /// `init` may be called multiple times per thread, and the values returned may be discarded between tasks on any given thread. + /// Callers should avoid using this function as if it were a parallel version + /// of [`Iterator::fold`]. 
+ /// + /// # Example + /// + /// ``` + /// use bevy_utils::Parallel; + /// use crate::{bevy_ecs::{prelude::{Component, Res, Resource, Entity}, entity::UniqueEntityVec, system::Query}}; + /// # use core::slice; + /// # use crate::bevy_ecs::entity::UniqueEntityIter; + /// # fn some_expensive_operation(_item: &T) -> usize { + /// # 0 + /// # } + /// + /// #[derive(Component)] + /// struct T; + /// + /// #[derive(Resource)] + /// struct V(UniqueEntityVec); + /// + /// impl<'a> IntoIterator for &'a V { + /// // ... + /// # type Item = &'a Entity; + /// # type IntoIter = UniqueEntityIter>; + /// # + /// # fn into_iter(self) -> Self::IntoIter { + /// # self.0.iter() + /// # } + /// } + /// + /// fn system(query: Query<&T>, entities: Res){ + /// let mut queue: Parallel = Parallel::default(); + /// // queue.borrow_local_mut() will get or create a thread_local queue for each task/thread; + /// query.par_iter_many_unique(&entities).for_each_init(|| queue.borrow_local_mut(),|local_queue, item| { + /// **local_queue += some_expensive_operation(item); + /// }); + /// + /// // collect value from every thread + /// let final_value: usize = queue.iter_mut().map(|v| *v).sum(); + /// } + /// ``` + /// + /// # Panics + /// If the [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[inline] + pub fn for_each_init(self, init: INIT, func: FN) + where + FN: Fn(&mut T, QueryItem<'w, D>) + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + { + let func = |mut init, item| { + func(&mut init, item); + init + }; + #[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] + { + let init = init(); + // SAFETY: + // This method can only be called once per instance of QueryParManyUniqueIter, + // which ensures that mutable queries cannot be executed multiple times at once. 
+ // Mutable instances of QueryParManyUniqueIter can only be created via an exclusive borrow of a + // Query or a World, which ensures that multiple aliasing QueryParManyUniqueIters cannot exist + // at the same time. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_unique_inner(self.entity_list) + .fold(init, func); + } + } + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + { + let thread_count = bevy_tasks::ComputeTaskPool::get().thread_num(); + if thread_count <= 1 { + let init = init(); + // SAFETY: See the safety comment above. + unsafe { + self.state + .query_unchecked_manual_with_ticks(self.world, self.last_run, self.this_run) + .iter_many_unique_inner(self.entity_list) + .fold(init, func); + } + } else { + // Need a batch size of at least 1. + let batch_size = self.get_batch_size(thread_count).max(1); + // SAFETY: See the safety comment above. + unsafe { + self.state.par_many_unique_fold_init_unchecked_manual( + init, + self.world, + &self.entity_list, + batch_size, + func, + self.last_run, + self.this_run, + ); + } + } + } + } + + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + fn get_batch_size(&self, thread_count: usize) -> usize { + self.batching_strategy + .calc_batch_size(|| self.entity_list.len(), thread_count) + } +} diff --git a/crates/bevy_ecs/src/query/state.rs b/crates/bevy_ecs/src/query/state.rs index 36d82772d4783..e9a00f4646e1f 100644 --- a/crates/bevy_ecs/src/query/state.rs +++ b/crates/bevy_ecs/src/query/state.rs @@ -1,27 +1,28 @@ use crate::{ archetype::{Archetype, ArchetypeComponentId, ArchetypeGeneration, ArchetypeId}, - batching::BatchingStrategy, component::{ComponentId, Tick}, - entity::{Entity, EntityBorrow, EntitySet}, + entity::{Entity, EntityEquivalent, EntitySet, UniqueEntityArray}, + entity_disabling::DefaultQueryFilters, prelude::FromWorld, - query::{ - Access, DebugCheckedUnwrap, FilteredAccess, QueryCombinationIter, 
QueryIter, QueryParIter, - WorldQuery, - }, + query::{Access, FilteredAccess, QueryCombinationIter, QueryIter, QueryParIter, WorldQuery}, storage::{SparseSetIndex, TableId}, + system::Query, world::{unsafe_world_cell::UnsafeWorldCell, World, WorldId}, }; +#[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] +use crate::entity::UniqueEntityEquivalentSlice; + use alloc::vec::Vec; -#[cfg(feature = "trace")] -use bevy_utils::tracing::Span; -use core::{fmt, mem::MaybeUninit, ptr}; +use core::{fmt, ptr}; use fixedbitset::FixedBitSet; use log::warn; +#[cfg(feature = "trace")] +use tracing::Span; use super::{ - NopWorldQuery, QueryBuilder, QueryData, QueryEntityError, QueryFilter, QueryManyIter, - QueryManyUniqueIter, QuerySingleError, ROQueryItem, + ComponentAccessKind, NopWorldQuery, QueryBuilder, QueryData, QueryEntityError, QueryFilter, + QueryManyIter, QueryManyUniqueIter, QuerySingleError, ROQueryItem, ReadOnlyQueryData, }; /// An ID for either a table or an archetype. Used for Query iteration. @@ -50,9 +51,9 @@ pub(super) union StorageId { /// /// This data is cached between system runs, and is used to: /// - store metadata about which [`Table`] or [`Archetype`] are matched by the query. "Matched" means -/// that the query will iterate over the data in the matched table/archetype. +/// that the query will iterate over the data in the matched table/archetype. /// - cache the [`State`] needed to compute the [`Fetch`] struct used to retrieve data -/// from a specific [`Table`] or [`Archetype`] +/// from a specific [`Table`] or [`Archetype`] /// - build iterators that can iterate over the query results /// /// [`State`]: crate::query::world_query::WorldQuery::State @@ -71,6 +72,9 @@ pub struct QueryState { pub(crate) matched_archetypes: FixedBitSet, /// [`FilteredAccess`] computed by combining the `D` and `F` access. Used to check which other queries /// this query can run in parallel with. 
+ /// Note that because we do a zero-cost reference conversion in `Query::as_readonly`, + /// the access for a read-only query may include accesses for the original mutable version, + /// but the `Query` does not have exclusive access to those components. pub(crate) component_access: FilteredAccess, // NOTE: we maintain both a bitset and a vec because iterating the vec is faster pub(super) matched_storage_ids: Vec, @@ -131,7 +135,7 @@ impl QueryState { /// `NewD` must have a subset of the access that `D` does and match the exact same archetypes/tables /// `NewF` must have a subset of the access that `F` does and match the exact same archetypes/tables pub(crate) unsafe fn as_transmuted_state< - NewD: QueryData, + NewD: ReadOnlyQueryData, NewF: QueryFilter, >( &self, @@ -153,9 +157,7 @@ impl QueryState { pub fn matched_archetypes(&self) -> impl Iterator + '_ { self.matched_archetypes.ones().map(ArchetypeId::new) } -} -impl QueryState { /// Creates a new [`QueryState`] from a given [`World`] and inherits the result of `world.id()`. 
pub fn new(world: &mut World) -> Self { let mut state = Self::new_uninitialized(world); @@ -199,13 +201,10 @@ impl QueryState { } } - if state.component_access.access().has_write_all_resources() { - access.write_all_resources(); - } else { - for component_id in state.component_access.access().resource_writes() { - access.add_resource_write(world.initialize_resource_internal(component_id).id()); - } - } + debug_assert!( + !state.component_access.access().has_any_resource_write(), + "Mutable resource access in queries is not allowed" + ); state } @@ -217,7 +216,7 @@ impl QueryState { fn new_uninitialized(world: &mut World) -> Self { let fetch_state = D::init_state(world); let filter_state = F::init_state(world); - Self::from_states_uninitialized(world.id(), fetch_state, filter_state) + Self::from_states_uninitialized(world, fetch_state, filter_state) } /// Creates a new [`QueryState`] but does not populate it with the matched results from the World yet @@ -228,7 +227,7 @@ impl QueryState { let fetch_state = D::get_state(world.components())?; let filter_state = F::get_state(world.components())?; Some(Self::from_states_uninitialized( - world.id(), + world, fetch_state, filter_state, )) @@ -239,7 +238,7 @@ impl QueryState { /// `new_archetype` and its variants must be called on all of the World's archetypes before the /// state can return valid query results. fn from_states_uninitialized( - world_id: WorldId, + world: &World, fetch_state: ::State, filter_state: ::State, ) -> Self { @@ -258,10 +257,15 @@ impl QueryState { // For queries without dynamic filters the dense-ness of the query is equal to the dense-ness // of its static type parameters. 
- let is_dense = D::IS_DENSE && F::IS_DENSE; + let mut is_dense = D::IS_DENSE && F::IS_DENSE; + + if let Some(default_filters) = world.get_resource::() { + default_filters.modify_access(&mut component_access); + is_dense &= default_filters.is_dense(world.components()); + } Self { - world_id, + world_id: world.id(), archetype_generation: ArchetypeGeneration::initial(), matched_storage_ids: Vec::new(), is_dense, @@ -285,15 +289,24 @@ impl QueryState { let filter_state = F::init_state(builder.world_mut()); D::set_access(&mut fetch_state, builder.access()); + let mut component_access = builder.access().clone(); + + // For dynamic queries the dense-ness is given by the query builder. + let mut is_dense = builder.is_dense(); + + if let Some(default_filters) = builder.world().get_resource::() { + default_filters.modify_access(&mut component_access); + is_dense &= default_filters.is_dense(builder.world().components()); + } + let mut state = Self { world_id: builder.world().id(), archetype_generation: ArchetypeGeneration::initial(), matched_storage_ids: Vec::new(), - // For dynamic queries the dense-ness is given by the query builder. - is_dense: builder.is_dense(), + is_dense, fetch_state, filter_state, - component_access: builder.access().clone(), + component_access, matched_tables: Default::default(), matched_archetypes: Default::default(), #[cfg(feature = "trace")] @@ -307,6 +320,134 @@ impl QueryState { state } + /// Creates a [`Query`] from the given [`QueryState`] and [`World`]. + /// + /// This will create read-only queries, see [`Self::query_mut`] for mutable queries. + pub fn query<'w, 's>(&'s mut self, world: &'w World) -> Query<'w, 's, D::ReadOnly, F> { + self.update_archetypes(world); + self.query_manual(world) + } + + /// Creates a [`Query`] from the given [`QueryState`] and [`World`]. + /// + /// This method is slightly more efficient than [`QueryState::query`] in some situations, since + /// it does not update this instance's internal cache. 
The resulting query may skip an entity that + /// belongs to an archetype that has not been cached. + /// + /// To ensure that the cache is up to date, call [`QueryState::update_archetypes`] before this method. + /// The cache is also updated in [`QueryState::new`], [`QueryState::get`], or any method with mutable + /// access to `self`. + /// + /// This will create read-only queries, see [`Self::query_mut`] for mutable queries. + pub fn query_manual<'w, 's>(&'s self, world: &'w World) -> Query<'w, 's, D::ReadOnly, F> { + self.validate_world(world.id()); + // SAFETY: + // - We have read access to the entire world, and we call `as_readonly()` so the query only performs read access. + // - We called `validate_world`. + unsafe { + self.as_readonly() + .query_unchecked_manual(world.as_unsafe_world_cell_readonly()) + } + } + + /// Creates a [`Query`] from the given [`QueryState`] and [`World`]. + pub fn query_mut<'w, 's>(&'s mut self, world: &'w mut World) -> Query<'w, 's, D, F> { + let last_run = world.last_change_tick(); + let this_run = world.change_tick(); + // SAFETY: We have exclusive access to the entire world. + unsafe { self.query_unchecked_with_ticks(world.as_unsafe_world_cell(), last_run, this_run) } + } + + /// Creates a [`Query`] from the given [`QueryState`] and [`World`]. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + pub unsafe fn query_unchecked<'w, 's>( + &'s mut self, + world: UnsafeWorldCell<'w>, + ) -> Query<'w, 's, D, F> { + self.update_archetypes_unsafe_world_cell(world); + // SAFETY: Caller ensures we have the required access + unsafe { self.query_unchecked_manual(world) } + } + + /// Creates a [`Query`] from the given [`QueryState`] and [`World`]. + /// + /// This method is slightly more efficient than [`QueryState::query_unchecked`] in some situations, since + /// it does not update this instance's internal cache. 
The resulting query may skip an entity that + /// belongs to an archetype that has not been cached. + /// + /// To ensure that the cache is up to date, call [`QueryState::update_archetypes`] before this method. + /// The cache is also updated in [`QueryState::new`], [`QueryState::get`], or any method with mutable + /// access to `self`. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. + pub unsafe fn query_unchecked_manual<'w, 's>( + &'s self, + world: UnsafeWorldCell<'w>, + ) -> Query<'w, 's, D, F> { + let last_run = world.last_change_tick(); + let this_run = world.change_tick(); + // SAFETY: + // - The caller ensured we have the correct access to the world. + // - The caller ensured that the world matches. + unsafe { self.query_unchecked_manual_with_ticks(world, last_run, this_run) } + } + + /// Creates a [`Query`] from the given [`QueryState`] and [`World`]. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + pub unsafe fn query_unchecked_with_ticks<'w, 's>( + &'s mut self, + world: UnsafeWorldCell<'w>, + last_run: Tick, + this_run: Tick, + ) -> Query<'w, 's, D, F> { + self.update_archetypes_unsafe_world_cell(world); + // SAFETY: + // - The caller ensured we have the correct access to the world. + // - We called `update_archetypes_unsafe_world_cell`, which calls `validate_world`. + unsafe { self.query_unchecked_manual_with_ticks(world, last_run, this_run) } + } + + /// Creates a [`Query`] from the given [`QueryState`] and [`World`]. 
+ /// + /// This method is slightly more efficient than [`QueryState::query_unchecked_with_ticks`] in some situations, since + /// it does not update this instance's internal cache. The resulting query may skip an entity that + /// belongs to an archetype that has not been cached. + /// + /// To ensure that the cache is up to date, call [`QueryState::update_archetypes`] before this method. + /// The cache is also updated in [`QueryState::new`], [`QueryState::get`], or any method with mutable + /// access to `self`. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. + pub unsafe fn query_unchecked_manual_with_ticks<'w, 's>( + &'s self, + world: UnsafeWorldCell<'w>, + last_run: Tick, + this_run: Tick, + ) -> Query<'w, 's, D, F> { + // SAFETY: + // - The caller ensured we have the correct access to the world. + // - The caller ensured that the world matches. + unsafe { Query::new(world, self, last_run, this_run) } + } + /// Checks if the query is empty for the given [`World`], where the last change and current tick are given. /// /// This is equivalent to `self.iter().next().is_none()`, and thus the worst case runtime will be `O(n)` @@ -324,15 +465,16 @@ impl QueryState { pub fn is_empty(&self, world: &World, last_run: Tick, this_run: Tick) -> bool { self.validate_world(world.id()); // SAFETY: - // - We have read-only access to the entire world. - // - The world has been validated. + // - We have read access to the entire world, and `is_empty()` only performs read access. + // - We called `validate_world`. 
unsafe { - self.is_empty_unsafe_world_cell( + self.query_unchecked_manual_with_ticks( world.as_unsafe_world_cell_readonly(), last_run, this_run, ) } + .is_empty() } /// Returns `true` if the given [`Entity`] matches the query. @@ -340,41 +482,18 @@ impl QueryState { /// This is always guaranteed to run in `O(1)` time. #[inline] pub fn contains(&self, entity: Entity, world: &World, last_run: Tick, this_run: Tick) -> bool { - // SAFETY: NopFetch does not access any members while &self ensures no one has exclusive access - unsafe { - self.as_nop() - .get_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - entity, - last_run, - this_run, - ) - .is_ok() - } - } - - /// Checks if the query is empty for the given [`UnsafeWorldCell`]. - /// - /// # Safety - /// - /// - `world` must have permission to read any components required by this instance's `F` [`QueryFilter`]. - /// - `world` must match the one used to create this [`QueryState`]. - #[inline] - pub(crate) unsafe fn is_empty_unsafe_world_cell( - &self, - world: UnsafeWorldCell, - last_run: Tick, - this_run: Tick, - ) -> bool { + self.validate_world(world.id()); // SAFETY: - // - The caller ensures that `world` has permission to access any data used by the filter. - // - The caller ensures that the world matches. + // - We have read access to the entire world, and `is_empty()` only performs read access. + // - We called `validate_world`. unsafe { - self.as_nop() - .iter_unchecked_manual(world, last_run, this_run) - .next() - .is_none() + self.query_unchecked_manual_with_ticks( + world.as_unsafe_world_cell_readonly(), + last_run, + this_run, + ) } + .contains(entity) } /// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before querying data, @@ -565,24 +684,22 @@ impl QueryState { access: &mut Access, ) { // As a fast path, we can iterate directly over the components involved - // if the `access` isn't inverted. 
- #[allow(deprecated)] - let (component_reads_and_writes, component_reads_and_writes_inverted) = - self.component_access.access.component_reads_and_writes(); - let (component_writes, component_writes_inverted) = - self.component_access.access.component_writes(); - - if !component_reads_and_writes_inverted && !component_writes_inverted { - component_reads_and_writes.for_each(|id| { - if let Some(id) = archetype.get_archetype_component_id(id) { - access.add_component_read(id); - } - }); - component_writes.for_each(|id| { - if let Some(id) = archetype.get_archetype_component_id(id) { - access.add_component_write(id); + // if the `access` is finite. + if let Ok(iter) = self.component_access.access.try_iter_component_access() { + iter.for_each(|component_access| { + if let Some(id) = archetype.get_archetype_component_id(*component_access.index()) { + match component_access { + ComponentAccessKind::Archetypal(_) => {} + ComponentAccessKind::Shared(_) => { + access.add_component_read(id); + } + ComponentAccessKind::Exclusive(_) => { + access.add_component_write(id); + } + } } }); + return; } @@ -613,7 +730,7 @@ impl QueryState { /// You should not call [`update_archetypes`](Self::update_archetypes) on the returned [`QueryState`] as the result will be unpredictable. /// You might end up with a mix of archetypes that only matched the original query + archetypes that only match /// the new [`QueryState`]. Most of the safe methods on [`QueryState`] call [`QueryState::update_archetypes`] internally, so this - /// best used through a [`Query`](crate::system::Query). 
+ /// best used through a [`Query`] pub fn transmute<'a, NewD: QueryData>( &self, world: impl Into>, @@ -636,7 +753,21 @@ impl QueryState { let mut fetch_state = NewD::get_state(world.components()).expect("Could not create fetch_state, Please initialize all referenced components before transmuting."); let filter_state = NewF::get_state(world.components()).expect("Could not create filter_state, Please initialize all referenced components before transmuting."); - NewD::set_access(&mut fetch_state, &self.component_access); + fn to_readonly(mut access: FilteredAccess) -> FilteredAccess { + access.access_mut().clear_writes(); + access + } + + let self_access = if D::IS_READ_ONLY && self.component_access.access().has_any_write() { + // The current state was transmuted from a mutable + // `QueryData` to a read-only one. + // Ignore any write access in the current state. + &to_readonly(self.component_access.clone()) + } else { + &self.component_access + }; + + NewD::set_access(&mut fetch_state, self_access); NewD::update_component_access(&fetch_state, &mut component_access); let mut filter_component_access = FilteredAccess::default(); @@ -644,7 +775,7 @@ impl QueryState { component_access.extend(&filter_component_access); assert!( - component_access.is_subset(&self.component_access), + component_access.is_subset(self_access), "Transmuted state for {} attempts to access terms that are not allowed by original state {}.", core::any::type_name::<(NewD, NewF)>(), core::any::type_name::<(D, F)>() ); @@ -726,7 +857,31 @@ impl QueryState { let new_filter_state = NewF::get_state(world.components()) .expect("Could not create filter_state, Please initialize all referenced components before transmuting."); - NewD::set_access(&mut new_fetch_state, &self.component_access); + let mut joined_component_access = self.component_access.clone(); + joined_component_access.extend(&other.component_access); + + if D::IS_READ_ONLY && self.component_access.access().has_any_write() + || 
OtherD::IS_READ_ONLY && other.component_access.access().has_any_write() + { + // One of the input states was transmuted from a mutable + // `QueryData` to a read-only one. + // Ignore any write access in that current state. + // The simplest way to do this is to clear *all* writes + // and then add back in any writes that are valid + joined_component_access.access_mut().clear_writes(); + if !D::IS_READ_ONLY { + joined_component_access + .access_mut() + .extend(self.component_access.access()); + } + if !OtherD::IS_READ_ONLY { + joined_component_access + .access_mut() + .extend(other.component_access.access()); + } + } + + NewD::set_access(&mut new_fetch_state, &joined_component_access); NewD::update_component_access(&new_fetch_state, &mut component_access); let mut new_filter_component_access = FilteredAccess::default(); @@ -734,9 +889,6 @@ impl QueryState { component_access.extend(&new_filter_component_access); - let mut joined_component_access = self.component_access.clone(); - joined_component_access.extend(&other.component_access); - assert!( component_access.is_subset(&joined_component_access), "Joined state for {} attempts to access terms that are not allowed by state {} joined with {}.", @@ -800,17 +952,8 @@ impl QueryState { &mut self, world: &'w World, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().get_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - entity, - world.last_change_tick(), - world.read_change_tick(), - ) - } + ) -> Result, QueryEntityError> { + self.query(world).get_inner(entity) } /// Returns the read-only query results for the given array of [`Entity`]. 
@@ -843,27 +986,53 @@ impl QueryState { /// /// let wrong_entity = Entity::from_raw(365); /// - /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::NoSuchEntity(entity, _) => entity, _ => panic!()}, wrong_entity); + /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); /// ``` #[inline] pub fn get_many<'w, const N: usize>( &mut self, world: &'w World, entities: [Entity; N], - ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError<'w>> { - self.update_archetypes(world); + ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError> { + self.query(world).get_many_inner(entities) + } - // SAFETY: - // - We have read-only access to the entire world. - // - `update_archetypes` validates that the `World` matches. - unsafe { - self.get_many_read_only_manual( - world.as_unsafe_world_cell_readonly(), - entities, - world.last_change_tick(), - world.read_change_tick(), - ) - } + /// Returns the read-only query results for the given [`UniqueEntityArray`]. + /// + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is + /// returned instead. 
+ /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&A>(); + /// + /// let component_values = query_state.get_many_unique(&world, entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(0), &A(1), &A(2)]); + /// + /// let wrong_entity = Entity::from_raw(365); + /// + /// assert_eq!(match query_state.get_many_unique(&mut world, UniqueEntityArray::from([wrong_entity])).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); + /// ``` + #[inline] + pub fn get_many_unique<'w, const N: usize>( + &mut self, + world: &'w World, + entities: UniqueEntityArray, + ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError> { + self.query(world).get_many_unique_inner(entities) } /// Gets the query result for the given [`World`] and [`Entity`]. @@ -874,19 +1043,8 @@ impl QueryState { &mut self, world: &'w mut World, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.get_unchecked_manual( - world.as_unsafe_world_cell(), - entity, - last_change_tick, - change_tick, - ) - } + ) -> Result, QueryEntityError> { + self.query_mut(world).get_inner(entity) } /// Returns the query results for the given array of [`Entity`]. 
@@ -923,7 +1081,7 @@ impl QueryState { /// let wrong_entity = Entity::from_raw(57); /// let invalid_entity = world.spawn_empty().id(); /// - /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::NoSuchEntity(entity, _) => entity, _ => panic!()}, wrong_entity); + /// assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); /// assert_eq!(match query_state.get_many_mut(&mut world, [invalid_entity]).unwrap_err() {QueryEntityError::QueryDoesNotMatch(entity, _) => entity, _ => panic!()}, invalid_entity); /// assert_eq!(query_state.get_many_mut(&mut world, [entities[0], entities[0]]).unwrap_err(), QueryEntityError::AliasedMutability(entities[0])); /// ``` @@ -932,21 +1090,53 @@ impl QueryState { &mut self, world: &'w mut World, entities: [Entity; N], - ) -> Result<[D::Item<'w>; N], QueryEntityError<'w>> { - self.update_archetypes(world); + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + self.query_mut(world).get_many_mut_inner(entities) + } - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: method requires exclusive world access - // and world has been validated via update_archetypes - unsafe { - self.get_many_unchecked_manual( - world.as_unsafe_world_cell(), - entities, - last_change_tick, - change_tick, - ) - } + /// Returns the query results for the given [`UniqueEntityArray`]. + /// + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is + /// returned instead. 
+ /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&mut A>(); + /// + /// let mut mutable_component_values = query_state.get_many_unique_mut(&mut world, entity_set).unwrap(); + /// + /// for mut a in &mut mutable_component_values { + /// a.0 += 5; + /// } + /// + /// let component_values = query_state.get_many_unique(&world, entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(5), &A(6), &A(7)]); + /// + /// let wrong_entity = Entity::from_raw(57); + /// let invalid_entity = world.spawn_empty().id(); + /// + /// assert_eq!(match query_state.get_many_unique(&mut world, UniqueEntityArray::from([wrong_entity])).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); + /// assert_eq!(match query_state.get_many_unique_mut(&mut world, UniqueEntityArray::from([invalid_entity])).unwrap_err() {QueryEntityError::QueryDoesNotMatch(entity, _) => entity, _ => panic!()}, invalid_entity); + /// ``` + #[inline] + pub fn get_many_unique_mut<'w, const N: usize>( + &mut self, + world: &'w mut World, + entities: UniqueEntityArray, + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + self.query_mut(world).get_many_unique_inner(entities) } /// Gets the query result for the given [`World`] and [`Entity`]. 
@@ -967,17 +1157,8 @@ impl QueryState { &self, world: &'w World, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.validate_world(world.id()); - // SAFETY: query is read only and world is validated - unsafe { - self.as_readonly().get_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - entity, - world.last_change_tick(), - world.read_change_tick(), - ) - } + ) -> Result, QueryEntityError> { + self.query_manual(world).get_inner(entity) } /// Gets the query result for the given [`World`] and [`Entity`]. @@ -993,187 +1174,43 @@ impl QueryState { &mut self, world: UnsafeWorldCell<'w>, entity: Entity, - ) -> Result, QueryEntityError<'w>> { - self.update_archetypes_unsafe_world_cell(world); - self.get_unchecked_manual(world, entity, world.last_change_tick(), world.change_tick()) + ) -> Result, QueryEntityError> { + self.query_unchecked(world).get_inner(entity) } - /// Gets the query result for the given [`World`] and [`Entity`], where the last change and - /// the current change tick are given. + /// Returns an [`Iterator`] over the query results for the given [`World`]. /// - /// This is always guaranteed to run in `O(1)` time. + /// This can only be called for read-only queries, see [`Self::iter_mut`] for write-queries. + #[inline] + pub fn iter<'w, 's>(&'s mut self, world: &'w World) -> QueryIter<'w, 's, D::ReadOnly, F> { + self.query(world).into_iter() + } + + /// Returns an [`Iterator`] over the query results for the given [`World`]. /// - /// # Safety + /// This iterator is always guaranteed to return results from each matching entity once and only once. + /// Iteration order is not guaranteed. + #[inline] + pub fn iter_mut<'w, 's>(&'s mut self, world: &'w mut World) -> QueryIter<'w, 's, D, F> { + self.query_mut(world).into_iter() + } + + /// Returns an [`Iterator`] over the query results for the given [`World`] without updating the query's archetypes. + /// Archetypes must be manually updated before by using [`Self::update_archetypes`]. 
/// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. + /// This iterator is always guaranteed to return results from each matching entity once and only once. + /// Iteration order is not guaranteed. /// - /// This must be called on the same `World` that the `Query` was generated from: - /// use `QueryState::validate_world` to verify this. - pub(crate) unsafe fn get_unchecked_manual<'w>( - &self, - world: UnsafeWorldCell<'w>, - entity: Entity, - last_run: Tick, - this_run: Tick, - ) -> Result, QueryEntityError<'w>> { - let location = world - .entities() - .get(entity) - .ok_or(QueryEntityError::NoSuchEntity(entity, world))?; - if !self - .matched_archetypes - .contains(location.archetype_id.index()) - { - return Err(QueryEntityError::QueryDoesNotMatch(entity, world)); - } - let archetype = world - .archetypes() - .get(location.archetype_id) - .debug_checked_unwrap(); - let mut fetch = D::init_fetch(world, &self.fetch_state, last_run, this_run); - let mut filter = F::init_fetch(world, &self.filter_state, last_run, this_run); - - let table = world - .storages() - .tables - .get(location.table_id) - .debug_checked_unwrap(); - D::set_archetype(&mut fetch, &self.fetch_state, archetype, table); - F::set_archetype(&mut filter, &self.filter_state, archetype, table); - - if F::filter_fetch(&mut filter, entity, location.table_row) { - Ok(D::fetch(&mut fetch, entity, location.table_row)) - } else { - Err(QueryEntityError::QueryDoesNotMatch(entity, world)) - } + /// This can only be called for read-only queries. + #[inline] + pub fn iter_manual<'w, 's>(&'s self, world: &'w World) -> QueryIter<'w, 's, D::ReadOnly, F> { + self.query_manual(world).into_iter() } - /// Gets the read-only query results for the given [`World`] and array of [`Entity`], where the last change and - /// the current change tick are given. 
+ /// Returns an [`Iterator`] over all possible combinations of `K` query results without repetition. + /// This can only be called for read-only queries. /// - /// # Safety - /// - /// * `world` must have permission to read all of the components returned from this call. - /// No mutable references may coexist with any of the returned references. - /// * This must be called on the same `World` that the `Query` was generated from: - /// use `QueryState::validate_world` to verify this. - pub(crate) unsafe fn get_many_read_only_manual<'w, const N: usize>( - &self, - world: UnsafeWorldCell<'w>, - entities: [Entity; N], - last_run: Tick, - this_run: Tick, - ) -> Result<[ROQueryItem<'w, D>; N], QueryEntityError<'w>> { - let mut values = [(); N].map(|_| MaybeUninit::uninit()); - - for (value, entity) in core::iter::zip(&mut values, entities) { - // SAFETY: fetch is read-only and world must be validated - let item = unsafe { - self.as_readonly() - .get_unchecked_manual(world, entity, last_run, this_run)? - }; - *value = MaybeUninit::new(item); - } - - // SAFETY: Each value has been fully initialized. - Ok(values.map(|x| unsafe { x.assume_init() })) - } - - /// Gets the query results for the given [`World`] and array of [`Entity`], where the last change and - /// the current change tick are given. - /// - /// This is always guaranteed to run in `O(1)` time. - /// - /// # Safety - /// - /// This does not check for unique access to subsets of the entity-component data. - /// To be safe, make sure mutable queries have unique access to the components they query. - /// - /// This must be called on the same `World` that the `Query` was generated from: - /// use `QueryState::validate_world` to verify this. 
- pub(crate) unsafe fn get_many_unchecked_manual<'w, const N: usize>( - &self, - world: UnsafeWorldCell<'w>, - entities: [Entity; N], - last_run: Tick, - this_run: Tick, - ) -> Result<[D::Item<'w>; N], QueryEntityError<'w>> { - // Verify that all entities are unique - for i in 0..N { - for j in 0..i { - if entities[i] == entities[j] { - return Err(QueryEntityError::AliasedMutability(entities[i])); - } - } - } - - let mut values = [(); N].map(|_| MaybeUninit::uninit()); - - for (value, entity) in core::iter::zip(&mut values, entities) { - let item = self.get_unchecked_manual(world, entity, last_run, this_run)?; - *value = MaybeUninit::new(item); - } - - // SAFETY: Each value has been fully initialized. - Ok(values.map(|x| x.assume_init())) - } - - /// Returns an [`Iterator`] over the query results for the given [`World`]. - /// - /// This can only be called for read-only queries, see [`Self::iter_mut`] for write-queries. - #[inline] - pub fn iter<'w, 's>(&'s mut self, world: &'w World) -> QueryIter<'w, 's, D::ReadOnly, F> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } - } - - /// Returns an [`Iterator`] over the query results for the given [`World`]. - /// - /// This iterator is always guaranteed to return results from each matching entity once and only once. - /// Iteration order is not guaranteed. - #[inline] - pub fn iter_mut<'w, 's>(&'s mut self, world: &'w mut World) -> QueryIter<'w, 's, D, F> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.iter_unchecked_manual(world.as_unsafe_world_cell(), last_change_tick, change_tick) - } - } - - /// Returns an [`Iterator`] over the query results for the given [`World`] without updating the query's archetypes. 
- /// Archetypes must be manually updated before by using [`Self::update_archetypes`]. - /// - /// This iterator is always guaranteed to return results from each matching entity once and only once. - /// Iteration order is not guaranteed. - /// - /// This can only be called for read-only queries. - #[inline] - pub fn iter_manual<'w, 's>(&'s self, world: &'w World) -> QueryIter<'w, 's, D::ReadOnly, F> { - self.validate_world(world.id()); - // SAFETY: query is read only and world is validated - unsafe { - self.as_readonly().iter_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } - } - - /// Returns an [`Iterator`] over all possible combinations of `K` query results without repetition. - /// This can only be called for read-only queries. - /// - /// A combination is an arrangement of a collection of items where order does not matter. + /// A combination is an arrangement of a collection of items where order does not matter. /// /// `K` is the number of items that make up each subset, and the number of items returned by the iterator. /// `N` is the number of total entities output by query. @@ -1199,15 +1236,7 @@ impl QueryState { &'s mut self, world: &'w World, ) -> QueryCombinationIter<'w, 's, D::ReadOnly, F, K> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_combinations_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query(world).iter_combinations_inner() } /// Returns an [`Iterator`] over all possible combinations of `K` query results without repetition. 
@@ -1232,17 +1261,7 @@ impl QueryState { &'s mut self, world: &'w mut World, ) -> QueryCombinationIter<'w, 's, D, F, K> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.iter_combinations_unchecked_manual( - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.query_mut(world).iter_combinations_inner() } /// Returns an [`Iterator`] over the read-only query items generated from an [`Entity`] list. @@ -1254,21 +1273,12 @@ impl QueryState { /// /// - [`iter_many_mut`](Self::iter_many_mut) to get mutable query items. #[inline] - pub fn iter_many<'w, 's, EntityList: IntoIterator>( + pub fn iter_many<'w, 's, EntityList: IntoIterator>( &'s mut self, world: &'w World, entities: EntityList, ) -> QueryManyIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_many_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query(world).iter_many_inner(entities) } /// Returns an [`Iterator`] over the read-only query items generated from an [`Entity`] list. @@ -1286,21 +1296,12 @@ impl QueryState { /// - [`iter_many`](Self::iter_many) to update archetypes. /// - [`iter_manual`](Self::iter_manual) to iterate over all query items. 
#[inline] - pub fn iter_many_manual<'w, 's, EntityList: IntoIterator>( + pub fn iter_many_manual<'w, 's, EntityList: IntoIterator>( &'s self, world: &'w World, entities: EntityList, ) -> QueryManyIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.validate_world(world.id()); - // SAFETY: query is read only, world id is validated - unsafe { - self.as_readonly().iter_many_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query_manual(world).iter_many_inner(entities) } /// Returns an iterator over the query items generated from an [`Entity`] list. @@ -1308,23 +1309,12 @@ impl QueryState { /// Items are returned in the order of the list of entities. /// Entities that don't match the query are skipped. #[inline] - pub fn iter_many_mut<'w, 's, EntityList: IntoIterator>( + pub fn iter_many_mut<'w, 's, EntityList: IntoIterator>( &'s mut self, world: &'w mut World, entities: EntityList, ) -> QueryManyIter<'w, 's, D, F, EntityList::IntoIter> { - self.update_archetypes(world); - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: Query has unique world access. - unsafe { - self.iter_many_unchecked_manual( - entities, - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.query_mut(world).iter_many_inner(entities) } /// Returns an [`Iterator`] over the unique read-only query items generated from an [`EntitySet`]. 
@@ -1341,16 +1331,7 @@ impl QueryState { world: &'w World, entities: EntityList, ) -> QueryManyUniqueIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.update_archetypes(world); - // SAFETY: query is read only - unsafe { - self.as_readonly().iter_many_unique_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query(world).iter_many_unique_inner(entities) } /// Returns an [`Iterator`] over the unique read-only query items generated from an [`EntitySet`]. @@ -1374,16 +1355,7 @@ impl QueryState { world: &'w World, entities: EntityList, ) -> QueryManyUniqueIter<'w, 's, D::ReadOnly, F, EntityList::IntoIter> { - self.validate_world(world.id()); - // SAFETY: query is read only, world id is validated - unsafe { - self.as_readonly().iter_many_unique_unchecked_manual( - entities, - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.query_manual(world).iter_many_unique_inner(entities) } /// Returns an iterator over the unique query items generated from an [`EntitySet`]. @@ -1396,18 +1368,7 @@ impl QueryState { world: &'w mut World, entities: EntityList, ) -> QueryManyUniqueIter<'w, 's, D, F, EntityList::IntoIter> { - self.update_archetypes(world); - let last_change_tick = world.last_change_tick(); - let change_tick = world.change_tick(); - // SAFETY: Query has unique world access. - unsafe { - self.iter_many_unique_unchecked_manual( - entities, - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.query_mut(world).iter_many_unique_inner(entities) } /// Returns an [`Iterator`] over the query results for the given [`World`]. 
/// @@ -1423,8 +1384,7 @@ impl QueryState { &'s mut self, world: UnsafeWorldCell<'w>, ) -> QueryIter<'w, 's, D, F> { - self.update_archetypes_unsafe_world_cell(world); - self.iter_unchecked_manual(world, world.last_change_tick(), world.change_tick()) + self.query_unchecked(world).into_iter() } /// Returns an [`Iterator`] over all possible combinations of `K` query results for the @@ -1443,107 +1403,7 @@ impl QueryState { &'s mut self, world: UnsafeWorldCell<'w>, ) -> QueryCombinationIter<'w, 's, D, F, K> { - self.update_archetypes_unsafe_world_cell(world); - self.iter_combinations_unchecked_manual( - world, - world.last_change_tick(), - world.change_tick(), - ) - } - - /// Returns an [`Iterator`] for the given [`World`], where the last change and - /// the current change tick are given. - /// - /// This iterator is always guaranteed to return results from each matching entity once and only once. - /// Iteration order is not guaranteed. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. - /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. - #[inline] - pub(crate) unsafe fn iter_unchecked_manual<'w, 's>( - &'s self, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryIter<'w, 's, D, F> { - QueryIter::new(world, self, last_run, this_run) - } - - /// Returns an [`Iterator`] for the given [`World`] and list of [`Entity`]'s, where the last change and - /// the current change tick are given. - /// - /// This iterator is always guaranteed to return results from each unique pair of matching entities. - /// Iteration order is not guaranteed. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. 
- /// This does not check for entity uniqueness - /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. - #[inline] - pub(crate) unsafe fn iter_many_unchecked_manual<'w, 's, EntityList>( - &'s self, - entities: EntityList, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryManyIter<'w, 's, D, F, EntityList::IntoIter> - where - EntityList: IntoIterator, - { - QueryManyIter::new(world, self, entities, last_run, this_run) - } - - /// Returns an [`Iterator`] for the given [`World`] and an [`EntitySet`], where the last change and - /// the current change tick are given. - /// - /// Items are returned in the order of the list of entities. - /// Entities that don't match the query are skipped. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. - /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. - #[inline] - pub(crate) unsafe fn iter_many_unique_unchecked_manual<'w, 's, EntityList: EntitySet>( - &'s self, - entities: EntityList, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryManyUniqueIter<'w, 's, D, F, EntityList::IntoIter> { - QueryManyUniqueIter::new(world, self, entities, last_run, this_run) - } - - /// Returns an [`Iterator`] over all possible combinations of `K` query results for the - /// given [`World`] without repetition. - /// This can only be called for read-only queries. - /// - /// This iterator is always guaranteed to return results from each unique pair of matching entities. - /// Iteration order is not guaranteed. - /// - /// # Safety - /// - /// This does not check for mutable query correctness. To be safe, make sure mutable queries - /// have unique access to the components they query. 
- /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` - /// with a mismatched [`WorldId`] is unsound. - #[inline] - pub(crate) unsafe fn iter_combinations_unchecked_manual<'w, 's, const K: usize>( - &'s self, - world: UnsafeWorldCell<'w>, - last_run: Tick, - this_run: Tick, - ) -> QueryCombinationIter<'w, 's, D, F, K> { - QueryCombinationIter::new(world, self, last_run, this_run) + self.query_unchecked(world).iter_combinations_inner() } /// Returns a parallel iterator over the query results for the given [`World`]. @@ -1559,14 +1419,7 @@ impl QueryState { &'s mut self, world: &'w World, ) -> QueryParIter<'w, 's, D::ReadOnly, F> { - self.update_archetypes(world); - QueryParIter { - world: world.as_unsafe_world_cell_readonly(), - state: self.as_readonly(), - last_run: world.last_change_tick(), - this_run: world.read_change_tick(), - batching_strategy: BatchingStrategy::new(), - } + self.query(world).par_iter_inner() } /// Returns a parallel iterator over the query results for the given [`World`]. 
@@ -1602,7 +1455,7 @@ impl QueryState { /// # let wrong_entity = Entity::from_raw(57); /// # let invalid_entity = world.spawn_empty().id(); /// - /// # assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::NoSuchEntity(entity, _) => entity, _ => panic!()}, wrong_entity); + /// # assert_eq!(match query_state.get_many(&mut world, [wrong_entity]).unwrap_err() {QueryEntityError::EntityDoesNotExist(error) => error.entity, _ => panic!()}, wrong_entity); /// assert_eq!(match query_state.get_many_mut(&mut world, [invalid_entity]).unwrap_err() {QueryEntityError::QueryDoesNotMatch(entity, _) => entity, _ => panic!()}, invalid_entity); /// # assert_eq!(query_state.get_many_mut(&mut world, [entities[0], entities[0]]).unwrap_err(), QueryEntityError::AliasedMutability(entities[0])); /// ``` @@ -1615,16 +1468,7 @@ impl QueryState { /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool #[inline] pub fn par_iter_mut<'w, 's>(&'s mut self, world: &'w mut World) -> QueryParIter<'w, 's, D, F> { - self.update_archetypes(world); - let this_run = world.change_tick(); - let last_run = world.last_change_tick(); - QueryParIter { - world: world.as_unsafe_world_cell(), - state: self, - last_run, - this_run, - batching_strategy: BatchingStrategy::new(), - } + self.query_mut(world).par_iter_inner() } /// Runs `func` on each query result in parallel for the given [`World`], where the last change and @@ -1657,7 +1501,8 @@ impl QueryState { INIT: Fn() -> T + Sync + Send + Clone, { // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: - // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter,QueryState::par_fold_init_unchecked_manual + // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter,QueryState::par_fold_init_unchecked_manual, + // QueryState::par_many_fold_init_unchecked_manual, QueryState::par_many_unique_fold_init_unchecked_manual use arrayvec::ArrayVec; 
bevy_tasks::ComputeTaskPool::get().scope(|scope| { @@ -1678,7 +1523,9 @@ impl QueryState { scope.spawn(async move { #[cfg(feature = "trace")] let _span = self.par_iter_span.enter(); - let mut iter = self.iter_unchecked_manual(world, last_run, this_run); + let mut iter = self + .query_unchecked_manual_with_ticks(world, last_run, this_run) + .into_iter(); let mut accum = init_accum(); for storage_id in queue { accum = iter.fold_over_storage_range(accum, &mut func, storage_id, None); @@ -1697,7 +1544,8 @@ impl QueryState { #[cfg(feature = "trace")] let _span = self.par_iter_span.enter(); let accum = init_accum(); - self.iter_unchecked_manual(world, last_run, this_run) + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .into_iter() .fold_over_storage_range(accum, &mut func, storage_id, Some(batch)); }); } @@ -1737,89 +1585,246 @@ impl QueryState { }); } - /// Returns a single immutable query result when there is exactly one entity matching - /// the query. + /// Runs `func` on each query result in parallel for the given [`EntitySet`], + /// where the last change and the current change tick are given. This is faster than the + /// equivalent `iter_many_unique()` method, but cannot be chained like a normal [`Iterator`]. /// - /// This can only be called for read-only queries, - /// see [`single_mut`](Self::single_mut) for write-queries. + /// # Panics + /// The [`ComputeTaskPool`] is not initialized. If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. + /// + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. 
+ /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + pub(crate) unsafe fn par_many_unique_fold_init_unchecked_manual<'w, T, FN, INIT, E>( + &self, + init_accum: INIT, + world: UnsafeWorldCell<'w>, + entity_list: &UniqueEntityEquivalentSlice, + batch_size: usize, + mut func: FN, + last_run: Tick, + this_run: Tick, + ) where + FN: Fn(T, D::Item<'w>) -> T + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + E: EntityEquivalent + Sync, + { + // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: + // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter,QueryState::par_fold_init_unchecked_manual + // QueryState::par_many_fold_init_unchecked_manual, QueryState::par_many_unique_fold_init_unchecked_manual + + bevy_tasks::ComputeTaskPool::get().scope(|scope| { + let chunks = entity_list.chunks_exact(batch_size); + let remainder = chunks.remainder(); + + for batch in chunks { + let mut func = func.clone(); + let init_accum = init_accum.clone(); + scope.spawn(async move { + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_unique_inner(batch) + .fold(accum, &mut func); + }); + } + + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_unique_inner(remainder) + .fold(accum, &mut func); + }); + } +} + +impl QueryState { + /// Runs `func` on each read-only query result in parallel for the given [`Entity`] list, + /// where the last change and the current change tick are given. This is faster than the equivalent + /// `iter_many()` method, but cannot be chained like a normal [`Iterator`]. /// /// # Panics + /// The [`ComputeTaskPool`] is not initialized. 
If using this from a query that is being + /// initialized and run from the ECS scheduler, this should never panic. /// - /// Panics if the number of query results is not exactly one. Use - /// [`get_single`](Self::get_single) to return a `Result` instead of panicking. - #[track_caller] - #[inline] - pub fn single<'w>(&mut self, world: &'w World) -> ROQueryItem<'w, D> { - match self.get_single(world) { - Ok(items) => items, - Err(error) => panic!("Cannot get single query result: {error}"), - } + /// # Safety + /// + /// This does not check for mutable query correctness. To be safe, make sure mutable queries + /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. + /// + /// [`ComputeTaskPool`]: bevy_tasks::ComputeTaskPool + #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] + pub(crate) unsafe fn par_many_fold_init_unchecked_manual<'w, T, FN, INIT, E>( + &self, + init_accum: INIT, + world: UnsafeWorldCell<'w>, + entity_list: &[E], + batch_size: usize, + mut func: FN, + last_run: Tick, + this_run: Tick, + ) where + FN: Fn(T, D::Item<'w>) -> T + Send + Sync + Clone, + INIT: Fn() -> T + Sync + Send + Clone, + E: EntityEquivalent + Sync, + { + // NOTE: If you are changing query iteration code, remember to update the following places, where relevant: + // QueryIter, QueryIterationCursor, QueryManyIter, QueryCombinationIter, QueryState::par_fold_init_unchecked_manual + // QueryState::par_many_fold_init_unchecked_manual, QueryState::par_many_unique_fold_init_unchecked_manual + + bevy_tasks::ComputeTaskPool::get().scope(|scope| { + let chunks = entity_list.chunks_exact(batch_size); + let remainder = chunks.remainder(); + + for batch in chunks { + let mut func = func.clone(); + let init_accum = init_accum.clone(); + scope.spawn(async move { + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + 
let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_inner(batch) + .fold(accum, &mut func); + }); + } + + #[cfg(feature = "trace")] + let _span = self.par_iter_span.enter(); + let accum = init_accum(); + self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .iter_many_inner(remainder) + .fold(accum, &mut func); + }); } +} +impl QueryState { /// Returns a single immutable query result when there is exactly one entity matching /// the query. /// /// This can only be called for read-only queries, - /// see [`get_single_mut`](Self::get_single_mut) for write-queries. + /// see [`single_mut`](Self::single_mut) for write-queries. /// /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned /// instead. + /// + /// # Example + /// + /// Sometimes, you might want to handle the error in a specific way, + /// generally by spawning the missing entity. + /// + /// ```rust + /// use bevy_ecs::prelude::*; + /// use bevy_ecs::query::QuerySingleError; + /// + /// #[derive(Component)] + /// struct A(usize); + /// + /// fn my_system(query: Query<&A>, mut commands: Commands) { + /// match query.single() { + /// Ok(a) => (), // Do something with `a` + /// Err(err) => match err { + /// QuerySingleError::NoEntities(_) => { + /// commands.spawn(A(0)); + /// } + /// QuerySingleError::MultipleEntities(_) => panic!("Multiple entities found!"), + /// }, + /// } + /// } + /// ``` + /// + /// However in most cases, this error can simply be handled with a graceful early return. 
+ /// If this is an expected failure mode, you can do this using the `let else` pattern like so: + /// ```rust + /// use bevy_ecs::prelude::*; + /// + /// #[derive(Component)] + /// struct A(usize); + /// + /// fn my_system(query: Query<&A>) { + /// let Ok(a) = query.single() else { + /// return; + /// }; + /// + /// // Do something with `a` + /// } + /// ``` + /// + /// If this is unexpected though, you should probably use the `?` operator + /// in combination with Bevy's error handling apparatus. + /// + /// ```rust + /// use bevy_ecs::prelude::*; + /// + /// #[derive(Component)] + /// struct A(usize); + /// + /// fn my_system(query: Query<&A>) -> Result { + /// let a = query.single()?; + /// + /// // Do something with `a` + /// Ok(()) + /// } + /// ``` + /// + /// This allows you to globally control how errors are handled in your application, + /// by setting up a custom error handler. + /// See the [`bevy_ecs::error`] module docs for more information! + /// Commonly, you might want to panic on an error during development, but log the error and continue + /// execution in production. + /// + /// Simply unwrapping the [`Result`] also works, but should generally be reserved for tests. + #[inline] + pub fn single<'w>(&mut self, world: &'w World) -> Result, QuerySingleError> { + self.query(world).single_inner() + } + + /// A deprecated alias for [`QueryState::single`]. + #[deprecated(since = "0.16.0", note = "Please use `single` instead.")] #[inline] pub fn get_single<'w>( &mut self, world: &'w World, ) -> Result, QuerySingleError> { - self.update_archetypes(world); - - // SAFETY: query is read only - unsafe { - self.as_readonly().get_single_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - world.last_change_tick(), - world.read_change_tick(), - ) - } + self.single(world) } /// Returns a single mutable query result when there is exactly one entity matching /// the query. 
/// - /// # Panics + /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned + /// instead. /// - /// Panics if the number of query results is not exactly one. Use - /// [`get_single_mut`](Self::get_single_mut) to return a `Result` instead of panicking. - #[track_caller] + /// # Examples + /// + /// Please see [`Query::single`] for advice on handling the error. #[inline] - pub fn single_mut<'w>(&mut self, world: &'w mut World) -> D::Item<'w> { - // SAFETY: query has unique world access - match self.get_single_mut(world) { - Ok(items) => items, - Err(error) => panic!("Cannot get single query result: {error}"), - } + pub fn single_mut<'w>( + &mut self, + world: &'w mut World, + ) -> Result, QuerySingleError> { + self.query_mut(world).single_inner() } - /// Returns a single mutable query result when there is exactly one entity matching - /// the query. - /// - /// If the number of query results is not exactly one, a [`QuerySingleError`] is returned - /// instead. - #[inline] + /// A deprecated alias for [`QueryState::single_mut`]. + #[deprecated(since = "0.16.0", note = "Please use `single` instead.")] pub fn get_single_mut<'w>( &mut self, world: &'w mut World, ) -> Result, QuerySingleError> { - self.update_archetypes(world); - - let change_tick = world.change_tick(); - let last_change_tick = world.last_change_tick(); - // SAFETY: query has unique world access - unsafe { - self.get_single_unchecked_manual( - world.as_unsafe_world_cell(), - last_change_tick, - change_tick, - ) - } + self.single_mut(world) } /// Returns a query result when there is exactly one entity matching the query. @@ -1832,12 +1837,11 @@ impl QueryState { /// This does not check for mutable query correctness. To be safe, make sure mutable queries /// have unique access to the components they query. 
#[inline] - pub unsafe fn get_single_unchecked<'w>( + pub unsafe fn single_unchecked<'w>( &mut self, world: UnsafeWorldCell<'w>, ) -> Result, QuerySingleError> { - self.update_archetypes_unsafe_world_cell(world); - self.get_single_unchecked_manual(world, world.last_change_tick(), world.change_tick()) + self.query_unchecked(world).single_inner() } /// Returns a query result when there is exactly one entity matching the query, @@ -1850,24 +1854,20 @@ impl QueryState { /// /// This does not check for mutable query correctness. To be safe, make sure mutable queries /// have unique access to the components they query. + /// This does not validate that `world.id()` matches `self.world_id`. Calling this on a `world` + /// with a mismatched [`WorldId`] is unsound. #[inline] - pub unsafe fn get_single_unchecked_manual<'w>( + pub unsafe fn single_unchecked_manual<'w>( &self, world: UnsafeWorldCell<'w>, last_run: Tick, this_run: Tick, ) -> Result, QuerySingleError> { - let mut query = self.iter_unchecked_manual(world, last_run, this_run); - let first = query.next(); - let extra = query.next().is_some(); - - match (first, extra) { - (Some(r), false) => Ok(r), - (None, _) => Err(QuerySingleError::NoEntities(core::any::type_name::())), - (Some(_), _) => Err(QuerySingleError::MultipleEntities(core::any::type_name::< - Self, - >())), - } + // SAFETY: + // - The caller ensured we have the correct access to the world. + // - The caller ensured that the world matches. 
+ self.query_unchecked_manual_with_ticks(world, last_run, this_run) + .single_inner() } } @@ -1879,85 +1879,14 @@ impl From> for QueryState = (0..10).map(|_| world.spawn_empty().id()).collect(); - - let query_state = world.query::(); - - // These don't matter for the test - let last_change_tick = world.last_change_tick(); - let change_tick = world.change_tick(); - - // It's best to test get_many_unchecked_manual directly, - // as it is shared and unsafe - // We don't care about aliased mutability for the read-only equivalent - - // SAFETY: Query does not access world data. - assert!(unsafe { - query_state - .get_many_unchecked_manual::<10>( - world.as_unsafe_world_cell_readonly(), - entities.clone().try_into().unwrap(), - last_change_tick, - change_tick, - ) - .is_ok() - }); - - assert_eq!( - // SAFETY: Query does not access world data. - unsafe { - query_state - .get_many_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - [entities[0], entities[0]], - last_change_tick, - change_tick, - ) - .unwrap_err() - }, - QueryEntityError::AliasedMutability(entities[0]) - ); - - assert_eq!( - // SAFETY: Query does not access world data. - unsafe { - query_state - .get_many_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - [entities[0], entities[1], entities[0]], - last_change_tick, - change_tick, - ) - .unwrap_err() - }, - QueryEntityError::AliasedMutability(entities[0]) - ); - - assert_eq!( - // SAFETY: Query does not access world data. 
- unsafe { - query_state - .get_many_unchecked_manual( - world.as_unsafe_world_cell_readonly(), - [entities[9], entities[9]], - last_change_tick, - change_tick, - ) - .unwrap_err() - }, - QueryEntityError::AliasedMutability(entities[9]) - ); - } - #[test] #[should_panic] fn right_world_get() { @@ -2005,7 +1934,7 @@ mod tests { let query_state = world.query::<(&A, &B)>(); let mut new_query_state = query_state.transmute::<&A>(&world); assert_eq!(new_query_state.iter(&world).len(), 1); - let a = new_query_state.single(&world); + let a = new_query_state.single(&world).unwrap(); assert_eq!(a.0, 1); } @@ -2019,7 +1948,7 @@ mod tests { let query_state = world.query_filtered::<(&A, &B), Without>(); let mut new_query_state = query_state.transmute::<&A>(&world); // even though we change the query to not have Without, we do not get the component with C. - let a = new_query_state.single(&world); + let a = new_query_state.single(&world).unwrap(); assert_eq!(a.0, 0); } @@ -2032,7 +1961,7 @@ mod tests { let q = world.query::<()>(); let mut q = q.transmute::(&world); - assert_eq!(q.single(&world), entity); + assert_eq!(q.single(&world).unwrap(), entity); } #[test] @@ -2042,7 +1971,7 @@ mod tests { let q = world.query::<&A>(); let mut new_q = q.transmute::>(&world); - assert!(new_q.single(&world).is_added()); + assert!(new_q.single(&world).unwrap().is_added()); let q = world.query::>(); let _ = q.transmute::<&A>(&world); @@ -2113,7 +2042,7 @@ mod tests { let query_state = world.query::>(); let mut new_query_state = query_state.transmute::<&A>(&world); - let x = new_query_state.single(&world); + let x = new_query_state.single(&world).unwrap(); assert_eq!(x.0, 1234); } @@ -2138,7 +2067,7 @@ mod tests { let mut query = query; // Our result is completely untyped - let entity_ref = query.single(&world); + let entity_ref = query.single(&world).unwrap(); assert_eq!(entity, entity_ref.id()); assert_eq!(0, entity_ref.get::().unwrap().0); @@ -2153,16 +2082,16 @@ mod tests { let mut query = 
QueryState::<(Entity, &A, Has)>::new(&mut world) .transmute_filtered::<(Entity, Has), Added>(&world); - assert_eq!((entity_a, false), query.single(&world)); + assert_eq!((entity_a, false), query.single(&world).unwrap()); world.clear_trackers(); let entity_b = world.spawn((A(0), B(0))).id(); - assert_eq!((entity_b, true), query.single(&world)); + assert_eq!((entity_b, true), query.single(&world).unwrap()); world.clear_trackers(); - assert!(query.get_single(&world).is_err()); + assert!(query.single(&world).is_err()); } #[test] @@ -2174,15 +2103,15 @@ mod tests { .transmute_filtered::>(&world); let mut change_query = QueryState::<&mut A>::new(&mut world); - assert_eq!(entity_a, detection_query.single(&world)); + assert_eq!(entity_a, detection_query.single(&world).unwrap()); world.clear_trackers(); - assert!(detection_query.get_single(&world).is_err()); + assert!(detection_query.single(&world).is_err()); - change_query.single_mut(&mut world).0 = 1; + change_query.single_mut(&mut world).unwrap().0 = 1; - assert_eq!(entity_a, detection_query.single(&world)); + assert_eq!(entity_a, detection_query.single(&world).unwrap()); } #[test] @@ -2197,6 +2126,23 @@ mod tests { let _new_query = query.transmute_filtered::>(&world); } + #[test] + #[should_panic( + expected = "Transmuted state for (&mut bevy_ecs::query::state::tests::A, ()) attempts to access terms that are not allowed by original state (&bevy_ecs::query::state::tests::A, ())." + )] + fn cannot_transmute_mutable_after_readonly() { + let mut world = World::new(); + // Calling this method would mean we had aliasing queries. 
+ fn bad(_: Query<&mut A>, _: Query<&A>) {} + world + .run_system_once(|query: Query<&mut A>| { + let mut readonly = query.as_readonly(); + let mut lens: QueryLens<&mut A> = readonly.transmute_lens(); + bad(lens.query(), query.as_readonly()); + }) + .unwrap(); + } + // Regression test for #14629 #[test] #[should_panic] @@ -2269,7 +2215,7 @@ mod tests { let query_2 = QueryState::<&B, Without>::new(&mut world); let mut new_query: QueryState = query_1.join_filtered(&world, &query_2); - assert_eq!(new_query.single(&world), entity_ab); + assert_eq!(new_query.single(&world).unwrap(), entity_ab); } #[test] @@ -2314,4 +2260,107 @@ mod tests { let query_2 = QueryState::<&B, Without>::new(&mut world); let _: QueryState> = query_1.join_filtered(&world, &query_2); } + + #[test] + #[should_panic( + expected = "Joined state for ((&mut bevy_ecs::query::state::tests::A, &mut bevy_ecs::query::state::tests::B), ()) attempts to access terms that are not allowed by state (&bevy_ecs::query::state::tests::A, ()) joined with (&mut bevy_ecs::query::state::tests::B, ())." + )] + fn cannot_join_mutable_after_readonly() { + let mut world = World::new(); + // Calling this method would mean we had aliasing queries. 
+ fn bad(_: Query<(&mut A, &mut B)>, _: Query<&A>) {} + world + .run_system_once(|query_a: Query<&mut A>, mut query_b: Query<&mut B>| { + let mut readonly = query_a.as_readonly(); + let mut lens: QueryLens<(&mut A, &mut B)> = readonly.join(&mut query_b); + bad(lens.query(), query_a.as_readonly()); + }) + .unwrap(); + } + + #[test] + fn join_to_filtered_entity_mut() { + let mut world = World::new(); + world.spawn((A(2), B(3))); + + let query_1 = QueryState::<&mut A>::new(&mut world); + let query_2 = QueryState::<&mut B>::new(&mut world); + let mut new_query: QueryState = query_1.join(&world, &query_2); + + let mut entity = new_query.single_mut(&mut world).unwrap(); + assert!(entity.get_mut::().is_some()); + assert!(entity.get_mut::().is_some()); + } + + #[test] + fn query_respects_default_filters() { + let mut world = World::new(); + world.spawn((A(0), B(0))); + world.spawn((B(0), C(0))); + world.spawn(C(0)); + + let mut df = DefaultQueryFilters::empty(); + df.register_disabling_component(world.register_component::()); + world.insert_resource(df); + + // Without only matches the first entity + let mut query = QueryState::<()>::new(&mut world); + assert_eq!(1, query.iter(&world).count()); + + // With matches the last two entities + let mut query = QueryState::<(), With>::new(&mut world); + assert_eq!(2, query.iter(&world).count()); + + // Has should bypass the filter entirely + let mut query = QueryState::>::new(&mut world); + assert_eq!(3, query.iter(&world).count()); + + // Other filters should still be respected + let mut query = QueryState::, Without>::new(&mut world); + assert_eq!(1, query.iter(&world).count()); + } + + #[derive(Component)] + struct Table; + + #[derive(Component)] + #[component(storage = "SparseSet")] + struct Sparse; + + #[test] + fn query_default_filters_updates_is_dense() { + let mut world = World::new(); + world.spawn((Table, Sparse)); + world.spawn(Table); + world.spawn(Sparse); + + let mut query = QueryState::<()>::new(&mut world); + // 
There are no sparse components involved thus the query is dense + assert!(query.is_dense); + assert_eq!(3, query.iter(&world).count()); + + let mut df = DefaultQueryFilters::empty(); + df.register_disabling_component(world.register_component::()); + world.insert_resource(df); + + let mut query = QueryState::<()>::new(&mut world); + // The query doesn't ask for sparse components, but the default filters adds + // a sparse components thus it is NOT dense + assert!(!query.is_dense); + assert_eq!(1, query.iter(&world).count()); + + let mut df = DefaultQueryFilters::empty(); + df.register_disabling_component(world.register_component::()); + world.insert_resource(df); + + let mut query = QueryState::<()>::new(&mut world); + // If the filter is instead a table components, the query can still be dense + assert!(query.is_dense); + assert_eq!(1, query.iter(&world).count()); + + let mut query = QueryState::<&Sparse>::new(&mut world); + // But only if the original query was dense + assert!(!query.is_dense); + assert_eq!(1, query.iter(&world).count()); + } } diff --git a/crates/bevy_ecs/src/query/world_query.rs b/crates/bevy_ecs/src/query/world_query.rs index c805e8dec7213..da147770e0fcf 100644 --- a/crates/bevy_ecs/src/query/world_query.rs +++ b/crates/bevy_ecs/src/query/world_query.rs @@ -1,9 +1,8 @@ use crate::{ archetype::Archetype, component::{ComponentId, Components, Tick}, - entity::Entity, query::FilteredAccess, - storage::{Table, TableRow}, + storage::Table, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; use variadics_please::all_tuples; @@ -14,11 +13,11 @@ use variadics_please::all_tuples; /// # Safety /// /// Implementor must ensure that -/// [`update_component_access`], [`matches_component_set`], and [`fetch`] +/// [`update_component_access`], [`matches_component_set`], [`QueryData::fetch`], [`QueryFilter::filter_fetch`] and [`init_fetch`] /// obey the following: /// -/// - For each component mutably accessed by [`fetch`], [`update_component_access`] should 
add write access unless read or write access has already been added, in which case it should panic. -/// - For each component readonly accessed by [`fetch`], [`update_component_access`] should add read access unless write access has already been added, in which case it should panic. +/// - For each component mutably accessed by [`QueryData::fetch`], [`update_component_access`] should add write access unless read or write access has already been added, in which case it should panic. +/// - For each component readonly accessed by [`QueryData::fetch`] or [`QueryFilter::filter_fetch`], [`update_component_access`] should add read access unless write access has already been added, in which case it should panic. /// - If `fetch` mutably accesses the same component twice, [`update_component_access`] should panic. /// - [`update_component_access`] may not add a `Without` filter for a component unless [`matches_component_set`] always returns `false` when the component set contains that component. /// - [`update_component_access`] may not add a `With` filter for a component unless [`matches_component_set`] always returns `false` when the component set doesn't contain that component. @@ -26,12 +25,13 @@ use variadics_please::all_tuples; /// - [`matches_component_set`] must be a disjunction of the element's implementations /// - [`update_component_access`] must replace the filters with a disjunction of filters /// - Each filter in that disjunction must be a conjunction of the corresponding element's filter with the previous `access` -/// - For each resource mutably accessed by [`init_fetch`], [`update_component_access`] should add write access unless read or write access has already been added, in which case it should panic. -/// - For each resource readonly accessed by [`init_fetch`], [`update_component_access`] should add read access unless write access has already been added, in which case it should panic. 
+/// - For each resource readonly accessed by [`init_fetch`], [`update_component_access`] should add read access. +/// - Mutable resource access is not allowed. /// /// When implementing [`update_component_access`], note that `add_read` and `add_write` both also add a `With` filter, whereas `extend_access` does not change the filters. /// -/// [`fetch`]: Self::fetch +/// [`QueryData::fetch`]: crate::query::QueryData::fetch +/// [`QueryFilter::filter_fetch`]: crate::query::QueryFilter::filter_fetch /// [`init_fetch`]: Self::init_fetch /// [`matches_component_set`]: Self::matches_component_set /// [`Query`]: crate::system::Query @@ -39,13 +39,7 @@ use variadics_please::all_tuples; /// [`QueryData`]: crate::query::QueryData /// [`QueryFilter`]: crate::query::QueryFilter pub unsafe trait WorldQuery { - /// The item returned by this [`WorldQuery`] - /// For `QueryData` this will be the item returned by the query. - /// For `QueryFilter` this will be either `()`, or a `bool` indicating whether the entity should be included - /// or a tuple of such things. - type Item<'a>; - - /// Per archetype/table state used by this [`WorldQuery`] to fetch [`Self::Item`](WorldQuery::Item) + /// Per archetype/table state retrieved by this [`WorldQuery`] to compute [`Self::Item`](crate::query::QueryData::Item) for each entity. type Fetch<'a>: Clone; /// State used to construct a [`Self::Fetch`](WorldQuery::Fetch). This will be cached inside [`QueryState`](crate::query::QueryState), @@ -53,18 +47,19 @@ pub unsafe trait WorldQuery { /// constructing [`Self::Fetch`](WorldQuery::Fetch). type State: Send + Sync + Sized; - /// This function manually implements subtyping for the query items. - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort>; - /// This function manually implements subtyping for the query fetches. fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort>; - /// Creates a new instance of this fetch. 
+ /// Creates a new instance of [`Self::Fetch`](WorldQuery::Fetch), + /// by combining data from the [`World`] with the cached [`Self::State`](WorldQuery::State). + /// Readonly accesses resources registered in [`WorldQuery::update_component_access`]. /// /// # Safety /// /// - `state` must have been initialized (via [`WorldQuery::init_state`]) using the same `world` passed /// in to this function. + /// - `world` must have the **right** to access any access registered in `update_component_access`. + /// - There must not be simultaneous resource access conflicting with readonly resource access registered in [`WorldQuery::update_component_access`]. unsafe fn init_fetch<'w>( world: UnsafeWorldCell<'w>, state: &Self::State, @@ -73,10 +68,12 @@ pub unsafe trait WorldQuery { ) -> Self::Fetch<'w>; /// Returns true if (and only if) every table of every archetype matched by this fetch contains - /// all of the matched components. This is used to select a more efficient "table iterator" + /// all of the matched components. + /// + /// This is used to select a more efficient "table iterator" /// for "dense" queries. If this returns true, [`WorldQuery::set_table`] must be used before - /// [`WorldQuery::fetch`] can be called for iterators. If this returns false, - /// [`WorldQuery::set_archetype`] must be used before [`WorldQuery::fetch`] can be called for + /// [`QueryData::fetch`](crate::query::QueryData::fetch) can be called for iterators. If this returns false, + /// [`WorldQuery::set_archetype`] must be used before [`QueryData::fetch`](crate::query::QueryData::fetch) can be called for /// iterators. 
const IS_DENSE: bool; @@ -110,21 +107,6 @@ pub unsafe trait WorldQuery { /// Called when constructing a [`QueryLens`](crate::system::QueryLens) or calling [`QueryState::from_builder`](super::QueryState::from_builder) fn set_access(_state: &mut Self::State, _access: &FilteredAccess) {} - /// Fetch [`Self::Item`](`WorldQuery::Item`) for either the given `entity` in the current [`Table`], - /// or for the given `entity` in the current [`Archetype`]. This must always be called after - /// [`WorldQuery::set_table`] with a `table_row` in the range of the current [`Table`] or after - /// [`WorldQuery::set_archetype`] with a `entity` in the current archetype. - /// - /// # Safety - /// - /// Must always be called _after_ [`WorldQuery::set_table`] or [`WorldQuery::set_archetype`]. `entity` and - /// `table_row` must be in the range of the current table and archetype. - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Self::Item<'w>; - /// Adds any component accesses used by this [`WorldQuery`] to `access`. /// /// Used to check which queries are disjoint and can run in parallel @@ -142,7 +124,8 @@ pub unsafe trait WorldQuery { /// Returns `true` if this query matches a set of components. Otherwise, returns `false`. /// /// Used to check which [`Archetype`]s can be skipped by the query - /// (if none of the [`Component`](crate::component::Component)s match) + /// (if none of the [`Component`](crate::component::Component)s match). + /// This is how archetypal query filters like `With` work. fn matches_component_set( state: &Self::State, set_contains_id: &impl Fn(ComponentId) -> bool, @@ -152,8 +135,22 @@ pub unsafe trait WorldQuery { macro_rules! impl_tuple_world_query { ($(#[$meta:meta])* $(($name: ident, $state: ident)),*) => { - #[allow(non_snake_case)] - #[allow(clippy::unused_unit)] + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such the lints below may not always apply." 
+ )] + #[allow( + non_snake_case, + reason = "The names of some variables are provided by the macro's caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples will generate some function bodies equivalent to `()`; however, this macro is meant for all applicable tuples, and as such it makes no sense to rewrite it just for that case." + )] $(#[$meta])* /// SAFETY: /// `fetch` accesses are the conjunction of the subqueries' accesses @@ -162,15 +159,8 @@ macro_rules! impl_tuple_world_query { /// This is sound because `matches_component_set` always returns `false` if any the subqueries' implementations return `false`. unsafe impl<$($name: WorldQuery),*> WorldQuery for ($($name,)*) { type Fetch<'w> = ($($name::Fetch<'w>,)*); - type Item<'w> = ($($name::Item<'w>,)*); type State = ($($name::State,)*); - fn shrink<'wlong: 'wshort, 'wshort>(item: Self::Item<'wlong>) -> Self::Item<'wshort> { - let ($($name,)*) = item; - ($( - $name::shrink($name), - )*) - } fn shrink_fetch<'wlong: 'wshort, 'wshort>(fetch: Self::Fetch<'wlong>) -> Self::Fetch<'wshort> { let ($($name,)*) = fetch; @@ -180,64 +170,50 @@ macro_rules! impl_tuple_world_query { } #[inline] - #[allow(clippy::unused_unit)] - unsafe fn init_fetch<'w>(_world: UnsafeWorldCell<'w>, state: &Self::State, _last_run: Tick, _this_run: Tick) -> Self::Fetch<'w> { + unsafe fn init_fetch<'w>(world: UnsafeWorldCell<'w>, state: &Self::State, last_run: Tick, this_run: Tick) -> Self::Fetch<'w> { let ($($name,)*) = state; - // SAFETY: The invariants are uphold by the caller. - ($(unsafe { $name::init_fetch(_world, $name, _last_run, _this_run) },)*) + // SAFETY: The invariants are upheld by the caller. 
+ ($(unsafe { $name::init_fetch(world, $name, last_run, this_run) },)*) } const IS_DENSE: bool = true $(&& $name::IS_DENSE)*; #[inline] unsafe fn set_archetype<'w>( - _fetch: &mut Self::Fetch<'w>, - _state: &Self::State, - _archetype: &'w Archetype, - _table: &'w Table + fetch: &mut Self::Fetch<'w>, + state: &Self::State, + archetype: &'w Archetype, + table: &'w Table ) { - let ($($name,)*) = _fetch; - let ($($state,)*) = _state; - // SAFETY: The invariants are uphold by the caller. - $(unsafe { $name::set_archetype($name, $state, _archetype, _table); })* + let ($($name,)*) = fetch; + let ($($state,)*) = state; + // SAFETY: The invariants are upheld by the caller. + $(unsafe { $name::set_archetype($name, $state, archetype, table); })* } #[inline] - unsafe fn set_table<'w>(_fetch: &mut Self::Fetch<'w>, _state: &Self::State, _table: &'w Table) { - let ($($name,)*) = _fetch; - let ($($state,)*) = _state; - // SAFETY: The invariants are uphold by the caller. - $(unsafe { $name::set_table($name, $state, _table); })* + unsafe fn set_table<'w>(fetch: &mut Self::Fetch<'w>, state: &Self::State, table: &'w Table) { + let ($($name,)*) = fetch; + let ($($state,)*) = state; + // SAFETY: The invariants are upheld by the caller. + $(unsafe { $name::set_table($name, $state, table); })* } - #[inline(always)] - #[allow(clippy::unused_unit)] - unsafe fn fetch<'w>( - _fetch: &mut Self::Fetch<'w>, - _entity: Entity, - _table_row: TableRow - ) -> Self::Item<'w> { - let ($($name,)*) = _fetch; - // SAFETY: The invariants are uphold by the caller. 
- ($(unsafe { $name::fetch($name, _entity, _table_row) },)*) - } - fn update_component_access(state: &Self::State, _access: &mut FilteredAccess) { + fn update_component_access(state: &Self::State, access: &mut FilteredAccess) { let ($($name,)*) = state; - $($name::update_component_access($name, _access);)* + $($name::update_component_access($name, access);)* } - #[allow(unused_variables)] fn init_state(world: &mut World) -> Self::State { ($($name::init_state(world),)*) } - #[allow(unused_variables)] fn get_state(components: &Components) -> Option { Some(($($name::get_state(components)?,)*)) } - fn matches_component_set(state: &Self::State, _set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { + fn matches_component_set(state: &Self::State, set_contains_id: &impl Fn(ComponentId) -> bool) -> bool { let ($($name,)*) = state; - true $(&& $name::matches_component_set($name, _set_contains_id))* + true $(&& $name::matches_component_set($name, set_contains_id))* } } }; diff --git a/crates/bevy_ecs/src/reflect/bundle.rs b/crates/bevy_ecs/src/reflect/bundle.rs index 248ca1a7048c1..ee02aff86e7fe 100644 --- a/crates/bevy_ecs/src/reflect/bundle.rs +++ b/crates/bevy_ecs/src/reflect/bundle.rs @@ -8,7 +8,10 @@ use alloc::boxed::Box; use core::any::{Any, TypeId}; use crate::{ + bundle::BundleFromComponents, + entity::EntityMapper, prelude::Bundle, + relationship::RelationshipHookMode, world::{EntityMut, EntityWorldMut}, }; use bevy_reflect::{ @@ -33,8 +36,14 @@ pub struct ReflectBundleFns { pub insert: fn(&mut EntityWorldMut, &dyn PartialReflect, &TypeRegistry), /// Function pointer implementing [`ReflectBundle::apply`]. pub apply: fn(EntityMut, &dyn PartialReflect, &TypeRegistry), - /// Function pointer implementing [`ReflectBundle::apply_or_insert`]. - pub apply_or_insert: fn(&mut EntityWorldMut, &dyn PartialReflect, &TypeRegistry), + /// Function pointer implementing [`ReflectBundle::apply_or_insert_mapped`]. 
+ pub apply_or_insert_mapped: fn( + &mut EntityWorldMut, + &dyn PartialReflect, + &TypeRegistry, + &mut dyn EntityMapper, + RelationshipHookMode, + ), /// Function pointer implementing [`ReflectBundle::remove`]. pub remove: fn(&mut EntityWorldMut), /// Function pointer implementing [`ReflectBundle::take`]. @@ -47,7 +56,7 @@ impl ReflectBundleFns { /// /// This is useful if you want to start with the default implementation before overriding some /// of the functions to create a custom implementation. - pub fn new() -> Self { + pub fn new() -> Self { >::from_type().0 } } @@ -78,13 +87,15 @@ impl ReflectBundle { } /// Uses reflection to set the value of this [`Bundle`] type in the entity to the given value or insert a new one if it does not exist. - pub fn apply_or_insert( + pub fn apply_or_insert_mapped( &self, entity: &mut EntityWorldMut, bundle: &dyn PartialReflect, registry: &TypeRegistry, + mapper: &mut dyn EntityMapper, + relationship_hook_mode: RelationshipHookMode, ) { - (self.0.apply_or_insert)(entity, bundle, registry); + (self.0.apply_or_insert_mapped)(entity, bundle, registry, mapper, relationship_hook_mode); } /// Removes this [`Bundle`] type from the entity. Does nothing if it doesn't exist. 
@@ -136,7 +147,7 @@ impl ReflectBundle { } } -impl FromType for ReflectBundle { +impl FromType for ReflectBundle { fn from_type() -> Self { ReflectBundle(ReflectBundleFns { insert: |entity, reflected_bundle, registry| { @@ -166,19 +177,41 @@ impl FromType for ReflectBundle { } } }, - apply_or_insert: |entity, reflected_bundle, registry| { + apply_or_insert_mapped: |entity, + reflected_bundle, + registry, + mapper, + relationship_hook_mode| { if let Some(reflect_component) = registry.get_type_data::(TypeId::of::()) { - reflect_component.apply_or_insert(entity, reflected_bundle, registry); + reflect_component.apply_or_insert_mapped( + entity, + reflected_bundle, + registry, + mapper, + relationship_hook_mode, + ); } else { match reflected_bundle.reflect_ref() { - ReflectRef::Struct(bundle) => bundle - .iter_fields() - .for_each(|field| apply_or_insert_field(entity, field, registry)), - ReflectRef::Tuple(bundle) => bundle - .iter_fields() - .for_each(|field| apply_or_insert_field(entity, field, registry)), + ReflectRef::Struct(bundle) => bundle.iter_fields().for_each(|field| { + apply_or_insert_field_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); + }), + ReflectRef::Tuple(bundle) => bundle.iter_fields().for_each(|field| { + apply_or_insert_field_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); + }), _ => panic!( "expected bundle `{}` to be a named struct or tuple", // FIXME: once we have unique reflect, use `TypePath`. 
@@ -218,10 +251,12 @@ fn apply_field(entity: &mut EntityMut, field: &dyn PartialReflect, registry: &Ty } } -fn apply_or_insert_field( +fn apply_or_insert_field_mapped( entity: &mut EntityWorldMut, field: &dyn PartialReflect, registry: &TypeRegistry, + mapper: &mut dyn EntityMapper, + relationship_hook_mode: RelationshipHookMode, ) { let Some(type_id) = field.try_as_reflect().map(Any::type_id) else { panic!( @@ -231,9 +266,21 @@ fn apply_or_insert_field( }; if let Some(reflect_component) = registry.get_type_data::(type_id) { - reflect_component.apply_or_insert(entity, field, registry); + reflect_component.apply_or_insert_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); } else if let Some(reflect_bundle) = registry.get_type_data::(type_id) { - reflect_bundle.apply_or_insert(entity, field, registry); + reflect_bundle.apply_or_insert_mapped( + entity, + field, + registry, + mapper, + relationship_hook_mode, + ); } else { let is_component = entity.world().components().get_id(type_id).is_some(); diff --git a/crates/bevy_ecs/src/reflect/component.rs b/crates/bevy_ecs/src/reflect/component.rs index 7778f16451a4a..893e9b13fa8e3 100644 --- a/crates/bevy_ecs/src/reflect/component.rs +++ b/crates/bevy_ecs/src/reflect/component.rs @@ -61,8 +61,9 @@ use super::from_reflect_with_fallback; use crate::{ change_detection::Mut, component::{ComponentId, ComponentMutability}, - entity::Entity, + entity::{Entity, EntityMapper}, prelude::Component, + relationship::RelationshipHookMode, world::{ unsafe_world_cell::UnsafeEntityCell, EntityMut, EntityWorldMut, FilteredEntityMut, FilteredEntityRef, World, @@ -104,8 +105,14 @@ pub struct ReflectComponentFns { pub insert: fn(&mut EntityWorldMut, &dyn PartialReflect, &TypeRegistry), /// Function pointer implementing [`ReflectComponent::apply()`]. pub apply: fn(EntityMut, &dyn PartialReflect), - /// Function pointer implementing [`ReflectComponent::apply_or_insert()`]. 
- pub apply_or_insert: fn(&mut EntityWorldMut, &dyn PartialReflect, &TypeRegistry), + /// Function pointer implementing [`ReflectComponent::apply_or_insert_mapped()`]. + pub apply_or_insert_mapped: fn( + &mut EntityWorldMut, + &dyn PartialReflect, + &TypeRegistry, + &mut dyn EntityMapper, + RelationshipHookMode, + ), /// Function pointer implementing [`ReflectComponent::remove()`]. pub remove: fn(&mut EntityWorldMut), /// Function pointer implementing [`ReflectComponent::contains()`]. @@ -114,6 +121,8 @@ pub struct ReflectComponentFns { pub reflect: fn(FilteredEntityRef) -> Option<&dyn Reflect>, /// Function pointer implementing [`ReflectComponent::reflect_mut()`]. pub reflect_mut: fn(FilteredEntityMut) -> Option>, + /// Function pointer implementing [`ReflectComponent::map_entities()`]. + pub map_entities: fn(&mut dyn Reflect, &mut dyn EntityMapper), /// Function pointer implementing [`ReflectComponent::reflect_unchecked_mut()`]. /// /// # Safety @@ -163,13 +172,15 @@ impl ReflectComponent { /// # Panics /// /// Panics if [`Component`] is immutable. - pub fn apply_or_insert( + pub fn apply_or_insert_mapped( &self, entity: &mut EntityWorldMut, component: &dyn PartialReflect, registry: &TypeRegistry, + map: &mut dyn EntityMapper, + relationship_hook_mode: RelationshipHookMode, ) { - (self.0.apply_or_insert)(entity, component, registry); + (self.0.apply_or_insert_mapped)(entity, component, registry, map, relationship_hook_mode); } /// Removes this [`Component`] type from the entity. Does nothing if it doesn't exist. @@ -277,6 +288,11 @@ impl ReflectComponent { pub fn fn_pointers(&self) -> &ReflectComponentFns { &self.0 } + + /// Calls a dynamic version of [`Component::map_entities`]. 
+ pub fn map_entities(&self, component: &mut dyn Reflect, func: &mut dyn EntityMapper) { + (self.0.map_entities)(component, func); + } } impl FromType for ReflectComponent { @@ -300,20 +316,30 @@ impl FromType for ReflectComponent { let mut component = unsafe { entity.get_mut_assume_mutable::() }.unwrap(); component.apply(reflected_component); }, - apply_or_insert: |entity, reflected_component, registry| { - if !C::Mutability::MUTABLE { - let name = ShortName::of::(); - panic!("Cannot call `ReflectComponent::apply_or_insert` on component {name}. It is immutable, and cannot modified through reflection"); - } - - // SAFETY: guard ensures `C` is a mutable component - if let Some(mut component) = unsafe { entity.get_mut_assume_mutable::() } { - component.apply(reflected_component.as_partial_reflect()); + apply_or_insert_mapped: |entity, + reflected_component, + registry, + mut mapper, + relationship_hook_mode| { + if C::Mutability::MUTABLE { + // SAFETY: guard ensures `C` is a mutable component + if let Some(mut component) = unsafe { entity.get_mut_assume_mutable::() } { + component.apply(reflected_component.as_partial_reflect()); + C::map_entities(&mut component, &mut mapper); + } else { + let mut component = entity.world_scope(|world| { + from_reflect_with_fallback::(reflected_component, world, registry) + }); + C::map_entities(&mut component, &mut mapper); + entity + .insert_with_relationship_hook_mode(component, relationship_hook_mode); + } } else { - let component = entity.world_scope(|world| { + let mut component = entity.world_scope(|world| { from_reflect_with_fallback::(reflected_component, world, registry) }); - entity.insert(component); + C::map_entities(&mut component, &mut mapper); + entity.insert_with_relationship_hook_mode(component, relationship_hook_mode); } }, remove: |entity| { @@ -357,6 +383,10 @@ impl FromType for ReflectComponent { register_component: |world: &mut World| -> ComponentId { world.register_component::() }, + map_entities: |reflect: 
&mut dyn Reflect, mut mapper: &mut dyn EntityMapper| { + let component = reflect.downcast_mut::().unwrap(); + Component::map_entities(component, &mut mapper); + }, }) } } diff --git a/crates/bevy_ecs/src/reflect/entity_commands.rs b/crates/bevy_ecs/src/reflect/entity_commands.rs index 4bb77d9d8fc95..20c5e16c6ddc7 100644 --- a/crates/bevy_ecs/src/reflect/entity_commands.rs +++ b/crates/bevy_ecs/src/reflect/entity_commands.rs @@ -2,12 +2,12 @@ use crate::{ entity::Entity, prelude::Mut, reflect::{AppTypeRegistry, ReflectBundle, ReflectComponent}, - system::{EntityCommands, Resource}, - world::{Command, World}, + resource::Resource, + system::EntityCommands, + world::{EntityWorldMut, World}, }; use alloc::{borrow::Cow, boxed::Box}; use bevy_reflect::{PartialReflect, TypeRegistry}; -use core::marker::PhantomData; /// An extension trait for [`EntityCommands`] for reflection related functions pub trait ReflectCommandExt { @@ -20,7 +20,7 @@ pub trait ReflectCommandExt { /// /// - If the entity doesn't exist. /// - If [`AppTypeRegistry`] does not have the reflection data for the given - /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). + /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). /// - If the component or bundle data is invalid. See [`PartialReflect::apply`] for further details. /// - If [`AppTypeRegistry`] is not present in the [`World`]. /// @@ -82,7 +82,7 @@ pub trait ReflectCommandExt { /// // use the insert_reflect entity command to insert that component/bundle into an entity. 
/// commands /// .spawn_empty() - /// .insert_reflect(prefab.data.clone_value()); + /// .insert_reflect(prefab.data.reflect_clone().unwrap().into_partial_reflect()); /// } /// ``` fn insert_reflect(&mut self, component: Box) -> &mut Self; @@ -170,48 +170,175 @@ pub trait ReflectCommandExt { impl ReflectCommandExt for EntityCommands<'_> { fn insert_reflect(&mut self, component: Box) -> &mut Self { - self.commands.queue(InsertReflect { - entity: self.entity, - component, + self.queue(move |mut entity: EntityWorldMut| { + entity.insert_reflect(component); + }) + } + + fn insert_reflect_with_registry>( + &mut self, + component: Box, + ) -> &mut Self { + self.queue(move |mut entity: EntityWorldMut| { + entity.insert_reflect_with_registry::(component); + }) + } + + fn remove_reflect(&mut self, component_type_path: impl Into>) -> &mut Self { + let component_type_path: Cow<'static, str> = component_type_path.into(); + self.queue(move |mut entity: EntityWorldMut| { + entity.remove_reflect(component_type_path); + }) + } + + fn remove_reflect_with_registry>( + &mut self, + component_type_path: impl Into>, + ) -> &mut Self { + let component_type_path: Cow<'static, str> = component_type_path.into(); + self.queue(move |mut entity: EntityWorldMut| { + entity.remove_reflect_with_registry::(component_type_path); + }) + } +} + +impl<'w> EntityWorldMut<'w> { + /// Adds the given boxed reflect component or bundle to the entity using the reflection data in + /// [`AppTypeRegistry`]. + /// + /// This will overwrite any previous component(s) of the same type. + /// + /// # Panics + /// + /// - If the entity has been despawned while this `EntityWorldMut` is still alive. + /// - If [`AppTypeRegistry`] does not have the reflection data for the given + /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). + /// - If the component or bundle data is invalid. See [`PartialReflect::apply`] for further details. 
+ /// - If [`AppTypeRegistry`] is not present in the [`World`]. + /// + /// # Note + /// + /// Prefer to use the typed [`EntityWorldMut::insert`] if possible. Adding a reflected component + /// is much slower. + pub fn insert_reflect(&mut self, component: Box) -> &mut Self { + self.assert_not_despawned(); + let entity_id = self.id(); + self.world_scope(|world| { + world.resource_scope(|world, registry: Mut| { + let type_registry = ®istry.as_ref().read(); + insert_reflect_with_registry_ref(world, entity_id, type_registry, component); + }); + world.flush(); }); + self.update_location(); self } - fn insert_reflect_with_registry>( + /// Same as [`insert_reflect`](EntityWorldMut::insert_reflect), but using + /// the `T` resource as type registry instead of [`AppTypeRegistry`]. + /// + /// This will overwrite any previous component(s) of the same type. + /// + /// # Panics + /// + /// - If the entity has been despawned while this `EntityWorldMut` is still alive. + /// - If the given [`Resource`] does not have the reflection data for the given + /// [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle). + /// - If the component or bundle data is invalid. See [`PartialReflect::apply`] for further details. + /// - If the given [`Resource`] is not present in the [`World`]. 
+ pub fn insert_reflect_with_registry>( &mut self, component: Box, ) -> &mut Self { - self.commands.queue(InsertReflectWithRegistry:: { - entity: self.entity, - _t: PhantomData, - component, + self.assert_not_despawned(); + let entity_id = self.id(); + self.world_scope(|world| { + world.resource_scope(|world, registry: Mut| { + let type_registry = registry.as_ref().as_ref(); + insert_reflect_with_registry_ref(world, entity_id, type_registry, component); + }); + world.flush(); }); + self.update_location(); self } - fn remove_reflect(&mut self, component_type_path: impl Into>) -> &mut Self { - self.commands.queue(RemoveReflect { - entity: self.entity, - component_type_path: component_type_path.into(), + /// Removes from the entity the component or bundle with the given type name registered in [`AppTypeRegistry`]. + /// + /// If the type is a bundle, it will remove any components in that bundle regardless if the entity + /// contains all the components. + /// + /// Does nothing if the type is a component and the entity does not have a component of the same type, + /// if the type is a bundle and the entity does not contain any of the components in the bundle, + /// or if [`AppTypeRegistry`] does not contain the reflection data for the given component. + /// + /// # Panics + /// + /// - If the entity has been despawned while this `EntityWorldMut` is still alive. + /// - If [`AppTypeRegistry`] is not present in the [`World`]. + /// + /// # Note + /// + /// Prefer to use the typed [`EntityCommands::remove`] if possible. Removing a reflected component + /// is much slower. 
+ pub fn remove_reflect(&mut self, component_type_path: Cow<'static, str>) -> &mut Self { + self.assert_not_despawned(); + let entity_id = self.id(); + self.world_scope(|world| { + world.resource_scope(|world, registry: Mut| { + let type_registry = ®istry.as_ref().read(); + remove_reflect_with_registry_ref( + world, + entity_id, + type_registry, + component_type_path, + ); + }); + world.flush(); }); + self.update_location(); self } - fn remove_reflect_with_registry>( + /// Same as [`remove_reflect`](EntityWorldMut::remove_reflect), but using + /// the `T` resource as type registry instead of `AppTypeRegistry`. + /// + /// If the given type is a bundle, it will remove any components in that bundle regardless if the entity + /// contains all the components. + /// + /// Does nothing if the type is a component and the entity does not have a component of the same type, + /// if the type is a bundle and the entity does not contain any of the components in the bundle, + /// or if [`AppTypeRegistry`] does not contain the reflection data for the given component. + /// + /// # Panics + /// + /// - If the entity has been despawned while this `EntityWorldMut` is still alive. + /// - If [`AppTypeRegistry`] is not present in the [`World`]. 
+ pub fn remove_reflect_with_registry>( &mut self, - component_type_name: impl Into>, + component_type_path: Cow<'static, str>, ) -> &mut Self { - self.commands.queue(RemoveReflectWithRegistry:: { - entity: self.entity, - _t: PhantomData, - component_type_name: component_type_name.into(), + self.assert_not_despawned(); + let entity_id = self.id(); + self.world_scope(|world| { + world.resource_scope(|world, registry: Mut| { + let type_registry = registry.as_ref().as_ref(); + remove_reflect_with_registry_ref( + world, + entity_id, + type_registry, + component_type_path, + ); + }); + world.flush(); }); + self.update_location(); self } } /// Helper function to add a reflect component or bundle to a given entity -fn insert_reflect( +fn insert_reflect_with_registry_ref( world: &mut World, entity: Entity, type_registry: &TypeRegistry, @@ -223,7 +350,7 @@ fn insert_reflect( let type_path = type_info.type_path(); let Ok(mut entity) = world.get_entity_mut(entity) else { panic!("error[B0003]: Could not insert a reflected component (of type {type_path}) for entity {entity}, which {}. See: https://bevyengine.org/learn/errors/b0003", - world.entities().entity_does_not_exist_error_details_message(entity)); + world.entities().entity_does_not_exist_error_details(entity)); }; let Some(type_registration) = type_registry.get(type_info.type_id()) else { panic!("`{type_path}` should be registered in type registry via `App::register_type<{type_path}>`"); @@ -238,48 +365,8 @@ fn insert_reflect( } } -/// A [`Command`] that adds the boxed reflect component or bundle to an entity using the data in -/// [`AppTypeRegistry`]. -/// -/// See [`ReflectCommandExt::insert_reflect`] for details. -pub struct InsertReflect { - /// The entity on which the component will be inserted. - pub entity: Entity, - /// The reflect [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle) - /// that will be added to the entity. 
- pub component: Box, -} - -impl Command for InsertReflect { - fn apply(self, world: &mut World) { - let registry = world.get_resource::().unwrap().clone(); - insert_reflect(world, self.entity, ®istry.read(), self.component); - } -} - -/// A [`Command`] that adds the boxed reflect component or bundle to an entity using the data in the provided -/// [`Resource`] that implements [`AsRef`]. -/// -/// See [`ReflectCommandExt::insert_reflect_with_registry`] for details. -pub struct InsertReflectWithRegistry> { - /// The entity on which the component will be inserted. - pub entity: Entity, - pub _t: PhantomData, - /// The reflect [`Component`](crate::component::Component) that will be added to the entity. - pub component: Box, -} - -impl> Command for InsertReflectWithRegistry { - fn apply(self, world: &mut World) { - world.resource_scope(|world, registry: Mut| { - let registry: &TypeRegistry = registry.as_ref().as_ref(); - insert_reflect(world, self.entity, registry, self.component); - }); - } -} - /// Helper function to remove a reflect component or bundle from a given entity -fn remove_reflect( +fn remove_reflect_with_registry_ref( world: &mut World, entity: Entity, type_registry: &TypeRegistry, @@ -298,58 +385,9 @@ fn remove_reflect( } } -/// A [`Command`] that removes the component or bundle of the same type as the given type name from -/// the provided entity. -/// -/// See [`ReflectCommandExt::remove_reflect`] for details. -pub struct RemoveReflect { - /// The entity from which the component will be removed. - pub entity: Entity, - /// The [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle) - /// type name that will be used to remove a component - /// of the same type from the entity. 
- pub component_type_path: Cow<'static, str>, -} - -impl Command for RemoveReflect { - fn apply(self, world: &mut World) { - let registry = world.get_resource::().unwrap().clone(); - remove_reflect( - world, - self.entity, - ®istry.read(), - self.component_type_path, - ); - } -} - -/// A [`Command`] that removes the component or bundle of the same type as the given type name from -/// the provided entity using the provided [`Resource`] that implements [`AsRef`]. -/// -/// See [`ReflectCommandExt::remove_reflect_with_registry`] for details. -pub struct RemoveReflectWithRegistry> { - /// The entity from which the component will be removed. - pub entity: Entity, - pub _t: PhantomData, - /// The [`Component`](crate::component::Component) or [`Bundle`](crate::bundle::Bundle) - /// type name that will be used to remove a component - /// of the same type from the entity. - pub component_type_name: Cow<'static, str>, -} - -impl> Command for RemoveReflectWithRegistry { - fn apply(self, world: &mut World) { - world.resource_scope(|world, registry: Mut| { - let registry: &TypeRegistry = registry.as_ref().as_ref(); - remove_reflect(world, self.entity, registry, self.component_type_name); - }); - } -} - #[cfg(test)] mod tests { use crate::{ - self as bevy_ecs, bundle::Bundle, component::Component, prelude::{AppTypeRegistry, ReflectComponent}, @@ -357,6 +395,7 @@ mod tests { system::{Commands, SystemState}, world::World, }; + use alloc::{borrow::ToOwned, boxed::Box}; use bevy_ecs_macros::Resource; use bevy_reflect::{PartialReflect, Reflect, TypeRegistry}; @@ -403,21 +442,30 @@ mod tests { let entity = commands.spawn_empty().id(); let entity2 = commands.spawn_empty().id(); + let entity3 = commands.spawn_empty().id(); let boxed_reflect_component_a = Box::new(ComponentA(916)) as Box; - let boxed_reflect_component_a_clone = boxed_reflect_component_a.clone_value(); + let boxed_reflect_component_a_clone = boxed_reflect_component_a.reflect_clone().unwrap(); + let 
boxed_reflect_component_a_dynamic = boxed_reflect_component_a.to_dynamic(); commands .entity(entity) .insert_reflect(boxed_reflect_component_a); commands .entity(entity2) - .insert_reflect(boxed_reflect_component_a_clone); + .insert_reflect(boxed_reflect_component_a_clone.into_partial_reflect()); + commands + .entity(entity3) + .insert_reflect(boxed_reflect_component_a_dynamic); system_state.apply(&mut world); assert_eq!( world.entity(entity).get::(), - world.entity(entity2).get::() + world.entity(entity2).get::(), + ); + assert_eq!( + world.entity(entity).get::(), + world.entity(entity3).get::(), ); } diff --git a/crates/bevy_ecs/src/reflect/mod.rs b/crates/bevy_ecs/src/reflect/mod.rs index ba27538d2ade9..b630f587197d7 100644 --- a/crates/bevy_ecs/src/reflect/mod.rs +++ b/crates/bevy_ecs/src/reflect/mod.rs @@ -5,8 +5,7 @@ use core::{ ops::{Deref, DerefMut}, }; -use crate as bevy_ecs; -use crate::{system::Resource, world::World}; +use crate::{resource::Resource, world::World}; use bevy_reflect::{ std_traits::ReflectDefault, PartialReflect, Reflect, ReflectFromReflect, TypePath, TypeRegistry, TypeRegistryArc, @@ -18,7 +17,6 @@ mod entity_commands; mod from_world; mod map_entities; mod resource; -mod visit_entities; pub use bundle::{ReflectBundle, ReflectBundleFns}; pub use component::{ReflectComponent, ReflectComponentFns}; @@ -26,7 +24,6 @@ pub use entity_commands::ReflectCommandExt; pub use from_world::{ReflectFromWorld, ReflectFromWorldFns}; pub use map_entities::ReflectMapEntities; pub use resource::{ReflectResource, ReflectResourceFns}; -pub use visit_entities::{ReflectVisitEntities, ReflectVisitEntitiesMut}; /// A [`Resource`] storing [`TypeRegistry`] for /// type registrations relevant to a whole app. 
diff --git a/crates/bevy_ecs/src/reflect/resource.rs b/crates/bevy_ecs/src/reflect/resource.rs index f9da7adc7c9a2..60cf7bc609169 100644 --- a/crates/bevy_ecs/src/reflect/resource.rs +++ b/crates/bevy_ecs/src/reflect/resource.rs @@ -7,8 +7,11 @@ use crate::{ change_detection::Mut, component::ComponentId, - system::Resource, - world::{unsafe_world_cell::UnsafeWorldCell, World}, + resource::Resource, + world::{ + error::ResourceFetchError, unsafe_world_cell::UnsafeWorldCell, FilteredResources, + FilteredResourcesMut, World, + }, }; use bevy_reflect::{FromReflect, FromType, PartialReflect, Reflect, TypePath, TypeRegistry}; @@ -52,7 +55,12 @@ pub struct ReflectResourceFns { /// Function pointer implementing [`ReflectResource::remove()`]. pub remove: fn(&mut World), /// Function pointer implementing [`ReflectResource::reflect()`]. - pub reflect: fn(&World) -> Option<&dyn Reflect>, + pub reflect: + for<'w> fn(FilteredResources<'w, '_>) -> Result<&'w dyn Reflect, ResourceFetchError>, + /// Function pointer implementing [`ReflectResource::reflect_mut()`]. + pub reflect_mut: for<'w> fn( + FilteredResourcesMut<'w, '_>, + ) -> Result, ResourceFetchError>, /// Function pointer implementing [`ReflectResource::reflect_unchecked_mut()`]. /// /// # Safety @@ -111,14 +119,23 @@ impl ReflectResource { } /// Gets the value of this [`Resource`] type from the world as a reflected reference. - pub fn reflect<'a>(&self, world: &'a World) -> Option<&'a dyn Reflect> { - (self.0.reflect)(world) + /// + /// Note that [`&World`](World) is a valid type for `resources`. + pub fn reflect<'w, 's>( + &self, + resources: impl Into>, + ) -> Result<&'w dyn Reflect, ResourceFetchError> { + (self.0.reflect)(resources.into()) } /// Gets the value of this [`Resource`] type from the world as a mutable reflected reference. 
- pub fn reflect_mut<'a>(&self, world: &'a mut World) -> Option> { - // SAFETY: unique world access - unsafe { (self.0.reflect_unchecked_mut)(world.as_unsafe_world_cell()) } + /// + /// Note that [`&mut World`](World) is a valid type for `resources`. + pub fn reflect_mut<'w, 's>( + &self, + resources: impl Into>, + ) -> Result, ResourceFetchError> { + (self.0.reflect_mut)(resources.into()) } /// # Safety @@ -212,7 +229,12 @@ impl FromType for ReflectResource { remove: |world| { world.remove_resource::(); }, - reflect: |world| world.get_resource::().map(|res| res as &dyn Reflect), + reflect: |world| world.get::().map(|res| res.into_inner() as &dyn Reflect), + reflect_mut: |world| { + world + .into_mut::() + .map(|res| res.map_unchanged(|value| value as &mut dyn Reflect)) + }, reflect_unchecked_mut: |world| { // SAFETY: all usages of `reflect_unchecked_mut` guarantee that there is either a single mutable // reference or multiple immutable ones alive at any given point diff --git a/crates/bevy_ecs/src/reflect/visit_entities.rs b/crates/bevy_ecs/src/reflect/visit_entities.rs deleted file mode 100644 index 11f02612ba1f9..0000000000000 --- a/crates/bevy_ecs/src/reflect/visit_entities.rs +++ /dev/null @@ -1,62 +0,0 @@ -use crate::entity::{Entity, VisitEntities, VisitEntitiesMut}; -use bevy_reflect::{FromReflect, FromType, PartialReflect}; - -/// For a reflected value, apply an operation to all contained entities. -/// -/// See [`VisitEntities`] for more details. -#[derive(Clone)] -pub struct ReflectVisitEntities { - visit_entities: fn(&dyn PartialReflect, &mut dyn FnMut(Entity)), -} - -impl ReflectVisitEntities { - /// A general method for applying an operation to all entities in a - /// reflected component. 
- pub fn visit_entities(&self, component: &dyn PartialReflect, f: &mut dyn FnMut(Entity)) { - (self.visit_entities)(component, f); - } -} - -impl FromType for ReflectVisitEntities { - fn from_type() -> Self { - ReflectVisitEntities { - visit_entities: |component, f| { - let concrete = C::from_reflect(component).unwrap(); - concrete.visit_entities(f); - }, - } - } -} - -/// For a reflected value, apply an operation to mutable references to all -/// contained entities. -/// -/// See [`VisitEntitiesMut`] for more details. -#[derive(Clone)] -pub struct ReflectVisitEntitiesMut { - visit_entities_mut: fn(&mut dyn PartialReflect, &mut dyn FnMut(&mut Entity)), -} - -impl ReflectVisitEntitiesMut { - /// A general method for applying an operation to all entities in a - /// reflected component. - pub fn visit_entities( - &self, - component: &mut dyn PartialReflect, - f: &mut dyn FnMut(&mut Entity), - ) { - (self.visit_entities_mut)(component, f); - } -} - -impl FromType for ReflectVisitEntitiesMut { - fn from_type() -> Self { - ReflectVisitEntitiesMut { - visit_entities_mut: |component, f| { - let mut concrete = C::from_reflect(component).unwrap(); - concrete.visit_entities_mut(f); - component.apply(&concrete); - }, - } - } -} diff --git a/crates/bevy_ecs/src/relationship/mod.rs b/crates/bevy_ecs/src/relationship/mod.rs new file mode 100644 index 0000000000000..9a2a2a2d5a39a --- /dev/null +++ b/crates/bevy_ecs/src/relationship/mod.rs @@ -0,0 +1,427 @@ +//! This module provides functionality to link entities to each other using specialized components called "relationships". See the [`Relationship`] trait for more info. 
+ +mod related_methods; +mod relationship_query; +mod relationship_source_collection; + +use alloc::format; + +pub use related_methods::*; +pub use relationship_query::*; +pub use relationship_source_collection::*; + +use crate::{ + component::{Component, HookContext, Mutable}, + entity::{ComponentCloneCtx, Entity, SourceComponent}, + error::{ignore, CommandWithEntity, HandleError}, + system::entity_command::{self}, + world::{DeferredWorld, EntityWorldMut}, +}; +use log::warn; + +/// A [`Component`] on a "source" [`Entity`] that references another target [`Entity`], creating a "relationship" between them. Every [`Relationship`] +/// has a corresponding [`RelationshipTarget`] type (and vice-versa), which exists on the "target" entity of a relationship and contains the list of all +/// "source" entities that relate to the given "target" +/// +/// The [`Relationship`] component is the "source of truth" and the [`RelationshipTarget`] component reflects that source of truth. When a [`Relationship`] +/// component is inserted on an [`Entity`], the corresponding [`RelationshipTarget`] component is immediately inserted on the target component if it does +/// not already exist, and the "source" entity is automatically added to the [`RelationshipTarget`] collection (this is done via "component hooks"). +/// +/// A common example of a [`Relationship`] is the parent / child relationship. Bevy ECS includes a canonical form of this via the [`ChildOf`](crate::hierarchy::ChildOf) +/// [`Relationship`] and the [`Children`](crate::hierarchy::Children) [`RelationshipTarget`]. +/// +/// [`Relationship`] and [`RelationshipTarget`] should always be derived via the [`Component`] trait to ensure the hooks are set up properly. +/// +/// ## Derive +/// +/// [`Relationship`] and [`RelationshipTarget`] can only be derived for structs with a single unnamed field, single named field +/// or for named structs where one field is annotated with `#[relationship]`. 
+/// If there are additional fields, they must all implement [`Default`]. +/// +/// [`RelationshipTarget`] also requires that the relationship field is private to prevent direct mutation, +/// ensuring the correctness of relationships. +/// ``` +/// # use bevy_ecs::component::Component; +/// # use bevy_ecs::entity::Entity; +/// #[derive(Component)] +/// #[relationship(relationship_target = Children)] +/// pub struct ChildOf { +/// #[relationship] +/// pub parent: Entity, +/// internal: u8, +/// }; +/// +/// #[derive(Component)] +/// #[relationship_target(relationship = ChildOf)] +/// pub struct Children(Vec); +/// ``` +/// +/// When deriving [`RelationshipTarget`] you can specify the `#[relationship_target(linked_spawn)]` attribute to +/// automatically despawn entities stored in an entity's [`RelationshipTarget`] when that entity is despawned: +/// +/// ``` +/// # use bevy_ecs::component::Component; +/// # use bevy_ecs::entity::Entity; +/// #[derive(Component)] +/// #[relationship(relationship_target = Children)] +/// pub struct ChildOf(pub Entity); +/// +/// #[derive(Component)] +/// #[relationship_target(relationship = ChildOf, linked_spawn)] +/// pub struct Children(Vec); +/// ``` +pub trait Relationship: Component + Sized { + /// The [`Component`] added to the "target" entities of this [`Relationship`], which contains the list of all "source" + /// entities that relate to the "target". + type RelationshipTarget: RelationshipTarget; + + /// Gets the [`Entity`] ID of the related entity. + fn get(&self) -> Entity; + + /// Creates this [`Relationship`] from the given `entity`. + fn from(entity: Entity) -> Self; + + /// The `on_insert` component hook that maintains the [`Relationship`] / [`RelationshipTarget`] connection. + fn on_insert( + mut world: DeferredWorld, + HookContext { + entity, + caller, + relationship_hook_mode, + .. 
+ }: HookContext, + ) { + match relationship_hook_mode { + RelationshipHookMode::Run => {} + RelationshipHookMode::Skip => return, + RelationshipHookMode::RunIfNotLinked => { + if ::LINKED_SPAWN { + return; + } + } + } + let target_entity = world.entity(entity).get::().unwrap().get(); + if target_entity == entity { + warn!( + "{}The {}({target_entity:?}) relationship on entity {entity:?} points to itself. The invalid {} relationship has been removed.", + caller.map(|location|format!("{location}: ")).unwrap_or_default(), + core::any::type_name::(), + core::any::type_name::() + ); + world.commands().entity(entity).remove::(); + return; + } + if let Ok(mut target_entity_mut) = world.get_entity_mut(target_entity) { + if let Some(mut relationship_target) = + target_entity_mut.get_mut::() + { + relationship_target.collection_mut_risky().add(entity); + } else { + let mut target = ::with_capacity(1); + target.collection_mut_risky().add(entity); + world.commands().entity(target_entity).insert(target); + } + } else { + warn!( + "{}The {}({target_entity:?}) relationship on entity {entity:?} relates to an entity that does not exist. The invalid {} relationship has been removed.", + caller.map(|location|format!("{location}: ")).unwrap_or_default(), + core::any::type_name::(), + core::any::type_name::() + ); + world.commands().entity(entity).remove::(); + } + } + + /// The `on_replace` component hook that maintains the [`Relationship`] / [`RelationshipTarget`] connection. + // note: think of this as "on_drop" + fn on_replace( + mut world: DeferredWorld, + HookContext { + entity, + relationship_hook_mode, + .. 
+ }: HookContext, + ) { + match relationship_hook_mode { + RelationshipHookMode::Run => {} + RelationshipHookMode::Skip => return, + RelationshipHookMode::RunIfNotLinked => { + if ::LINKED_SPAWN { + return; + } + } + } + let target_entity = world.entity(entity).get::().unwrap().get(); + if let Ok(mut target_entity_mut) = world.get_entity_mut(target_entity) { + if let Some(mut relationship_target) = + target_entity_mut.get_mut::() + { + relationship_target.collection_mut_risky().remove(entity); + if relationship_target.len() == 0 { + if let Ok(mut entity) = world.commands().get_entity(target_entity) { + // this "remove" operation must check emptiness because in the event that an identical + // relationship is inserted on top, this despawn would result in the removal of that identical + // relationship ... not what we want! + entity.queue(|mut entity: EntityWorldMut| { + if entity + .get::() + .is_some_and(RelationshipTarget::is_empty) + { + entity.remove::(); + } + }); + } + } + } + } + } +} + +/// The iterator type for the source entities in a [`RelationshipTarget`] collection, +/// as defined in the [`RelationshipSourceCollection`] trait. +pub type SourceIter<'w, R> = + <::Collection as RelationshipSourceCollection>::SourceIter<'w>; + +/// A [`Component`] containing the collection of entities that relate to this [`Entity`] via the associated `Relationship` type. +/// See the [`Relationship`] documentation for more information. +pub trait RelationshipTarget: Component + Sized { + /// If this is true, when despawning or cloning (when [linked cloning is enabled](crate::entity::EntityClonerBuilder::linked_cloning)), the related entities targeting this entity will also be despawned or cloned. + /// + /// For example, this is set to `true` for Bevy's built-in parent-child relation, defined by [`ChildOf`](crate::prelude::ChildOf) and [`Children`](crate::prelude::Children). 
+ /// This means that when a parent is despawned, any children targeting that parent are also despawned (and the same applies to cloning). + /// + /// To get around this behavior, you can first break the relationship between entities, and *then* despawn or clone. + /// This defaults to false when derived. + const LINKED_SPAWN: bool; + /// The [`Relationship`] that populates this [`RelationshipTarget`] collection. + type Relationship: Relationship; + /// The collection type that stores the "source" entities for this [`RelationshipTarget`] component. + /// + /// Check the list of types which implement [`RelationshipSourceCollection`] for the data structures that can be used inside of your component. + /// If you need a new collection type, you can implement the [`RelationshipSourceCollection`] trait + /// for a type you own which wraps the collection you want to use (to avoid the orphan rule), + /// or open an issue on the Bevy repository to request first-party support for your collection type. + type Collection: RelationshipSourceCollection; + + /// Returns a reference to the stored [`RelationshipTarget::Collection`]. + fn collection(&self) -> &Self::Collection; + /// Returns a mutable reference to the stored [`RelationshipTarget::Collection`]. + /// + /// # Warning + /// This should generally not be called by user code, as modifying the internal collection could invalidate the relationship. + /// The collection should not contain duplicates. + fn collection_mut_risky(&mut self) -> &mut Self::Collection; + + /// Creates a new [`RelationshipTarget`] from the given [`RelationshipTarget::Collection`]. + /// + /// # Warning + /// This should generally not be called by user code, as constructing the internal collection could invalidate the relationship. + /// The collection should not contain duplicates. 
+ fn from_collection_risky(collection: Self::Collection) -> Self; + + /// The `on_replace` component hook that maintains the [`Relationship`] / [`RelationshipTarget`] connection. + // note: think of this as "on_drop" + fn on_replace(mut world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) { + let (entities, mut commands) = world.entities_and_commands(); + let relationship_target = entities.get(entity).unwrap().get::().unwrap(); + for source_entity in relationship_target.iter() { + if entities.get(source_entity).is_ok() { + commands.queue( + entity_command::remove::() + .with_entity(source_entity) + .handle_error_with(ignore), + ); + } else { + warn!( + "{}Tried to despawn non-existent entity {}", + caller + .map(|location| format!("{location}: ")) + .unwrap_or_default(), + source_entity + ); + } + } + } + + /// The `on_despawn` component hook that despawns entities stored in an entity's [`RelationshipTarget`] when + /// that entity is despawned. + // note: think of this as "on_drop" + fn on_despawn(mut world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) { + let (entities, mut commands) = world.entities_and_commands(); + let relationship_target = entities.get(entity).unwrap().get::().unwrap(); + for source_entity in relationship_target.iter() { + if entities.get(source_entity).is_ok() { + commands.queue( + entity_command::despawn() + .with_entity(source_entity) + .handle_error_with(ignore), + ); + } else { + warn!( + "{}Tried to despawn non-existent entity {}", + caller + .map(|location| format!("{location}: ")) + .unwrap_or_default(), + source_entity + ); + } + } + } + + /// Creates this [`RelationshipTarget`] with the given pre-allocated entity capacity. + fn with_capacity(capacity: usize) -> Self { + let collection = + ::with_capacity(capacity); + Self::from_collection_risky(collection) + } + + /// Iterates the entities stored in this collection. 
+ #[inline] + fn iter(&self) -> SourceIter<'_, Self> { + self.collection().iter() + } + + /// Returns the number of entities in this collection. + #[inline] + fn len(&self) -> usize { + self.collection().len() + } + + /// Returns true if this entity collection is empty. + #[inline] + fn is_empty(&self) -> bool { + self.collection().is_empty() + } +} + +/// The "clone behavior" for [`RelationshipTarget`]. This actually creates an empty +/// [`RelationshipTarget`] instance with space reserved for the number of targets in the +/// original instance. The [`RelationshipTarget`] will then be populated with the proper components +/// when the corresponding [`Relationship`] sources of truth are inserted. Cloning the actual entities +/// in the original [`RelationshipTarget`] would result in duplicates, so we don't do that! +/// +/// This will also queue up clones of the relationship sources if the [`EntityCloner`](crate::entity::EntityCloner) is configured +/// to spawn recursively. +pub fn clone_relationship_target( + source: &SourceComponent, + context: &mut ComponentCloneCtx, +) { + if let Some(component) = source.read::() { + let mut cloned = T::with_capacity(component.len()); + if context.linked_cloning() && T::LINKED_SPAWN { + let collection = cloned.collection_mut_risky(); + for entity in component.iter() { + collection.add(entity); + context.queue_entity_clone(entity); + } + } + context.write_target_component(cloned); + } +} + +/// Configures the conditions under which the Relationship insert/replace hooks will be run. 
+#[derive(Copy, Clone, Debug)] +pub enum RelationshipHookMode { + /// Relationship insert/replace hooks will always run + Run, + /// Relationship insert/replace hooks will run if [`RelationshipTarget::LINKED_SPAWN`] is false + RunIfNotLinked, + /// Relationship insert/replace hooks will always be skipped + Skip, +} + +#[cfg(test)] +mod tests { + use crate::world::World; + use crate::{component::Component, entity::Entity}; + use alloc::vec::Vec; + + #[test] + fn custom_relationship() { + #[derive(Component)] + #[relationship(relationship_target = LikedBy)] + struct Likes(pub Entity); + + #[derive(Component)] + #[relationship_target(relationship = Likes)] + struct LikedBy(Vec); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn(Likes(a)).id(); + let c = world.spawn(Likes(a)).id(); + assert_eq!(world.entity(a).get::().unwrap().0, &[b, c]); + } + + #[test] + fn self_relationship_fails() { + #[derive(Component)] + #[relationship(relationship_target = RelTarget)] + struct Rel(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Rel)] + struct RelTarget(Vec); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + world.entity_mut(a).insert(Rel(a)); + assert!(!world.entity(a).contains::()); + assert!(!world.entity(a).contains::()); + } + + #[test] + fn relationship_with_missing_target_fails() { + #[derive(Component)] + #[relationship(relationship_target = RelTarget)] + struct Rel(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Rel)] + struct RelTarget(Vec); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + world.despawn(a); + let b = world.spawn(Rel(a)).id(); + assert!(!world.entity(b).contains::()); + assert!(!world.entity(b).contains::()); + } + + #[test] + fn relationship_with_multiple_non_target_fields_compiles() { + #[derive(Component)] + #[relationship(relationship_target=Target)] + #[expect(dead_code, reason = "test struct")] + struct Source { 
+ #[relationship] + target: Entity, + foo: u8, + bar: u8, + } + + #[derive(Component)] + #[relationship_target(relationship=Source)] + struct Target(Vec); + + // No assert necessary, looking to make sure compilation works with the macros + } + #[test] + fn relationship_target_with_multiple_non_target_fields_compiles() { + #[derive(Component)] + #[relationship(relationship_target=Target)] + struct Source(Entity); + + #[derive(Component)] + #[relationship_target(relationship=Source)] + #[expect(dead_code, reason = "test struct")] + struct Target { + #[relationship] + target: Vec, + foo: u8, + bar: u8, + } + + // No assert necessary, looking to make sure compilation works with the macros + } +} diff --git a/crates/bevy_ecs/src/relationship/related_methods.rs b/crates/bevy_ecs/src/relationship/related_methods.rs new file mode 100644 index 0000000000000..98ef8d08321ac --- /dev/null +++ b/crates/bevy_ecs/src/relationship/related_methods.rs @@ -0,0 +1,616 @@ +use crate::{ + bundle::Bundle, + entity::{hash_set::EntityHashSet, Entity}, + relationship::{ + Relationship, RelationshipHookMode, RelationshipSourceCollection, RelationshipTarget, + }, + system::{Commands, EntityCommands}, + world::{EntityWorldMut, World}, +}; +use bevy_platform::prelude::{Box, Vec}; +use core::{marker::PhantomData, mem}; + +use super::OrderedRelationshipSourceCollection; + +impl<'w> EntityWorldMut<'w> { + /// Spawns a entity related to this entity (with the `R` relationship) by taking a bundle + pub fn with_related(&mut self, bundle: impl Bundle) -> &mut Self { + let parent = self.id(); + self.world_scope(|world| { + world.spawn((bundle, R::from(parent))); + }); + self + } + + /// Spawns entities related to this entity (with the `R` relationship) by taking a function that operates on a [`RelatedSpawner`]. 
+ pub fn with_related_entities( + &mut self, + func: impl FnOnce(&mut RelatedSpawner), + ) -> &mut Self { + let parent = self.id(); + self.world_scope(|world| { + func(&mut RelatedSpawner::new(world, parent)); + }); + self + } + + /// Relates the given entities to this entity with the relation `R`. + /// + /// See [`add_one_related`](Self::add_one_related) if you want relate only one entity. + pub fn add_related(&mut self, related: &[Entity]) -> &mut Self { + let id = self.id(); + self.world_scope(|world| { + for related in related { + world.entity_mut(*related).insert(R::from(id)); + } + }); + self + } + + /// Relates the given entities to this entity with the relation `R`, starting at this particular index. + /// + /// If the `related` has duplicates, a related entity will take the index of its last occurrence in `related`. + /// If the indices go out of bounds, they will be clamped into bounds. + /// This will not re-order existing related entities unless they are in `related`. + /// + /// # Example + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// + /// let mut world = World::new(); + /// let e0 = world.spawn_empty().id(); + /// let e1 = world.spawn_empty().id(); + /// let e2 = world.spawn_empty().id(); + /// let e3 = world.spawn_empty().id(); + /// let e4 = world.spawn_empty().id(); + /// + /// let mut main_entity = world.spawn_empty(); + /// main_entity.add_related::(&[e0, e1, e2, e2]); + /// main_entity.insert_related::(1, &[e0, e3, e4, e4]); + /// let main_id = main_entity.id(); + /// + /// let relationship_source = main_entity.get::().unwrap().collection(); + /// assert_eq!(relationship_source, &[e1, e0, e3, e2, e4]); + /// ``` + pub fn insert_related(&mut self, index: usize, related: &[Entity]) -> &mut Self + where + ::Collection: + OrderedRelationshipSourceCollection, + { + let id = self.id(); + self.world_scope(|world| { + for (offset, related) in related.iter().enumerate() { + let index = index + offset; + if world + .get::(*related) + 
.is_some_and(|relationship| relationship.get() == id) + { + world + .get_mut::(id) + .expect("hooks should have added relationship target") + .collection_mut_risky() + .place(*related, index); + } else { + world.entity_mut(*related).insert(R::from(id)); + world + .get_mut::(id) + .expect("hooks should have added relationship target") + .collection_mut_risky() + .place_most_recent(index); + } + } + }); + + self + } + + /// Removes the relation `R` between this entity and the given entities. + pub fn remove_related(&mut self, related: &[Entity]) -> &mut Self { + let id = self.id(); + self.world_scope(|world| { + for related in related { + if world + .get::(*related) + .is_some_and(|relationship| relationship.get() == id) + { + world.entity_mut(*related).remove::(); + } + } + }); + + self + } + + /// Replaces all the related entities with a new set of entities. + pub fn replace_related(&mut self, related: &[Entity]) -> &mut Self { + type Collection = + <::RelationshipTarget as RelationshipTarget>::Collection; + + if related.is_empty() { + self.remove::(); + + return self; + } + + let Some(mut existing_relations) = self.get_mut::() else { + return self.add_related::(related); + }; + + // We take the collection here so we can modify it without taking the component itself (this would create archetype move). + // SAFETY: We eventually return the correctly initialized collection into the target. + let mut existing_relations = mem::replace( + existing_relations.collection_mut_risky(), + Collection::::with_capacity(0), + ); + + let mut potential_relations = EntityHashSet::from_iter(related.iter().copied()); + + let id = self.id(); + self.world_scope(|world| { + for related in existing_relations.iter() { + if !potential_relations.remove(related) { + world.entity_mut(related).remove::(); + } + } + + for related in potential_relations { + // SAFETY: We'll manually be adjusting the contents of the parent to fit the final state. 
+ world + .entity_mut(related) + .insert_with_relationship_hook_mode(R::from(id), RelationshipHookMode::Skip); + } + }); + + // SAFETY: The entities we're inserting will be the entities that were either already there or entities that we've just inserted. + existing_relations.clear(); + existing_relations.extend_from_iter(related.iter().copied()); + self.insert(R::RelationshipTarget::from_collection_risky( + existing_relations, + )); + + self + } + + /// Replaces all the related entities with a new set of entities. + /// + /// This is a more efficient of [`Self::replace_related`] which doesn't allocate. + /// The passed in arguments must adhere to these invariants: + /// - `entities_to_unrelate`: A slice of entities to remove from the relationship source. + /// Entities need not be related to this entity, but must not appear in `entities_to_relate` + /// - `entities_to_relate`: A slice of entities to relate to this entity. + /// This must contain all entities that will remain related (i.e. not those in `entities_to_unrelate`) plus the newly related entities. + /// - `newly_related_entities`: A subset of `entities_to_relate` containing only entities not already related to this entity. + /// - Slices **must not** contain any duplicates + /// + /// # Warning + /// + /// Violating these invariants may lead to panics, crashes or unpredictable engine behavior. + /// + /// # Panics + /// + /// Panics when debug assertions are enabled and any invariants are broken. + /// + // TODO: Consider making these iterators so users aren't required to allocate a separate buffers for the different slices. 
+ pub fn replace_related_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + #[cfg(debug_assertions)] + { + let entities_to_relate = EntityHashSet::from_iter(entities_to_relate.iter().copied()); + let entities_to_unrelate = + EntityHashSet::from_iter(entities_to_unrelate.iter().copied()); + let mut newly_related_entities = + EntityHashSet::from_iter(newly_related_entities.iter().copied()); + assert!( + entities_to_relate.is_disjoint(&entities_to_unrelate), + "`entities_to_relate` ({entities_to_relate:?}) shared entities with `entities_to_unrelate` ({entities_to_unrelate:?})" + ); + assert!( + newly_related_entities.is_disjoint(&entities_to_unrelate), + "`newly_related_entities` ({newly_related_entities:?}) shared entities with `entities_to_unrelate ({entities_to_unrelate:?})`" + ); + assert!( + newly_related_entities.is_subset(&entities_to_relate), + "`newly_related_entities` ({newly_related_entities:?}) wasn't a subset of `entities_to_relate` ({entities_to_relate:?})" + ); + + if let Some(target) = self.get::() { + let existing_relationships: EntityHashSet = target.collection().iter().collect(); + + assert!( + existing_relationships.is_disjoint(&newly_related_entities), + "`newly_related_entities` contains an entity that wouldn't be newly related" + ); + + newly_related_entities.extend(existing_relationships); + newly_related_entities -= &entities_to_unrelate; + } + + assert_eq!(newly_related_entities, entities_to_relate, "`entities_to_relate` ({entities_to_relate:?}) didn't contain all entities that would end up related"); + }; + + if !self.contains::() { + self.add_related::(entities_to_relate); + + return self; + }; + + let this = self.id(); + self.world_scope(|world| { + for unrelate in entities_to_unrelate { + world.entity_mut(*unrelate).remove::(); + } + + for new_relation in newly_related_entities { + // We're changing the target collection manually so 
don't run the insert hook + world + .entity_mut(*new_relation) + .insert_with_relationship_hook_mode(R::from(this), RelationshipHookMode::Skip); + } + }); + + if !entities_to_relate.is_empty() { + if let Some(mut target) = self.get_mut::() { + // SAFETY: The invariants expected by this function mean we'll only be inserting entities that are already related. + let collection = target.collection_mut_risky(); + collection.clear(); + + collection.extend_from_iter(entities_to_relate.iter().copied()); + } else { + let mut empty = + ::Collection::with_capacity( + entities_to_relate.len(), + ); + empty.extend_from_iter(entities_to_relate.iter().copied()); + + // SAFETY: We've just initialized this collection and we know there's no `RelationshipTarget` on `self` + self.insert(R::RelationshipTarget::from_collection_risky(empty)); + } + } + + self + } + + /// Relates the given entity to this with the relation `R`. + /// + /// See [`add_related`](Self::add_related) if you want to relate more than one entity. + pub fn add_one_related(&mut self, entity: Entity) -> &mut Self { + self.add_related::(&[entity]) + } + + /// Despawns entities that relate to this one via the given [`RelationshipTarget`]. + /// This entity will not be despawned. + pub fn despawn_related(&mut self) -> &mut Self { + if let Some(sources) = self.take::() { + self.world_scope(|world| { + for entity in sources.iter() { + if let Ok(entity_mut) = world.get_entity_mut(entity) { + entity_mut.despawn(); + } + } + }); + } + self + } + + /// Inserts a component or bundle of components into the entity and all related entities, + /// traversing the relationship tracked in `S` in a breadth-first manner. + /// + /// # Warning + /// + /// This method should only be called on relationships that form a tree-like structure. + /// Any cycles will cause this method to loop infinitely. 
+ // We could keep track of a list of visited entities and track cycles, + // but this is not a very well-defined operation (or hard to write) for arbitrary relationships. + pub fn insert_recursive( + &mut self, + bundle: impl Bundle + Clone, + ) -> &mut Self { + self.insert(bundle.clone()); + if let Some(relationship_target) = self.get::() { + let related_vec: Vec = relationship_target.iter().collect(); + for related in related_vec { + self.world_scope(|world| { + world + .entity_mut(related) + .insert_recursive::(bundle.clone()); + }); + } + } + + self + } + + /// Removes a component or bundle of components of type `B` from the entity and all related entities, + /// traversing the relationship tracked in `S` in a breadth-first manner. + /// + /// # Warning + /// + /// This method should only be called on relationships that form a tree-like structure. + /// Any cycles will cause this method to loop infinitely. + pub fn remove_recursive(&mut self) -> &mut Self { + self.remove::(); + if let Some(relationship_target) = self.get::() { + let related_vec: Vec = relationship_target.iter().collect(); + for related in related_vec { + self.world_scope(|world| { + world.entity_mut(related).remove_recursive::(); + }); + } + } + + self + } +} + +impl<'a> EntityCommands<'a> { + /// Spawns a entity related to this entity (with the `R` relationship) by taking a bundle + pub fn with_related(&mut self, bundle: impl Bundle) -> &mut Self { + let parent = self.id(); + self.commands.spawn((bundle, R::from(parent))); + self + } + + /// Spawns entities related to this entity (with the `R` relationship) by taking a function that operates on a [`RelatedSpawner`]. + pub fn with_related_entities( + &mut self, + func: impl FnOnce(&mut RelatedSpawnerCommands), + ) -> &mut Self { + let id = self.id(); + func(&mut RelatedSpawnerCommands::new(self.commands(), id)); + self + } + + /// Relates the given entities to this entity with the relation `R`. 
+ /// + /// See [`add_one_related`](Self::add_one_related) if you want relate only one entity. + pub fn add_related(&mut self, related: &[Entity]) -> &mut Self { + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.add_related::(&related); + }) + } + + /// Relates the given entities to this entity with the relation `R`, starting at this particular index. + /// + /// If the `related` has duplicates, a related entity will take the index of its last occurrence in `related`. + /// If the indices go out of bounds, they will be clamped into bounds. + /// This will not re-order existing related entities unless they are in `related`. + pub fn insert_related(&mut self, index: usize, related: &[Entity]) -> &mut Self + where + ::Collection: + OrderedRelationshipSourceCollection, + { + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.insert_related::(index, &related); + }) + } + + /// Relates the given entity to this with the relation `R`. + /// + /// See [`add_related`](Self::add_related) if you want to relate more than one entity. + pub fn add_one_related(&mut self, entity: Entity) -> &mut Self { + self.add_related::(&[entity]) + } + + /// Removes the relation `R` between this entity and the given entities. + pub fn remove_related(&mut self, related: &[Entity]) -> &mut Self { + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.remove_related::(&related); + }) + } + + /// Replaces all the related entities with the given set of new related entities. + pub fn replace_related(&mut self, related: &[Entity]) -> &mut Self { + let related: Box<[Entity]> = related.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.replace_related::(&related); + }) + } + + /// Replaces all the related entities with a new set of entities. 
+ /// + /// # Warning + /// + /// Failing to maintain the functions invariants may lead to erratic engine behavior including random crashes. + /// Refer to [`EntityWorldMut::replace_related_with_difference`] for a list of these invariants. + /// + /// # Panics + /// + /// Panics when debug assertions are enable, an invariant is are broken and the command is executed. + pub fn replace_related_with_difference( + &mut self, + entities_to_unrelate: &[Entity], + entities_to_relate: &[Entity], + newly_related_entities: &[Entity], + ) -> &mut Self { + let entities_to_unrelate: Box<[Entity]> = entities_to_unrelate.into(); + let entities_to_relate: Box<[Entity]> = entities_to_relate.into(); + let newly_related_entities: Box<[Entity]> = newly_related_entities.into(); + + self.queue(move |mut entity: EntityWorldMut| { + entity.replace_related_with_difference::( + &entities_to_unrelate, + &entities_to_relate, + &newly_related_entities, + ); + }) + } + + /// Despawns entities that relate to this one via the given [`RelationshipTarget`]. + /// This entity will not be despawned. + pub fn despawn_related(&mut self) -> &mut Self { + self.queue(move |mut entity: EntityWorldMut| { + entity.despawn_related::(); + }) + } + + /// Inserts a component or bundle of components into the entity and all related entities, + /// traversing the relationship tracked in `S` in a breadth-first manner. + /// + /// # Warning + /// + /// This method should only be called on relationships that form a tree-like structure. + /// Any cycles will cause this method to loop infinitely. + pub fn insert_recursive( + &mut self, + bundle: impl Bundle + Clone, + ) -> &mut Self { + self.queue(move |mut entity: EntityWorldMut| { + entity.insert_recursive::(bundle); + }) + } + + /// Removes a component or bundle of components of type `B` from the entity and all related entities, + /// traversing the relationship tracked in `S` in a breadth-first manner. 
+ /// + /// # Warning + /// + /// This method should only be called on relationships that form a tree-like structure. + /// Any cycles will cause this method to loop infinitely. + pub fn remove_recursive(&mut self) -> &mut Self { + self.queue(move |mut entity: EntityWorldMut| { + entity.remove_recursive::(); + }) + } +} + +/// Directly spawns related "source" entities with the given [`Relationship`], targeting +/// a specific entity. +pub struct RelatedSpawner<'w, R: Relationship> { + target: Entity, + world: &'w mut World, + _marker: PhantomData, +} + +impl<'w, R: Relationship> RelatedSpawner<'w, R> { + /// Creates a new instance that will spawn entities targeting the `target` entity. + pub fn new(world: &'w mut World, target: Entity) -> Self { + Self { + world, + target, + _marker: PhantomData, + } + } + + /// Spawns an entity with the given `bundle` and an `R` relationship targeting the `target` + /// entity this spawner was initialized with. + pub fn spawn(&mut self, bundle: impl Bundle) -> EntityWorldMut<'_> { + self.world.spawn((R::from(self.target), bundle)) + } + + /// Spawns an entity with an `R` relationship targeting the `target` + /// entity this spawner was initialized with. + pub fn spawn_empty(&mut self) -> EntityWorldMut<'_> { + self.world.spawn(R::from(self.target)) + } + + /// Returns the "target entity" used when spawning entities with an `R` [`Relationship`]. + pub fn target_entity(&self) -> Entity { + self.target + } +} + +/// Uses commands to spawn related "source" entities with the given [`Relationship`], targeting +/// a specific entity. +pub struct RelatedSpawnerCommands<'w, R: Relationship> { + target: Entity, + commands: Commands<'w, 'w>, + _marker: PhantomData, +} + +impl<'w, R: Relationship> RelatedSpawnerCommands<'w, R> { + /// Creates a new instance that will spawn entities targeting the `target` entity. 
+ pub fn new(commands: Commands<'w, 'w>, target: Entity) -> Self { + Self { + commands, + target, + _marker: PhantomData, + } + } + + /// Spawns an entity with the given `bundle` and an `R` relationship targeting the `target` + /// entity this spawner was initialized with. + pub fn spawn(&mut self, bundle: impl Bundle) -> EntityCommands<'_> { + self.commands.spawn((R::from(self.target), bundle)) + } + + /// Spawns an entity with an `R` relationship targeting the `target` + /// entity this spawner was initialized with. + pub fn spawn_empty(&mut self) -> EntityCommands<'_> { + self.commands.spawn(R::from(self.target)) + } + + /// Returns the "target entity" used when spawning entities with an `R` [`Relationship`]. + pub fn target_entity(&self) -> Entity { + self.target + } + + /// Returns the underlying [`Commands`]. + pub fn commands(&mut self) -> Commands { + self.commands.reborrow() + } + + /// Returns a mutable reference to the underlying [`Commands`]. + pub fn commands_mut(&mut self) -> &mut Commands<'w, 'w> { + &mut self.commands + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::prelude::{ChildOf, Children, Component}; + + #[derive(Component, Clone, Copy)] + struct TestComponent; + + #[test] + fn insert_and_remove_recursive() { + let mut world = World::new(); + + let a = world.spawn_empty().id(); + let b = world.spawn(ChildOf(a)).id(); + let c = world.spawn(ChildOf(a)).id(); + let d = world.spawn(ChildOf(b)).id(); + + world + .entity_mut(a) + .insert_recursive::(TestComponent); + + for entity in [a, b, c, d] { + assert!(world.entity(entity).contains::()); + } + + world + .entity_mut(b) + .remove_recursive::(); + + // Parent + assert!(world.entity(a).contains::()); + // Target + assert!(!world.entity(b).contains::()); + // Sibling + assert!(world.entity(c).contains::()); + // Child + assert!(!world.entity(d).contains::()); + + world + .entity_mut(a) + .remove_recursive::(); + + for entity in [a, b, c, d] { + 
assert!(!world.entity(entity).contains::()); + } + } +} diff --git a/crates/bevy_ecs/src/relationship/relationship_query.rs b/crates/bevy_ecs/src/relationship/relationship_query.rs new file mode 100644 index 0000000000000..a2ec937c29b9a --- /dev/null +++ b/crates/bevy_ecs/src/relationship/relationship_query.rs @@ -0,0 +1,272 @@ +use crate::{ + entity::Entity, + query::{QueryData, QueryFilter}, + relationship::{Relationship, RelationshipTarget}, + system::Query, +}; +use alloc::collections::VecDeque; +use smallvec::SmallVec; + +use super::SourceIter; + +impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { + /// If the given `entity` contains the `R` [`Relationship`] component, returns the + /// target entity of that relationship. + pub fn related(&'w self, entity: Entity) -> Option + where + ::ReadOnly: QueryData = &'w R>, + { + self.get(entity).map(R::get).ok() + } + + /// If the given `entity` contains the `S` [`RelationshipTarget`] component, returns the + /// source entities stored on that component. + pub fn relationship_sources( + &'w self, + entity: Entity, + ) -> impl Iterator + 'w + where + ::ReadOnly: QueryData = &'w S>, + { + self.get(entity) + .into_iter() + .flat_map(RelationshipTarget::iter) + } + + /// Recursively walks up the tree defined by the given `R` [`Relationship`] until + /// there are no more related entities, returning the "root entity" of the relationship hierarchy. + /// + /// # Warning + /// + /// For relationship graphs that contain loops, this could loop infinitely. + /// If your relationship is not a tree (like Bevy's hierarchy), be sure to stop if you encounter a duplicate entity. 
+ pub fn root_ancestor(&'w self, entity: Entity) -> Entity + where + ::ReadOnly: QueryData = &'w R>, + { + // Recursively search up the tree until we're out of parents + match self.get(entity) { + Ok(parent) => self.root_ancestor(parent.get()), + Err(_) => entity, + } + } + + /// Iterates all "leaf entities" as defined by the [`RelationshipTarget`] hierarchy. + /// + /// # Warning + /// + /// For relationship graphs that contain loops, this could loop infinitely. + /// If your relationship is not a tree (like Bevy's hierarchy), be sure to stop if you encounter a duplicate entity. + pub fn iter_leaves( + &'w self, + entity: Entity, + ) -> impl Iterator + 'w + where + ::ReadOnly: QueryData = &'w S>, + SourceIter<'w, S>: DoubleEndedIterator, + { + self.iter_descendants_depth_first(entity).filter(|entity| { + self.get(*entity) + // These are leaf nodes if they have the `Children` component but it's empty + .map(|children| children.len() == 0) + // Or if they don't have the `Children` component at all + .unwrap_or(true) + }) + } + + /// Iterates all sibling entities that also have the `R` [`Relationship`] with the same target entity. + pub fn iter_siblings( + &'w self, + entity: Entity, + ) -> impl Iterator + 'w + where + D::ReadOnly: QueryData = (Option<&'w R>, Option<&'w R::RelationshipTarget>)>, + { + self.get(entity) + .ok() + .and_then(|(maybe_parent, _)| maybe_parent.map(R::get)) + .and_then(|parent| self.get(parent).ok()) + .and_then(|(_, maybe_children)| maybe_children) + .into_iter() + .flat_map(move |children| children.iter().filter(move |child| *child != entity)) + } + + /// Iterates all descendant entities as defined by the given `entity`'s [`RelationshipTarget`] and their recursive + /// [`RelationshipTarget`]. + /// + /// # Warning + /// + /// For relationship graphs that contain loops, this could loop infinitely. + /// If your relationship is not a tree (like Bevy's hierarchy), be sure to stop if you encounter a duplicate entity. 
+ pub fn iter_descendants( + &'w self, + entity: Entity, + ) -> DescendantIter<'w, 's, D, F, S> + where + D::ReadOnly: QueryData = &'w S>, + { + DescendantIter::new(self, entity) + } + + /// Iterates all descendant entities as defined by the given `entity`'s [`RelationshipTarget`] and their recursive + /// [`RelationshipTarget`] in depth-first order. + /// + /// # Warning + /// + /// For relationship graphs that contain loops, this could loop infinitely. + /// If your relationship is not a tree (like Bevy's hierarchy), be sure to stop if you encounter a duplicate entity. + pub fn iter_descendants_depth_first( + &'w self, + entity: Entity, + ) -> DescendantDepthFirstIter<'w, 's, D, F, S> + where + D::ReadOnly: QueryData = &'w S>, + SourceIter<'w, S>: DoubleEndedIterator, + { + DescendantDepthFirstIter::new(self, entity) + } + + /// Iterates all ancestors of the given `entity` as defined by the `R` [`Relationship`]. + /// + /// # Warning + /// + /// For relationship graphs that contain loops, this could loop infinitely. + /// If your relationship is not a tree (like Bevy's hierarchy), be sure to stop if you encounter a duplicate entity. + pub fn iter_ancestors( + &'w self, + entity: Entity, + ) -> AncestorIter<'w, 's, D, F, R> + where + D::ReadOnly: QueryData = &'w R>, + { + AncestorIter::new(self, entity) + } +} + +/// An [`Iterator`] of [`Entity`]s over the descendants of an [`Entity`]. +/// +/// Traverses the hierarchy breadth-first. +pub struct DescendantIter<'w, 's, D: QueryData, F: QueryFilter, S: RelationshipTarget> +where + D::ReadOnly: QueryData = &'w S>, +{ + children_query: &'w Query<'w, 's, D, F>, + vecdeque: VecDeque, +} + +impl<'w, 's, D: QueryData, F: QueryFilter, S: RelationshipTarget> DescendantIter<'w, 's, D, F, S> +where + D::ReadOnly: QueryData = &'w S>, +{ + /// Returns a new [`DescendantIter`]. 
+ pub fn new(children_query: &'w Query<'w, 's, D, F>, entity: Entity) -> Self { + DescendantIter { + children_query, + vecdeque: children_query + .get(entity) + .into_iter() + .flat_map(RelationshipTarget::iter) + .collect(), + } + } +} + +impl<'w, 's, D: QueryData, F: QueryFilter, S: RelationshipTarget> Iterator + for DescendantIter<'w, 's, D, F, S> +where + D::ReadOnly: QueryData = &'w S>, +{ + type Item = Entity; + + fn next(&mut self) -> Option { + let entity = self.vecdeque.pop_front()?; + + if let Ok(children) = self.children_query.get(entity) { + self.vecdeque.extend(children.iter()); + } + + Some(entity) + } +} + +/// An [`Iterator`] of [`Entity`]s over the descendants of an [`Entity`]. +/// +/// Traverses the hierarchy depth-first. +pub struct DescendantDepthFirstIter<'w, 's, D: QueryData, F: QueryFilter, S: RelationshipTarget> +where + D::ReadOnly: QueryData = &'w S>, +{ + children_query: &'w Query<'w, 's, D, F>, + stack: SmallVec<[Entity; 8]>, +} + +impl<'w, 's, D: QueryData, F: QueryFilter, S: RelationshipTarget> + DescendantDepthFirstIter<'w, 's, D, F, S> +where + D::ReadOnly: QueryData = &'w S>, + SourceIter<'w, S>: DoubleEndedIterator, +{ + /// Returns a new [`DescendantDepthFirstIter`]. + pub fn new(children_query: &'w Query<'w, 's, D, F>, entity: Entity) -> Self { + DescendantDepthFirstIter { + children_query, + stack: children_query + .get(entity) + .map_or(SmallVec::new(), |children| children.iter().rev().collect()), + } + } +} + +impl<'w, 's, D: QueryData, F: QueryFilter, S: RelationshipTarget> Iterator + for DescendantDepthFirstIter<'w, 's, D, F, S> +where + D::ReadOnly: QueryData = &'w S>, + SourceIter<'w, S>: DoubleEndedIterator, +{ + type Item = Entity; + + fn next(&mut self) -> Option { + let entity = self.stack.pop()?; + + if let Ok(children) = self.children_query.get(entity) { + self.stack.extend(children.iter().rev()); + } + + Some(entity) + } +} + +/// An [`Iterator`] of [`Entity`]s over the ancestors of an [`Entity`]. 
+pub struct AncestorIter<'w, 's, D: QueryData, F: QueryFilter, R: Relationship> +where + D::ReadOnly: QueryData = &'w R>, +{ + parent_query: &'w Query<'w, 's, D, F>, + next: Option, +} + +impl<'w, 's, D: QueryData, F: QueryFilter, R: Relationship> AncestorIter<'w, 's, D, F, R> +where + D::ReadOnly: QueryData = &'w R>, +{ + /// Returns a new [`AncestorIter`]. + pub fn new(parent_query: &'w Query<'w, 's, D, F>, entity: Entity) -> Self { + AncestorIter { + parent_query, + next: Some(entity), + } + } +} + +impl<'w, 's, D: QueryData, F: QueryFilter, R: Relationship> Iterator + for AncestorIter<'w, 's, D, F, R> +where + D::ReadOnly: QueryData = &'w R>, +{ + type Item = Entity; + + fn next(&mut self) -> Option { + self.next = self.parent_query.get(self.next?).ok().map(R::get); + self.next + } +} diff --git a/crates/bevy_ecs/src/relationship/relationship_source_collection.rs b/crates/bevy_ecs/src/relationship/relationship_source_collection.rs new file mode 100644 index 0000000000000..c2c9bd94d8235 --- /dev/null +++ b/crates/bevy_ecs/src/relationship/relationship_source_collection.rs @@ -0,0 +1,587 @@ +use crate::entity::{hash_set::EntityHashSet, Entity}; +use alloc::vec::Vec; +use smallvec::SmallVec; + +/// The internal [`Entity`] collection used by a [`RelationshipTarget`](crate::relationship::RelationshipTarget) component. +/// This is not intended to be modified directly by users, as it could invalidate the correctness of relationships. +pub trait RelationshipSourceCollection { + /// The type of iterator returned by the `iter` method. + /// + /// This is an associated type (rather than using a method that returns an opaque return-position impl trait) + /// to ensure that all methods and traits (like [`DoubleEndedIterator`]) of the underlying collection's iterator + /// are available to the user when implemented without unduly restricting the possible collections. 
+ /// + /// The [`SourceIter`](super::SourceIter) type alias can be helpful to reduce confusion when working with this associated type. + type SourceIter<'a>: Iterator + where + Self: 'a; + + /// Creates a new empty instance. + fn new() -> Self; + + /// Returns an instance with the given pre-allocated entity `capacity`. + /// + /// Some collections will ignore the provided `capacity` and return a default instance. + fn with_capacity(capacity: usize) -> Self; + + /// Reserves capacity for at least `additional` more entities to be inserted. + /// + /// Not all collections support this operation, in which case it is a no-op. + fn reserve(&mut self, additional: usize); + + /// Adds the given `entity` to the collection. + /// + /// Returns whether the entity was added to the collection. + /// Mainly useful when dealing with collections that don't allow + /// multiple instances of the same entity ([`EntityHashSet`]). + fn add(&mut self, entity: Entity) -> bool; + + /// Removes the given `entity` from the collection. + /// + /// Returns whether the collection actually contained + /// the entity. + fn remove(&mut self, entity: Entity) -> bool; + + /// Iterates all entities in the collection. + fn iter(&self) -> Self::SourceIter<'_>; + + /// Returns the current length of the collection. + fn len(&self) -> usize; + + /// Clears the collection. + fn clear(&mut self); + + /// Attempts to save memory by shrinking the capacity to fit the current length. + /// + /// This operation is a no-op for collections that do not support it. + fn shrink_to_fit(&mut self); + + /// Returns true if the collection contains no entities. + #[inline] + fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Add multiple entities to collection at once. + /// + /// May be faster than repeatedly calling [`Self::add`]. 
+ fn extend_from_iter(&mut self, entities: impl IntoIterator) { + // The method name shouldn't conflict with `Extend::extend` as it's in the rust prelude and + // would always conflict with it. + for entity in entities { + self.add(entity); + } + } +} + +/// This trait signals that a [`RelationshipSourceCollection`] is ordered. +pub trait OrderedRelationshipSourceCollection: RelationshipSourceCollection { + /// Inserts the entity at a specific index. + /// If the index is too large, the entity will be added to the end of the collection. + fn insert(&mut self, index: usize, entity: Entity); + /// Removes the entity at the specified idnex if it exists. + fn remove_at(&mut self, index: usize) -> Option; + /// Inserts the entity at a specific index. + /// This will never reorder other entities. + /// If the index is too large, the entity will be added to the end of the collection. + fn insert_stable(&mut self, index: usize, entity: Entity); + /// Removes the entity at the specified idnex if it exists. + /// This will never reorder other entities. + fn remove_at_stable(&mut self, index: usize) -> Option; + /// Sorts the source collection. + fn sort(&mut self); + /// Inserts the entity at the proper place to maintain sorting. + fn insert_sorted(&mut self, entity: Entity); + + /// This places the most recently added entity at the particular index. + fn place_most_recent(&mut self, index: usize); + + /// This places the given entity at the particular index. + /// This will do nothing if the entity is not in the collection. + /// If the index is out of bounds, this will put the entity at the end. + fn place(&mut self, entity: Entity, index: usize); + + /// Adds the entity at index 0. + fn push_front(&mut self, entity: Entity) { + self.insert(0, entity); + } + + /// Adds the entity to the back of the collection. + fn push_back(&mut self, entity: Entity) { + self.insert(usize::MAX, entity); + } + + /// Removes the first entity. 
+ fn pop_front(&mut self) -> Option { + self.remove_at(0) + } + + /// Removes the last entity. + fn pop_back(&mut self) -> Option { + if self.is_empty() { + None + } else { + self.remove_at(self.len() - 1) + } + } +} + +impl RelationshipSourceCollection for Vec { + type SourceIter<'a> = core::iter::Copied>; + + fn new() -> Self { + Vec::new() + } + + fn reserve(&mut self, additional: usize) { + Vec::reserve(self, additional); + } + + fn with_capacity(capacity: usize) -> Self { + Vec::with_capacity(capacity) + } + + fn add(&mut self, entity: Entity) -> bool { + Vec::push(self, entity); + + true + } + + fn remove(&mut self, entity: Entity) -> bool { + if let Some(index) = <[Entity]>::iter(self).position(|e| *e == entity) { + Vec::remove(self, index); + return true; + } + + false + } + + fn iter(&self) -> Self::SourceIter<'_> { + <[Entity]>::iter(self).copied() + } + + fn len(&self) -> usize { + Vec::len(self) + } + + fn clear(&mut self) { + self.clear(); + } + + fn shrink_to_fit(&mut self) { + Vec::shrink_to_fit(self); + } + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + self.extend(entities); + } +} + +impl OrderedRelationshipSourceCollection for Vec { + fn insert(&mut self, index: usize, entity: Entity) { + self.push(entity); + let len = self.len(); + if index < len { + self.swap(index, len - 1); + } + } + + fn remove_at(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.swap_remove(index)) + } + + fn insert_stable(&mut self, index: usize, entity: Entity) { + if index < self.len() { + Vec::insert(self, index, entity); + } else { + self.push(entity); + } + } + + fn remove_at_stable(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.remove(index)) + } + + fn sort(&mut self) { + self.sort_unstable(); + } + + fn insert_sorted(&mut self, entity: Entity) { + let index = self.partition_point(|e| e <= &entity); + self.insert_stable(index, entity); + } + + fn place_most_recent(&mut self, index: usize) { + if let 
Some(entity) = self.pop() { + let index = index.min(self.len().saturating_sub(1)); + self.insert(index, entity); + } + } + + fn place(&mut self, entity: Entity, index: usize) { + if let Some(current) = <[Entity]>::iter(self).position(|e| *e == entity) { + // The len is at least 1, so the subtraction is safe. + let index = index.min(self.len().saturating_sub(1)); + Vec::remove(self, current); + self.insert(index, entity); + }; + } +} + +impl RelationshipSourceCollection for EntityHashSet { + type SourceIter<'a> = core::iter::Copied>; + + fn new() -> Self { + EntityHashSet::new() + } + + fn reserve(&mut self, additional: usize) { + self.0.reserve(additional); + } + + fn with_capacity(capacity: usize) -> Self { + EntityHashSet::with_capacity(capacity) + } + + fn add(&mut self, entity: Entity) -> bool { + self.insert(entity) + } + + fn remove(&mut self, entity: Entity) -> bool { + // We need to call the remove method on the underlying hash set, + // which takes its argument by reference + self.0.remove(&entity) + } + + fn iter(&self) -> Self::SourceIter<'_> { + self.iter().copied() + } + + fn len(&self) -> usize { + self.len() + } + + fn clear(&mut self) { + self.0.clear(); + } + + fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit(); + } + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + self.extend(entities); + } +} + +impl RelationshipSourceCollection for SmallVec<[Entity; N]> { + type SourceIter<'a> = core::iter::Copied>; + + fn new() -> Self { + SmallVec::new() + } + + fn reserve(&mut self, additional: usize) { + SmallVec::reserve(self, additional); + } + + fn with_capacity(capacity: usize) -> Self { + SmallVec::with_capacity(capacity) + } + + fn add(&mut self, entity: Entity) -> bool { + SmallVec::push(self, entity); + + true + } + + fn remove(&mut self, entity: Entity) -> bool { + if let Some(index) = <[Entity]>::iter(self).position(|e| *e == entity) { + SmallVec::remove(self, index); + return true; + } + + false + } + + fn iter(&self) -> 
Self::SourceIter<'_> { + <[Entity]>::iter(self).copied() + } + + fn len(&self) -> usize { + SmallVec::len(self) + } + + fn clear(&mut self) { + self.clear(); + } + + fn shrink_to_fit(&mut self) { + SmallVec::shrink_to_fit(self); + } + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + self.extend(entities); + } +} + +impl RelationshipSourceCollection for Entity { + type SourceIter<'a> = core::option::IntoIter; + + fn new() -> Self { + Entity::PLACEHOLDER + } + + fn reserve(&mut self, _: usize) {} + + fn with_capacity(_capacity: usize) -> Self { + Self::new() + } + + fn add(&mut self, entity: Entity) -> bool { + assert_eq!( + *self, + Entity::PLACEHOLDER, + "Entity {entity} attempted to target an entity with a one-to-one relationship, but it is already targeted by {}. You must remove the original relationship first.", + *self + ); + *self = entity; + + true + } + + fn remove(&mut self, entity: Entity) -> bool { + if *self == entity { + *self = Entity::PLACEHOLDER; + + return true; + } + + false + } + + fn iter(&self) -> Self::SourceIter<'_> { + if *self == Entity::PLACEHOLDER { + None.into_iter() + } else { + Some(*self).into_iter() + } + } + + fn len(&self) -> usize { + if *self == Entity::PLACEHOLDER { + return 0; + } + 1 + } + + fn clear(&mut self) { + *self = Entity::PLACEHOLDER; + } + + fn shrink_to_fit(&mut self) {} + + fn extend_from_iter(&mut self, entities: impl IntoIterator) { + for entity in entities { + assert_eq!( + *self, + Entity::PLACEHOLDER, + "Entity {entity} attempted to target an entity with a one-to-one relationship, but it is already targeted by {}. 
You must remove the original relationship first.", + *self + ); + *self = entity; + } + } +} + +impl OrderedRelationshipSourceCollection for SmallVec<[Entity; N]> { + fn insert(&mut self, index: usize, entity: Entity) { + self.push(entity); + let len = self.len(); + if index < len { + self.swap(index, len - 1); + } + } + + fn remove_at(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.swap_remove(index)) + } + + fn insert_stable(&mut self, index: usize, entity: Entity) { + if index < self.len() { + SmallVec::<[Entity; N]>::insert(self, index, entity); + } else { + self.push(entity); + } + } + + fn remove_at_stable(&mut self, index: usize) -> Option { + (index < self.len()).then(|| self.remove(index)) + } + + fn sort(&mut self) { + self.sort_unstable(); + } + + fn insert_sorted(&mut self, entity: Entity) { + let index = self.partition_point(|e| e <= &entity); + self.insert_stable(index, entity); + } + + fn place_most_recent(&mut self, index: usize) { + if let Some(entity) = self.pop() { + let index = index.min(self.len() - 1); + self.insert(index, entity); + } + } + + fn place(&mut self, entity: Entity, index: usize) { + if let Some(current) = <[Entity]>::iter(self).position(|e| *e == entity) { + // The len is at least 1, so the subtraction is safe. 
+ let index = index.min(self.len() - 1); + SmallVec::<[Entity; N]>::remove(self, current); + self.insert(index, entity); + }; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::prelude::{Component, World}; + use crate::relationship::RelationshipTarget; + + #[test] + fn vec_relationship_source_collection() { + #[derive(Component)] + #[relationship(relationship_target = RelTarget)] + struct Rel(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Rel, linked_spawn)] + struct RelTarget(Vec); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Rel(b)); + + let rel_target = world.get::(b).unwrap(); + let collection = rel_target.collection(); + assert_eq!(collection, &alloc::vec!(a)); + } + + #[test] + fn smallvec_relationship_source_collection() { + #[derive(Component)] + #[relationship(relationship_target = RelTarget)] + struct Rel(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Rel, linked_spawn)] + struct RelTarget(SmallVec<[Entity; 4]>); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Rel(b)); + + let rel_target = world.get::(b).unwrap(); + let collection = rel_target.collection(); + assert_eq!(collection, &SmallVec::from_buf([a])); + } + + #[test] + fn entity_relationship_source_collection() { + #[derive(Component)] + #[relationship(relationship_target = RelTarget)] + struct Rel(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Rel)] + struct RelTarget(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Rel(b)); + + let rel_target = world.get::(b).unwrap(); + let collection = rel_target.collection(); + assert_eq!(collection, &a); + } + + #[test] + fn one_to_one_relationships() { + #[derive(Component)] + 
#[relationship(relationship_target = Below)] + struct Above(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Above)] + struct Below(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Above(b)); + assert_eq!(a, world.get::(b).unwrap().0); + + // Verify removing target removes relationship + world.entity_mut(b).remove::(); + assert!(world.get::(a).is_none()); + + // Verify removing relationship removes target + world.entity_mut(a).insert(Above(b)); + world.entity_mut(a).remove::(); + assert!(world.get::(b).is_none()); + + // Actually - a is above c now! Verify relationship was updated correctly + let c = world.spawn_empty().id(); + world.entity_mut(a).insert(Above(c)); + assert!(world.get::(b).is_none()); + assert_eq!(a, world.get::(c).unwrap().0); + } + + #[test] + #[should_panic] + fn one_to_one_relationship_shared_target() { + #[derive(Component)] + #[relationship(relationship_target = Below)] + struct Above(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Above)] + struct Below(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + let c = world.spawn_empty().id(); + + world.entity_mut(a).insert(Above(c)); + world.entity_mut(b).insert(Above(c)); + } + + #[test] + fn one_to_one_relationship_reinsert() { + #[derive(Component)] + #[relationship(relationship_target = Below)] + struct Above(Entity); + + #[derive(Component)] + #[relationship_target(relationship = Above)] + struct Below(Entity); + + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + world.entity_mut(a).insert(Above(b)); + world.entity_mut(a).insert(Above(b)); + } +} diff --git a/crates/bevy_ecs/src/removal_detection.rs b/crates/bevy_ecs/src/removal_detection.rs index 7df072ab2d39f..64cc63a7ce254 100644 --- a/crates/bevy_ecs/src/removal_detection.rs 
+++ b/crates/bevy_ecs/src/removal_detection.rs @@ -1,7 +1,6 @@ //! Alerting events when a component is removed from an entity. use crate::{ - self as bevy_ecs, component::{Component, ComponentId, ComponentIdFor, Tick}, entity::Entity, event::{Event, EventCursor, EventId, EventIterator, EventIteratorWithId, Events}, @@ -27,7 +26,7 @@ use core::{ /// Internally, `RemovedComponents` uses these as an `Events`. #[derive(Event, Debug, Clone, Into)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] -#[cfg_attr(feature = "bevy_reflect", reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", reflect(Debug, Clone))] pub struct RemovedComponentEntity(Entity); /// Wrapper around a [`EventCursor`] so that we @@ -134,7 +133,7 @@ impl RemovedComponentEvents { /// # #[derive(Component)] /// # struct MyComponent; /// fn react_on_removal(mut removed: RemovedComponents) { -/// removed.read().for_each(|removed_entity| println!("{:?}", removed_entity)); +/// removed.read().for_each(|removed_entity| println!("{}", removed_entity)); /// } /// # bevy_ecs::system::assert_is_system(react_on_removal); /// ``` @@ -233,8 +232,7 @@ impl<'w, 's, T: Component> RemovedComponents<'w, 's, T> { /// Returns `true` if there are no events available to read. pub fn is_empty(&self) -> bool { self.events() - .map(|events| self.reader.is_empty(events)) - .unwrap_or(true) + .is_none_or(|events| self.reader.is_empty(events)) } /// Consumes all available events. diff --git a/crates/bevy_ecs/src/resource.rs b/crates/bevy_ecs/src/resource.rs new file mode 100644 index 0000000000000..c3f7805631560 --- /dev/null +++ b/crates/bevy_ecs/src/resource.rs @@ -0,0 +1,75 @@ +//! Resources are unique, singleton-like data types that can be accessed from systems and stored in the [`World`](crate::world::World). + +// The derive macro for the `Resource` trait +pub use bevy_ecs_macros::Resource; + +/// A type that can be inserted into a [`World`] as a singleton. 
+/// +/// You can access resource data in systems using the [`Res`] and [`ResMut`] system parameters +/// +/// Only one resource of each type can be stored in a [`World`] at any given time. +/// +/// # Examples +/// +/// ``` +/// # let mut world = World::default(); +/// # let mut schedule = Schedule::default(); +/// # use bevy_ecs::prelude::*; +/// #[derive(Resource)] +/// struct MyResource { value: u32 } +/// +/// world.insert_resource(MyResource { value: 42 }); +/// +/// fn read_resource_system(resource: Res) { +/// assert_eq!(resource.value, 42); +/// } +/// +/// fn write_resource_system(mut resource: ResMut) { +/// assert_eq!(resource.value, 42); +/// resource.value = 0; +/// assert_eq!(resource.value, 0); +/// } +/// # schedule.add_systems((read_resource_system, write_resource_system).chain()); +/// # schedule.run(&mut world); +/// ``` +/// +/// # `!Sync` Resources +/// A `!Sync` type cannot implement `Resource`. However, it is possible to wrap a `Send` but not `Sync` +/// type in [`SyncCell`] or the currently unstable [`Exclusive`] to make it `Sync`. This forces only +/// having mutable access (`&mut T` only, never `&T`), but makes it safe to reference across multiple +/// threads. +/// +/// This will fail to compile since `RefCell` is `!Sync`. +/// ```compile_fail +/// # use std::cell::RefCell; +/// # use bevy_ecs::resource::Resource; +/// +/// #[derive(Resource)] +/// struct NotSync { +/// counter: RefCell, +/// } +/// ``` +/// +/// This will compile since the `RefCell` is wrapped with `SyncCell`. 
+/// ``` +/// # use std::cell::RefCell; +/// # use bevy_ecs::resource::Resource; +/// use bevy_utils::synccell::SyncCell; +/// +/// #[derive(Resource)] +/// struct ActuallySync { +/// counter: SyncCell>, +/// } +/// ``` +/// +/// [`Exclusive`]: https://doc.rust-lang.org/nightly/std/sync/struct.Exclusive.html +/// [`World`]: crate::world::World +/// [`Res`]: crate::system::Res +/// [`ResMut`]: crate::system::ResMut +/// [`SyncCell`]: bevy_utils::synccell::SyncCell +#[diagnostic::on_unimplemented( + message = "`{Self}` is not a `Resource`", + label = "invalid `Resource`", + note = "consider annotating `{Self}` with `#[derive(Resource)]`" +)] +pub trait Resource: Send + Sync + 'static {} diff --git a/crates/bevy_ecs/src/result.rs b/crates/bevy_ecs/src/result.rs deleted file mode 100644 index 77e7fc00743ae..0000000000000 --- a/crates/bevy_ecs/src/result.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! Contains error and result helpers for use in fallible systems. - -use alloc::boxed::Box; - -/// A dynamic error type for use in fallible systems. -pub type Error = Box; - -/// A result type for use in fallible systems. -pub type Result = core::result::Result; diff --git a/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs b/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs new file mode 100644 index 0000000000000..dda6d604a74b2 --- /dev/null +++ b/crates/bevy_ecs/src/schedule/auto_insert_apply_deferred.rs @@ -0,0 +1,252 @@ +use alloc::{boxed::Box, collections::BTreeSet, vec::Vec}; + +use bevy_platform::collections::HashMap; + +use crate::system::IntoSystem; +use crate::world::World; + +use super::{ + is_apply_deferred, ApplyDeferred, DiGraph, Direction, NodeId, ReportCycles, ScheduleBuildError, + ScheduleBuildPass, ScheduleGraph, SystemNode, +}; + +/// A [`ScheduleBuildPass`] that inserts [`ApplyDeferred`] systems into the schedule graph +/// when there are [`Deferred`](crate::prelude::Deferred) +/// in one system and there are ordering dependencies on that system. 
[`Commands`](crate::system::Commands) is one +/// such deferred buffer. +/// +/// This pass is typically automatically added to the schedule. You can disable this by setting +/// [`ScheduleBuildSettings::auto_insert_apply_deferred`](crate::schedule::ScheduleBuildSettings::auto_insert_apply_deferred) +/// to `false`. You may want to disable this if you only want to sync deferred params at the end of the schedule, +/// or want to manually insert all your sync points. +#[derive(Debug, Default)] +pub struct AutoInsertApplyDeferredPass { + /// Dependency edges that will **not** automatically insert an instance of `ApplyDeferred` on the edge. + no_sync_edges: BTreeSet<(NodeId, NodeId)>, + auto_sync_node_ids: HashMap, +} + +/// If added to a dependency edge, the edge will not be considered for auto sync point insertions. +pub struct IgnoreDeferred; + +impl AutoInsertApplyDeferredPass { + /// Returns the `NodeId` of the cached auto sync point. Will create + /// a new one if needed. + fn get_sync_point(&mut self, graph: &mut ScheduleGraph, distance: u32) -> NodeId { + self.auto_sync_node_ids + .get(&distance) + .copied() + .or_else(|| { + let node_id = self.add_auto_sync(graph); + self.auto_sync_node_ids.insert(distance, node_id); + Some(node_id) + }) + .unwrap() + } + /// add an [`ApplyDeferred`] system with no config + fn add_auto_sync(&mut self, graph: &mut ScheduleGraph) -> NodeId { + let id = NodeId::System(graph.systems.len()); + + graph + .systems + .push(SystemNode::new(Box::new(IntoSystem::into_system( + ApplyDeferred, + )))); + graph.system_conditions.push(Vec::new()); + + // ignore ambiguities with auto sync points + // They aren't under user control, so no one should know or care. 
+ graph.ambiguous_with_all.insert(id); + + id + } +} + +impl ScheduleBuildPass for AutoInsertApplyDeferredPass { + type EdgeOptions = IgnoreDeferred; + + fn add_dependency(&mut self, from: NodeId, to: NodeId, options: Option<&Self::EdgeOptions>) { + if options.is_some() { + self.no_sync_edges.insert((from, to)); + } + } + + fn build( + &mut self, + _world: &mut World, + graph: &mut ScheduleGraph, + dependency_flattened: &mut DiGraph, + ) -> Result<(), ScheduleBuildError> { + let mut sync_point_graph = dependency_flattened.clone(); + let topo = graph.topsort_graph(dependency_flattened, ReportCycles::Dependency)?; + + fn set_has_conditions(graph: &ScheduleGraph, node: NodeId) -> bool { + !graph.set_conditions_at(node).is_empty() + || graph + .hierarchy() + .graph() + .edges_directed(node, Direction::Incoming) + .any(|(parent, _)| set_has_conditions(graph, parent)) + } + + fn system_has_conditions(graph: &ScheduleGraph, node: NodeId) -> bool { + assert!(node.is_system()); + !graph.system_conditions[node.index()].is_empty() + || graph + .hierarchy() + .graph() + .edges_directed(node, Direction::Incoming) + .any(|(parent, _)| set_has_conditions(graph, parent)) + } + + let mut system_has_conditions_cache = HashMap::::default(); + let mut is_valid_explicit_sync_point = |system: NodeId| { + let index = system.index(); + is_apply_deferred(graph.systems[index].get().unwrap()) + && !*system_has_conditions_cache + .entry(index) + .or_insert_with(|| system_has_conditions(graph, system)) + }; + + // Calculate the distance for each node. + // The "distance" is the number of sync points between a node and the beginning of the graph. + // Also store if a preceding edge would have added a sync point but was ignored to add it at + // a later edge that is not ignored. + let mut distances_and_pending_sync: HashMap = + HashMap::with_capacity_and_hasher(topo.len(), Default::default()); + + // Keep track of any explicit sync nodes for a specific distance. 
+ let mut distance_to_explicit_sync_node: HashMap = HashMap::default(); + + // Determine the distance for every node and collect the explicit sync points. + for node in &topo { + let (node_distance, mut node_needs_sync) = distances_and_pending_sync + .get(&node.index()) + .copied() + .unwrap_or_default(); + + if is_valid_explicit_sync_point(*node) { + // The distance of this sync point does not change anymore as the iteration order + // makes sure that this node is no unvisited target of another node. + // Because of this, the sync point can be stored for this distance to be reused as + // automatically added sync points later. + distance_to_explicit_sync_node.insert(node_distance, *node); + + // This node just did a sync, so the only reason to do another sync is if one was + // explicitly scheduled afterwards. + node_needs_sync = false; + } else if !node_needs_sync { + // No previous node has postponed sync points to add so check if the system itself + // has deferred params that require a sync point to apply them. + node_needs_sync = graph.systems[node.index()].get().unwrap().has_deferred(); + } + + for target in dependency_flattened.neighbors_directed(*node, Direction::Outgoing) { + let (target_distance, target_pending_sync) = distances_and_pending_sync + .entry(target.index()) + .or_default(); + + let mut edge_needs_sync = node_needs_sync; + if node_needs_sync + && !graph.systems[target.index()].get().unwrap().is_exclusive() + && self.no_sync_edges.contains(&(*node, target)) + { + // The node has deferred params to apply, but this edge is ignoring sync points. + // Mark the target as 'delaying' those commands to a future edge and the current + // edge as not needing a sync point. + *target_pending_sync = true; + edge_needs_sync = false; + } + + let mut weight = 0; + if edge_needs_sync || is_valid_explicit_sync_point(target) { + // The target distance grows if a sync point is added between it and the node. 
+ // Also raise the distance if the target is a sync point itself so it then again + // raises the distance of following nodes as that is what the distance is about. + weight = 1; + } + + // The target cannot have fewer sync points in front of it than the preceding node. + *target_distance = (node_distance + weight).max(*target_distance); + } + } + + // Find any edges which have a different number of sync points between them and make sure + // there is a sync point between them. + for node in &topo { + let (node_distance, _) = distances_and_pending_sync + .get(&node.index()) + .copied() + .unwrap_or_default(); + + for target in dependency_flattened.neighbors_directed(*node, Direction::Outgoing) { + let (target_distance, _) = distances_and_pending_sync + .get(&target.index()) + .copied() + .unwrap_or_default(); + + if node_distance == target_distance { + // These nodes are the same distance, so they don't need an edge between them. + continue; + } + + if is_apply_deferred(graph.systems[target.index()].get().unwrap()) { + // We don't need to insert a sync point since ApplyDeferred is a sync point + // already! + continue; + } + + let sync_point = distance_to_explicit_sync_node + .get(&target_distance) + .copied() + .unwrap_or_else(|| self.get_sync_point(graph, target_distance)); + + sync_point_graph.add_edge(*node, sync_point); + sync_point_graph.add_edge(sync_point, target); + + // The edge without the sync point is now redundant. 
+ sync_point_graph.remove_edge(*node, target); + } + } + + *dependency_flattened = sync_point_graph; + Ok(()) + } + + fn collapse_set( + &mut self, + set: NodeId, + systems: &[NodeId], + dependency_flattened: &DiGraph, + ) -> impl Iterator { + if systems.is_empty() { + // collapse dependencies for empty sets + for a in dependency_flattened.neighbors_directed(set, Direction::Incoming) { + for b in dependency_flattened.neighbors_directed(set, Direction::Outgoing) { + if self.no_sync_edges.contains(&(a, set)) + && self.no_sync_edges.contains(&(set, b)) + { + self.no_sync_edges.insert((a, b)); + } + } + } + } else { + for a in dependency_flattened.neighbors_directed(set, Direction::Incoming) { + for &sys in systems { + if self.no_sync_edges.contains(&(a, set)) { + self.no_sync_edges.insert((a, sys)); + } + } + } + + for b in dependency_flattened.neighbors_directed(set, Direction::Outgoing) { + for &sys in systems { + if self.no_sync_edges.contains(&(set, b)) { + self.no_sync_edges.insert((sys, b)); + } + } + } + } + core::iter::empty() + } +} diff --git a/crates/bevy_ecs/src/schedule/condition.rs b/crates/bevy_ecs/src/schedule/condition.rs index 4cfdd1cc81f0f..a85a8c6fa48cb 100644 --- a/crates/bevy_ecs/src/schedule/condition.rs +++ b/crates/bevy_ecs/src/schedule/condition.rs @@ -398,8 +398,10 @@ pub mod common_conditions { change_detection::DetectChanges, event::{Event, EventReader}, prelude::{Component, Query, With}, + query::QueryFilter, removal_detection::RemovedComponents, - system::{In, IntoSystem, Local, Res, Resource, System, SystemInput}, + resource::Resource, + system::{In, IntoSystem, Local, Res, System, SystemInput}, }; use alloc::format; @@ -937,6 +939,12 @@ pub mod common_conditions { removals.read().count() > 0 } + /// A [`Condition`]-satisfying system that returns `true` + /// if there are any entities that match the given [`QueryFilter`]. 
+ pub fn any_match_filter(query: Query<(), F>) -> bool { + !query.is_empty() + } + /// Generates a [`Condition`] that inverses the result of passed one. /// /// # Example @@ -1255,11 +1263,11 @@ where #[cfg(test)] mod tests { use super::{common_conditions::*, Condition}; - use crate as bevy_ecs; + use crate::query::With; use crate::{ change_detection::ResMut, component::Component, - schedule::{IntoSystemConfigs, Schedule}, + schedule::{IntoScheduleConfigs, Schedule}, system::Local, world::World, }; @@ -1396,6 +1404,7 @@ mod tests { .distributive_run_if(resource_removed::) .distributive_run_if(on_event::) .distributive_run_if(any_with_component::) + .distributive_run_if(any_match_filter::>) .distributive_run_if(not(run_once)), ); } diff --git a/crates/bevy_ecs/src/schedule/config.rs b/crates/bevy_ecs/src/schedule/config.rs index a4518c0255773..b98205e32bb8f 100644 --- a/crates/bevy_ecs/src/schedule/config.rs +++ b/crates/bevy_ecs/src/schedule/config.rs @@ -2,14 +2,16 @@ use alloc::{boxed::Box, vec, vec::Vec}; use variadics_please::all_tuples; use crate::{ - result::Result, + error::Result, + never::Never, schedule::{ + auto_insert_apply_deferred::IgnoreDeferred, condition::{BoxedCondition, Condition}, graph::{Ambiguity, Dependency, DependencyKind, GraphInfo}, set::{InternedSystemSet, IntoSystemSet, SystemSet}, Chain, }, - system::{BoxedSystem, IntoSystem, ScheduleSystem, System}, + system::{BoxedSystem, InfallibleSystemWrapper, IntoSystem, ScheduleSystem, System}, }; fn new_condition(condition: impl Condition) -> BoxedCondition { @@ -35,61 +37,87 @@ fn ambiguous_with(graph_info: &mut GraphInfo, set: InternedSystemSet) { } } +/// Stores data to differentiate different schedulable structs. +pub trait Schedulable { + /// Additional data used to configure independent scheduling. Stored in [`ScheduleConfig`]. + type Metadata; + /// Additional data used to configure a schedulable group. Stored in [`ScheduleConfigs`]. 
+ type GroupMetadata; + + /// Initializes a configuration from this node. + fn into_config(self) -> ScheduleConfig + where + Self: Sized; +} + +impl Schedulable for ScheduleSystem { + type Metadata = GraphInfo; + type GroupMetadata = Chain; + + fn into_config(self) -> ScheduleConfig { + let sets = self.default_system_sets().clone(); + ScheduleConfig { + node: self, + metadata: GraphInfo { + hierarchy: sets, + ..Default::default() + }, + conditions: Vec::new(), + } + } +} + +impl Schedulable for InternedSystemSet { + type Metadata = GraphInfo; + type GroupMetadata = Chain; + + fn into_config(self) -> ScheduleConfig { + assert!( + self.system_type().is_none(), + "configuring system type sets is not allowed" + ); + + ScheduleConfig { + node: self, + metadata: GraphInfo::default(), + conditions: Vec::new(), + } + } +} + /// Stores configuration for a single generic node (a system or a system set) /// /// The configuration includes the node itself, scheduling metadata /// (hierarchy: in which sets is the node contained, /// dependencies: before/after which other nodes should this node run) /// and the run conditions associated with this node. -pub struct NodeConfig { +pub struct ScheduleConfig { pub(crate) node: T, - /// Hierarchy and dependency metadata for this node - pub(crate) graph_info: GraphInfo, + pub(crate) metadata: T::Metadata, pub(crate) conditions: Vec, } -/// Stores configuration for a single system. -pub type SystemConfig = NodeConfig; - -/// A collections of generic [`NodeConfig`]s. -pub enum NodeConfigs { - /// Configuration for a single node. - NodeConfig(NodeConfig), +/// Single or nested configurations for [`Schedulable`]s. +pub enum ScheduleConfigs { + /// Configuration for a single [`Schedulable`]. + ScheduleConfig(ScheduleConfig), /// Configuration for a tuple of nested `Configs` instances. Configs { /// Configuration for each element of the tuple. - configs: Vec>, + configs: Vec>, /// Run conditions applied to everything in the tuple. 
collective_conditions: Vec, - /// See [`Chain`] for usage. - chained: Chain, + /// Metadata to be applied to all elements in the tuple. + metadata: T::GroupMetadata, }, } -/// A collection of [`SystemConfig`]. -pub type SystemConfigs = NodeConfigs; - -impl SystemConfigs { - fn new_system(system: ScheduleSystem) -> Self { - // include system in its default sets - let sets = system.default_system_sets().into_iter().collect(); - Self::NodeConfig(SystemConfig { - node: system, - graph_info: GraphInfo { - hierarchy: sets, - ..Default::default() - }, - conditions: Vec::new(), - }) - } -} - -impl NodeConfigs { +impl> ScheduleConfigs { /// Adds a new boxed system set to the systems. pub fn in_set_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { - config.graph_info.hierarchy.push(set); + Self::ScheduleConfig(config) => { + config.metadata.hierarchy.push(set); } Self::Configs { configs, .. } => { for config in configs { @@ -101,9 +129,9 @@ impl NodeConfigs { fn before_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies .push(Dependency::new(DependencyKind::Before, set)); } @@ -117,9 +145,9 @@ impl NodeConfigs { fn after_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies .push(Dependency::new(DependencyKind::After, set)); } @@ -133,11 +161,11 @@ impl NodeConfigs { fn before_ignore_deferred_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies - .push(Dependency::new(DependencyKind::BeforeNoSync, set)); + .push(Dependency::new(DependencyKind::Before, set).add_config(IgnoreDeferred)); } Self::Configs { configs, .. 
} => { for config in configs { @@ -149,11 +177,11 @@ impl NodeConfigs { fn after_ignore_deferred_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config - .graph_info + .metadata .dependencies - .push(Dependency::new(DependencyKind::AfterNoSync, set)); + .push(Dependency::new(DependencyKind::After, set).add_config(IgnoreDeferred)); } Self::Configs { configs, .. } => { for config in configs { @@ -165,7 +193,7 @@ impl NodeConfigs { fn distributive_run_if_inner(&mut self, condition: impl Condition + Clone) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config.conditions.push(new_condition(condition)); } Self::Configs { configs, .. } => { @@ -178,8 +206,8 @@ impl NodeConfigs { fn ambiguous_with_inner(&mut self, set: InternedSystemSet) { match self { - Self::NodeConfig(config) => { - ambiguous_with(&mut config.graph_info, set); + Self::ScheduleConfig(config) => { + ambiguous_with(&mut config.metadata, set); } Self::Configs { configs, .. } => { for config in configs { @@ -191,8 +219,8 @@ impl NodeConfigs { fn ambiguous_with_all_inner(&mut self) { match self { - Self::NodeConfig(config) => { - config.graph_info.ambiguous_with = Ambiguity::IgnoreAll; + Self::ScheduleConfig(config) => { + config.metadata.ambiguous_with = Ambiguity::IgnoreAll; } Self::Configs { configs, .. } => { for config in configs { @@ -208,7 +236,7 @@ impl NodeConfigs { /// Prefer `run_if` for run conditions whose type is known at compile time. pub fn run_if_dyn(&mut self, condition: BoxedCondition) { match self { - Self::NodeConfig(config) => { + Self::ScheduleConfig(config) => { config.conditions.push(condition); } Self::Configs { @@ -222,42 +250,48 @@ impl NodeConfigs { fn chain_inner(mut self) -> Self { match &mut self { - Self::NodeConfig(_) => { /* no op */ } - Self::Configs { chained, .. 
} => { - *chained = Chain::Yes; + Self::ScheduleConfig(_) => { /* no op */ } + Self::Configs { metadata, .. } => { + metadata.set_chained(); } - } + }; self } fn chain_ignore_deferred_inner(mut self) -> Self { match &mut self { - Self::NodeConfig(_) => { /* no op */ } - Self::Configs { chained, .. } => { - *chained = Chain::YesIgnoreDeferred; + Self::ScheduleConfig(_) => { /* no op */ } + Self::Configs { metadata, .. } => { + metadata.set_chained_with_config(IgnoreDeferred); } } self } } -/// Types that can convert into a [`SystemConfigs`]. +/// Types that can convert into a [`ScheduleConfigs`]. /// /// This trait is implemented for "systems" (functions whose arguments all implement /// [`SystemParam`](crate::system::SystemParam)), or tuples thereof. /// It is a common entry point for system configurations. /// +/// # Usage notes +/// +/// This trait should only be used as a bound for trait implementations or as an +/// argument to a function. If system configs need to be returned from a +/// function or stored somewhere, use [`ScheduleConfigs`] instead of this trait. +/// /// # Examples /// /// ``` -/// # use bevy_ecs::schedule::IntoSystemConfigs; +/// # use bevy_ecs::{schedule::IntoScheduleConfigs, system::ScheduleSystem}; /// # struct AppMock; /// # struct Update; /// # impl AppMock { /// # pub fn add_systems( /// # &mut self, /// # schedule: Update, -/// # systems: impl IntoSystemConfigs, +/// # systems: impl IntoScheduleConfigs, /// # ) -> &mut Self { self } /// # } /// # let mut app = AppMock; @@ -279,16 +313,15 @@ impl NodeConfigs { message = "`{Self}` does not describe a valid system configuration", label = "invalid system configuration" )] -pub trait IntoSystemConfigs -where - Self: Sized, +pub trait IntoScheduleConfigs, Marker>: + Sized { - /// Convert into a [`SystemConfigs`]. - fn into_configs(self) -> SystemConfigs; + /// Convert into a [`ScheduleConfigs`]. + fn into_configs(self) -> ScheduleConfigs; /// Add these systems to the provided `set`. 
#[track_caller] - fn in_set(self, set: impl SystemSet) -> SystemConfigs { + fn in_set(self, set: impl SystemSet) -> ScheduleConfigs { self.into_configs().in_set(set) } @@ -300,7 +333,7 @@ where /// /// Calling [`.chain`](Self::chain) is often more convenient and ensures that all systems are added to the schedule. /// Please check the [caveats section of `.after`](Self::after) for details. - fn before(self, set: impl IntoSystemSet) -> SystemConfigs { + fn before(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().before(set) } @@ -327,7 +360,7 @@ where /// any ordering calls between them—whether using `.before`, `.after`, or `.chain`—will be silently ignored. /// /// [`configure_sets`]: https://docs.rs/bevy/latest/bevy/app/struct.App.html#method.configure_sets - fn after(self, set: impl IntoSystemSet) -> SystemConfigs { + fn after(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().after(set) } @@ -335,7 +368,7 @@ where /// /// Unlike [`before`](Self::before), this will not cause the systems in /// `set` to wait for the deferred effects of `self` to be applied. - fn before_ignore_deferred(self, set: impl IntoSystemSet) -> SystemConfigs { + fn before_ignore_deferred(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().before_ignore_deferred(set) } @@ -343,7 +376,7 @@ where /// /// Unlike [`after`](Self::after), this will not wait for the deferred /// effects of systems in `set` to be applied. - fn after_ignore_deferred(self, set: impl IntoSystemSet) -> SystemConfigs { + fn after_ignore_deferred(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().after_ignore_deferred(set) } @@ -355,7 +388,7 @@ where /// Each individual condition will be evaluated at most once (per schedule run), /// right before the corresponding system prepares to run. 
/// - /// This is equivalent to calling [`run_if`](IntoSystemConfigs::run_if) on each individual + /// This is equivalent to calling [`run_if`](IntoScheduleConfigs::run_if) on each individual /// system, as shown below: /// /// ``` @@ -374,10 +407,10 @@ where /// that all evaluations in a single schedule run will yield the same result. If another /// system is run inbetween two evaluations it could cause the result of the condition to change. /// - /// Use [`run_if`](IntoSystemSetConfigs::run_if) on a [`SystemSet`] if you want to make sure + /// Use [`run_if`](ScheduleConfigs::run_if) on a [`SystemSet`] if you want to make sure /// that either all or none of the systems are run, or you don't want to evaluate the run /// condition for each contained system separately. - fn distributive_run_if(self, condition: impl Condition + Clone) -> SystemConfigs { + fn distributive_run_if(self, condition: impl Condition + Clone) -> ScheduleConfigs { self.into_configs().distributive_run_if(condition) } @@ -409,21 +442,21 @@ where /// is upheld after the first system has run. You need to make sure that no other systems that /// could invalidate the condition are scheduled inbetween the first and last run system. /// - /// Use [`distributive_run_if`](IntoSystemConfigs::distributive_run_if) if you want the + /// Use [`distributive_run_if`](IntoScheduleConfigs::distributive_run_if) if you want the /// condition to be evaluated for each individual system, right before one is run. - fn run_if(self, condition: impl Condition) -> SystemConfigs { + fn run_if(self, condition: impl Condition) -> ScheduleConfigs { self.into_configs().run_if(condition) } /// Suppress warnings and errors that would result from these systems having ambiguities /// (conflicting access but indeterminate order) with systems in `set`. 
- fn ambiguous_with(self, set: impl IntoSystemSet) -> SystemConfigs { + fn ambiguous_with(self, set: impl IntoSystemSet) -> ScheduleConfigs { self.into_configs().ambiguous_with(set) } /// Suppress warnings and errors that would result from these systems having ambiguities /// (conflicting access but indeterminate order) with any other system. - fn ambiguous_with_all(self) -> SystemConfigs { + fn ambiguous_with_all(self) -> ScheduleConfigs { self.into_configs().ambiguous_with_all() } @@ -431,10 +464,10 @@ where /// /// Ordering constraints will be applied between the successive elements. /// - /// If the preceding node on a edge has deferred parameters, a [`ApplyDeferred`](crate::schedule::ApplyDeferred) + /// If the preceding node on an edge has deferred parameters, an [`ApplyDeferred`](crate::schedule::ApplyDeferred) /// will be inserted on the edge. If this behavior is not desired consider using /// [`chain_ignore_deferred`](Self::chain_ignore_deferred) instead. - fn chain(self) -> SystemConfigs { + fn chain(self) -> ScheduleConfigs { self.into_configs().chain() } @@ -443,12 +476,14 @@ where /// Ordering constraints will be applied between the successive elements. /// /// Unlike [`chain`](Self::chain) this will **not** add [`ApplyDeferred`](crate::schedule::ApplyDeferred) on the edges. 
- fn chain_ignore_deferred(self) -> SystemConfigs { + fn chain_ignore_deferred(self) -> ScheduleConfigs { self.into_configs().chain_ignore_deferred() } } -impl IntoSystemConfigs<()> for SystemConfigs { +impl> IntoScheduleConfigs + for ScheduleConfigs +{ fn into_configs(self) -> Self { self } @@ -489,12 +524,15 @@ impl IntoSystemConfigs<()> for SystemConfigs { self } - fn distributive_run_if(mut self, condition: impl Condition + Clone) -> SystemConfigs { + fn distributive_run_if( + mut self, + condition: impl Condition + Clone, + ) -> ScheduleConfigs { self.distributive_run_if_inner(condition); self } - fn run_if(mut self, condition: impl Condition) -> SystemConfigs { + fn run_if(mut self, condition: impl Condition) -> ScheduleConfigs { self.run_if_dyn(new_condition(condition)); self } @@ -519,61 +557,80 @@ impl IntoSystemConfigs<()> for SystemConfigs { } } +/// Marker component to allow for conflicting implementations of [`IntoScheduleConfigs`] #[doc(hidden)] pub struct Infallible; -impl IntoSystemConfigs<(Infallible, Marker)> for F +impl IntoScheduleConfigs for F where F: IntoSystem<(), (), Marker>, { - fn into_configs(self) -> SystemConfigs { - let boxed_system = Box::new(IntoSystem::into_system(self)); - SystemConfigs::new_system(ScheduleSystem::Infallible(boxed_system)) + fn into_configs(self) -> ScheduleConfigs { + let wrapper = InfallibleSystemWrapper::new(IntoSystem::into_system(self)); + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(Box::new(wrapper))) } } -impl IntoSystemConfigs<()> for BoxedSystem<(), ()> { - fn into_configs(self) -> SystemConfigs { - SystemConfigs::new_system(ScheduleSystem::Infallible(self)) +impl IntoScheduleConfigs for F +where + F: IntoSystem<(), Never, Marker>, +{ + fn into_configs(self) -> ScheduleConfigs { + let wrapper = InfallibleSystemWrapper::new(IntoSystem::into_system(self)); + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(Box::new(wrapper))) } } +/// Marker component to allow for conflicting 
implementations of [`IntoScheduleConfigs`] #[doc(hidden)] pub struct Fallible; -impl IntoSystemConfigs<(Fallible, Marker)> for F +impl IntoScheduleConfigs for F where F: IntoSystem<(), Result, Marker>, { - fn into_configs(self) -> SystemConfigs { + fn into_configs(self) -> ScheduleConfigs { let boxed_system = Box::new(IntoSystem::into_system(self)); - SystemConfigs::new_system(ScheduleSystem::Fallible(boxed_system)) + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(boxed_system)) + } +} + +impl IntoScheduleConfigs for BoxedSystem<(), Result> { + fn into_configs(self) -> ScheduleConfigs { + ScheduleConfigs::ScheduleConfig(ScheduleSystem::into_config(self)) } } -impl IntoSystemConfigs<()> for BoxedSystem<(), Result> { - fn into_configs(self) -> SystemConfigs { - SystemConfigs::new_system(ScheduleSystem::Fallible(self)) +impl IntoScheduleConfigs for S { + fn into_configs(self) -> ScheduleConfigs { + ScheduleConfigs::ScheduleConfig(InternedSystemSet::into_config(self.intern())) } } #[doc(hidden)] -pub struct SystemConfigTupleMarker; +pub struct ScheduleConfigTupleMarker; -macro_rules! impl_system_collection { +macro_rules! impl_node_type_collection { ($(#[$meta:meta])* $(($param: ident, $sys: ident)),*) => { $(#[$meta])* - impl<$($param, $sys),*> IntoSystemConfigs<(SystemConfigTupleMarker, $($param,)*)> for ($($sys,)*) + impl<$($param, $sys),*, T: Schedulable> IntoScheduleConfigs for ($($sys,)*) where - $($sys: IntoSystemConfigs<$param>),* + $($sys: IntoScheduleConfigs),* { - #[allow(non_snake_case)] - fn into_configs(self) -> SystemConfigs { + #[expect( + clippy::allow_attributes, + reason = "We are inside a macro, and as such, `non_snake_case` is not guaranteed to apply." + )] + #[allow( + non_snake_case, + reason = "Variable names are provided by the macro caller, not by us." 
+ )] + fn into_configs(self) -> ScheduleConfigs { let ($($sys,)*) = self; - SystemConfigs::Configs { + ScheduleConfigs::Configs { + metadata: Default::default(), configs: vec![$($sys.into_configs(),)*], collective_conditions: Vec::new(), - chained: Chain::No, } } } @@ -582,233 +639,9 @@ macro_rules! impl_system_collection { all_tuples!( #[doc(fake_variadic)] - impl_system_collection, + impl_node_type_collection, 1, 20, P, S ); - -/// A [`SystemSet`] with scheduling metadata. -pub type SystemSetConfig = NodeConfig; - -impl SystemSetConfig { - #[track_caller] - pub(super) fn new(set: InternedSystemSet) -> Self { - // system type sets are automatically populated - // to avoid unintentionally broad changes, they cannot be configured - assert!( - set.system_type().is_none(), - "configuring system type sets is not allowed" - ); - - Self { - node: set, - graph_info: GraphInfo::default(), - conditions: Vec::new(), - } - } -} - -/// A collection of [`SystemSetConfig`]. -pub type SystemSetConfigs = NodeConfigs; - -/// Types that can convert into a [`SystemSetConfigs`]. -#[diagnostic::on_unimplemented( - message = "`{Self}` does not describe a valid system set configuration", - label = "invalid system set configuration" -)] -pub trait IntoSystemSetConfigs -where - Self: Sized, -{ - /// Convert into a [`SystemSetConfigs`]. - #[doc(hidden)] - fn into_configs(self) -> SystemSetConfigs; - - /// Add these system sets to the provided `set`. - #[track_caller] - fn in_set(self, set: impl SystemSet) -> SystemSetConfigs { - self.into_configs().in_set(set) - } - - /// Runs before all systems in `set`. If `self` has any systems that produce [`Commands`](crate::system::Commands) - /// or other [`Deferred`](crate::system::Deferred) operations, all systems in `set` will see their effect. - /// - /// If automatically inserting [`ApplyDeferred`](crate::schedule::ApplyDeferred) like - /// this isn't desired, use [`before_ignore_deferred`](Self::before_ignore_deferred) instead. 
- fn before(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().before(set) - } - - /// Runs before all systems in `set`. If `set` has any systems that produce [`Commands`](crate::system::Commands) - /// or other [`Deferred`](crate::system::Deferred) operations, all systems in `self` will see their effect. - /// - /// If automatically inserting [`ApplyDeferred`](crate::schedule::ApplyDeferred) like - /// this isn't desired, use [`after_ignore_deferred`](Self::after_ignore_deferred) instead. - fn after(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().after(set) - } - - /// Run before all systems in `set`. - /// - /// Unlike [`before`](Self::before), this will not cause the systems in `set` to wait for the - /// deferred effects of `self` to be applied. - fn before_ignore_deferred(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().before_ignore_deferred(set) - } - - /// Run after all systems in `set`. - /// - /// Unlike [`after`](Self::after), this may not see the deferred - /// effects of systems in `set` to be applied. - fn after_ignore_deferred(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().after_ignore_deferred(set) - } - - /// Run the systems in this set(s) only if the [`Condition`] is `true`. - /// - /// The `Condition` will be evaluated at most once (per schedule run), - /// the first time a system in this set(s) prepares to run. - fn run_if(self, condition: impl Condition) -> SystemSetConfigs { - self.into_configs().run_if(condition) - } - - /// Suppress warnings and errors that would result from systems in these sets having ambiguities - /// (conflicting access but indeterminate order) with systems in `set`. 
- fn ambiguous_with(self, set: impl IntoSystemSet) -> SystemSetConfigs { - self.into_configs().ambiguous_with(set) - } - - /// Suppress warnings and errors that would result from systems in these sets having ambiguities - /// (conflicting access but indeterminate order) with any other system. - fn ambiguous_with_all(self) -> SystemSetConfigs { - self.into_configs().ambiguous_with_all() - } - - /// Treat this collection as a sequence of system sets. - /// - /// Ordering constraints will be applied between the successive elements. - fn chain(self) -> SystemSetConfigs { - self.into_configs().chain() - } - - /// Treat this collection as a sequence of systems. - /// - /// Ordering constraints will be applied between the successive elements. - /// - /// Unlike [`chain`](Self::chain) this will **not** add [`ApplyDeferred`](crate::schedule::ApplyDeferred) on the edges. - fn chain_ignore_deferred(self) -> SystemSetConfigs { - self.into_configs().chain_ignore_deferred() - } -} - -impl IntoSystemSetConfigs for SystemSetConfigs { - fn into_configs(self) -> Self { - self - } - - #[track_caller] - fn in_set(mut self, set: impl SystemSet) -> Self { - assert!( - set.system_type().is_none(), - "adding arbitrary systems to a system type set is not allowed" - ); - self.in_set_inner(set.intern()); - - self - } - - fn before(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.before_inner(set.intern()); - - self - } - - fn after(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.after_inner(set.intern()); - - self - } - - fn before_ignore_deferred(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.before_ignore_deferred_inner(set.intern()); - - self - } - - fn after_ignore_deferred(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.after_ignore_deferred_inner(set.intern()); - - self - } - - fn run_if(mut self, condition: impl Condition) -> 
SystemSetConfigs { - self.run_if_dyn(new_condition(condition)); - - self - } - - fn ambiguous_with(mut self, set: impl IntoSystemSet) -> Self { - let set = set.into_system_set(); - self.ambiguous_with_inner(set.intern()); - - self - } - - fn ambiguous_with_all(mut self) -> Self { - self.ambiguous_with_all_inner(); - - self - } - - fn chain(self) -> Self { - self.chain_inner() - } - - fn chain_ignore_deferred(self) -> Self { - self.chain_ignore_deferred_inner() - } -} - -impl IntoSystemSetConfigs for S { - fn into_configs(self) -> SystemSetConfigs { - SystemSetConfigs::NodeConfig(SystemSetConfig::new(self.intern())) - } -} - -impl IntoSystemSetConfigs for SystemSetConfig { - fn into_configs(self) -> SystemSetConfigs { - SystemSetConfigs::NodeConfig(self) - } -} - -macro_rules! impl_system_set_collection { - ($(#[$meta:meta])* $($set: ident),*) => { - $(#[$meta])* - impl<$($set: IntoSystemSetConfigs),*> IntoSystemSetConfigs for ($($set,)*) - { - #[allow(non_snake_case)] - fn into_configs(self) -> SystemSetConfigs { - let ($($set,)*) = self; - SystemSetConfigs::Configs { - configs: vec![$($set.into_configs(),)*], - collective_conditions: Vec::new(), - chained: Chain::No, - } - } - } - } -} - -all_tuples!( - #[doc(fake_variadic)] - impl_system_set_collection, - 1, - 20, - S -); diff --git a/crates/bevy_ecs/src/schedule/executor/mod.rs b/crates/bevy_ecs/src/schedule/executor/mod.rs index f87bbcd10d425..09f01b2289f7e 100644 --- a/crates/bevy_ecs/src/schedule/executor/mod.rs +++ b/crates/bevy_ecs/src/schedule/executor/mod.rs @@ -16,10 +16,11 @@ use fixedbitset::FixedBitSet; use crate::{ archetype::ArchetypeComponentId, component::{ComponentId, Tick}, + error::{BevyError, ErrorContext, Result}, prelude::{IntoSystemSet, SystemSet}, - query::Access, + query::{Access, FilteredAccessSet}, schedule::{BoxedCondition, InternedSystemSet, NodeId, SystemTypeSet}, - system::{ScheduleSystem, System, SystemIn}, + system::{ScheduleSystem, System, SystemIn, SystemParamValidationError}, 
world::{unsafe_world_cell::UnsafeWorldCell, DeferredWorld, World}, }; @@ -32,6 +33,7 @@ pub(super) trait SystemExecutor: Send + Sync { schedule: &mut SystemSchedule, world: &mut World, skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ); fn set_apply_final_deferred(&mut self, value: bool); } @@ -121,7 +123,10 @@ impl SystemSchedule { since = "0.16.0", note = "Use `ApplyDeferred` instead. This was previously a function but is now a marker struct System." )] -#[expect(non_upper_case_globals)] +#[expect( + non_upper_case_globals, + reason = "This item is deprecated; as such, its previous name needs to stay." +)] pub const apply_deferred: ApplyDeferred = ApplyDeferred; /// A special [`System`] that instructs the executor to call @@ -158,7 +163,7 @@ pub(super) fn is_apply_deferred(system: &ScheduleSystem) -> bool { impl System for ApplyDeferred { type In = (); - type Out = (); + type Out = Result<()>; fn name(&self) -> Cow<'static, str> { Cow::Borrowed("bevy_ecs::apply_deferred") @@ -169,6 +174,10 @@ impl System for ApplyDeferred { const { &Access::new() } } + fn component_access_set(&self) -> &FilteredAccessSet { + const { &FilteredAccessSet::new() } + } + fn archetype_component_access(&self) -> &Access { // This system accesses no archetype components. const { &Access::new() } @@ -203,21 +212,26 @@ impl System for ApplyDeferred { ) -> Self::Out { // This system does nothing on its own. The executor will apply deferred // commands from other systems instead of running this system. + Ok(()) } fn run(&mut self, _input: SystemIn<'_, Self>, _world: &mut World) -> Self::Out { // This system does nothing on its own. The executor will apply deferred // commands from other systems instead of running this system. 
+ Ok(()) } fn apply_deferred(&mut self, _world: &mut World) {} fn queue_deferred(&mut self, _world: DeferredWorld) {} - unsafe fn validate_param_unsafe(&mut self, _world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + _world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // This system is always valid to run because it doesn't do anything, // and only used as a marker for the executor. - true + Ok(()) } fn initialize(&mut self, _world: &mut World) {} @@ -258,8 +272,8 @@ mod __rust_begin_short_backtrace { use core::hint::black_box; use crate::{ - result::Result, - system::{ReadOnlySystem, ScheduleSystem, System}, + error::Result, + system::{ReadOnlySystem, ScheduleSystem}, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; @@ -305,18 +319,14 @@ mod __rust_begin_short_backtrace { #[cfg(test)] mod tests { use crate::{ - self as bevy_ecs, - prelude::{IntoSystemConfigs, IntoSystemSetConfigs, Resource, Schedule, SystemSet}, + prelude::{Component, In, IntoSystem, Resource, Schedule}, schedule::ExecutorKind, - system::{Commands, Res, WithParamWarnPolicy}, + system::{Populated, Res, ResMut, Single}, world::World, }; - #[derive(Resource)] - struct R1; - - #[derive(Resource)] - struct R2; + #[derive(Component)] + struct TestComponent; const EXECUTORS: [ExecutorKind; 3] = [ ExecutorKind::Simple, @@ -324,63 +334,243 @@ mod tests { ExecutorKind::MultiThreaded, ]; + #[derive(Resource, Default)] + struct TestState { + populated_ran: bool, + single_ran: bool, + } + + #[derive(Resource, Default)] + struct Counter(u8); + + fn set_single_state(mut _single: Single<&TestComponent>, mut state: ResMut) { + state.single_ran = true; + } + + fn set_populated_state( + mut _populated: Populated<&TestComponent>, + mut state: ResMut, + ) { + state.populated_ran = true; + } + #[test] - fn invalid_system_param_skips() { + #[expect(clippy::print_stdout, reason = "std and println are allowed in tests")] + fn single_and_populated_skipped_and_run() 
{ for executor in EXECUTORS { - invalid_system_param_skips_core(executor); + std::println!("Testing executor: {:?}", executor); + + let mut world = World::new(); + world.init_resource::(); + + let mut schedule = Schedule::default(); + schedule.set_executor_kind(executor); + schedule.add_systems((set_single_state, set_populated_state)); + schedule.run(&mut world); + + let state = world.get_resource::().unwrap(); + assert!(!state.single_ran); + assert!(!state.populated_ran); + + world.spawn(TestComponent); + + schedule.run(&mut world); + let state = world.get_resource::().unwrap(); + assert!(state.single_ran); + assert!(state.populated_ran); } } - fn invalid_system_param_skips_core(executor: ExecutorKind) { + fn look_for_missing_resource(_res: Res) {} + + #[test] + #[should_panic] + fn missing_resource_panics_simple() { let mut world = World::new(); let mut schedule = Schedule::default(); - schedule.set_executor_kind(executor); - schedule.add_systems( - ( - // This system depends on a system that is always skipped. 
- (|mut commands: Commands| { - commands.insert_resource(R2); - }) - .param_warn_once(), - ) - .chain(), - ); + + schedule.set_executor_kind(ExecutorKind::Simple); + schedule.add_systems(look_for_missing_resource); schedule.run(&mut world); - assert!(world.get_resource::().is_none()); - assert!(world.get_resource::().is_some()); } - #[derive(SystemSet, Hash, Debug, PartialEq, Eq, Clone)] - struct S1; + #[test] + #[should_panic] + fn missing_resource_panics_single_threaded() { + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.set_executor_kind(ExecutorKind::SingleThreaded); + schedule.add_systems(look_for_missing_resource); + schedule.run(&mut world); + } #[test] - fn invalid_condition_param_skips_system() { - for executor in EXECUTORS { - invalid_condition_param_skips_system_core(executor); + #[should_panic] + fn missing_resource_panics_multi_threaded() { + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.set_executor_kind(ExecutorKind::MultiThreaded); + schedule.add_systems(look_for_missing_resource); + schedule.run(&mut world); + } + + #[test] + fn piped_systems_first_system_skipped() { + // This system should be skipped when run due to no matching entity + fn pipe_out(_single: Single<&TestComponent>) -> u8 { + 42 + } + + fn pipe_in(_input: In, mut counter: ResMut) { + counter.0 += 1; + } + + let mut world = World::new(); + world.init_resource::(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + + let counter = world.resource::(); + assert_eq!(counter.0, 0); + } + + #[test] + fn piped_system_second_system_skipped() { + fn pipe_out(mut counter: ResMut) -> u8 { + counter.0 += 1; + 42 + } + + // This system should be skipped when run due to no matching entity + fn pipe_in(_input: In, _single: Single<&TestComponent>) {} + + let mut world = World::new(); + world.init_resource::(); + let mut schedule = Schedule::default(); + 
+ schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + let counter = world.resource::(); + assert_eq!(counter.0, 0); + } + + #[test] + #[should_panic] + fn piped_system_first_system_panics() { + // This system should panic when run because the resource is missing + fn pipe_out(_res: Res) -> u8 { + 42 } + + fn pipe_in(_input: In) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); } - fn invalid_condition_param_skips_system_core(executor: ExecutorKind) { + #[test] + #[should_panic] + fn piped_system_second_system_panics() { + fn pipe_out() -> u8 { + 42 + } + + // This system should panic when run because the resource is missing + fn pipe_in(_input: In, _res: Res) {} + let mut world = World::new(); let mut schedule = Schedule::default(); - schedule.set_executor_kind(executor); - schedule.configure_sets(S1.run_if((|_: Res| true).param_warn_once())); - schedule.add_systems(( - // System gets skipped if system set run conditions fail validation. - (|mut commands: Commands| { - commands.insert_resource(R1); - }) - .param_warn_once() - .in_set(S1), - // System gets skipped if run conditions fail validation. 
- (|mut commands: Commands| { - commands.insert_resource(R2); - }) - .param_warn_once() - .run_if((|_: Res| true).param_warn_once()), - )); + + schedule.add_systems(pipe_out.pipe(pipe_in)); schedule.run(&mut world); - assert!(world.get_resource::().is_none()); - assert!(world.get_resource::().is_none()); + } + + // This test runs without panicking because we've + // decided to use early-out behavior for piped systems + #[test] + fn piped_system_skip_and_panic() { + // This system should be skipped when run due to no matching entity + fn pipe_out(_single: Single<&TestComponent>) -> u8 { + 42 + } + + // This system should panic when run because the resource is missing + fn pipe_in(_input: In, _res: Res) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + #[test] + #[should_panic] + fn piped_system_panic_and_skip() { + // This system should panic when run because the resource is missing + + fn pipe_out(_res: Res) -> u8 { + 42 + } + + // This system should be skipped when run due to no matching entity + fn pipe_in(_input: In, _single: Single<&TestComponent>) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + #[test] + #[should_panic] + fn piped_system_panic_and_panic() { + // This system should panic when run because the resource is missing + + fn pipe_out(_res: Res) -> u8 { + 42 + } + + // This system should panic when run because the resource is missing + fn pipe_in(_input: In, _res: Res) {} + + let mut world = World::new(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + } + + #[test] + fn piped_system_skip_and_skip() { + // This system should be skipped when run due to no matching entity + + fn pipe_out(_single: Single<&TestComponent>, mut counter: ResMut) -> u8 { + 
counter.0 += 1; + 42 + } + + // This system should be skipped when run due to no matching entity + fn pipe_in(_input: In, _single: Single<&TestComponent>, mut counter: ResMut) { + counter.0 += 1; + } + + let mut world = World::new(); + world.init_resource::(); + let mut schedule = Schedule::default(); + + schedule.add_systems(pipe_out.pipe(pipe_in)); + schedule.run(&mut world); + + let counter = world.resource::(); + assert_eq!(counter.0, 0); } } diff --git a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs index 2d687ed510833..b0757cc031c16 100644 --- a/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs +++ b/crates/bevy_ecs/src/schedule/executor/multi_threaded.rs @@ -1,31 +1,25 @@ use alloc::{boxed::Box, vec::Vec}; +use bevy_platform::sync::Arc; use bevy_tasks::{ComputeTaskPool, Scope, TaskPool, ThreadExecutor}; -use bevy_utils::{default, syncunsafecell::SyncUnsafeCell}; +use bevy_utils::syncunsafecell::SyncUnsafeCell; use concurrent_queue::ConcurrentQueue; use core::{any::Any, panic::AssertUnwindSafe}; use fixedbitset::FixedBitSet; +#[cfg(feature = "std")] +use std::eprintln; use std::sync::{Mutex, MutexGuard}; #[cfg(feature = "trace")] use tracing::{info_span, Span}; -#[cfg(feature = "portable-atomic")] -use portable_atomic_util::Arc; - -#[cfg(not(feature = "portable-atomic"))] -use alloc::sync::Arc; - use crate::{ - archetype::ArchetypeComponentId, + error::{default_error_handler, BevyError, ErrorContext, Result}, prelude::Resource, - query::Access, schedule::{is_apply_deferred, BoxedCondition, ExecutorKind, SystemExecutor, SystemSchedule}, - system::{ScheduleSystem, System}, + system::ScheduleSystem, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; -use crate as bevy_ecs; - use super::__rust_begin_short_backtrace; /// Borrowed data used by the [`MultiThreadedExecutor`]. 
@@ -66,8 +60,13 @@ impl<'env, 'sys> Environment<'env, 'sys> { /// Per-system data used by the [`MultiThreadedExecutor`]. // Copied here because it can't be read from the system when it's running. struct SystemTaskMetadata { - /// The [`ArchetypeComponentId`] access of the system. - archetype_component_access: Access, + /// The set of systems whose `component_access_set()` conflicts with this one. + conflicting_systems: FixedBitSet, + /// The set of systems whose `component_access_set()` conflicts with this system's conditions. + /// Note that this is separate from `conflicting_systems` to handle the case where + /// a system is skipped by an earlier system set condition or system stepping, + /// and needs access to run its conditions but not for itself. + condition_conflicting_systems: FixedBitSet, /// Indices of the systems that directly depend on the system. dependents: Vec, /// Is `true` if the system does not access `!Send` data. @@ -101,8 +100,8 @@ pub struct MultiThreadedExecutor { pub struct ExecutorState { /// Metadata for scheduling and running system tasks. system_task_metadata: Vec, - /// Union of the accesses of all currently running systems. - active_access: Access, + /// The set of systems whose `component_access_set()` conflicts with this system set's conditions. + set_condition_conflicting_systems: Vec, /// Returns `true` if a system with non-`Send` access is running. local_thread_running: bool, /// Returns `true` if an exclusive system is running. 
@@ -135,6 +134,7 @@ pub struct ExecutorState { struct Context<'scope, 'env, 'sys> { environment: &'env Environment<'env, 'sys>, scope: &'scope Scope<'scope, 'env, ()>, + error_handler: fn(BevyError, ErrorContext), } impl Default for MultiThreadedExecutor { @@ -167,7 +167,8 @@ impl SystemExecutor for MultiThreadedExecutor { state.system_task_metadata = Vec::with_capacity(sys_count); for index in 0..sys_count { state.system_task_metadata.push(SystemTaskMetadata { - archetype_component_access: default(), + conflicting_systems: FixedBitSet::with_capacity(sys_count), + condition_conflicting_systems: FixedBitSet::with_capacity(sys_count), dependents: schedule.system_dependents[index].clone(), is_send: schedule.systems[index].is_send(), is_exclusive: schedule.systems[index].is_exclusive(), @@ -177,6 +178,60 @@ impl SystemExecutor for MultiThreadedExecutor { } } + { + #[cfg(feature = "trace")] + let _span = info_span!("calculate conflicting systems").entered(); + for index1 in 0..sys_count { + let system1 = &schedule.systems[index1]; + for index2 in 0..index1 { + let system2 = &schedule.systems[index2]; + if !system2 + .component_access_set() + .is_compatible(system1.component_access_set()) + { + state.system_task_metadata[index1] + .conflicting_systems + .insert(index2); + state.system_task_metadata[index2] + .conflicting_systems + .insert(index1); + } + } + + for index2 in 0..sys_count { + let system2 = &schedule.systems[index2]; + if schedule.system_conditions[index1].iter().any(|condition| { + !system2 + .component_access_set() + .is_compatible(condition.component_access_set()) + }) { + state.system_task_metadata[index1] + .condition_conflicting_systems + .insert(index2); + } + } + } + + state.set_condition_conflicting_systems.clear(); + state.set_condition_conflicting_systems.reserve(set_count); + for set_idx in 0..set_count { + let mut conflicting_systems = FixedBitSet::with_capacity(sys_count); + for sys_index in 0..sys_count { + let system = 
&schedule.systems[sys_index]; + if schedule.set_conditions[set_idx].iter().any(|condition| { + !system + .component_access_set() + .is_compatible(condition.component_access_set()) + }) { + conflicting_systems.insert(sys_index); + } + } + state + .set_condition_conflicting_systems + .push(conflicting_systems); + } + } + state.num_dependencies_remaining = Vec::with_capacity(sys_count); } @@ -185,6 +240,7 @@ impl SystemExecutor for MultiThreadedExecutor { schedule: &mut SystemSchedule, world: &mut World, _skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ) { let state = self.state.get_mut().unwrap(); // reset counts @@ -224,7 +280,11 @@ impl SystemExecutor for MultiThreadedExecutor { false, thread_executor, |scope| { - let context = Context { environment, scope }; + let context = Context { + environment, + scope, + error_handler, + }; // The first tick won't need to process finished systems, but we still need to run the loop in // tick_executor() in case a system completes while the first tick still holds the mutex. 
@@ -255,7 +315,6 @@ impl SystemExecutor for MultiThreadedExecutor { debug_assert!(state.ready_systems.is_clear()); debug_assert!(state.running_systems.is_clear()); - state.active_access.clear(); state.evaluated_sets.clear(); state.skipped_systems.clear(); state.completed_systems.clear(); @@ -280,7 +339,11 @@ impl<'scope, 'env: 'scope, 'sys> Context<'scope, 'env, 'sys> { .push(SystemResult { system_index }) .unwrap_or_else(|error| unreachable!("{}", error)); if let Err(payload) = res { - eprintln!("Encountered a panic in system `{}`!", &*system.name()); + #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] + { + eprintln!("Encountered a panic in system `{}`!", &*system.name()); + } // set the payload to propagate the error { let mut panic_payload = self.environment.executor.panic_payload.lock().unwrap(); @@ -339,9 +402,9 @@ impl ExecutorState { fn new() -> Self { Self { system_task_metadata: Vec::new(), + set_condition_conflicting_systems: Vec::new(), num_running_systems: 0, num_dependencies_remaining: Vec::new(), - active_access: default(), local_thread_running: false, exclusive_running: false, evaluated_sets: FixedBitSet::new(), @@ -362,8 +425,6 @@ impl ExecutorState { self.finish_system_and_handle_dependents(result); } - self.rebuild_active_access(); - // SAFETY: // - `finish_system_and_handle_dependents` has updated the currently running systems. // - `rebuild_active_access` locks access for all currently running systems. @@ -446,6 +507,8 @@ impl ExecutorState { // SAFETY: // - Caller ensured no other reference to this system exists. + // - `system_task_metadata[system_index].is_exclusive` is `false`, + // so `System::is_exclusive` returned `false` when we called it. // - `can_run` has been called, which calls `update_archetype_component_access` with this system. // - `can_run` returned true, so no systems with conflicting world access are running. 
unsafe { @@ -480,37 +543,30 @@ impl ExecutorState { { for condition in &mut conditions.set_conditions[set_idx] { condition.update_archetype_component_access(world); - if !condition - .archetype_component_access() - .is_compatible(&self.active_access) - { - return false; - } + } + if !self.set_condition_conflicting_systems[set_idx].is_disjoint(&self.running_systems) { + return false; } } for condition in &mut conditions.system_conditions[system_index] { condition.update_archetype_component_access(world); - if !condition - .archetype_component_access() - .is_compatible(&self.active_access) - { - return false; - } + } + if !system_meta + .condition_conflicting_systems + .is_disjoint(&self.running_systems) + { + return false; } if !self.skipped_systems.contains(system_index) { system.update_archetype_component_access(world); - if !system - .archetype_component_access() - .is_compatible(&self.active_access) + if !system_meta + .conflicting_systems + .is_disjoint(&self.running_systems) { return false; } - - self.system_task_metadata[system_index] - .archetype_component_access - .clone_from(system.archetype_component_access()); } true @@ -530,6 +586,7 @@ impl ExecutorState { world: UnsafeWorldCell, ) -> bool { let mut should_run = !self.skipped_systems.contains(system_index); + let error_handler = default_error_handler(); for set_idx in conditions.sets_with_conditions_of_systems[system_index].ones() { if self.evaluated_sets.contains(set_idx) { @@ -574,10 +631,25 @@ impl ExecutorState { // - The caller ensures that `world` has permission to read any data // required by the system. // - `update_archetype_component_access` has been called for system. 
- let valid_params = unsafe { system.validate_param_unsafe(world) }; + let valid_params = match unsafe { system.validate_param_unsafe(world) } { + Ok(()) => true, + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } + false + } + }; if !valid_params { self.skipped_systems.insert(system_index); } + should_run &= valid_params; } @@ -586,6 +658,7 @@ impl ExecutorState { /// # Safety /// - Caller must not alias systems that are running. + /// - `is_exclusive` must have returned `false` for the specified system. /// - `world` must have permission to access the world data /// used by the specified system. /// - `update_archetype_component_access` must have been called with `world` @@ -603,21 +676,26 @@ impl ExecutorState { // SAFETY: // - The caller ensures that we have permission to // access the world data used by the system. + // - `is_exclusive` returned false // - `update_archetype_component_access` has been called. unsafe { - // TODO: implement an error-handling API instead of suppressing a possible failure. - let _ = __rust_begin_short_backtrace::run_unsafe( + if let Err(err) = __rust_begin_short_backtrace::run_unsafe( system, context.environment.world_cell, - ); + ) { + (context.error_handler)( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } }; })); context.system_completed(system_index, res, system); }; - self.active_access - .extend(&system_meta.archetype_component_access); - if system_meta.is_send { context.scope.spawn(task); } else { @@ -653,8 +731,15 @@ impl ExecutorState { // that no other systems currently have access to the world. let world = unsafe { context.environment.world_cell.world_mut() }; let res = std::panic::catch_unwind(AssertUnwindSafe(|| { - // TODO: implement an error-handling API instead of suppressing a possible failure. 
- let _ = __rust_begin_short_backtrace::run(system, world); + if let Err(err) = __rust_begin_short_backtrace::run(system, world) { + (context.error_handler)( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } })); context.system_completed(system_index, res, system); }; @@ -701,15 +786,6 @@ impl ExecutorState { } } } - - fn rebuild_active_access(&mut self) { - self.active_access.clear(); - for index in self.running_systems.ones() { - let system_meta = &self.system_task_metadata[index]; - self.active_access - .extend(&system_meta.archetype_component_access); - } - } } fn apply_deferred( @@ -724,10 +800,14 @@ fn apply_deferred( system.apply_deferred(world); })); if let Err(payload) = res { - eprintln!( - "Encountered a panic when applying buffers for system `{}`!", - &*system.name() - ); + #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] + { + eprintln!( + "Encountered a panic when applying buffers for system `{}`!", + &*system.name() + ); + } return Err(payload); } } @@ -743,8 +823,12 @@ unsafe fn evaluate_and_fold_conditions( conditions: &mut [BoxedCondition], world: UnsafeWorldCell, ) -> bool { - // not short-circuiting is intentional - #[allow(clippy::unnecessary_fold)] + let error_handler = default_error_handler(); + + #[expect( + clippy::unnecessary_fold, + reason = "Short-circuiting here would prevent conditions from mutating their own state as needed." + )] conditions .iter_mut() .map(|condition| { @@ -752,8 +836,20 @@ unsafe fn evaluate_and_fold_conditions( // - The caller ensures that `world` has permission to read any data // required by the condition. // - `update_archetype_component_access` has been called for condition. 
- if !unsafe { condition.validate_param_unsafe(world) } { - return false; + match unsafe { condition.validate_param_unsafe(world) } { + Ok(()) => (), + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: condition.name(), + last_run: condition.get_last_run(), + }, + ); + } + return false; + } } // SAFETY: // - The caller ensures that `world` has permission to read any data @@ -784,9 +880,8 @@ impl MainThreadExecutor { #[cfg(test)] mod tests { use crate::{ - self as bevy_ecs, prelude::Resource, - schedule::{ExecutorKind, IntoSystemConfigs, Schedule}, + schedule::{ExecutorKind, IntoScheduleConfigs, Schedule}, system::Commands, world::World, }; diff --git a/crates/bevy_ecs/src/schedule/executor/simple.rs b/crates/bevy_ecs/src/schedule/executor/simple.rs index 5cf03e2088276..a237a356de688 100644 --- a/crates/bevy_ecs/src/schedule/executor/simple.rs +++ b/crates/bevy_ecs/src/schedule/executor/simple.rs @@ -1,13 +1,17 @@ use core::panic::AssertUnwindSafe; use fixedbitset::FixedBitSet; + #[cfg(feature = "trace")] use tracing::info_span; +#[cfg(feature = "std")] +use std::eprintln; + use crate::{ + error::{default_error_handler, BevyError, ErrorContext}, schedule::{ executor::is_apply_deferred, BoxedCondition, ExecutorKind, SystemExecutor, SystemSchedule, }, - system::System, world::World, }; @@ -40,6 +44,7 @@ impl SystemExecutor for SimpleExecutor { schedule: &mut SystemSchedule, world: &mut World, _skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ) { // If stepping is enabled, make sure we skip those systems that should // not be run. 
@@ -82,7 +87,21 @@ impl SystemExecutor for SimpleExecutor { let system = &mut schedule.systems[system_index]; if should_run { - let valid_params = system.validate_param(world); + let valid_params = match system.validate_param(world) { + Ok(()) => true, + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } + false + } + }; should_run &= valid_params; } @@ -101,11 +120,19 @@ impl SystemExecutor for SimpleExecutor { } let f = AssertUnwindSafe(|| { - // TODO: implement an error-handling API instead of suppressing a possible failure. - let _ = __rust_begin_short_backtrace::run(system, world); + if let Err(err) = __rust_begin_short_backtrace::run(system, world) { + error_handler( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } }); #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] { if let Err(payload) = std::panic::catch_unwind(f) { eprintln!("Encountered a panic in system `{}`!", &*system.name()); @@ -130,7 +157,7 @@ impl SystemExecutor for SimpleExecutor { impl SimpleExecutor { /// Creates a new simple executor for use in a [`Schedule`](crate::schedule::Schedule). - /// This calls each system in order and immediately calls [`System::apply_deferred`]. + /// This calls each system in order and immediately calls [`System::apply_deferred`](crate::system::System). pub const fn new() -> Self { Self { evaluated_sets: FixedBitSet::new(), @@ -140,13 +167,29 @@ impl SimpleExecutor { } fn evaluate_and_fold_conditions(conditions: &mut [BoxedCondition], world: &mut World) -> bool { - // not short-circuiting is intentional - #[allow(clippy::unnecessary_fold)] + let error_handler = default_error_handler(); + + #[expect( + clippy::unnecessary_fold, + reason = "Short-circuiting here would prevent conditions from mutating their own state as needed." 
+ )] conditions .iter_mut() .map(|condition| { - if !condition.validate_param(world) { - return false; + match condition.validate_param(world) { + Ok(()) => (), + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: condition.name(), + last_run: condition.get_last_run(), + }, + ); + } + return false; + } } __rust_begin_short_backtrace::readonly_run(&mut **condition, world) }) diff --git a/crates/bevy_ecs/src/schedule/executor/single_threaded.rs b/crates/bevy_ecs/src/schedule/executor/single_threaded.rs index d19a222d3011b..b42f47726d50c 100644 --- a/crates/bevy_ecs/src/schedule/executor/single_threaded.rs +++ b/crates/bevy_ecs/src/schedule/executor/single_threaded.rs @@ -1,11 +1,15 @@ use core::panic::AssertUnwindSafe; use fixedbitset::FixedBitSet; + #[cfg(feature = "trace")] use tracing::info_span; +#[cfg(feature = "std")] +use std::eprintln; + use crate::{ + error::{default_error_handler, BevyError, ErrorContext}, schedule::{is_apply_deferred, BoxedCondition, ExecutorKind, SystemExecutor, SystemSchedule}, - system::System, world::World, }; @@ -46,6 +50,7 @@ impl SystemExecutor for SingleThreadedExecutor { schedule: &mut SystemSchedule, world: &mut World, _skip_systems: Option<&FixedBitSet>, + error_handler: fn(BevyError, ErrorContext), ) { // If stepping is enabled, make sure we skip those systems that should // not be run. 
@@ -88,7 +93,22 @@ impl SystemExecutor for SingleThreadedExecutor { let system = &mut schedule.systems[system_index]; if should_run { - let valid_params = system.validate_param(world); + let valid_params = match system.validate_param(world) { + Ok(()) => true, + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } + false + } + }; + should_run &= valid_params; } @@ -109,8 +129,15 @@ impl SystemExecutor for SingleThreadedExecutor { let f = AssertUnwindSafe(|| { if system.is_exclusive() { - // TODO: implement an error-handling API instead of suppressing a possible failure. - let _ = __rust_begin_short_backtrace::run(system, world); + if let Err(err) = __rust_begin_short_backtrace::run(system, world) { + error_handler( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } } else { // Use run_unsafe to avoid immediately applying deferred buffers let world = world.as_unsafe_world_cell(); @@ -118,13 +145,21 @@ impl SystemExecutor for SingleThreadedExecutor { // SAFETY: We have exclusive, single-threaded access to the world and // update_archetype_component_access is being called immediately before this. unsafe { - // TODO: implement an error-handling API instead of suppressing a possible failure. 
- let _ = __rust_begin_short_backtrace::run_unsafe(system, world); + if let Err(err) = __rust_begin_short_backtrace::run_unsafe(system, world) { + error_handler( + err, + ErrorContext::System { + name: system.name(), + last_run: system.get_last_run(), + }, + ); + } }; } }); #[cfg(feature = "std")] + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] { if let Err(payload) = std::panic::catch_unwind(f) { eprintln!("Encountered a panic in system `{}`!", &*system.name()); @@ -176,13 +211,29 @@ impl SingleThreadedExecutor { } fn evaluate_and_fold_conditions(conditions: &mut [BoxedCondition], world: &mut World) -> bool { - // not short-circuiting is intentional - #[allow(clippy::unnecessary_fold)] + let error_handler: fn(BevyError, ErrorContext) = default_error_handler(); + + #[expect( + clippy::unnecessary_fold, + reason = "Short-circuiting here would prevent conditions from mutating their own state as needed." + )] conditions .iter_mut() .map(|condition| { - if !condition.validate_param(world) { - return false; + match condition.validate_param(world) { + Ok(()) => (), + Err(e) => { + if !e.skipped { + error_handler( + e.into(), + ErrorContext::System { + name: condition.name(), + last_run: condition.get_last_run(), + }, + ); + } + return false; + } } __rust_begin_short_backtrace::readonly_run(&mut **condition, world) }) diff --git a/crates/bevy_ecs/src/schedule/graph/graph_map.rs b/crates/bevy_ecs/src/schedule/graph/graph_map.rs index b20c763543181..d2fbde9995071 100644 --- a/crates/bevy_ecs/src/schedule/graph/graph_map.rs +++ b/crates/bevy_ecs/src/schedule/graph/graph_map.rs @@ -5,7 +5,7 @@ //! 
[`petgraph`]: https://docs.rs/petgraph/0.6.5/petgraph/ use alloc::vec::Vec; -use bevy_utils::{hashbrown::HashSet, FixedHasher}; +use bevy_platform::{collections::HashSet, hash::FixedHasher}; use core::{ fmt, hash::{BuildHasher, Hash}, @@ -32,7 +32,7 @@ pub type DiGraph = Graph; /// `Graph` is a graph datastructure using an associative array /// of its node weights `NodeId`. /// -/// It uses an combined adjacency list and sparse adjacency matrix +/// It uses a combined adjacency list and sparse adjacency matrix /// representation, using **O(|N| + |E|)** space, and allows testing for edge /// existence in constant time. /// @@ -65,7 +65,7 @@ where S: BuildHasher, { /// Create a new `Graph` with estimated capacity. - pub(crate) fn with_capacity(nodes: usize, edges: usize) -> Self + pub fn with_capacity(nodes: usize, edges: usize) -> Self where S: Default, { @@ -89,14 +89,14 @@ where } /// Add node `n` to the graph. - pub(crate) fn add_node(&mut self, n: NodeId) { + pub fn add_node(&mut self, n: NodeId) { self.nodes.entry(n).or_default(); } /// Remove a node `n` from the graph. /// /// Computes in **O(N)** time, due to the removal of edges with other nodes. - pub(crate) fn remove_node(&mut self, n: NodeId) { + pub fn remove_node(&mut self, n: NodeId) { let Some(links) = self.nodes.swap_remove(&n) else { return; }; @@ -125,7 +125,7 @@ where /// For a directed graph, the edge is directed from `a` to `b`. /// /// Inserts nodes `a` and/or `b` if they aren't already part of the graph. - pub(crate) fn add_edge(&mut self, a: NodeId, b: NodeId) { + pub fn add_edge(&mut self, a: NodeId, b: NodeId) { if self.edges.insert(Self::edge_key(a, b)) { // insert in the adjacency list if it's a new edge self.nodes @@ -166,7 +166,7 @@ where /// Remove edge from `a` to `b` from the graph. /// /// Return `false` if the edge didn't exist. 
- pub(crate) fn remove_edge(&mut self, a: NodeId, b: NodeId) -> bool { + pub fn remove_edge(&mut self, a: NodeId, b: NodeId) -> bool { let exist1 = self.remove_single_edge(a, b, Outgoing); let exist2 = if a != b { self.remove_single_edge(b, a, Incoming) @@ -393,6 +393,7 @@ impl CompactNodeIdPair { #[cfg(test)] mod tests { use super::*; + use alloc::vec; /// The `Graph` type _must_ preserve the order that nodes are inserted in if /// no removals occur. Removals are permitted to swap the latest node into the @@ -436,7 +437,7 @@ mod tests { assert_eq!(graph.nodes().collect::>(), vec![]); } - /// Nodes that have bidrectional edges (or any edge in the case of undirected graphs) are + /// Nodes that have bidirectional edges (or any edge in the case of undirected graphs) are /// considered strongly connected. A strongly connected component is a collection of /// nodes where there exists a path from any node to any other node in the collection. #[test] diff --git a/crates/bevy_ecs/src/schedule/graph/mod.rs b/crates/bevy_ecs/src/schedule/graph/mod.rs index 1532184f1761e..8a98604102058 100644 --- a/crates/bevy_ecs/src/schedule/graph/mod.rs +++ b/crates/bevy_ecs/src/schedule/graph/mod.rs @@ -1,8 +1,13 @@ -use alloc::{vec, vec::Vec}; -use core::fmt::Debug; +use alloc::{boxed::Box, vec, vec::Vec}; +use core::{ + any::{Any, TypeId}, + fmt::Debug, +}; use smallvec::SmallVec; -use bevy_utils::{HashMap, HashSet}; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_utils::TypeIdMap; + use fixedbitset::FixedBitSet; use crate::schedule::set::*; @@ -21,22 +26,26 @@ pub(crate) enum DependencyKind { Before, /// A node that should be succeeded. After, - /// A node that should be preceded and will **not** automatically insert an instance of `ApplyDeferred` on the edge. - BeforeNoSync, - /// A node that should be succeeded and will **not** automatically insert an instance of `ApplyDeferred` on the edge. - AfterNoSync, } /// An edge to be added to the dependency graph. 
-#[derive(Clone)] pub(crate) struct Dependency { pub(crate) kind: DependencyKind, pub(crate) set: InternedSystemSet, + pub(crate) options: TypeIdMap>, } impl Dependency { pub fn new(kind: DependencyKind, set: InternedSystemSet) -> Self { - Self { kind, set } + Self { + kind, + set, + options: Default::default(), + } + } + pub fn add_config(mut self, option: T) -> Self { + self.options.insert(TypeId::of::(), Box::new(option)); + self } } @@ -52,8 +61,8 @@ pub(crate) enum Ambiguity { } /// Metadata about how the node fits in the schedule graph -#[derive(Clone, Default)] -pub(crate) struct GraphInfo { +#[derive(Default)] +pub struct GraphInfo { /// the sets that the node belongs to (hierarchy) pub(crate) hierarchy: Vec, /// the sets that the node depends on (must run before or after) @@ -86,7 +95,7 @@ pub(crate) struct CheckGraphResults { pub(crate) transitive_reduction: DiGraph, /// Variant of the graph with all possible transitive edges. // TODO: this will very likely be used by "if-needed" ordering - #[allow(dead_code)] + #[expect(dead_code, reason = "See the TODO above this attribute.")] pub(crate) transitive_closure: DiGraph, } @@ -267,7 +276,7 @@ pub fn simple_cycles_in_component(graph: &DiGraph, scc: &[NodeId]) -> Vec Option { - Some(Ord::cmp(self, other)) - } -} - -impl Ord for NodeId { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - self.cmp(other) - } -} - impl NodeId { /// Returns the internal integer value. - pub(crate) const fn index(&self) -> usize { + pub const fn index(&self) -> usize { match self { NodeId::System(index) | NodeId::Set(index) => *index, } diff --git a/crates/bevy_ecs/src/schedule/mod.rs b/crates/bevy_ecs/src/schedule/mod.rs index dc2a907949bd1..aeaf8e3929ce6 100644 --- a/crates/bevy_ecs/src/schedule/mod.rs +++ b/crates/bevy_ecs/src/schedule/mod.rs @@ -1,29 +1,39 @@ //! 
Contains APIs for ordering systems and executing them on a [`World`](crate::world::World) +mod auto_insert_apply_deferred; mod condition; mod config; mod executor; -mod graph; -#[allow(clippy::module_inception)] +mod pass; mod schedule; mod set; mod stepping; use self::graph::*; pub use self::{condition::*, config::*, executor::*, schedule::*, set::*}; +pub use pass::ScheduleBuildPass; pub use self::graph::NodeId; +/// An implementation of a graph data structure. +pub mod graph; + +/// Included optional schedule build passes. +pub mod passes { + pub use crate::schedule::auto_insert_apply_deferred::*; +} + #[cfg(test)] mod tests { use super::*; + use alloc::{string::ToString, vec, vec::Vec}; use core::sync::atomic::{AtomicU32, Ordering}; - pub use crate as bevy_ecs; pub use crate::{ prelude::World, + resource::Resource, schedule::{Schedule, SystemSet}, - system::{Res, ResMut, Resource}, + system::{Res, ResMut}, }; #[derive(SystemSet, Clone, Debug, PartialEq, Eq, Hash)] @@ -719,8 +729,6 @@ mod tests { use alloc::collections::BTreeSet; use super::*; - // Required to make the derive macro behave - use crate as bevy_ecs; use crate::prelude::*; #[derive(Resource)] @@ -1081,7 +1089,7 @@ mod tests { schedule.graph_mut().initialize(&mut world); let _ = schedule.graph_mut().build_schedule( - world.components(), + &mut world, TestSchedule.intern(), &BTreeSet::new(), ); @@ -1089,28 +1097,38 @@ mod tests { let ambiguities: Vec<_> = schedule .graph() .conflicts_to_string(schedule.graph().conflicting_systems(), world.components()) + .map(|item| { + ( + item.0, + item.1, + item.2 + .into_iter() + .map(|name| name.to_string()) + .collect::>(), + ) + }) .collect(); let expected = &[ ( "system_d".to_string(), "system_a".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ( "system_d".to_string(), "system_e".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + 
vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ( "system_b".to_string(), "system_a".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ( "system_b".to_string(), "system_e".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ), ]; @@ -1130,7 +1148,7 @@ mod tests { let mut world = World::new(); schedule.graph_mut().initialize(&mut world); let _ = schedule.graph_mut().build_schedule( - world.components(), + &mut world, TestSchedule.intern(), &BTreeSet::new(), ); @@ -1138,6 +1156,16 @@ mod tests { let ambiguities: Vec<_> = schedule .graph() .conflicts_to_string(schedule.graph().conflicting_systems(), world.components()) + .map(|item| { + ( + item.0, + item.1, + item.2 + .into_iter() + .map(|name| name.to_string()) + .collect::>(), + ) + }) .collect(); assert_eq!( @@ -1145,7 +1173,7 @@ mod tests { ( "resmut_system (in set (resmut_system, resmut_system))".to_string(), "resmut_system (in set (resmut_system, resmut_system))".to_string(), - vec!["bevy_ecs::schedule::tests::system_ambiguity::R"], + vec!["bevy_ecs::schedule::tests::system_ambiguity::R".into()], ) ); } @@ -1184,7 +1212,7 @@ mod tests { let mut schedule = Schedule::new(TestSchedule); schedule .set_executor_kind($executor) - .add_systems(|| panic!("Executor ignored Stepping")); + .add_systems(|| -> () { panic!("Executor ignored Stepping") }); // Add our schedule to stepping & and enable stepping; this should // prevent any systems in the schedule from running diff --git a/crates/bevy_ecs/src/schedule/pass.rs b/crates/bevy_ecs/src/schedule/pass.rs new file mode 100644 index 0000000000000..20680e04e032c --- /dev/null +++ b/crates/bevy_ecs/src/schedule/pass.rs @@ -0,0 +1,79 @@ +use alloc::{boxed::Box, vec::Vec}; +use core::any::{Any, TypeId}; + +use super::{DiGraph, NodeId, ScheduleBuildError, ScheduleGraph}; +use 
crate::world::World; +use bevy_utils::TypeIdMap; +use core::fmt::Debug; + +/// A pass for modular modification of the dependency graph. +pub trait ScheduleBuildPass: Send + Sync + Debug + 'static { + /// Custom options for dependencies between sets or systems. + type EdgeOptions: 'static; + + /// Called when a dependency between sets or systems was explicitly added to the graph. + fn add_dependency(&mut self, from: NodeId, to: NodeId, options: Option<&Self::EdgeOptions>); + + /// Called while flattening the dependency graph. For each `set`, this method is called + /// with the `systems` associated with the set as well as an immutable reference to the current graph. + /// Instead of modifying the graph directly, this method should return an iterator of edges to add to the graph. + fn collapse_set( + &mut self, + set: NodeId, + systems: &[NodeId], + dependency_flattened: &DiGraph, + ) -> impl Iterator; + + /// The implementation will be able to modify the `ScheduleGraph` here. + fn build( + &mut self, + world: &mut World, + graph: &mut ScheduleGraph, + dependency_flattened: &mut DiGraph, + ) -> Result<(), ScheduleBuildError>; +} + +/// Object safe version of [`ScheduleBuildPass`]. 
+pub(super) trait ScheduleBuildPassObj: Send + Sync + Debug { + fn build( + &mut self, + world: &mut World, + graph: &mut ScheduleGraph, + dependency_flattened: &mut DiGraph, + ) -> Result<(), ScheduleBuildError>; + + fn collapse_set( + &mut self, + set: NodeId, + systems: &[NodeId], + dependency_flattened: &DiGraph, + dependencies_to_add: &mut Vec<(NodeId, NodeId)>, + ); + fn add_dependency(&mut self, from: NodeId, to: NodeId, all_options: &TypeIdMap>); +} +impl ScheduleBuildPassObj for T { + fn build( + &mut self, + world: &mut World, + graph: &mut ScheduleGraph, + dependency_flattened: &mut DiGraph, + ) -> Result<(), ScheduleBuildError> { + self.build(world, graph, dependency_flattened) + } + fn collapse_set( + &mut self, + set: NodeId, + systems: &[NodeId], + dependency_flattened: &DiGraph, + dependencies_to_add: &mut Vec<(NodeId, NodeId)>, + ) { + let iter = self.collapse_set(set, systems, dependency_flattened); + dependencies_to_add.extend(iter); + } + fn add_dependency(&mut self, from: NodeId, to: NodeId, all_options: &TypeIdMap>) { + let option = all_options + .get(&TypeId::of::()) + .and_then(|x| x.downcast_ref::()); + self.add_dependency(from, to, option); + } +} diff --git a/crates/bevy_ecs/src/schedule/schedule.rs b/crates/bevy_ecs/src/schedule/schedule.rs index ff1d8283e8d6c..584e621bf8a39 100644 --- a/crates/bevy_ecs/src/schedule/schedule.rs +++ b/crates/bevy_ecs/src/schedule/schedule.rs @@ -1,27 +1,37 @@ +#![expect( + clippy::module_inception, + reason = "This instance of module inception is being discussed; see #17344." 
+)] +use alloc::borrow::Cow; use alloc::{ boxed::Box, - collections::BTreeSet, + collections::{BTreeMap, BTreeSet}, format, string::{String, ToString}, vec, vec::Vec, }; -use bevy_utils::{default, HashMap, HashSet}; -use core::fmt::{Debug, Write}; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_utils::{default, TypeIdMap}; +use core::{ + any::{Any, TypeId}, + fmt::{Debug, Write}, +}; use disqualified::ShortName; use fixedbitset::FixedBitSet; use log::{error, info, warn}; +use pass::ScheduleBuildPassObj; use thiserror::Error; #[cfg(feature = "trace")] use tracing::info_span; use crate::{ - self as bevy_ecs, component::{ComponentId, Components, Tick}, + error::default_error_handler, prelude::Component, - result::Result, + resource::Resource, schedule::*, - system::{IntoSystem, Resource, ScheduleSystem, System}, + system::ScheduleSystem, world::World, }; @@ -40,10 +50,7 @@ pub struct Schedules { impl Schedules { /// Constructs an empty `Schedules` with zero initial capacity. pub fn new() -> Self { - Self { - inner: HashMap::default(), - ignored_scheduling_ambiguities: BTreeSet::new(), - } + Self::default() } /// Inserts a labeled schedule into the map. @@ -108,8 +115,13 @@ impl Schedules { pub(crate) fn check_change_ticks(&mut self, change_tick: Tick) { #[cfg(feature = "trace")] let _all_span = info_span!("check stored schedule ticks").entered(); - // label used when trace feature is enabled - #[allow(unused_variables)] + #[cfg_attr( + not(feature = "trace"), + expect( + unused_variables, + reason = "The `label` variable goes unused if the `trace` feature isn't active" + ) + )] for (label, schedule) in &mut self.inner { #[cfg(feature = "trace")] let name = format!("{label:?}"); @@ -135,7 +147,7 @@ impl Schedules { /// Ignore system order ambiguities caused by conflicts on [`Resource`]s of type `T`. 
pub fn allow_ambiguous_resource(&mut self, world: &mut World) { self.ignored_scheduling_ambiguities - .insert(world.components.register_resource::()); + .insert(world.components_registrator().register_resource::()); } /// Iterate through the [`ComponentId`]'s that will be ignored. @@ -162,7 +174,7 @@ impl Schedules { pub fn add_systems( &mut self, schedule: impl ScheduleLabel, - systems: impl IntoSystemConfigs, + systems: impl IntoScheduleConfigs, ) -> &mut Self { self.entry(schedule).add_systems(systems); @@ -171,10 +183,10 @@ impl Schedules { /// Configures a collection of system sets in the provided schedule, adding any sets that do not exist. #[track_caller] - pub fn configure_sets( + pub fn configure_sets( &mut self, schedule: impl ScheduleLabel, - sets: impl IntoSystemSetConfigs, + sets: impl IntoScheduleConfigs, ) -> &mut Self { self.entry(schedule).configure_sets(sets); @@ -214,15 +226,32 @@ fn make_executor(kind: ExecutorKind) -> Box { } /// Chain systems into dependencies -#[derive(PartialEq)] +#[derive(Default)] pub enum Chain { - /// Run nodes in order. If there are deferred parameters in preceding systems a - /// [`ApplyDeferred`] will be added on the edge. - Yes, - /// Run nodes in order. This will not add [`ApplyDeferred`] between nodes. - YesIgnoreDeferred, - /// Nodes are allowed to run in any order. - No, + /// Systems are independent. Nodes are allowed to run in any order. + #[default] + Unchained, + /// Systems are chained. `before -> after` ordering constraints + /// will be added between the successive elements. + Chained(TypeIdMap>), +} +impl Chain { + /// Specify that the systems must be chained. + pub fn set_chained(&mut self) { + if matches!(self, Chain::Unchained) { + *self = Self::Chained(Default::default()); + }; + } + /// Specify that the systems must be chained, and add the specified configuration for + /// all dependencies created between these systems. 
+ pub fn set_chained_with_config(&mut self, config: T) { + self.set_chained(); + if let Chain::Chained(config_map) = self { + config_map.insert(TypeId::of::(), Box::new(config)); + } else { + unreachable!() + }; + } } /// A collection of systems, and the metadata and executor needed to run them @@ -286,13 +315,16 @@ impl Default for Schedule { impl Schedule { /// Constructs an empty `Schedule`. pub fn new(label: impl ScheduleLabel) -> Self { - Self { + let mut this = Self { label: label.intern(), graph: ScheduleGraph::new(), executable: SystemSchedule::new(), executor: make_executor(ExecutorKind::default()), executor_initialized: false, - } + }; + // Call `set_build_settings` to add any default build passes + this.set_build_settings(Default::default()); + this } /// Get the `InternedScheduleLabel` for this `Schedule`. @@ -301,7 +333,10 @@ impl Schedule { } /// Add a collection of systems to the schedule. - pub fn add_systems(&mut self, systems: impl IntoSystemConfigs) -> &mut Self { + pub fn add_systems( + &mut self, + systems: impl IntoScheduleConfigs, + ) -> &mut Self { self.graph.process_configs(systems.into_configs(), false); self } @@ -339,13 +374,32 @@ impl Schedule { /// Configures a collection of system sets in this schedule, adding them if they does not exist. #[track_caller] - pub fn configure_sets(&mut self, sets: impl IntoSystemSetConfigs) -> &mut Self { + pub fn configure_sets( + &mut self, + sets: impl IntoScheduleConfigs, + ) -> &mut Self { self.graph.configure_sets(sets); self } + /// Add a custom build pass to the schedule. + pub fn add_build_pass(&mut self, pass: T) -> &mut Self { + self.graph.passes.insert(TypeId::of::(), Box::new(pass)); + self + } + + /// Remove a custom build pass. + pub fn remove_build_pass(&mut self) { + self.graph.passes.remove(&TypeId::of::()); + } + /// Changes miscellaneous build settings. 
pub fn set_build_settings(&mut self, settings: ScheduleBuildSettings) -> &mut Self { + if settings.auto_insert_apply_deferred { + self.add_build_pass(passes::AutoInsertApplyDeferredPass::default()); + } else { + self.remove_build_pass::(); + } self.graph.settings = settings; self } @@ -387,8 +441,11 @@ impl Schedule { self.initialize(world) .unwrap_or_else(|e| panic!("Error when initializing schedule {:?}: {e}", self.label)); + let error_handler = default_error_handler(); + #[cfg(not(feature = "bevy_debug_stepping"))] - self.executor.run(&mut self.executable, world, None); + self.executor + .run(&mut self.executable, world, None, error_handler); #[cfg(feature = "bevy_debug_stepping")] { @@ -397,8 +454,12 @@ impl Schedule { Some(mut stepping) => stepping.skipped_systems(self), }; - self.executor - .run(&mut self.executable, world, skip_systems.as_ref()); + self.executor.run( + &mut self.executable, + world, + skip_systems.as_ref(), + error_handler, + ); } } @@ -414,8 +475,8 @@ impl Schedule { .ignored_scheduling_ambiguities .clone(); self.graph.update_schedule( + world, &mut self.executable, - world.components(), &ignored_ambiguities, self.label, )?; @@ -569,21 +630,24 @@ impl SystemSetNode { } /// A [`ScheduleSystem`] stored in a [`ScheduleGraph`]. -struct SystemNode { +pub struct SystemNode { inner: Option, } impl SystemNode { + /// Create a new [`SystemNode`] pub fn new(system: ScheduleSystem) -> Self { Self { inner: Some(system), } } + /// Obtain a reference to the [`ScheduleSystem`] represented by this node. pub fn get(&self) -> Option<&ScheduleSystem> { self.inner.as_ref() } + /// Obtain a mutable reference to the [`ScheduleSystem`] represented by this node. 
pub fn get_mut(&mut self) -> Option<&mut ScheduleSystem> { self.inner.as_mut() } @@ -596,9 +660,9 @@ impl SystemNode { #[derive(Default)] pub struct ScheduleGraph { /// List of systems in the schedule - systems: Vec, + pub systems: Vec, /// List of conditions for each system, in the same order as `systems` - system_conditions: Vec>, + pub system_conditions: Vec>, /// List of system sets in the schedule system_sets: Vec, /// List of conditions for each system set, in the same order as `system_sets` @@ -613,14 +677,14 @@ pub struct ScheduleGraph { /// Directed acyclic graph of the dependency (which systems/sets have to run before which other systems/sets) dependency: Dag, ambiguous_with: UnGraph, - ambiguous_with_all: HashSet, + /// Nodes that are allowed to have ambiguous ordering relationship with any other systems. + pub ambiguous_with_all: HashSet, conflicting_systems: Vec<(NodeId, NodeId, Vec)>, anonymous_sets: usize, changed: bool, settings: ScheduleBuildSettings, - /// Dependency edges that will **not** automatically insert an instance of `apply_deferred` on the edge. - no_sync_edges: BTreeSet<(NodeId, NodeId)>, - auto_sync_node_ids: HashMap, + + passes: BTreeMap>, } impl ScheduleGraph { @@ -641,8 +705,7 @@ impl ScheduleGraph { anonymous_sets: 0, changed: false, settings: default(), - no_sync_edges: BTreeSet::new(), - auto_sync_node_ids: HashMap::default(), + passes: default(), } } @@ -689,6 +752,26 @@ impl ScheduleGraph { .unwrap() } + /// Returns the conditions for the set at the given [`NodeId`], if it exists. + pub fn get_set_conditions_at(&self, id: NodeId) -> Option<&[BoxedCondition]> { + if !id.is_set() { + return None; + } + self.system_set_conditions + .get(id.index()) + .map(Vec::as_slice) + } + + /// Returns the conditions for the set at the given [`NodeId`]. + /// + /// Panics if it doesn't exist. 
+ #[track_caller] + pub fn set_conditions_at(&self, id: NodeId) -> &[BoxedCondition] { + self.get_set_conditions_at(id) + .ok_or_else(|| format!("set with id {id:?} does not exist in this Schedule")) + .unwrap() + } + /// Returns an iterator over all systems in this schedule, along with the conditions for each system. pub fn systems(&self) -> impl Iterator { self.systems @@ -736,9 +819,9 @@ impl ScheduleGraph { &self.conflicting_systems } - fn process_config( + fn process_config( &mut self, - config: NodeConfig, + config: ScheduleConfig, collect_nodes: bool, ) -> ProcessConfigsResult { ProcessConfigsResult { @@ -750,9 +833,11 @@ impl ScheduleGraph { } } - fn apply_collective_conditions( + fn apply_collective_conditions< + T: ProcessScheduleConfig + Schedulable, + >( &mut self, - configs: &mut [NodeConfigs], + configs: &mut [ScheduleConfigs], collective_conditions: Vec, ) { if !collective_conditions.is_empty() { @@ -765,7 +850,7 @@ impl ScheduleGraph { for config in configs.iter_mut() { config.in_set_inner(set.intern()); } - let mut set_config = SystemSetConfig::new(set.intern()); + let mut set_config = InternedSystemSet::into_config(set.intern()); set_config.conditions.extend(collective_conditions); self.configure_set_inner(set_config).unwrap(); } @@ -778,30 +863,31 @@ impl ScheduleGraph { /// `process_config` is the function which processes each individual config node and returns a corresponding `NodeId`. 
/// /// The fields on the returned [`ProcessConfigsResult`] are: - /// - `nodes`: a vector of all node ids contained in the nested `NodeConfigs` + /// - `nodes`: a vector of all node ids contained in the nested `ScheduleConfigs` /// - `densely_chained`: a boolean that is true if all nested nodes are linearly chained (with successive `after` orderings) in the order they are defined #[track_caller] - fn process_configs( + fn process_configs< + T: ProcessScheduleConfig + Schedulable, + >( &mut self, - configs: NodeConfigs, + configs: ScheduleConfigs, collect_nodes: bool, ) -> ProcessConfigsResult { match configs { - NodeConfigs::NodeConfig(config) => self.process_config(config, collect_nodes), - NodeConfigs::Configs { + ScheduleConfigs::ScheduleConfig(config) => self.process_config(config, collect_nodes), + ScheduleConfigs::Configs { + metadata, mut configs, collective_conditions, - chained, } => { self.apply_collective_conditions(&mut configs, collective_conditions); - let ignore_deferred = matches!(chained, Chain::YesIgnoreDeferred); - let chained = matches!(chained, Chain::Yes | Chain::YesIgnoreDeferred); + let is_chained = matches!(metadata, Chain::Chained(_)); // Densely chained if // * chained and all configs in the chain are densely chained, or // * unchained with a single densely chained config - let mut densely_chained = chained || configs.len() == 1; + let mut densely_chained = is_chained || configs.len() == 1; let mut configs = configs.into_iter(); let mut nodes = Vec::new(); @@ -811,14 +897,14 @@ impl ScheduleGraph { densely_chained, }; }; - let mut previous_result = self.process_configs(first, collect_nodes || chained); + let mut previous_result = self.process_configs(first, collect_nodes || is_chained); densely_chained &= previous_result.densely_chained; for current in configs { - let current_result = self.process_configs(current, collect_nodes || chained); + let current_result = self.process_configs(current, collect_nodes || is_chained); densely_chained 
&= current_result.densely_chained; - if chained { + if let Chain::Chained(chain_options) = &metadata { // if the current result is densely chained, we only need to chain the first node let current_nodes = if current_result.densely_chained { ¤t_result.nodes[..1] @@ -838,8 +924,12 @@ impl ScheduleGraph { .graph .add_edge(*previous_node, *current_node); - if ignore_deferred { - self.no_sync_edges.insert((*previous_node, *current_node)); + for pass in self.passes.values_mut() { + pass.add_dependency( + *previous_node, + *current_node, + chain_options, + ); } } } @@ -862,12 +952,15 @@ impl ScheduleGraph { } } - /// Add a [`SystemConfig`] to the graph, including its dependencies and conditions. - fn add_system_inner(&mut self, config: SystemConfig) -> Result { + /// Add a [`ScheduleConfig`] to the graph, including its dependencies and conditions. + fn add_system_inner( + &mut self, + config: ScheduleConfig, + ) -> Result { let id = NodeId::System(self.systems.len()); // graph updates are immediate - self.update_graphs(id, config.graph_info)?; + self.update_graphs(id, config.metadata)?; // system init has to be deferred (need `&mut World`) self.uninit.push((id, 0)); @@ -878,15 +971,18 @@ impl ScheduleGraph { } #[track_caller] - fn configure_sets(&mut self, sets: impl IntoSystemSetConfigs) { + fn configure_sets(&mut self, sets: impl IntoScheduleConfigs) { self.process_configs(sets.into_configs(), false); } - /// Add a single `SystemSetConfig` to the graph, including its dependencies and conditions. - fn configure_set_inner(&mut self, set: SystemSetConfig) -> Result { - let SystemSetConfig { + /// Add a single `ScheduleConfig` to the graph, including its dependencies and conditions. 
+ fn configure_set_inner( + &mut self, + set: ScheduleConfig, + ) -> Result { + let ScheduleConfig { node: set, - graph_info, + metadata, mut conditions, } = set; @@ -896,7 +992,7 @@ impl ScheduleGraph { }; // graph updates are immediate - self.update_graphs(id, graph_info)?; + self.update_graphs(id, metadata)?; // system init has to be deferred (need `&mut World`) let system_set_conditions = &mut self.system_set_conditions[id.index()]; @@ -962,7 +1058,7 @@ impl ScheduleGraph { id: &NodeId, graph_info: &GraphInfo, ) -> Result<(), ScheduleBuildError> { - for Dependency { kind: _, set } in &graph_info.dependencies { + for Dependency { set, .. } in &graph_info.dependencies { match self.system_set_ids.get(set) { Some(set_id) => { if id == set_id { @@ -1013,23 +1109,18 @@ impl ScheduleGraph { self.dependency.graph.add_node(set); } - for (kind, set) in dependencies + for (kind, set, options) in dependencies .into_iter() - .map(|Dependency { kind, set }| (kind, self.system_set_ids[&set])) + .map(|Dependency { kind, set, options }| (kind, self.system_set_ids[&set], options)) { let (lhs, rhs) = match kind { DependencyKind::Before => (id, set), - DependencyKind::BeforeNoSync => { - self.no_sync_edges.insert((id, set)); - (id, set) - } DependencyKind::After => (set, id), - DependencyKind::AfterNoSync => { - self.no_sync_edges.insert((set, id)); - (set, id) - } }; self.dependency.graph.add_edge(lhs, rhs); + for pass in self.passes.values_mut() { + pass.add_dependency(lhs, rhs, &options); + } // ensure set also appears in hierarchy graph self.hierarchy.graph.add_node(set); @@ -1053,7 +1144,7 @@ impl ScheduleGraph { Ok(()) } - /// Initializes any newly-added systems and conditions by calling [`System::initialize`] + /// Initializes any newly-added systems and conditions by calling [`System::initialize`](crate::system::System) pub fn initialize(&mut self, world: &mut World) { for (id, i) in self.uninit.drain(..) 
{ match id { @@ -1079,7 +1170,7 @@ impl ScheduleGraph { /// - checks for system access conflicts and reports ambiguities pub fn build_schedule( &mut self, - components: &Components, + world: &mut World, schedule_label: InternedScheduleLabel, ignored_ambiguities: &BTreeSet, ) -> Result { @@ -1112,10 +1203,12 @@ impl ScheduleGraph { let mut dependency_flattened = self.get_dependency_flattened(&set_systems); - // modify graph with auto sync points - if self.settings.auto_insert_apply_deferred { - dependency_flattened = self.auto_insert_apply_deferred(&mut dependency_flattened)?; + // modify graph with build passes + let mut passes = core::mem::take(&mut self.passes); + for pass in passes.values_mut() { + pass.build(world, self, &mut dependency_flattened)?; } + self.passes = passes; // topsort let mut dependency_flattened_dag = Dag { @@ -1140,92 +1233,13 @@ impl ScheduleGraph { &ambiguous_with_flattened, ignored_ambiguities, ); - self.optionally_check_conflicts(&conflicting_systems, components, schedule_label)?; + self.optionally_check_conflicts(&conflicting_systems, world.components(), schedule_label)?; self.conflicting_systems = conflicting_systems; // build the schedule Ok(self.build_schedule_inner(dependency_flattened_dag, hier_results.reachable)) } - // modify the graph to have sync nodes for any dependents after a system with deferred system params - fn auto_insert_apply_deferred( - &mut self, - dependency_flattened: &mut DiGraph, - ) -> Result { - let mut sync_point_graph = dependency_flattened.clone(); - let topo = self.topsort_graph(dependency_flattened, ReportCycles::Dependency)?; - - // calculate the number of sync points each sync point is from the beginning of the graph - // use the same sync point if the distance is the same - let mut distances: HashMap> = - HashMap::with_capacity_and_hasher(topo.len(), Default::default()); - for node in &topo { - let add_sync_after = self.systems[node.index()].get().unwrap().has_deferred(); - - for target in 
dependency_flattened.neighbors_directed(*node, Outgoing) { - let add_sync_on_edge = add_sync_after - && !is_apply_deferred(self.systems[target.index()].get().unwrap()) - && !self.no_sync_edges.contains(&(*node, target)); - - let weight = if add_sync_on_edge { 1 } else { 0 }; - - let distance = distances - .get(&target.index()) - .unwrap_or(&None) - .or(Some(0)) - .map(|distance| { - distance.max( - distances.get(&node.index()).unwrap_or(&None).unwrap_or(0) + weight, - ) - }); - - distances.insert(target.index(), distance); - - if add_sync_on_edge { - let sync_point = self.get_sync_point(distances[&target.index()].unwrap()); - sync_point_graph.add_edge(*node, sync_point); - sync_point_graph.add_edge(sync_point, target); - - // edge is now redundant - sync_point_graph.remove_edge(*node, target); - } - } - } - - Ok(sync_point_graph) - } - - /// add an [`ApplyDeferred`] system with no config - fn add_auto_sync(&mut self) -> NodeId { - let id = NodeId::System(self.systems.len()); - - self.systems - .push(SystemNode::new(ScheduleSystem::Infallible(Box::new( - IntoSystem::into_system(ApplyDeferred), - )))); - self.system_conditions.push(Vec::new()); - - // ignore ambiguities with auto sync points - // They aren't under user control, so no one should know or care. - self.ambiguous_with_all.insert(id); - - id - } - - /// Returns the `NodeId` of the cached auto sync point. Will create - /// a new one if needed. - fn get_sync_point(&mut self, distance: u32) -> NodeId { - self.auto_sync_node_ids - .get(&distance) - .copied() - .or_else(|| { - let node_id = self.add_auto_sync(); - self.auto_sync_node_ids.insert(distance, node_id); - Some(node_id) - }) - .unwrap() - } - /// Return a map from system set `NodeId` to a list of system `NodeId`s that are included in the set. 
/// Also return a map from system set `NodeId` to a `FixedBitSet` of system `NodeId`s that are included in the set, /// where the bitset order is the same as `self.systems` @@ -1273,34 +1287,25 @@ impl ScheduleGraph { let mut dependency_flattened = self.dependency.graph.clone(); let mut temp = Vec::new(); for (&set, systems) in set_systems { + for pass in self.passes.values_mut() { + pass.collapse_set(set, systems, &dependency_flattened, &mut temp); + } if systems.is_empty() { // collapse dependencies for empty sets for a in dependency_flattened.neighbors_directed(set, Incoming) { for b in dependency_flattened.neighbors_directed(set, Outgoing) { - if self.no_sync_edges.contains(&(a, set)) - && self.no_sync_edges.contains(&(set, b)) - { - self.no_sync_edges.insert((a, b)); - } - temp.push((a, b)); } } } else { for a in dependency_flattened.neighbors_directed(set, Incoming) { for &sys in systems { - if self.no_sync_edges.contains(&(a, set)) { - self.no_sync_edges.insert((a, sys)); - } temp.push((a, sys)); } } for b in dependency_flattened.neighbors_directed(set, Outgoing) { for &sys in systems { - if self.no_sync_edges.contains(&(set, b)) { - self.no_sync_edges.insert((sys, b)); - } temp.push((sys, b)); } } @@ -1495,8 +1500,8 @@ impl ScheduleGraph { /// Updates the `SystemSchedule` from the `ScheduleGraph`. 
fn update_schedule( &mut self, + world: &mut World, schedule: &mut SystemSchedule, - components: &Components, ignored_ambiguities: &BTreeSet, schedule_label: InternedScheduleLabel, ) -> Result<(), ScheduleBuildError> { @@ -1523,7 +1528,7 @@ impl ScheduleGraph { self.system_set_conditions[id.index()] = conditions; } - *schedule = self.build_schedule(components, schedule_label, ignored_ambiguities)?; + *schedule = self.build_schedule(world, schedule_label, ignored_ambiguities)?; // move systems into new schedule for &id in &schedule.system_ids { @@ -1544,7 +1549,7 @@ impl ScheduleGraph { /// Values returned by [`ScheduleGraph::process_configs`] struct ProcessConfigsResult { - /// All nodes contained inside this `process_configs` call's [`NodeConfigs`] hierarchy, + /// All nodes contained inside this `process_configs` call's [`ScheduleConfigs`] hierarchy, /// if `ancestor_chained` is true nodes: Vec, /// True if and only if all nodes are "densely chained", meaning that all nested nodes @@ -1553,27 +1558,29 @@ struct ProcessConfigsResult { densely_chained: bool, } -/// Trait used by [`ScheduleGraph::process_configs`] to process a single [`NodeConfig`]. -trait ProcessNodeConfig: Sized { - /// Process a single [`NodeConfig`]. - fn process_config(schedule_graph: &mut ScheduleGraph, config: NodeConfig) -> NodeId; +/// Trait used by [`ScheduleGraph::process_configs`] to process a single [`ScheduleConfig`]. +trait ProcessScheduleConfig: Schedulable + Sized { + /// Process a single [`ScheduleConfig`]. 
+ fn process_config(schedule_graph: &mut ScheduleGraph, config: ScheduleConfig) -> NodeId; } -impl ProcessNodeConfig for ScheduleSystem { - fn process_config(schedule_graph: &mut ScheduleGraph, config: NodeConfig) -> NodeId { +impl ProcessScheduleConfig for ScheduleSystem { + fn process_config(schedule_graph: &mut ScheduleGraph, config: ScheduleConfig) -> NodeId { schedule_graph.add_system_inner(config).unwrap() } } -impl ProcessNodeConfig for InternedSystemSet { - fn process_config(schedule_graph: &mut ScheduleGraph, config: NodeConfig) -> NodeId { +impl ProcessScheduleConfig for InternedSystemSet { + fn process_config(schedule_graph: &mut ScheduleGraph, config: ScheduleConfig) -> NodeId { schedule_graph.configure_set_inner(config).unwrap() } } /// Used to select the appropriate reporting function. -enum ReportCycles { +pub enum ReportCycles { + /// When sets contain themselves Hierarchy, + /// When the graph is no longer a DAG Dependency, } @@ -1690,7 +1697,7 @@ impl ScheduleGraph { /// # Errors /// /// If the graph contain cycles, then an error is returned. 
- fn topsort_graph( + pub fn topsort_graph( &self, graph: &DiGraph, report: ReportCycles, @@ -1896,7 +1903,7 @@ impl ScheduleGraph { &'a self, ambiguities: &'a [(NodeId, NodeId, Vec)], components: &'a Components, - ) -> impl Iterator)> + 'a { + ) -> impl Iterator>)> + 'a { ambiguities .iter() .map(move |(system_a, system_b, conflicts)| { @@ -2053,11 +2060,9 @@ mod tests { use bevy_ecs_macros::ScheduleLabel; use crate::{ - self as bevy_ecs, - prelude::{Res, Resource}, + prelude::{ApplyDeferred, Res, Resource}, schedule::{ - tests::ResMut, IntoSystemConfigs, IntoSystemSetConfigs, Schedule, - ScheduleBuildSettings, SystemSet, + tests::ResMut, IntoScheduleConfigs, Schedule, ScheduleBuildSettings, SystemSet, }, system::Commands, world::World, @@ -2080,12 +2085,12 @@ mod tests { let mut world = World::new(); let mut schedule = Schedule::default(); + let system: fn() = || { + panic!("This system must not run"); + }; + schedule.configure_sets(Set.run_if(|| false)); - schedule.add_systems( - (|| panic!("This system must not run")) - .ambiguous_with(|| ()) - .in_set(Set), - ); + schedule.add_systems(system.ambiguous_with(|| ()).in_set(Set)); schedule.run(&mut world); } @@ -2106,6 +2111,108 @@ mod tests { assert_eq!(schedule.executable.systems.len(), 3); } + #[test] + fn explicit_sync_point_used_as_auto_sync_point() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred, || {}).chain()); + schedule.run(&mut world); + + // No sync point was inserted, since we can reuse the explicit sync point. 
+ assert_eq!(schedule.executable.systems.len(), 5); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred.run_if(|| false), || {}).chain()); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. + assert_eq!(schedule.executable.systems.len(), 6); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point_condition_on_chain() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred, || {}).chain().run_if(|| false)); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. + assert_eq!(schedule.executable.systems.len(), 6); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point_condition_on_system_set() { + #[derive(SystemSet, Debug, Clone, PartialEq, Eq, Hash)] + struct Set; + + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.configure_sets(Set.run_if(|| false)); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred.in_set(Set), || {}).chain()); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. 
+ assert_eq!(schedule.executable.systems.len(), 6); + } + + #[test] + fn conditional_explicit_sync_point_not_used_as_auto_sync_point_condition_on_nested_system_set() + { + #[derive(SystemSet, Debug, Clone, PartialEq, Eq, Hash)] + struct Set1; + #[derive(SystemSet, Debug, Clone, PartialEq, Eq, Hash)] + struct Set2; + + let mut schedule = Schedule::default(); + let mut world = World::default(); + schedule.configure_sets(Set2.run_if(|| false)); + schedule.configure_sets(Set1.in_set(Set2)); + schedule.add_systems( + ( + |mut commands: Commands| commands.insert_resource(Resource1), + |_: Res| {}, + ) + .chain(), + ); + schedule.add_systems((|| {}, ApplyDeferred, || {}).chain().in_set(Set1)); + schedule.run(&mut world); + + // A sync point was inserted, since the explicit sync point is not always run. + assert_eq!(schedule.executable.systems.len(), 6); + } + #[test] fn merges_sync_points_into_one() { let mut schedule = Schedule::default(); @@ -2158,6 +2265,63 @@ mod tests { assert_eq!(schedule.executable.systems.len(), 5); } + #[test] + fn do_not_consider_ignore_deferred_before_exclusive_system() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + // chain_ignore_deferred adds no sync points usually but an exception is made for exclusive systems + schedule.add_systems( + ( + |_: Commands| {}, + // <- no sync point is added here because the following system is not exclusive + |mut commands: Commands| commands.insert_resource(Resource1), + // <- sync point is added here because the following system is exclusive which expects to see all commands to that point + |world: &mut World| assert!(world.contains_resource::()), + // <- no sync point is added here because the previous system has no deferred parameters + |_: &mut World| {}, + // <- no sync point is added here because the following system is not exclusive + |_: Commands| {}, + ) + .chain_ignore_deferred(), + ); + schedule.run(&mut world); + + assert_eq!(schedule.executable.systems.len(), 
6); // 5 systems + 1 sync point + } + + #[test] + fn bubble_sync_point_through_ignore_deferred_node() { + let mut schedule = Schedule::default(); + let mut world = World::default(); + + let insert_resource_config = ( + // the first system has deferred commands + |mut commands: Commands| commands.insert_resource(Resource1), + // the second system has no deferred commands + || {}, + ) + // the first two systems are chained without a sync point in between + .chain_ignore_deferred(); + + schedule.add_systems( + ( + insert_resource_config, + // the third system would panic if the command of the first system was not applied + |_: Res| {}, + ) + // the third system is chained after the first two, possibly with a sync point in between + .chain(), + ); + + // To add a sync point between the second and third system despite the second having no commands, + // the first system has to signal the second system that there are unapplied commands. + // With that the second system will add a sync point after it so the third system will find the resource. + + schedule.run(&mut world); + + assert_eq!(schedule.executable.systems.len(), 4); // 3 systems + 1 sync point + } + #[test] fn disable_auto_sync_points() { let mut schedule = Schedule::default(); diff --git a/crates/bevy_ecs/src/schedule/set.rs b/crates/bevy_ecs/src/schedule/set.rs index cbe1011e782a2..896c7ed050ecc 100644 --- a/crates/bevy_ecs/src/schedule/set.rs +++ b/crates/bevy_ecs/src/schedule/set.rs @@ -20,12 +20,18 @@ use crate::{ define_label!( /// A strongly-typed class of labels used to identify a [`Schedule`](crate::schedule::Schedule). + #[diagnostic::on_unimplemented( + note = "consider annotating `{Self}` with `#[derive(ScheduleLabel)]`" + )] ScheduleLabel, SCHEDULE_LABEL_INTERNER ); define_label!( /// Types that identify logical groups of systems. 
+ #[diagnostic::on_unimplemented( + note = "consider annotating `{Self}` with `#[derive(SystemSet)]`" + )] SystemSet, SYSTEM_SET_INTERNER, extra_methods: { @@ -152,6 +158,12 @@ impl SystemSet for AnonymousSet { } /// Types that can be converted into a [`SystemSet`]. +/// +/// # Usage notes +/// +/// This trait should only be used as a bound for trait implementations or as an +/// argument to a function. If a system set needs to be returned from a function +/// or stored somewhere, use [`SystemSet`] instead of this trait. #[diagnostic::on_unimplemented( message = "`{Self}` is not a system set", label = "invalid system set" @@ -205,15 +217,15 @@ where #[cfg(test)] mod tests { use crate::{ + resource::Resource, schedule::{tests::ResMut, Schedule}, - system::Resource, }; use super::*; #[test] fn test_schedule_label() { - use crate::{self as bevy_ecs, world::World}; + use crate::world::World; #[derive(Resource)] struct Flag(bool); @@ -245,8 +257,6 @@ mod tests { #[test] fn test_derive_schedule_label() { - use crate::{self as bevy_ecs}; - #[derive(ScheduleLabel, Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] struct UnitLabel; @@ -347,8 +357,6 @@ mod tests { #[test] fn test_derive_system_set() { - use crate::{self as bevy_ecs}; - #[derive(SystemSet, Debug, Default, Clone, Copy, PartialEq, Eq, Hash)] struct UnitSet; diff --git a/crates/bevy_ecs/src/schedule/stepping.rs b/crates/bevy_ecs/src/schedule/stepping.rs index ed796c29e9e4e..b5df8555e2115 100644 --- a/crates/bevy_ecs/src/schedule/stepping.rs +++ b/crates/bevy_ecs/src/schedule/stepping.rs @@ -1,9 +1,11 @@ use crate::{ + resource::Resource, schedule::{InternedScheduleLabel, NodeId, Schedule, ScheduleLabel}, - system::{IntoSystem, ResMut, Resource, System}, + system::{IntoSystem, ResMut}, }; use alloc::vec::Vec; -use bevy_utils::{HashMap, TypeIdMap}; +use bevy_platform::collections::HashMap; +use bevy_utils::TypeIdMap; use core::any::TypeId; use fixedbitset::FixedBitSet; use log::{info, warn}; @@ -15,8 +17,6 @@ use 
log::error; #[cfg(test)] use log::debug; -use crate as bevy_ecs; - #[derive(Debug, Default, PartialEq, Eq, Copy, Clone)] enum Action { /// Stepping is disabled; run all systems @@ -168,14 +168,8 @@ impl Stepping { if self.action == Action::RunAll { return None; } - let label = match self.schedule_order.get(self.cursor.schedule) { - None => return None, - Some(label) => label, - }; - let state = match self.schedule_states.get(label) { - None => return None, - Some(state) => state, - }; + let label = self.schedule_order.get(self.cursor.schedule)?; + let state = self.schedule_states.get(label)?; state .node_ids .get(self.cursor.system) @@ -420,7 +414,10 @@ impl Stepping { // transitions, and add debugging messages for permitted // transitions. Any action transition that falls through // this match block will be performed. - #[expect(clippy::match_same_arms)] + #[expect( + clippy::match_same_arms, + reason = "Readability would be negatively impacted by combining the `(Waiting, RunAll)` and `(Continue, RunAll)` match arms." + )] match (self.action, action) { // ignore non-transition updates, and prevent a call to // enable() from overwriting a step or continue call @@ -826,11 +823,12 @@ impl ScheduleState { } #[cfg(all(test, feature = "bevy_debug_stepping"))] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use super::*; use crate::{prelude::*, schedule::ScheduleLabel}; - - pub use crate as bevy_ecs; + use alloc::{format, vec}; + use std::println; #[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] struct TestSchedule; @@ -1351,7 +1349,9 @@ mod tests { // // first system will be configured as `run_if(|| false)`, so it can // just panic if called - let first_system = move || panic!("first_system should not be run"); + let first_system: fn() = move || { + panic!("first_system should not be run"); + }; // The second system, we need to know when it has been called, so we'll // add a resource for tracking if it has been run. 
The system will diff --git a/crates/bevy_ecs/src/spawn.rs b/crates/bevy_ecs/src/spawn.rs new file mode 100644 index 0000000000000..5235889ffba20 --- /dev/null +++ b/crates/bevy_ecs/src/spawn.rs @@ -0,0 +1,361 @@ +//! Entity spawning abstractions, largely focused on spawning related hierarchies of entities. See [`related`](crate::related) and [`SpawnRelated`] +//! for the best entry points into these APIs and examples of how to use them. + +use crate::{ + bundle::{Bundle, BundleEffect, DynamicBundle, NoBundleEffect}, + entity::Entity, + relationship::{RelatedSpawner, Relationship, RelationshipTarget}, + world::{EntityWorldMut, World}, +}; +use alloc::vec::Vec; +use core::marker::PhantomData; +use variadics_please::all_tuples; + +/// A wrapper over a [`Bundle`] indicating that an entity should be spawned with that [`Bundle`]. +/// This is intended to be used for hierarchical spawning via traits like [`SpawnableList`] and [`SpawnRelated`]. +/// +/// Also see the [`children`](crate::children) and [`related`](crate::related) macros that abstract over the [`Spawn`] API. +/// +/// ``` +/// # use bevy_ecs::hierarchy::Children; +/// # use bevy_ecs::spawn::{Spawn, SpawnRelated}; +/// # use bevy_ecs::name::Name; +/// # use bevy_ecs::world::World; +/// let mut world = World::new(); +/// world.spawn(( +/// Name::new("Root"), +/// Children::spawn(( +/// Spawn(Name::new("Child1")), +/// Spawn(( +/// Name::new("Child2"), +/// Children::spawn(Spawn(Name::new("Grandchild"))), +/// )) +/// )), +/// )); +/// ``` +pub struct Spawn(pub B); + +/// A spawn-able list of changes to a given [`World`] and relative to a given [`Entity`]. This is generally used +/// for spawning "related" entities, such as children. +pub trait SpawnableList { + /// Spawn this list of changes in a given [`World`] and relative to a given [`Entity`]. This is generally used + /// for spawning "related" entities, such as children. 
+ fn spawn(self, world: &mut World, entity: Entity); + /// Returns a size hint, which is used to reserve space for this list in a [`RelationshipTarget`]. This should be + /// less than or equal to the actual size of the list. When in doubt, just use 0. + fn size_hint(&self) -> usize; +} + +impl> SpawnableList for Vec { + fn spawn(self, world: &mut World, entity: Entity) { + let mapped_bundles = self.into_iter().map(|b| (R::from(entity), b)); + world.spawn_batch(mapped_bundles); + } + + fn size_hint(&self) -> usize { + self.len() + } +} + +impl SpawnableList for Spawn { + fn spawn(self, world: &mut World, entity: Entity) { + world.spawn((R::from(entity), self.0)); + } + + fn size_hint(&self) -> usize { + 1 + } +} + +/// A [`SpawnableList`] that spawns entities using an iterator of a given [`Bundle`]: +/// +/// ``` +/// # use bevy_ecs::hierarchy::Children; +/// # use bevy_ecs::spawn::{Spawn, SpawnIter, SpawnRelated}; +/// # use bevy_ecs::name::Name; +/// # use bevy_ecs::world::World; +/// let mut world = World::new(); +/// world.spawn(( +/// Name::new("Root"), +/// Children::spawn(( +/// Spawn(Name::new("Child1")), +/// SpawnIter(["Child2", "Child3"].into_iter().map(Name::new)), +/// )), +/// )); +/// ``` +pub struct SpawnIter(pub I); + +impl + Send + Sync + 'static, B: Bundle> SpawnableList + for SpawnIter +{ + fn spawn(self, world: &mut World, entity: Entity) { + for bundle in self.0 { + world.spawn((R::from(entity), bundle)); + } + } + + fn size_hint(&self) -> usize { + self.0.size_hint().0 + } +} + +/// A [`SpawnableList`] that spawns entities using a [`FnOnce`] with a [`RelatedSpawner`] as an argument: +/// +/// ``` +/// # use bevy_ecs::hierarchy::{Children, ChildOf}; +/// # use bevy_ecs::spawn::{Spawn, SpawnWith, SpawnRelated}; +/// # use bevy_ecs::name::Name; +/// # use bevy_ecs::relationship::RelatedSpawner; +/// # use bevy_ecs::world::World; +/// let mut world = World::new(); +/// world.spawn(( +/// Name::new("Root"), +/// Children::spawn(( +/// 
Spawn(Name::new("Child1")), +/// SpawnWith(|parent: &mut RelatedSpawner| { +/// parent.spawn(Name::new("Child2")); +/// parent.spawn(Name::new("Child3")); +/// }), +/// )), +/// )); +/// ``` +pub struct SpawnWith(pub F); + +impl) + Send + Sync + 'static> SpawnableList + for SpawnWith +{ + fn spawn(self, world: &mut World, entity: Entity) { + world.entity_mut(entity).with_related_entities(self.0); + } + + fn size_hint(&self) -> usize { + 1 + } +} + +macro_rules! spawnable_list_impl { + ($($list: ident),*) => { + #[expect( + clippy::allow_attributes, + reason = "This is a tuple-related macro; as such, the lints below may not always apply." + )] + impl),*> SpawnableList for ($($list,)*) { + fn spawn(self, _world: &mut World, _entity: Entity) { + #[allow( + non_snake_case, + reason = "The names of these variables are provided by the caller, not by us." + )] + let ($($list,)*) = self; + $($list.spawn(_world, _entity);)* + } + + fn size_hint(&self) -> usize { + #[allow( + non_snake_case, + reason = "The names of these variables are provided by the caller, not by us." + )] + let ($($list,)*) = self; + 0 $(+ $list.size_hint())* + } + } + } +} + +all_tuples!(spawnable_list_impl, 0, 12, P); + +/// A [`Bundle`] that: +/// 1. Contains a [`RelationshipTarget`] component (associated with the given [`Relationship`]). This reserves space for the [`SpawnableList`]. +/// 2. Spawns a [`SpawnableList`] of related entities with a given [`Relationship`]. +/// +/// This is intended to be created using [`SpawnRelated`]. +pub struct SpawnRelatedBundle> { + list: L, + marker: PhantomData, +} + +impl> BundleEffect for SpawnRelatedBundle { + fn apply(self, entity: &mut EntityWorldMut) { + let id = entity.id(); + entity.world_scope(|world: &mut World| { + self.list.spawn(world, id); + }); + } +} + +// SAFETY: This internally relies on the RelationshipTarget's Bundle implementation, which is sound. 
+unsafe impl + Send + Sync + 'static> Bundle + for SpawnRelatedBundle +{ + fn component_ids( + components: &mut crate::component::ComponentsRegistrator, + ids: &mut impl FnMut(crate::component::ComponentId), + ) { + ::component_ids(components, ids); + } + + fn get_component_ids( + components: &crate::component::Components, + ids: &mut impl FnMut(Option), + ) { + ::get_component_ids(components, ids); + } + + fn register_required_components( + components: &mut crate::component::ComponentsRegistrator, + required_components: &mut crate::component::RequiredComponents, + ) { + ::register_required_components( + components, + required_components, + ); + } +} +impl> DynamicBundle for SpawnRelatedBundle { + type Effect = Self; + + fn get_components( + self, + func: &mut impl FnMut(crate::component::StorageType, bevy_ptr::OwningPtr<'_>), + ) -> Self::Effect { + ::with_capacity(self.list.size_hint()) + .get_components(func); + self + } +} + +/// A [`Bundle`] that: +/// 1. Contains a [`RelationshipTarget`] component (associated with the given [`Relationship`]). This reserves space for a single entity. +/// 2. Spawns a single related entity containing the given `B` [`Bundle`] and the given [`Relationship`]. +/// +/// This is intended to be created using [`SpawnRelated`]. +pub struct SpawnOneRelated { + bundle: B, + marker: PhantomData, +} + +impl BundleEffect for SpawnOneRelated { + fn apply(self, entity: &mut EntityWorldMut) { + entity.with_related::(self.bundle); + } +} + +impl DynamicBundle for SpawnOneRelated { + type Effect = Self; + + fn get_components( + self, + func: &mut impl FnMut(crate::component::StorageType, bevy_ptr::OwningPtr<'_>), + ) -> Self::Effect { + ::with_capacity(1).get_components(func); + self + } +} + +// SAFETY: This internally relies on the RelationshipTarget's Bundle implementation, which is sound. 
+unsafe impl Bundle for SpawnOneRelated { + fn component_ids( + components: &mut crate::component::ComponentsRegistrator, + ids: &mut impl FnMut(crate::component::ComponentId), + ) { + ::component_ids(components, ids); + } + + fn get_component_ids( + components: &crate::component::Components, + ids: &mut impl FnMut(Option), + ) { + ::get_component_ids(components, ids); + } + + fn register_required_components( + components: &mut crate::component::ComponentsRegistrator, + required_components: &mut crate::component::RequiredComponents, + ) { + ::register_required_components( + components, + required_components, + ); + } +} + +/// [`RelationshipTarget`] methods that create a [`Bundle`] with a [`DynamicBundle::Effect`] that: +/// +/// 1. Contains the [`RelationshipTarget`] component, pre-allocated with the necessary space for spawned entities. +/// 2. Spawns an entity (or a list of entities) that relate to the entity the [`Bundle`] is added to via the [`RelationshipTarget::Relationship`]. +pub trait SpawnRelated: RelationshipTarget { + /// Returns a [`Bundle`] containing this [`RelationshipTarget`] component. It also spawns a [`SpawnableList`] of entities, each related to the bundle's entity + /// via [`RelationshipTarget::Relationship`]. The [`RelationshipTarget`] (when possible) will pre-allocate space for the related entities. + /// + /// See [`Spawn`], [`SpawnIter`], and [`SpawnWith`] for usage examples. + fn spawn>( + list: L, + ) -> SpawnRelatedBundle; + + /// Returns a [`Bundle`] containing this [`RelationshipTarget`] component. It also spawns a single entity containing [`Bundle`] that is related to the bundle's entity + /// via [`RelationshipTarget::Relationship`]. 
+ /// + /// ``` + /// # use bevy_ecs::hierarchy::Children; + /// # use bevy_ecs::spawn::SpawnRelated; + /// # use bevy_ecs::name::Name; + /// # use bevy_ecs::world::World; + /// let mut world = World::new(); + /// world.spawn(( + /// Name::new("Root"), + /// Children::spawn_one(Name::new("Child")), + /// )); + /// ``` + fn spawn_one(bundle: B) -> SpawnOneRelated; +} + +impl SpawnRelated for T { + fn spawn>( + list: L, + ) -> SpawnRelatedBundle { + SpawnRelatedBundle { + list, + marker: PhantomData, + } + } + + fn spawn_one(bundle: B) -> SpawnOneRelated { + SpawnOneRelated { + bundle, + marker: PhantomData, + } + } +} + +/// Returns a [`SpawnRelatedBundle`] that will insert the given [`RelationshipTarget`], spawn a [`SpawnableList`] of entities with given bundles that +/// relate to the [`RelationshipTarget`] entity via the [`RelationshipTarget::Relationship`] component, and reserve space in the [`RelationshipTarget`] for each spawned entity. +/// +/// The first argument is the [`RelationshipTarget`] type. Any additional arguments will be interpreted as bundles to be spawned. +/// +/// Also see [`children`](crate::children) for a [`Children`](crate::hierarchy::Children)-specific equivalent. +/// +/// ``` +/// # use bevy_ecs::hierarchy::Children; +/// # use bevy_ecs::name::Name; +/// # use bevy_ecs::world::World; +/// # use bevy_ecs::related; +/// # use bevy_ecs::spawn::{Spawn, SpawnRelated}; +/// let mut world = World::new(); +/// world.spawn(( +/// Name::new("Root"), +/// related!(Children[ +/// Name::new("Child1"), +/// ( +/// Name::new("Child2"), +/// related!(Children[ +/// Name::new("Grandchild"), +/// ]) +/// ) +/// ]) +/// )); +/// ``` +#[macro_export] +macro_rules! 
related { + ($relationship_target:ty [$($child:expr),*$(,)?]) => { + <$relationship_target>::spawn(($($crate::spawn::Spawn($child)),*)) + }; +} diff --git a/crates/bevy_ecs/src/storage/blob_array.rs b/crates/bevy_ecs/src/storage/blob_array.rs index c508b78c9853c..9b738a763c8ce 100644 --- a/crates/bevy_ecs/src/storage/blob_array.rs +++ b/crates/bevy_ecs/src/storage/blob_array.rs @@ -76,7 +76,9 @@ impl BlobArray { /// /// # Safety /// - The element at index `index` is safe to access. - /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) + /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) + /// + /// [`Vec::len`]: alloc::vec::Vec::len #[inline] pub unsafe fn get_unchecked(&self, index: usize) -> Ptr<'_> { #[cfg(debug_assertions)] @@ -97,7 +99,9 @@ impl BlobArray { /// /// # Safety /// - The element with at index `index` is safe to access. - /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) + /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `index` < `len`) + /// + /// [`Vec::len`]: alloc::vec::Vec::len #[inline] pub unsafe fn get_unchecked_mut(&mut self, index: usize) -> PtrMut<'_> { #[cfg(debug_assertions)] @@ -134,6 +138,8 @@ impl BlobArray { /// # Safety /// - The type `T` must be the type of the items in this [`BlobArray`]. 
/// - `slice_len` <= `len` + /// + /// [`Vec::len`]: alloc::vec::Vec::len pub unsafe fn get_sub_slice(&self, slice_len: usize) -> &[UnsafeCell] { #[cfg(debug_assertions)] debug_assert!(slice_len <= self.capacity); @@ -150,7 +156,9 @@ impl BlobArray { /// /// # Safety /// - For every element with index `i`, if `i` < `len`: It must be safe to call [`Self::get_unchecked_mut`] with `i`. - /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `len` is correct.) + /// (If the safety requirements of every method that has been used on `Self` have been fulfilled, the caller just needs to ensure that `len` is correct.) + /// + /// [`Vec::clear`]: alloc::vec::Vec::clear pub unsafe fn clear(&mut self, len: usize) { #[cfg(debug_assertions)] debug_assert!(self.capacity >= len); @@ -248,7 +256,7 @@ impl BlobArray { new_capacity: NonZeroUsize, ) { #[cfg(debug_assertions)] - debug_assert_eq!(self.capacity, current_capacity.into()); + debug_assert_eq!(self.capacity, current_capacity.get()); if !self.is_zst() { // SAFETY: `new_capacity` can't overflow usize let new_layout = @@ -281,7 +289,7 @@ impl BlobArray { /// # Safety /// - `index` must be in bounds (`index` < capacity) /// - The [`Layout`] of the value must match the layout of the blobs stored in this array, - /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. + /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. /// - `value` must not point to the same value that is being initialized. #[inline] pub unsafe fn initialize_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { @@ -297,7 +305,7 @@ impl BlobArray { /// # Safety /// - Index must be in-bounds (`index` < `len`) /// - `value`'s [`Layout`] must match this [`BlobArray`]'s `item_layout`, - /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. 
+ /// and it must be safe to use the `drop` function of this [`BlobArray`] to drop `value`. /// - `value` must not point to the same value that is being replaced. pub unsafe fn replace_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { #[cfg(debug_assertions)] @@ -471,7 +479,6 @@ impl BlobArray { #[cfg(test)] mod tests { - use crate as bevy_ecs; use bevy_ecs::prelude::*; #[derive(Component)] diff --git a/crates/bevy_ecs/src/storage/blob_vec.rs b/crates/bevy_ecs/src/storage/blob_vec.rs index d42c63a6f1605..2451fccb140f8 100644 --- a/crates/bevy_ecs/src/storage/blob_vec.rs +++ b/crates/bevy_ecs/src/storage/blob_vec.rs @@ -176,7 +176,7 @@ impl BlobVec { /// # Safety /// - index must be in bounds /// - the memory in the [`BlobVec`] starting at index `index`, of a size matching this [`BlobVec`]'s - /// `item_layout`, must have been previously allocated. + /// `item_layout`, must have been previously allocated. #[inline] pub unsafe fn initialize_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { debug_assert!(index < self.len()); @@ -189,10 +189,10 @@ impl BlobVec { /// # Safety /// - index must be in-bounds /// - the memory in the [`BlobVec`] starting at index `index`, of a size matching this - /// [`BlobVec`]'s `item_layout`, must have been previously initialized with an item matching - /// this [`BlobVec`]'s `item_layout` + /// [`BlobVec`]'s `item_layout`, must have been previously initialized with an item matching + /// this [`BlobVec`]'s `item_layout` /// - the memory at `*value` must also be previously initialized with an item matching this - /// [`BlobVec`]'s `item_layout` + /// [`BlobVec`]'s `item_layout` pub unsafe fn replace_unchecked(&mut self, index: usize, value: OwningPtr<'_>) { debug_assert!(index < self.len()); @@ -497,11 +497,12 @@ const fn padding_needed_for(layout: &Layout, align: usize) -> usize { #[cfg(test)] mod tests { - use crate as bevy_ecs; // required for derive macros - use crate::{component::Component, ptr::OwningPtr, 
world::World}; - use super::BlobVec; - use alloc::rc::Rc; + use crate::{component::Component, ptr::OwningPtr, world::World}; + use alloc::{ + rc::Rc, + string::{String, ToString}, + }; use core::{alloc::Layout, cell::RefCell}; /// # Safety diff --git a/crates/bevy_ecs/src/storage/mod.rs b/crates/bevy_ecs/src/storage/mod.rs index eaeff97a8cccb..2a5a5f184e649 100644 --- a/crates/bevy_ecs/src/storage/mod.rs +++ b/crates/bevy_ecs/src/storage/mod.rs @@ -31,10 +31,13 @@ pub use resource::*; pub use sparse_set::*; pub use table::*; +use crate::component::{ComponentInfo, StorageType}; + /// The raw data stores of a [`World`](crate::world::World) #[derive(Default)] pub struct Storages { /// Backing storage for [`SparseSet`] components. + /// Note that sparse sets are only present for components that have been spawned or have had a relevant bundle registered. pub sparse_sets: SparseSets, /// Backing storage for [`Table`] components. pub tables: Tables, @@ -43,3 +46,17 @@ pub struct Storages { /// Backing storage for `!Send` resources. pub non_send_resources: Resources, } + +impl Storages { + /// ensures that the component has its necessary storage initialize. 
+ pub fn prepare_component(&mut self, component: &ComponentInfo) { + match component.storage_type() { + StorageType::Table => { + // table needs no preparation + } + StorageType::SparseSet => { + self.sparse_sets.get_or_insert(component); + } + } + } +} diff --git a/crates/bevy_ecs/src/storage/resource.rs b/crates/bevy_ecs/src/storage/resource.rs index 76f14b3e10ef2..caa0785b799e9 100644 --- a/crates/bevy_ecs/src/storage/resource.rs +++ b/crates/bevy_ecs/src/storage/resource.rs @@ -1,14 +1,12 @@ use crate::{ archetype::ArchetypeComponentId, - change_detection::{MaybeLocation, MaybeUnsafeCellLocation, MutUntyped, TicksMut}, + change_detection::{MaybeLocation, MutUntyped, TicksMut}, component::{ComponentId, ComponentTicks, Components, Tick, TickCells}, storage::{blob_vec::BlobVec, SparseSet}, }; use alloc::string::String; use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref}; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; -use core::{cell::UnsafeCell, mem::ManuallyDrop}; +use core::{cell::UnsafeCell, mem::ManuallyDrop, panic::Location}; #[cfg(feature = "std")] use std::thread::ThreadId; @@ -30,8 +28,7 @@ pub struct ResourceData { id: ArchetypeComponentId, #[cfg(feature = "std")] origin_thread_id: Option, - #[cfg(feature = "track_change_detection")] - changed_by: UnsafeCell<&'static Location<'static>>, + changed_by: MaybeLocation>>, } impl Drop for ResourceData { @@ -70,6 +67,13 @@ impl ResourceData { #[inline] fn validate_access(&self) { if SEND { + #[cfg_attr( + not(feature = "std"), + expect( + clippy::needless_return, + reason = "needless until no_std is addressed (see below)", + ) + )] return; } @@ -87,6 +91,7 @@ impl ResourceData { // TODO: Handle no_std non-send. // Currently, no_std is single-threaded only, so this is safe to ignore. // To support no_std multithreading, an alternative will be required. + // Remove the #[expect] attribute above when this is addressed. } /// Returns true if the resource is populated. 
@@ -136,7 +141,11 @@ impl ResourceData { #[inline] pub(crate) fn get_with_ticks( &self, - ) -> Option<(Ptr<'_>, TickCells<'_>, MaybeUnsafeCellLocation<'_>)> { + ) -> Option<( + Ptr<'_>, + TickCells<'_>, + MaybeLocation<&UnsafeCell<&'static Location<'static>>>, + )> { self.is_present().then(|| { self.validate_access(); ( @@ -146,10 +155,7 @@ impl ResourceData { added: &self.added_ticks, changed: &self.changed_ticks, }, - #[cfg(feature = "track_change_detection")] - &self.changed_by, - #[cfg(not(feature = "track_change_detection"))] - (), + self.changed_by.as_ref(), ) }) } @@ -160,15 +166,14 @@ impl ResourceData { /// If `SEND` is false, this will panic if a value is present and is not accessed from the /// original thread it was inserted in. pub(crate) fn get_mut(&mut self, last_run: Tick, this_run: Tick) -> Option> { - let (ptr, ticks, _caller) = self.get_with_ticks()?; + let (ptr, ticks, caller) = self.get_with_ticks()?; Some(MutUntyped { // SAFETY: We have exclusive access to the underlying storage. value: unsafe { ptr.assert_unique() }, // SAFETY: We have exclusive access to the underlying storage. ticks: unsafe { TicksMut::from_tick_cells(ticks, last_run, this_run) }, - #[cfg(feature = "track_change_detection")] // SAFETY: We have exclusive access to the underlying storage. 
- changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }) } @@ -186,7 +191,7 @@ impl ResourceData { &mut self, value: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) { if self.is_present() { self.validate_access(); @@ -205,10 +210,11 @@ impl ResourceData { *self.added_ticks.deref_mut() = change_tick; } *self.changed_ticks.deref_mut() = change_tick; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by.deref_mut() = caller; - } + + self.changed_by + .as_ref() + .map(|changed_by| changed_by.deref_mut()) + .assign(caller); } /// Inserts a value into the resource with a pre-existing change tick. If a @@ -225,7 +231,7 @@ impl ResourceData { &mut self, value: OwningPtr<'_>, change_ticks: ComponentTicks, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) { if self.is_present() { self.validate_access(); @@ -244,10 +250,10 @@ impl ResourceData { } *self.added_ticks.deref_mut() = change_ticks.added; *self.changed_ticks.deref_mut() = change_ticks.changed; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by.deref_mut() = caller; - } + self.changed_by + .as_ref() + .map(|changed_by| changed_by.deref_mut()) + .assign(caller); } /// Removes a value from the resource, if present. @@ -267,11 +273,11 @@ impl ResourceData { // SAFETY: We've already validated that the row is present. 
let res = unsafe { self.data.swap_remove_and_forget_unchecked(Self::ROW) }; - // SAFETY: This function is being called through an exclusive mutable reference to Self - #[cfg(feature = "track_change_detection")] - let caller = unsafe { *self.changed_by.deref_mut() }; - #[cfg(not(feature = "track_change_detection"))] - let caller = (); + let caller = self + .changed_by + .as_ref() + // SAFETY: This function is being called through an exclusive mutable reference to Self + .map(|changed_by| unsafe { *changed_by.deref_mut() }); // SAFETY: This function is being called through an exclusive mutable reference to Self, which // makes it sound to read these ticks. @@ -308,7 +314,7 @@ impl ResourceData { /// The backing store for all [`Resource`]s stored in the [`World`]. /// -/// [`Resource`]: crate::system::Resource +/// [`Resource`]: crate::resource::Resource /// [`World`]: crate::world::World #[derive(Default)] pub struct Resources { @@ -392,8 +398,7 @@ impl Resources { id: f(), #[cfg(feature = "std")] origin_thread_id: None, - #[cfg(feature = "track_change_detection")] - changed_by: UnsafeCell::new(Location::caller()) + changed_by: MaybeLocation::caller().map(UnsafeCell::new), } }) } diff --git a/crates/bevy_ecs/src/storage/sparse_set.rs b/crates/bevy_ecs/src/storage/sparse_set.rs index 14b135fd04241..bb79382e06a8d 100644 --- a/crates/bevy_ecs/src/storage/sparse_set.rs +++ b/crates/bevy_ecs/src/storage/sparse_set.rs @@ -1,14 +1,12 @@ use crate::{ - change_detection::MaybeUnsafeCellLocation, + change_detection::MaybeLocation, component::{ComponentId, ComponentInfo, ComponentTicks, Tick, TickCells}, entity::Entity, storage::{Column, TableRow}, }; use alloc::{boxed::Box, vec::Vec}; use bevy_ptr::{OwningPtr, Ptr}; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; -use core::{cell::UnsafeCell, hash::Hash, marker::PhantomData}; +use core::{cell::UnsafeCell, hash::Hash, marker::PhantomData, panic::Location}; use nonmax::NonMaxUsize; type EntityIndex = u32; 
@@ -50,7 +48,7 @@ macro_rules! impl_sparse_array { #[inline] pub fn contains(&self, index: I) -> bool { let index = index.sparse_set_index(); - self.values.get(index).map(|v| v.is_some()).unwrap_or(false) + self.values.get(index).is_some_and(Option::is_some) } /// Returns a reference to the value at `index`. @@ -59,7 +57,7 @@ macro_rules! impl_sparse_array { #[inline] pub fn get(&self, index: I) -> Option<&V> { let index = index.sparse_set_index(); - self.values.get(index).map(|v| v.as_ref()).unwrap_or(None) + self.values.get(index).and_then(Option::as_ref) } } }; @@ -87,10 +85,7 @@ impl SparseArray { #[inline] pub fn get_mut(&mut self, index: I) -> Option<&mut V> { let index = index.sparse_set_index(); - self.values - .get_mut(index) - .map(|v| v.as_mut()) - .unwrap_or(None) + self.values.get_mut(index).and_then(Option::as_mut) } /// Removes and returns the value stored at `index`. @@ -173,26 +168,16 @@ impl ComponentSparseSet { entity: Entity, value: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { if let Some(&dense_index) = self.sparse.get(entity.index()) { #[cfg(debug_assertions)] assert_eq!(entity, self.entities[dense_index.as_usize()]); - self.dense.replace( - dense_index, - value, - change_tick, - #[cfg(feature = "track_change_detection")] - caller, - ); + self.dense.replace(dense_index, value, change_tick, caller); } else { let dense_index = self.dense.len(); - self.dense.push( - value, - ComponentTicks::new(change_tick), - #[cfg(feature = "track_change_detection")] - caller, - ); + self.dense + .push(value, ComponentTicks::new(change_tick), caller); self.sparse .insert(entity.index(), TableRow::from_usize(dense_index)); #[cfg(debug_assertions)] @@ -241,7 +226,11 @@ impl ComponentSparseSet { pub fn get_with_ticks( &self, entity: Entity, - ) -> Option<(Ptr<'_>, TickCells<'_>, MaybeUnsafeCellLocation<'_>)> { + ) -> Option<( + Ptr<'_>, + TickCells<'_>, + 
MaybeLocation<&UnsafeCell<&'static Location<'static>>>, + )> { let dense_index = *self.sparse.get(entity.index())?; #[cfg(debug_assertions)] assert_eq!(entity, self.entities[dense_index.as_usize()]); @@ -253,10 +242,7 @@ impl ComponentSparseSet { added: self.dense.get_added_tick_unchecked(dense_index), changed: self.dense.get_changed_tick_unchecked(dense_index), }, - #[cfg(feature = "track_change_detection")] self.dense.get_changed_by_unchecked(dense_index), - #[cfg(not(feature = "track_change_detection"))] - (), )) } } @@ -301,16 +287,17 @@ impl ComponentSparseSet { /// /// Returns `None` if `entity` does not have a component in the sparse set. #[inline] - #[cfg(feature = "track_change_detection")] pub fn get_changed_by( &self, entity: Entity, - ) -> Option<&UnsafeCell<&'static Location<'static>>> { - let dense_index = *self.sparse.get(entity.index())?; - #[cfg(debug_assertions)] - assert_eq!(entity, self.entities[dense_index.as_usize()]); - // SAFETY: if the sparse index points to something in the dense vec, it exists - unsafe { Some(self.dense.get_changed_by_unchecked(dense_index)) } + ) -> MaybeLocation>>> { + MaybeLocation::new_with_flattened(|| { + let dense_index = *self.sparse.get(entity.index())?; + #[cfg(debug_assertions)] + assert_eq!(entity, self.entities[dense_index.as_usize()]); + // SAFETY: if the sparse index points to something in the dense vec, it exists + unsafe { Some(self.dense.get_changed_by_unchecked(dense_index)) } + }) } /// Removes the `entity` from this sparse set and returns a pointer to the associated value (if @@ -619,7 +606,7 @@ impl SparseSets { self.sets.iter().map(|(id, data)| (*id, data)) } - /// Gets a reference to the [`ComponentSparseSet`] of a [`ComponentId`]. + /// Gets a reference to the [`ComponentSparseSet`] of a [`ComponentId`]. This may be `None` if the component has never been spawned. 
#[inline] pub fn get(&self, component_id: ComponentId) -> Option<&ComponentSparseSet> { self.sets.get(component_id) @@ -641,7 +628,7 @@ impl SparseSets { self.sets.get_mut(component_info.id()).unwrap() } - /// Gets a mutable reference to the [`ComponentSparseSet`] of a [`ComponentId`]. + /// Gets a mutable reference to the [`ComponentSparseSet`] of a [`ComponentId`]. This may be `None` if the component has never been spawned. pub(crate) fn get_mut(&mut self, component_id: ComponentId) -> Option<&mut ComponentSparseSet> { self.sets.get_mut(component_id) } @@ -664,11 +651,11 @@ impl SparseSets { mod tests { use super::SparseSets; use crate::{ - self as bevy_ecs, component::{Component, ComponentDescriptor, ComponentId, ComponentInfo}, entity::Entity, storage::SparseSet, }; + use alloc::{vec, vec::Vec}; #[derive(Debug, Eq, PartialEq)] struct Foo(usize); @@ -735,10 +722,10 @@ mod tests { assert_eq!(sets.len(), 0); assert!(sets.is_empty()); - init_component::(&mut sets, 1); + register_component::(&mut sets, 1); assert_eq!(sets.len(), 1); - init_component::(&mut sets, 2); + register_component::(&mut sets, 2); assert_eq!(sets.len(), 2); // check its shape by iter @@ -752,7 +739,7 @@ mod tests { vec![(ComponentId::new(1), 0), (ComponentId::new(2), 0),] ); - fn init_component(sets: &mut SparseSets, id: usize) { + fn register_component(sets: &mut SparseSets, id: usize) { let descriptor = ComponentDescriptor::new::(); let id = ComponentId::new(id); let info = ComponentInfo::new(id, descriptor); diff --git a/crates/bevy_ecs/src/storage/table/column.rs b/crates/bevy_ecs/src/storage/table/column.rs index f7ea1683e458f..d4690d264cb32 100644 --- a/crates/bevy_ecs/src/storage/table/column.rs +++ b/crates/bevy_ecs/src/storage/table/column.rs @@ -1,10 +1,12 @@ use super::*; use crate::{ + change_detection::MaybeLocation, component::TickCells, storage::{blob_array::BlobArray, thin_array_ptr::ThinArrayPtr}, }; use alloc::vec::Vec; use bevy_ptr::PtrMut; +use core::panic::Location; /// 
Very similar to a normal [`Column`], but with the capacities and lengths cut out for performance reasons. /// @@ -17,8 +19,7 @@ pub struct ThinColumn { pub(super) data: BlobArray, pub(super) added_ticks: ThinArrayPtr>, pub(super) changed_ticks: ThinArrayPtr>, - #[cfg(feature = "track_change_detection")] - pub(super) changed_by: ThinArrayPtr>>, + pub(super) changed_by: MaybeLocation>>>, } impl ThinColumn { @@ -31,8 +32,7 @@ impl ThinColumn { }, added_ticks: ThinArrayPtr::with_capacity(capacity), changed_ticks: ThinArrayPtr::with_capacity(capacity), - #[cfg(feature = "track_change_detection")] - changed_by: ThinArrayPtr::with_capacity(capacity), + changed_by: MaybeLocation::new_with(|| ThinArrayPtr::with_capacity(capacity)), } } @@ -54,9 +54,9 @@ impl ThinColumn { .swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); self.changed_ticks .swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); - #[cfg(feature = "track_change_detection")] - self.changed_by - .swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); + self.changed_by.as_mut().map(|changed_by| { + changed_by.swap_remove_unchecked_nonoverlapping(row.as_usize(), last_element_index); + }); } /// Swap-remove and drop the removed element. @@ -76,9 +76,9 @@ impl ThinColumn { .swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); self.changed_ticks .swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); - #[cfg(feature = "track_change_detection")] - self.changed_by - .swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); + self.changed_by.as_mut().map(|changed_by| { + changed_by.swap_remove_and_drop_unchecked(row.as_usize(), last_element_index); + }); } /// Swap-remove and forget the removed element. 
@@ -99,9 +99,9 @@ impl ThinColumn { .swap_remove_unchecked(row.as_usize(), last_element_index); self.changed_ticks .swap_remove_unchecked(row.as_usize(), last_element_index); - #[cfg(feature = "track_change_detection")] self.changed_by - .swap_remove_unchecked(row.as_usize(), last_element_index); + .as_mut() + .map(|changed_by| changed_by.swap_remove_unchecked(row.as_usize(), last_element_index)); } /// Call [`realloc`](std::alloc::realloc) to expand / shrink the memory allocation for this [`ThinColumn`] @@ -117,8 +117,9 @@ impl ThinColumn { self.data.realloc(current_capacity, new_capacity); self.added_ticks.realloc(current_capacity, new_capacity); self.changed_ticks.realloc(current_capacity, new_capacity); - #[cfg(feature = "track_change_detection")] - self.changed_by.realloc(current_capacity, new_capacity); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.realloc(current_capacity, new_capacity)); } /// Call [`alloc`](std::alloc::alloc) to allocate memory for this [`ThinColumn`] @@ -127,8 +128,9 @@ impl ThinColumn { self.data.alloc(new_capacity); self.added_ticks.alloc(new_capacity); self.changed_ticks.alloc(new_capacity); - #[cfg(feature = "track_change_detection")] - self.changed_by.alloc(new_capacity); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.alloc(new_capacity)); } /// Writes component data to the column at the given row. 
@@ -144,7 +146,7 @@ impl ThinColumn { row: TableRow, data: OwningPtr<'_>, tick: Tick, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { self.data.initialize_unchecked(row.as_usize(), data); *self.added_ticks.get_unchecked_mut(row.as_usize()).get_mut() = tick; @@ -152,10 +154,10 @@ impl ThinColumn { .changed_ticks .get_unchecked_mut(row.as_usize()) .get_mut() = tick; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller; - } + self.changed_by + .as_mut() + .map(|changed_by| changed_by.get_unchecked_mut(row.as_usize()).get_mut()) + .assign(caller); } /// Writes component data to the column at given row. Assumes the slot is initialized, drops the previous value. @@ -169,17 +171,17 @@ impl ThinColumn { row: TableRow, data: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { self.data.replace_unchecked(row.as_usize(), data); *self .changed_ticks .get_unchecked_mut(row.as_usize()) .get_mut() = change_tick; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller; - } + self.changed_by + .as_mut() + .map(|changed_by| changed_by.get_unchecked_mut(row.as_usize()).get_mut()) + .assign(caller); } /// Removes the element from `other` at `src_row` and inserts it @@ -218,13 +220,13 @@ impl ThinColumn { .swap_remove_unchecked(src_row.as_usize(), other_last_element_index); self.changed_ticks .initialize_unchecked(dst_row.as_usize(), changed_tick); - #[cfg(feature = "track_change_detection")] - let changed_by = other - .changed_by - .swap_remove_unchecked(src_row.as_usize(), other_last_element_index); - #[cfg(feature = "track_change_detection")] - self.changed_by - .initialize_unchecked(dst_row.as_usize(), changed_by); + self.changed_by.as_mut().zip(other.changed_by.as_mut()).map( + |(self_changed_by, 
other_changed_by)| { + let changed_by = other_changed_by + .swap_remove_unchecked(src_row.as_usize(), other_last_element_index); + self_changed_by.initialize_unchecked(dst_row.as_usize(), changed_by); + }, + ); } /// Call [`Tick::check_tick`] on all of the ticks stored in this column. @@ -258,8 +260,9 @@ impl ThinColumn { self.added_ticks.clear_elements(len); self.changed_ticks.clear_elements(len); self.data.clear(len); - #[cfg(feature = "track_change_detection")] - self.changed_by.clear_elements(len); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.clear_elements(len)); } /// Because this method needs parameters, it can't be the implementation of the `Drop` trait. @@ -273,8 +276,9 @@ impl ThinColumn { self.added_ticks.drop(cap, len); self.changed_ticks.drop(cap, len); self.data.drop(cap, len); - #[cfg(feature = "track_change_detection")] - self.changed_by.drop(cap, len); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.drop(cap, len)); } /// Drops the last component in this column. 
@@ -285,8 +289,9 @@ impl ThinColumn { pub(crate) unsafe fn drop_last_component(&mut self, last_element_index: usize) { core::ptr::drop_in_place(self.added_ticks.get_unchecked_raw(last_element_index)); core::ptr::drop_in_place(self.changed_ticks.get_unchecked_raw(last_element_index)); - #[cfg(feature = "track_change_detection")] - core::ptr::drop_in_place(self.changed_by.get_unchecked_raw(last_element_index)); + self.changed_by.as_mut().map(|changed_by| { + core::ptr::drop_in_place(changed_by.get_unchecked_raw(last_element_index)); + }); self.data.drop_last_element(last_element_index); } @@ -319,12 +324,13 @@ impl ThinColumn { /// /// # Safety /// - `len` must match the actual length of this column (number of elements stored) - #[cfg(feature = "track_change_detection")] pub unsafe fn get_changed_by_slice( &self, len: usize, - ) -> &[UnsafeCell<&'static Location<'static>>] { - self.changed_by.as_slice(len) + ) -> MaybeLocation<&[UnsafeCell<&'static Location<'static>>]> { + self.changed_by + .as_ref() + .map(|changed_by| changed_by.as_slice(len)) } } @@ -343,8 +349,7 @@ pub struct Column { pub(super) data: BlobVec, pub(super) added_ticks: Vec>, pub(super) changed_ticks: Vec>, - #[cfg(feature = "track_change_detection")] - changed_by: Vec>>, + changed_by: MaybeLocation>>>, } impl Column { @@ -356,8 +361,7 @@ impl Column { data: unsafe { BlobVec::new(component_info.layout(), component_info.drop(), capacity) }, added_ticks: Vec::with_capacity(capacity), changed_ticks: Vec::with_capacity(capacity), - #[cfg(feature = "track_change_detection")] - changed_by: Vec::with_capacity(capacity), + changed_by: MaybeLocation::new_with(|| Vec::with_capacity(capacity)), } } @@ -378,7 +382,7 @@ impl Column { row: TableRow, data: OwningPtr<'_>, change_tick: Tick, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { debug_assert!(row.as_usize() < self.len()); self.data.replace_unchecked(row.as_usize(), data); @@ -386,10 +390,10 @@ 
impl Column { .changed_ticks .get_unchecked_mut(row.as_usize()) .get_mut() = change_tick; - #[cfg(feature = "track_change_detection")] - { - *self.changed_by.get_unchecked_mut(row.as_usize()).get_mut() = caller; - } + self.changed_by + .as_mut() + .map(|changed_by| changed_by.get_unchecked_mut(row.as_usize()).get_mut()) + .assign(caller); } /// Gets the current number of elements stored in the column. @@ -418,8 +422,9 @@ impl Column { self.data.swap_remove_and_drop_unchecked(row.as_usize()); self.added_ticks.swap_remove(row.as_usize()); self.changed_ticks.swap_remove(row.as_usize()); - #[cfg(feature = "track_change_detection")] - self.changed_by.swap_remove(row.as_usize()); + self.changed_by + .as_mut() + .map(|changed_by| changed_by.swap_remove(row.as_usize())); } /// Removes an element from the [`Column`] and returns it and its change detection ticks. @@ -442,10 +447,10 @@ impl Column { let data = self.data.swap_remove_and_forget_unchecked(row.as_usize()); let added = self.added_ticks.swap_remove(row.as_usize()).into_inner(); let changed = self.changed_ticks.swap_remove(row.as_usize()).into_inner(); - #[cfg(feature = "track_change_detection")] - let caller = self.changed_by.swap_remove(row.as_usize()).into_inner(); - #[cfg(not(feature = "track_change_detection"))] - let caller = (); + let caller = self + .changed_by + .as_mut() + .map(|changed_by| changed_by.swap_remove(row.as_usize()).into_inner()); (data, ComponentTicks { added, changed }, caller) } @@ -457,13 +462,15 @@ impl Column { &mut self, ptr: OwningPtr<'_>, ticks: ComponentTicks, - #[cfg(feature = "track_change_detection")] caller: &'static Location<'static>, + caller: MaybeLocation, ) { self.data.push(ptr); self.added_ticks.push(UnsafeCell::new(ticks.added)); self.changed_ticks.push(UnsafeCell::new(ticks.changed)); - #[cfg(feature = "track_change_detection")] - self.changed_by.push(UnsafeCell::new(caller)); + self.changed_by + .as_mut() + .zip(caller) + .map(|(changed_by, caller)| 
changed_by.push(UnsafeCell::new(caller))); } /// Fetches the data pointer to the first element of the [`Column`]. @@ -644,8 +651,7 @@ impl Column { self.data.clear(); self.added_ticks.clear(); self.changed_ticks.clear(); - #[cfg(feature = "track_change_detection")] - self.changed_by.clear(); + self.changed_by.as_mut().map(Vec::clear); } #[inline] @@ -666,9 +672,13 @@ impl Column { /// Users of this API must ensure that accesses to each individual element /// adhere to the safety invariants of [`UnsafeCell`]. #[inline] - #[cfg(feature = "track_change_detection")] - pub fn get_changed_by(&self, row: TableRow) -> Option<&UnsafeCell<&'static Location<'static>>> { - self.changed_by.get(row.as_usize()) + pub fn get_changed_by( + &self, + row: TableRow, + ) -> MaybeLocation>>> { + self.changed_by + .as_ref() + .map(|changed_by| changed_by.get(row.as_usize())) } /// Fetches the calling location that last changed the value at `row`. @@ -678,12 +688,13 @@ impl Column { /// # Safety /// `row` must be within the range `[0, self.len())`. 
#[inline] - #[cfg(feature = "track_change_detection")] pub unsafe fn get_changed_by_unchecked( &self, row: TableRow, - ) -> &UnsafeCell<&'static Location<'static>> { - debug_assert!(row.as_usize() < self.changed_by.len()); - self.changed_by.get_unchecked(row.as_usize()) + ) -> MaybeLocation<&UnsafeCell<&'static Location<'static>>> { + self.changed_by.as_ref().map(|changed_by| { + debug_assert!(row.as_usize() < changed_by.len()); + changed_by.get_unchecked(row.as_usize()) + }) } } diff --git a/crates/bevy_ecs/src/storage/table/mod.rs b/crates/bevy_ecs/src/storage/table/mod.rs index ec77645eac62a..0f80b77f513c4 100644 --- a/crates/bevy_ecs/src/storage/table/mod.rs +++ b/crates/bevy_ecs/src/storage/table/mod.rs @@ -6,16 +6,15 @@ use crate::{ storage::{blob_vec::BlobVec, ImmutableSparseSet, SparseSet}, }; use alloc::{boxed::Box, vec, vec::Vec}; +use bevy_platform::collections::HashMap; use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref}; -use bevy_utils::HashMap; pub use column::*; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; use core::{ alloc::Layout, cell::UnsafeCell, num::NonZeroUsize, ops::{Index, IndexMut}, + panic::Location, }; mod column; @@ -85,11 +84,11 @@ impl TableId { } } -/// A opaque newtype for rows in [`Table`]s. Specifies a single row in a specific table. +/// An opaque newtype for rows in [`Table`]s. Specifies a single row in a specific table. /// /// Values of this type are retrievable from [`Archetype::entity_table_row`] and can be /// used alongside [`Archetype::table_id`] to fetch the exact table and row where an -/// [`Entity`]'s +/// [`Entity`]'s components are stored. /// /// Values of this type are only valid so long as entities have not moved around. /// Adding and removing components from an entity, or despawning it will invalidate @@ -183,7 +182,7 @@ impl TableBuilder { /// A column-oriented [structure-of-arrays] based storage for [`Component`]s of entities /// in a [`World`]. 
/// -/// Conceptually, a `Table` can be thought of as an `HashMap`, where +/// Conceptually, a `Table` can be thought of as a `HashMap`, where /// each [`ThinColumn`] is a type-erased `Vec`. Each row corresponds to a single entity /// (i.e. index 3 in Column A and index 3 in Column B point to different components on the same /// entity). Fetching components from a table involves fetching the associated column for a @@ -390,14 +389,15 @@ impl Table { } /// Fetches the calling locations that last changed the each component - #[cfg(feature = "track_change_detection")] pub fn get_changed_by_slice_for( &self, component_id: ComponentId, - ) -> Option<&[UnsafeCell<&'static Location<'static>>]> { - self.get_column(component_id) - // SAFETY: `self.len()` is guaranteed to be the len of the locations array - .map(|col| unsafe { col.get_changed_by_slice(self.entity_count()) }) + ) -> MaybeLocation>]>> { + MaybeLocation::new_with_flattened(|| { + self.get_column(component_id) + // SAFETY: `self.len()` is guaranteed to be the len of the locations array + .map(|col| unsafe { col.get_changed_by_slice(self.entity_count()) }) + }) } /// Get the specific [`change tick`](Tick) of the component matching `component_id` in `row`. @@ -433,20 +433,22 @@ impl Table { } /// Get the specific calling location that changed the component matching `component_id` in `row` - #[cfg(feature = "track_change_detection")] pub fn get_changed_by( &self, component_id: ComponentId, row: TableRow, - ) -> Option<&UnsafeCell<&'static Location<'static>>> { - (row.as_usize() < self.entity_count()).then_some( - // SAFETY: `row.as_usize()` < `len` - unsafe { - self.get_column(component_id)? - .changed_by - .get_unchecked(row.as_usize()) - }, - ) + ) -> MaybeLocation>>> { + MaybeLocation::new_with_flattened(|| { + (row.as_usize() < self.entity_count()).then_some( + // SAFETY: `row.as_usize()` < `len` + unsafe { + self.get_column(component_id)? 
+ .changed_by + .as_ref() + .map(|changed_by| changed_by.get_unchecked(row.as_usize())) + }, + ) + }) } /// Get the [`ComponentTicks`] of the component matching `component_id` in `row`. @@ -571,9 +573,12 @@ impl Table { .initialize_unchecked(len, UnsafeCell::new(Tick::new(0))); col.changed_ticks .initialize_unchecked(len, UnsafeCell::new(Tick::new(0))); - #[cfg(feature = "track_change_detection")] col.changed_by - .initialize_unchecked(len, UnsafeCell::new(Location::caller())); + .as_mut() + .zip(MaybeLocation::caller()) + .map(|(changed_by, caller)| { + changed_by.initialize_unchecked(len, UnsafeCell::new(caller)); + }); } TableRow::from_usize(len) } @@ -815,15 +820,14 @@ impl Drop for Table { #[cfg(test)] mod tests { - use crate as bevy_ecs; use crate::{ - component::{Component, Components, Tick}, + change_detection::MaybeLocation, + component::{Component, ComponentIds, Components, ComponentsRegistrator, Tick}, entity::Entity, ptr::OwningPtr, - storage::{Storages, TableBuilder, TableId, TableRow, Tables}, + storage::{TableBuilder, TableId, TableRow, Tables}, }; - #[cfg(feature = "track_change_detection")] - use core::panic::Location; + use alloc::vec::Vec; #[derive(Component)] struct W(T); @@ -843,8 +847,11 @@ mod tests { #[test] fn table() { let mut components = Components::default(); - let mut storages = Storages::default(); - let component_id = components.register_component::>(&mut storages); + let mut componentids = ComponentIds::default(); + // SAFETY: They are both new. 
+ let mut registrator = + unsafe { ComponentsRegistrator::new(&mut components, &mut componentids) }; + let component_id = registrator.register_component::>(); let columns = &[component_id]; let mut table = TableBuilder::with_capacity(0, columns.len()) .add_column(components.get_info(component_id).unwrap()) @@ -860,8 +867,7 @@ mod tests { row, value_ptr, Tick::new(0), - #[cfg(feature = "track_change_detection")] - Location::caller(), + MaybeLocation::caller(), ); }); }; diff --git a/crates/bevy_ecs/src/storage/thin_array_ptr.rs b/crates/bevy_ecs/src/storage/thin_array_ptr.rs index 9c073324559d3..90163440297c8 100644 --- a/crates/bevy_ecs/src/storage/thin_array_ptr.rs +++ b/crates/bevy_ecs/src/storage/thin_array_ptr.rs @@ -14,6 +14,8 @@ use core::{ /// /// This type can be treated as a `ManuallyDrop>` without a built in length. To avoid /// memory leaks, [`drop`](Self::drop) must be called when no longer in use. +/// +/// [`Vec`]: alloc::vec::Vec pub struct ThinArrayPtr { data: NonNull, #[cfg(debug_assertions)] @@ -85,7 +87,7 @@ impl ThinArrayPtr { /// - The caller should update their saved `capacity` value to reflect the fact that it was changed pub unsafe fn realloc(&mut self, current_capacity: NonZeroUsize, new_capacity: NonZeroUsize) { #[cfg(debug_assertions)] - assert_eq!(self.capacity, current_capacity.into()); + assert_eq!(self.capacity, current_capacity.get()); self.set_capacity(new_capacity.get()); if size_of::() != 0 { let new_layout = diff --git a/crates/bevy_ecs/src/system/adapter_system.rs b/crates/bevy_ecs/src/system/adapter_system.rs index 27e812928c8a1..5953a43d70736 100644 --- a/crates/bevy_ecs/src/system/adapter_system.rs +++ b/crates/bevy_ecs/src/system/adapter_system.rs @@ -1,6 +1,6 @@ use alloc::{borrow::Cow, vec::Vec}; -use super::{IntoSystem, ReadOnlySystem, System}; +use super::{IntoSystem, ReadOnlySystem, System, SystemParamValidationError}; use crate::{ schedule::InternedSystemSet, system::{input::SystemInput, SystemIn}, @@ -131,6 +131,12 @@ 
where self.system.component_access() } + fn component_access_set( + &self, + ) -> &crate::query::FilteredAccessSet { + self.system.component_access_set() + } + #[inline] fn archetype_component_access( &self, @@ -162,12 +168,6 @@ where }) } - #[inline] - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut crate::prelude::World) -> Self::Out { - self.func - .adapt(input, |input| self.system.run(input, world)) - } - #[inline] fn apply_deferred(&mut self, world: &mut crate::prelude::World) { self.system.apply_deferred(world); @@ -179,7 +179,10 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegate to other `System` implementations. unsafe { self.system.validate_param_unsafe(world) } } diff --git a/crates/bevy_ecs/src/system/builder.rs b/crates/bevy_ecs/src/system/builder.rs index fe68ea6bfa6a7..6261b9e35587e 100644 --- a/crates/bevy_ecs/src/system/builder.rs +++ b/crates/bevy_ecs/src/system/builder.rs @@ -5,6 +5,7 @@ use variadics_please::all_tuples; use crate::{ prelude::QueryBuilder, query::{QueryData, QueryFilter, QueryState}, + resource::Resource, system::{ DynSystemParam, DynSystemParamState, Local, ParamSet, Query, SystemMeta, SystemParam, }, @@ -15,7 +16,7 @@ use crate::{ }; use core::fmt::Debug; -use super::{init_query_param, Res, ResMut, Resource, SystemState}; +use super::{init_query_param, Res, ResMut, SystemState}; /// A builder that can create a [`SystemParam`]. 
/// @@ -162,7 +163,7 @@ pub unsafe trait SystemParamBuilder: Sized { /// .build_state(&mut world) /// .build_system(my_system); /// ``` -#[derive(Default, Debug, Copy, Clone)] +#[derive(Default, Debug, Clone)] pub struct ParamBuilder; // SAFETY: Calls `SystemParam::init_state` @@ -240,7 +241,7 @@ unsafe impl<'w, 's, D: QueryData + 'static, F: QueryFilter + 'static> /// .build_state(&mut world) /// .build_system(|query: Query<()>| { /// for _ in &query { -/// // This only includes entities with an `Player` component. +/// // This only includes entities with a `Player` component. /// } /// }); /// @@ -257,6 +258,7 @@ unsafe impl<'w, 's, D: QueryData + 'static, F: QueryFilter + 'static> /// .build_state(&mut world) /// .build_system(|query: Vec>| {}); /// ``` +#[derive(Clone)] pub struct QueryParamBuilder(T); impl QueryParamBuilder { @@ -299,14 +301,28 @@ unsafe impl< macro_rules! impl_system_param_builder_tuple { ($(#[$meta:meta])* $(($param: ident, $builder: ident)),*) => { + #[expect( + clippy::allow_attributes, + reason = "This is in a macro; as such, the below lints may not always apply." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + #[allow( + non_snake_case, + reason = "The variable names are provided by the macro caller, not by us." 
+ )] $(#[$meta])* // SAFETY: implementors of each `SystemParamBuilder` in the tuple have validated their impls unsafe impl<$($param: SystemParam,)* $($builder: SystemParamBuilder<$param>,)*> SystemParamBuilder<($($param,)*)> for ($($builder,)*) { - fn build(self, _world: &mut World, _meta: &mut SystemMeta) -> <($($param,)*) as SystemParam>::State { - #[allow(non_snake_case)] + fn build(self, world: &mut World, meta: &mut SystemMeta) -> <($($param,)*) as SystemParam>::State { let ($($builder,)*) = self; - #[allow(clippy::unused_unit)] - ($($builder.build(_world, _meta),)*) + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples won't generate any calls to the system parameter builders." + )] + ($($builder.build(world, meta),)*) } } }; @@ -401,14 +417,26 @@ unsafe impl> SystemParamBuilder> /// set.for_each(|mut query| for mut health in query.iter_mut() {}); /// } /// ``` +#[derive(Debug, Default, Clone)] pub struct ParamSetBuilder(pub T); macro_rules! impl_param_set_builder_tuple { ($(($param: ident, $builder: ident, $meta: ident)),*) => { + #[expect( + clippy::allow_attributes, + reason = "This is in a macro; as such, the below lints may not always apply." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] + #[allow( + non_snake_case, + reason = "The variable names are provided by the macro caller, not by us." 
+ )] // SAFETY: implementors of each `SystemParamBuilder` in the tuple have validated their impls unsafe impl<'w, 's, $($param: SystemParam,)* $($builder: SystemParamBuilder<$param>,)*> SystemParamBuilder> for ParamSetBuilder<($($builder,)*)> { - #[allow(non_snake_case)] - fn build(self, _world: &mut World, _system_meta: &mut SystemMeta) -> <($($param,)*) as SystemParam>::State { + fn build(self, world: &mut World, system_meta: &mut SystemMeta) -> <($($param,)*) as SystemParam>::State { let ParamSetBuilder(($($builder,)*)) = self; // Note that this is slightly different from `init_state`, which calls `init_state` on each param twice. // One call populates an empty `SystemMeta` with the new access, while the other runs against a cloned `SystemMeta` to check for conflicts. @@ -416,22 +444,25 @@ macro_rules! impl_param_set_builder_tuple { // That means that any `filtered_accesses` in the `component_access_set` will get copied to every `$meta` // and will appear multiple times in the final `SystemMeta`. $( - let mut $meta = _system_meta.clone(); - let $param = $builder.build(_world, &mut $meta); + let mut $meta = system_meta.clone(); + let $param = $builder.build(world, &mut $meta); )* // Make the ParamSet non-send if any of its parameters are non-send. if false $(|| !$meta.is_send())* { - _system_meta.set_non_send(); + system_meta.set_non_send(); } $( - _system_meta + system_meta .component_access_set .extend($meta.component_access_set); - _system_meta + system_meta .archetype_component_access .extend(&$meta.archetype_component_access); )* - #[allow(clippy::unused_unit)] + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples won't generate any calls to the system parameter builders." + )] ($($param,)*) } } @@ -520,6 +551,7 @@ unsafe impl<'a, 'w, 's> SystemParamBuilder> for DynParamB /// }); /// # world.run_system_once(system); /// ``` +#[derive(Default, Debug, Clone)] pub struct LocalBuilder(pub T); // SAFETY: `Local` performs no world access. 
@@ -537,6 +569,7 @@ unsafe impl<'s, T: FromWorld + Send + 'static> SystemParamBuilder> /// A [`SystemParamBuilder`] for a [`FilteredResources`]. /// See the [`FilteredResources`] docs for examples. +#[derive(Clone)] pub struct FilteredResourcesParamBuilder(T); impl FilteredResourcesParamBuilder { @@ -600,6 +633,7 @@ unsafe impl<'w, 's, T: FnOnce(&mut FilteredResourcesBuilder)> /// A [`SystemParamBuilder`] for a [`FilteredResourcesMut`]. /// See the [`FilteredResourcesMut`] docs for examples. +#[derive(Clone)] pub struct FilteredResourcesMutParamBuilder(T); impl FilteredResourcesMutParamBuilder { @@ -678,12 +712,14 @@ unsafe impl<'w, 's, T: FnOnce(&mut FilteredResourcesMutBuilder)> #[cfg(test)] mod tests { - use crate as bevy_ecs; use crate::{ entity::Entities, prelude::{Component, Query}, + reflect::ReflectResource, system::{Local, RunSystemOnce}, }; + use alloc::vec; + use bevy_reflect::{FromType, Reflect, ReflectRef}; use super::*; @@ -696,8 +732,11 @@ mod tests { #[derive(Component)] struct C; - #[derive(Resource, Default)] - struct R; + #[derive(Resource, Default, Reflect)] + #[reflect(Resource)] + struct R { + foo: usize, + } fn local_system(local: Local) -> u64 { *local @@ -1037,4 +1076,31 @@ mod tests { .build_state(&mut world) .build_system(|_r: ResMut, _fr: FilteredResourcesMut| {}); } + + #[test] + fn filtered_resource_reflect() { + let mut world = World::new(); + world.insert_resource(R { foo: 7 }); + + let system = (FilteredResourcesParamBuilder::new(|builder| { + builder.add_read::(); + }),) + .build_state(&mut world) + .build_system(|res: FilteredResources| { + let reflect_resource = >::from_type(); + let ReflectRef::Struct(reflect_struct) = + reflect_resource.reflect(res).unwrap().reflect_ref() + else { + panic!() + }; + *reflect_struct + .field("foo") + .unwrap() + .try_downcast_ref::() + .unwrap() + }); + + let output = world.run_system_once(system).unwrap(); + assert_eq!(output, 7); + } } diff --git a/crates/bevy_ecs/src/system/combinator.rs 
b/crates/bevy_ecs/src/system/combinator.rs index 1f956c5d4c00e..9d11de95258a6 100644 --- a/crates/bevy_ecs/src/system/combinator.rs +++ b/crates/bevy_ecs/src/system/combinator.rs @@ -5,9 +5,9 @@ use crate::{ archetype::ArchetypeComponentId, component::{ComponentId, Tick}, prelude::World, - query::Access, + query::{Access, FilteredAccessSet}, schedule::InternedSystemSet, - system::{input::SystemInput, SystemIn}, + system::{input::SystemInput, SystemIn, SystemParamValidationError}, world::unsafe_world_cell::UnsafeWorldCell, }; @@ -114,7 +114,7 @@ pub struct CombinatorSystem { a: A, b: B, name: Cow<'static, str>, - component_access: Access, + component_access_set: FilteredAccessSet, archetype_component_access: Access, } @@ -122,13 +122,13 @@ impl CombinatorSystem { /// Creates a new system that combines two inner systems. /// /// The returned system will only be usable if `Func` implements [`Combine`]. - pub const fn new(a: A, b: B, name: Cow<'static, str>) -> Self { + pub fn new(a: A, b: B, name: Cow<'static, str>) -> Self { Self { _marker: PhantomData, a, b, name, - component_access: Access::new(), + component_access_set: FilteredAccessSet::default(), archetype_component_access: Access::new(), } } @@ -148,7 +148,11 @@ where } fn component_access(&self) -> &Access { - &self.component_access + self.component_access_set.combined_access() + } + + fn component_access_set(&self) -> &FilteredAccessSet { + &self.component_access_set } fn archetype_component_access(&self) -> &Access { @@ -176,6 +180,7 @@ where input, // SAFETY: The world accesses for both underlying systems have been registered, // so the caller will guarantee that no other systems will conflict with `a` or `b`. + // If either system has `is_exclusive()`, then the combined system also has `is_exclusive`. // Since these closures are `!Send + !Sync + !'static`, they can never be called // in parallel, so their world accesses will not conflict with each other. 
// Additionally, `update_archetype_component_access` has been called, @@ -186,19 +191,6 @@ where ) } - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { - let world = world.as_unsafe_world_cell(); - Func::combine( - input, - // SAFETY: Since these closures are `!Send + !Sync + !'static`, they can never - // be called in parallel. Since mutable access to `world` only exists within - // the scope of either closure, we can be sure they will never alias one another. - |input| self.a.run(input, unsafe { world.world_mut() }), - #[allow(clippy::undocumented_unsafe_blocks)] - |input| self.b.run(input, unsafe { world.world_mut() }), - ) - } - #[inline] fn apply_deferred(&mut self, world: &mut World) { self.a.apply_deferred(world); @@ -212,7 +204,10 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegate to other `System` implementations. unsafe { self.a.validate_param_unsafe(world) } } @@ -220,8 +215,10 @@ where fn initialize(&mut self, world: &mut World) { self.a.initialize(world); self.b.initialize(world); - self.component_access.extend(self.a.component_access()); - self.component_access.extend(self.b.component_access()); + self.component_access_set + .extend(self.a.component_access_set().clone()); + self.component_access_set + .extend(self.b.component_access_set().clone()); } fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) { @@ -352,7 +349,7 @@ pub struct PipeSystem { a: A, b: B, name: Cow<'static, str>, - component_access: Access, + component_access_set: FilteredAccessSet, archetype_component_access: Access, } @@ -363,12 +360,12 @@ where for<'a> B::In: SystemInput = A::Out>, { /// Creates a new system that pipes two inner systems. 
- pub const fn new(a: A, b: B, name: Cow<'static, str>) -> Self { + pub fn new(a: A, b: B, name: Cow<'static, str>) -> Self { Self { a, b, name, - component_access: Access::new(), + component_access_set: FilteredAccessSet::default(), archetype_component_access: Access::new(), } } @@ -388,7 +385,11 @@ where } fn component_access(&self) -> &Access { - &self.component_access + self.component_access_set.combined_access() + } + + fn component_access_set(&self) -> &FilteredAccessSet { + &self.component_access_set } fn archetype_component_access(&self) -> &Access { @@ -416,11 +417,6 @@ where self.b.run_unsafe(value, world) } - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { - let value = self.a.run(input, world); - self.b.run(value, world) - } - fn apply_deferred(&mut self, world: &mut World) { self.a.apply_deferred(world); self.b.apply_deferred(world); @@ -431,20 +427,36 @@ where self.b.queue_deferred(world); } - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { - // SAFETY: Delegate to other `System` implementations. - unsafe { self.a.validate_param_unsafe(world) } - } + /// This method uses "early out" logic: if the first system fails validation, + /// the second system is not validated. + /// + /// Because the system validation is performed upfront, this can lead to situations + /// where later systems pass validation, but fail at runtime due to changes made earlier + /// in the piped systems. + // TODO: ensure that systems are only validated just before they are run. + // Fixing this will require fundamentally rethinking how piped systems work: + // they're currently treated as a single system from the perspective of the scheduler. + // See https://github.com/bevyengine/bevy/issues/18796 + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { + // SAFETY: Delegate to the `System` implementation for `a`. 
+ unsafe { self.a.validate_param_unsafe(world) }?; + + // SAFETY: Delegate to the `System` implementation for `b`. + unsafe { self.b.validate_param_unsafe(world) }?; - fn validate_param(&mut self, world: &World) -> bool { - self.a.validate_param(world) && self.b.validate_param(world) + Ok(()) } fn initialize(&mut self, world: &mut World) { self.a.initialize(world); self.b.initialize(world); - self.component_access.extend(self.a.component_access()); - self.component_access.extend(self.b.component_access()); + self.component_access_set + .extend(self.a.component_access_set().clone()); + self.component_access_set + .extend(self.b.component_access_set().clone()); } fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) { @@ -486,3 +498,27 @@ where for<'a> B::In: SystemInput = A::Out>, { } + +#[cfg(test)] +mod tests { + + #[test] + fn exclusive_system_piping_is_possible() { + use crate::prelude::*; + + fn my_exclusive_system(_world: &mut World) -> u32 { + 1 + } + + fn out_pipe(input: In) { + assert!(input.0 == 1); + } + + let mut world = World::new(); + + let mut schedule = Schedule::default(); + schedule.add_systems(my_exclusive_system.pipe(out_pipe)); + + schedule.run(&mut world); + } +} diff --git a/crates/bevy_ecs/src/system/commands/command.rs b/crates/bevy_ecs/src/system/commands/command.rs new file mode 100644 index 0000000000000..af7b88edfc77c --- /dev/null +++ b/crates/bevy_ecs/src/system/commands/command.rs @@ -0,0 +1,239 @@ +//! Contains the definition of the [`Command`] trait, +//! as well as the blanket implementation of the trait for closures. +//! +//! It also contains functions that return closures for use with +//! [`Commands`](crate::system::Commands). 
+ +use crate::{ + bundle::{Bundle, InsertMode, NoBundleEffect}, + change_detection::MaybeLocation, + entity::Entity, + error::Result, + event::{Event, Events}, + observer::TriggerTargets, + resource::Resource, + schedule::ScheduleLabel, + system::{IntoSystem, SystemId, SystemInput}, + world::{FromWorld, SpawnBatchIter, World}, +}; + +/// A [`World`] mutation. +/// +/// Should be used with [`Commands::queue`](crate::system::Commands::queue). +/// +/// The `Out` generic parameter is the returned "output" of the command. +/// +/// # Usage +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// // Our world resource +/// #[derive(Resource, Default)] +/// struct Counter(u64); +/// +/// // Our custom command +/// struct AddToCounter(u64); +/// +/// impl Command for AddToCounter { +/// fn apply(self, world: &mut World) { +/// let mut counter = world.get_resource_or_insert_with(Counter::default); +/// counter.0 += self.0; +/// } +/// } +/// +/// fn some_system(mut commands: Commands) { +/// commands.queue(AddToCounter(42)); +/// } +/// ``` +pub trait Command: Send + 'static { + /// Applies this command, causing it to mutate the provided `world`. + /// + /// This method is used to define what a command "does" when it is ultimately applied. + /// Because this method takes `self`, you can store data or settings on the type that implements this trait. + /// This data is set by the system or other source of the command, and then ultimately read in this method. + fn apply(self, world: &mut World) -> Out; +} + +impl Command for F +where + F: FnOnce(&mut World) -> Out + Send + 'static, +{ + fn apply(self, world: &mut World) -> Out { + self(world) + } +} + +/// A [`Command`] that consumes an iterator of [`Bundles`](Bundle) to spawn a series of entities. +/// +/// This is more efficient than spawning the entities individually. 
+#[track_caller] +pub fn spawn_batch(bundles_iter: I) -> impl Command +where + I: IntoIterator + Send + Sync + 'static, + I::Item: Bundle, +{ + let caller = MaybeLocation::caller(); + move |world: &mut World| { + SpawnBatchIter::new(world, bundles_iter.into_iter(), caller); + } +} + +/// A [`Command`] that consumes an iterator to add a series of [`Bundles`](Bundle) to a set of entities. +/// +/// If any entities do not exist in the world, this command will return a +/// [`TryInsertBatchError`](crate::world::error::TryInsertBatchError). +/// +/// This is more efficient than inserting the bundles individually. +#[track_caller] +pub fn insert_batch(batch: I, insert_mode: InsertMode) -> impl Command +where + I: IntoIterator + Send + Sync + 'static, + B: Bundle, +{ + let caller = MaybeLocation::caller(); + move |world: &mut World| -> Result { + world.try_insert_batch_with_caller(batch, insert_mode, caller)?; + Ok(()) + } +} + +/// A [`Command`] that inserts a [`Resource`] into the world using a value +/// created with the [`FromWorld`] trait. +#[track_caller] +pub fn init_resource() -> impl Command { + move |world: &mut World| { + world.init_resource::(); + } +} + +/// A [`Command`] that inserts a [`Resource`] into the world. +#[track_caller] +pub fn insert_resource(resource: R) -> impl Command { + let caller = MaybeLocation::caller(); + move |world: &mut World| { + world.insert_resource_with_caller(resource, caller); + } +} + +/// A [`Command`] that removes a [`Resource`] from the world. +pub fn remove_resource() -> impl Command { + move |world: &mut World| { + world.remove_resource::(); + } +} + +/// A [`Command`] that runs the system corresponding to the given [`SystemId`]. +pub fn run_system(id: SystemId<(), O>) -> impl Command { + move |world: &mut World| -> Result { + world.run_system(id)?; + Ok(()) + } +} + +/// A [`Command`] that runs the system corresponding to the given [`SystemId`] +/// and provides the given input value. 
+pub fn run_system_with(id: SystemId, input: I::Inner<'static>) -> impl Command +where + I: SystemInput: Send> + 'static, +{ + move |world: &mut World| -> Result { + world.run_system_with(id, input)?; + Ok(()) + } +} + +/// A [`Command`] that runs the given system, +/// caching its [`SystemId`] in a [`CachedSystemId`](crate::system::CachedSystemId) resource. +pub fn run_system_cached(system: S) -> impl Command +where + M: 'static, + S: IntoSystem<(), (), M> + Send + 'static, +{ + move |world: &mut World| -> Result { + world.run_system_cached(system)?; + Ok(()) + } +} + +/// A [`Command`] that runs the given system with the given input value, +/// caching its [`SystemId`] in a [`CachedSystemId`](crate::system::CachedSystemId) resource. +pub fn run_system_cached_with(system: S, input: I::Inner<'static>) -> impl Command +where + I: SystemInput: Send> + Send + 'static, + M: 'static, + S: IntoSystem + Send + 'static, +{ + move |world: &mut World| -> Result { + world.run_system_cached_with(system, input)?; + Ok(()) + } +} + +/// A [`Command`] that removes a system previously registered with +/// [`Commands::register_system`](crate::system::Commands::register_system) or +/// [`World::register_system`]. 
+pub fn unregister_system(system_id: SystemId) -> impl Command +where + I: SystemInput + Send + 'static, + O: Send + 'static, +{ + move |world: &mut World| -> Result { + world.unregister_system(system_id)?; + Ok(()) + } +} + +/// A [`Command`] that removes a system previously registered with one of the following: +/// - [`Commands::run_system_cached`](crate::system::Commands::run_system_cached) +/// - [`World::run_system_cached`] +/// - [`World::register_system_cached`] +pub fn unregister_system_cached(system: S) -> impl Command +where + I: SystemInput + Send + 'static, + O: 'static, + M: 'static, + S: IntoSystem + Send + 'static, +{ + move |world: &mut World| -> Result { + world.unregister_system_cached(system)?; + Ok(()) + } +} + +/// A [`Command`] that runs the schedule corresponding to the given [`ScheduleLabel`]. +pub fn run_schedule(label: impl ScheduleLabel) -> impl Command { + move |world: &mut World| -> Result { + world.try_run_schedule(label)?; + Ok(()) + } +} + +/// A [`Command`] that sends a global [`Trigger`](crate::observer::Trigger) without any targets. +#[track_caller] +pub fn trigger(event: impl Event) -> impl Command { + let caller = MaybeLocation::caller(); + move |world: &mut World| { + world.trigger_with_caller(event, caller); + } +} + +/// A [`Command`] that sends a [`Trigger`](crate::observer::Trigger) for the given targets. +pub fn trigger_targets( + event: impl Event, + targets: impl TriggerTargets + Send + Sync + 'static, +) -> impl Command { + let caller = MaybeLocation::caller(); + move |world: &mut World| { + world.trigger_targets_with_caller(event, targets, caller); + } +} + +/// A [`Command`] that sends an arbitrary [`Event`]. 
+#[track_caller] +pub fn send_event(event: E) -> impl Command { + let caller = MaybeLocation::caller(); + move |world: &mut World| { + let mut events = world.resource_mut::>(); + events.send_with_caller(event, caller); + } +} diff --git a/crates/bevy_ecs/src/system/commands/entity_command.rs b/crates/bevy_ecs/src/system/commands/entity_command.rs new file mode 100644 index 0000000000000..317ad8476abe8 --- /dev/null +++ b/crates/bevy_ecs/src/system/commands/entity_command.rs @@ -0,0 +1,282 @@ +//! Contains the definition of the [`EntityCommand`] trait, +//! as well as the blanket implementation of the trait for closures. +//! +//! It also contains functions that return closures for use with +//! [`EntityCommands`](crate::system::EntityCommands). + +use alloc::vec::Vec; +use log::info; + +use crate::{ + bundle::{Bundle, InsertMode}, + change_detection::MaybeLocation, + component::{Component, ComponentId, ComponentInfo}, + entity::{Entity, EntityClonerBuilder}, + event::Event, + relationship::RelationshipHookMode, + system::IntoObserverSystem, + world::{error::EntityMutableFetchError, EntityWorldMut, FromWorld}, +}; +use bevy_ptr::OwningPtr; + +/// A command which gets executed for a given [`Entity`]. +/// +/// Should be used with [`EntityCommands::queue`](crate::system::EntityCommands::queue). +/// +/// The `Out` generic parameter is the returned "output" of the command. +/// +/// # Examples +/// +/// ``` +/// # use std::collections::HashSet; +/// # use bevy_ecs::prelude::*; +/// use bevy_ecs::system::EntityCommand; +/// # +/// # #[derive(Component, PartialEq)] +/// # struct Name(String); +/// # impl Name { +/// # fn new(s: String) -> Self { Name(s) } +/// # fn as_str(&self) -> &str { &self.0 } +/// # } +/// +/// #[derive(Resource, Default)] +/// struct Counter(i64); +/// +/// /// A `Command` which names an entity based on a global counter. +/// fn count_name(mut entity: EntityWorldMut) { +/// // Get the current value of the counter, and increment it for next time. 
+/// let i = { +/// let mut counter = entity.resource_mut::(); +/// let i = counter.0; +/// counter.0 += 1; +/// i +/// }; +/// // Name the entity after the value of the counter. +/// entity.insert(Name::new(format!("Entity #{i}"))); +/// } +/// +/// // App creation boilerplate omitted... +/// # let mut world = World::new(); +/// # world.init_resource::(); +/// # +/// # let mut setup_schedule = Schedule::default(); +/// # setup_schedule.add_systems(setup); +/// # let mut assert_schedule = Schedule::default(); +/// # assert_schedule.add_systems(assert_names); +/// # +/// # setup_schedule.run(&mut world); +/// # assert_schedule.run(&mut world); +/// +/// fn setup(mut commands: Commands) { +/// commands.spawn_empty().queue(count_name); +/// commands.spawn_empty().queue(count_name); +/// } +/// +/// fn assert_names(named: Query<&Name>) { +/// // We use a HashSet because we do not care about the order. +/// let names: HashSet<_> = named.iter().map(Name::as_str).collect(); +/// assert_eq!(names, HashSet::from_iter(["Entity #0", "Entity #1"])); +/// } +/// ``` +pub trait EntityCommand: Send + 'static { + /// Executes this command for the given [`Entity`]. + fn apply(self, entity: EntityWorldMut) -> Out; +} + +/// An error that occurs when running an [`EntityCommand`] on a specific entity. +#[derive(thiserror::Error, Debug)] +pub enum EntityCommandError { + /// The entity this [`EntityCommand`] tried to run on could not be fetched. + #[error(transparent)] + EntityFetchError(#[from] EntityMutableFetchError), + /// An error that occurred while running the [`EntityCommand`]. + #[error("{0}")] + CommandFailed(E), +} + +impl EntityCommand for F +where + F: FnOnce(EntityWorldMut) -> Out + Send + 'static, +{ + fn apply(self, entity: EntityWorldMut) -> Out { + self(entity) + } +} + +/// An [`EntityCommand`] that adds the components in a [`Bundle`] to an entity. 
+#[track_caller] +pub fn insert(bundle: impl Bundle, mode: InsertMode) -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + entity.insert_with_caller(bundle, mode, caller, RelationshipHookMode::Run); + } +} + +/// An [`EntityCommand`] that adds a dynamic component to an entity. +/// +/// # Safety +/// +/// - [`ComponentId`] must be from the same world as the target entity. +/// - `T` must have the same layout as the one passed during `component_id` creation. +#[track_caller] +pub unsafe fn insert_by_id( + component_id: ComponentId, + value: T, + mode: InsertMode, +) -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + // SAFETY: + // - `component_id` safety is ensured by the caller + // - `ptr` is valid within the `make` block + OwningPtr::make(value, |ptr| unsafe { + entity.insert_by_id_with_caller( + component_id, + ptr, + mode, + caller, + RelationshipHookMode::Run, + ); + }); + } +} + +/// An [`EntityCommand`] that adds a component to an entity using +/// the component's [`FromWorld`] implementation. +#[track_caller] +pub fn insert_from_world(mode: InsertMode) -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + let value = entity.world_scope(|world| T::from_world(world)); + entity.insert_with_caller(value, mode, caller, RelationshipHookMode::Run); + } +} + +/// An [`EntityCommand`] that removes the components in a [`Bundle`] from an entity. +#[track_caller] +pub fn remove() -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + entity.remove_with_caller::(caller); + } +} + +/// An [`EntityCommand`] that removes the components in a [`Bundle`] from an entity, +/// as well as the required components for each component removed. 
+#[track_caller] +pub fn remove_with_requires() -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + entity.remove_with_requires_with_caller::(caller); + } +} + +/// An [`EntityCommand`] that removes a dynamic component from an entity. +#[track_caller] +pub fn remove_by_id(component_id: ComponentId) -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + entity.remove_by_id_with_caller(component_id, caller); + } +} + +/// An [`EntityCommand`] that removes all components from an entity. +#[track_caller] +pub fn clear() -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + entity.clear_with_caller(caller); + } +} + +/// An [`EntityCommand`] that removes all components from an entity, +/// except for those in the given [`Bundle`]. +#[track_caller] +pub fn retain() -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + entity.retain_with_caller::(caller); + } +} + +/// An [`EntityCommand`] that despawns an entity. +/// +/// # Note +/// +/// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) +/// that is configured to despawn descendants. +/// +/// For example, this will recursively despawn [`Children`](crate::hierarchy::Children). 
+#[track_caller] +pub fn despawn() -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |entity: EntityWorldMut| { + entity.despawn_with_caller(caller); + } +} + +/// An [`EntityCommand`] that creates an [`Observer`](crate::observer::Observer) +/// listening for events of type `E` targeting an entity +#[track_caller] +pub fn observe( + observer: impl IntoObserverSystem, +) -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + entity.observe_with_caller(observer, caller); + } +} + +/// An [`EntityCommand`] that sends a [`Trigger`](crate::observer::Trigger) targeting an entity. +/// +/// This will run any [`Observer`](crate::observer::Observer) of the given [`Event`] watching the entity. +#[track_caller] +pub fn trigger(event: impl Event) -> impl EntityCommand { + let caller = MaybeLocation::caller(); + move |mut entity: EntityWorldMut| { + let id = entity.id(); + entity.world_scope(|world| { + world.trigger_targets_with_caller(event, id, caller); + }); + } +} + +/// An [`EntityCommand`] that clones parts of an entity onto another entity, +/// configured through [`EntityClonerBuilder`]. +pub fn clone_with( + target: Entity, + config: impl FnOnce(&mut EntityClonerBuilder) + Send + Sync + 'static, +) -> impl EntityCommand { + move |mut entity: EntityWorldMut| { + entity.clone_with(target, config); + } +} + +/// An [`EntityCommand`] that clones the specified components of an entity +/// and inserts them into another entity. +pub fn clone_components(target: Entity) -> impl EntityCommand { + move |mut entity: EntityWorldMut| { + entity.clone_components::(target); + } +} + +/// An [`EntityCommand`] that clones the specified components of an entity +/// and inserts them into another entity, then removes them from the original entity. 
+pub fn move_components(target: Entity) -> impl EntityCommand { + move |mut entity: EntityWorldMut| { + entity.move_components::(target); + } +} + +/// An [`EntityCommand`] that logs the components of an entity. +pub fn log_components() -> impl EntityCommand { + move |entity: EntityWorldMut| { + let debug_infos: Vec<_> = entity + .world() + .inspect_entity(entity.id()) + .expect("Entity existence is verified before an EntityCommand is executed") + .map(ComponentInfo::name) + .collect(); + info!("Entity {}: {debug_infos:?}", entity.id()); + } +} diff --git a/crates/bevy_ecs/src/system/commands/mod.rs b/crates/bevy_ecs/src/system/commands/mod.rs index 9086ece263824..4cb6d61bc0e9a 100644 --- a/crates/bevy_ecs/src/system/commands/mod.rs +++ b/crates/bevy_ecs/src/system/commands/mod.rs @@ -1,33 +1,39 @@ +pub mod command; +pub mod entity_command; + #[cfg(feature = "std")] mod parallel_scope; -use alloc::vec::Vec; -use core::{marker::PhantomData, panic::Location}; +pub use command::Command; +pub use entity_command::EntityCommand; + +#[cfg(feature = "std")] +pub use parallel_scope::*; + +use alloc::boxed::Box; +use core::marker::PhantomData; +use log::error; -use super::{ - Deferred, IntoObserverSystem, IntoSystem, RegisterSystem, Resource, RunSystemCachedWith, - UnregisterSystem, UnregisterSystemCached, -}; use crate::{ self as bevy_ecs, - bundle::{Bundle, InsertMode}, - change_detection::Mut, - component::{Component, ComponentId, ComponentInfo, Mutable}, - entity::{Entities, Entity, EntityCloneBuilder}, - event::{Event, SendEvent}, - observer::{Observer, TriggerEvent, TriggerTargets}, + bundle::{Bundle, InsertMode, NoBundleEffect}, + change_detection::{MaybeLocation, Mut}, + component::{Component, ComponentId, Mutable}, + entity::{Entities, Entity, EntityClonerBuilder, EntityDoesNotExistError}, + error::{ignore, warn, BevyError, CommandWithEntity, ErrorContext, HandleError}, + event::Event, + observer::{Observer, TriggerTargets}, + resource::Resource, 
schedule::ScheduleLabel, - system::{input::SystemInput, RunSystemWith, SystemId}, + system::{ + Deferred, IntoObserverSystem, IntoSystem, RegisteredSystem, SystemId, SystemInput, + SystemParamValidationError, + }, world::{ - command_queue::RawCommandQueue, unsafe_world_cell::UnsafeWorldCell, Command, CommandQueue, - EntityWorldMut, FromWorld, SpawnBatchIter, World, + command_queue::RawCommandQueue, unsafe_world_cell::UnsafeWorldCell, CommandQueue, + EntityWorldMut, FromWorld, World, }, }; -use bevy_ptr::OwningPtr; -use log::{error, info}; - -#[cfg(feature = "std")] -pub use parallel_scope::*; /// A [`Command`] queue to perform structural changes to the [`World`]. /// @@ -53,7 +59,6 @@ pub use parallel_scope::*; /// /// ``` /// # use bevy_ecs::prelude::*; -/// # /// fn my_system(mut commands: Commands) { /// // ... /// } @@ -75,11 +80,23 @@ pub use parallel_scope::*; /// // NOTE: type inference fails here, so annotations are required on the closure. /// commands.queue(|w: &mut World| { /// // Mutate the world however you want... -/// # todo!(); /// }); /// # } /// ``` /// +/// # Error handling +/// +/// A [`Command`] can return a [`Result`](crate::error::Result), +/// which will be passed to an [error handler](crate::error) if the `Result` is an error. +/// +/// The [default error handler](crate::error::default_error_handler) panics. +/// It can be configured by setting the `GLOBAL_ERROR_HANDLER`. +/// +/// Alternatively, you can customize the error handler for a specific command +/// by calling [`Commands::queue_handled`]. +/// +/// The [`error`](crate::error) module provides some simple error handlers for convenience. 
+/// /// [`ApplyDeferred`]: crate::schedule::ApplyDeferred pub struct Commands<'w, 's> { queue: InternalQueue<'s>, @@ -160,7 +177,7 @@ const _: () = { state: &Self::State, system_meta: &bevy_ecs::system::SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { <(Deferred, &Entities) as bevy_ecs::system::SystemParam>::validate_param( &state.state, system_meta, @@ -198,19 +215,11 @@ enum InternalQueue<'s> { impl<'w, 's> Commands<'w, 's> { /// Returns a new `Commands` instance from a [`CommandQueue`] and a [`World`]. - /// - /// It is not required to call this constructor when using `Commands` as a [system parameter]. - /// - /// [system parameter]: crate::system::SystemParam pub fn new(queue: &'s mut CommandQueue, world: &'w World) -> Self { Self::new_from_entities(queue, &world.entities) } /// Returns a new `Commands` instance from a [`CommandQueue`] and an [`Entities`] reference. - /// - /// It is not required to call this constructor when using `Commands` as a [system parameter]. - /// - /// [system parameter]: crate::system::SystemParam pub fn new_from_entities(queue: &'s mut CommandQueue, entities: &'w Entities) -> Self { Self { queue: InternalQueue::CommandQueue(Deferred(queue)), @@ -224,7 +233,7 @@ impl<'w, 's> Commands<'w, 's> { /// /// # Safety /// - /// * Caller ensures that `queue` must outlive 'w + /// * Caller ensures that `queue` must outlive `'w` pub(crate) unsafe fn new_raw_from_entities( queue: RawCommandQueue, entities: &'w Entities, @@ -236,9 +245,10 @@ impl<'w, 's> Commands<'w, 's> { } /// Returns a [`Commands`] with a smaller lifetime. + /// /// This is useful if you have `&mut Commands` but need `Commands`. 
/// - /// # Examples + /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; @@ -265,7 +275,7 @@ impl<'w, 's> Commands<'w, 's> { } } - /// Take all commands from `other` and append them to `self`, leaving `other` empty + /// Take all commands from `other` and append them to `self`, leaving `other` empty. pub fn append(&mut self, other: &mut CommandQueue) { match &mut self.queue { InternalQueue::CommandQueue(queue) => queue.bytes.append(&mut other.bytes), @@ -276,15 +286,12 @@ impl<'w, 's> Commands<'w, 's> { } } - /// Reserves a new empty [`Entity`] to be spawned, and returns its corresponding [`EntityCommands`]. - /// - /// See [`World::spawn_empty`] for more details. + /// Spawns a new empty [`Entity`] and returns its corresponding [`EntityCommands`]. /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component)] /// struct Label(&'static str); /// #[derive(Component)] @@ -293,14 +300,14 @@ impl<'w, 's> Commands<'w, 's> { /// struct Agility(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create a new empty entity and retrieve its id. - /// let empty_entity = commands.spawn_empty().id(); + /// // Create a new empty entity. + /// commands.spawn_empty(); /// - /// // Create another empty entity, then add some component to it + /// // Create another empty entity. /// commands.spawn_empty() - /// // adds a new component bundle to the entity + /// // Add a new component bundle to the entity. /// .insert((Strength(1), Agility(2))) - /// // adds a single component to the entity + /// // Add a single component to the entity. /// .insert(Label("hello world")); /// } /// # bevy_ecs::system::assert_is_system(example_system); @@ -308,8 +315,9 @@ impl<'w, 's> Commands<'w, 's> { /// /// # See also /// - /// - [`spawn`](Self::spawn) to spawn an entity with a bundle. - /// - [`spawn_batch`](Self::spawn_batch) to spawn entities with a bundle each. + /// - [`spawn`](Self::spawn) to spawn an entity with components. 
+ /// - [`spawn_batch`](Self::spawn_batch) to spawn many entities + /// with the same combination of components. pub fn spawn_empty(&mut self) -> EntityCommands { let entity = self.entities.reserve_entity(); EntityCommands { @@ -318,83 +326,39 @@ impl<'w, 's> Commands<'w, 's> { } } - /// Pushes a [`Command`] to the queue for creating a new [`Entity`] if the given one does not exists, - /// and returns its corresponding [`EntityCommands`]. - /// - /// This method silently fails by returning [`EntityCommands`] - /// even if the given `Entity` cannot be spawned. - /// - /// See [`World::get_or_spawn`] for more details. - /// - /// # Note - /// - /// Spawning a specific `entity` value is rarely the right choice. Most apps should favor - /// [`Commands::spawn`]. This method should generally only be used for sharing entities across - /// apps, and only when they have a scheme worked out to share an ID space (which doesn't happen - /// by default). - #[deprecated(since = "0.15.0", note = "use Commands::spawn instead")] - #[track_caller] - pub fn get_or_spawn(&mut self, entity: Entity) -> EntityCommands { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - self.queue(move |world: &mut World| { - world.get_or_spawn_with_caller( - entity, - #[cfg(feature = "track_change_detection")] - caller, - ); - }); - EntityCommands { - entity, - commands: self.reborrow(), - } - } - - /// Pushes a [`Command`] to the queue for creating a new entity with the given [`Bundle`]'s components, - /// and returns its corresponding [`EntityCommands`]. + /// Spawns a new [`Entity`] with the given components + /// and returns the entity's corresponding [`EntityCommands`]. /// - /// In case multiple bundles of the same [`Bundle`] type need to be spawned, - /// [`spawn_batch`](Self::spawn_batch) should be used for better performance. 
+ /// To spawn many entities with the same combination of components, + /// [`spawn_batch`](Self::spawn_batch) can be used for better performance. /// /// # Example /// /// ``` - /// use bevy_ecs::prelude::*; - /// - /// #[derive(Component)] - /// struct Component1; - /// #[derive(Component)] - /// struct Component2; - /// #[derive(Component)] - /// struct Label(&'static str); + /// # use bevy_ecs::prelude::*; /// #[derive(Component)] - /// struct Strength(u32); + /// struct ComponentA(u32); /// #[derive(Component)] - /// struct Agility(u32); + /// struct ComponentB(u32); /// /// #[derive(Bundle)] /// struct ExampleBundle { - /// a: Component1, - /// b: Component2, + /// a: ComponentA, + /// b: ComponentB, /// } /// /// fn example_system(mut commands: Commands) { /// // Create a new entity with a single component. - /// commands.spawn(Component1); + /// commands.spawn(ComponentA(1)); + /// + /// // Create a new entity with two components using a "tuple bundle". + /// commands.spawn((ComponentA(2), ComponentB(1))); /// /// // Create a new entity with a component bundle. /// commands.spawn(ExampleBundle { - /// a: Component1, - /// b: Component2, + /// a: ComponentA(3), + /// b: ComponentB(2), /// }); - /// - /// commands - /// // Create a new entity with two components using a "tuple bundle". - /// .spawn((Component1, Component2)) - /// // `spawn returns a builder, so you can insert more bundles like this: - /// .insert((Strength(1), Agility(2))) - /// // or insert single components like this: - /// .insert(Label("hello world")); /// } /// # bevy_ecs::system::assert_is_system(example_system); /// ``` @@ -402,7 +366,8 @@ impl<'w, 's> Commands<'w, 's> { /// # See also /// /// - [`spawn_empty`](Self::spawn_empty) to spawn an entity without any components. - /// - [`spawn_batch`](Self::spawn_batch) to spawn entities with a bundle each. + /// - [`spawn_batch`](Self::spawn_batch) to spawn many entities + /// with the same combination of components. 
#[track_caller] pub fn spawn(&mut self, bundle: T) -> EntityCommands { let mut entity = self.spawn_empty(); @@ -410,33 +375,26 @@ impl<'w, 's> Commands<'w, 's> { entity } - /// Returns the [`EntityCommands`] for the requested [`Entity`]. - /// - /// # Panics + /// Returns the [`EntityCommands`] for the given [`Entity`]. /// - /// This method panics if the requested entity does not exist. + /// This method does not guarantee that commands queued by the returned `EntityCommands` + /// will be successful, since the entity could be despawned before they are executed. /// /// # Example /// /// ``` - /// use bevy_ecs::prelude::*; + /// # use bevy_ecs::prelude::*; + /// #[derive(Resource)] + /// struct PlayerEntity { + /// entity: Entity + /// } /// /// #[derive(Component)] /// struct Label(&'static str); - /// #[derive(Component)] - /// struct Strength(u32); - /// #[derive(Component)] - /// struct Agility(u32); - /// - /// fn example_system(mut commands: Commands) { - /// // Create a new, empty entity - /// let entity = commands.spawn_empty().id(); /// - /// commands.entity(entity) - /// // adds a new component bundle to the entity - /// .insert((Strength(1), Agility(2))) - /// // adds a single component to the entity - /// .insert(Label("hello world")); + /// fn example_system(mut commands: Commands, player: Res) { + /// // Get the entity and add a component. + /// commands.entity(player.entity).insert(Label("hello world")); /// } /// # bevy_ecs::system::assert_is_system(example_system); /// ``` @@ -447,144 +405,193 @@ impl<'w, 's> Commands<'w, 's> { #[inline] #[track_caller] pub fn entity(&mut self, entity: Entity) -> EntityCommands { - #[inline(never)] - #[cold] - #[track_caller] - fn panic_no_entity(entities: &Entities, entity: Entity) -> ! 
{ - panic!( - "Attempting to create an EntityCommands for entity {entity:?}, which {}", - entities.entity_does_not_exist_error_details_message(entity) - ); - } - - if self.get_entity(entity).is_some() { - EntityCommands { - entity, - commands: self.reborrow(), - } - } else { - panic_no_entity(self.entities, entity) + EntityCommands { + entity, + commands: self.reborrow(), } } - /// Returns the [`EntityCommands`] for the requested [`Entity`], if it exists. + /// Returns the [`EntityCommands`] for the requested [`Entity`] if it exists. + /// + /// This method does not guarantee that commands queued by the returned `EntityCommands` + /// will be successful, since the entity could be despawned before they are executed. /// - /// Returns `None` if the entity does not exist. + /// # Errors /// - /// This method does not guarantee that `EntityCommands` will be successfully applied, - /// since another command in the queue may delete the entity before them. + /// Returns [`EntityDoesNotExistError`] if the requested entity does not exist. /// /// # Example /// /// ``` - /// use bevy_ecs::prelude::*; + /// # use bevy_ecs::prelude::*; + /// #[derive(Resource)] + /// struct PlayerEntity { + /// entity: Entity + /// } /// /// #[derive(Component)] /// struct Label(&'static str); - /// fn example_system(mut commands: Commands) { - /// // Create a new, empty entity - /// let entity = commands.spawn_empty().id(); /// - /// // Get the entity if it still exists, which it will in this case - /// if let Some(mut entity_commands) = commands.get_entity(entity) { - /// // adds a single component to the entity - /// entity_commands.insert(Label("hello world")); - /// } + /// fn example_system(mut commands: Commands, player: Res) -> Result { + /// // Get the entity if it still exists and store the `EntityCommands`. + /// // If it doesn't exist, the `?` operator will propagate the returned error + /// // to the system, and the system will pass it to an error handler. 
+ /// let mut entity_commands = commands.get_entity(player.entity)?; + /// + /// // Add a component to the entity. + /// entity_commands.insert(Label("hello world")); + /// + /// // Return from the system successfully. + /// Ok(()) /// } /// # bevy_ecs::system::assert_is_system(example_system); /// ``` /// /// # See also /// - /// - [`entity`](Self::entity) for the panicking version. + /// - [`entity`](Self::entity) for the infallible version. #[inline] #[track_caller] - pub fn get_entity(&mut self, entity: Entity) -> Option { - self.entities.contains(entity).then_some(EntityCommands { - entity, - commands: self.reborrow(), - }) + pub fn get_entity( + &mut self, + entity: Entity, + ) -> Result { + if self.entities.contains(entity) { + Ok(EntityCommands { + entity, + commands: self.reborrow(), + }) + } else { + Err(EntityDoesNotExistError::new(entity, self.entities)) + } } - /// Pushes a [`Command`] to the queue for creating entities with a particular [`Bundle`] type. + /// Spawns multiple entities with the same combination of components, + /// based on a batch of [`Bundles`](Bundle). /// - /// `bundles_iter` is a type that can be converted into a [`Bundle`] iterator - /// (it can also be a collection). + /// A batch can be any type that implements [`IntoIterator`] and contains bundles, + /// such as a [`Vec`](alloc::vec::Vec) or an array `[Bundle; N]`. /// - /// This method is equivalent to iterating `bundles_iter` - /// and calling [`spawn`](Self::spawn) on each bundle, - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`spawn`](Self::spawn) for each bundle, + /// but is faster by pre-allocating memory and having exclusive [`World`] access. 
/// /// # Example /// /// ``` - /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Component)] - /// # struct Name(String); - /// # #[derive(Component)] - /// # struct Score(u32); - /// # - /// # fn system(mut commands: Commands) { - /// commands.spawn_batch(vec![ - /// ( - /// Name("Alice".to_string()), - /// Score(0), - /// ), - /// ( - /// Name("Bob".to_string()), - /// Score(0), - /// ), - /// ]); - /// # } - /// # bevy_ecs::system::assert_is_system(system); + /// use bevy_ecs::prelude::*; + /// + /// #[derive(Component)] + /// struct Score(u32); + /// + /// fn example_system(mut commands: Commands) { + /// commands.spawn_batch([ + /// (Name::new("Alice"), Score(0)), + /// (Name::new("Bob"), Score(0)), + /// ]); + /// } + /// # bevy_ecs::system::assert_is_system(example_system); /// ``` /// /// # See also /// - /// - [`spawn`](Self::spawn) to spawn an entity with a bundle. - /// - [`spawn_empty`](Self::spawn_empty) to spawn an entity without any components. + /// - [`spawn`](Self::spawn) to spawn an entity with components. + /// - [`spawn_empty`](Self::spawn_empty) to spawn an entity without components. #[track_caller] - pub fn spawn_batch(&mut self, bundles_iter: I) + pub fn spawn_batch(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, - I::Item: Bundle, + I::Item: Bundle, { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - self.queue(move |world: &mut World| { - SpawnBatchIter::new( - world, - bundles_iter.into_iter(), - #[cfg(feature = "track_change_detection")] - caller, - ); - }); + self.queue(command::spawn_batch(batch)); + } + + /// Pushes a generic [`Command`] to the command queue. + /// + /// If the [`Command`] returns a [`Result`], + /// it will be handled using the [default error handler](crate::error::default_error_handler). + /// + /// To use a custom error handler, see [`Commands::queue_handled`]. + /// + /// The command can be: + /// - A custom struct that implements [`Command`]. 
+ /// - A closure or function that matches one of the following signatures: + /// - [`(&mut World)`](World) + /// - A built-in command from the [`command`] module. + /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// #[derive(Resource, Default)] + /// struct Counter(u64); + /// + /// struct AddToCounter(String); + /// + /// impl Command for AddToCounter { + /// fn apply(self, world: &mut World) -> Result { + /// let mut counter = world.get_resource_or_insert_with(Counter::default); + /// let amount: u64 = self.0.parse()?; + /// counter.0 += amount; + /// Ok(()) + /// } + /// } + /// + /// fn add_three_to_counter_system(mut commands: Commands) { + /// commands.queue(AddToCounter("3".to_string())); + /// } + /// + /// fn add_twenty_five_to_counter_system(mut commands: Commands) { + /// commands.queue(|world: &mut World| { + /// let mut counter = world.get_resource_or_insert_with(Counter::default); + /// counter.0 += 25; + /// }); + /// } + /// # bevy_ecs::system::assert_is_system(add_three_to_counter_system); + /// # bevy_ecs::system::assert_is_system(add_twenty_five_to_counter_system); + /// ``` + pub fn queue + HandleError, T>(&mut self, command: C) { + self.queue_internal(command.handle_error()); } /// Pushes a generic [`Command`] to the command queue. /// - /// `command` can be a built-in command, custom struct that implements [`Command`] or a closure - /// that takes [`&mut World`](World) as an argument. + /// If the [`Command`] returns a [`Result`], + /// the given `error_handler` will be used to handle error cases. + /// + /// To implicitly use the default error handler, see [`Commands::queue`]. + /// + /// The command can be: + /// - A custom struct that implements [`Command`]. + /// - A closure or function that matches one of the following signatures: + /// - [`(&mut World)`](World) + /// - [`(&mut World)`](World) `->` [`Result`] + /// - A built-in command from the [`command`] module. 
+ /// /// # Example /// /// ``` - /// # use bevy_ecs::{world::Command, prelude::*}; + /// # use bevy_ecs::prelude::*; + /// use bevy_ecs::error::warn; + /// /// #[derive(Resource, Default)] /// struct Counter(u64); /// - /// struct AddToCounter(u64); + /// struct AddToCounter(String); /// - /// impl Command for AddToCounter { - /// fn apply(self, world: &mut World) { + /// impl Command for AddToCounter { + /// fn apply(self, world: &mut World) -> Result { /// let mut counter = world.get_resource_or_insert_with(Counter::default); - /// counter.0 += self.0; + /// let amount: u64 = self.0.parse()?; + /// counter.0 += amount; + /// Ok(()) /// } /// } /// /// fn add_three_to_counter_system(mut commands: Commands) { - /// commands.queue(AddToCounter(3)); + /// commands.queue_handled(AddToCounter("3".to_string()), warn); /// } + /// /// fn add_twenty_five_to_counter_system(mut commands: Commands) { /// commands.queue(|world: &mut World| { /// let mut counter = world.get_resource_or_insert_with(Counter::default); @@ -594,7 +601,15 @@ impl<'w, 's> Commands<'w, 's> { /// # bevy_ecs::system::assert_is_system(add_three_to_counter_system); /// # bevy_ecs::system::assert_is_system(add_twenty_five_to_counter_system); /// ``` - pub fn queue(&mut self, command: C) { + pub fn queue_handled + HandleError, T>( + &mut self, + command: C, + error_handler: fn(BevyError, ErrorContext), + ) { + self.queue_internal(command.handle_error_with(error_handler)); + } + + fn queue_internal(&mut self, command: impl Command) { match &mut self.queue { InternalQueue::CommandQueue(queue) => { queue.push(command); @@ -621,7 +636,7 @@ impl<'w, 's> Commands<'w, 's> { /// Then, the `Bundle` is added to the entity. /// /// This method is equivalent to iterating `bundles_iter`, - /// calling [`get_or_spawn`](Self::get_or_spawn) for each bundle, + /// calling [`spawn`](Self::spawn) for each bundle, /// and passing it to [`insert`](EntityCommands::insert), /// but it is faster due to memory pre-allocation. 
/// @@ -631,21 +646,28 @@ impl<'w, 's> Commands<'w, 's> { /// This method should generally only be used for sharing entities across apps, and only when they have a scheme /// worked out to share an ID space (which doesn't happen by default). #[track_caller] + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used with lots of arbitrary free entities. See #18054 on GitHub." + )] pub fn insert_or_spawn_batch(&mut self, bundles_iter: I) where I: IntoIterator + Send + Sync + 'static, - B: Bundle, + B: Bundle, { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); self.queue(move |world: &mut World| { + + #[expect( + deprecated, + reason = "This needs to be supported for now, and the outer item is deprecated too." + )] if let Err(invalid_entities) = world.insert_or_spawn_batch_with_caller( bundles_iter, - #[cfg(feature = "track_change_detection")] caller, ) { error!( - "Failed to 'insert or spawn' bundle of type {} into the following invalid entities: {:?}", + "{caller}: Failed to 'insert or spawn' bundle of type {} into the following invalid entities: {:?}", core::any::type_name::(), invalid_entities ); @@ -653,255 +675,281 @@ impl<'w, 's> Commands<'w, 's> { }); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). + /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`] or an array `[(Entity, Bundle); N]`. + /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. 
/// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, overwriting any existing components shared by the `Bundle`. + /// This will overwrite any pre-existing components shared by the [`Bundle`] type. + /// Use [`Commands::insert_batch_if_new`] to keep the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`entity`](Self::entity) for each pair, - /// and passing the bundle to [`insert`](EntityCommands::insert), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert`](EntityCommands::insert) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// # Panics + /// # Fallible /// - /// This command panics if any of the given entities do not exist. + /// This command will fail if any of the given entities do not exist. /// - /// For the non-panicking version, see [`try_insert_batch`](Self::try_insert_batch). + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by the [default error handler](crate::error::default_error_handler). #[track_caller] pub fn insert_batch(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, - B: Bundle, + B: Bundle, { - self.queue(insert_batch(batch, InsertMode::Replace)); + self.queue(command::insert_batch(batch, InsertMode::Replace)); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). + /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`] or an array `[(Entity, Bundle); N]`. 
+ /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. /// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, except for any components already present on the `Entity`. + /// This will keep any pre-existing components shared by the [`Bundle`] type + /// and discard the new values. + /// Use [`Commands::insert_batch`] to overwrite the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`entity`](Self::entity) for each pair, - /// and passing the bundle to [`insert_if_new`](EntityCommands::insert_if_new), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert_if_new`](EntityCommands::insert_if_new) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// # Panics + /// # Fallible /// - /// This command panics if any of the given entities do not exist. + /// This command will fail if any of the given entities do not exist. /// - /// For the non-panicking version, see [`try_insert_batch_if_new`](Self::try_insert_batch_if_new). + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by the [default error handler](crate::error::default_error_handler). #[track_caller] pub fn insert_batch_if_new(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, - B: Bundle, + B: Bundle, { - self.queue(insert_batch(batch, InsertMode::Keep)); + self.queue(command::insert_batch(batch, InsertMode::Keep)); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). 
+ /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`] or an array `[(Entity, Bundle); N]`. + /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. /// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, overwriting any existing components shared by the `Bundle`. + /// This will overwrite any pre-existing components shared by the [`Bundle`] type. + /// Use [`Commands::try_insert_batch_if_new`] to keep the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`get_entity`](Self::get_entity) for each pair, - /// and passing the bundle to [`insert`](EntityCommands::insert), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert`](EntityCommands::insert) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// This command silently fails by ignoring any entities that do not exist. + /// # Fallible /// - /// For the panicking version, see [`insert_batch`](Self::insert_batch). + /// This command will fail if any of the given entities do not exist. + /// + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by [logging the error at the `warn` level](warn). 
#[track_caller] pub fn try_insert_batch(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, - B: Bundle, + B: Bundle, { - self.queue(try_insert_batch(batch, InsertMode::Replace)); + self.queue(command::insert_batch(batch, InsertMode::Replace).handle_error_with(warn)); } - /// Pushes a [`Command`] to the queue for adding a [`Bundle`] type to a batch of [`Entities`](Entity). + /// Adds a series of [`Bundles`](Bundle) to each [`Entity`] they are paired with, + /// based on a batch of `(Entity, Bundle)` pairs. /// - /// A batch can be any type that implements [`IntoIterator`] containing `(Entity, Bundle)` tuples, - /// such as a [`Vec<(Entity, Bundle)>`] or an array `[(Entity, Bundle); N]`. + /// A batch can be any type that implements [`IntoIterator`] + /// and contains `(Entity, Bundle)` tuples, + /// such as a [`Vec<(Entity, Bundle)>`](alloc::vec::Vec) + /// or an array `[(Entity, Bundle); N]`. /// - /// When the command is applied, for each `(Entity, Bundle)` pair in the given batch, - /// the `Bundle` is added to the `Entity`, except for any components already present on the `Entity`. + /// This will keep any pre-existing components shared by the [`Bundle`] type + /// and discard the new values. + /// Use [`Commands::try_insert_batch`] to overwrite the pre-existing components instead. /// - /// This method is equivalent to iterating the batch, - /// calling [`get_entity`](Self::get_entity) for each pair, - /// and passing the bundle to [`insert_if_new`](EntityCommands::insert_if_new), - /// but it is faster due to memory pre-allocation. + /// This method is equivalent to iterating the batch + /// and calling [`insert_if_new`](EntityCommands::insert_if_new) for each pair, + /// but is faster by caching data that is shared between entities. /// - /// This command silently fails by ignoring any entities that do not exist. + /// # Fallible /// - /// For the panicking version, see [`insert_batch_if_new`](Self::insert_batch_if_new). 
+ /// This command will fail if any of the given entities do not exist. + /// + /// It will internally return a [`TryInsertBatchError`](crate::world::error::TryInsertBatchError), + /// which will be handled by [logging the error at the `warn` level](warn). #[track_caller] pub fn try_insert_batch_if_new(&mut self, batch: I) where I: IntoIterator + Send + Sync + 'static, - B: Bundle, + B: Bundle, { - self.queue(try_insert_batch(batch, InsertMode::Keep)); + self.queue(command::insert_batch(batch, InsertMode::Keep).handle_error_with(warn)); } - /// Pushes a [`Command`] to the queue for inserting a [`Resource`] in the [`World`] with an inferred value. + /// Inserts a [`Resource`] into the [`World`] with an inferred value. /// /// The inferred value is determined by the [`FromWorld`] trait of the resource. - /// When the command is applied, - /// if the resource already exists, nothing happens. + /// Note that any resource with the [`Default`] trait automatically implements [`FromWorld`], + /// and those default values will be used. /// - /// See [`World::init_resource`] for more details. + /// If the resource already exists when the command is applied, nothing happens. 
/// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Resource, Default)] - /// # struct Scoreboard { - /// # current_score: u32, - /// # high_score: u32, - /// # } - /// # - /// # fn initialize_scoreboard(mut commands: Commands) { - /// commands.init_resource::(); - /// # } + /// #[derive(Resource, Default)] + /// struct Scoreboard { + /// current_score: u32, + /// high_score: u32, + /// } + /// + /// fn initialize_scoreboard(mut commands: Commands) { + /// commands.init_resource::(); + /// } /// # bevy_ecs::system::assert_is_system(initialize_scoreboard); /// ``` #[track_caller] pub fn init_resource(&mut self) { - self.queue(move |world: &mut World| { - world.init_resource::(); - }); + self.queue(command::init_resource::()); } - /// Pushes a [`Command`] to the queue for inserting a [`Resource`] in the [`World`] with a specific value. + /// Inserts a [`Resource`] into the [`World`] with a specific value. /// /// This will overwrite any previous value of the same resource type. /// - /// See [`World::insert_resource`] for more details. 
- /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Resource)] - /// # struct Scoreboard { - /// # current_score: u32, - /// # high_score: u32, - /// # } - /// # - /// # fn system(mut commands: Commands) { - /// commands.insert_resource(Scoreboard { - /// current_score: 0, - /// high_score: 0, - /// }); - /// # } + /// #[derive(Resource)] + /// struct Scoreboard { + /// current_score: u32, + /// high_score: u32, + /// } + /// + /// fn system(mut commands: Commands) { + /// commands.insert_resource(Scoreboard { + /// current_score: 0, + /// high_score: 0, + /// }); + /// } /// # bevy_ecs::system::assert_is_system(system); /// ``` #[track_caller] pub fn insert_resource(&mut self, resource: R) { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - self.queue(move |world: &mut World| { - world.insert_resource_with_caller( - resource, - #[cfg(feature = "track_change_detection")] - caller, - ); - }); + self.queue(command::insert_resource(resource)); } - /// Pushes a [`Command`] to the queue for removing a [`Resource`] from the [`World`]. - /// - /// See [`World::remove_resource`] for more details. + /// Removes a [`Resource`] from the [`World`]. /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Resource)] - /// # struct Scoreboard { - /// # current_score: u32, - /// # high_score: u32, - /// # } - /// # - /// # fn system(mut commands: Commands) { - /// commands.remove_resource::(); - /// # } + /// #[derive(Resource)] + /// struct Scoreboard { + /// current_score: u32, + /// high_score: u32, + /// } + /// + /// fn system(mut commands: Commands) { + /// commands.remove_resource::(); + /// } /// # bevy_ecs::system::assert_is_system(system); /// ``` pub fn remove_resource(&mut self) { - self.queue(move |world: &mut World| { - world.remove_resource::(); - }); + self.queue(command::remove_resource::()); } /// Runs the system corresponding to the given [`SystemId`]. 
- /// Systems are ran in an exclusive and single threaded way. - /// Running slow systems can become a bottleneck. + /// Before running a system, it must first be registered via + /// [`Commands::register_system`] or [`World::register_system`]. /// - /// Calls [`World::run_system`](World::run_system). + /// The system is run in an exclusive and single-threaded way. + /// Running slow systems can become a bottleneck. /// /// There is no way to get the output of a system when run as a command, because the /// execution of the system happens later. To get the output of a system, use /// [`World::run_system`] or [`World::run_system_with`] instead of running the system as a command. + /// + /// # Fallible + /// + /// This command will fail if the given [`SystemId`] + /// does not correspond to a [`System`](crate::system::System). + /// + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). pub fn run_system(&mut self, id: SystemId) { - self.run_system_with(id, ()); + self.queue(command::run_system(id).handle_error_with(warn)); } - /// Runs the system corresponding to the given [`SystemId`]. - /// Systems are ran in an exclusive and single threaded way. - /// Running slow systems can become a bottleneck. + /// Runs the system corresponding to the given [`SystemId`] with input. + /// Before running a system, it must first be registered via + /// [`Commands::register_system`] or [`World::register_system`]. /// - /// Calls [`World::run_system_with`](World::run_system_with). + /// The system is run in an exclusive and single-threaded way. + /// Running slow systems can become a bottleneck. /// /// There is no way to get the output of a system when run as a command, because the /// execution of the system happens later. 
To get the output of a system, use /// [`World::run_system`] or [`World::run_system_with`] instead of running the system as a command. + /// + /// # Fallible + /// + /// This command will fail if the given [`SystemId`] + /// does not correspond to a [`System`](crate::system::System). + /// + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). pub fn run_system_with(&mut self, id: SystemId, input: I::Inner<'static>) where I: SystemInput: Send> + 'static, { - self.queue(RunSystemWith::new_with_input(id, input)); + self.queue(command::run_system_with(id, input).handle_error_with(warn)); } - /// Registers a system and returns a [`SystemId`] so it can later be called by [`World::run_system`]. - /// - /// It's possible to register the same systems more than once, they'll be stored separately. + /// Registers a system and returns its [`SystemId`] so it can later be called by + /// [`Commands::run_system`] or [`World::run_system`]. /// /// This is different from adding systems to a [`Schedule`](crate::schedule::Schedule), /// because the [`SystemId`] that is returned can be used anywhere in the [`World`] to run the associated system. - /// This allows for running systems in a push-based fashion. + /// /// Using a [`Schedule`](crate::schedule::Schedule) is still preferred for most cases /// due to its better performance and ability to run non-conflicting systems simultaneously. /// - /// If you want to prevent Commands from registering the same system multiple times, consider using [`Local`](crate::system::Local) + /// # Note + /// + /// If the same system is registered more than once, + /// each registration will be considered a different system, + /// and they will each be given their own [`SystemId`]. 
+ /// + /// If you want to avoid registering the same system multiple times, + /// consider using [`Commands::run_system_cached`] or storing the [`SystemId`] + /// in a [`Local`](crate::system::Local). /// /// # Example /// /// ``` /// # use bevy_ecs::{prelude::*, world::CommandQueue, system::SystemId}; - /// /// #[derive(Resource)] /// struct Counter(i32); /// - /// fn register_system(mut local_system: Local>, mut commands: Commands) { + /// fn register_system( + /// mut commands: Commands, + /// mut local_system: Local>, + /// ) { /// if let Some(system) = *local_system { /// commands.run_system(system); /// } else { @@ -939,78 +987,134 @@ impl<'w, 's> Commands<'w, 's> { O: Send + 'static, { let entity = self.spawn_empty().id(); - self.queue(RegisterSystem::new(system, entity)); + let system = RegisteredSystem::::new(Box::new(IntoSystem::into_system(system))); + self.entity(entity).insert(system); SystemId::from_entity(entity) } - /// Removes a system previously registered with [`Commands::register_system`] or [`World::register_system`]. + /// Removes a system previously registered with [`Commands::register_system`] + /// or [`World::register_system`]. + /// + /// After removing a system, the [`SystemId`] becomes invalid + /// and attempting to use it afterwards will result in an error. + /// Re-adding the removed system will register it with a new `SystemId`. /// - /// See [`World::unregister_system`] for more information. + /// # Fallible + /// + /// This command will fail if the given [`SystemId`] + /// does not correspond to a [`System`](crate::system::System). + /// + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). 
pub fn unregister_system(&mut self, system_id: SystemId) where I: SystemInput + Send + 'static, O: Send + 'static, { - self.queue(UnregisterSystem::new(system_id)); + self.queue(command::unregister_system(system_id).handle_error_with(warn)); } - /// Removes a system previously registered with [`World::register_system_cached`]. + /// Removes a system previously registered with one of the following: + /// - [`Commands::run_system_cached`] + /// - [`World::run_system_cached`] + /// - [`World::register_system_cached`] + /// + /// # Fallible + /// + /// This command will fail if the given system + /// is not currently cached in a [`CachedSystemId`](crate::system::CachedSystemId) resource. /// - /// See [`World::unregister_system_cached`] for more information. - pub fn unregister_system_cached< + /// It will internally return a [`RegisteredSystemError`](crate::system::system_registry::RegisteredSystemError), + /// which will be handled by [logging the error at the `warn` level](warn). + pub fn unregister_system_cached(&mut self, system: S) + where I: SystemInput + Send + 'static, O: 'static, M: 'static, S: IntoSystem + Send + 'static, - >( - &mut self, - system: S, - ) { - self.queue(UnregisterSystemCached::new(system)); + { + self.queue(command::unregister_system_cached(system).handle_error_with(warn)); } - /// Similar to [`Self::run_system`], but caching the [`SystemId`] in a - /// [`CachedSystemId`](crate::system::CachedSystemId) resource. + /// Runs a cached system, registering it if necessary. /// - /// See [`World::register_system_cached`] for more information. - pub fn run_system_cached + Send + 'static>( - &mut self, - system: S, - ) { - self.run_system_cached_with(system, ()); + /// Unlike [`Commands::run_system`], this method does not require manual registration. 
+ /// + /// The first time this method is called for a particular system, + /// it will register the system and store its [`SystemId`] in a + /// [`CachedSystemId`](crate::system::CachedSystemId) resource for later. + /// + /// If you would rather manage the [`SystemId`] yourself, + /// or register multiple copies of the same system, + /// use [`Commands::register_system`] instead. + /// + /// # Limitations + /// + /// This method only accepts ZST (zero-sized) systems to guarantee that any two systems of + /// the same type must be equal. This means that closures that capture the environment, and + /// function pointers, are not accepted. + /// + /// If you want to access values from the environment within a system, + /// consider passing them in as inputs via [`Commands::run_system_cached_with`]. + /// + /// If that's not an option, consider [`Commands::register_system`] instead. + pub fn run_system_cached(&mut self, system: S) + where + M: 'static, + S: IntoSystem<(), (), M> + Send + 'static, + { + self.queue(command::run_system_cached(system).handle_error_with(warn)); } - /// Similar to [`Self::run_system_with`], but caching the [`SystemId`] in a - /// [`CachedSystemId`](crate::system::CachedSystemId) resource. + /// Runs a cached system with an input, registering it if necessary. + /// + /// Unlike [`Commands::run_system_with`], this method does not require manual registration. + /// + /// The first time this method is called for a particular system, + /// it will register the system and store its [`SystemId`] in a + /// [`CachedSystemId`](crate::system::CachedSystemId) resource for later. + /// + /// If you would rather manage the [`SystemId`] yourself, + /// or register multiple copies of the same system, + /// use [`Commands::register_system`] instead. + /// + /// # Limitations + /// + /// This method only accepts ZST (zero-sized) systems to guarantee that any two systems of + /// the same type must be equal. 
This means that closures that capture the environment, and + /// function pointers, are not accepted. /// - /// See [`World::register_system_cached`] for more information. + /// If you want to access values from the environment within a system, + /// consider passing them in as inputs. + /// + /// If that's not an option, consider [`Commands::register_system`] instead. pub fn run_system_cached_with(&mut self, system: S, input: I::Inner<'static>) where I: SystemInput: Send> + Send + 'static, M: 'static, S: IntoSystem + Send + 'static, { - self.queue(RunSystemCachedWith::new(system, input)); + self.queue(command::run_system_cached_with(system, input).handle_error_with(warn)); } - /// Sends a "global" [`Trigger`] without any targets. This will run any [`Observer`] of the `event` that - /// isn't scoped to specific targets. + /// Sends a "global" [`Trigger`](crate::observer::Trigger) without any targets. /// - /// [`Trigger`]: crate::observer::Trigger + /// This will run any [`Observer`] of the given [`Event`] that isn't scoped to specific targets. + #[track_caller] pub fn trigger(&mut self, event: impl Event) { - self.queue(TriggerEvent { event, targets: () }); + self.queue(command::trigger(event)); } - /// Sends a [`Trigger`] for the given targets. This will run any [`Observer`] of the `event` that - /// watches those targets. + /// Sends a [`Trigger`](crate::observer::Trigger) for the given targets. /// - /// [`Trigger`]: crate::observer::Trigger + /// This will run any [`Observer`] of the given [`Event`] watching those targets. + #[track_caller] pub fn trigger_targets( &mut self, event: impl Event, targets: impl TriggerTargets + Send + Sync + 'static, ) { - self.queue(TriggerEvent { event, targets }); + self.queue(command::trigger_targets(event, targets)); } /// Spawns an [`Observer`] and returns the [`EntityCommands`] associated @@ -1028,21 +1132,19 @@ impl<'w, 's> Commands<'w, 's> { /// Sends an arbitrary [`Event`]. 
/// - /// This is a convenience method for sending events without requiring an [`EventWriter`]. - /// ## Performance + /// This is a convenience method for sending events + /// without requiring an [`EventWriter`](crate::event::EventWriter). + /// + /// # Performance + /// /// Since this is a command, exclusive world access is used, which means that it will not profit from /// system-level parallelism on supported platforms. - /// If these events are performance-critical or very frequently - /// sent, consider using a typed [`EventWriter`] instead. /// - /// [`EventWriter`]: crate::event::EventWriter + /// If these events are performance-critical or very frequently sent, + /// consider using a typed [`EventWriter`](crate::event::EventWriter) instead. #[track_caller] pub fn send_event(&mut self, event: E) -> &mut Self { - self.queue(SendEvent { - event, - #[cfg(feature = "track_change_detection")] - caller: Location::caller(), - }); + self.queue(command::send_event(event)); self } @@ -1050,17 +1152,21 @@ impl<'w, 's> Commands<'w, 's> { /// /// Calls [`World::try_run_schedule`](World::try_run_schedule). /// - /// This will log an error if the schedule is not available to be run. + /// # Fallible /// - /// # Examples + /// This command will fail if the given [`ScheduleLabel`] + /// does not correspond to a [`Schedule`](crate::schedule::Schedule). /// - /// ``` - /// # use bevy_ecs::prelude::*; + /// It will internally return a [`TryRunScheduleError`](crate::world::error::TryRunScheduleError), + /// which will be handled by [logging the error at the `warn` level](warn). 
+ /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::prelude::*; /// # use bevy_ecs::schedule::ScheduleLabel; - /// # /// # #[derive(Default, Resource)] /// # struct Counter(u32); - /// # /// #[derive(ScheduleLabel, Hash, Debug, PartialEq, Eq, Clone, Copy)] /// struct FooSchedule; /// @@ -1086,87 +1192,44 @@ impl<'w, 's> Commands<'w, 's> { /// # assert_eq!(world.resource::().0, 1); /// ``` pub fn run_schedule(&mut self, label: impl ScheduleLabel) { - self.queue(|world: &mut World| { - if let Err(error) = world.try_run_schedule(label) { - panic!("Failed to run schedule: {error}"); - } - }); + self.queue(command::run_schedule(label).handle_error_with(warn)); } } -/// A [`Command`] which gets executed for a given [`Entity`]. +/// A list of commands that will be run to modify an [`Entity`]. /// -/// # Examples +/// # Note /// -/// ``` -/// # use std::collections::HashSet; -/// # use bevy_ecs::prelude::*; -/// use bevy_ecs::system::EntityCommand; -/// # -/// # #[derive(Component, PartialEq)] -/// # struct Name(String); -/// # impl Name { -/// # fn new(s: String) -> Self { Name(s) } -/// # fn as_str(&self) -> &str { &self.0 } -/// # } +/// Most [`Commands`] (and thereby [`EntityCommands`]) are deferred: +/// when you call the command, if it requires mutable access to the [`World`] +/// (that is, if it removes, adds, or changes something), it's not executed immediately. /// -/// #[derive(Resource, Default)] -/// struct Counter(i64); +/// Instead, the command is added to a "command queue." +/// The command queue is applied later +/// when the [`ApplyDeferred`](crate::schedule::ApplyDeferred) system runs. +/// Commands are executed one-by-one so that +/// each command can have exclusive access to the `World`. /// -/// /// A `Command` which names an entity based on a global counter. -/// fn count_name(entity: Entity, world: &mut World) { -/// // Get the current value of the counter, and increment it for next time. 
-/// let mut counter = world.resource_mut::(); -/// let i = counter.0; -/// counter.0 += 1; +/// # Fallible /// -/// // Name the entity after the value of the counter. -/// world.entity_mut(entity).insert(Name::new(format!("Entity #{i}"))); -/// } +/// Due to their deferred nature, an entity you're trying to change with an [`EntityCommand`] +/// can be despawned by the time the command is executed. /// -/// // App creation boilerplate omitted... -/// # let mut world = World::new(); -/// # world.init_resource::(); -/// # -/// # let mut setup_schedule = Schedule::default(); -/// # setup_schedule.add_systems(setup); -/// # let mut assert_schedule = Schedule::default(); -/// # assert_schedule.add_systems(assert_names); -/// # -/// # setup_schedule.run(&mut world); -/// # assert_schedule.run(&mut world); +/// All deferred entity commands will check whether the entity exists at the time of execution +/// and will return an error if it doesn't. /// -/// fn setup(mut commands: Commands) { -/// commands.spawn_empty().queue(count_name); -/// commands.spawn_empty().queue(count_name); -/// } +/// # Error handling /// -/// fn assert_names(named: Query<&Name>) { -/// // We use a HashSet because we do not care about the order. -/// let names: HashSet<_> = named.iter().map(Name::as_str).collect(); -/// assert_eq!(names, HashSet::from_iter(["Entity #0", "Entity #1"])); -/// } -/// ``` -pub trait EntityCommand: Send + 'static { - /// Executes this command for the given [`Entity`]. - fn apply(self, entity: Entity, world: &mut World); - - /// Returns a [`Command`] which executes this [`EntityCommand`] for the given [`Entity`]. - /// - /// This method is called when adding an [`EntityCommand`] to a command queue via [`Commands`]. - /// You can override the provided implementation if you can return a `Command` with a smaller memory - /// footprint than `(Entity, Self)`. - /// In most cases the provided implementation is sufficient. 
- #[must_use = "commands do nothing unless applied to a `World`"] - fn with_entity(self, entity: Entity) -> impl Command - where - Self: Sized, - { - move |world: &mut World| self.apply(entity, world) - } -} - -/// A list of commands that will be run to modify an [entity](crate::entity). +/// An [`EntityCommand`] can return a [`Result`](crate::error::Result), +/// which will be passed to an [error handler](crate::error) if the `Result` is an error. +/// +/// The [default error handler](crate::error::default_error_handler) panics. +/// It can be configured by setting the `GLOBAL_ERROR_HANDLER`. +/// +/// Alternatively, you can customize the error handler for a specific command +/// by calling [`EntityCommands::queue_handled`]. +/// +/// The [`error`](crate::error) module provides some simple error handlers for convenience. pub struct EntityCommands<'a> { pub(crate) entity: Entity, pub(crate) commands: Commands<'a, 'a>, @@ -1192,6 +1255,7 @@ impl<'a> EntityCommands<'a> { } /// Returns an [`EntityCommands`] with a smaller lifetime. + /// /// This is useful if you have `&mut EntityCommands` but you need `EntityCommands`. pub fn reborrow(&mut self) -> EntityCommands { EntityCommands { @@ -1203,7 +1267,8 @@ impl<'a> EntityCommands<'a> { /// Get an [`EntityEntryCommands`] for the [`Component`] `T`, /// allowing you to modify it or insert it if it isn't already present. /// - /// See also [`insert_if_new`](Self::insert_if_new), which lets you insert a [`Bundle`] without overwriting it. + /// See also [`insert_if_new`](Self::insert_if_new), + /// which lets you insert a [`Bundle`] without overwriting it. /// /// # Example /// @@ -1218,9 +1283,9 @@ impl<'a> EntityCommands<'a> { /// commands /// .entity(player.entity) /// .entry::() - /// // Modify the component if it exists + /// // Modify the component if it exists. /// .and_modify(|mut lvl| lvl.0 += 1) - /// // Otherwise insert a default value + /// // Otherwise, insert a default value. 
/// .or_insert(Level(0)); /// } /// # bevy_ecs::system::assert_is_system(level_up_system); @@ -1237,12 +1302,6 @@ impl<'a> EntityCommands<'a> { /// This will overwrite any previous value(s) of the same component type. /// See [`EntityCommands::insert_if_new`] to keep the old value instead. /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert`] instead. - /// /// # Example /// /// ``` @@ -1286,17 +1345,12 @@ impl<'a> EntityCommands<'a> { /// ``` #[track_caller] pub fn insert(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue(insert(bundle, InsertMode::Replace)) + self.queue(entity_command::insert(bundle, InsertMode::Replace)) } - /// Similar to [`Self::insert`] but will only insert if the predicate returns true. - /// This is useful for chaining method calls. + /// Adds a [`Bundle`] of components to the entity if the predicate returns true. /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert_if`] instead. + /// This is useful for chaining method calls. /// /// # Example /// @@ -1333,36 +1387,20 @@ impl<'a> EntityCommands<'a> { /// Adds a [`Bundle`] of components to the entity without overwriting. /// /// This is the same as [`EntityCommands::insert`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. + /// components will leave the old values instead of replacing them with new ones. /// /// See also [`entry`](Self::entry), which lets you modify a [`Component`] if it's present, /// as well as initialize it with a default value. - /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert_if_new`] instead. 
#[track_caller] pub fn insert_if_new(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue(insert(bundle, InsertMode::Keep)) + self.queue(entity_command::insert(bundle, InsertMode::Keep)) } /// Adds a [`Bundle`] of components to the entity without overwriting if the /// predicate returns true. /// /// This is the same as [`EntityCommands::insert_if`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. - /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not - /// exist. - /// - /// To avoid a panic in this case, use the command [`Self::try_insert_if_new`] - /// instead. + /// components will leave the old values instead of replacing them with new ones. #[track_caller] pub fn insert_if_new_and(&mut self, bundle: impl Bundle, condition: F) -> &mut Self where @@ -1375,15 +1413,11 @@ impl<'a> EntityCommands<'a> { } } - /// Adds a dynamic component to an entity. - /// - /// See [`EntityWorldMut::insert_by_id`] for more information. - /// - /// # Panics + /// Adds a dynamic [`Component`] to the entity. /// - /// The command will panic when applied if the associated entity does not exist. + /// This will overwrite any previous value(s) of the same component type. /// - /// To avoid a panic in this case, use the command [`Self::try_insert_by_id`] instead. + /// You should prefer to use the typed API [`EntityCommands::insert`] where possible. 
/// /// # Safety /// @@ -1395,24 +1429,24 @@ impl<'a> EntityCommands<'a> { component_id: ComponentId, value: T, ) -> &mut Self { - let caller = Location::caller(); - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - // SAFETY: - // - `component_id` safety is ensured by the caller - // - `ptr` is valid within the `make` block - OwningPtr::make(value, |ptr| unsafe { - entity.insert_by_id(component_id, ptr); - }); - } else { - panic!("error[B0003]: {caller}: Could not insert a component {component_id:?} (with type {}) for entity {entity:?}, which {}. See: https://bevyengine.org/learn/errors/b0003", core::any::type_name::(), world.entities().entity_does_not_exist_error_details_message(entity)); - } - }) + self.queue( + // SAFETY: + // - `ComponentId` safety is ensured by the caller. + // - `T` safety is ensured by the caller. + unsafe { entity_command::insert_by_id(component_id, value, InsertMode::Replace) }, + ) } - /// Attempts to add a dynamic component to an entity. + /// Adds a dynamic [`Component`] to the entity. + /// + /// This will overwrite any previous value(s) of the same component type. /// - /// See [`EntityWorldMut::insert_by_id`] for more information. + /// You should prefer to use the typed API [`EntityCommands::try_insert`] where possible. + /// + /// # Note + /// + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. 
/// /// # Safety /// @@ -1424,25 +1458,23 @@ impl<'a> EntityCommands<'a> { component_id: ComponentId, value: T, ) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - // SAFETY: - // - `component_id` safety is ensured by the caller - // - `ptr` is valid within the `make` block - OwningPtr::make(value, |ptr| unsafe { - entity.insert_by_id(component_id, ptr); - }); - } - }) + self.queue_handled( + // SAFETY: + // - `ComponentId` safety is ensured by the caller. + // - `T` safety is ensured by the caller. + unsafe { entity_command::insert_by_id(component_id, value, InsertMode::Replace) }, + ignore, + ) } - /// Tries to add a [`Bundle`] of components to the entity. + /// Adds a [`Bundle`] of components to the entity. /// /// This will overwrite any previous value(s) of the same component type. /// /// # Note /// - /// Unlike [`Self::insert`], this will not panic if the associated entity does not exist. + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. /// /// # Example /// @@ -1464,57 +1496,36 @@ impl<'a> EntityCommands<'a> { /// } /// /// fn add_combat_stats_system(mut commands: Commands, player: Res) { - /// commands.entity(player.entity) - /// // You can try_insert individual components: - /// .try_insert(Defense(10)) - /// - /// // You can also insert tuples of components: - /// .try_insert(CombatBundle { - /// health: Health(100), - /// strength: Strength(40), - /// }); + /// commands.entity(player.entity) + /// // You can insert individual components: + /// .try_insert(Defense(10)) + /// // You can also insert tuples of components: + /// .try_insert(CombatBundle { + /// health: Health(100), + /// strength: Strength(40), + /// }); /// - /// // Suppose this occurs in a parallel adjacent system or process - /// commands.entity(player.entity) - /// .despawn(); + /// // Suppose this occurs in a parallel adjacent system or process. 
+ /// commands.entity(player.entity).despawn(); /// - /// commands.entity(player.entity) - /// // This will not panic nor will it add the component - /// .try_insert(Defense(5)); + /// // This will not panic nor will it add the component. + /// commands.entity(player.entity).try_insert(Defense(5)); /// } /// # bevy_ecs::system::assert_is_system(add_combat_stats_system); /// ``` #[track_caller] pub fn try_insert(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue(try_insert(bundle, InsertMode::Replace)) + self.queue_handled(entity_command::insert(bundle, InsertMode::Replace), ignore) } - /// Similar to [`Self::try_insert`] but will only try to insert if the predicate returns true. - /// This is useful for chaining method calls. - /// - /// # Example + /// Adds a [`Bundle`] of components to the entity if the predicate returns true. /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # #[derive(Resource)] - /// # struct PlayerEntity { entity: Entity } - /// # impl PlayerEntity { fn is_spectator(&self) -> bool { true } } - /// #[derive(Component)] - /// struct StillLoadingStats; - /// #[derive(Component)] - /// struct Health(u32); + /// This is useful for chaining method calls. /// - /// fn add_health_system(mut commands: Commands, player: Res) { - /// commands.entity(player.entity) - /// .try_insert_if(Health(10), || !player.is_spectator()) - /// .remove::(); + /// # Note /// - /// commands.entity(player.entity) - /// // This will not panic nor will it add the component - /// .try_insert_if(Health(5), || !player.is_spectator()); - /// } - /// # bevy_ecs::system::assert_is_system(add_health_system); - /// ``` + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. 
#[track_caller] pub fn try_insert_if(&mut self, bundle: impl Bundle, condition: F) -> &mut Self where @@ -1527,41 +1538,16 @@ impl<'a> EntityCommands<'a> { } } - /// Tries to add a [`Bundle`] of components to the entity without overwriting if the + /// Adds a [`Bundle`] of components to the entity without overwriting if the /// predicate returns true. /// /// This is the same as [`EntityCommands::try_insert_if`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. + /// components will leave the old values instead of replacing them with new ones. /// /// # Note /// - /// Unlike [`Self::insert_if_new_and`], this will not panic if the associated entity does - /// not exist. - /// - /// # Example - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # #[derive(Resource)] - /// # struct PlayerEntity { entity: Entity } - /// # impl PlayerEntity { fn is_spectator(&self) -> bool { true } } - /// #[derive(Component)] - /// struct StillLoadingStats; - /// #[derive(Component)] - /// struct Health(u32); - /// - /// fn add_health_system(mut commands: Commands, player: Res) { - /// commands.entity(player.entity) - /// .try_insert_if(Health(10), || player.is_spectator()) - /// .remove::(); - /// - /// commands.entity(player.entity) - /// // This will not panic nor will it overwrite the component - /// .try_insert_if_new_and(Health(5), || player.is_spectator()); - /// } - /// # bevy_ecs::system::assert_is_system(add_health_system); - /// ``` + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn try_insert_if_new_and(&mut self, bundle: impl Bundle, condition: F) -> &mut Self where @@ -1574,27 +1560,31 @@ impl<'a> EntityCommands<'a> { } } - /// Tries to add a [`Bundle`] of components to the entity without overwriting. + /// Adds a [`Bundle`] of components to the entity without overwriting. 
/// /// This is the same as [`EntityCommands::try_insert`], but in case of duplicate - /// components will leave the old values instead of replacing them with new - /// ones. + /// components will leave the old values instead of replacing them with new ones. /// /// # Note /// - /// Unlike [`Self::insert_if_new`], this will not panic if the associated entity does not exist. + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn try_insert_if_new(&mut self, bundle: impl Bundle) -> &mut Self { - self.queue(try_insert(bundle, InsertMode::Keep)) + self.queue_handled(entity_command::insert(bundle, InsertMode::Keep), ignore) } /// Removes a [`Bundle`] of components from the entity. /// + /// This will remove all components that intersect with the provided bundle; + /// the entity does not need to have all the components in the bundle. + /// + /// This will emit a warning if the entity does not exist. + /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # /// # #[derive(Resource)] /// # struct PlayerEntity { entity: Entity } /// #[derive(Component)] @@ -1615,7 +1605,7 @@ impl<'a> EntityCommands<'a> { /// .entity(player.entity) /// // You can remove individual components: /// .remove::() - /// // You can also remove pre-defined Bundles of components: + /// // You can also remove pre-defined bundles of components: /// .remove::() /// // You can also remove tuples of components and bundles. 
/// // This is equivalent to the calls above: @@ -1623,47 +1613,85 @@ impl<'a> EntityCommands<'a> { /// } /// # bevy_ecs::system::assert_is_system(remove_combat_stats_system); /// ``` - pub fn remove(&mut self) -> &mut Self - where - T: Bundle, - { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.remove::(); - } - }) + #[track_caller] + pub fn remove(&mut self) -> &mut Self { + self.queue_handled(entity_command::remove::(), warn) } - /// Removes all components in the [`Bundle`] components and remove all required components for each component in the [`Bundle`] from entity. + /// Removes a [`Bundle`] of components from the entity. + /// + /// This will remove all components that intersect with the provided bundle; + /// the entity does not need to have all the components in the bundle. + /// + /// Unlike [`Self::remove`], + /// this will not emit a warning if the entity does not exist. /// /// # Example /// /// ``` - /// use bevy_ecs::prelude::*; + /// # use bevy_ecs::prelude::*; + /// # #[derive(Resource)] + /// # struct PlayerEntity { entity: Entity } + /// #[derive(Component)] + /// struct Health(u32); + /// #[derive(Component)] + /// struct Strength(u32); + /// #[derive(Component)] + /// struct Defense(u32); /// + /// #[derive(Bundle)] + /// struct CombatBundle { + /// health: Health, + /// strength: Strength, + /// } + /// + /// fn remove_combat_stats_system(mut commands: Commands, player: Res) { + /// commands + /// .entity(player.entity) + /// // You can remove individual components: + /// .try_remove::() + /// // You can also remove pre-defined bundles of components: + /// .try_remove::() + /// // You can also remove tuples of components and bundles. 
+ /// // This is equivalent to the calls above: + /// .try_remove::<(Defense, CombatBundle)>(); + /// } + /// # bevy_ecs::system::assert_is_system(remove_combat_stats_system); + /// ``` + pub fn try_remove(&mut self) -> &mut Self { + self.queue_handled(entity_command::remove::(), ignore) + } + + /// Removes a [`Bundle`] of components from the entity, + /// and also removes any components required by the components in the bundle. + /// + /// This will remove all components that intersect with the provided bundle; + /// the entity does not need to have all the components in the bundle. + /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// # #[derive(Resource)] + /// # struct PlayerEntity { entity: Entity } + /// # /// #[derive(Component)] /// #[require(B)] /// struct A; /// #[derive(Component, Default)] /// struct B; /// - /// #[derive(Resource)] - /// struct PlayerEntity { entity: Entity } - /// /// fn remove_with_requires_system(mut commands: Commands, player: Res) { /// commands /// .entity(player.entity) - /// // Remove both A and B components from the entity, because B is required by A + /// // Removes both A and B from the entity, because B is required by A. /// .remove_with_requires::(); /// } /// # bevy_ecs::system::assert_is_system(remove_with_requires_system); /// ``` - pub fn remove_with_requires(&mut self) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.remove_with_requires::(); - } - }) + #[track_caller] + pub fn remove_with_requires(&mut self) -> &mut Self { + self.queue(entity_command::remove_with_requires::()) } /// Removes a dynamic [`Component`] from the entity if it exists. @@ -1671,66 +1699,89 @@ impl<'a> EntityCommands<'a> { /// # Panics /// /// Panics if the provided [`ComponentId`] does not exist in the [`World`]. 
+ #[track_caller] pub fn remove_by_id(&mut self, component_id: ComponentId) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.remove_by_id(component_id); - } - }) + self.queue(entity_command::remove_by_id(component_id)) } /// Removes all components associated with the entity. + #[track_caller] pub fn clear(&mut self) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.clear(); - } - }) + self.queue(entity_command::clear()) } /// Despawns the entity. - /// This will emit a warning if the entity does not exist. /// - /// See [`World::despawn`] for more details. + /// This will emit a warning if the entity does not exist. /// /// # Note /// - /// This won't clean up external references to the entity (such as parent-child relationships - /// if you're using `bevy_hierarchy`), which may leave the world in an invalid state. + /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) + /// that is configured to despawn descendants. + /// + /// For example, this will recursively despawn [`Children`](crate::hierarchy::Children). /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # /// # #[derive(Resource)] /// # struct CharacterToRemove { entity: Entity } /// # /// fn remove_character_system( /// mut commands: Commands, /// character_to_remove: Res - /// ) - /// { + /// ) { /// commands.entity(character_to_remove.entity).despawn(); /// } /// # bevy_ecs::system::assert_is_system(remove_character_system); /// ``` #[track_caller] pub fn despawn(&mut self) { - self.queue(despawn(true)); + self.queue_handled(entity_command::despawn(), warn); + } + /// Despawns the provided entity and its descendants. + #[deprecated( + since = "0.16.0", + note = "Use entity.despawn(), which now automatically despawns recursively." 
+ )] + pub fn despawn_recursive(&mut self) { + self.despawn(); } /// Despawns the entity. - /// This will not emit a warning if the entity does not exist, essentially performing - /// the same function as [`Self::despawn`] without emitting warnings. - #[track_caller] + /// + /// Unlike [`Self::despawn`], + /// this will not emit a warning if the entity does not exist. + /// + /// # Note + /// + /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) + /// that is configured to despawn descendants. + /// + /// For example, this will recursively despawn [`Children`](crate::hierarchy::Children). pub fn try_despawn(&mut self) { - self.queue(despawn(false)); + self.queue_handled(entity_command::despawn(), ignore); } - /// Pushes an [`EntityCommand`] to the queue, which will get executed for the current [`Entity`]. + /// Pushes an [`EntityCommand`] to the queue, + /// which will get executed for the current [`Entity`]. + /// + /// The [default error handler](crate::error::default_error_handler) + /// will be used to handle error cases. + /// Every [`EntityCommand`] checks whether the entity exists at the time of execution + /// and returns an error if it does not. /// - /// # Examples + /// To use a custom error handler, see [`EntityCommands::queue_handled`]. + /// + /// The command can be: + /// - A custom struct that implements [`EntityCommand`]. + /// - A closure or function that matches the following signature: + /// - [`(EntityWorldMut)`](EntityWorldMut) + /// - [`(EntityWorldMut)`](EntityWorldMut) `->` [`Result`] + /// - A built-in command from the [`entity_command`] module. + /// + /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; @@ -1739,25 +1790,72 @@ impl<'a> EntityCommands<'a> { /// .spawn_empty() /// // Closures with this signature implement `EntityCommand`. 
/// .queue(|entity: EntityWorldMut| { - /// println!("Executed an EntityCommand for {:?}", entity.id()); + /// println!("Executed an EntityCommand for {}", entity.id()); /// }); /// # } /// # bevy_ecs::system::assert_is_system(my_system); /// ``` - pub fn queue(&mut self, command: impl EntityCommand) -> &mut Self { + pub fn queue + CommandWithEntity, T, M>( + &mut self, + command: C, + ) -> &mut Self { self.commands.queue(command.with_entity(self.entity)); self } - /// Removes all components except the given [`Bundle`] from the entity. + /// Pushes an [`EntityCommand`] to the queue, + /// which will get executed for the current [`Entity`]. /// - /// This can also be used to remove all the components from the entity by passing it an empty Bundle. + /// The given `error_handler` will be used to handle error cases. + /// Every [`EntityCommand`] checks whether the entity exists at the time of execution + /// and returns an error if it does not. + /// + /// To implicitly use the default error handler, see [`EntityCommands::queue`]. + /// + /// The command can be: + /// - A custom struct that implements [`EntityCommand`]. + /// - A closure or function that matches the following signature: + /// - [`(EntityWorldMut)`](EntityWorldMut) + /// - [`(EntityWorldMut)`](EntityWorldMut) `->` [`Result`] + /// - A built-in command from the [`entity_command`] module. + /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// # fn my_system(mut commands: Commands) { + /// use bevy_ecs::error::warn; + /// + /// commands + /// .spawn_empty() + /// // Closures with this signature implement `EntityCommand`. 
+ /// .queue_handled( + /// |entity: EntityWorldMut| -> Result { + /// let value: usize = "100".parse()?; + /// println!("Successfully parsed the value {} for entity {}", value, entity.id()); + /// Ok(()) + /// }, + /// warn + /// ); + /// # } + /// # bevy_ecs::system::assert_is_system(my_system); + /// ``` + pub fn queue_handled + CommandWithEntity, T, M>( + &mut self, + command: C, + error_handler: fn(BevyError, ErrorContext), + ) -> &mut Self { + self.commands + .queue_handled(command.with_entity(self.entity), error_handler); + self + } + + /// Removes all components except the given [`Bundle`] from the entity. /// /// # Example /// /// ``` /// # use bevy_ecs::prelude::*; - /// # /// # #[derive(Resource)] /// # struct PlayerEntity { entity: Entity } /// #[derive(Component)] @@ -1777,39 +1875,21 @@ impl<'a> EntityCommands<'a> { /// commands /// .entity(player.entity) /// // You can retain a pre-defined Bundle of components, - /// // with this removing only the Defense component + /// // with this removing only the Defense component. /// .retain::() - /// // You can also retain only a single component - /// .retain::() - /// // And you can remove all the components by passing in an empty Bundle - /// .retain::<()>(); + /// // You can also retain only a single component. + /// .retain::(); /// } /// # bevy_ecs::system::assert_is_system(remove_combat_stats_system); /// ``` - pub fn retain(&mut self) -> &mut Self - where - T: Bundle, - { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.retain::(); - } - }) + #[track_caller] + pub fn retain(&mut self) -> &mut Self { + self.queue(entity_command::retain::()) } - /// Logs the components of the entity at the info level. - /// - /// # Panics - /// - /// The command will panic when applied if the associated entity does not exist. + /// Logs the components of the entity at the [`info`](log::info) level. 
pub fn log_components(&mut self) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - let debug_infos: Vec<_> = world - .inspect_entity(entity) - .map(ComponentInfo::name) - .collect(); - info!("Entity {entity}: {debug_infos:?}"); - }) + self.queue(entity_command::log_components()) } /// Returns the underlying [`Commands`]. @@ -1822,29 +1902,24 @@ impl<'a> EntityCommands<'a> { &mut self.commands } - /// Sends a [`Trigger`] targeting this entity. This will run any [`Observer`] of the `event` that - /// watches this entity. + /// Sends a [`Trigger`](crate::observer::Trigger) targeting the entity. /// - /// [`Trigger`]: crate::observer::Trigger + /// This will run any [`Observer`] of the given [`Event`] watching this entity. + #[track_caller] pub fn trigger(&mut self, event: impl Event) -> &mut Self { - self.commands.trigger_targets(event, self.entity); - self + self.queue(entity_command::trigger(event)) } - /// Creates an [`Observer`] listening for a trigger of type `T` that targets this entity. + /// Creates an [`Observer`] listening for events of type `E` targeting this entity. pub fn observe( &mut self, observer: impl IntoObserverSystem, ) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.observe(observer); - } - }) + self.queue(entity_command::observe(observer)) } /// Clones parts of an entity (components, observers, etc.) onto another entity, - /// configured through [`EntityCloneBuilder`]. + /// configured through [`EntityClonerBuilder`]. /// /// By default, the other entity will receive all the components of the original that implement /// [`Clone`] or [`Reflect`](bevy_reflect::Reflect). 
@@ -1855,23 +1930,22 @@ impl<'a> EntityCommands<'a> { /// /// # Example /// - /// Configure through [`EntityCloneBuilder`] as follows: + /// Configure through [`EntityClonerBuilder`] as follows: /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component, Clone)] /// struct ComponentA(u32); /// #[derive(Component, Clone)] /// struct ComponentB(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create an empty entity + /// // Create an empty entity. /// let target = commands.spawn_empty().id(); /// - /// // Create a new entity and keep its EntityCommands + /// // Create a new entity and keep its EntityCommands. /// let mut entity = commands.spawn((ComponentA(10), ComponentB(20))); /// - /// // Clone only ComponentA onto the target + /// // Clone only ComponentA onto the target. /// entity.clone_with(target, |builder| { /// builder.deny::(); /// }); @@ -1879,20 +1953,13 @@ impl<'a> EntityCommands<'a> { /// # bevy_ecs::system::assert_is_system(example_system); /// ``` /// - /// See the following for more options: - /// - [`EntityCloneBuilder`] - /// - [`CloneEntityWithObserversExt`](crate::observer::CloneEntityWithObserversExt) - /// - `CloneEntityHierarchyExt` + /// See [`EntityClonerBuilder`] for more options. pub fn clone_with( &mut self, target: Entity, - config: impl FnOnce(&mut EntityCloneBuilder) + Send + Sync + 'static, + config: impl FnOnce(&mut EntityClonerBuilder) + Send + Sync + 'static, ) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.clone_with(target, config); - } - }) + self.queue(entity_command::clone_with(target, config)) } /// Spawns a clone of this entity and returns the [`EntityCommands`] of the clone. 
@@ -1912,17 +1979,16 @@ impl<'a> EntityCommands<'a> { /// /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component, Clone)] /// struct ComponentA(u32); /// #[derive(Component, Clone)] /// struct ComponentB(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create a new entity and keep its EntityCommands + /// // Create a new entity and store its EntityCommands. /// let mut entity = commands.spawn((ComponentA(10), ComponentB(20))); /// - /// // Create a clone of the first entity + /// // Create a clone of the first entity. /// let mut entity_clone = entity.clone_and_spawn(); /// } /// # bevy_ecs::system::assert_is_system(example_system); @@ -1931,16 +1997,16 @@ impl<'a> EntityCommands<'a> { } /// Spawns a clone of this entity and allows configuring cloning behavior - /// using [`EntityCloneBuilder`], returning the [`EntityCommands`] of the clone. + /// using [`EntityClonerBuilder`], returning the [`EntityCommands`] of the clone. /// /// By default, the clone will receive all the components of the original that implement /// [`Clone`] or [`Reflect`](bevy_reflect::Reflect). /// - /// To exclude specific components, use [`EntityCloneBuilder::deny`]. - /// To only include specific components, use [`EntityCloneBuilder::deny_all`] - /// followed by [`EntityCloneBuilder::allow`]. + /// To exclude specific components, use [`EntityClonerBuilder::deny`]. + /// To only include specific components, use [`EntityClonerBuilder::deny_all`] + /// followed by [`EntityClonerBuilder::allow`]. /// - /// See the methods on [`EntityCloneBuilder`] for more options. + /// See the methods on [`EntityClonerBuilder`] for more options. 
/// /// # Note /// @@ -1951,17 +2017,16 @@ impl<'a> EntityCommands<'a> { /// /// ``` /// # use bevy_ecs::prelude::*; - /// /// #[derive(Component, Clone)] /// struct ComponentA(u32); /// #[derive(Component, Clone)] /// struct ComponentB(u32); /// /// fn example_system(mut commands: Commands) { - /// // Create a new entity and keep its EntityCommands + /// // Create a new entity and store its EntityCommands. /// let mut entity = commands.spawn((ComponentA(10), ComponentB(20))); /// - /// // Create a clone of the first entity, but without ComponentB + /// // Create a clone of the first entity, but without ComponentB. /// let mut entity_clone = entity.clone_and_spawn_with(|builder| { /// builder.deny::(); /// }); @@ -1969,7 +2034,7 @@ impl<'a> EntityCommands<'a> { /// # bevy_ecs::system::assert_is_system(example_system); pub fn clone_and_spawn_with( &mut self, - config: impl FnOnce(&mut EntityCloneBuilder) + Send + Sync + 'static, + config: impl FnOnce(&mut EntityClonerBuilder) + Send + Sync + 'static, ) -> EntityCommands<'_> { let entity_clone = self.commands().spawn_empty().id(); self.clone_with(entity_clone, config); @@ -1988,11 +2053,7 @@ impl<'a> EntityCommands<'a> { /// /// The command will panic when applied if the target entity does not exist. pub fn clone_components(&mut self, target: Entity) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.clone_components::(target); - } - }) + self.queue(entity_command::clone_components::(target)) } /// Clones the specified components of this entity and inserts them into another entity, @@ -2005,11 +2066,7 @@ impl<'a> EntityCommands<'a> { /// /// The command will panic when applied if the target entity does not exist. 
pub fn move_components(&mut self, target: Entity) -> &mut Self { - self.queue(move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.move_components::(target); - } - }) + self.queue(entity_command::move_components::(target)) } } @@ -2033,61 +2090,48 @@ impl<'a, T: Component> EntityEntryCommands<'a, T> { } impl<'a, T: Component> EntityEntryCommands<'a, T> { - /// [Insert](EntityCommands::insert) `default` into this entity, if `T` is not already present. - /// - /// See also [`or_insert_with`](Self::or_insert_with). - /// - /// # Panics - /// - /// Panics if the entity does not exist. - /// See [`or_try_insert`](Self::or_try_insert) for a non-panicking version. + /// [Insert](EntityCommands::insert) `default` into this entity, + /// if `T` is not already present. #[track_caller] pub fn or_insert(&mut self, default: T) -> &mut Self { self.entity_commands.insert_if_new(default); self } - /// [Insert](EntityCommands::insert) `default` into this entity, if `T` is not already present. + /// [Insert](EntityCommands::insert) `default` into this entity, + /// if `T` is not already present. /// - /// Unlike [`or_insert`](Self::or_insert), this will not panic if the entity does not exist. + /// # Note /// - /// See also [`or_insert_with`](Self::or_insert_with). + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn or_try_insert(&mut self, default: T) -> &mut Self { self.entity_commands.try_insert_if_new(default); self } - /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, if `T` is not already present. - /// - /// See also [`or_insert`](Self::or_insert) and [`or_try_insert`](Self::or_try_insert). - /// - /// # Panics - /// - /// Panics if the entity does not exist. - /// See [`or_try_insert_with`](Self::or_try_insert_with) for a non-panicking version. 
+ /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, + /// if `T` is not already present. #[track_caller] pub fn or_insert_with(&mut self, default: impl Fn() -> T) -> &mut Self { self.or_insert(default()) } - /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, if `T` is not already present. + /// [Insert](EntityCommands::insert) the value returned from `default` into this entity, + /// if `T` is not already present. /// - /// Unlike [`or_insert_with`](Self::or_insert_with), this will not panic if the entity does not exist. + /// # Note /// - /// See also [`or_insert`](Self::or_insert) and [`or_try_insert`](Self::or_try_insert). + /// If the entity does not exist when this command is executed, + /// the resulting error will be ignored. #[track_caller] pub fn or_try_insert_with(&mut self, default: impl Fn() -> T) -> &mut Self { self.or_try_insert(default()) } - /// [Insert](EntityCommands::insert) `T::default` into this entity, if `T` is not already present. - /// - /// See also [`or_insert`](Self::or_insert) and [`or_from_world`](Self::or_from_world). - /// - /// # Panics - /// - /// Panics if the entity does not exist. + /// [Insert](EntityCommands::insert) `T::default` into this entity, + /// if `T` is not already present. #[track_caller] pub fn or_default(&mut self) -> &mut Self where @@ -2096,174 +2140,69 @@ impl<'a, T: Component> EntityEntryCommands<'a, T> { self.or_insert(T::default()) } - /// [Insert](EntityCommands::insert) `T::from_world` into this entity, if `T` is not already present. - /// - /// See also [`or_insert`](Self::or_insert) and [`or_default`](Self::or_default). - /// - /// # Panics - /// - /// Panics if the entity does not exist. + /// [Insert](EntityCommands::insert) `T::from_world` into this entity, + /// if `T` is not already present. 
#[track_caller] pub fn or_from_world(&mut self) -> &mut Self where T: FromWorld, { - let caller = Location::caller(); - self.entity_commands.queue(move |entity: Entity, world: &mut World| { - let value = T::from_world(world); - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.insert_with_caller( - value, - InsertMode::Keep, - #[cfg(feature = "track_change_detection")] - caller, - ); - } else { - panic!("error[B0003]: {caller}: Could not insert a bundle (of type `{}`) for {entity:?}, which {}. See: https://bevyengine.org/learn/errors/b0003", core::any::type_name::(), world.entities().entity_does_not_exist_error_details_message(entity) ); - } - }); + self.entity_commands + .queue(entity_command::insert_from_world::(InsertMode::Keep)); self } -} - -impl Command for F -where - F: FnOnce(&mut World) + Send + 'static, -{ - fn apply(self, world: &mut World) { - self(world); - } -} - -impl EntityCommand for F -where - F: FnOnce(EntityWorldMut) + Send + 'static, -{ - fn apply(self, id: Entity, world: &mut World) { - self(world.entity_mut(id)); - } -} - -impl EntityCommand for F -where - F: FnOnce(Entity, &mut World) + Send + 'static, -{ - fn apply(self, id: Entity, world: &mut World) { - self(id, world); - } -} - -/// A [`Command`] that consumes an iterator to add a series of [`Bundles`](Bundle) to a set of entities. -/// If any entities do not exist in the world, this command will panic. -/// -/// This is more efficient than inserting the bundles individually. -#[track_caller] -fn insert_batch(batch: I, mode: InsertMode) -> impl Command -where - I: IntoIterator + Send + Sync + 'static, - B: Bundle, -{ - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - move |world: &mut World| { - world.insert_batch_with_caller( - batch, - mode, - #[cfg(feature = "track_change_detection")] - caller, - ); - } -} -/// A [`Command`] that consumes an iterator to add a series of [`Bundles`](Bundle) to a set of entities. 
-/// If any entities do not exist in the world, this command will ignore them. -/// -/// This is more efficient than inserting the bundles individually. -#[track_caller] -fn try_insert_batch(batch: I, mode: InsertMode) -> impl Command -where - I: IntoIterator + Send + Sync + 'static, - B: Bundle, -{ - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - move |world: &mut World| { - world.try_insert_batch_with_caller( - batch, - mode, - #[cfg(feature = "track_change_detection")] - caller, - ); - } -} - -/// A [`Command`] that despawns a specific entity. -/// This will emit a warning if the entity does not exist. -/// -/// # Note -/// -/// This won't clean up external references to the entity (such as parent-child relationships -/// if you're using `bevy_hierarchy`), which may leave the world in an invalid state. -#[track_caller] -fn despawn(log_warning: bool) -> impl EntityCommand { - let caller = Location::caller(); - move |entity: Entity, world: &mut World| { - world.despawn_with_caller(entity, caller, log_warning); - } -} - -/// An [`EntityCommand`] that adds the components in a [`Bundle`] to an entity. -#[track_caller] -fn insert(bundle: T, mode: InsertMode) -> impl EntityCommand { - let caller = Location::caller(); - move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.insert_with_caller( - bundle, - mode, - #[cfg(feature = "track_change_detection")] - caller, - ); - } else { - panic!("error[B0003]: {caller}: Could not insert a bundle (of type `{}`) for entity {entity:?}, which {}. See: https://bevyengine.org/learn/errors/b0003", core::any::type_name::(), world.entities().entity_does_not_exist_error_details_message(entity)); - } - } -} - -/// An [`EntityCommand`] that attempts to add the components in a [`Bundle`] to an entity. -/// Does nothing if the entity does not exist. 
-#[track_caller] -fn try_insert(bundle: impl Bundle, mode: InsertMode) -> impl EntityCommand { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - move |entity: Entity, world: &mut World| { - if let Ok(mut entity) = world.get_entity_mut(entity) { - entity.insert_with_caller( - bundle, - mode, - #[cfg(feature = "track_change_detection")] - caller, - ); - } + /// Get the [`EntityCommands`] from which the [`EntityEntryCommands`] was initiated. + /// + /// This allows you to continue chaining method calls after calling [`EntityCommands::entry`]. + /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// # #[derive(Resource)] + /// # struct PlayerEntity { entity: Entity } + /// #[derive(Component)] + /// struct Level(u32); + /// + /// fn level_up_system(mut commands: Commands, player: Res) { + /// commands + /// .entity(player.entity) + /// .entry::() + /// // Modify the component if it exists. + /// .and_modify(|mut lvl| lvl.0 += 1) + /// // Otherwise, insert a default value. + /// .or_insert(Level(0)) + /// // Return the EntityCommands for the entity. + /// .entity() + /// // Continue chaining method calls. 
+ /// .insert(Name::new("Player")); + /// } + /// # bevy_ecs::system::assert_is_system(level_up_system); + /// ``` + pub fn entity(&mut self) -> EntityCommands { + self.entity_commands.reborrow() } } #[cfg(test)] -#[allow(clippy::float_cmp, clippy::approx_constant)] mod tests { use crate::{ - self as bevy_ecs, - component::{require, Component}, - system::{Commands, Resource}, + component::Component, + resource::Resource, + system::Commands, world::{CommandQueue, FromWorld, World}, }; - use alloc::sync::Arc; + use alloc::{string::String, sync::Arc, vec, vec::Vec}; use core::{ any::TypeId, sync::atomic::{AtomicUsize, Ordering}, }; - #[allow(dead_code)] + #[expect( + dead_code, + reason = "This struct is used to test how `Drop` behavior works in regards to SparseSet storage, and as such is solely a wrapper around `DropCk` to make it use the SparseSet storage. Because of this, the inner field is intentionally never read." + )] #[derive(Component)] #[component(storage = "SparseSet")] struct SparseDropCk(DropCk); @@ -2332,6 +2271,10 @@ mod tests { commands.entity(entity).entry::>().or_from_world(); queue.apply(&mut world); assert_eq!("*****", &world.get::>(entity).unwrap().0); + let mut commands = Commands::new(&mut queue, &world); + let id = commands.entity(entity).entry::>().entity().id(); + queue.apply(&mut world); + assert_eq!(id, entity); } #[test] @@ -2590,15 +2533,15 @@ mod tests { fn nothing() {} - assert!(world.iter_resources().count() == 0); + let resources = world.iter_resources().count(); let id = world.register_system_cached(nothing); - assert!(world.iter_resources().count() == 1); + assert_eq!(world.iter_resources().count(), resources + 1); assert!(world.get_entity(id.entity).is_ok()); let mut commands = Commands::new(&mut queue, &world); commands.unregister_system_cached(nothing); queue.apply(&mut world); - assert!(world.iter_resources().count() == 0); + assert_eq!(world.iter_resources().count(), resources); assert!(world.get_entity(id.entity).is_err()); } 
diff --git a/crates/bevy_ecs/src/system/commands/parallel_scope.rs b/crates/bevy_ecs/src/system/commands/parallel_scope.rs index 82a00ceac2977..bee491017d500 100644 --- a/crates/bevy_ecs/src/system/commands/parallel_scope.rs +++ b/crates/bevy_ecs/src/system/commands/parallel_scope.rs @@ -1,7 +1,6 @@ use bevy_utils::Parallel; use crate::{ - self as bevy_ecs, entity::Entities, prelude::World, system::{Deferred, SystemBuffer, SystemMeta, SystemParam}, @@ -21,9 +20,13 @@ struct ParallelCommandQueue { /// [`Bundle`](crate::prelude::Bundle) type need to be spawned, consider using /// [`Commands::spawn_batch`] for better performance. /// -/// Note: Because command application order will depend on how many threads are ran, non-commutative commands may result in non-deterministic results. +/// # Note +/// +/// Because command application order will depend on how many threads are ran, +/// non-commutative commands may result in non-deterministic results. +/// +/// # Example /// -/// Example: /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_tasks::ComputeTaskPool; diff --git a/crates/bevy_ecs/src/system/exclusive_function_system.rs b/crates/bevy_ecs/src/system/exclusive_function_system.rs index 99f3d1d0299af..9107993f9542a 100644 --- a/crates/bevy_ecs/src/system/exclusive_function_system.rs +++ b/crates/bevy_ecs/src/system/exclusive_function_system.rs @@ -1,7 +1,7 @@ use crate::{ archetype::ArchetypeComponentId, component::{ComponentId, Tick}, - query::Access, + query::{Access, FilteredAccessSet}, schedule::{InternedSystemSet, SystemSet}, system::{ check_system_change_tick, ExclusiveSystemParam, ExclusiveSystemParamItem, IntoSystem, @@ -14,6 +14,8 @@ use alloc::{borrow::Cow, vec, vec::Vec}; use core::marker::PhantomData; use variadics_please::all_tuples; +use super::SystemParamValidationError; + /// A function system that runs with exclusive [`World`] access. 
/// /// You get this by calling [`IntoSystem::into_system`] on a function that only accepts @@ -84,6 +86,11 @@ where self.system_meta.component_access_set.combined_access() } + #[inline] + fn component_access_set(&self) -> &FilteredAccessSet { + &self.system_meta.component_access_set + } + #[inline] fn archetype_component_access(&self) -> &Access { &self.system_meta.archetype_component_access @@ -111,13 +118,11 @@ where #[inline] unsafe fn run_unsafe( &mut self, - _input: SystemIn<'_, Self>, - _world: UnsafeWorldCell, + input: SystemIn<'_, Self>, + world: UnsafeWorldCell, ) -> Self::Out { - panic!("Cannot run exclusive systems with a shared World reference"); - } - - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { + // SAFETY: The safety is upheld by the caller. + let world = unsafe { world.world_mut() }; world.last_change_tick_scope(self.system_meta.last_run, |world| { #[cfg(feature = "trace")] let _span_guard = self.system_meta.system_span.enter(); @@ -150,9 +155,12 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, _world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + _world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // All exclusive system params are always available. - true + Ok(()) } #[inline] @@ -219,7 +227,14 @@ pub struct HasExclusiveSystemInput; macro_rules! impl_exclusive_system_function { ($($param: ident),*) => { - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "This is within a macro, and as such, the below lints may not always apply." + )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] impl ExclusiveSystemParamFunction Out> for Func where Func: Send + Sync + 'static, @@ -236,7 +251,6 @@ macro_rules! impl_exclusive_system_function { // Yes, this is strange, but `rustc` fails to compile this impl // without using this function. 
It fails to recognize that `func` // is a function, potentially because of the multiple impls of `FnMut` - #[allow(clippy::too_many_arguments)] fn call_inner( mut f: impl FnMut(&mut World, $($param,)*) -> Out, world: &mut World, @@ -249,7 +263,14 @@ macro_rules! impl_exclusive_system_function { } } - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "This is within a macro, and as such, the below lints may not always apply." + )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] impl ExclusiveSystemParamFunction<(HasExclusiveSystemInput, fn(In, $($param,)*) -> Out)> for Func where Func: Send + Sync + 'static, @@ -267,8 +288,8 @@ macro_rules! impl_exclusive_system_function { // Yes, this is strange, but `rustc` fails to compile this impl // without using this function. It fails to recognize that `func` // is a function, potentially because of the multiple impls of `FnMut` - #[allow(clippy::too_many_arguments)] fn call_inner( + _: PhantomData, mut f: impl FnMut(In::Param<'_>, &mut World, $($param,)*) -> Out, input: In::Inner<'_>, world: &mut World, @@ -277,7 +298,7 @@ macro_rules! impl_exclusive_system_function { f(In::wrap(input), world, $($param,)*) } let ($($param,)*) = param_value; - call_inner(self, input, world, $($param),*) + call_inner(PhantomData::, self, input, world, $($param),*) } } }; diff --git a/crates/bevy_ecs/src/system/exclusive_system_param.rs b/crates/bevy_ecs/src/system/exclusive_system_param.rs index cc24cb7904304..f271e32e2f714 100644 --- a/crates/bevy_ecs/src/system/exclusive_system_param.rs +++ b/crates/bevy_ecs/src/system/exclusive_system_param.rs @@ -88,26 +88,38 @@ impl ExclusiveSystemParam for PhantomData { macro_rules! 
impl_exclusive_system_param_tuple { ($(#[$meta:meta])* $($param: ident),*) => { - #[allow(unused_variables)] - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "This is within a macro, and as such, the below lints may not always apply." + )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use any of the parameters." + )] $(#[$meta])* impl<$($param: ExclusiveSystemParam),*> ExclusiveSystemParam for ($($param,)*) { type State = ($($param::State,)*); type Item<'s> = ($($param::Item<'s>,)*); #[inline] - fn init(_world: &mut World, _system_meta: &mut SystemMeta) -> Self::State { - (($($param::init(_world, _system_meta),)*)) + fn init(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { + (($($param::init(world, system_meta),)*)) } #[inline] - #[allow(clippy::unused_unit)] fn get_param<'s>( state: &'s mut Self::State, system_meta: &SystemMeta, ) -> Self::Item<'s> { - let ($($param,)*) = state; + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples won't have any params to get." 
+ )] ($($param::get_param($param, system_meta),)*) } } @@ -124,8 +136,8 @@ all_tuples!( #[cfg(test)] mod tests { - use crate as bevy_ecs; use crate::{schedule::Schedule, system::Local, world::World}; + use alloc::vec::Vec; use bevy_ecs_macros::Resource; use core::marker::PhantomData; diff --git a/crates/bevy_ecs/src/system/function_system.rs b/crates/bevy_ecs/src/system/function_system.rs index eeee2f1c78491..c64e30822b406 100644 --- a/crates/bevy_ecs/src/system/function_system.rs +++ b/crates/bevy_ecs/src/system/function_system.rs @@ -18,7 +18,7 @@ use variadics_please::all_tuples; #[cfg(feature = "trace")] use tracing::{info_span, Span}; -use super::{In, IntoSystem, ReadOnlySystem, SystemParamBuilder}; +use super::{IntoSystem, ReadOnlySystem, SystemParamBuilder, SystemParamValidationError}; /// The metadata of a [`System`]. #[derive(Clone)] @@ -43,7 +43,6 @@ pub struct SystemMeta { is_send: bool, has_deferred: bool, pub(crate) last_run: Tick, - param_warn_policy: ParamWarnPolicy, #[cfg(feature = "trace")] pub(crate) system_span: Span, #[cfg(feature = "trace")] @@ -60,7 +59,6 @@ impl SystemMeta { is_send: true, has_deferred: false, last_run: Tick::new(0), - param_warn_policy: ParamWarnPolicy::Panic, #[cfg(feature = "trace")] system_span: info_span!("system", name = name), #[cfg(feature = "trace")] @@ -116,27 +114,6 @@ impl SystemMeta { self.has_deferred = true; } - /// Changes the warn policy. - #[inline] - pub(crate) fn set_param_warn_policy(&mut self, warn_policy: ParamWarnPolicy) { - self.param_warn_policy = warn_policy; - } - - /// Advances the warn policy after validation failed. - #[inline] - pub(crate) fn advance_param_warn_policy(&mut self) { - self.param_warn_policy.advance(); - } - - /// Emits a warning about inaccessible system param if policy allows it. - #[inline] - pub fn try_warn_param

(&self) - where - P: SystemParam, - { - self.param_warn_policy.try_warn::

(&self.name); - } - /// Archetype component access that is used to determine which systems can run in parallel with each other /// in the multithreaded executor. /// @@ -187,83 +164,6 @@ impl SystemMeta { } } -/// State machine for emitting warnings when [system params are invalid](System::validate_param). -#[derive(Clone, Copy)] -pub enum ParamWarnPolicy { - /// Stop app with a panic. - Panic, - /// No warning should ever be emitted. - Never, - /// The warning will be emitted once and status will update to [`Self::Never`]. - Once, -} - -impl ParamWarnPolicy { - /// Advances the warn policy after validation failed. - #[inline] - fn advance(&mut self) { - // Ignore `Panic` case, because it stops execution before this function gets called. - *self = Self::Never; - } - - /// Emits a warning about inaccessible system param if policy allows it. - #[inline] - fn try_warn

(&self, name: &str) - where - P: SystemParam, - { - match self { - Self::Panic => panic!( - "{0} could not access system parameter {1}", - name, - disqualified::ShortName::of::

() - ), - Self::Once => { - log::warn!( - "{0} did not run because it requested inaccessible system parameter {1}", - name, - disqualified::ShortName::of::

() - ); - } - Self::Never => {} - } - } -} - -/// Trait for manipulating warn policy of systems. -#[doc(hidden)] -pub trait WithParamWarnPolicy -where - M: 'static, - F: SystemParamFunction, - Self: Sized, -{ - /// Set warn policy. - fn with_param_warn_policy(self, warn_policy: ParamWarnPolicy) -> FunctionSystem; - - /// Warn only once about invalid system parameters. - fn param_warn_once(self) -> FunctionSystem { - self.with_param_warn_policy(ParamWarnPolicy::Once) - } - - /// Disable all param warnings. - fn never_param_warn(self) -> FunctionSystem { - self.with_param_warn_policy(ParamWarnPolicy::Never) - } -} - -impl WithParamWarnPolicy for F -where - M: 'static, - F: SystemParamFunction, -{ - fn with_param_warn_policy(self, param_warn_policy: ParamWarnPolicy) -> FunctionSystem { - let mut system = IntoSystem::into_system(self); - system.system_meta.set_param_warn_policy(param_warn_policy); - system - } -} - // TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference // (to avoid the need for unwrapping to retrieve SystemMeta) /// Holds on to persistent state required to drive [`SystemParam`] for a [`System`]. @@ -396,7 +296,7 @@ macro_rules! impl_build_system { Input: SystemInput, Out: 'static, Marker, - F: FnMut(In, $(SystemParamItem<$param>),*) -> Out + F: FnMut(Input, $(SystemParamItem<$param>),*) -> Out + SystemParamFunction, >( self, @@ -474,6 +374,12 @@ impl SystemState { &self.meta } + /// Gets the metadata for this instance. + #[inline] + pub fn meta_mut(&mut self) -> &mut SystemMeta { + &mut self.meta + } + /// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only. #[inline] pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param> @@ -511,7 +417,10 @@ impl SystemState { /// - The passed [`UnsafeWorldCell`] must have read-only access to /// world data in `archetype_component_access`. 
/// - `world` must be the same [`World`] that was used to initialize [`state`](SystemParam::init_state). - pub unsafe fn validate_param(state: &Self, world: UnsafeWorldCell) -> bool { + pub unsafe fn validate_param( + state: &Self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegated to existing `SystemParam` implementations. unsafe { Param::validate_param(&state.param_state, &state.meta, world) } } @@ -624,7 +533,7 @@ impl SystemState { world: UnsafeWorldCell<'w>, ) -> SystemParamItem<'w, 's, Param> { let change_tick = world.increment_change_tick(); - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. unsafe { self.fetch(world, change_tick) } } @@ -638,12 +547,31 @@ impl SystemState { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> SystemParamItem<'w, 's, Param> { - // SAFETY: The invariants are uphold by the caller. + // SAFETY: The invariants are upheld by the caller. let param = unsafe { Param::get_param(&mut self.param_state, &self.meta, world, change_tick) }; self.meta.last_run = change_tick; param } + + /// Returns a reference to the current system param states. + pub fn param_state(&self) -> &Param::State { + &self.param_state + } + + /// Returns a mutable reference to the current system param states. + /// Marked as unsafe because modifying the system states may result in violation to certain + /// assumptions made by the [`SystemParam`]. Use with care. + /// + /// # Safety + /// Modifying the system param states may have unintended consequences. + /// The param state is generally considered to be owned by the [`SystemParam`]. Modifications + /// should respect any invariants as required by the [`SystemParam`]. + /// For example, modifying the system state of [`ResMut`](crate::system::ResMut) without also + /// updating [`SystemMeta::component_access_set`] will obviously create issues. 
+ pub unsafe fn param_state_mut(&mut self) -> &mut Param::State { + &mut self.param_state + } } impl FromWorld for SystemState { @@ -656,7 +584,7 @@ impl FromWorld for SystemState { /// /// You get this by calling [`IntoSystem::into_system`] on a function that only accepts /// [`SystemParam`]s. The output of the system becomes the functions return type, while the input -/// becomes the functions [`In`] tagged parameter or `()` if no such parameter exists. +/// becomes the functions first parameter or `()` if no such parameter exists. /// /// [`FunctionSystem`] must be `.initialized` before they can be run. /// @@ -765,6 +693,11 @@ where self.system_meta.component_access_set.combined_access() } + #[inline] + fn component_access_set(&self) -> &FilteredAccessSet { + &self.system_meta.component_access_set + } + #[inline] fn archetype_component_access(&self) -> &Access { &self.system_meta.archetype_component_access @@ -822,18 +755,17 @@ where } #[inline] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { let param_state = &self.state.as_ref().expect(Self::ERROR_UNINITIALIZED).param; // SAFETY: // - The caller has invoked `update_archetype_component_access`, which will panic // if the world does not match. // - All world accesses used by `F::Param` have been registered, so the caller // will ensure that there are no data access conflicts. - let is_valid = unsafe { F::Param::validate_param(param_state, &self.system_meta, world) }; - if !is_valid { - self.system_meta.advance_param_warn_policy(); - } - is_valid + unsafe { F::Param::validate_param(param_state, &self.system_meta, world) } } #[inline] @@ -987,7 +919,14 @@ pub struct HasSystemInput; macro_rules! 
impl_system_function { ($($param: ident),*) => { - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "This is within a macro, and as such, the below lints may not always apply." + )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] impl SystemParamFunction Out> for Func where Func: Send + Sync + 'static, @@ -1004,7 +943,6 @@ macro_rules! impl_system_function { // Yes, this is strange, but `rustc` fails to compile this impl // without using this function. It fails to recognize that `func` // is a function, potentially because of the multiple impls of `FnMut` - #[allow(clippy::too_many_arguments)] fn call_inner( mut f: impl FnMut($($param,)*)->Out, $($param: $param,)* @@ -1016,7 +954,14 @@ macro_rules! impl_system_function { } } - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "This is within a macro, and as such, the below lints may not always apply." + )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] impl SystemParamFunction<(HasSystemInput, fn(In, $($param,)*) -> Out)> for Func where Func: Send + Sync + 'static, @@ -1031,8 +976,8 @@ macro_rules! impl_system_function { type Param = ($($param,)*); #[inline] fn run(&mut self, input: In::Inner<'_>, param_value: SystemParamItem< ($($param,)*)>) -> Out { - #[allow(clippy::too_many_arguments)] fn call_inner( + _: PhantomData, mut f: impl FnMut(In::Param<'_>, $($param,)*)->Out, input: In::Inner<'_>, $($param: $param,)* @@ -1040,7 +985,7 @@ macro_rules! 
impl_system_function { f(In::wrap(input), $($param,)*) } let ($($param,)*) = param_value; - call_inner(self, input, $($param),*) + call_inner(PhantomData::, self, input, $($param),*) } } }; diff --git a/crates/bevy_ecs/src/system/input.rs b/crates/bevy_ecs/src/system/input.rs index 469403bc7345c..12087fdf6a64a 100644 --- a/crates/bevy_ecs/src/system/input.rs +++ b/crates/bevy_ecs/src/system/input.rs @@ -259,8 +259,18 @@ macro_rules! impl_system_input_tuple { type Param<'i> = ($($name::Param<'i>,)*); type Inner<'i> = ($($name::Inner<'i>,)*); - #[allow(non_snake_case)] - #[allow(clippy::unused_unit)] + #[expect( + clippy::allow_attributes, + reason = "This is in a macro; as such, the below lints may not always apply." + )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples won't have anything to wrap." + )] fn wrap(this: Self::Inner<'_>) -> Self::Param<'_> { let ($($name,)*) = this; ($($name::wrap($name),)*) diff --git a/crates/bevy_ecs/src/system/mod.rs b/crates/bevy_ecs/src/system/mod.rs index 4a4c9e48af230..1bdd26add283f 100644 --- a/crates/bevy_ecs/src/system/mod.rs +++ b/crates/bevy_ecs/src/system/mod.rs @@ -82,7 +82,7 @@ //! # System return type //! //! Systems added to a schedule through [`add_systems`](crate::schedule::Schedule) may either return -//! empty `()` or a [`Result`](crate::result::Result). Other contexts (like one shot systems) allow +//! empty `()` or a [`Result`](crate::error::Result). Other contexts (like one shot systems) allow //! systems to return arbitrary values. //! //! # System parameter list @@ -116,6 +116,8 @@ //! - [`DynSystemParam`] //! - [`Vec

`] where `P: SystemParam` //! - [`ParamSet>`] where `P: SystemParam` +//! +//! [`Vec

`]: alloc::vec::Vec mod adapter_system; mod builder; @@ -128,7 +130,6 @@ mod input; mod observer_system; mod query; mod schedule_system; -#[allow(clippy::module_inception)] mod system; mod system_name; mod system_param; @@ -159,6 +160,12 @@ use crate::world::World; /// Use this to get a system from a function. Also note that every system implements this trait as /// well. /// +/// # Usage notes +/// +/// This trait should only be used as a bound for trait implementations or as an +/// argument to a function. If a system needs to be returned from a function or +/// stored somewhere, use [`System`] instead of this trait. +/// /// # Examples /// /// ``` @@ -316,32 +323,38 @@ pub fn assert_system_does_not_conflict>(world: &mut World, system: S) { + fn run_system>( + world: &mut World, + system: S, + ) { let mut schedule = Schedule::default(); schedule.add_systems(system); schedule.run(world); @@ -388,7 +404,7 @@ mod tests { #[test] fn get_many_is_ordered() { - use crate::system::Resource; + use crate::resource::Resource; const ENTITIES_COUNT: usize = 1000; #[derive(Resource)] @@ -417,8 +433,7 @@ mod tests { let entities_array: [Entity; ENTITIES_COUNT] = entities_array.0.clone().try_into().unwrap(); - #[allow(unused_mut)] - for (i, mut w) in (0..ENTITIES_COUNT).zip(q.get_many_mut(entities_array).unwrap()) { + for (i, w) in (0..ENTITIES_COUNT).zip(q.get_many_mut(entities_array).unwrap()) { assert_eq!(i, w.0); } @@ -470,7 +485,7 @@ mod tests { #[test] fn changed_resource_system() { - use crate::system::Resource; + use crate::resource::Resource; #[derive(Resource)] struct Flipper(bool); @@ -894,13 +909,18 @@ mod tests { } #[test] + #[expect( + dead_code, + reason = "The `NotSend1` and `NotSend2` structs is used to verify that a system will run, even if the system params include a non-Send resource. As such, the inner value doesn't matter." 
+ )] fn non_send_option_system() { let mut world = World::default(); world.insert_resource(SystemRan::No); - #[allow(dead_code)] + // Two structs are used, one which is inserted and one which is not, to verify that wrapping + // non-Send resources in an `Option` will allow the system to run regardless of their + // existence. struct NotSend1(alloc::rc::Rc); - #[allow(dead_code)] struct NotSend2(alloc::rc::Rc); world.insert_non_send_resource(NotSend1(alloc::rc::Rc::new(0))); @@ -919,13 +939,15 @@ mod tests { } #[test] + #[expect( + dead_code, + reason = "The `NotSend1` and `NotSend2` structs are used to verify that a system will run, even if the system params include a non-Send resource. As such, the inner value doesn't matter." + )] fn non_send_system() { let mut world = World::default(); world.insert_resource(SystemRan::No); - #[allow(dead_code)] struct NotSend1(alloc::rc::Rc); - #[allow(dead_code)] struct NotSend2(alloc::rc::Rc); world.insert_non_send_resource(NotSend1(alloc::rc::Rc::new(1))); @@ -1107,7 +1129,6 @@ mod tests { } #[test] - #[allow(clippy::too_many_arguments)] fn can_have_16_parameters() { fn sys_x( _: Res, @@ -1275,9 +1296,11 @@ mod tests { } } - /// this test exists to show that read-only world-only queries can return data that lives as long as 'world #[test] - #[allow(unused)] + #[expect( + dead_code, + reason = "This test exists to show that read-only world-only queries can return data that lives as long as `'world`." 
+ )] fn long_life_test() { struct Holder<'w> { value: &'w A, @@ -1344,7 +1367,7 @@ mod tests { fn mutable_query(mut query: Query<&mut A>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query<&A>) {} @@ -1359,7 +1382,7 @@ mod tests { fn mutable_query(mut query: Query>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query>) {} @@ -1374,7 +1397,7 @@ mod tests { fn mutable_query(mut query: Query<(&mut A, &B)>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query<(&A, &B)>) {} @@ -1389,7 +1412,7 @@ mod tests { fn mutable_query(mut query: Query<(&mut A, &mut B)>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query<(&A, &B)>) {} @@ -1404,7 +1427,7 @@ mod tests { fn mutable_query(mut query: Query<(&mut A, &mut B), With>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query<(&A, &B), With>) {} @@ -1419,7 +1442,7 @@ mod tests { fn mutable_query(mut query: Query<(&mut A, &mut B), Without>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query<(&A, &B), Without>) {} @@ -1434,7 +1457,7 @@ mod tests { fn mutable_query(mut query: Query<(&mut A, &mut B), Added>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query<(&A, &B), Added>) {} @@ -1449,7 +1472,7 @@ mod tests { fn mutable_query(mut query: Query<(&mut A, &mut B), Changed>) { for _ in &mut query {} - immutable_query(query.to_readonly()); + immutable_query(query.as_readonly()); } fn immutable_query(_: Query<(&A, &B), Changed>) {} @@ -1555,24 +1578,6 @@ mod tests { 
}); } - #[test] - #[should_panic = "Encountered a mismatched World."] - fn query_validates_world_id() { - let mut world1 = World::new(); - let world2 = World::new(); - let qstate = world1.query::<()>(); - // SAFETY: doesnt access anything - let query = unsafe { - Query::new( - world2.as_unsafe_world_cell_readonly(), - &qstate, - Tick::new(0), - Tick::new(0), - ) - }; - query.iter(); - } - #[test] #[should_panic] fn assert_system_does_not_conflict() { @@ -1582,13 +1587,22 @@ mod tests { #[test] #[should_panic( - expected = "error[B0001]: Query in system bevy_ecs::system::tests::assert_world_and_entity_mut_system_does_conflict::system accesses component(s) in a way that conflicts with a previous system parameter. Consider using `Without` to create disjoint Queries or merging conflicting Queries into a `ParamSet`. See: https://bevyengine.org/learn/errors/b0001" + expected = "error[B0001]: Query in system bevy_ecs::system::tests::assert_world_and_entity_mut_system_does_conflict_first::system accesses component(s) in a way that conflicts with a previous system parameter. Consider using `Without` to create disjoint Queries or merging conflicting Queries into a `ParamSet`. See: https://bevyengine.org/learn/errors/b0001" )] - fn assert_world_and_entity_mut_system_does_conflict() { + fn assert_world_and_entity_mut_system_does_conflict_first() { fn system(_query: &World, _q2: Query) {} super::assert_system_does_not_conflict(system); } + #[test] + #[should_panic( + expected = "&World conflicts with a previous mutable system parameter. Allowing this would break Rust's mutability rules" + )] + fn assert_world_and_entity_mut_system_does_conflict_second() { + fn system(_: Query, _: &World) {} + super::assert_system_does_not_conflict(system); + } + #[test] #[should_panic( expected = "error[B0001]: Query in system bevy_ecs::system::tests::assert_entity_ref_and_entity_mut_system_does_conflict::system accesses component(s) in a way that conflicts with a previous system parameter. 
Consider using `Without` to create disjoint Queries or merging conflicting Queries into a `ParamSet`. See: https://bevyengine.org/learn/errors/b0001" @@ -1607,11 +1621,44 @@ mod tests { super::assert_system_does_not_conflict(system); } + #[test] + #[should_panic( + expected = "error[B0001]: Query in system bevy_ecs::system::tests::assert_deferred_world_and_entity_ref_system_does_conflict_first::system accesses component(s) in a way that conflicts with a previous system parameter. Consider using `Without` to create disjoint Queries or merging conflicting Queries into a `ParamSet`. See: https://bevyengine.org/learn/errors/b0001" + )] + fn assert_deferred_world_and_entity_ref_system_does_conflict_first() { + fn system(_world: DeferredWorld, _query: Query) {} + super::assert_system_does_not_conflict(system); + } + + #[test] + #[should_panic( + expected = "DeferredWorld in system bevy_ecs::system::tests::assert_deferred_world_and_entity_ref_system_does_conflict_second::system conflicts with a previous access." 
+ )] + fn assert_deferred_world_and_entity_ref_system_does_conflict_second() { + fn system(_query: Query, _world: DeferredWorld) {} + super::assert_system_does_not_conflict(system); + } + + #[test] + fn assert_deferred_world_and_empty_query_does_not_conflict_first() { + fn system(_world: DeferredWorld, _query: Query) {} + super::assert_system_does_not_conflict(system); + } + + #[test] + fn assert_deferred_world_and_empty_query_does_not_conflict_second() { + fn system(_query: Query, _world: DeferredWorld) {} + super::assert_system_does_not_conflict(system); + } + #[test] #[should_panic] fn panic_inside_system() { let mut world = World::new(); - run_system(&mut world, || panic!("this system panics")); + let system: fn() = || { + panic!("this system panics"); + }; + run_system(&mut world, system); } #[test] @@ -1652,9 +1699,10 @@ mod tests { assert_is_system(returning::<&str>.map(u64::from_str).map(Result::unwrap)); assert_is_system(static_system_param); assert_is_system( - exclusive_in_out::<(), Result<(), std::io::Error>>.map(|result| { - if let Err(error) = result { - log::error!("{:?}", error); + exclusive_in_out::<(), Result<(), std::io::Error>>.map(|_out| { + #[cfg(feature = "trace")] + if let Err(error) = _out { + tracing::error!("{}", error); } }), ); @@ -1766,6 +1814,7 @@ mod tests { } #[test] + #[should_panic] fn simple_fallible_system() { fn sys() -> Result { Err("error")?; @@ -1775,4 +1824,59 @@ mod tests { let mut world = World::new(); run_system(&mut world, sys); } + + // Regression test for + // https://github.com/bevyengine/bevy/issues/18778 + // + // Dear rustc team, please reach out if you encounter this + // in a crater run and we can work something out! + // + // These todo! 
macro calls should never be removed; + // they're intended to demonstrate real-world usage + // in a way that's clearer than simply calling `panic!` + // + // Because type inference behaves differently for functions and closures, + // we need to test both, in addition to explicitly annotating the return type + // to ensure that there are no upstream regressions there. + #[test] + fn nondiverging_never_trait_impls() { + // This test is a compilation test: + // no meaningful logic is ever actually evaluated. + // It is simply intended to check that the correct traits are implemented + // when todo! or similar nondiverging panics are used. + let mut world = World::new(); + let mut schedule = Schedule::default(); + + fn sys(_query: Query<&Name>) { + todo!() + } + + schedule.add_systems(sys); + schedule.add_systems(|_query: Query<&Name>| {}); + schedule.add_systems(|_query: Query<&Name>| todo!()); + #[expect(clippy::unused_unit, reason = "this forces the () return type")] + schedule.add_systems(|_query: Query<&Name>| -> () { todo!() }); + + fn obs(_trigger: Trigger) { + todo!() + } + + world.add_observer(obs); + world.add_observer(|_trigger: Trigger| {}); + world.add_observer(|_trigger: Trigger| todo!()); + #[expect(clippy::unused_unit, reason = "this forces the () return type")] + world.add_observer(|_trigger: Trigger| -> () { todo!() }); + + fn my_command(_world: &mut World) { + todo!() + } + + world.commands().queue(my_command); + world.commands().queue(|_world: &mut World| {}); + world.commands().queue(|_world: &mut World| todo!()); + #[expect(clippy::unused_unit, reason = "this forces the () return type")] + world + .commands() + .queue(|_world: &mut World| -> () { todo!() }); + } } diff --git a/crates/bevy_ecs/src/system/observer_system.rs b/crates/bevy_ecs/src/system/observer_system.rs index aa247d493964a..9bd35c53615ba 100644 --- a/crates/bevy_ecs/src/system/observer_system.rs +++ b/crates/bevy_ecs/src/system/observer_system.rs @@ -1,32 +1,44 @@ +use 
alloc::{borrow::Cow, vec::Vec}; +use core::marker::PhantomData; + use crate::{ + archetype::ArchetypeComponentId, + component::{ComponentId, Tick}, + error::Result, + never::Never, prelude::{Bundle, Trigger}, - system::System, + query::{Access, FilteredAccessSet}, + schedule::{Fallible, Infallible}, + system::{input::SystemIn, System}, + world::{unsafe_world_cell::UnsafeWorldCell, DeferredWorld, World}, }; -use super::IntoSystem; +use super::{IntoSystem, SystemParamValidationError}; /// Implemented for [`System`]s that have a [`Trigger`] as the first argument. -pub trait ObserverSystem: +pub trait ObserverSystem: System, Out = Out> + Send + 'static { } -impl< - E: 'static, - B: Bundle, - Out, - T: System, Out = Out> + Send + 'static, - > ObserverSystem for T +impl ObserverSystem for T where + T: System, Out = Out> + Send + 'static { } /// Implemented for systems that convert into [`ObserverSystem`]. +/// +/// # Usage notes +/// +/// This trait should only be used as a bound for trait implementations or as an +/// argument to a function. If an observer system needs to be returned from a +/// function or stored somewhere, use [`ObserverSystem`] instead of this trait. #[diagnostic::on_unimplemented( message = "`{Self}` cannot become an `ObserverSystem`", label = "the trait `IntoObserverSystem` is not implemented", note = "for function `ObserverSystem`s, ensure the first argument is a `Trigger` and any subsequent ones are `SystemParam`" )] -pub trait IntoObserverSystem: Send + 'static { +pub trait IntoObserverSystem: Send + 'static { /// The type of [`System`] that this instance converts into. 
type System: ObserverSystem; @@ -34,27 +46,168 @@ pub trait IntoObserverSystem: Send + 'static fn into_system(this: Self) -> Self::System; } -impl< - S: IntoSystem, Out, M> + Send + 'static, - M, - Out, - E: 'static, - B: Bundle, - > IntoObserverSystem for S +impl IntoObserverSystem for S where + S: IntoSystem, Out, M> + Send + 'static, S::System: ObserverSystem, + E: 'static, + B: Bundle, { - type System = , Out, M>>::System; + type System = S::System; fn into_system(this: Self) -> Self::System { IntoSystem::into_system(this) } } +impl IntoObserverSystem for S +where + S: IntoSystem, (), M> + Send + 'static, + S::System: ObserverSystem, + E: Send + Sync + 'static, + B: Bundle, +{ + type System = InfallibleObserverWrapper; + + fn into_system(this: Self) -> Self::System { + InfallibleObserverWrapper::new(IntoSystem::into_system(this)) + } +} +impl IntoObserverSystem for S +where + S: IntoSystem, Never, M> + Send + 'static, + E: Send + Sync + 'static, + B: Bundle, +{ + type System = InfallibleObserverWrapper; + + fn into_system(this: Self) -> Self::System { + InfallibleObserverWrapper::new(IntoSystem::into_system(this)) + } +} + +/// A wrapper that converts an observer system that returns `()` into one that returns `Ok(())`. +pub struct InfallibleObserverWrapper { + observer: S, + _marker: PhantomData<(E, B, Out)>, +} + +impl InfallibleObserverWrapper { + /// Create a new `InfallibleObserverWrapper`. 
+ pub fn new(observer: S) -> Self { + Self { + observer, + _marker: PhantomData, + } + } +} + +impl System for InfallibleObserverWrapper +where + S: ObserverSystem, + E: Send + Sync + 'static, + B: Bundle, + Out: Send + Sync + 'static, +{ + type In = Trigger<'static, E, B>; + type Out = Result; + + #[inline] + fn name(&self) -> Cow<'static, str> { + self.observer.name() + } + + #[inline] + fn component_access(&self) -> &Access { + self.observer.component_access() + } + + #[inline] + fn component_access_set(&self) -> &FilteredAccessSet { + self.observer.component_access_set() + } + + #[inline] + fn archetype_component_access(&self) -> &Access { + self.observer.archetype_component_access() + } + + #[inline] + fn is_send(&self) -> bool { + self.observer.is_send() + } + + #[inline] + fn is_exclusive(&self) -> bool { + self.observer.is_exclusive() + } + + #[inline] + fn has_deferred(&self) -> bool { + self.observer.has_deferred() + } + + #[inline] + unsafe fn run_unsafe( + &mut self, + input: SystemIn<'_, Self>, + world: UnsafeWorldCell, + ) -> Self::Out { + self.observer.run_unsafe(input, world); + Ok(()) + } + + #[inline] + fn apply_deferred(&mut self, world: &mut World) { + self.observer.apply_deferred(world); + } + + #[inline] + fn queue_deferred(&mut self, world: DeferredWorld) { + self.observer.queue_deferred(world); + } + + #[inline] + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { + self.observer.validate_param_unsafe(world) + } + + #[inline] + fn initialize(&mut self, world: &mut World) { + self.observer.initialize(world); + } + + #[inline] + fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) { + self.observer.update_archetype_component_access(world); + } + + #[inline] + fn check_change_tick(&mut self, change_tick: Tick) { + self.observer.check_change_tick(change_tick); + } + + #[inline] + fn get_last_run(&self) -> Tick { + self.observer.get_last_run() + } + + 
#[inline] + fn set_last_run(&mut self, last_run: Tick) { + self.observer.set_last_run(last_run); + } + + fn default_system_sets(&self) -> Vec { + self.observer.default_system_sets() + } +} + #[cfg(test)] mod tests { use crate::{ - self as bevy_ecs, event::Event, observer::Trigger, system::{In, IntoSystem}, diff --git a/crates/bevy_ecs/src/system/query.rs b/crates/bevy_ecs/src/system/query.rs index cc4312761ead4..183bdecfb4f64 100644 --- a/crates/bevy_ecs/src/system/query.rs +++ b/crates/bevy_ecs/src/system/query.rs @@ -1,46 +1,54 @@ use crate::{ batching::BatchingStrategy, component::Tick, - entity::{Entity, EntityBorrow, EntitySet}, + entity::{Entity, EntityDoesNotExistError, EntityEquivalent, EntitySet, UniqueEntityArray}, query::{ - QueryCombinationIter, QueryData, QueryEntityError, QueryFilter, QueryIter, QueryManyIter, - QueryManyUniqueIter, QueryParIter, QuerySingleError, QueryState, ROQueryItem, - ReadOnlyQueryData, + DebugCheckedUnwrap, NopWorldQuery, QueryCombinationIter, QueryData, QueryEntityError, + QueryFilter, QueryIter, QueryManyIter, QueryManyUniqueIter, QueryParIter, QueryParManyIter, + QueryParManyUniqueIter, QuerySingleError, QueryState, ROQueryItem, ReadOnlyQueryData, }, world::unsafe_world_cell::UnsafeWorldCell, }; use core::{ marker::PhantomData, + mem::MaybeUninit, ops::{Deref, DerefMut}, }; -/// [System parameter] that provides selective access to the [`Component`] data stored in a [`World`]. +/// A [system parameter] that provides selective access to the [`Component`] data stored in a [`World`]. /// -/// Enables access to [entity identifiers] and [components] from a system, without the need to directly access the world. -/// Its iterators and getter methods return *query items*. -/// Each query item is a type containing data relative to an entity. +/// Queries enable systems to access [entity identifiers] and [components] without requiring direct access to the [`World`]. 
+/// Its iterators and getter methods return *query items*, which are types containing data related to an entity. /// /// `Query` is a generic data structure that accepts two type parameters: /// -/// - **`D` (query data).** -/// The type of data contained in the query item. +/// - **`D` (query data)**: +/// The type of data fetched by the query, which will be returned as the query item. /// Only entities that match the requested data will generate an item. /// Must implement the [`QueryData`] trait. -/// - **`F` (query filter).** -/// A set of conditions that determines whether query items should be kept or discarded. +/// - **`F` (query filter)**: +/// An optional set of conditions that determine whether query items should be kept or discarded. +/// This defaults to [`unit`], which means no additional filters will be applied. /// Must implement the [`QueryFilter`] trait. -/// This type parameter is optional. /// +/// [system parameter]: crate::system::SystemParam +/// [`Component`]: crate::component::Component /// [`World`]: crate::world::World +/// [entity identifiers]: Entity +/// [components]: crate::component::Component /// /// # Similar parameters /// -/// [`Query`] has few sibling [`SystemParam`](crate::system::system_param::SystemParam)s, which perform additional validation: +/// `Query` has few sibling [`SystemParam`]s, which perform additional validation: +/// /// - [`Single`] - Exactly one matching query item. /// - [`Option`] - Zero or one matching query item. /// - [`Populated`] - At least one matching query item. /// -/// Those parameters will prevent systems from running if their requirements aren't met. +/// These parameters will prevent systems from running if their requirements are not met. 
+/// +/// [`SystemParam`]: crate::system::system_param::SystemParam +/// [`Option`]: Single /// /// # System parameter declaration /// @@ -49,318 +57,428 @@ use core::{ /// /// ## Component access /// -/// A query defined with a reference to a component as the query fetch type parameter can be used to generate items that refer to the data of said component. +/// You can fetch an entity's component by specifying a reference to that component in the query's data parameter: +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # +/// # #[derive(Component)] +/// # struct ComponentA; +/// # +/// // A component can be accessed by a shared reference... +/// fn immutable_query(query: Query<&ComponentA>) { +/// // ... +/// } /// +/// // ...or by a mutable reference. +/// fn mutable_query(query: Query<&mut ComponentA>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(immutable_query); +/// # bevy_ecs::system::assert_is_system(mutable_query); /// ``` +/// +/// Note that components need to be behind a reference (`&` or `&mut`), or the query will not compile: +/// +/// ```compile_fail,E0277 /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn immutable_ref( -/// // A component can be accessed by shared reference... -/// query: Query<&ComponentA> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(immutable_ref); -/// -/// # fn mutable_ref( -/// // ... or by mutable reference. -/// query: Query<&mut ComponentA> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(mutable_ref); +/// # +/// // This needs to be `&ComponentA` or `&mut ComponentA` in order to compile. +/// fn invalid_query(query: Query) { +/// // ... +/// } /// ``` /// /// ## Query filtering /// -/// Setting the query filter type parameter will ensure that each query item satisfies the given condition. 
+/// Setting the query filter type parameter will ensure that each query item satisfies the given condition: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; -/// # fn system( -/// // Just `ComponentA` data will be accessed, but only for entities that also contain -/// // `ComponentB`. -/// query: Query<&ComponentA, With> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// // `ComponentA` data will be accessed, but only for entities that also contain `ComponentB`. +/// fn filtered_query(query: Query<&ComponentA, With>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(filtered_query); /// ``` /// +/// Note that the filter is `With`, not `With<&ComponentB>`. Unlike query data, `With` +/// does require components to be behind a reference. +/// /// ## `QueryData` or `QueryFilter` tuples /// -/// Using tuples, each `Query` type parameter can contain multiple elements. +/// Using [`tuple`]s, each `Query` type parameter can contain multiple elements. +/// +/// In the following example two components are accessed simultaneously, and the query items are +/// filtered on two conditions: +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # +/// # #[derive(Component)] +/// # struct ComponentA; +/// # +/// # #[derive(Component)] +/// # struct ComponentB; +/// # +/// # #[derive(Component)] +/// # struct ComponentC; +/// # +/// # #[derive(Component)] +/// # struct ComponentD; +/// # +/// fn complex_query( +/// query: Query<(&mut ComponentA, &ComponentB), (With, Without)> +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(complex_query); +/// ``` /// -/// In the following example, two components are accessed simultaneously, and the query items are filtered on two conditions. +/// Note that this currently only works on tuples with 15 or fewer items. 
You may nest tuples to +/// get around this limit: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; +/// # /// # #[derive(Component)] /// # struct ComponentC; +/// # /// # #[derive(Component)] /// # struct ComponentD; -/// # fn immutable_ref( -/// query: Query<(&ComponentA, &ComponentB), (With, Without)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(immutable_ref); +/// # +/// fn nested_query( +/// query: Query<(&ComponentA, &ComponentB, (&mut ComponentC, &mut ComponentD))> +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(nested_query); /// ``` /// /// ## Entity identifier access /// -/// The identifier of an entity can be made available inside the query item by including [`Entity`] in the query fetch type parameter. +/// You can access [`Entity`], the entity identifier, by including it in the query data parameter: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// query: Query<(Entity, &ComponentA)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// fn entity_id_query(query: Query<(Entity, &ComponentA)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(entity_id_query); /// ``` /// +/// Be aware that [`Entity`] is not a component, so it does not need to be behind a reference. +/// /// ## Optional component access /// -/// A component can be made optional in a query by wrapping it into an [`Option`]. -/// In this way, a query item can still be generated even if the queried entity does not contain the wrapped component. -/// In this case, its corresponding value will be `None`. +/// A component can be made optional by wrapping it into an [`Option`]. In the following example, a +/// query item will still be generated even if the queried entity does not contain `ComponentB`. 
+/// When this is the case, `Option<&ComponentB>`'s corresponding value will be `None`. /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; -/// # fn system( -/// // Generates items for entities that contain `ComponentA`, and optionally `ComponentB`. -/// query: Query<(&ComponentA, Option<&ComponentB>)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// // A queried items must contain `ComponentA`. If they also contain `ComponentB`, its value will +/// // be fetched as well. +/// fn optional_component_query(query: Query<(&ComponentA, Option<&ComponentB>)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(optional_component_query); /// ``` /// -/// See the documentation for [`AnyOf`] to idiomatically declare many optional components. +/// Optional components can hurt performance in some cases, so please read the [performance] +/// section to learn more about them. Additionally, if you need to declare several optional +/// components, you may be interested in using [`AnyOf`]. /// -/// See the [performance] section to learn more about the impact of optional components. +/// [performance]: #performance +/// [`AnyOf`]: crate::query::AnyOf /// /// ## Disjoint queries /// -/// A system cannot contain two queries that break Rust's mutability rules. -/// In this case, the [`Without`] filter can be used to disjoint them. +/// A system cannot contain two queries that break Rust's mutability rules, or else it will panic +/// when initialized. This can often be fixed with the [`Without`] filter, which makes the queries +/// disjoint. /// -/// In the following example, two queries mutably access the same component. -/// Executing this system will panic, since an entity could potentially match the two queries at the same time by having both `Player` and `Enemy` components. -/// This would violate mutability rules. 
+/// In the following example, the two queries can mutably access the same `&mut Health` component +/// if an entity has both the `Player` and `Enemy` components. Bevy will catch this and panic, +/// however, instead of breaking Rust's mutability rules: /// /// ```should_panic /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct Health; +/// # /// # #[derive(Component)] /// # struct Player; +/// # /// # #[derive(Component)] /// # struct Enemy; /// # /// fn randomize_health( /// player_query: Query<&mut Health, With>, /// enemy_query: Query<&mut Health, With>, -/// ) -/// # {} -/// # let mut randomize_health_system = IntoSystem::into_system(randomize_health); -/// # let mut world = World::new(); -/// # randomize_health_system.initialize(&mut world); -/// # randomize_health_system.run((), &mut world); +/// ) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(randomize_health); /// ``` /// -/// Adding a `Without` filter will disjoint the queries. -/// In this way, any entity that has both `Player` and `Enemy` components is excluded from both queries. +/// Adding a [`Without`] filter will disjoint the queries. In the following example, any entity +/// that has both the `Player` and `Enemy` components will be excluded from _both_ queries: /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct Health; +/// # /// # #[derive(Component)] /// # struct Player; +/// # /// # #[derive(Component)] /// # struct Enemy; /// # /// fn randomize_health( /// player_query: Query<&mut Health, (With, Without)>, /// enemy_query: Query<&mut Health, (With, Without)>, -/// ) -/// # {} -/// # let mut randomize_health_system = IntoSystem::into_system(randomize_health); -/// # let mut world = World::new(); -/// # randomize_health_system.initialize(&mut world); -/// # randomize_health_system.run((), &mut world); +/// ) { +/// // ... 
+/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(randomize_health); /// ``` /// -/// An alternative to this idiom is to wrap the conflicting queries into a [`ParamSet`](super::ParamSet). +/// An alternative solution to this problem would be to wrap the conflicting queries in +/// [`ParamSet`]. +/// +/// [`Without`]: crate::query::Without +/// [`ParamSet`]: crate::system::ParamSet /// /// ## Whole Entity Access /// -/// [`EntityRef`]s can be fetched from a query. This will give read-only access to any component on the entity, -/// and can be use to dynamically fetch any component without baking it into the query type. Due to this global -/// access to the entity, this will block any other system from parallelizing with it. As such these queries -/// should be sparingly used. +/// [`EntityRef`] can be used in a query to gain read-only access to all components of an entity. +/// This is useful when dynamically fetching components instead of baking them into the query type. /// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// query: Query<(EntityRef, &ComponentA)> -/// # ) {} -/// # bevy_ecs::system::assert_is_system(system); +/// # +/// fn all_components_query(query: Query<(EntityRef, &ComponentA)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_is_system(all_components_query); /// ``` /// -/// As `EntityRef` can read any component on an entity, a query using it will conflict with *any* mutable -/// access. It is strongly advised to couple `EntityRef` queries with the use of either `With`/`Without` -/// filters or `ParamSets`. This also limits the scope of the query, which will improve iteration performance -/// and also allows it to parallelize with other non-conflicting systems. +/// As [`EntityRef`] can read any component on an entity, a query using it will conflict with *any* +/// mutable component access. 
/// /// ```should_panic /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// // This will panic! -/// query: Query<(EntityRef, &mut ComponentA)> -/// # ) {} -/// # bevy_ecs::system::assert_system_does_not_conflict(system); +/// # +/// // `EntityRef` provides read access to *all* components on an entity. When combined with +/// // `&mut ComponentA` in the same query, it creates a conflict because `EntityRef` could read +/// // `&ComponentA` while `&mut ComponentA` attempts to modify it - violating Rust's borrowing +/// // rules. +/// fn invalid_query(query: Query<(EntityRef, &mut ComponentA)>) { +/// // ... +/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(invalid_query); /// ``` +/// +/// It is strongly advised to couple [`EntityRef`] queries with the use of either [`With`] / +/// [`Without`] filters or [`ParamSet`]s. Not only does this improve the performance and +/// parallelization of the system, but it enables systems to gain mutable access to other +/// components: +/// /// ``` /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; +/// # /// # #[derive(Component)] /// # struct ComponentB; -/// # fn system( -/// // This will not panic. -/// query_a: Query>, -/// query_b: Query<&mut ComponentB, Without>, -/// # ) {} -/// # bevy_ecs::system::assert_system_does_not_conflict(system); +/// # +/// // The first query only reads entities that have `ComponentA`, while the second query only +/// // modifies entities that *don't* have `ComponentA`. Because neither query will access the same +/// // entity, this system does not conflict. +/// fn disjoint_query( +/// query_a: Query>, +/// query_b: Query<&mut ComponentB, Without>, +/// ) { +/// // ... 
+/// } +/// # +/// # bevy_ecs::system::assert_system_does_not_conflict(disjoint_query); /// ``` /// +/// The fundamental rule: [`EntityRef`]'s ability to read all components means it can never +/// coexist with mutable access. [`With`] / [`Without`] filters can guarantee this by keeping the +/// queries on completely separate entities. +/// +/// [`EntityRef`]: crate::world::EntityRef +/// [`With`]: crate::query::With +/// /// # Accessing query items /// -/// The following table summarizes the behavior of the safe methods that can be used to get query items. +/// The following table summarizes the behavior of safe methods that can be used to get query +/// items: /// /// |Query methods|Effect| -/// |:---:|---| -/// |[`iter`]\[[`_mut`][`iter_mut`]]|Returns an iterator over all query items.| -/// |[[`iter().for_each()`][`for_each`]\[[`iter_mut().for_each()`][`for_each`]],
[`par_iter`]\[[`_mut`][`par_iter_mut`]]|Runs a specified function for each query item.| -/// |[`iter_many`]\[[`_mut`][`iter_many_mut`]]|Iterates or runs a specified function over query items generated by a list of entities.| -/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]]|Returns an iterator over all combinations of a specified number of query items.| -/// |[`get`]\[[`_mut`][`get_mut`]]|Returns the query item for the specified entity.| -/// |[`many`]\[[`_mut`][`many_mut`]],
[`get_many`]\[[`_mut`][`get_many_mut`]]|Returns the query items for the specified entities.| -/// |[`single`]\[[`_mut`][`single_mut`]],
[`get_single`]\[[`_mut`][`get_single_mut`]]|Returns the query item while verifying that there aren't others.| +/// |-|-| +/// |[`iter`]\[[`_mut`][`iter_mut`]\]|Returns an iterator over all query items.| +/// |[`iter[_mut]().for_each()`][`for_each`],
[`par_iter`]\[[`_mut`][`par_iter_mut`]\]|Runs a specified function for each query item.| +/// |[`iter_many`]\[[`_unique`][`iter_many_unique`]\]\[[`_mut`][`iter_many_mut`]\]|Iterates over query items that match a list of entities.| +/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]\]|Iterates over all combinations of query items.| +/// |[`single`](Self::single)\[[`_mut`][`single_mut`]\]|Returns a single query item if only one exists.| +/// |[`get`]\[[`_mut`][`get_mut`]\]|Returns the query item for a specified entity.| +/// |[`get_many`]\[[`_unique`][`get_many_unique`]\]\[[`_mut`][`get_many_mut`]\]|Returns all query items that match a list of entities.| /// /// There are two methods for each type of query operation: immutable and mutable (ending with `_mut`). /// When using immutable methods, the query items returned are of type [`ROQueryItem`], a read-only version of the query item. /// In this circumstance, every mutable reference in the query fetch type parameter is substituted by a shared reference. /// +/// [`iter`]: Self::iter +/// [`iter_mut`]: Self::iter_mut +/// [`for_each`]: #iteratorfor_each +/// [`par_iter`]: Self::par_iter +/// [`par_iter_mut`]: Self::par_iter_mut +/// [`iter_many`]: Self::iter_many +/// [`iter_many_unique`]: Self::iter_many_unique +/// [`iter_many_mut`]: Self::iter_many_mut +/// [`iter_combinations`]: Self::iter_combinations +/// [`iter_combinations_mut`]: Self::iter_combinations_mut +/// [`single_mut`]: Self::single_mut +/// [`get`]: Self::get +/// [`get_mut`]: Self::get_mut +/// [`get_many`]: Self::get_many +/// [`get_many_unique`]: Self::get_many_unique +/// [`get_many_mut`]: Self::get_many_mut +/// /// # Performance /// -/// Creating a `Query` is a low-cost constant operation. -/// Iterating it, on the other hand, fetches data from the world and generates items, which can have a significant computational cost. +/// Creating a `Query` is a low-cost constant operation. 
Iterating it, on the other hand, fetches +/// data from the world and generates items, which can have a significant computational cost. +/// +/// Two systems cannot be executed in parallel if both access the same component type where at +/// least one of the accesses is mutable. Because of this, it is recommended for queries to only +/// fetch mutable access to components when necessary, since immutable access can be parallelized. /// -/// [`Table`] component storage type is much more optimized for query iteration than [`SparseSet`]. +/// Query filters ([`With`] / [`Without`]) can improve performance because they narrow the kinds of +/// entities that can be fetched. Systems that access fewer kinds of entities are more likely to be +/// parallelized by the scheduler. /// -/// Two systems cannot be executed in parallel if both access the same component type where at least one of the accesses is mutable. -/// This happens unless the executor can verify that no entity could be found in both queries. +/// On the other hand, be careful using optional components (`Option<&ComponentA>`) and +/// [`EntityRef`] because they broaden the amount of entities kinds that can be accessed. This is +/// especially true of a query that _only_ fetches optional components or [`EntityRef`], as the +/// query would iterate over all entities in the world. /// -/// Optional components increase the number of entities a query has to match against. -/// This can hurt iteration performance, especially if the query solely consists of only optional components, since the query would iterate over each entity in the world. +/// There are two types of [component storage types]: [`Table`] and [`SparseSet`]. [`Table`] offers +/// fast iteration speeds, but slower insertion and removal speeds. [`SparseSet`] is the opposite: +/// it offers fast component insertion and removal speeds, but slower iteration speeds. 
/// -/// The following table compares the computational complexity of the various methods and operations, where: +/// The following table compares the computational complexity of the various methods and +/// operations, where: /// -/// - **n** is the number of entities that match the query, -/// - **r** is the number of elements in a combination, -/// - **k** is the number of involved entities in the operation, -/// - **a** is the number of archetypes in the world, -/// - **C** is the [binomial coefficient], used to count combinations. -/// nCr is read as "*n* choose *r*" and is equivalent to the number of distinct unordered subsets of *r* elements that can be taken from a set of *n* elements. +/// - **n** is the number of entities that match the query. +/// - **r** is the number of elements in a combination. +/// - **k** is the number of involved entities in the operation. +/// - **a** is the number of archetypes in the world. +/// - **C** is the [binomial coefficient], used to count combinations. nCr is +/// read as "*n* choose *r*" and is equivalent to the number of distinct unordered subsets of *r* +/// elements that can be taken from a set of *n* elements. /// /// |Query operation|Computational complexity| -/// |:---:|:---:| -/// |[`iter`]\[[`_mut`][`iter_mut`]]|O(n)| -/// |[[`iter().for_each()`][`for_each`]\[[`iter_mut().for_each()`][`for_each`]],
[`par_iter`]\[[`_mut`][`par_iter_mut`]]|O(n)| -/// |[`iter_many`]\[[`_mut`][`iter_many_mut`]]|O(k)| -/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]]|O(nCr)| -/// |[`get`]\[[`_mut`][`get_mut`]]|O(1)| -/// |([`get_`][`get_many`])[`many`]|O(k)| -/// |([`get_`][`get_many_mut`])[`many_mut`]|O(k2)| -/// |[`single`]\[[`_mut`][`single_mut`]],
[`get_single`]\[[`_mut`][`get_single_mut`]]|O(a)| -/// |Archetype based filtering ([`With`], [`Without`], [`Or`])|O(a)| +/// |-|-| +/// |[`iter`]\[[`_mut`][`iter_mut`]\]|O(n)| +/// |[`iter[_mut]().for_each()`][`for_each`],
[`par_iter`]\[[`_mut`][`par_iter_mut`]\]|O(n)| +/// |[`iter_many`]\[[`_mut`][`iter_many_mut`]\]|O(k)| +/// |[`iter_combinations`]\[[`_mut`][`iter_combinations_mut`]\]|O(nCr)| +/// |[`single`](Self::single)\[[`_mut`][`single_mut`]\]|O(a)| +/// |[`get`]\[[`_mut`][`get_mut`]\]|O(1)| +/// |[`get_many`]|O(k)| +/// |[`get_many_mut`]|O(k2)| +/// |Archetype-based filtering ([`With`], [`Without`], [`Or`])|O(a)| /// |Change detection filtering ([`Added`], [`Changed`])|O(a + n)| /// +/// [component storage types]: crate::component::StorageType +/// [`Table`]: crate::storage::Table +/// [`SparseSet`]: crate::storage::SparseSet +/// [binomial coefficient]: https://en.wikipedia.org/wiki/Binomial_coefficient +/// [`Or`]: crate::query::Or +/// [`Added`]: crate::query::Added +/// [`Changed`]: crate::query::Changed +/// /// # `Iterator::for_each` /// -/// `for_each` methods are seen to be generally faster than directly iterating through `iter` on worlds with high archetype -/// fragmentation, and may enable additional optimizations like [autovectorization]. It is strongly advised to only use -/// [`Iterator::for_each`] if it tangibly improves performance. *Always* be sure profile or benchmark both before and -/// after the change! +/// The `for_each` methods appear to be generally faster than `for`-loops when run on worlds with +/// high archetype fragmentation, and may enable additional optimizations like [autovectorization]. It +/// is strongly advised to only use [`Iterator::for_each`] if it tangibly improves performance. +/// *Always* profile or benchmark before and after the change! /// /// ```rust /// # use bevy_ecs::prelude::*; +/// # /// # #[derive(Component)] /// # struct ComponentA; -/// # fn system( -/// # query: Query<&ComponentA>, -/// # ) { -/// // This might be result in better performance... -/// query.iter().for_each(|component| { -/// // do things with the component -/// }); -/// // ...than this. Always be sure to benchmark to validate the difference! 
-/// for component in query.iter() { -/// // do things with the component +/// # +/// fn system(query: Query<&ComponentA>) { +/// // This may result in better performance... +/// query.iter().for_each(|component| { +/// // ... +/// }); +/// +/// // ...than this. Always benchmark to validate the difference! +/// for component in query.iter() { +/// // ... +/// } /// } -/// # } -/// # bevy_ecs::system::assert_system_does_not_conflict(system); +/// # +/// # bevy_ecs::system::assert_is_system(system); /// ``` /// -/// [`Component`]: crate::component::Component /// [autovectorization]: https://en.wikipedia.org/wiki/Automatic_vectorization -/// [`Added`]: crate::query::Added -/// [`AnyOf`]: crate::query::AnyOf -/// [binomial coefficient]: https://en.wikipedia.org/wiki/Binomial_coefficient -/// [`Changed`]: crate::query::Changed -/// [components]: crate::component::Component -/// [entity identifiers]: Entity -/// [`EntityRef`]: crate::world::EntityRef -/// [`for_each`]: #iterator-for-each -/// [`get`]: Self::get -/// [`get_many`]: Self::get_many -/// [`get_many_mut`]: Self::get_many_mut -/// [`get_mut`]: Self::get_mut -/// [`get_single`]: Self::get_single -/// [`get_single_mut`]: Self::get_single_mut -/// [`iter`]: Self::iter -/// [`iter_combinations`]: Self::iter_combinations -/// [`iter_combinations_mut`]: Self::iter_combinations_mut -/// [`iter_many`]: Self::iter_many -/// [`iter_many_mut`]: Self::iter_many_mut -/// [`iter_mut`]: Self::iter_mut -/// [`many`]: Self::many -/// [`many_mut`]: Self::many_mut -/// [`Or`]: crate::query::Or -/// [`par_iter`]: Self::par_iter -/// [`par_iter_mut`]: Self::par_iter_mut -/// [performance]: #performance -/// [`Single`]: Single -/// [`Option`]: Single -/// [`single`]: Self::single -/// [`single_mut`]: Self::single_mut -/// [`SparseSet`]: crate::storage::SparseSet -/// [System parameter]: crate::system::SystemParam -/// [`Table`]: crate::storage::Table -/// [`With`]: crate::query::With -/// [`Without`]: crate::query::Without pub 
struct Query<'world, 'state, D: QueryData, F: QueryFilter = ()> { // SAFETY: Must have access to the components registered in `state`. world: UnsafeWorldCell<'world>, @@ -369,6 +487,14 @@ pub struct Query<'world, 'state, D: QueryData, F: QueryFilter = ()> { this_run: Tick, } +impl Clone for Query<'_, '_, D, F> { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for Query<'_, '_, D, F> {} + impl core::fmt::Debug for Query<'_, '_, D, F> { fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { f.debug_struct("Query") @@ -384,14 +510,11 @@ impl core::fmt::Debug for Query<'_, '_, D, F> { impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// Creates a new query. /// - /// # Panics - /// - /// This will panic if the world used to create `state` is not `world`. - /// /// # Safety /// - /// This will create a query that could violate memory safety rules. Make sure that this is only - /// called in ways that ensure the queries have unique mutable access. + /// * This will create a query that could violate memory safety rules. Make sure that this is only + /// called in ways that ensure the queries have unique mutable access. + /// * `world` must be the world used to create `state`. #[inline] pub(crate) unsafe fn new( world: UnsafeWorldCell<'w>, @@ -399,8 +522,6 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { last_run: Tick, this_run: Tick, ) -> Self { - state.validate_world(world.id()); - Self { world, state, @@ -414,9 +535,42 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// For example, `Query<(&mut D1, &D2, &mut D3), With>` will become `Query<(&D1, &D2, &D3), With>`. /// This can be useful when working around the borrow checker, /// or reusing functionality between systems via functions that accept query types. 
- pub fn to_readonly(&self) -> Query<'_, 's, D::ReadOnly, F> { + /// + /// # See also + /// + /// [`into_readonly`](Self::into_readonly) for a version that consumes the `Query` to return one with the full `'world` lifetime. + pub fn as_readonly(&self) -> Query<'_, 's, D::ReadOnly, F> { + // SAFETY: The reborrowed query is converted to read-only, so it cannot perform mutable access, + // and the original query is held with a shared borrow, so it cannot perform mutable access either. + unsafe { self.reborrow_unsafe() }.into_readonly() + } + + /// Returns another `Query` from this does not return any data, which can be faster. + fn as_nop(&self) -> Query<'_, 's, NopWorldQuery, F> { + let new_state = self.state.as_nop(); + // SAFETY: + // - The reborrowed query is converted to read-only, so it cannot perform mutable access, + // and the original query is held with a shared borrow, so it cannot perform mutable access either. + // Note that although `NopWorldQuery` itself performs *no* access and could soundly alias a mutable query, + // it has the original `QueryState::component_access` and could be `transmute`d to a read-only query. + // - The world matches because it was the same one used to construct self. + unsafe { Query::new(self.world, new_state, self.last_run, self.this_run) } + } + + /// Returns another `Query` from this that fetches the read-only version of the query items. + /// + /// For example, `Query<(&mut D1, &D2, &mut D3), With>` will become `Query<(&D1, &D2, &D3), With>`. + /// This can be useful when working around the borrow checker, + /// or reusing functionality between systems via functions that accept query types. + /// + /// # See also + /// + /// [`as_readonly`](Self::as_readonly) for a version that borrows the `Query` instead of consuming it. + pub fn into_readonly(self) -> Query<'w, 's, D::ReadOnly, F> { let new_state = self.state.as_readonly(); - // SAFETY: This is memory safe because it turns the query immutable. 
+ // SAFETY: + // - This is memory safe because it turns the query immutable. + // - The world matches because it was the same one used to construct self. unsafe { Query::new(self.world, new_state, self.last_run, self.this_run) } } @@ -447,6 +601,42 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { pub fn reborrow(&mut self) -> Query<'_, 's, D, F> { // SAFETY: this query is exclusively borrowed while the new one exists, so // no overlapping access can occur. + unsafe { self.reborrow_unsafe() } + } + + /// Returns a new `Query` reborrowing the access from this one. + /// The current query will still be usable while the new one exists, but must not be used in a way that violates aliasing. + /// + /// # Safety + /// + /// This function makes it possible to violate Rust's aliasing guarantees. + /// You must make sure this call does not result in a mutable or shared reference to a component with a mutable reference. + /// + /// # See also + /// + /// - [`reborrow`](Self::reborrow) for the safe versions. + pub unsafe fn reborrow_unsafe(&self) -> Query<'_, 's, D, F> { + // SAFETY: + // - This is memory safe because the caller ensures that there are no conflicting references. + // - The world matches because it was the same one used to construct self. + unsafe { self.copy_unsafe() } + } + + /// Returns a new `Query` copying the access from this one. + /// The current query will still be usable while the new one exists, but must not be used in a way that violates aliasing. + /// + /// # Safety + /// + /// This function makes it possible to violate Rust's aliasing guarantees. + /// You must make sure this call does not result in a mutable or shared reference to a component with a mutable reference. + /// + /// # See also + /// + /// - [`reborrow_unsafe`](Self::reborrow_unsafe) for a safer version that constrains the returned `'w` lifetime to the length of the borrow. 
+ unsafe fn copy_unsafe(&self) -> Query<'w, 's, D, F> { + // SAFETY: + // - This is memory safe because the caller ensures that there are no conflicting references. + // - The world matches because it was the same one used to construct self. unsafe { Query::new(self.world, self.state, self.last_run, self.this_run) } } @@ -478,14 +668,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// [`iter_mut`](Self::iter_mut) for mutable query items. #[inline] pub fn iter(&self) -> QueryIter<'_, 's, D::ReadOnly, F> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The query is read-only, so it can be aliased even if it was originally mutable. - unsafe { - self.state - .as_readonly() - .iter_unchecked_manual(self.world, self.last_run, self.this_run) - } + self.as_readonly().into_iter() } /// Returns an [`Iterator`] over the query items. @@ -516,11 +699,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// [`iter`](Self::iter) for read-only query items. #[inline] pub fn iter_mut(&mut self) -> QueryIter<'_, 's, D, F> { - // SAFETY: `self.world` has permission to access the required components. - unsafe { - self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) - } + self.reborrow().into_iter() } /// Returns a [`QueryCombinationIter`] over all combinations of `K` read-only query items without repetition. @@ -545,20 +724,12 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # See also /// /// - [`iter_combinations_mut`](Self::iter_combinations_mut) for mutable query item combinations. + /// - [`iter_combinations_inner`](Self::iter_combinations_inner) for mutable query item combinations with the full `'world` lifetime. #[inline] pub fn iter_combinations( &self, ) -> QueryCombinationIter<'_, 's, D::ReadOnly, F, K> { - // SAFETY: - // - `self.world` has permission to access the required components. 
- // - The query is read-only, so it can be aliased even if it was originally mutable. - unsafe { - self.state.as_readonly().iter_combinations_unchecked_manual( - self.world, - self.last_run, - self.this_run, - ) - } + self.as_readonly().iter_combinations_inner() } /// Returns a [`QueryCombinationIter`] over all combinations of `K` query items without repetition. @@ -583,15 +754,42 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # See also /// /// - [`iter_combinations`](Self::iter_combinations) for read-only query item combinations. + /// - [`iter_combinations_inner`](Self::iter_combinations_inner) for mutable query item combinations with the full `'world` lifetime. #[inline] pub fn iter_combinations_mut( &mut self, ) -> QueryCombinationIter<'_, 's, D, F, K> { + self.reborrow().iter_combinations_inner() + } + + /// Returns a [`QueryCombinationIter`] over all combinations of `K` query items without repetition. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// This iterator is always guaranteed to return results from each unique pair of matching entities. + /// Iteration order is not guaranteed. + /// + /// # Example + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// # #[derive(Component)] + /// # struct ComponentA; + /// fn some_system(query: Query<&mut ComponentA>) { + /// let mut combinations = query.iter_combinations_inner(); + /// while let Some([mut a1, mut a2]) = combinations.fetch_next() { + /// // mutably access components data + /// } + /// } + /// ``` + /// + /// # See also + /// + /// - [`iter_combinations`](Self::iter_combinations) for read-only query item combinations. + /// - [`iter_combinations_mut`](Self::iter_combinations_mut) for mutable query item combinations. + #[inline] + pub fn iter_combinations_inner(self) -> QueryCombinationIter<'w, 's, D, F, K> { // SAFETY: `self.world` has permission to access the required components. 
- unsafe { - self.state - .iter_combinations_unchecked_manual(self.world, self.last_run, self.this_run) - } + unsafe { QueryCombinationIter::new(self.world, self.state, self.last_run, self.this_run) } } /// Returns an [`Iterator`] over the read-only query items generated from an [`Entity`] list. @@ -620,7 +818,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// ) { /// for friends in &friends_query { /// for counter in counter_query.iter_many(&friends.list) { - /// println!("Friend's counter: {:?}", counter.value); + /// println!("Friend's counter: {}", counter.value); /// } /// } /// } @@ -630,22 +828,13 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # See also /// /// - [`iter_many_mut`](Self::iter_many_mut) to get mutable query items. + /// - [`iter_many_inner`](Self::iter_many_inner) to get mutable query items with the full `'world` lifetime. #[inline] - pub fn iter_many>( + pub fn iter_many>( &self, entities: EntityList, ) -> QueryManyIter<'_, 's, D::ReadOnly, F, EntityList::IntoIter> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The query is read-only, so it can be aliased even if it was originally mutable. - unsafe { - self.state.as_readonly().iter_many_unchecked_manual( - entities, - self.world, - self.last_run, - self.this_run, - ) - } + self.as_readonly().iter_many_inner(entities) } /// Returns an iterator over the query items generated from an [`Entity`] list. 
@@ -674,23 +863,46 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// for friends in &friends_query { /// let mut iter = counter_query.iter_many_mut(&friends.list); /// while let Some(mut counter) = iter.fetch_next() { - /// println!("Friend's counter: {:?}", counter.value); + /// println!("Friend's counter: {}", counter.value); /// counter.value += 1; /// } /// } /// } /// # bevy_ecs::system::assert_is_system(system); /// ``` + /// # See also + /// + /// - [`iter_many`](Self::iter_many) to get read-only query items. + /// - [`iter_many_inner`](Self::iter_many_inner) to get mutable query items with the full `'world` lifetime. #[inline] - pub fn iter_many_mut>( + pub fn iter_many_mut>( &mut self, entities: EntityList, ) -> QueryManyIter<'_, 's, D, F, EntityList::IntoIter> { + self.reborrow().iter_many_inner(entities) + } + + /// Returns an iterator over the query items generated from an [`Entity`] list. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// Items are returned in the order of the list of entities, and may not be unique if the input + /// doesn't guarantee uniqueness. Entities that don't match the query are skipped. + /// + /// # See also + /// + /// - [`iter_many`](Self::iter_many) to get read-only query items. + /// - [`iter_many_mut`](Self::iter_many_mut) to get mutable query items. + #[inline] + pub fn iter_many_inner>( + self, + entities: EntityList, + ) -> QueryManyIter<'w, 's, D, F, EntityList::IntoIter> { // SAFETY: `self.world` has permission to access the required components. unsafe { - self.state.iter_many_unchecked_manual( - entities, + QueryManyIter::new( self.world, + self.state, + entities, self.last_run, self.this_run, ) @@ -744,22 +956,13 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # See also /// /// - [`iter_many_unique_mut`](Self::iter_many_unique_mut) to get mutable query items. 
+ /// - [`iter_many_unique_inner`](Self::iter_many_unique_inner) to get with the actual "inner" world lifetime. #[inline] pub fn iter_many_unique( &self, entities: EntityList, ) -> QueryManyUniqueIter<'_, 's, D::ReadOnly, F, EntityList::IntoIter> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The query is read-only, so it can be aliased even if it was originally mutable. - unsafe { - self.state.as_readonly().iter_many_unique_unchecked_manual( - entities, - self.world, - self.last_run, - self.this_run, - ) - } + self.as_readonly().iter_many_unique_inner(entities) } /// Returns an iterator over the unique query items generated from an [`EntitySet`]. @@ -805,16 +1008,76 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// } /// # bevy_ecs::system::assert_is_system(system); /// ``` + /// # See also + /// + /// - [`iter_many_unique`](Self::iter_many_unique) to get read-only query items. + /// - [`iter_many_unique_inner`](Self::iter_many_unique_inner) to get with the actual "inner" world lifetime. #[inline] pub fn iter_many_unique_mut( &mut self, entities: EntityList, ) -> QueryManyUniqueIter<'_, 's, D, F, EntityList::IntoIter> { + self.reborrow().iter_many_unique_inner(entities) + } + + /// Returns an iterator over the unique query items generated from an [`EntitySet`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// Items are returned in the order of the list of entities. Entities that don't match the query are skipped. + /// + /// # Examples + /// + /// ``` + /// # use bevy_ecs::{prelude::*, entity::{EntitySet, UniqueEntityIter}}; + /// # use core::slice; + /// #[derive(Component)] + /// struct Counter { + /// value: i32 + /// } + /// + /// // `Friends` ensures that it only lists unique entities. 
+ /// #[derive(Component)] + /// struct Friends { + /// unique_list: Vec, + /// } + /// + /// impl<'a> IntoIterator for &'a Friends { + /// type Item = &'a Entity; + /// type IntoIter = UniqueEntityIter>; + /// + /// fn into_iter(self) -> Self::IntoIter { + /// // SAFETY: `Friends` ensures that it unique_list contains only unique entities. + /// unsafe { UniqueEntityIter::from_iterator_unchecked(self.unique_list.iter()) } + /// } + /// } + /// + /// fn system( + /// friends_query: Query<&Friends>, + /// mut counter_query: Query<&mut Counter>, + /// ) { + /// let friends = friends_query.single().unwrap(); + /// for mut counter in counter_query.iter_many_unique_inner(friends) { + /// println!("Friend's counter: {:?}", counter.value); + /// counter.value += 1; + /// } + /// } + /// # bevy_ecs::system::assert_is_system(system); + /// ``` + /// # See also + /// + /// - [`iter_many_unique`](Self::iter_many_unique) to get read-only query items. + /// - [`iter_many_unique_mut`](Self::iter_many_unique_mut) to get mutable query items. + #[inline] + pub fn iter_many_unique_inner( + self, + entities: EntityList, + ) -> QueryManyUniqueIter<'w, 's, D, F, EntityList::IntoIter> { // SAFETY: `self.world` has permission to access the required components. unsafe { - self.state.iter_many_unique_unchecked_manual( - entities, + QueryManyUniqueIter::new( self.world, + self.state, + entities, self.last_run, self.this_run, ) @@ -836,13 +1099,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`iter`](Self::iter) and [`iter_mut`](Self::iter_mut) for the safe versions. #[inline] pub unsafe fn iter_unsafe(&self) -> QueryIter<'_, 's, D, F> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The caller ensures that this operation will not result in any aliased mutable accesses. 
- unsafe { - self.state - .iter_unchecked_manual(self.world, self.last_run, self.this_run) - } + // SAFETY: The caller promises that this will not result in multiple mutable references. + unsafe { self.reborrow_unsafe() }.into_iter() } /// Iterates over all possible combinations of `K` query items without repetition. @@ -862,13 +1120,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { pub unsafe fn iter_combinations_unsafe( &self, ) -> QueryCombinationIter<'_, 's, D, F, K> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The caller ensures that this operation will not result in any aliased mutable accesses. - unsafe { - self.state - .iter_combinations_unchecked_manual(self.world, self.last_run, self.this_run) - } + // SAFETY: The caller promises that this will not result in multiple mutable references. + unsafe { self.reborrow_unsafe() }.iter_combinations_inner() } /// Returns an [`Iterator`] over the query items generated from an [`Entity`] list. @@ -885,21 +1138,12 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # See also /// /// - [`iter_many_mut`](Self::iter_many_mut) to safely access the query items. - pub unsafe fn iter_many_unsafe>( + pub unsafe fn iter_many_unsafe>( &self, entities: EntityList, ) -> QueryManyIter<'_, 's, D, F, EntityList::IntoIter> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The caller ensures that this operation will not result in any aliased mutable accesses. - unsafe { - self.state.iter_many_unchecked_manual( - entities, - self.world, - self.last_run, - self.this_run, - ) - } + // SAFETY: The caller promises that this will not result in multiple mutable references. + unsafe { self.reborrow_unsafe() }.iter_many_inner(entities) } /// Returns an [`Iterator`] over the unique query items generated from an [`Entity`] list. 
@@ -913,22 +1157,15 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`iter_many_mut`](Self::iter_many_mut) to safely access the query items. + /// - [`iter_many_unique`](Self::iter_many_unique) to get read-only query items. + /// - [`iter_many_unique_mut`](Self::iter_many_unique_mut) to get mutable query items. + /// - [`iter_many_unique_inner`](Self::iter_many_unique_inner) to get with the actual "inner" world lifetime. pub unsafe fn iter_many_unique_unsafe( &self, entities: EntityList, ) -> QueryManyUniqueIter<'_, 's, D, F, EntityList::IntoIter> { - // SAFETY: - // - `self.world` has permission to access the required components. - // - The caller ensures that this operation will not result in any aliased mutable accesses. - unsafe { - self.state.iter_many_unique_unchecked_manual( - entities, - self.world, - self.last_run, - self.this_run, - ) - } + // SAFETY: The caller promises that this will not result in multiple mutable references. + unsafe { self.reborrow_unsafe() }.iter_many_unique_inner(entities) } /// Returns a parallel iterator over the query results for the given [`World`]. @@ -948,13 +1185,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// [`World`]: crate::world::World #[inline] pub fn par_iter(&self) -> QueryParIter<'_, '_, D::ReadOnly, F> { - QueryParIter { - world: self.world, - state: self.state.as_readonly(), - last_run: self.last_run, - this_run: self.this_run, - batching_strategy: BatchingStrategy::new(), - } + self.as_readonly().par_iter_inner() } /// Returns a parallel iterator over the query results for the given [`World`]. @@ -989,6 +1220,37 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// [`World`]: crate::world::World #[inline] pub fn par_iter_mut(&mut self) -> QueryParIter<'_, '_, D, F> { + self.reborrow().par_iter_inner() + } + + /// Returns a parallel iterator over the query results for the given [`World`](crate::world::World). 
+ /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// This parallel iterator is always guaranteed to return results from each matching entity once and + /// only once. Iteration order and thread assignment is not guaranteed. + /// + /// If the `multithreaded` feature is disabled, iterating with this operates identically to [`Iterator::for_each`] + /// on [`QueryIter`]. + /// + /// # Example + /// + /// Here, the `gravity_system` updates the `Velocity` component of every entity that contains it: + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// # + /// # #[derive(Component)] + /// # struct Velocity { x: f32, y: f32, z: f32 } + /// fn gravity_system(query: Query<&mut Velocity>) { + /// const DELTA: f32 = 1.0 / 60.0; + /// query.par_iter_inner().for_each(|mut velocity| { + /// velocity.y -= 9.8 * DELTA; + /// }); + /// } + /// # bevy_ecs::system::assert_is_system(gravity_system); + /// ``` + #[inline] + pub fn par_iter_inner(self) -> QueryParIter<'w, 's, D, F> { QueryParIter { world: self.world, state: self.state, @@ -998,6 +1260,94 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { } } + /// Returns a parallel iterator over the read-only query items generated from an [`Entity`] list. + /// + /// Entities that don't match the query are skipped. Iteration order and thread assignment is not guaranteed. + /// + /// If the `multithreaded` feature is disabled, iterating with this operates identically to [`Iterator::for_each`] + /// on [`QueryManyIter`]. + /// + /// This can only be called for read-only queries. To avoid potential aliasing, there is no `par_iter_many_mut` equivalent. + /// See [`par_iter_many_unique_mut`] for an alternative using [`EntitySet`]. + /// + /// Note that you must use the `for_each` method to iterate over the + /// results, see [`par_iter_mut`] for an example. 
+ /// + /// [`par_iter_many_unique_mut`]: Self::par_iter_many_unique_mut + /// [`par_iter_mut`]: Self::par_iter_mut + #[inline] + pub fn par_iter_many>( + &self, + entities: EntityList, + ) -> QueryParManyIter<'_, '_, D::ReadOnly, F, EntityList::Item> { + QueryParManyIter { + world: self.world, + state: self.state.as_readonly(), + entity_list: entities.into_iter().collect(), + last_run: self.last_run, + this_run: self.this_run, + batching_strategy: BatchingStrategy::new(), + } + } + + /// Returns a parallel iterator over the unique read-only query items generated from an [`EntitySet`]. + /// + /// Entities that don't match the query are skipped. Iteration order and thread assignment is not guaranteed. + /// + /// If the `multithreaded` feature is disabled, iterating with this operates identically to [`Iterator::for_each`] + /// on [`QueryManyUniqueIter`]. + /// + /// This can only be called for read-only queries, see [`par_iter_many_unique_mut`] for write-queries. + /// + /// Note that you must use the `for_each` method to iterate over the + /// results, see [`par_iter_mut`] for an example. + /// + /// [`par_iter_many_unique_mut`]: Self::par_iter_many_unique_mut + /// [`par_iter_mut`]: Self::par_iter_mut + #[inline] + pub fn par_iter_many_unique>( + &self, + entities: EntityList, + ) -> QueryParManyUniqueIter<'_, '_, D::ReadOnly, F, EntityList::Item> { + QueryParManyUniqueIter { + world: self.world, + state: self.state.as_readonly(), + entity_list: entities.into_iter().collect(), + last_run: self.last_run, + this_run: self.this_run, + batching_strategy: BatchingStrategy::new(), + } + } + + /// Returns a parallel iterator over the unique query items generated from an [`EntitySet`]. + /// + /// Entities that don't match the query are skipped. Iteration order and thread assignment is not guaranteed. + /// + /// If the `multithreaded` feature is disabled, iterating with this operates identically to [`Iterator::for_each`] + /// on [`QueryManyUniqueIter`]. 
+ /// + /// This can only be called for mutable queries, see [`par_iter_many_unique`] for read-only-queries. + /// + /// Note that you must use the `for_each` method to iterate over the + /// results, see [`par_iter_mut`] for an example. + /// + /// [`par_iter_many_unique`]: Self::par_iter_many_unique + /// [`par_iter_mut`]: Self::par_iter_mut + #[inline] + pub fn par_iter_many_unique_mut>( + &mut self, + entities: EntityList, + ) -> QueryParManyUniqueIter<'_, '_, D, F, EntityList::Item> { + QueryParManyUniqueIter { + world: self.world, + state: self.state, + entity_list: entities.into_iter().collect(), + last_run: self.last_run, + this_run: self.this_run, + batching_strategy: BatchingStrategy::new(), + } + } + /// Returns the read-only query item for the given [`Entity`]. /// /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. @@ -1033,16 +1383,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`get_mut`](Self::get_mut) to get a mutable query item. #[inline] pub fn get(&self, entity: Entity) -> Result, QueryEntityError> { - // SAFETY: system runs without conflicts with other systems. - // same-system queries have runtime borrow checks when they conflict - unsafe { - self.state.as_readonly().get_unchecked_manual( - self.world, - entity, - self.last_run, - self.this_run, - ) - } + self.as_readonly().get_inner(entity) } /// Returns the read-only query items for the given array of [`Entity`]. @@ -1051,22 +1392,101 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. /// The elements of the array do not need to be unique, unlike `get_many_mut`. 
/// + /// # Examples + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// use bevy_ecs::query::QueryEntityError; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// let entity_vec: Vec = (0..3).map(|i| world.spawn(A(i)).id()).collect(); + /// let entities: [Entity; 3] = entity_vec.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&A>(); + /// let query = query_state.query(&world); + /// + /// let component_values = query.get_many(entities).unwrap(); + /// + /// assert_eq!(component_values, [&A(0), &A(1), &A(2)]); + /// + /// let wrong_entity = Entity::from_raw(365); + /// + /// assert_eq!( + /// match query.get_many([wrong_entity]).unwrap_err() { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// ``` + /// /// # See also /// /// - [`get_many_mut`](Self::get_many_mut) to get mutable query items. + /// - [`get_many_unique`](Self::get_many_unique) to only handle unique inputs. /// - [`many`](Self::many) for the panicking version. #[inline] pub fn get_many( &self, entities: [Entity; N], ) -> Result<[ROQueryItem<'_, D>; N], QueryEntityError> { - // SAFETY: - // - `&self` ensures there is no mutable access to any components accessible to this query. - // - `self.world` matches `self.state`. - unsafe { - self.state - .get_many_read_only_manual(self.world, entities, self.last_run, self.this_run) - } + // Note that we call a separate `*_inner` method from `get_many_mut` + // because we don't need to check for duplicates. + self.as_readonly().get_many_inner(entities) + } + + /// Returns the read-only query items for the given [`UniqueEntityArray`]. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. 
+ /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// + /// let mut query_state = world.query::<&A>(); + /// let query = query_state.query(&world); + /// + /// let component_values = query.get_many_unique(entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(0), &A(1), &A(2)]); + /// + /// let wrong_entity = Entity::from_raw(365); + /// + /// assert_eq!( + /// match query.get_many_unique(UniqueEntityArray::from([wrong_entity])).unwrap_err() { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// ``` + /// + /// # See also + /// + /// - [`get_many_unique_mut`](Self::get_many_mut) to get mutable query items. + /// - [`get_many`](Self::get_many) to handle inputs with duplicates. + #[inline] + pub fn get_many_unique( + &self, + entities: UniqueEntityArray, + ) -> Result<[ROQueryItem<'_, D>; N], QueryEntityError> { + self.as_readonly().get_many_unique_inner(entities) } /// Returns the read-only query items for the given array of [`Entity`]. @@ -1112,6 +1532,10 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`get_many`](Self::get_many) for the non-panicking version. #[inline] #[track_caller] + #[deprecated( + since = "0.16.0", + note = "Use `get_many` instead and handle the Result." 
+ )] pub fn many(&self, entities: [Entity; N]) -> [ROQueryItem<'_, D>; N] { match self.get_many(entities) { Ok(items) => items, @@ -1147,36 +1571,315 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get`](Self::get) to get a read-only query item. + /// - [`get`](Self::get) to get a read-only query item. + #[inline] + pub fn get_mut(&mut self, entity: Entity) -> Result, QueryEntityError> { + self.reborrow().get_inner(entity) + } + + /// Returns the query item for the given [`Entity`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. + /// + /// This is always guaranteed to run in `O(1)` time. + /// + /// # See also + /// + /// - [`get_mut`](Self::get_mut) to get the item using a mutable borrow of the [`Query`]. + #[inline] + pub fn get_inner(self, entity: Entity) -> Result, QueryEntityError> { + // SAFETY: system runs without conflicts with other systems. 
+ // same-system queries have runtime borrow checks when they conflict + unsafe { + let location = self + .world + .entities() + .get(entity) + .ok_or(EntityDoesNotExistError::new(entity, self.world.entities()))?; + if !self + .state + .matched_archetypes + .contains(location.archetype_id.index()) + { + return Err(QueryEntityError::QueryDoesNotMatch( + entity, + location.archetype_id, + )); + } + let archetype = self + .world + .archetypes() + .get(location.archetype_id) + .debug_checked_unwrap(); + let mut fetch = D::init_fetch( + self.world, + &self.state.fetch_state, + self.last_run, + self.this_run, + ); + let mut filter = F::init_fetch( + self.world, + &self.state.filter_state, + self.last_run, + self.this_run, + ); + + let table = self + .world + .storages() + .tables + .get(location.table_id) + .debug_checked_unwrap(); + D::set_archetype(&mut fetch, &self.state.fetch_state, archetype, table); + F::set_archetype(&mut filter, &self.state.filter_state, archetype, table); + + if F::filter_fetch(&mut filter, entity, location.table_row) { + Ok(D::fetch(&mut fetch, entity, location.table_row)) + } else { + Err(QueryEntityError::QueryDoesNotMatch( + entity, + location.archetype_id, + )) + } + } + } + + /// Returns the query items for the given array of [`Entity`]. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity, duplicate entities or mismatched component, a [`QueryEntityError`] is returned instead. 
+ /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::prelude::*; + /// use bevy_ecs::query::QueryEntityError; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// + /// let entities: Vec = (0..3).map(|i| world.spawn(A(i)).id()).collect(); + /// let entities: [Entity; 3] = entities.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// let wrong_entity = Entity::from_raw(57); + /// let invalid_entity = world.spawn_empty().id(); + /// + /// + /// let mut query_state = world.query::<&mut A>(); + /// let mut query = query_state.query_mut(&mut world); + /// + /// let mut mutable_component_values = query.get_many_mut(entities).unwrap(); + /// + /// for mut a in &mut mutable_component_values { + /// a.0 += 5; + /// } + /// + /// let component_values = query.get_many(entities).unwrap(); + /// + /// assert_eq!(component_values, [&A(5), &A(6), &A(7)]); + /// + /// assert_eq!( + /// match query + /// .get_many_mut([wrong_entity]) + /// .unwrap_err() + /// { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// assert_eq!( + /// match query + /// .get_many_mut([invalid_entity]) + /// .unwrap_err() + /// { + /// QueryEntityError::QueryDoesNotMatch(entity, _) => entity, + /// _ => panic!(), + /// }, + /// invalid_entity + /// ); + /// assert_eq!( + /// query + /// .get_many_mut([entities[0], entities[0]]) + /// .unwrap_err(), + /// QueryEntityError::AliasedMutability(entities[0]) + /// ); + /// ``` + /// # See also + /// + /// - [`get_many`](Self::get_many) to get read-only query items without checking for duplicate entities. + /// - [`many_mut`](Self::many_mut) for the panicking version. 
+ #[inline] + pub fn get_many_mut( + &mut self, + entities: [Entity; N], + ) -> Result<[D::Item<'_>; N], QueryEntityError> { + self.reborrow().get_many_mut_inner(entities) + } + + /// Returns the query items for the given [`UniqueEntityArray`]. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. + /// + /// # Examples + /// + /// ``` + /// use bevy_ecs::{prelude::*, query::QueryEntityError, entity::{EntitySetIterator, UniqueEntityArray, UniqueEntityVec}}; + /// + /// #[derive(Component, PartialEq, Debug)] + /// struct A(usize); + /// + /// let mut world = World::new(); + /// + /// let entity_set: UniqueEntityVec = world.spawn_batch((0..3).map(A)).collect_set(); + /// let entity_set: UniqueEntityArray<3> = entity_set.try_into().unwrap(); + /// + /// world.spawn(A(73)); + /// let wrong_entity = Entity::from_raw(57); + /// let invalid_entity = world.spawn_empty().id(); + /// + /// + /// let mut query_state = world.query::<&mut A>(); + /// let mut query = query_state.query_mut(&mut world); + /// + /// let mut mutable_component_values = query.get_many_unique_mut(entity_set).unwrap(); + /// + /// for mut a in &mut mutable_component_values { + /// a.0 += 5; + /// } + /// + /// let component_values = query.get_many_unique(entity_set).unwrap(); + /// + /// assert_eq!(component_values, [&A(5), &A(6), &A(7)]); + /// + /// assert_eq!( + /// match query + /// .get_many_unique_mut(UniqueEntityArray::from([wrong_entity])) + /// .unwrap_err() + /// { + /// QueryEntityError::EntityDoesNotExist(error) => error.entity, + /// _ => panic!(), + /// }, + /// wrong_entity + /// ); + /// assert_eq!( + /// match query + /// .get_many_unique_mut(UniqueEntityArray::from([invalid_entity])) + /// .unwrap_err() + /// { + /// QueryEntityError::QueryDoesNotMatch(entity, _) => entity, + /// _ => panic!(), + /// }, + /// invalid_entity + /// ); + /// ``` + /// # See also + 
/// + /// - [`get_many_unique`](Self::get_many) to get read-only query items. + #[inline] + pub fn get_many_unique_mut( + &mut self, + entities: UniqueEntityArray, + ) -> Result<[D::Item<'_>; N], QueryEntityError> { + self.reborrow().get_many_unique_inner(entities) + } + + /// Returns the query items for the given array of [`Entity`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity, duplicate entities or mismatched component, a [`QueryEntityError`] is returned instead. + /// + /// # See also + /// + /// - [`get_many`](Self::get_many) to get read-only query items without checking for duplicate entities. + /// - [`get_many_mut`](Self::get_many_mut) to get items using a mutable reference. + /// - [`get_many_inner`](Self::get_many_mut_inner) to get read-only query items with the actual "inner" world lifetime. + #[inline] + pub fn get_many_mut_inner( + self, + entities: [Entity; N], + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + // Verify that all entities are unique + for i in 0..N { + for j in 0..i { + if entities[i] == entities[j] { + return Err(QueryEntityError::AliasedMutability(entities[i])); + } + } + } + // SAFETY: All entities are unique, so the results don't alias. + unsafe { self.get_many_impl(entities) } + } + + /// Returns the query items for the given array of [`Entity`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// The returned query items are in the same order as the input. + /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is returned instead. + /// + /// # See also + /// + /// - [`get_many`](Self::get_many) to get read-only query items without checking for duplicate entities. + /// - [`get_many_mut`](Self::get_many_mut) to get items using a mutable reference. 
+ /// - [`get_many_mut_inner`](Self::get_many_mut_inner) to get mutable query items with the actual "inner" world lifetime. #[inline] - pub fn get_mut(&mut self, entity: Entity) -> Result, QueryEntityError> { - // SAFETY: system runs without conflicts with other systems. - // same-system queries have runtime borrow checks when they conflict - unsafe { - self.state - .get_unchecked_manual(self.world, entity, self.last_run, self.this_run) - } + pub fn get_many_inner( + self, + entities: [Entity; N], + ) -> Result<[D::Item<'w>; N], QueryEntityError> + where + D: ReadOnlyQueryData, + { + // SAFETY: The query results are read-only, so they don't conflict if there are duplicate entities. + unsafe { self.get_many_impl(entities) } } - /// Returns the query items for the given array of [`Entity`]. + /// Returns the query items for the given [`UniqueEntityArray`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. /// /// The returned query items are in the same order as the input. /// In case of a nonexisting entity, duplicate entities or mismatched component, a [`QueryEntityError`] is returned instead. /// /// # See also /// - /// - [`get_many`](Self::get_many) to get read-only query items. - /// - [`many_mut`](Self::many_mut) for the panicking version. + /// - [`get_many_unique`](Self::get_many_unique) to get read-only query items without checking for duplicate entities. + /// - [`get_many_unique_mut`](Self::get_many_unique_mut) to get items using a mutable reference. #[inline] - pub fn get_many_mut( - &mut self, + pub fn get_many_unique_inner( + self, + entities: UniqueEntityArray, + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + // SAFETY: All entities are unique, so the results don't alias. + unsafe { self.get_many_impl(entities.into_inner()) } + } + + /// Returns the query items for the given array of [`Entity`]. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. 
+ /// + /// # Safety + /// + /// The caller must ensure that the query data returned for the entities does not conflict, + /// either because they are all unique or because the data is read-only. + unsafe fn get_many_impl( + self, entities: [Entity; N], - ) -> Result<[D::Item<'_>; N], QueryEntityError> { - // SAFETY: scheduler ensures safe Query world access - unsafe { - self.state - .get_many_unchecked_manual(self.world, entities, self.last_run, self.this_run) + ) -> Result<[D::Item<'w>; N], QueryEntityError> { + let mut values = [(); N].map(|_| MaybeUninit::uninit()); + + for (value, entity) in core::iter::zip(&mut values, entities) { + // SAFETY: The caller asserts that the results don't alias + let item = unsafe { self.copy_unsafe() }.get_inner(entity)?; + *value = MaybeUninit::new(item); } + + // SAFETY: Each value has been fully initialized. + Ok(values.map(|x| unsafe { x.assume_init() })) } /// Returns the query items for the given array of [`Entity`]. @@ -1229,6 +1932,10 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`many`](Self::many) to get read-only query items. #[inline] #[track_caller] + #[deprecated( + since = "0.16.0", + note = "Use `get_many_mut` instead and handle the Result." + )] pub fn many_mut(&mut self, entities: [Entity; N]) -> [D::Item<'_>; N] { match self.get_many_mut(entities) { Ok(items) => items, @@ -1252,42 +1959,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// - [`get_mut`](Self::get_mut) for the safe version. #[inline] pub unsafe fn get_unchecked(&self, entity: Entity) -> Result, QueryEntityError> { - // SEMI-SAFETY: system runs without conflicts with other systems. - // same-system queries have runtime borrow checks when they conflict - unsafe { - self.state - .get_unchecked_manual(self.world, entity, self.last_run, self.this_run) - } - } - - /// Returns a single read-only query item when there is exactly one entity matching the query. 
- /// - /// # Panics - /// - /// This method panics if the number of query items is **not** exactly one. - /// - /// # Example - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # #[derive(Component)] - /// # struct Player; - /// # #[derive(Component)] - /// # struct Position(f32, f32); - /// fn player_system(query: Query<&Position, With>) { - /// let player_position = query.single(); - /// // do something with player_position - /// } - /// # bevy_ecs::system::assert_is_system(player_system); - /// ``` - /// - /// # See also - /// - /// - [`get_single`](Self::get_single) for the non-panicking version. - /// - [`single_mut`](Self::single_mut) to get the mutable query item. - #[track_caller] - pub fn single(&self) -> ROQueryItem<'_, D> { - self.get_single().unwrap() + // SAFETY: The caller promises that this will not result in multiple mutable references. + unsafe { self.reborrow_unsafe() }.get_inner(entity) } /// Returns a single read-only query item when there is exactly one entity matching the query. @@ -1302,7 +1975,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # #[derive(Component)] /// # struct PlayerScore(i32); /// fn player_scoring_system(query: Query<&PlayerScore>) { - /// match query.get_single() { + /// match query.single() { /// Ok(PlayerScore(score)) => { /// println!("Score: {}", score); /// } @@ -1319,27 +1992,21 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get_single_mut`](Self::get_single_mut) to get the mutable query item. - /// - [`single`](Self::single) for the panicking version. + /// - [`single_mut`](Self::single_mut) to get the mutable query item. #[inline] + pub fn single(&self) -> Result, QuerySingleError> { + self.as_readonly().single_inner() + } + + /// A deprecated alias for [`single`](Self::single). 
+ #[deprecated(since = "0.16.0", note = "Please use `single` instead")] pub fn get_single(&self) -> Result, QuerySingleError> { - // SAFETY: - // the query ensures that the components it accesses are not mutably accessible somewhere else - // and the query is read only. - unsafe { - self.state.as_readonly().get_single_unchecked_manual( - self.world, - self.last_run, - self.this_run, - ) - } + self.single() } /// Returns a single query item when there is exactly one entity matching the query. /// - /// # Panics - /// - /// This method panics if the number of query items is **not** exactly one. + /// If the number of query items is not exactly one, a [`QuerySingleError`] is returned instead. /// /// # Example /// @@ -1352,7 +2019,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # struct Health(u32); /// # /// fn regenerate_player_health_system(mut query: Query<&mut Health, With>) { - /// let mut health = query.single_mut(); + /// let mut health = query.single_mut().expect("Error: Could not find a single player."); /// health.0 += 1; /// } /// # bevy_ecs::system::assert_is_system(regenerate_player_health_system); @@ -1360,14 +2027,20 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get_single_mut`](Self::get_single_mut) for the non-panicking version. /// - [`single`](Self::single) to get the read-only query item. - #[track_caller] - pub fn single_mut(&mut self) -> D::Item<'_> { - self.get_single_mut().unwrap() + #[inline] + pub fn single_mut(&mut self) -> Result, QuerySingleError> { + self.reborrow().single_inner() + } + + /// A deprecated alias for [`single_mut`](Self::single_mut). + #[deprecated(since = "0.16.0", note = "Please use `single_mut` instead")] + pub fn get_single_mut(&mut self) -> Result, QuerySingleError> { + self.single_mut() } /// Returns a single query item when there is exactly one entity matching the query. 
+ /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. /// /// If the number of query items is not exactly one, a [`QuerySingleError`] is returned instead. /// @@ -1381,8 +2054,8 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # #[derive(Component)] /// # struct Health(u32); /// # - /// fn regenerate_player_health_system(mut query: Query<&mut Health, With>) { - /// let mut health = query.get_single_mut().expect("Error: Could not find a single player."); + /// fn regenerate_player_health_system(query: Query<&mut Health, With>) { + /// let mut health = query.single_inner().expect("Error: Could not find a single player."); /// health.0 += 1; /// } /// # bevy_ecs::system::assert_is_system(regenerate_player_health_system); @@ -1390,16 +2063,21 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// # See also /// - /// - [`get_single`](Self::get_single) to get the read-only query item. - /// - [`single_mut`](Self::single_mut) for the panicking version. + /// - [`single`](Self::single) to get the read-only query item. + /// - [`single_mut`](Self::single_mut) to get the mutable query item. + /// - [`single_inner`](Self::single_inner) for the panicking version. 
#[inline] - pub fn get_single_mut(&mut self) -> Result, QuerySingleError> { - // SAFETY: - // the query ensures mutable access to the components it accesses, and the query - // is uniquely borrowed - unsafe { - self.state - .get_single_unchecked_manual(self.world, self.last_run, self.this_run) + pub fn single_inner(self) -> Result, QuerySingleError> { + let mut query = self.into_iter(); + let first = query.next(); + let extra = query.next().is_some(); + + match (first, extra) { + (Some(r), false) => Ok(r), + (None, _) => Err(QuerySingleError::NoEntities(core::any::type_name::())), + (Some(_), _) => Err(QuerySingleError::MultipleEntities(core::any::type_name::< + Self, + >())), } } @@ -1433,14 +2111,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// [`Changed`]: crate::query::Changed #[inline] pub fn is_empty(&self) -> bool { - // SAFETY: - // - `self.world` has permission to read any data required by the WorldQuery. - // - `&self` ensures that no one currently has write access. - // - `self.world` matches `self.state`. - unsafe { - self.state - .is_empty_unsafe_world_cell(self.world, self.last_run, self.this_run) - } + self.as_nop().iter().next().is_none() } /// Returns `true` if the given [`Entity`] matches the query. @@ -1469,13 +2140,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// ``` #[inline] pub fn contains(&self, entity: Entity) -> bool { - // SAFETY: NopFetch does not access any members while &self ensures no one has exclusive access - unsafe { - self.state - .as_nop() - .get_unchecked_manual(self.world, entity, self.last_run, self.this_run) - .is_ok() - } + self.as_nop().get(entity).is_ok() } /// Returns a [`QueryLens`] that can be used to get a query with a more general fetch. 
@@ -1507,7 +2172,7 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// # world.spawn((A(10), B(5))); /// # /// fn reusable_function(lens: &mut QueryLens<&A>) { - /// assert_eq!(lens.query().single().0, 10); + /// assert_eq!(lens.query().single().unwrap().0, 10); /// } /// /// // We can use the function in a system that takes the exact query. @@ -1636,6 +2301,80 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { self.transmute_lens_filtered::() } + /// Returns a [`QueryLens`] that can be used to get a query with a more general fetch. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// For example, this can transform a `Query<(&A, &mut B)>` to a `Query<&B>`. + /// This can be useful for passing the query to another function. Note that since + /// filter terms are dropped, non-archetypal filters like [`Added`](crate::query::Added) and + /// [`Changed`](crate::query::Changed) will not be respected. To maintain or change filter + /// terms see [`Self::transmute_lens_filtered`] + /// + /// ## Panics + /// + /// This will panic if `NewD` is not a subset of the original fetch `Q` + /// + /// ## Example + /// + /// ```rust + /// # use bevy_ecs::prelude::*; + /// # use bevy_ecs::system::QueryLens; + /// # + /// # #[derive(Component)] + /// # struct A(usize); + /// # + /// # #[derive(Component)] + /// # struct B(usize); + /// # + /// # let mut world = World::new(); + /// # + /// # world.spawn((A(10), B(5))); + /// # + /// fn reusable_function(mut lens: QueryLens<&A>) { + /// assert_eq!(lens.query().single().unwrap().0, 10); + /// } + /// + /// // We can use the function in a system that takes the exact query. + /// fn system_1(query: Query<&A>) { + /// reusable_function(query.into_query_lens()); + /// } + /// + /// // We can also use it with a query that does not match exactly + /// // by transmuting it. 
+ /// fn system_2(query: Query<(&mut A, &B)>) { + /// let mut lens = query.transmute_lens_inner::<&A>(); + /// reusable_function(lens); + /// } + /// + /// # let mut schedule = Schedule::default(); + /// # schedule.add_systems((system_1, system_2)); + /// # schedule.run(&mut world); + /// ``` + /// + /// ## Allowed Transmutes + /// + /// Besides removing parameters from the query, you can also + /// make limited changes to the types of parameters. + /// + /// * Can always add/remove [`Entity`] + /// * Can always add/remove [`EntityLocation`] + /// * Can always add/remove [`&Archetype`] + /// * `Ref` <-> `&T` + /// * `&mut T` -> `&T` + /// * `&mut T` -> `Ref` + /// * [`EntityMut`](crate::world::EntityMut) -> [`EntityRef`](crate::world::EntityRef) + /// + /// [`EntityLocation`]: crate::entity::EntityLocation + /// [`&Archetype`]: crate::archetype::Archetype + /// + /// # See also + /// + /// - [`transmute_lens`](Self::transmute_lens) to convert to a lens using a mutable borrow of the [`Query`]. + #[track_caller] + pub fn transmute_lens_inner(self) -> QueryLens<'w, NewD> { + self.transmute_lens_filtered_inner::() + } + /// Equivalent to [`Self::transmute_lens`] but also includes a [`QueryFilter`] type. /// /// Note that the lens will iterate the same tables and archetypes as the original query. This means that @@ -1646,6 +2385,24 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { pub fn transmute_lens_filtered( &mut self, ) -> QueryLens<'_, NewD, NewF> { + self.reborrow().transmute_lens_filtered_inner() + } + + /// Equivalent to [`Self::transmute_lens_inner`] but also includes a [`QueryFilter`] type. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// Note that the lens will iterate the same tables and archetypes as the original query. 
This means that + /// additional archetypal query terms like [`With`](crate::query::With) and [`Without`](crate::query::Without) + /// will not necessarily be respected and non-archetypal terms like [`Added`](crate::query::Added) and + /// [`Changed`](crate::query::Changed) will only be respected if they are in the type signature. + /// + /// # See also + /// + /// - [`transmute_lens_filtered`](Self::transmute_lens_filtered) to convert to a lens using a mutable borrow of the [`Query`]. + #[track_caller] + pub fn transmute_lens_filtered_inner( + self, + ) -> QueryLens<'w, NewD, NewF> { let state = self.state.transmute_filtered::(self.world); QueryLens { world: self.world, @@ -1660,6 +2417,15 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { self.transmute_lens() } + /// Gets a [`QueryLens`] with the same accesses as the existing query + /// + /// # See also + /// + /// - [`as_query_lens`](Self::as_query_lens) to convert to a lens using a mutable borrow of the [`Query`]. + pub fn into_query_lens(self) -> QueryLens<'w, D> { + self.transmute_lens_inner() + } + /// Returns a [`QueryLens`] that can be used to get a query with the combined fetch. /// /// For example, this can take a `Query<&A>` and a `Query<&B>` and return a `Query<(&A, &B)>`. @@ -1714,13 +2480,40 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { /// /// Like `transmute_lens` the query terms can be changed with some restrictions. /// See [`Self::transmute_lens`] for more details. - pub fn join( - &mut self, - other: &mut Query, - ) -> QueryLens<'_, NewD> { + pub fn join<'a, OtherD: QueryData, NewD: QueryData>( + &'a mut self, + other: &'a mut Query, + ) -> QueryLens<'a, NewD> { self.join_filtered(other) } + /// Returns a [`QueryLens`] that can be used to get a query with the combined fetch. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. 
+ /// + /// For example, this can take a `Query<&A>` and a `Query<&B>` and return a `Query<(&A, &B)>`. + /// The returned query will only return items with both `A` and `B`. Note that since filters + /// are dropped, non-archetypal filters like `Added` and `Changed` will not be respected. + /// To maintain or change filter terms see `Self::join_filtered`. + /// + /// ## Panics + /// + /// This will panic if `NewD` is not a subset of the union of the original fetch `Q` and `OtherD`. + /// + /// ## Allowed Transmutes + /// + /// Like `transmute_lens` the query terms can be changed with some restrictions. + /// See [`Self::transmute_lens`] for more details. + /// + /// # See also + /// + /// - [`join`](Self::join) to join using a mutable borrow of the [`Query`]. + pub fn join_inner( + self, + other: Query<'w, '_, OtherD>, + ) -> QueryLens<'w, NewD> { + self.join_filtered_inner(other) + } + /// Equivalent to [`Self::join`] but also includes a [`QueryFilter`] type. /// /// Note that the lens with iterate a subset of the original queries' tables /// and archetypes. This means that additional archetypal query terms like /// `With` and `Without` will not necessarily be respected and non-archetypal /// terms like `Added` and `Changed` will only be respected if they are in /// the type signature. pub fn join_filtered< + 'a, OtherD: QueryData, OtherF: QueryFilter, NewD: QueryData, NewF: QueryFilter, >( - &mut self, - other: &mut Query, - ) -> QueryLens<'_, NewD, NewF> { + &'a mut self, + other: &'a mut Query, + ) -> QueryLens<'a, NewD, NewF> { + self.reborrow().join_filtered_inner(other.reborrow()) + } + + /// Equivalent to [`Self::join_inner`] but also includes a [`QueryFilter`] type. + /// This consumes the [`Query`] to return results with the actual "inner" world lifetime. + /// + /// Note that the lens will iterate a subset of the original queries' tables + /// and archetypes.
This means that additional archetypal query terms like + /// `With` and `Without` will not necessarily be respected and non-archetypal + /// terms like `Added` and `Changed` will only be respected if they are in + /// the type signature. + /// + /// # See also + /// + /// - [`join_filtered`](Self::join_filtered) to join using a mutable borrow of the [`Query`]. + pub fn join_filtered_inner< + OtherD: QueryData, + OtherF: QueryFilter, + NewD: QueryData, + NewF: QueryFilter, + >( + self, + other: Query<'w, '_, OtherD, OtherF>, + ) -> QueryLens<'w, NewD, NewF> { let state = self .state .join_filtered::(self.world, other.state); @@ -1749,6 +2567,19 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Query<'w, 's, D, F> { } } +impl<'w, 's, D: QueryData, F: QueryFilter> IntoIterator for Query<'w, 's, D, F> { + type Item = D::Item<'w>; + type IntoIter = QueryIter<'w, 's, D, F>; + + fn into_iter(self) -> Self::IntoIter { + // SAFETY: + // - `self.world` has permission to access the required components. + // - We consume the query, so mutable queries cannot alias. + // Read-only queries are `Copy`, but may alias themselves. + unsafe { QueryIter::new(self.world, self.state, self.last_run, self.this_run) } + } +} + impl<'w, 's, D: QueryData, F: QueryFilter> IntoIterator for &'w Query<'_, 's, D, F> { type Item = ROQueryItem<'w, D>; type IntoIter = QueryIter<'w, 's, D::ReadOnly, F>; @@ -1768,52 +2599,6 @@ impl<'w, 's, D: QueryData, F: QueryFilter> IntoIterator for &'w mut Query<'_, 's } impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter> Query<'w, 's, D, F> { - /// Returns the query item for the given [`Entity`], with the actual "inner" world lifetime. - /// - /// In case of a nonexisting entity or mismatched component, a [`QueryEntityError`] is - /// returned instead. - /// - /// This can only return immutable data (mutable data will be cast to an immutable form). - /// See [`get_mut`](Self::get_mut) for queries that contain at least one mutable component. 
- /// - /// # Example - /// - /// Here, `get` is used to retrieve the exact query item of the entity specified by the - /// `SelectedCharacter` resource. - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # - /// # #[derive(Resource)] - /// # struct SelectedCharacter { entity: Entity } - /// # #[derive(Component)] - /// # struct Character { name: String } - /// # - /// fn print_selected_character_name_system( - /// query: Query<&Character>, - /// selection: Res - /// ) - /// { - /// if let Ok(selected_character) = query.get(selection.entity) { - /// println!("{}", selected_character.name); - /// } - /// } - /// # bevy_ecs::system::assert_is_system(print_selected_character_name_system); - /// ``` - #[inline] - pub fn get_inner(&self, entity: Entity) -> Result, QueryEntityError> { - // SAFETY: system runs without conflicts with other systems. - // same-system queries have runtime borrow checks when they conflict - unsafe { - self.state.as_readonly().get_unchecked_manual( - self.world, - entity, - self.last_run, - self.this_run, - ) - } - } - /// Returns an [`Iterator`] over the query items, with the actual "inner" world lifetime. /// /// This can only return immutable data (mutable data will be cast to an immutable form). @@ -1839,13 +2624,7 @@ impl<'w, 's, D: ReadOnlyQueryData, F: QueryFilter> Query<'w, 's, D, F> { /// ``` #[inline] pub fn iter_inner(&self) -> QueryIter<'w, 's, D::ReadOnly, F> { - // SAFETY: system runs without conflicts with other systems. - // same-system queries have runtime borrow checks when they conflict - unsafe { - self.state - .as_readonly() - .iter_unchecked_manual(self.world, self.last_run, self.this_run) - } + (*self).into_iter() } } @@ -1861,7 +2640,21 @@ pub struct QueryLens<'w, Q: QueryData, F: QueryFilter = ()> { impl<'w, Q: QueryData, F: QueryFilter> QueryLens<'w, Q, F> { /// Create a [`Query`] from the underlying [`QueryState`]. 
- pub fn query(&mut self) -> Query<'w, '_, Q, F> { + pub fn query(&mut self) -> Query<'_, '_, Q, F> { + Query { + world: self.world, + state: &self.state, + last_run: self.last_run, + this_run: self.this_run, + } + } +} + +impl<'w, Q: ReadOnlyQueryData, F: QueryFilter> QueryLens<'w, Q, F> { + /// Create a [`Query`] from the underlying [`QueryState`]. + /// This returns results with the actual "inner" world lifetime, + /// so it may only be used with read-only queries to prevent mutable aliasing. + pub fn query_inner(&self) -> Query<'w, '_, Q, F> { Query { world: self.world, state: &self.state, @@ -1872,9 +2665,9 @@ impl<'w, Q: QueryData, F: QueryFilter> QueryLens<'w, Q, F> { } impl<'w, 's, Q: QueryData, F: QueryFilter> From<&'s mut QueryLens<'w, Q, F>> - for Query<'w, 's, Q, F> + for Query<'s, 's, Q, F> { - fn from(value: &'s mut QueryLens<'w, Q, F>) -> Query<'w, 's, Q, F> { + fn from(value: &'s mut QueryLens<'w, Q, F>) -> Query<'s, 's, Q, F> { value.query() } } @@ -1890,7 +2683,7 @@ impl<'w, 'q, Q: QueryData, F: QueryFilter> From<&'q mut Query<'w, '_, Q, F>> /// [System parameter] that provides access to single entity's components, much like [`Query::single`]/[`Query::single_mut`]. /// /// This [`SystemParam`](crate::system::SystemParam) fails validation if zero or more than one matching entity exists. -/// /// This will cause a panic, but can be configured to do nothing or warn once. +/// This will cause the system to be skipped, according to the rules laid out in [`SystemParamValidationError`](crate::system::SystemParamValidationError). /// /// Use [`Option>`] instead if zero or one matching entities can exist. /// @@ -1926,7 +2719,7 @@ impl<'w, D: QueryData, F: QueryFilter> Single<'w, D, F> { /// [System parameter] that works very much like [`Query`] except it always contains at least one matching entity. /// /// This [`SystemParam`](crate::system::SystemParam) fails validation if no matching entities exist. 
-/// /// This will cause a panic, but can be configured to do nothing or warn once. +/// This will cause the system to be skipped, according to the rules laid out in [`SystemParamValidationError`](crate::system::SystemParamValidationError). /// /// Much like [`Query::is_empty`] the worst case runtime will be `O(n)` where `n` is the number of *potential* matches. /// This can be notably expensive for queries that rely on non-archetypal filters such as [`Added`](crate::query::Added) or [`Changed`](crate::query::Changed) @@ -1957,3 +2750,51 @@ impl<'w, 's, D: QueryData, F: QueryFilter> Populated<'w, 's, D, F> { self.0 } } + +#[cfg(test)] +mod tests { + use crate::{prelude::*, query::QueryEntityError}; + use alloc::vec::Vec; + + #[test] + fn get_many_uniqueness() { + let mut world = World::new(); + + let entities: Vec = (0..10).map(|_| world.spawn_empty().id()).collect(); + + let mut query_state = world.query::(); + + // It's best to test get_many_mut_inner directly, as it is shared + // We don't care about aliased mutability for the read-only equivalent + + // SAFETY: Query does not access world data. 
+ assert!(query_state + .query_mut(&mut world) + .get_many_mut_inner::<10>(entities.clone().try_into().unwrap()) + .is_ok()); + + assert_eq!( + query_state + .query_mut(&mut world) + .get_many_mut_inner([entities[0], entities[0]]) + .unwrap_err(), + QueryEntityError::AliasedMutability(entities[0]) + ); + + assert_eq!( + query_state + .query_mut(&mut world) + .get_many_mut_inner([entities[0], entities[1], entities[0]]) + .unwrap_err(), + QueryEntityError::AliasedMutability(entities[0]) + ); + + assert_eq!( + query_state + .query_mut(&mut world) + .get_many_mut_inner([entities[9], entities[9]]) + .unwrap_err(), + QueryEntityError::AliasedMutability(entities[9]) + ); + } +} diff --git a/crates/bevy_ecs/src/system/schedule_system.rs b/crates/bevy_ecs/src/system/schedule_system.rs index 042e69b675955..4ad990b47a499 100644 --- a/crates/bevy_ecs/src/system/schedule_system.rs +++ b/crates/bevy_ecs/src/system/schedule_system.rs @@ -1,183 +1,122 @@ use alloc::{borrow::Cow, vec::Vec}; -use core::any::TypeId; use crate::{ archetype::ArchetypeComponentId, component::{ComponentId, Tick}, - query::Access, - result::Result, - schedule::InternedSystemSet, + error::Result, + query::{Access, FilteredAccessSet}, system::{input::SystemIn, BoxedSystem, System}, world::{unsafe_world_cell::UnsafeWorldCell, DeferredWorld, World}, }; -/// A type which wraps and unifies the different sorts of systems that can be added to a schedule. -pub enum ScheduleSystem { - /// A system that does not return a result. - Infallible(BoxedSystem<(), ()>), - /// A system that does return a result. 
- Fallible(BoxedSystem<(), Result>), +use super::{IntoSystem, SystemParamValidationError}; + +/// A wrapper system to change a system that returns `()` to return `Ok(())` to make it into a [`ScheduleSystem`] +pub struct InfallibleSystemWrapper>(S); + +impl> InfallibleSystemWrapper { + /// Create a new `OkWrapperSystem` + pub fn new(system: S) -> Self { + Self(IntoSystem::into_system(system)) + } } -impl System for ScheduleSystem { +impl> System for InfallibleSystemWrapper { type In = (); type Out = Result; - #[inline(always)] + #[inline] fn name(&self) -> Cow<'static, str> { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.name(), - ScheduleSystem::Fallible(inner_system) => inner_system.name(), - } + self.0.name() } - #[inline(always)] - fn type_id(&self) -> TypeId { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.type_id(), - ScheduleSystem::Fallible(inner_system) => inner_system.type_id(), - } + #[inline] + fn component_access(&self) -> &Access { + self.0.component_access() } - #[inline(always)] - fn component_access(&self) -> &Access { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.component_access(), - ScheduleSystem::Fallible(inner_system) => inner_system.component_access(), - } + #[inline] + fn component_access_set(&self) -> &FilteredAccessSet { + self.0.component_access_set() } #[inline(always)] fn archetype_component_access(&self) -> &Access { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.archetype_component_access(), - ScheduleSystem::Fallible(inner_system) => inner_system.archetype_component_access(), - } + self.0.archetype_component_access() } - #[inline(always)] + #[inline] + fn is_send(&self) -> bool { + self.0.is_send() + } + + #[inline] fn is_exclusive(&self) -> bool { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.is_exclusive(), - ScheduleSystem::Fallible(inner_system) => inner_system.is_exclusive(), - } + 
self.0.is_exclusive() } - #[inline(always)] + #[inline] fn has_deferred(&self) -> bool { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.has_deferred(), - ScheduleSystem::Fallible(inner_system) => inner_system.has_deferred(), - } + self.0.has_deferred() } - #[inline(always)] + #[inline] unsafe fn run_unsafe( &mut self, input: SystemIn<'_, Self>, world: UnsafeWorldCell, ) -> Self::Out { - match self { - ScheduleSystem::Infallible(inner_system) => { - inner_system.run_unsafe(input, world); - Ok(()) - } - ScheduleSystem::Fallible(inner_system) => inner_system.run_unsafe(input, world), - } + self.0.run_unsafe(input, world); + Ok(()) } - #[inline(always)] - fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { - match self { - ScheduleSystem::Infallible(inner_system) => { - inner_system.run(input, world); - Ok(()) - } - ScheduleSystem::Fallible(inner_system) => inner_system.run(input, world), - } - } - - #[inline(always)] + #[inline] fn apply_deferred(&mut self, world: &mut World) { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.apply_deferred(world), - ScheduleSystem::Fallible(inner_system) => inner_system.apply_deferred(world), - } + self.0.apply_deferred(world); } - #[inline(always)] + #[inline] fn queue_deferred(&mut self, world: DeferredWorld) { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.queue_deferred(world), - ScheduleSystem::Fallible(inner_system) => inner_system.queue_deferred(world), - } + self.0.queue_deferred(world); } - #[inline(always)] - fn is_send(&self) -> bool { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.is_send(), - ScheduleSystem::Fallible(inner_system) => inner_system.is_send(), - } - } - - #[inline(always)] - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.validate_param_unsafe(world), - 
ScheduleSystem::Fallible(inner_system) => inner_system.validate_param_unsafe(world), - } + #[inline] + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { + self.0.validate_param_unsafe(world) } - #[inline(always)] + #[inline] fn initialize(&mut self, world: &mut World) { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.initialize(world), - ScheduleSystem::Fallible(inner_system) => inner_system.initialize(world), - } + self.0.initialize(world); } - #[inline(always)] + #[inline] fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) { - match self { - ScheduleSystem::Infallible(inner_system) => { - inner_system.update_archetype_component_access(world); - } - ScheduleSystem::Fallible(inner_system) => { - inner_system.update_archetype_component_access(world); - } - } + self.0.update_archetype_component_access(world); } - #[inline(always)] + #[inline] fn check_change_tick(&mut self, change_tick: Tick) { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.check_change_tick(change_tick), - ScheduleSystem::Fallible(inner_system) => inner_system.check_change_tick(change_tick), - } + self.0.check_change_tick(change_tick); } - #[inline(always)] - fn default_system_sets(&self) -> Vec { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.default_system_sets(), - ScheduleSystem::Fallible(inner_system) => inner_system.default_system_sets(), - } - } - - #[inline(always)] + #[inline] fn get_last_run(&self) -> Tick { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.get_last_run(), - ScheduleSystem::Fallible(inner_system) => inner_system.get_last_run(), - } + self.0.get_last_run() } - #[inline(always)] + #[inline] fn set_last_run(&mut self, last_run: Tick) { - match self { - ScheduleSystem::Infallible(inner_system) => inner_system.set_last_run(last_run), - ScheduleSystem::Fallible(inner_system) => 
inner_system.set_last_run(last_run), - } + self.0.set_last_run(last_run); + } + + fn default_system_sets(&self) -> Vec { + self.0.default_system_sets() } } + +/// Type alias for a `BoxedSystem` that a `Schedule` can store. +pub type ScheduleSystem = BoxedSystem<(), Result>; diff --git a/crates/bevy_ecs/src/system/system.rs b/crates/bevy_ecs/src/system/system.rs index e0aa69b660db1..18ec7f44cd4ff 100644 --- a/crates/bevy_ecs/src/system/system.rs +++ b/crates/bevy_ecs/src/system/system.rs @@ -1,3 +1,7 @@ +#![expect( + clippy::module_inception, + reason = "This instance of module inception is being discussed; see #17353." +)] use core::fmt::Debug; use log::warn; use thiserror::Error; @@ -5,7 +9,7 @@ use thiserror::Error; use crate::{ archetype::ArchetypeComponentId, component::{ComponentId, Tick}, - query::Access, + query::{Access, FilteredAccessSet}, schedule::InternedSystemSet, system::{input::SystemInput, SystemIn}, world::{unsafe_world_cell::UnsafeWorldCell, DeferredWorld, World}, @@ -14,7 +18,7 @@ use crate::{ use alloc::{borrow::Cow, boxed::Box, vec::Vec}; use core::any::TypeId; -use super::IntoSystem; +use super::{IntoSystem, SystemParamValidationError}; /// An ECS system that can be added to a [`Schedule`](crate::schedule::Schedule) /// @@ -26,7 +30,7 @@ use super::IntoSystem; /// /// Systems are executed in parallel, in opportunistic order; data access is managed automatically. /// It's possible to specify explicit execution order between specific systems, -/// see [`IntoSystemConfigs`](crate::schedule::IntoSystemConfigs). +/// see [`IntoScheduleConfigs`](crate::schedule::IntoScheduleConfigs). #[diagnostic::on_unimplemented(message = "`{Self}` is not a system", label = "invalid system")] pub trait System: Send + Sync + 'static { /// The system's input. @@ -40,8 +44,13 @@ pub trait System: Send + Sync + 'static { fn type_id(&self) -> TypeId { TypeId::of::() } + /// Returns the system's component [`Access`]. 
fn component_access(&self) -> &Access; + + /// Returns the system's component [`FilteredAccessSet`]. + fn component_access_set(&self) -> &FilteredAccessSet; + /// Returns the system's archetype component [`Access`]. fn archetype_component_access(&self) -> &Access; /// Returns true if the system is [`Send`]. @@ -65,6 +74,8 @@ pub trait System: Send + Sync + 'static { /// - The caller must ensure that [`world`](UnsafeWorldCell) has permission to access any world data /// registered in `archetype_component_access`. There must be no conflicting /// simultaneous accesses while the system is running. + /// - If [`System::is_exclusive`] returns `true`, then it must be valid to call + /// [`UnsafeWorldCell::world_mut`] on `world`. /// - The method [`System::update_archetype_component_access`] must be called at some /// point before this one, with the same exact [`World`]. If [`System::update_archetype_component_access`] /// panics (or otherwise does not return for any reason), this method must not be called. @@ -79,14 +90,25 @@ pub trait System: Send + Sync + 'static { /// /// [`run_readonly`]: ReadOnlySystem::run_readonly fn run(&mut self, input: SystemIn<'_, Self>, world: &mut World) -> Self::Out { + let ret = self.run_without_applying_deferred(input, world); + self.apply_deferred(world); + ret + } + + /// Runs the system with the given input in the world. + /// + /// [`run_readonly`]: ReadOnlySystem::run_readonly + fn run_without_applying_deferred( + &mut self, + input: SystemIn<'_, Self>, + world: &mut World, + ) -> Self::Out { let world_cell = world.as_unsafe_world_cell(); self.update_archetype_component_access(world_cell); // SAFETY: // - We have exclusive access to the entire world. // - `update_archetype_component_access` has been called. 
- let ret = unsafe { self.run_unsafe(input, world_cell) }; - self.apply_deferred(world); - ret + unsafe { self.run_unsafe(input, world_cell) } } /// Applies any [`Deferred`](crate::system::Deferred) system parameters (or other system buffers) of this system to the world. @@ -117,11 +139,14 @@ pub trait System: Send + Sync + 'static { /// - The method [`System::update_archetype_component_access`] must be called at some /// point before this one, with the same exact [`World`]. If [`System::update_archetype_component_access`] /// panics (or otherwise does not return for any reason), this method must not be called. - unsafe fn validate_param_unsafe(&mut self, world: UnsafeWorldCell) -> bool; + unsafe fn validate_param_unsafe( + &mut self, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError>; /// Safe version of [`System::validate_param_unsafe`]. /// that runs on exclusive, single-threaded `world` pointer. - fn validate_param(&mut self, world: &World) -> bool { + fn validate_param(&mut self, world: &World) -> Result<(), SystemParamValidationError> { let world_cell = world.as_unsafe_world_cell_readonly(); self.update_archetype_component_access(world_cell); // SAFETY: @@ -135,7 +160,7 @@ pub trait System: Send + Sync + 'static { /// Update the system's archetype component [`Access`]. /// - /// ## Note for implementors + /// ## Note for implementers /// `world` may only be used to access metadata. This can be done in safe code /// via functions such as [`UnsafeWorldCell::archetypes`]. 
fn update_archetype_component_access(&mut self, world: UnsafeWorldCell); @@ -348,37 +373,35 @@ impl RunSystemOnce for &mut World { { let mut system: T::System = IntoSystem::into_system(system); system.initialize(self); - if system.validate_param(self) { - Ok(system.run(input, self)) - } else { - Err(RunSystemError::InvalidParams(system.name())) - } + system + .validate_param(self) + .map_err(|err| RunSystemError::InvalidParams { + system: system.name(), + err, + })?; + Ok(system.run(input, self)) } } /// Running system failed. -#[derive(Error)] +#[derive(Error, Debug)] pub enum RunSystemError { /// System could not be run due to parameters that failed validation. - /// - /// This can occur because the data required by the system was not present in the world. - #[error("The data required by the system {0:?} was not found in the world and the system did not run due to failed parameter validation.")] - InvalidParams(Cow<'static, str>), -} - -impl Debug for RunSystemError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - Self::InvalidParams(arg0) => f.debug_tuple("InvalidParams").field(arg0).finish(), - } - } + /// This should not be considered an error if [`field@SystemParamValidationError::skipped`] is `true`. + #[error("System {system} did not run due to failed parameter validation: {err}")] + InvalidParams { + /// The identifier of the system that was run. + system: Cow<'static, str>, + /// The returned parameter validation error. 
+ err: SystemParamValidationError, + }, } #[cfg(test)] mod tests { use super::*; - use crate as bevy_ecs; use crate::prelude::*; + use alloc::string::ToString; #[test] fn run_system_once() { @@ -400,7 +423,6 @@ mod tests { #[derive(Resource, Default, PartialEq, Debug)] struct Counter(u8); - #[allow(dead_code)] fn count_up(mut counter: ResMut) { counter.0 += 1; } @@ -416,7 +438,6 @@ mod tests { assert_eq!(*world.resource::(), Counter(2)); } - #[allow(dead_code)] fn spawn_entity(mut commands: Commands) { commands.spawn_empty(); } @@ -450,8 +471,10 @@ mod tests { let mut world = World::default(); // This fails because `T` has not been added to the world yet. - let result = world.run_system_once(system.param_warn_once()); + let result = world.run_system_once(system); - assert!(matches!(result, Err(RunSystemError::InvalidParams(_)))); + assert!(matches!(result, Err(RunSystemError::InvalidParams { .. }))); + let expected = "System bevy_ecs::system::system::tests::run_system_once_invalid_params::system did not run due to failed parameter validation: Parameter `Res` failed validation: Resource does not exist"; + assert_eq!(expected, result.unwrap_err().to_string()); } } diff --git a/crates/bevy_ecs/src/system/system_name.rs b/crates/bevy_ecs/src/system/system_name.rs index 3ecc901baae2b..b28ddd89f6658 100644 --- a/crates/bevy_ecs/src/system/system_name.rs +++ b/crates/bevy_ecs/src/system/system_name.rs @@ -94,6 +94,7 @@ mod tests { system::{IntoSystem, RunSystemOnce, SystemName}, world::World, }; + use alloc::{borrow::ToOwned, string::String}; #[test] fn test_system_name_regular_param() { diff --git a/crates/bevy_ecs/src/system/system_param.rs b/crates/bevy_ecs/src/system/system_param.rs index 5af22674afa7c..99d4c72df66e2 100644 --- a/crates/bevy_ecs/src/system/system_param.rs +++ b/crates/bevy_ecs/src/system/system_param.rs @@ -2,13 +2,14 @@ pub use crate::change_detection::{NonSendMut, Res, ResMut}; use crate::{ archetype::{Archetype, Archetypes}, bundle::Bundles, - 
change_detection::{Ticks, TicksMut}, + change_detection::{MaybeLocation, Ticks, TicksMut}, component::{ComponentId, ComponentTicks, Components, Tick}, entity::Entities, query::{ Access, FilteredAccess, FilteredAccessSet, QueryData, QueryFilter, QuerySingleError, QueryState, ReadOnlyQueryData, }, + resource::Resource, storage::ResourceData, system::{Query, Single, SystemMeta}, world::{ @@ -16,19 +17,23 @@ use crate::{ FromWorld, World, }, }; -use alloc::{borrow::ToOwned, boxed::Box, vec::Vec}; -pub use bevy_ecs_macros::{Resource, SystemParam}; +use alloc::{ + borrow::{Cow, ToOwned}, + boxed::Box, + vec::Vec, +}; +pub use bevy_ecs_macros::SystemParam; use bevy_ptr::UnsafeCellDeref; use bevy_utils::synccell::SyncCell; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; use core::{ any::Any, - fmt::Debug, + fmt::{Debug, Display}, marker::PhantomData, ops::{Deref, DerefMut}, + panic::Location, }; use disqualified::ShortName; +use thiserror::Error; use super::Populated; use variadics_please::{all_tuples, all_tuples_enumerated}; @@ -126,6 +131,29 @@ use variadics_please::{all_tuples, all_tuples_enumerated}; /// This will most commonly occur when working with `SystemParam`s generically, as the requirement /// has not been proven to the compiler. /// +/// ## Custom Validation Messages +/// +/// When using the derive macro, any [`SystemParamValidationError`]s will be propagated from the sub-parameters. +/// If you want to override the error message, add a `#[system_param(validation_message = "New message")]` attribute to the parameter. 
+/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # #[derive(Resource)] +/// # struct SomeResource; +/// # use bevy_ecs::system::SystemParam; +/// # +/// #[derive(SystemParam)] +/// struct MyParam<'w> { +/// #[system_param(validation_message = "Custom Message")] +/// foo: Res<'w, SomeResource>, +/// } +/// +/// let mut world = World::new(); +/// let err = world.run_system_cached(|param: MyParam| {}).unwrap_err(); +/// let expected = "Parameter `MyParam::foo` failed validation: Custom Message"; +/// assert!(err.to_string().ends_with(expected)); +/// ``` +/// /// ## Builders /// /// If you want to use a [`SystemParamBuilder`](crate::system::SystemParamBuilder) with a derived [`SystemParam`] implementation, @@ -201,7 +229,10 @@ pub unsafe trait SystemParam: Sized { /// # Safety /// `archetype` must be from the [`World`] used to initialize `state` in [`SystemParam::init_state`]. #[inline] - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." + )] unsafe fn new_archetype( state: &mut Self::State, archetype: &Archetype, @@ -214,16 +245,26 @@ pub unsafe trait SystemParam: Sized { /// /// [`Commands`]: crate::prelude::Commands #[inline] - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." + )] fn apply(state: &mut Self::State, system_meta: &SystemMeta, world: &mut World) {} /// Queues any deferred mutations to be applied at the next [`ApplyDeferred`](crate::prelude::ApplyDeferred). 
#[inline] - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." + )] fn queue(state: &mut Self::State, system_meta: &SystemMeta, world: DeferredWorld) {} /// Validates that the param can be acquired by the [`get_param`](SystemParam::get_param). - /// Built-in executors use this to prevent systems with invalid params from running. + /// + /// Built-in executors use this to prevent systems with invalid params from running, + /// and any failures here will be bubbled up to the default error handler defined in [`bevy_ecs::error`], + /// with a value of type [`SystemParamValidationError`]. + /// /// For nested [`SystemParam`]s validation will fail if any /// delegated validation fails. /// @@ -243,30 +284,35 @@ pub unsafe trait SystemParam: Sized { /// world mutations inbetween. Otherwise, while it won't lead to any undefined behavior, /// the validity of the param may change. /// + /// [`System::validate_param`](super::system::System::validate_param), + /// calls this method for each supplied system param. + /// /// # Safety /// /// - The passed [`UnsafeWorldCell`] must have read-only access to world data /// registered in [`init_state`](SystemParam::init_state). /// - `world` must be the same [`World`] that was used to initialize [`state`](SystemParam::init_state). /// - All `world`'s archetypes have been processed by [`new_archetype`](SystemParam::new_archetype). + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." 
+ )] unsafe fn validate_param( - _state: &Self::State, - _system_meta: &SystemMeta, - _world: UnsafeWorldCell, - ) -> bool { - // By default we allow panics in [`SystemParam::get_param`] and return `true`. - // Preventing panics is an optional feature. - true + state: &Self::State, + system_meta: &SystemMeta, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { + Ok(()) } /// Creates a parameter to be passed into a [`SystemParamFunction`](super::SystemParamFunction). /// /// # Safety /// - /// - The passed [`UnsafeWorldCell`] must have access to any world data - /// registered in [`init_state`](SystemParam::init_state). + /// - The passed [`UnsafeWorldCell`] must have access to any world data registered + /// in [`init_state`](SystemParam::init_state). /// - `world` must be the same [`World`] that was used to initialize [`state`](SystemParam::init_state). - /// - all `world`'s archetypes have been processed by [`new_archetype`](SystemParam::new_archetype). + /// - All `world`'s archetypes have been processed by [`new_archetype`](SystemParam::new_archetype). unsafe fn get_param<'world, 'state>( state: &'state mut Self::State, system_meta: &SystemMeta, @@ -320,7 +366,8 @@ unsafe impl SystemParam for Qu // SAFETY: We have registered all of the query's world accesses, // so the caller ensures that `world` has permission to access any // world data that the query needs. - unsafe { Query::new(world, state, system_meta.last_run, change_tick) } + // The caller ensures the world matches the one used in init_state. + unsafe { state.query_unchecked_manual_with_ticks(world, system_meta.last_run, change_tick) } } } @@ -388,12 +435,14 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam fo world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - state.validate_world(world.id()); // SAFETY: State ensures that the components it accesses are not accessible somewhere elsewhere. 
- let result = - unsafe { state.get_single_unchecked_manual(world, system_meta.last_run, change_tick) }; - let single = - result.expect("The query was expected to contain exactly one matching entity."); + // The caller ensures the world matches the one used in init_state. + let query = unsafe { + state.query_unchecked_manual_with_ticks(world, system_meta.last_run, change_tick) + }; + let single = query + .single_inner() + .expect("The query was expected to contain exactly one matching entity."); Single { item: single, _filter: PhantomData, @@ -405,22 +454,26 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam fo state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state.validate_world(world.id()); + ) -> Result<(), SystemParamValidationError> { // SAFETY: State ensures that the components it accesses are not mutably accessible elsewhere // and the query is read only. - let result = unsafe { - state.as_readonly().get_single_unchecked_manual( + // The caller ensures the world matches the one used in init_state. + let query = unsafe { + state.query_unchecked_manual_with_ticks( world, system_meta.last_run, world.change_tick(), ) }; - let is_valid = result.is_ok(); - if !is_valid { - system_meta.try_warn_param::(); + match query.single_inner() { + Ok(_) => Ok(()), + Err(QuerySingleError::NoEntities(_)) => Err( + SystemParamValidationError::skipped::("No matching entities"), + ), + Err(QuerySingleError::MultipleEntities(_)) => Err( + SystemParamValidationError::skipped::("Multiple matching entities"), + ), } - is_valid } } @@ -454,9 +507,11 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam ) -> Self::Item<'w, 's> { state.validate_world(world.id()); // SAFETY: State ensures that the components it accesses are not accessible elsewhere. 
- let result = - unsafe { state.get_single_unchecked_manual(world, system_meta.last_run, change_tick) }; - match result { + // The caller ensures the world matches the one used in init_state. + let query = unsafe { + state.query_unchecked_manual_with_ticks(world, system_meta.last_run, change_tick) + }; + match query.single_inner() { Ok(single) => Some(Single { item: single, _filter: PhantomData, @@ -471,22 +526,23 @@ unsafe impl<'a, D: QueryData + 'static, F: QueryFilter + 'static> SystemParam state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state.validate_world(world.id()); + ) -> Result<(), SystemParamValidationError> { // SAFETY: State ensures that the components it accesses are not mutably accessible elsewhere // and the query is read only. - let result = unsafe { - state.as_readonly().get_single_unchecked_manual( + // The caller ensures the world matches the one used in init_state. + let query = unsafe { + state.query_unchecked_manual_with_ticks( world, system_meta.last_run, world.change_tick(), ) }; - let is_valid = !matches!(result, Err(QuerySingleError::MultipleEntities(_))); - if !is_valid { - system_meta.try_warn_param::(); + match query.single_inner() { + Ok(_) | Err(QuerySingleError::NoEntities(_)) => Ok(()), + Err(QuerySingleError::MultipleEntities(_)) => Err( + SystemParamValidationError::skipped::("Multiple matching entities"), + ), } - is_valid } } @@ -540,13 +596,23 @@ unsafe impl SystemParam state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state.validate_world(world.id()); + ) -> Result<(), SystemParamValidationError> { // SAFETY: // - We have read-only access to the components accessed by query. - // - The world has been validated. - !unsafe { - state.is_empty_unsafe_world_cell(world, system_meta.last_run, world.change_tick()) + // - The caller ensures the world matches the one used in init_state. 
+ let query = unsafe { + state.query_unchecked_manual_with_ticks( + world, + system_meta.last_run, + world.change_tick(), + ) + }; + if query.is_empty() { + Err(SystemParamValidationError::skipped::( + "No matching entities", + )) + } else { + Ok(()) } } } @@ -645,12 +711,16 @@ unsafe impl<'w, 's, D: ReadOnlyQueryData + 'static, F: QueryFilter + 'static> Re /// # } /// fn event_system( /// mut set: ParamSet<( -/// // `EventReader`s and `EventWriter`s conflict with each other, -/// // since they both access the event queue resource for `MyEvent`. +/// // PROBLEM: `EventReader` and `EventWriter` cannot be used together normally, +/// // because they both need access to the same event queue. +/// // SOLUTION: `ParamSet` allows these conflicting parameters to be used safely +/// // by ensuring only one is accessed at a time. /// EventReader, /// EventWriter, -/// // `&World` reads the entire world, so a `ParamSet` is the only way -/// // that it can be used in the same system as any mutable accesses. +/// // PROBLEM: `&World` needs read access to everything, which conflicts with +/// // any mutable access in the same system. +/// // SOLUTION: `ParamSet` ensures `&World` is only accessed when we're not +/// // using the other mutable parameters. /// &World, /// )>, /// ) { @@ -658,7 +728,7 @@ unsafe impl<'w, 's, D: ReadOnlyQueryData + 'static, F: QueryFilter + 'static> Re /// // ... /// # let _event = event; /// } -/// set.p1().send(MyEvent::new()); +/// set.p1().write(MyEvent::new()); /// /// let entities = set.p2().entities(); /// // ... @@ -687,8 +757,14 @@ macro_rules! impl_param_set { type State = ($($param::State,)*); type Item<'w, 's> = ParamSet<'w, 's, ($($param,)*)>; - // Note: We allow non snake case so the compiler don't complain about the creation of non_snake_case variables - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "This is inside a macro meant for tuples; as such, `non_snake_case` won't always lint." 
+ )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { $( // Pretend to add each param to the system alone, see if it conflicts @@ -732,7 +808,7 @@ macro_rules! impl_param_set { state: &'s Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell<'w>, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { <($($param,)*) as SystemParam>::validate_param(state, system_meta, world) } @@ -774,73 +850,6 @@ macro_rules! impl_param_set { all_tuples_enumerated!(impl_param_set, 1, 8, P, m, p); -/// A type that can be inserted into a [`World`] as a singleton. -/// -/// You can access resource data in systems using the [`Res`] and [`ResMut`] system parameters -/// -/// Only one resource of each type can be stored in a [`World`] at any given time. -/// -/// # Examples -/// -/// ``` -/// # let mut world = World::default(); -/// # let mut schedule = Schedule::default(); -/// # use bevy_ecs::prelude::*; -/// #[derive(Resource)] -/// struct MyResource { value: u32 } -/// -/// world.insert_resource(MyResource { value: 42 }); -/// -/// fn read_resource_system(resource: Res) { -/// assert_eq!(resource.value, 42); -/// } -/// -/// fn write_resource_system(mut resource: ResMut) { -/// assert_eq!(resource.value, 42); -/// resource.value = 0; -/// assert_eq!(resource.value, 0); -/// } -/// # schedule.add_systems((read_resource_system, write_resource_system).chain()); -/// # schedule.run(&mut world); -/// ``` -/// -/// # `!Sync` Resources -/// A `!Sync` type cannot implement `Resource`. However, it is possible to wrap a `Send` but not `Sync` -/// type in [`SyncCell`] or the currently unstable [`Exclusive`] to make it `Sync`. This forces only -/// having mutable access (`&mut T` only, never `&T`), but makes it safe to reference across multiple -/// threads. -/// -/// This will fail to compile since `RefCell` is `!Sync`. 
-/// ```compile_fail -/// # use std::cell::RefCell; -/// # use bevy_ecs::system::Resource; -/// -/// #[derive(Resource)] -/// struct NotSync { -/// counter: RefCell, -/// } -/// ``` -/// -/// This will compile since the `RefCell` is wrapped with `SyncCell`. -/// ``` -/// # use std::cell::RefCell; -/// # use bevy_ecs::system::Resource; -/// use bevy_utils::synccell::SyncCell; -/// -/// #[derive(Resource)] -/// struct ActuallySync { -/// counter: SyncCell>, -/// } -/// ``` -/// -/// [`Exclusive`]: https://doc.rust-lang.org/nightly/std/sync/struct.Exclusive.html -#[diagnostic::on_unimplemented( - message = "`{Self}` is not a `Resource`", - label = "invalid `Resource`", - note = "consider annotating `{Self}` with `#[derive(Resource)]`" -)] -pub trait Resource: Send + Sync + 'static {} - // SAFETY: Res only reads a single World resource unsafe impl<'a, T: Resource> ReadOnlySystemParam for Res<'a, T> {} @@ -851,7 +860,7 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { type Item<'w, 's> = Res<'w, T>; fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { - let component_id = world.components.register_resource::(); + let component_id = world.components_registrator().register_resource::(); let archetype_component_id = world.initialize_resource_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -875,18 +884,21 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { #[inline] unsafe fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. 
- let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Resource does not exist", + )) } - is_valid } #[inline] @@ -896,7 +908,7 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks, _caller) = + let (ptr, ticks, caller) = world .get_resource_with_ticks(component_id) .unwrap_or_else(|| { @@ -914,8 +926,7 @@ unsafe impl<'a, T: Resource> SystemParam for Res<'a, T> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } } } @@ -941,7 +952,7 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { ) -> Self::Item<'w, 's> { world .get_resource_with_ticks(component_id) - .map(|(ptr, ticks, _caller)| Res { + .map(|(ptr, ticks, caller)| Res { value: ptr.deref(), ticks: Ticks { added: ticks.added.deref(), @@ -949,8 +960,7 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), }) } } @@ -962,7 +972,7 @@ unsafe impl<'a, T: Resource> SystemParam for ResMut<'a, T> { type Item<'w, 's> = ResMut<'w, T>; fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { - let component_id = world.components.register_resource::(); + let component_id = world.components_registrator().register_resource::(); let archetype_component_id = world.initialize_resource_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -989,18 +999,21 @@ unsafe impl<'a, T: 
Resource> SystemParam for ResMut<'a, T> { #[inline] unsafe fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. - let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Resource does not exist", + )) } - is_valid } #[inline] @@ -1027,7 +1040,6 @@ unsafe impl<'a, T: Resource> SystemParam for ResMut<'a, T> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_change_detection")] changed_by: value.changed_by, } } @@ -1059,7 +1071,6 @@ unsafe impl<'a, T: Resource> SystemParam for Option> { last_run: system_meta.last_run, this_run: change_tick, }, - #[cfg(feature = "track_change_detection")] changed_by: value.changed_by, }) } @@ -1115,9 +1126,16 @@ unsafe impl<'w> SystemParam for DeferredWorld<'w> { type Item<'world, 'state> = DeferredWorld<'world>; fn init_state(_world: &mut World, system_meta: &mut SystemMeta) -> Self::State { - system_meta.component_access_set.read_all(); + assert!( + !system_meta + .component_access_set + .combined_access() + .has_any_read(), + "DeferredWorld in system {} conflicts with a previous access.", + system_meta.name, + ); system_meta.component_access_set.write_all(); - system_meta.set_has_deferred(); + system_meta.archetype_component_access.write_all(); } unsafe fn get_param<'world, 'state>( @@ -1161,6 +1179,25 @@ unsafe impl<'w> SystemParam for DeferredWorld<'w> { /// assert_eq!(read_system.run((), world), 0); /// ``` /// +/// A simple way to set a different default value for a local is by wrapping the value with an Option. 
+/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # let world = &mut World::default(); +/// fn counter_from_10(mut count: Local>) -> usize { +/// let count = count.get_or_insert(10); +/// *count += 1; +/// *count +/// } +/// let mut counter_system = IntoSystem::into_system(counter_from_10); +/// counter_system.initialize(world); +/// +/// // Counter is initialized at 10, and increases to 11 on first run. +/// assert_eq!(counter_system.run((), world), 11); +/// // Counter is only increased by 1 on subsequent runs. +/// assert_eq!(counter_system.run((), world), 12); +/// ``` +/// /// N.B. A [`Local`]s value cannot be read or written to outside of the containing system. /// To add configuration to a system, convert a capturing closure into the system instead: /// @@ -1432,6 +1469,33 @@ unsafe impl SystemParam for Deferred<'_, T> { } } +/// A dummy type that is [`!Send`](Send), to force systems to run on the main thread. +pub struct NonSendMarker; + +// SAFETY: No world access. +unsafe impl SystemParam for NonSendMarker { + type State = (); + type Item<'w, 's> = Self; + + #[inline] + fn init_state(_world: &mut World, system_meta: &mut SystemMeta) -> Self::State { + system_meta.set_non_send(); + } + + #[inline] + unsafe fn get_param<'world, 'state>( + _state: &'state mut Self::State, + _system_meta: &SystemMeta, + _world: UnsafeWorldCell<'world>, + _change_tick: Tick, + ) -> Self::Item<'world, 'state> { + Self + } +} + +// SAFETY: Does not read any world state +unsafe impl ReadOnlySystemParam for NonSendMarker {} + /// Shared borrow of a non-[`Send`] resource. /// /// Only `Send` resources may be accessed with the [`Res`] [`SystemParam`]. In case that the @@ -1440,7 +1504,7 @@ unsafe impl SystemParam for Deferred<'_, T> { /// over to another thread. /// /// This [`SystemParam`] fails validation if non-send resource doesn't exist. -/// /// This will cause a panic, but can be configured to do nothing or warn once. 
+/// This will cause a panic, but can be configured to do nothing or warn once. /// /// Use [`Option>`] instead if the resource might not always exist. pub struct NonSend<'w, T: 'static> { @@ -1448,8 +1512,7 @@ pub struct NonSend<'w, T: 'static> { ticks: ComponentTicks, last_run: Tick, this_run: Tick, - #[cfg(feature = "track_change_detection")] - changed_by: &'static Location<'static>, + changed_by: MaybeLocation<&'w &'static Location<'static>>, } // SAFETY: Only reads a single World non-send resource @@ -1476,9 +1539,8 @@ impl<'w, T: 'static> NonSend<'w, T> { } /// The location that last caused this to change. - #[cfg(feature = "track_change_detection")] - pub fn changed_by(&self) -> &'static Location<'static> { - self.changed_by + pub fn changed_by(&self) -> MaybeLocation { + self.changed_by.copied() } } @@ -1499,8 +1561,7 @@ impl<'a, T> From> for NonSend<'a, T> { }, this_run: nsm.ticks.this_run, last_run: nsm.ticks.last_run, - #[cfg(feature = "track_change_detection")] - changed_by: nsm.changed_by, + changed_by: nsm.changed_by.map(|changed_by| &*changed_by), } } } @@ -1514,7 +1575,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { system_meta.set_non_send(); - let component_id = world.components.register_non_send::(); + let component_id = world.components_registrator().register_non_send::(); let archetype_component_id = world.initialize_non_send_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -1538,18 +1599,21 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { #[inline] unsafe fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. 
- let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .non_send_resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Non-send resource does not exist", + )) } - is_valid } #[inline] @@ -1559,7 +1623,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks, _caller) = + let (ptr, ticks, caller) = world .get_non_send_with_ticks(component_id) .unwrap_or_else(|| { @@ -1575,8 +1639,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSend<'a, T> { ticks: ticks.read(), last_run: system_meta.last_run, this_run: change_tick, - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), } } } @@ -1602,13 +1665,12 @@ unsafe impl SystemParam for Option> { ) -> Self::Item<'w, 's> { world .get_non_send_with_ticks(component_id) - .map(|(ptr, ticks, _caller)| NonSend { + .map(|(ptr, ticks, caller)| NonSend { value: ptr.deref(), ticks: ticks.read(), last_run: system_meta.last_run, this_run: change_tick, - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), }) } } @@ -1622,7 +1684,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { system_meta.set_non_send(); - let component_id = world.components.register_non_send::(); + let component_id = world.components_registrator().register_non_send::(); let archetype_component_id = world.initialize_non_send_internal(component_id).id(); let combined_access = system_meta.component_access_set.combined_access(); @@ -1649,18 +1711,21 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { #[inline] unsafe 
fn validate_param( &component_id: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to resource metadata. - let is_valid = unsafe { world.storages() } + if unsafe { world.storages() } .non_send_resources .get(component_id) - .is_some_and(ResourceData::is_present); - if !is_valid { - system_meta.try_warn_param::(); + .is_some_and(ResourceData::is_present) + { + Ok(()) + } else { + Err(SystemParamValidationError::invalid::( + "Non-send resource does not exist", + )) } - is_valid } #[inline] @@ -1670,7 +1735,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { world: UnsafeWorldCell<'w>, change_tick: Tick, ) -> Self::Item<'w, 's> { - let (ptr, ticks, _caller) = + let (ptr, ticks, caller) = world .get_non_send_with_ticks(component_id) .unwrap_or_else(|| { @@ -1683,8 +1748,7 @@ unsafe impl<'a, T: 'static> SystemParam for NonSendMut<'a, T> { NonSendMut { value: ptr.assert_unique().deref_mut(), ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), } } } @@ -1707,11 +1771,10 @@ unsafe impl<'a, T: 'static> SystemParam for Option> { ) -> Self::Item<'w, 's> { world .get_non_send_with_ticks(component_id) - .map(|(ptr, ticks, _caller)| NonSendMut { + .map(|(ptr, ticks, caller)| NonSendMut { value: ptr.assert_unique().deref_mut(), ticks: TicksMut::from_tick_cells(ticks, system_meta.last_run, change_tick), - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), }) } } @@ -1870,10 +1933,11 @@ unsafe impl SystemParam for Vec { state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { - state - .iter() - .all(|state| T::validate_param(state, system_meta, world)) + ) -> 
Result<(), SystemParamValidationError> { + for state in state { + T::validate_param(state, system_meta, world)?; + } + Ok(()) } #[inline] @@ -2005,56 +2069,79 @@ macro_rules! impl_system_param_tuple { // SAFETY: tuple consists only of ReadOnlySystemParams unsafe impl<$($param: ReadOnlySystemParam),*> ReadOnlySystemParam for ($($param,)*) {} - // SAFETY: implementors of each `SystemParam` in the tuple have validated their impls - #[allow(clippy::undocumented_unsafe_blocks)] // false positive by clippy - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "This is in a macro, and as such, the below lints may not always apply." + )] + #[allow( + non_snake_case, + reason = "Certain variable names are provided by the caller, not by us." + )] + #[allow( + unused_variables, + reason = "Zero-length tuples won't use some of the parameters." + )] $(#[$meta])* + // SAFETY: implementers of each `SystemParam` in the tuple have validated their impls unsafe impl<$($param: SystemParam),*> SystemParam for ($($param,)*) { type State = ($($param::State,)*); type Item<'w, 's> = ($($param::Item::<'w, 's>,)*); #[inline] - fn init_state(_world: &mut World, _system_meta: &mut SystemMeta) -> Self::State { - (($($param::init_state(_world, _system_meta),)*)) + fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State { + (($($param::init_state(world, system_meta),)*)) } #[inline] - #[allow(unused_unsafe)] - unsafe fn new_archetype(($($param,)*): &mut Self::State, _archetype: &Archetype, _system_meta: &mut SystemMeta) { + unsafe fn new_archetype(($($param,)*): &mut Self::State, archetype: &Archetype, system_meta: &mut SystemMeta) { + #[allow( + unused_unsafe, + reason = "Zero-length tuples will not run anything in the unsafe block." + )] // SAFETY: The caller ensures that `archetype` is from the World the state was initialized from in `init_state`. 
- unsafe { $($param::new_archetype($param, _archetype, _system_meta);)* } + unsafe { $($param::new_archetype($param, archetype, system_meta);)* } } #[inline] - fn apply(($($param,)*): &mut Self::State, _system_meta: &SystemMeta, _world: &mut World) { - $($param::apply($param, _system_meta, _world);)* + fn apply(($($param,)*): &mut Self::State, system_meta: &SystemMeta, world: &mut World) { + $($param::apply($param, system_meta, world);)* } #[inline] - fn queue(($($param,)*): &mut Self::State, _system_meta: &SystemMeta, mut _world: DeferredWorld) { - $($param::queue($param, _system_meta, _world.reborrow());)* + #[allow( + unused_mut, + reason = "The `world` parameter is unused for zero-length tuples; however, it must be mutable for other lengths of tuples." + )] + fn queue(($($param,)*): &mut Self::State, system_meta: &SystemMeta, mut world: DeferredWorld) { + $($param::queue($param, system_meta, world.reborrow());)* } #[inline] unsafe fn validate_param( state: &Self::State, - _system_meta: &SystemMeta, - _world: UnsafeWorldCell, - ) -> bool { + system_meta: &SystemMeta, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { let ($($param,)*) = state; - $($param::validate_param($param, _system_meta, _world)&&)* true + $( + $param::validate_param($param, system_meta, world)?; + )* + Ok(()) } #[inline] - #[allow(clippy::unused_unit)] unsafe fn get_param<'w, 's>( state: &'s mut Self::State, - _system_meta: &SystemMeta, - _world: UnsafeWorldCell<'w>, - _change_tick: Tick, + system_meta: &SystemMeta, + world: UnsafeWorldCell<'w>, + change_tick: Tick, ) -> Self::Item<'w, 's> { let ($($param,)*) = state; - ($($param::get_param($param, _system_meta, _world, _change_tick),)*) + #[allow( + clippy::unused_unit, + reason = "Zero-length tuples won't have any params to get." 
+ )] + ($($param::get_param($param, system_meta, world, change_tick),)*) } } }; @@ -2206,7 +2293,7 @@ unsafe impl SystemParam for StaticSystemParam<'_, '_, state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { P::validate_param(state, system_meta, world) } @@ -2452,7 +2539,11 @@ trait DynParamState: Sync + Send { /// /// # Safety /// Refer to [`SystemParam::validate_param`]. - unsafe fn validate_param(&self, system_meta: &SystemMeta, world: UnsafeWorldCell) -> bool; + unsafe fn validate_param( + &self, + system_meta: &SystemMeta, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError>; } /// A wrapper around a [`SystemParam::State`] that can be used as a trait object in a [`DynSystemParam`]. @@ -2476,7 +2567,11 @@ impl DynParamState for ParamState { T::queue(&mut self.0, system_meta, world); } - unsafe fn validate_param(&self, system_meta: &SystemMeta, world: UnsafeWorldCell) -> bool { + unsafe fn validate_param( + &self, + system_meta: &SystemMeta, + world: UnsafeWorldCell, + ) -> Result<(), SystemParamValidationError> { T::validate_param(&self.0, system_meta, world) } } @@ -2496,7 +2591,7 @@ unsafe impl SystemParam for DynSystemParam<'_, '_> { state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { state.0.validate_param(system_meta, world) } @@ -2591,13 +2686,85 @@ unsafe impl SystemParam for FilteredResourcesMut<'_, '_> { } } +/// An error that occurs when a system parameter is not valid, +/// used by system executors to determine what to do with a system. +/// +/// Returned as an error from [`SystemParam::validate_param`], +/// and handled using the unified error handling mechanisms defined in [`bevy_ecs::error`]. +#[derive(Debug, PartialEq, Eq, Clone, Error)] +pub struct SystemParamValidationError { + /// Whether the system should be skipped. 
+ /// + /// If `false`, the error should be handled. + /// By default, this will result in a panic. See [`crate::error`] for more information. + /// + /// This is the default behavior, and is suitable for system params that should *always* be valid, + /// either because sensible fallback behavior exists (like [`Query`] or because + /// failures in validation should be considered a bug in the user's logic that must be immediately addressed (like [`Res`]). + /// + /// If `true`, the system should be skipped. + /// This is suitable for system params that are intended to only operate in certain application states, such as [`Single`]. + pub skipped: bool, + + /// A message describing the validation error. + pub message: Cow<'static, str>, + + /// A string identifying the invalid parameter. + /// This is usually the type name of the parameter. + pub param: Cow<'static, str>, + + /// A string identifying the field within a parameter using `#[derive(SystemParam)]`. + /// This will be an empty string for other parameters. + /// + /// This will be printed after `param` in the `Display` impl, and should include a `::` prefix if non-empty. + pub field: Cow<'static, str>, +} + +impl SystemParamValidationError { + /// Constructs a `SystemParamValidationError` that skips the system. + /// The parameter name is initialized to the type name of `T`, so a `SystemParam` should usually pass `Self`. + pub fn skipped(message: impl Into>) -> Self { + Self::new::(true, message, Cow::Borrowed("")) + } + + /// Constructs a `SystemParamValidationError` for an invalid parameter that should be treated as an error. + /// The parameter name is initialized to the type name of `T`, so a `SystemParam` should usually pass `Self`. + pub fn invalid(message: impl Into>) -> Self { + Self::new::(false, message, Cow::Borrowed("")) + } + + /// Constructs a `SystemParamValidationError` for an invalid parameter. 
+ /// The parameter name is initialized to the type name of `T`, so a `SystemParam` should usually pass `Self`. + pub fn new( + skipped: bool, + message: impl Into>, + field: impl Into>, + ) -> Self { + Self { + skipped, + message: message.into(), + param: Cow::Borrowed(core::any::type_name::()), + field: field.into(), + } + } +} + +impl Display for SystemParamValidationError { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!( + fmt, + "Parameter `{}{}` failed validation: {}", + ShortName(&self.param), + self.field, + self.message + ) + } +} + #[cfg(test)] mod tests { use super::*; - use crate::{ - self as bevy_ecs, // Necessary for the `SystemParam` Derive when used inside `bevy_ecs`. - system::assert_is_system, - }; + use crate::system::assert_is_system; use core::cell::RefCell; // Compile test for https://github.com/bevyengine/bevy/pull/2838. @@ -2643,7 +2810,10 @@ mod tests { // Compile test for https://github.com/bevyengine/bevy/pull/7001. #[test] fn system_param_const_generics() { - #[allow(dead_code)] + #[expect( + dead_code, + reason = "This struct is used to ensure that const generics are supported as a SystemParam; thus, the inner value never needs to be read." + )] #[derive(SystemParam)] pub struct ConstGenericParam<'w, const I: usize>(Res<'w, R>); @@ -2701,7 +2871,10 @@ mod tests { #[derive(SystemParam)] pub struct UnitParam; - #[allow(dead_code)] + #[expect( + dead_code, + reason = "This struct is used to ensure that tuple structs are supported as a SystemParam; thus, the inner values never need to be read." + )] #[derive(SystemParam)] pub struct TupleParam<'w, 's, R: Resource, L: FromWorld + Send + 'static>( Res<'w, R>, @@ -2718,7 +2891,10 @@ mod tests { #[derive(Resource)] struct PrivateResource; - #[allow(dead_code)] + #[expect( + dead_code, + reason = "This struct is used to ensure that SystemParam's derive can't leak private fields; thus, the inner values never need to be read." 
+ )] #[derive(SystemParam)] pub struct EncapsulatedParam<'w>(Res<'w, PrivateResource>); @@ -2818,4 +2994,34 @@ mod tests { let _query: Query<()> = p.downcast_mut_inner().unwrap(); let _query: Query<()> = p.downcast().unwrap(); } + + #[test] + #[should_panic = "Encountered an error in system `bevy_ecs::system::system_param::tests::missing_resource_error::res_system`: Parameter `Res` failed validation: Resource does not exist"] + fn missing_resource_error() { + #[derive(Resource)] + pub struct MissingResource; + + let mut schedule = crate::schedule::Schedule::default(); + schedule.add_systems(res_system); + let mut world = World::new(); + schedule.run(&mut world); + + fn res_system(_: Res) {} + } + + #[test] + #[should_panic = "Encountered an error in system `bevy_ecs::system::system_param::tests::missing_event_error::event_system`: Parameter `EventReader::events` failed validation: Event not initialized"] + fn missing_event_error() { + use crate::prelude::{Event, EventReader}; + + #[derive(Event)] + pub struct MissingEvent; + + let mut schedule = crate::schedule::Schedule::default(); + schedule.add_systems(event_system); + let mut world = World::new(); + schedule.run(&mut world); + + fn event_system(_: EventReader) {} + } } diff --git a/crates/bevy_ecs/src/system/system_registry.rs b/crates/bevy_ecs/src/system/system_registry.rs index f924618a7182c..cf53b35be5f80 100644 --- a/crates/bevy_ecs/src/system/system_registry.rs +++ b/crates/bevy_ecs/src/system/system_registry.rs @@ -1,31 +1,39 @@ #[cfg(feature = "bevy_reflect")] use crate::reflect::ReflectComponent; use crate::{ - self as bevy_ecs, - bundle::Bundle, change_detection::Mut, entity::Entity, - system::{input::SystemInput, BoxedSystem, IntoSystem, System}, - world::{Command, World}, + system::{input::SystemInput, BoxedSystem, IntoSystem, SystemParamValidationError}, + world::World, }; use alloc::boxed::Box; use bevy_ecs_macros::{Component, Resource}; #[cfg(feature = "bevy_reflect")] -use bevy_reflect::Reflect; 
+use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use core::marker::PhantomData; use thiserror::Error; /// A small wrapper for [`BoxedSystem`] that also keeps track whether or not the system has been initialized. #[derive(Component)] -struct RegisteredSystem { +#[require(SystemIdMarker)] +pub(crate) struct RegisteredSystem { initialized: bool, system: BoxedSystem, } +impl RegisteredSystem { + pub fn new(system: BoxedSystem) -> Self { + RegisteredSystem { + initialized: false, + system, + } + } +} + /// Marker [`Component`](bevy_ecs::component::Component) for identifying [`SystemId`] [`Entity`]s. -#[derive(Component)] +#[derive(Component, Default)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] -#[cfg_attr(feature = "bevy_reflect", reflect(Component))] +#[cfg_attr(feature = "bevy_reflect", reflect(Component, Default))] pub struct SystemIdMarker; /// A system that has been removed from the registry. @@ -118,17 +126,20 @@ impl core::fmt::Debug for SystemId { /// /// This resource is inserted by [`World::register_system_cached`]. #[derive(Resource)] -pub struct CachedSystemId(pub SystemId); +pub struct CachedSystemId { + /// The cached `SystemId` as an `Entity`. + pub entity: Entity, + _marker: PhantomData S>, +} -/// Creates a [`Bundle`] for a one-shot system entity. -fn system_bundle(system: BoxedSystem) -> impl Bundle { - ( - RegisteredSystem { - initialized: false, - system, - }, - SystemIdMarker, - ) +impl CachedSystemId { + /// Creates a new `CachedSystemId` struct given a `SystemId`. + pub fn new(id: SystemId) -> Self { + Self { + entity: id.entity(), + _marker: PhantomData, + } + } } impl World { @@ -157,13 +168,13 @@ impl World { /// Similar to [`Self::register_system`], but allows passing in a [`BoxedSystem`]. /// /// This is useful if the [`IntoSystem`] implementor has already been turned into a - /// [`System`] trait object and put in a [`Box`]. + /// [`System`](crate::system::System) trait object and put in a [`Box`]. 
pub fn register_boxed_system(&mut self, system: BoxedSystem) -> SystemId where I: SystemInput + 'static, O: 'static, { - let entity = self.spawn(system_bundle(system)).id(); + let entity = self.spawn(RegisteredSystem::new(system)).id(); SystemId::from_entity(entity) } @@ -202,11 +213,9 @@ impl World { /// This is different from [`RunSystemOnce::run_system_once`](crate::system::RunSystemOnce::run_system_once), /// because it keeps local state between calls and change detection works correctly. /// - /// In order to run a chained system with an input, use [`World::run_system_with`] instead. - /// - /// # Limitations + /// Also runs any queued-up commands. /// - /// - Stored systems cannot be recursive, they cannot call themselves through [`Commands::run_system`](crate::system::Commands). + /// In order to run a chained system with an input, use [`World::run_system_with`] instead. /// /// # Examples /// @@ -294,9 +303,7 @@ impl World { /// Before running a system, it must first be registered. /// The method [`World::register_system`] stores a given system and returns a [`SystemId`]. /// - /// # Limitations - /// - /// - Stored systems cannot be recursive, they cannot call themselves through [`Commands::run_system`](crate::system::Commands). + /// Also runs any queued-up commands. 
/// /// # Examples /// @@ -325,12 +332,12 @@ impl World { I: SystemInput + 'static, O: 'static, { - // lookup + // Lookup let mut entity = self .get_entity_mut(id.entity) .map_err(|_| RegisteredSystemError::SystemIdNotRegistered(id))?; - // take ownership of system trait object + // Take ownership of system trait object let RegisteredSystem { mut initialized, mut system, @@ -338,25 +345,33 @@ impl World { .take::>() .ok_or(RegisteredSystemError::Recursive(id))?; - // run the system + // Run the system if !initialized { system.initialize(self); initialized = true; } - let result = if system.validate_param(self) { - Ok(system.run(input, self)) - } else { - Err(RegisteredSystemError::InvalidParams(id)) - }; + let result = system + .validate_param(self) + .map_err(|err| RegisteredSystemError::InvalidParams { system: id, err }) + .map(|()| { + // Wait to run the commands until the system is available again. + // This is needed so the systems can recursively run themselves. + let ret = system.run_without_applying_deferred(input, self); + system.queue_deferred(self.into()); + ret + }); - // return ownership of system trait object (if entity still exists) + // Return ownership of system trait object (if entity still exists) if let Ok(mut entity) = self.get_entity_mut(id.entity) { entity.insert::>(RegisteredSystem { initialized, system, }); } + + // Run any commands enqueued by the system + self.flush(); result } @@ -392,21 +407,23 @@ impl World { ); } - if !self.contains_resource::>() { + if !self.contains_resource::>() { let id = self.register_system(system); - self.insert_resource(CachedSystemId::(id)); + self.insert_resource(CachedSystemId::::new(id)); return id; } - self.resource_scope(|world, mut id: Mut>| { - if let Ok(mut entity) = world.get_entity_mut(id.0.entity()) { + self.resource_scope(|world, mut id: Mut>| { + if let Ok(mut entity) = world.get_entity_mut(id.entity) { if !entity.contains::>() { - 
entity.insert(system_bundle(Box::new(IntoSystem::into_system(system)))); + entity.insert(RegisteredSystem::new(Box::new(IntoSystem::into_system( + system, + )))); } } else { - id.0 = world.register_system(system); + id.entity = world.register_system(system).entity(); } - id.0 + SystemId::from_entity(id.entity) }) } @@ -423,9 +440,9 @@ impl World { S: IntoSystem + 'static, { let id = self - .remove_resource::>() + .remove_resource::>() .ok_or(RegisteredSystemError::SystemNotCached)?; - self.unregister_system(id.0) + self.unregister_system(SystemId::::from_entity(id.entity)) } /// Runs a cached system, registering it if necessary. @@ -456,199 +473,6 @@ impl World { } } -/// The [`Command`] type for [`World::run_system`] or [`World::run_system_with`]. -/// -/// This command runs systems in an exclusive and single threaded way. -/// Running slow systems can become a bottleneck. -/// -/// If the system needs an [`In<_>`](crate::system::In) input value to run, it must -/// be provided as part of the command. -/// -/// There is no way to get the output of a system when run as a command, because the -/// execution of the system happens later. To get the output of a system, use -/// [`World::run_system`] or [`World::run_system_with`] instead of running the system as a command. -#[derive(Debug, Clone)] -pub struct RunSystemWith { - system_id: SystemId, - input: I::Inner<'static>, -} - -/// The [`Command`] type for [`World::run_system`]. -/// -/// This command runs systems in an exclusive and single threaded way. -/// Running slow systems can become a bottleneck. -/// -/// If the system needs an [`In<_>`](crate::system::In) input value to run, use the -/// [`RunSystemWith`] type instead. -/// -/// There is no way to get the output of a system when run as a command, because the -/// execution of the system happens later. To get the output of a system, use -/// [`World::run_system`] or [`World::run_system_with`] instead of running the system as a command. 
-pub type RunSystem = RunSystemWith<()>; - -impl RunSystem { - /// Creates a new [`Command`] struct, which can be added to [`Commands`](crate::system::Commands). - pub fn new(system_id: SystemId) -> Self { - Self::new_with_input(system_id, ()) - } -} - -impl RunSystemWith { - /// Creates a new [`Command`] struct, which can be added to [`Commands`](crate::system::Commands) - /// in order to run the specified system with the provided [`In<_>`](crate::system::In) input value. - pub fn new_with_input(system_id: SystemId, input: I::Inner<'static>) -> Self { - Self { system_id, input } - } -} - -impl Command for RunSystemWith -where - I: SystemInput: Send> + 'static, -{ - #[inline] - fn apply(self, world: &mut World) { - _ = world.run_system_with(self.system_id, self.input); - } -} - -/// The [`Command`] type for registering one shot systems from [`Commands`](crate::system::Commands). -/// -/// This command needs an already boxed system to register, and an already spawned entity. -pub struct RegisterSystem { - system: BoxedSystem, - entity: Entity, -} - -impl RegisterSystem -where - I: SystemInput + 'static, - O: 'static, -{ - /// Creates a new [`Command`] struct, which can be added to [`Commands`](crate::system::Commands). - pub fn new + 'static>(system: S, entity: Entity) -> Self { - Self { - system: Box::new(IntoSystem::into_system(system)), - entity, - } - } -} - -impl Command for RegisterSystem -where - I: SystemInput + Send + 'static, - O: Send + 'static, -{ - fn apply(self, world: &mut World) { - if let Ok(mut entity) = world.get_entity_mut(self.entity) { - entity.insert(system_bundle(self.system)); - } - } -} - -/// The [`Command`] type for unregistering one-shot systems from [`Commands`](crate::system::Commands). -pub struct UnregisterSystem { - system_id: SystemId, -} - -impl UnregisterSystem -where - I: SystemInput + 'static, - O: 'static, -{ - /// Creates a new [`Command`] struct, which can be added to [`Commands`](crate::system::Commands). 
- pub fn new(system_id: SystemId) -> Self { - Self { system_id } - } -} - -impl Command for UnregisterSystem -where - I: SystemInput + 'static, - O: 'static, -{ - fn apply(self, world: &mut World) { - let _ = world.unregister_system(self.system_id); - } -} - -/// The [`Command`] type for unregistering one-shot systems from [`Commands`](crate::system::Commands). -pub struct UnregisterSystemCached -where - I: SystemInput + 'static, - S: IntoSystem + Send + 'static, -{ - system: S, - _phantom: PhantomData (I, O, M)>, -} - -impl UnregisterSystemCached -where - I: SystemInput + 'static, - S: IntoSystem + Send + 'static, -{ - /// Creates a new [`Command`] struct, which can be added to [`Commands`](crate::system::Commands). - pub fn new(system: S) -> Self { - Self { - system, - _phantom: PhantomData, - } - } -} - -impl Command for UnregisterSystemCached -where - I: SystemInput + 'static, - O: 'static, - M: 'static, - S: IntoSystem + Send + 'static, -{ - fn apply(self, world: &mut World) { - let _ = world.unregister_system_cached(self.system); - } -} - -/// The [`Command`] type for running a cached one-shot system from -/// [`Commands`](crate::system::Commands). -/// -/// See [`World::register_system_cached`] for more information. -pub struct RunSystemCachedWith -where - I: SystemInput, - S: IntoSystem, -{ - system: S, - input: I::Inner<'static>, - _phantom: PhantomData<(fn() -> O, fn() -> M)>, -} - -impl RunSystemCachedWith -where - I: SystemInput, - S: IntoSystem, -{ - /// Creates a new [`Command`] struct, which can be added to - /// [`Commands`](crate::system::Commands). 
- pub fn new(system: S, input: I::Inner<'static>) -> Self { - Self { - system, - input, - _phantom: PhantomData, - } - } -} - -impl Command for RunSystemCachedWith -where - I: SystemInput: Send> + Send + 'static, - O: Send + 'static, - S: IntoSystem + Send + 'static, - M: 'static, -{ - fn apply(self, world: &mut World) { - let _ = world.run_system_cached_with(self.system, self.input); - } -} - /// An operation with stored systems failed. #[derive(Error)] pub enum RegisteredSystemError { @@ -669,10 +493,14 @@ pub enum RegisteredSystemError { #[error("System {0:?} tried to remove itself")] SelfRemove(SystemId), /// System could not be run due to parameters that failed validation. - /// - /// This can occur because the data required by the system was not present in the world. - #[error("The data required by the system {0:?} was not found in the world and the system did not run due to failed parameter validation.")] - InvalidParams(SystemId), + /// This should not be considered an error if [`field@SystemParamValidationError::skipped`] is `true`. + #[error("System {system:?} did not run due to failed parameter validation: {err}")] + InvalidParams { + /// The identifier of the system that was run. + system: SystemId, + /// The returned parameter validation error. 
+ err: SystemParamValidationError, + }, } impl core::fmt::Debug for RegisteredSystemError { @@ -684,14 +512,22 @@ impl core::fmt::Debug for RegisteredSystemError { Self::SystemNotCached => write!(f, "SystemNotCached"), Self::Recursive(arg0) => f.debug_tuple("Recursive").field(arg0).finish(), Self::SelfRemove(arg0) => f.debug_tuple("SelfRemove").field(arg0).finish(), - Self::InvalidParams(arg0) => f.debug_tuple("InvalidParams").field(arg0).finish(), + Self::InvalidParams { system, err } => f + .debug_struct("InvalidParams") + .field("system", system) + .field("err", err) + .finish(), } } } +#[cfg(test)] mod tests { - use crate::prelude::*; - use crate::{self as bevy_ecs}; + use core::cell::Cell; + + use bevy_utils::default; + + use crate::{prelude::*, system::SystemId}; #[derive(Resource, Default, PartialEq, Debug)] struct Counter(u8); @@ -949,6 +785,43 @@ mod tests { assert!(matches!(output, Ok(8))); } + #[test] + fn cached_system_into_same_system_type() { + use crate::error::Result; + + struct Foo; + impl IntoSystem<(), Result<()>, ()> for Foo { + type System = ApplyDeferred; + fn into_system(_: Self) -> Self::System { + ApplyDeferred + } + } + + struct Bar; + impl IntoSystem<(), Result<()>, ()> for Bar { + type System = ApplyDeferred; + fn into_system(_: Self) -> Self::System { + ApplyDeferred + } + } + + let mut world = World::new(); + let foo1 = world.register_system_cached(Foo); + let foo2 = world.register_system_cached(Foo); + let bar1 = world.register_system_cached(Bar); + let bar2 = world.register_system_cached(Bar); + + // The `S: IntoSystem` types are different, so they should be cached + // as separate systems, even though the `::System` + // types / values are the same (`ApplyDeferred`). + assert_ne!(foo1, bar1); + + // But if the `S: IntoSystem` types are the same, they'll be cached + // as the same system. 
+ assert_eq!(foo1, foo2); + assert_eq!(bar1, bar2); + } + #[test] fn system_with_input_ref() { fn with_ref(InRef(input): InRef, mut counter: ResMut) { @@ -992,19 +865,54 @@ mod tests { #[test] fn run_system_invalid_params() { use crate::system::RegisteredSystemError; + use alloc::{format, string::ToString}; struct T; impl Resource for T {} fn system(_: Res) {} let mut world = World::new(); - let id = world.register_system(system.param_warn_once()); + let id = world.register_system(system); // This fails because `T` has not been added to the world yet. let result = world.run_system(id); assert!(matches!( result, - Err(RegisteredSystemError::InvalidParams(_)) + Err(RegisteredSystemError::InvalidParams { .. }) )); + let expected = format!("System {id:?} did not run due to failed parameter validation: Parameter `Res` failed validation: Resource does not exist"); + assert_eq!(expected, result.unwrap_err().to_string()); + } + + #[test] + fn run_system_recursive() { + std::thread_local! { + static INVOCATIONS_LEFT: Cell = const { Cell::new(3) }; + static SYSTEM_ID: Cell> = default(); + } + + fn system(mut commands: Commands) { + let count = INVOCATIONS_LEFT.get() - 1; + INVOCATIONS_LEFT.set(count); + if count > 0 { + commands.run_system(SYSTEM_ID.get().unwrap()); + } + } + + let mut world = World::new(); + let id = world.register_system(system); + SYSTEM_ID.set(Some(id)); + world.run_system(id).unwrap(); + + assert_eq!(INVOCATIONS_LEFT.get(), 0); + } + + #[test] + fn run_system_exclusive_adapters() { + let mut world = World::new(); + fn system(_: &mut World) {} + world.run_system_cached(system).unwrap(); + world.run_system_cached(system.pipe(system)).unwrap(); + world.run_system_cached(system.map(|()| {})).unwrap(); } } diff --git a/crates/bevy_ecs/src/traversal.rs b/crates/bevy_ecs/src/traversal.rs index a8605e94ec8d0..342ad47849e06 100644 --- a/crates/bevy_ecs/src/traversal.rs +++ b/crates/bevy_ecs/src/traversal.rs @@ -1,6 +1,6 @@ //! 
A trait for components that let you traverse the ECS. -use crate::{entity::Entity, query::ReadOnlyQueryData}; +use crate::{entity::Entity, query::ReadOnlyQueryData, relationship::Relationship}; /// A component that can point to another entity, and which can be used to define a path through the ECS. /// @@ -30,3 +30,16 @@ impl Traversal for () { None } } + +/// This provides generalized hierarchy traversal for use in [event propagation]. +/// +/// # Warning +/// +/// Traversing in a loop could result in infinite loops for relationship graphs with loops. +/// +/// [event propagation]: crate::observer::Trigger::propagate +impl Traversal for &R { + fn traverse(item: Self::Item<'_>, _data: &D) -> Option { + Some(item.get()) + } +} diff --git a/crates/bevy_ecs/src/world/command_queue.rs b/crates/bevy_ecs/src/world/command_queue.rs index 014c5b21c5895..e8f820c0661f4 100644 --- a/crates/bevy_ecs/src/world/command_queue.rs +++ b/crates/bevy_ecs/src/world/command_queue.rs @@ -1,20 +1,17 @@ -use crate::system::{SystemBuffer, SystemMeta}; - +use crate::{ + system::{Command, SystemBuffer, SystemMeta}, + world::{DeferredWorld, World}, +}; +use alloc::{boxed::Box, vec::Vec}; +use bevy_ptr::{OwningPtr, Unaligned}; use core::{ fmt::Debug, mem::{size_of, MaybeUninit}, panic::AssertUnwindSafe, ptr::{addr_of_mut, NonNull}, }; - -use alloc::{boxed::Box, vec::Vec}; -use bevy_ptr::{OwningPtr, Unaligned}; use log::warn; -use crate::world::{Command, World}; - -use super::DeferredWorld; - struct CommandMeta { /// SAFETY: The `value` must point to a value of type `T: Command`, /// where `T` is some specific type that was used to produce this metadata. @@ -75,10 +72,7 @@ unsafe impl Sync for CommandQueue {} impl CommandQueue { /// Push a [`Command`] onto the queue. 
#[inline] - pub fn push(&mut self, command: C) - where - C: Command, - { + pub fn push(&mut self, command: impl Command) { // SAFETY: self is guaranteed to live for the lifetime of this method unsafe { self.get_raw().push(command); @@ -154,17 +148,14 @@ impl RawCommandQueue { /// /// * Caller ensures that `self` has not outlived the underlying queue #[inline] - pub unsafe fn push(&mut self, command: C) - where - C: Command, - { + pub unsafe fn push(&mut self, command: C) { // Stores a command alongside its metadata. // `repr(C)` prevents the compiler from reordering the fields, // while `repr(packed)` prevents the compiler from inserting padding bytes. #[repr(C, packed)] - struct Packed { + struct Packed { meta: CommandMeta, - command: T, + command: C, } let meta = CommandMeta { @@ -344,14 +335,16 @@ impl SystemBuffer for CommandQueue { #[cfg(test)] mod test { use super::*; - use crate as bevy_ecs; - use crate::system::Resource; - use alloc::sync::Arc; + use crate::resource::Resource; + use alloc::{borrow::ToOwned, string::String, sync::Arc}; use core::{ panic::AssertUnwindSafe, sync::atomic::{AtomicU32, Ordering}, }; + #[cfg(miri)] + use alloc::format; + struct DropCheck(Arc); impl DropCheck { @@ -438,10 +431,10 @@ mod test { assert_eq!(world.entities().len(), 2); } - // This has an arbitrary value `String` stored to ensure - // when then command gets pushed, the `bytes` vector gets - // some data added to it. - #[allow(dead_code)] + #[expect( + dead_code, + reason = "The inner string is used to ensure that, when the PanicCommand gets pushed to the queue, some data is written to the `bytes` vector." + )] struct PanicCommand(String); impl Command for PanicCommand { fn apply(self, _: &mut World) { @@ -517,7 +510,10 @@ mod test { assert_is_send(SpawnCommand); } - #[allow(dead_code)] + #[expect( + dead_code, + reason = "This struct is used to test how the CommandQueue reacts to padding added by rust's compiler." 
+ )] struct CommandWithPadding(u8, u16); impl Command for CommandWithPadding { fn apply(self, _: &mut World) {} diff --git a/crates/bevy_ecs/src/world/component_constants.rs b/crates/bevy_ecs/src/world/component_constants.rs index 5eea8dc6229ef..ea2899c5f916a 100644 --- a/crates/bevy_ecs/src/world/component_constants.rs +++ b/crates/bevy_ecs/src/world/component_constants.rs @@ -1,7 +1,6 @@ //! Internal components used by bevy with a fixed component id. //! Constants are used to skip [`TypeId`] lookups in hot paths. use super::*; -use crate::{self as bevy_ecs}; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; @@ -13,31 +12,44 @@ pub const ON_INSERT: ComponentId = ComponentId::new(1); pub const ON_REPLACE: ComponentId = ComponentId::new(2); /// [`ComponentId`] for [`OnRemove`] pub const ON_REMOVE: ComponentId = ComponentId::new(3); +/// [`ComponentId`] for [`OnDespawn`] +pub const ON_DESPAWN: ComponentId = ComponentId::new(4); -/// Trigger emitted when a component is added to an entity. See [`crate::component::ComponentHooks::on_add`] -/// for more information. +/// Trigger emitted when a component is inserted onto an entity that does not already have that +/// component. Runs before `OnInsert`. +/// See [`crate::component::ComponentHooks::on_add`] for more information. #[derive(Event, Debug)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", reflect(Debug))] pub struct OnAdd; -/// Trigger emitted when a component is inserted onto an entity. See [`crate::component::ComponentHooks::on_insert`] -/// for more information. +/// Trigger emitted when a component is inserted, regardless of whether or not the entity already +/// had that component. Runs after `OnAdd`, if it ran. +/// See [`crate::component::ComponentHooks::on_insert`] for more information. 
#[derive(Event, Debug)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", reflect(Debug))] pub struct OnInsert; -/// Trigger emitted when a component is replaced on an entity. See [`crate::component::ComponentHooks::on_replace`] -/// for more information. +/// Trigger emitted when a component is inserted onto an entity that already has that component. +/// Runs before the value is replaced, so you can still access the original component data. +/// See [`crate::component::ComponentHooks::on_replace`] for more information. #[derive(Event, Debug)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", reflect(Debug))] pub struct OnReplace; -/// Trigger emitted when a component is removed from an entity. See [`crate::component::ComponentHooks::on_remove`] -/// for more information. +/// Trigger emitted when a component is removed from an entity, and runs before the component is +/// removed, so you can still access the component data. +/// See [`crate::component::ComponentHooks::on_remove`] for more information. #[derive(Event, Debug)] #[cfg_attr(feature = "bevy_reflect", derive(Reflect))] #[cfg_attr(feature = "bevy_reflect", reflect(Debug))] pub struct OnRemove; + +/// Trigger emitted for each component on an entity when it is despawned. +/// See [`crate::component::ComponentHooks::on_despawn`] for more information. 
+#[derive(Event, Debug)] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", reflect(Debug))] +pub struct OnDespawn; diff --git a/crates/bevy_ecs/src/world/deferred_world.rs b/crates/bevy_ecs/src/world/deferred_world.rs index ef83d49270b0a..02c12fe6a3560 100644 --- a/crates/bevy_ecs/src/world/deferred_world.rs +++ b/crates/bevy_ecs/src/world/deferred_world.rs @@ -2,22 +2,26 @@ use core::ops::Deref; use crate::{ archetype::Archetype, - change_detection::MutUntyped, - component::{ComponentId, Mutable}, + change_detection::{MaybeLocation, MutUntyped}, + component::{ComponentId, HookContext, Mutable}, entity::Entity, event::{Event, EventId, Events, SendBatchIds}, observer::{Observers, TriggerTargets}, prelude::{Component, QueryState}, query::{QueryData, QueryFilter}, - system::{Commands, Query, Resource}, + relationship::RelationshipHookMode, + resource::Resource, + system::{Commands, Query}, traversal::Traversal, - world::{error::EntityFetchError, WorldEntityFetch}, + world::{error::EntityMutableFetchError, EntityFetcher, WorldEntityFetch}, }; -use super::{unsafe_world_cell::UnsafeWorldCell, Mut, World}; +use super::{unsafe_world_cell::UnsafeWorldCell, Mut, World, ON_INSERT, ON_REPLACE}; /// A [`World`] reference that disallows structural ECS changes. /// This includes initializing resources, registering components or spawning entities. +/// +/// This means that in order to add entities, for example, you will need to use commands instead of the world directly. pub struct DeferredWorld<'w> { // SAFETY: Implementors must not use this reference to make structural changes world: UnsafeWorldCell<'w>, @@ -75,11 +79,132 @@ impl<'w> DeferredWorld<'w> { &mut self, entity: Entity, ) -> Option> { + self.get_entity_mut(entity).ok()?.into_mut() + } + + /// Temporarily removes a [`Component`] `T` from the provided [`Entity`] and + /// runs the provided closure on it, returning the result if `T` was available. 
+ /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut`](DeferredWorld::get_mut). + #[inline] + pub(crate) fn modify_component( + &mut self, + entity: Entity, + f: impl FnOnce(&mut T) -> R, + ) -> Result, EntityMutableFetchError> { + // If the component is not registered, then it doesn't exist on this entity, so no action required. + let Some(component_id) = self.component_id::() else { + return Ok(None); + }; + + self.modify_component_by_id(entity, component_id, move |component| { + // SAFETY: component matches the component_id collected in the above line + let mut component = unsafe { component.with_type::() }; + + f(&mut component) + }) + } + + /// Temporarily removes a [`Component`] identified by the provided + /// [`ComponentId`] from the provided [`Entity`] and runs the provided + /// closure on it, returning the result if the component was available. + /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut_by_id`](DeferredWorld::get_mut_by_id). + /// + /// You should prefer the typed [`modify_component`](DeferredWorld::modify_component) + /// whenever possible. 
+ #[inline] + pub(crate) fn modify_component_by_id( + &mut self, + entity: Entity, + component_id: ComponentId, + f: impl for<'a> FnOnce(MutUntyped<'a>) -> R, + ) -> Result, EntityMutableFetchError> { + let entity_cell = self.get_entity_mut(entity)?; + + if !entity_cell.contains_id(component_id) { + return Ok(None); + } + + let archetype = &raw const *entity_cell.archetype(); + + // SAFETY: + // - DeferredWorld ensures archetype pointer will remain valid as no + // relocations will occur. + // - component_id exists on this world and this entity + // - ON_REPLACE is able to accept ZST events + unsafe { + let archetype = &*archetype; + self.trigger_on_replace( + archetype, + entity, + [component_id].into_iter(), + MaybeLocation::caller(), + RelationshipHookMode::Run, + ); + if archetype.has_replace_observer() { + self.trigger_observers( + ON_REPLACE, + entity, + [component_id].into_iter(), + MaybeLocation::caller(), + ); + } + } + + let mut entity_cell = self + .get_entity_mut(entity) + .expect("entity access confirmed above"); + + // SAFETY: we will run the required hooks to simulate removal/replacement. + let mut component = unsafe { + entity_cell + .get_mut_assume_mutable_by_id(component_id) + .expect("component access confirmed above") + }; + + let result = f(component.reborrow()); + + // Simulate adding this component by updating the relevant ticks + *component.ticks.added = *component.ticks.changed; + // SAFETY: - // - `as_unsafe_world_cell` is the only thing that is borrowing world - // - `as_unsafe_world_cell` provides mutable permission to everything - // - `&mut self` ensures no other borrows on world data - unsafe { self.world.get_entity(entity)?.get_mut() } + // - DeferredWorld ensures archetype pointer will remain valid as no + // relocations will occur. 
+ // - component_id exists on this world and this entity + // - ON_REPLACE is able to accept ZST events + unsafe { + let archetype = &*archetype; + self.trigger_on_insert( + archetype, + entity, + [component_id].into_iter(), + MaybeLocation::caller(), + RelationshipHookMode::Run, + ); + if archetype.has_insert_observer() { + self.trigger_observers( + ON_INSERT, + entity, + [component_id].into_iter(), + MaybeLocation::caller(), + ); + } + } + + Ok(Some(result)) } /// Returns [`EntityMut`]s that expose read and write operations for the @@ -99,9 +224,9 @@ impl<'w> DeferredWorld<'w> { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if any of the given `entities` do not exist in the world. + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if any of the given `entities` do not exist in the world. /// - Only the first entity found to be missing will be returned. - /// - Returns [`EntityFetchError::AliasedMutability`] if the same entity is requested multiple times. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the same entity is requested multiple times. /// /// # Examples /// @@ -110,11 +235,12 @@ impl<'w> DeferredWorld<'w> { /// [`EntityMut`]: crate::world::EntityMut /// [`&EntityHashSet`]: crate::entity::EntityHashSet /// [`EntityHashMap`]: crate::entity::EntityHashMap + /// [`Vec`]: alloc::vec::Vec #[inline] pub fn get_entity_mut( &mut self, entities: F, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let cell = self.as_unsafe_world_cell(); // SAFETY: `&mut self` gives mutable access to the entire world, // and prevents any other access to the world. 
@@ -241,11 +367,59 @@ impl<'w> DeferredWorld<'w> { /// [`EntityMut`]: crate::world::EntityMut /// [`&EntityHashSet`]: crate::entity::EntityHashSet /// [`EntityHashMap`]: crate::entity::EntityHashMap + /// [`Vec`]: alloc::vec::Vec #[inline] pub fn entity_mut(&mut self, entities: F) -> F::DeferredMut<'_> { self.get_entity_mut(entities).unwrap() } + /// Simultaneously provides access to entity data and a command queue, which + /// will be applied when the [`World`] is next flushed. + /// + /// This allows using borrowed entity data to construct commands where the + /// borrow checker would otherwise prevent it. + /// + /// See [`World::entities_and_commands`] for the non-deferred version. + /// + /// # Example + /// + /// ```rust + /// # use bevy_ecs::{prelude::*, world::DeferredWorld}; + /// #[derive(Component)] + /// struct Targets(Vec); + /// #[derive(Component)] + /// struct TargetedBy(Entity); + /// + /// # let mut _world = World::new(); + /// # let e1 = _world.spawn_empty().id(); + /// # let e2 = _world.spawn_empty().id(); + /// # let eid = _world.spawn(Targets(vec![e1, e2])).id(); + /// let mut world: DeferredWorld = // ... + /// # DeferredWorld::from(&mut _world); + /// let (entities, mut commands) = world.entities_and_commands(); + /// + /// let entity = entities.get(eid).unwrap(); + /// for &target in entity.get::().unwrap().0.iter() { + /// commands.entity(target).insert(TargetedBy(eid)); + /// } + /// # _world.flush(); + /// # assert_eq!(_world.get::(e1).unwrap().0, eid); + /// # assert_eq!(_world.get::(e2).unwrap().0, eid); + /// ``` + pub fn entities_and_commands(&mut self) -> (EntityFetcher, Commands) { + let cell = self.as_unsafe_world_cell(); + // SAFETY: `&mut self` gives mutable access to the entire world, and prevents simultaneous access. + let fetcher = unsafe { EntityFetcher::new(cell) }; + // SAFETY: + // - `&mut self` gives mutable access to the entire world, and prevents simultaneous access. 
+ // - Command queue access does not conflict with entity access. + let raw_queue = unsafe { cell.get_raw_command_queue() }; + // SAFETY: `&mut self` ensures the commands does not outlive the world. + let commands = unsafe { Commands::new_raw_from_entities(raw_queue, cell.entities()) }; + + (fetcher, commands) + } + /// Returns [`Query`] for the given [`QueryState`], which is used to efficiently /// run queries on the [`World`] by storing and reusing the [`QueryState`]. /// @@ -256,18 +430,8 @@ impl<'w> DeferredWorld<'w> { &mut self, state: &'s mut QueryState, ) -> Query<'_, 's, D, F> { - state.validate_world(self.world.id()); - state.update_archetypes(self); - // SAFETY: We ran validate_world to ensure our state matches - unsafe { - let world_cell = self.world; - Query::new( - world_cell, - state, - world_cell.last_change_tick(), - world_cell.change_tick(), - ) - } + // SAFETY: We have mutable access to the entire world + unsafe { state.query_unchecked(self.world) } } /// Gets a mutable reference to the resource of the given type @@ -403,13 +567,10 @@ impl<'w> DeferredWorld<'w> { entity: Entity, component_id: ComponentId, ) -> Option> { - // SAFETY: &mut self ensure that there are no outstanding accesses to the resource - unsafe { - self.world - .get_entity(entity)? - .get_mut_by_id(component_id) - .ok() - } + self.get_entity_mut(entity) + .ok()? + .into_mut_by_id(component_id) + .ok() } /// Triggers all `on_add` hooks for [`ComponentId`] in target. 
@@ -422,13 +583,22 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, + caller: MaybeLocation, ) { if archetype.has_add_hook() { for component_id in targets { // SAFETY: Caller ensures that these components exist let hooks = unsafe { self.components().get_info_unchecked(component_id) }.hooks(); if let Some(hook) = hooks.on_add { - hook(DeferredWorld { world: self.world }, entity, component_id); + hook( + DeferredWorld { world: self.world }, + HookContext { + entity, + component_id, + caller, + relationship_hook_mode: RelationshipHookMode::Run, + }, + ); } } } @@ -444,13 +614,23 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, + caller: MaybeLocation, + relationship_hook_mode: RelationshipHookMode, ) { if archetype.has_insert_hook() { for component_id in targets { // SAFETY: Caller ensures that these components exist let hooks = unsafe { self.components().get_info_unchecked(component_id) }.hooks(); if let Some(hook) = hooks.on_insert { - hook(DeferredWorld { world: self.world }, entity, component_id); + hook( + DeferredWorld { world: self.world }, + HookContext { + entity, + component_id, + caller, + relationship_hook_mode, + }, + ); } } } @@ -466,13 +646,23 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, + caller: MaybeLocation, + relationship_hook_mode: RelationshipHookMode, ) { if archetype.has_replace_hook() { for component_id in targets { // SAFETY: Caller ensures that these components exist let hooks = unsafe { self.components().get_info_unchecked(component_id) }.hooks(); if let Some(hook) = hooks.on_replace { - hook(DeferredWorld { world: self.world }, entity, component_id); + hook( + DeferredWorld { world: self.world }, + HookContext { + entity, + component_id, + caller, + relationship_hook_mode, + }, + ); } } } @@ -488,13 +678,53 @@ impl<'w> DeferredWorld<'w> { archetype: &Archetype, entity: Entity, targets: impl Iterator, 
+ caller: MaybeLocation, ) { if archetype.has_remove_hook() { for component_id in targets { // SAFETY: Caller ensures that these components exist let hooks = unsafe { self.components().get_info_unchecked(component_id) }.hooks(); if let Some(hook) = hooks.on_remove { - hook(DeferredWorld { world: self.world }, entity, component_id); + hook( + DeferredWorld { world: self.world }, + HookContext { + entity, + component_id, + caller, + relationship_hook_mode: RelationshipHookMode::Run, + }, + ); + } + } + } + } + + /// Triggers all `on_despawn` hooks for [`ComponentId`] in target. + /// + /// # Safety + /// Caller must ensure [`ComponentId`] in target exist in self. + #[inline] + pub(crate) unsafe fn trigger_on_despawn( + &mut self, + archetype: &Archetype, + entity: Entity, + targets: impl Iterator, + caller: MaybeLocation, + ) { + if archetype.has_despawn_hook() { + for component_id in targets { + // SAFETY: Caller ensures that these components exist + let hooks = unsafe { self.components().get_info_unchecked(component_id) }.hooks(); + if let Some(hook) = hooks.on_despawn { + hook( + DeferredWorld { world: self.world }, + HookContext { + entity, + component_id, + caller, + relationship_hook_mode: RelationshipHookMode::Run, + }, + ); } } } @@ -510,6 +740,7 @@ impl<'w> DeferredWorld<'w> { event: ComponentId, target: Entity, components: impl Iterator + Clone, + caller: MaybeLocation, ) { Observers::invoke::<_>( self.reborrow(), @@ -518,6 +749,7 @@ impl<'w> DeferredWorld<'w> { components, &mut (), &mut false, + caller, ); } @@ -530,9 +762,10 @@ impl<'w> DeferredWorld<'w> { &mut self, event: ComponentId, mut target: Entity, - components: &[ComponentId], + components: impl Iterator + Clone, data: &mut E, mut propagate: bool, + caller: MaybeLocation, ) where T: Traversal, { @@ -541,9 +774,10 @@ impl<'w> DeferredWorld<'w> { self.reborrow(), event, target, - components.iter().copied(), + components.clone(), data, &mut propagate, + caller, ); if !propagate { break; diff --git 
a/crates/bevy_ecs/src/world/entity_fetch.rs b/crates/bevy_ecs/src/world/entity_fetch.rs index 8d01970bdbef8..85881315636fb 100644 --- a/crates/bevy_ecs/src/world/entity_fetch.rs +++ b/crates/bevy_ecs/src/world/entity_fetch.rs @@ -2,13 +2,99 @@ use alloc::vec::Vec; use core::mem::MaybeUninit; use crate::{ - entity::{Entity, EntityHashMap, EntityHashSet}, + entity::{Entity, EntityDoesNotExistError, EntityHashMap, EntityHashSet}, + error::Result, world::{ - error::EntityFetchError, unsafe_world_cell::UnsafeWorldCell, EntityMut, EntityRef, + error::EntityMutableFetchError, unsafe_world_cell::UnsafeWorldCell, EntityMut, EntityRef, EntityWorldMut, }, }; +/// Provides a safe interface for non-structural access to the entities in a [`World`]. +/// +/// This cannot add or remove components, or spawn or despawn entities, +/// making it relatively safe to access in concert with other ECS data. +/// This type can be constructed via [`World::entities_and_commands`], +/// or [`DeferredWorld::entities_and_commands`]. +/// +/// [`World`]: crate::world::World +/// [`World::entities_and_commands`]: crate::world::World::entities_and_commands +/// [`DeferredWorld::entities_and_commands`]: crate::world::DeferredWorld::entities_and_commands +pub struct EntityFetcher<'w> { + cell: UnsafeWorldCell<'w>, +} + +impl<'w> EntityFetcher<'w> { + // SAFETY: + // - The given `cell` has mutable access to all entities. + // - No other references to entities exist at the same time. + pub(crate) unsafe fn new(cell: UnsafeWorldCell<'w>) -> Self { + Self { cell } + } + + /// Returns [`EntityRef`]s that expose read-only operations for the given + /// `entities`, returning [`Err`] if any of the given entities do not exist. + /// + /// This function supports fetching a single entity or multiple entities: + /// - Pass an [`Entity`] to receive a single [`EntityRef`]. + /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. 
+ /// - Pass an array of [`Entity`]s to receive an equally-sized array of [`EntityRef`]s. + /// - Pass a reference to a [`EntityHashSet`](crate::entity::EntityHashSet) to receive an + /// [`EntityHashMap`](crate::entity::EntityHashMap). + /// + /// # Errors + /// + /// If any of the given `entities` do not exist in the world, the first + /// [`Entity`] found to be missing will return an [`EntityDoesNotExistError`]. + /// + /// # Examples + /// + /// For examples, see [`World::entity`]. + /// + /// [`World::entity`]: crate::world::World::entity + #[inline] + pub fn get( + &self, + entities: F, + ) -> Result, EntityDoesNotExistError> { + // SAFETY: `&self` gives read access to all entities, and prevents mutable access. + unsafe { entities.fetch_ref(self.cell) } + } + + /// Returns [`EntityMut`]s that expose read and write operations for the + /// given `entities`, returning [`Err`] if any of the given entities do not + /// exist. + /// + /// This function supports fetching a single entity or multiple entities: + /// - Pass an [`Entity`] to receive a single [`EntityMut`]. + /// - This reference type allows for structural changes to the entity, + /// such as adding or removing components, or despawning the entity. + /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. 
+ /// + /// [`DeferredWorld::entity_mut`]: crate::world::DeferredWorld::entity_mut + #[inline] + pub fn get_mut( + &mut self, + entities: F, + ) -> Result, EntityMutableFetchError> { + // SAFETY: `&mut self` gives mutable access to all entities, + // and prevents any other access to entities. + unsafe { entities.fetch_deferred_mut(self.cell) } + } +} + /// Types that can be used to fetch [`Entity`] references from a [`World`]. /// /// Provided implementations are: @@ -21,10 +107,8 @@ use crate::{ /// /// # Performance /// -/// - The slice and array implementations perform an aliased mutabiltiy check +/// - The slice and array implementations perform an aliased mutability check /// in [`WorldEntityFetch::fetch_mut`] that is `O(N^2)`. -/// - The [`EntityHashSet`] implementation performs no such check as the type -/// itself guarantees no duplicates. /// - The single [`Entity`] implementation performs no such check as only one /// reference is returned. /// @@ -58,8 +142,11 @@ pub unsafe trait WorldEntityFetch { /// /// # Errors /// - /// - Returns [`Entity`] if the entity does not exist. - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity>; + /// - Returns [`EntityDoesNotExistError`] if the entity does not exist. + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError>; /// Returns mutable reference(s) to the entities with the given [`Entity`] /// IDs, as determined by `self`. @@ -72,11 +159,13 @@ pub unsafe trait WorldEntityFetch { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if the entity does not exist. - /// - Returns [`EntityFetchError::AliasedMutability`] if the entity was + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if the entity does not exist. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the entity was /// requested mutably more than once. 
- unsafe fn fetch_mut(self, cell: UnsafeWorldCell<'_>) - -> Result, EntityFetchError>; + unsafe fn fetch_mut( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityMutableFetchError>; /// Returns mutable reference(s) to the entities with the given [`Entity`] /// IDs, as determined by `self`, but without structural mutability. @@ -93,13 +182,13 @@ pub unsafe trait WorldEntityFetch { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if the entity does not exist. - /// - Returns [`EntityFetchError::AliasedMutability`] if the entity was + /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if the entity does not exist. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the entity was /// requested mutably more than once. unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError>; + ) -> Result, EntityMutableFetchError>; } // SAFETY: @@ -111,8 +200,11 @@ unsafe impl WorldEntityFetch for Entity { type Mut<'w> = EntityWorldMut<'w>; type DeferredMut<'w> = EntityMut<'w>; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { - let ecell = cell.get_entity(self).ok_or(self)?; + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { + let ecell = cell.get_entity(self)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. Ok(unsafe { EntityRef::new(ecell) }) } @@ -120,11 +212,11 @@ unsafe impl WorldEntityFetch for Entity { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let location = cell .entities() .get(self) - .ok_or(EntityFetchError::NoSuchEntity(self, cell))?; + .ok_or(EntityDoesNotExistError::new(self, cell.entities()))?; // SAFETY: caller ensures that the world cell has mutable access to the entity. let world = unsafe { cell.world_mut() }; // SAFETY: location was fetched from the same world's `Entities`. 
@@ -134,10 +226,8 @@ unsafe impl WorldEntityFetch for Entity { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { - let ecell = cell - .get_entity(self) - .ok_or(EntityFetchError::NoSuchEntity(self, cell))?; + ) -> Result, EntityMutableFetchError> { + let ecell = cell.get_entity(self)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. Ok(unsafe { EntityMut::new(ecell) }) } @@ -152,21 +242,24 @@ unsafe impl WorldEntityFetch for [Entity; N] { type Mut<'w> = [EntityMut<'w>; N]; type DeferredMut<'w> = [EntityMut<'w>; N]; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { <&Self>::fetch_ref(&self, cell) } unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { <&Self>::fetch_mut(&self, cell) } unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { <&Self>::fetch_deferred_mut(&self, cell) } } @@ -180,10 +273,13 @@ unsafe impl WorldEntityFetch for &'_ [Entity; N] { type Mut<'w> = [EntityMut<'w>; N]; type DeferredMut<'w> = [EntityMut<'w>; N]; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { let mut refs = [MaybeUninit::uninit(); N]; for (r, &id) in core::iter::zip(&mut refs, self) { - let ecell = cell.get_entity(id).ok_or(id)?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. 
*r = MaybeUninit::new(unsafe { EntityRef::new(ecell) }); } @@ -197,21 +293,19 @@ unsafe impl WorldEntityFetch for &'_ [Entity; N] { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // Check for duplicate entities. for i in 0..self.len() { for j in 0..i { if self[i] == self[j] { - return Err(EntityFetchError::AliasedMutability(self[i])); + return Err(EntityMutableFetchError::AliasedMutability(self[i])); } } } let mut refs = [const { MaybeUninit::uninit() }; N]; for (r, &id) in core::iter::zip(&mut refs, self) { - let ecell = cell - .get_entity(id) - .ok_or(EntityFetchError::NoSuchEntity(id, cell))?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. *r = MaybeUninit::new(unsafe { EntityMut::new(ecell) }); } @@ -225,7 +319,7 @@ unsafe impl WorldEntityFetch for &'_ [Entity; N] { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // SAFETY: caller ensures that the world cell has mutable access to the entity, // and `fetch_mut` does not return structurally-mutable references. unsafe { self.fetch_mut(cell) } @@ -241,10 +335,13 @@ unsafe impl WorldEntityFetch for &'_ [Entity] { type Mut<'w> = Vec>; type DeferredMut<'w> = Vec>; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { let mut refs = Vec::with_capacity(self.len()); for &id in self { - let ecell = cell.get_entity(id).ok_or(id)?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. 
refs.push(unsafe { EntityRef::new(ecell) }); } @@ -255,21 +352,19 @@ unsafe impl WorldEntityFetch for &'_ [Entity] { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // Check for duplicate entities. for i in 0..self.len() { for j in 0..i { if self[i] == self[j] { - return Err(EntityFetchError::AliasedMutability(self[i])); + return Err(EntityMutableFetchError::AliasedMutability(self[i])); } } } let mut refs = Vec::with_capacity(self.len()); for &id in self { - let ecell = cell - .get_entity(id) - .ok_or(EntityFetchError::NoSuchEntity(id, cell))?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. refs.push(unsafe { EntityMut::new(ecell) }); } @@ -280,7 +375,7 @@ unsafe impl WorldEntityFetch for &'_ [Entity] { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // SAFETY: caller ensures that the world cell has mutable access to the entity, // and `fetch_mut` does not return structurally-mutable references. unsafe { self.fetch_mut(cell) } @@ -296,10 +391,13 @@ unsafe impl WorldEntityFetch for &'_ EntityHashSet { type Mut<'w> = EntityHashMap>; type DeferredMut<'w> = EntityHashMap>; - unsafe fn fetch_ref(self, cell: UnsafeWorldCell<'_>) -> Result, Entity> { + unsafe fn fetch_ref( + self, + cell: UnsafeWorldCell<'_>, + ) -> Result, EntityDoesNotExistError> { let mut refs = EntityHashMap::with_capacity(self.len()); for &id in self { - let ecell = cell.get_entity(id).ok_or(id)?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has read-only access to the entity. 
refs.insert(id, unsafe { EntityRef::new(ecell) }); } @@ -309,12 +407,10 @@ unsafe impl WorldEntityFetch for &'_ EntityHashSet { unsafe fn fetch_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let mut refs = EntityHashMap::with_capacity(self.len()); for &id in self { - let ecell = cell - .get_entity(id) - .ok_or(EntityFetchError::NoSuchEntity(id, cell))?; + let ecell = cell.get_entity(id)?; // SAFETY: caller ensures that the world cell has mutable access to the entity. refs.insert(id, unsafe { EntityMut::new(ecell) }); } @@ -324,7 +420,7 @@ unsafe impl WorldEntityFetch for &'_ EntityHashSet { unsafe fn fetch_deferred_mut( self, cell: UnsafeWorldCell<'_>, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { // SAFETY: caller ensures that the world cell has mutable access to the entity, // and `fetch_mut` does not return structurally-mutable references. unsafe { self.fetch_mut(cell) } diff --git a/crates/bevy_ecs/src/world/entity_ref.rs b/crates/bevy_ecs/src/world/entity_ref.rs index f8b8e55f59f37..a9887c5248673 100644 --- a/crates/bevy_ecs/src/world/entity_ref.rs +++ b/crates/bevy_ecs/src/world/entity_ref.rs @@ -1,24 +1,34 @@ use crate::{ archetype::{Archetype, ArchetypeId, Archetypes}, - bundle::{Bundle, BundleId, BundleInfo, BundleInserter, DynamicBundle, InsertMode}, - change_detection::MutUntyped, - component::{Component, ComponentId, ComponentTicks, Components, Mutable, StorageType}, + bundle::{ + Bundle, BundleEffect, BundleFromComponents, BundleId, BundleInfo, BundleInserter, + DynamicBundle, InsertMode, + }, + change_detection::{MaybeLocation, MutUntyped}, + component::{ + Component, ComponentId, ComponentTicks, Components, ComponentsRegistrator, Mutable, + StorageType, + }, entity::{ - Entities, Entity, EntityBorrow, EntityCloneBuilder, EntityLocation, TrustedEntityBorrow, + ContainsEntity, Entities, Entity, EntityCloner, EntityClonerBuilder, EntityEquivalent, + 
EntityLocation, }, event::Event, observer::Observer, query::{Access, ReadOnlyQueryData}, + relationship::RelationshipHookMode, removal_detection::RemovedComponentEvents, + resource::Resource, storage::Storages, system::IntoObserverSystem, - world::{error::EntityComponentError, DeferredWorld, Mut, World}, + world::{ + error::EntityComponentError, unsafe_world_cell::UnsafeEntityCell, DeferredWorld, Mut, Ref, + World, ON_DESPAWN, ON_REMOVE, ON_REPLACE, + }, }; use alloc::vec::Vec; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_ptr::{OwningPtr, Ptr}; -use bevy_utils::{HashMap, HashSet}; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; use core::{ any::TypeId, cmp::Ordering, @@ -28,8 +38,6 @@ use core::{ }; use thiserror::Error; -use super::{unsafe_world_cell::UnsafeEntityCell, Ref, ON_REMOVE, ON_REPLACE}; - /// A read-only reference to a particular [`Entity`] and all of its components. /// /// # Examples @@ -49,7 +57,9 @@ use super::{unsafe_world_cell::UnsafeEntityCell, Ref, ON_REMOVE, ON_REPLACE}; /// # bevy_ecs::system::assert_is_system(disjoint_system); /// ``` #[derive(Copy, Clone)] -pub struct EntityRef<'w>(UnsafeEntityCell<'w>); +pub struct EntityRef<'w> { + cell: UnsafeEntityCell<'w>, +} impl<'w> EntityRef<'w> { /// # Safety @@ -58,26 +68,26 @@ impl<'w> EntityRef<'w> { /// at the same time as the returned [`EntityRef`]. #[inline] pub(crate) unsafe fn new(cell: UnsafeEntityCell<'w>) -> Self { - Self(cell) + Self { cell } } /// Returns the [ID](Entity) of the current entity. #[inline] #[must_use = "Omit the .id() call if you do not need to store the `Entity` identifier."] pub fn id(&self) -> Entity { - self.0.id() + self.cell.id() } /// Gets metadata indicating the location where the current entity is stored. #[inline] pub fn location(&self) -> EntityLocation { - self.0.location() + self.cell.location() } /// Returns the archetype that the current entity belongs to. 
#[inline] pub fn archetype(&self) -> &Archetype { - self.0.archetype() + self.cell.archetype() } /// Returns `true` if the current entity has a component of type `T`. @@ -99,10 +109,10 @@ impl<'w> EntityRef<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { - self.0.contains_id(component_id) + self.cell.contains_id(component_id) } /// Returns `true` if the current entity has a component with the type identified by `type_id`. @@ -114,7 +124,7 @@ impl<'w> EntityRef<'w> { /// - If you have a [`ComponentId`] instead of a [`TypeId`], consider using [`Self::contains_id`]. #[inline] pub fn contains_type_id(&self, type_id: TypeId) -> bool { - self.0.contains_type_id(type_id) + self.cell.contains_type_id(type_id) } /// Gets access to the component of type `T` for the current entity. @@ -122,7 +132,7 @@ impl<'w> EntityRef<'w> { #[inline] pub fn get(&self) -> Option<&'w T> { // SAFETY: We have read-only access to all components of this entity. - unsafe { self.0.get::() } + unsafe { self.cell.get::() } } /// Gets access to the component of type `T` for the current entity, @@ -132,7 +142,7 @@ impl<'w> EntityRef<'w> { #[inline] pub fn get_ref(&self) -> Option> { // SAFETY: We have read-only access to all components of this entity. - unsafe { self.0.get_ref::() } + unsafe { self.cell.get_ref::() } } /// Retrieves the change ticks for the given component. This can be useful for implementing change @@ -140,7 +150,7 @@ impl<'w> EntityRef<'w> { #[inline] pub fn get_change_ticks(&self) -> Option { // SAFETY: We have read-only access to all components of this entity. 
- unsafe { self.0.get_change_ticks::() } + unsafe { self.cell.get_change_ticks::() } } /// Retrieves the change ticks for the given [`ComponentId`]. This can be useful for implementing change @@ -152,7 +162,7 @@ impl<'w> EntityRef<'w> { #[inline] pub fn get_change_ticks_by_id(&self, component_id: ComponentId) -> Option { // SAFETY: We have read-only access to all components of this entity. - unsafe { self.0.get_change_ticks_by_id(component_id) } + unsafe { self.cell.get_change_ticks_by_id(component_id) } } /// Returns [untyped read-only reference(s)](Ptr) to component(s) for the @@ -240,7 +250,7 @@ impl<'w> EntityRef<'w> { /// ## [`HashSet`] of [`ComponentId`]s /// /// ``` - /// # use bevy_utils::HashSet; + /// # use bevy_platform::collections::HashSet; /// # use bevy_ecs::{prelude::*, component::ComponentId}; /// # /// # #[derive(Component, PartialEq, Debug)] @@ -265,7 +275,7 @@ impl<'w> EntityRef<'w> { component_ids: F, ) -> Result, EntityComponentError> { // SAFETY: We have read-only access to all components of this entity. - unsafe { component_ids.fetch_ref(self.0) } + unsafe { component_ids.fetch_ref(self.cell) } } /// Returns read-only components for the current entity that match the query `Q`. @@ -274,66 +284,66 @@ impl<'w> EntityRef<'w> { /// /// If the entity does not have the components required by the query `Q`. pub fn components(&self) -> Q::Item<'w> { - self.get_components::().expect(QUERY_MISMATCH_ERROR) + self.get_components::() + .expect("Query does not match the current entity") } /// Returns read-only components for the current entity that match the query `Q`, /// or `None` if the entity does not have the components required by the query `Q`. pub fn get_components(&self) -> Option> { // SAFETY: We have read-only access to all components of this entity. - unsafe { self.0.get_components::() } + unsafe { self.cell.get_components::() } } /// Returns the source code location from which this entity has been spawned. 
- #[cfg(feature = "track_change_detection")] - pub fn spawned_by(&self) -> &'static Location<'static> { - self.0.spawned_by() + pub fn spawned_by(&self) -> MaybeLocation { + self.cell.spawned_by() } } impl<'w> From> for EntityRef<'w> { - fn from(entity_mut: EntityWorldMut<'w>) -> EntityRef<'w> { + fn from(entity: EntityWorldMut<'w>) -> EntityRef<'w> { // SAFETY: // - `EntityWorldMut` guarantees exclusive access to the entire world. - unsafe { EntityRef::new(entity_mut.into_unsafe_entity_cell()) } + unsafe { EntityRef::new(entity.into_unsafe_entity_cell()) } } } impl<'a> From<&'a EntityWorldMut<'_>> for EntityRef<'a> { - fn from(value: &'a EntityWorldMut<'_>) -> Self { + fn from(entity: &'a EntityWorldMut<'_>) -> Self { // SAFETY: // - `EntityWorldMut` guarantees exclusive access to the entire world. - // - `&value` ensures no mutable accesses are active. - unsafe { EntityRef::new(value.as_unsafe_entity_cell_readonly()) } + // - `&entity` ensures no mutable accesses are active. + unsafe { EntityRef::new(entity.as_unsafe_entity_cell_readonly()) } } } impl<'w> From> for EntityRef<'w> { - fn from(value: EntityMut<'w>) -> Self { + fn from(entity: EntityMut<'w>) -> Self { // SAFETY: // - `EntityMut` guarantees exclusive access to all of the entity's components. - unsafe { EntityRef::new(value.0) } + unsafe { EntityRef::new(entity.cell) } } } impl<'a> From<&'a EntityMut<'_>> for EntityRef<'a> { - fn from(value: &'a EntityMut<'_>) -> Self { + fn from(entity: &'a EntityMut<'_>) -> Self { // SAFETY: // - `EntityMut` guarantees exclusive access to all of the entity's components. - // - `&value` ensures there are no mutable accesses. - unsafe { EntityRef::new(value.0) } + // - `&entity` ensures there are no mutable accesses. 
+ unsafe { EntityRef::new(entity.cell) } } } impl<'a> TryFrom> for EntityRef<'a> { type Error = TryFromFilteredError; - fn try_from(value: FilteredEntityRef<'a>) -> Result { - if !value.access.has_read_all() { + fn try_from(entity: FilteredEntityRef<'a>) -> Result { + if !entity.access.has_read_all() { Err(TryFromFilteredError::MissingReadAllAccess) } else { // SAFETY: check above guarantees read-only access to all components of the entity. - Ok(unsafe { EntityRef::new(value.entity) }) + Ok(unsafe { EntityRef::new(entity.entity) }) } } } @@ -341,12 +351,12 @@ impl<'a> TryFrom> for EntityRef<'a> { impl<'a> TryFrom<&'a FilteredEntityRef<'_>> for EntityRef<'a> { type Error = TryFromFilteredError; - fn try_from(value: &'a FilteredEntityRef<'_>) -> Result { - if !value.access.has_read_all() { + fn try_from(entity: &'a FilteredEntityRef<'_>) -> Result { + if !entity.access.has_read_all() { Err(TryFromFilteredError::MissingReadAllAccess) } else { // SAFETY: check above guarantees read-only access to all components of the entity. - Ok(unsafe { EntityRef::new(value.entity) }) + Ok(unsafe { EntityRef::new(entity.entity) }) } } } @@ -354,12 +364,12 @@ impl<'a> TryFrom<&'a FilteredEntityRef<'_>> for EntityRef<'a> { impl<'a> TryFrom> for EntityRef<'a> { type Error = TryFromFilteredError; - fn try_from(value: FilteredEntityMut<'a>) -> Result { - if !value.access.has_read_all() { + fn try_from(entity: FilteredEntityMut<'a>) -> Result { + if !entity.access.has_read_all() { Err(TryFromFilteredError::MissingReadAllAccess) } else { // SAFETY: check above guarantees read-only access to all components of the entity. 
- Ok(unsafe { EntityRef::new(value.entity) }) + Ok(unsafe { EntityRef::new(entity.entity) }) } } } @@ -367,12 +377,12 @@ impl<'a> TryFrom> for EntityRef<'a> { impl<'a> TryFrom<&'a FilteredEntityMut<'_>> for EntityRef<'a> { type Error = TryFromFilteredError; - fn try_from(value: &'a FilteredEntityMut<'_>) -> Result { - if !value.access.has_read_all() { + fn try_from(entity: &'a FilteredEntityMut<'_>) -> Result { + if !entity.access.has_read_all() { Err(TryFromFilteredError::MissingReadAllAccess) } else { // SAFETY: check above guarantees read-only access to all components of the entity. - Ok(unsafe { EntityRef::new(value.entity) }) + Ok(unsafe { EntityRef::new(entity.entity) }) } } } @@ -385,12 +395,11 @@ impl PartialEq for EntityRef<'_> { impl Eq for EntityRef<'_> {} -#[expect(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for EntityRef<'_> { /// [`EntityRef`]'s comparison trait implementations match the underlying [`Entity`], /// and cannot discern between different worlds. fn partial_cmp(&self, other: &Self) -> Option { - self.entity().partial_cmp(&other.entity()) + Some(self.cmp(other)) } } @@ -406,14 +415,14 @@ impl Hash for EntityRef<'_> { } } -impl EntityBorrow for EntityRef<'_> { +impl ContainsEntity for EntityRef<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for EntityRef<'_> {} +unsafe impl EntityEquivalent for EntityRef<'_> {} /// Provides mutable access to a single entity and all of its components. 
/// @@ -436,7 +445,9 @@ unsafe impl TrustedEntityBorrow for EntityRef<'_> {} /// } /// # bevy_ecs::system::assert_is_system(disjoint_system); /// ``` -pub struct EntityMut<'w>(UnsafeEntityCell<'w>); +pub struct EntityMut<'w> { + cell: UnsafeEntityCell<'w>, +} impl<'w> EntityMut<'w> { /// # Safety @@ -444,14 +455,20 @@ impl<'w> EntityMut<'w> { /// - No accesses to any of the entity's components may exist /// at the same time as the returned [`EntityMut`]. pub(crate) unsafe fn new(cell: UnsafeEntityCell<'w>) -> Self { - Self(cell) + Self { cell } } /// Returns a new instance with a shorter lifetime. /// This is useful if you have `&mut EntityMut`, but you need `EntityMut`. pub fn reborrow(&mut self) -> EntityMut<'_> { // SAFETY: We have exclusive access to the entire entity and its components. - unsafe { Self::new(self.0) } + unsafe { Self::new(self.cell) } + } + + /// Consumes `self` and returns read-only access to all of the entity's + /// components, with the world `'w` lifetime. + pub fn into_readonly(self) -> EntityRef<'w> { + EntityRef::from(self) } /// Gets read-only access to all of the entity's components. @@ -463,19 +480,19 @@ impl<'w> EntityMut<'w> { #[inline] #[must_use = "Omit the .id() call if you do not need to store the `Entity` identifier."] pub fn id(&self) -> Entity { - self.0.id() + self.cell.id() } /// Gets metadata indicating the location where the current entity is stored. #[inline] pub fn location(&self) -> EntityLocation { - self.0.location() + self.cell.location() } /// Returns the archetype that the current entity belongs to. #[inline] pub fn archetype(&self) -> &Archetype { - self.0.archetype() + self.cell.archetype() } /// Returns `true` if the current entity has a component of type `T`. @@ -497,10 +514,10 @@ impl<'w> EntityMut<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. 
/// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { - self.0.contains_id(component_id) + self.cell.contains_id(component_id) } /// Returns `true` if the current entity has a component with the type identified by `type_id`. @@ -512,7 +529,7 @@ impl<'w> EntityMut<'w> { /// - If you have a [`ComponentId`] instead of a [`TypeId`], consider using [`Self::contains_id`]. #[inline] pub fn contains_type_id(&self, type_id: TypeId) -> bool { - self.0.contains_type_id(type_id) + self.cell.contains_type_id(type_id) } /// Gets access to the component of type `T` for the current entity. @@ -528,14 +545,13 @@ impl<'w> EntityMut<'w> { /// /// If the entity does not have the components required by the query `Q`. pub fn components(&self) -> Q::Item<'_> { - self.get_components::().expect(QUERY_MISMATCH_ERROR) + self.as_readonly().components::() } /// Returns read-only components for the current entity that match the query `Q`, /// or `None` if the entity does not have the components required by the query `Q`. pub fn get_components(&self) -> Option> { - // SAFETY: We have read-only access to all components of this entity. - unsafe { self.0.get_components::() } + self.as_readonly().get_components::() } /// Consumes `self` and gets access to the component of type `T` with the @@ -544,8 +560,7 @@ impl<'w> EntityMut<'w> { /// Returns `None` if the entity does not have a component of type `T`. #[inline] pub fn into_borrow(self) -> Option<&'w T> { - // SAFETY: consuming `self` implies exclusive access - unsafe { self.0.get() } + self.into_readonly().get() } /// Gets access to the component of type `T` for the current entity, @@ -564,8 +579,7 @@ impl<'w> EntityMut<'w> { /// Returns `None` if the entity does not have a component of type `T`. 
#[inline] pub fn into_ref(self) -> Option> { - // SAFETY: consuming `self` implies exclusive access - unsafe { self.0.get_ref() } + self.into_readonly().get_ref() } /// Gets mutable access to the component of type `T` for the current entity. @@ -573,7 +587,7 @@ impl<'w> EntityMut<'w> { #[inline] pub fn get_mut>(&mut self) -> Option> { // SAFETY: &mut self implies exclusive access for duration of returned value - unsafe { self.0.get_mut() } + unsafe { self.cell.get_mut() } } /// Gets mutable access to the component of type `T` for the current entity. @@ -584,8 +598,10 @@ impl<'w> EntityMut<'w> { /// - `T` must be a mutable component #[inline] pub unsafe fn get_mut_assume_mutable(&mut self) -> Option> { - // SAFETY: &mut self implies exclusive access for duration of returned value - unsafe { self.0.get_mut_assume_mutable() } + // SAFETY: + // - &mut self implies exclusive access for duration of returned value + // - Caller ensures `T` is a mutable component + unsafe { self.cell.get_mut_assume_mutable() } } /// Consumes self and gets mutable access to the component of type `T` @@ -594,7 +610,21 @@ impl<'w> EntityMut<'w> { #[inline] pub fn into_mut>(self) -> Option> { // SAFETY: consuming `self` implies exclusive access - unsafe { self.0.get_mut() } + unsafe { self.cell.get_mut() } + } + + /// Gets mutable access to the component of type `T` for the current entity. + /// Returns `None` if the entity does not have a component of type `T`. + /// + /// # Safety + /// + /// - `T` must be a mutable component + #[inline] + pub unsafe fn into_mut_assume_mutable(self) -> Option> { + // SAFETY: + // - Consuming `self` implies exclusive access + // - Caller ensures `T` is a mutable component + unsafe { self.cell.get_mut_assume_mutable() } } /// Retrieves the change ticks for the given component. 
This can be useful for implementing change @@ -667,10 +697,7 @@ impl<'w> EntityMut<'w> { self, component_ids: F, ) -> Result, EntityComponentError> { - // SAFETY: - // - We have read-only access to all components of this entity. - // - consuming `self` ensures that no references exist to this entity's components. - unsafe { component_ids.fetch_ref(self.0) } + self.into_readonly().get_by_id(component_ids) } /// Returns [untyped mutable reference(s)](MutUntyped) to component(s) for @@ -765,7 +792,7 @@ impl<'w> EntityMut<'w> { /// ## [`HashSet`] of [`ComponentId`]s /// /// ``` - /// # use bevy_utils::HashSet; + /// # use bevy_platform::collections::HashSet; /// # use bevy_ecs::{prelude::*, component::ComponentId}; /// # /// # #[derive(Component, PartialEq, Debug)] @@ -794,7 +821,40 @@ impl<'w> EntityMut<'w> { // SAFETY: // - `&mut self` ensures that no references exist to this entity's components. // - We have exclusive access to all components of this entity. - unsafe { component_ids.fetch_mut(self.0) } + unsafe { component_ids.fetch_mut(self.cell) } + } + + /// Returns [untyped mutable reference(s)](MutUntyped) to component(s) for + /// the current entity, based on the given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. + /// + /// **You should prefer to use the typed API [`EntityMut::get_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityMut::get_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. 
+ /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn get_mut_assume_mutable_by_id( + &mut self, + component_ids: F, + ) -> Result, EntityComponentError> { + // SAFETY: + // - `&mut self` ensures that no references exist to this entity's components. + // - We have exclusive access to all components of this entity. + unsafe { component_ids.fetch_mut_assume_mutable(self.cell) } } /// Returns [untyped mutable reference](MutUntyped) to component for @@ -822,7 +882,37 @@ impl<'w> EntityMut<'w> { // SAFETY: // - The caller must ensure simultaneous access is limited // - to components that are mutually independent. - unsafe { component_ids.fetch_mut(self.0) } + unsafe { component_ids.fetch_mut(self.cell) } + } + + /// Returns [untyped mutable reference](MutUntyped) to component for + /// the current entity, based on the given [`ComponentId`]. + /// Assumes the given [`ComponentId`]s refer to mutable components. + /// + /// Unlike [`EntityMut::get_mut_assume_mutable_by_id`], this method borrows &self instead of + /// &mut self, allowing the caller to access multiple components simultaneously. + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the [`UnsafeEntityCell`] has permission to access the component mutably + /// - no other references to the component exist at the same time + /// - the provided [`ComponentId`]s must refer to mutable components. 
+ #[inline] + pub unsafe fn get_mut_assume_mutable_by_id_unchecked( + &self, + component_ids: F, + ) -> Result, EntityComponentError> { + // SAFETY: + // - The caller must ensure simultaneous access is limited + // - to components that are mutually independent. + unsafe { component_ids.fetch_mut_assume_mutable(self.cell) } } /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) @@ -855,47 +945,80 @@ impl<'w> EntityMut<'w> { // SAFETY: // - consuming `self` ensures that no references exist to this entity's components. // - We have exclusive access to all components of this entity. - unsafe { component_ids.fetch_mut(self.0) } + unsafe { component_ids.fetch_mut(self.cell) } + } + + /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) + /// to component(s) with lifetime `'w` for the current entity, based on the + /// given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. + /// + /// **You should prefer to use the typed API [`EntityMut::into_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityMut::into_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. 
+ #[inline] + pub unsafe fn into_mut_assume_mutable_by_id( + self, + component_ids: F, + ) -> Result, EntityComponentError> { + // SAFETY: + // - consuming `self` ensures that no references exist to this entity's components. + // - We have exclusive access to all components of this entity. + unsafe { component_ids.fetch_mut_assume_mutable(self.cell) } } /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_change_detection")] - pub fn spawned_by(&self) -> &'static Location<'static> { - self.0.spawned_by() + pub fn spawned_by(&self) -> MaybeLocation { + self.cell.spawned_by() } } impl<'w> From<&'w mut EntityMut<'_>> for EntityMut<'w> { - fn from(value: &'w mut EntityMut<'_>) -> Self { - value.reborrow() + fn from(entity: &'w mut EntityMut<'_>) -> Self { + entity.reborrow() } } impl<'w> From> for EntityMut<'w> { - fn from(value: EntityWorldMut<'w>) -> Self { + fn from(entity: EntityWorldMut<'w>) -> Self { // SAFETY: `EntityWorldMut` guarantees exclusive access to the entire world. - unsafe { EntityMut::new(value.into_unsafe_entity_cell()) } + unsafe { EntityMut::new(entity.into_unsafe_entity_cell()) } } } impl<'a> From<&'a mut EntityWorldMut<'_>> for EntityMut<'a> { - fn from(value: &'a mut EntityWorldMut<'_>) -> Self { + fn from(entity: &'a mut EntityWorldMut<'_>) -> Self { // SAFETY: `EntityWorldMut` guarantees exclusive access to the entire world. 
- unsafe { EntityMut::new(value.as_unsafe_entity_cell()) } + unsafe { EntityMut::new(entity.as_unsafe_entity_cell()) } } } impl<'a> TryFrom> for EntityMut<'a> { type Error = TryFromFilteredError; - fn try_from(value: FilteredEntityMut<'a>) -> Result { - if !value.access.has_read_all() { + fn try_from(entity: FilteredEntityMut<'a>) -> Result { + if !entity.access.has_read_all() { Err(TryFromFilteredError::MissingReadAllAccess) - } else if !value.access.has_write_all() { + } else if !entity.access.has_write_all() { Err(TryFromFilteredError::MissingWriteAllAccess) } else { // SAFETY: check above guarantees exclusive access to all components of the entity. - Ok(unsafe { EntityMut::new(value.entity) }) + Ok(unsafe { EntityMut::new(entity.entity) }) } } } @@ -903,14 +1026,14 @@ impl<'a> TryFrom> for EntityMut<'a> { impl<'a> TryFrom<&'a mut FilteredEntityMut<'_>> for EntityMut<'a> { type Error = TryFromFilteredError; - fn try_from(value: &'a mut FilteredEntityMut<'_>) -> Result { - if !value.access.has_read_all() { + fn try_from(entity: &'a mut FilteredEntityMut<'_>) -> Result { + if !entity.access.has_read_all() { Err(TryFromFilteredError::MissingReadAllAccess) - } else if !value.access.has_write_all() { + } else if !entity.access.has_write_all() { Err(TryFromFilteredError::MissingWriteAllAccess) } else { // SAFETY: check above guarantees exclusive access to all components of the entity. - Ok(unsafe { EntityMut::new(value.entity) }) + Ok(unsafe { EntityMut::new(entity.entity) }) } } } @@ -923,12 +1046,11 @@ impl PartialEq for EntityMut<'_> { impl Eq for EntityMut<'_> {} -#[expect(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for EntityMut<'_> { /// [`EntityMut`]'s comparison trait implementations match the underlying [`Entity`], /// and cannot discern between different worlds. 
fn partial_cmp(&self, other: &Self) -> Option { - self.entity().partial_cmp(&other.entity()) + Some(self.cmp(other)) } } @@ -944,14 +1066,14 @@ impl Hash for EntityMut<'_> { } } -impl EntityBorrow for EntityMut<'_> { +impl ContainsEntity for EntityMut<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for EntityMut<'_> {} +unsafe impl EntityEquivalent for EntityMut<'_> {} /// A mutable reference to a particular [`Entity`], and the entire world. /// @@ -980,13 +1102,13 @@ impl<'w> EntityWorldMut<'w> { self.entity, self.world .entities() - .entity_does_not_exist_error_details_message(self.entity) + .entity_does_not_exist_error_details(self.entity) ); } #[inline(always)] #[track_caller] - fn assert_not_despawned(&self) { + pub(crate) fn assert_not_despawned(&self) { if self.location.archetype_id == ArchetypeId::INVALID { self.panic_despawned(); } @@ -1039,6 +1161,28 @@ impl<'w> EntityWorldMut<'w> { } } + /// Consumes `self` and returns read-only access to all of the entity's + /// components, with the world `'w` lifetime. + pub fn into_readonly(self) -> EntityRef<'w> { + EntityRef::from(self) + } + + /// Gets read-only access to all of the entity's components. + pub fn as_readonly(&self) -> EntityRef<'_> { + EntityRef::from(self) + } + + /// Consumes `self` and returns non-structural mutable access to all of the + /// entity's components, with the world `'w` lifetime. + pub fn into_mutable(self) -> EntityMut<'w> { + EntityMut::from(self) + } + + /// Gets non-structural mutable access to all of the entity's components. + pub fn as_mutable(&mut self) -> EntityMut<'_> { + EntityMut::from(self) + } + /// Returns the [ID](Entity) of the current entity. 
#[inline] #[must_use = "Omit the .id() call if you do not need to store the `Entity` identifier."] @@ -1091,7 +1235,7 @@ impl<'w> EntityWorldMut<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. /// /// # Panics /// @@ -1127,7 +1271,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[inline] pub fn get(&self) -> Option<&'_ T> { - EntityRef::from(self).get() + self.as_readonly().get() } /// Returns read-only components for the current entity that match the query `Q`. @@ -1138,7 +1282,7 @@ impl<'w> EntityWorldMut<'w> { /// has been despawned while this `EntityWorldMut` is still alive. #[inline] pub fn components(&self) -> Q::Item<'_> { - EntityRef::from(self).components::() + self.as_readonly().components::() } /// Returns read-only components for the current entity that match the query `Q`, @@ -1149,7 +1293,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[inline] pub fn get_components(&self) -> Option> { - EntityRef::from(self).get_components::() + self.as_readonly().get_components::() } /// Consumes `self` and gets access to the component of type `T` with @@ -1161,8 +1305,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[inline] pub fn into_borrow(self) -> Option<&'w T> { - // SAFETY: consuming `self` implies exclusive access - unsafe { self.into_unsafe_entity_cell().get() } + self.into_readonly().get() } /// Gets access to the component of type `T` for the current entity, @@ -1175,7 +1318,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. 
#[inline] pub fn get_ref(&self) -> Option> { - EntityRef::from(self).get_ref() + self.as_readonly().get_ref() } /// Consumes `self` and gets access to the component of type `T` @@ -1189,7 +1332,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[inline] pub fn into_ref(self) -> Option> { - EntityRef::from(self).get_ref() + self.into_readonly().get_ref() } /// Gets mutable access to the component of type `T` for the current entity. @@ -1200,8 +1343,92 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[inline] pub fn get_mut>(&mut self) -> Option> { - // SAFETY: trait bound `Mutability = Mutable` ensures `T` is mutable - unsafe { self.get_mut_assume_mutable() } + self.as_mutable().into_mut() + } + + /// Temporarily removes a [`Component`] `T` from this [`Entity`] and runs the + /// provided closure on it, returning the result if `T` was available. + /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut`](EntityWorldMut::get_mut). 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_ecs::prelude::*; + /// # + /// #[derive(Component, PartialEq, Eq, Debug)] + /// #[component(immutable)] + /// struct Foo(bool); + /// + /// # let mut world = World::default(); + /// # world.register_component::(); + /// # + /// # let entity = world.spawn(Foo(false)).id(); + /// # + /// # let mut entity = world.entity_mut(entity); + /// # + /// # assert_eq!(entity.get::(), Some(&Foo(false))); + /// # + /// entity.modify_component(|foo: &mut Foo| { + /// foo.0 = true; + /// }); + /// # + /// # assert_eq!(entity.get::(), Some(&Foo(true))); + /// ``` + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + #[inline] + pub fn modify_component(&mut self, f: impl FnOnce(&mut T) -> R) -> Option { + self.assert_not_despawned(); + + let result = self + .world + .modify_component(self.entity, f) + .expect("entity access must be valid")?; + + self.update_location(); + + Some(result) + } + + /// Temporarily removes a [`Component`] `T` from this [`Entity`] and runs the + /// provided closure on it, returning the result if `T` was available. + /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut`](EntityWorldMut::get_mut). + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. 
+ #[inline] + pub fn modify_component_by_id( + &mut self, + component_id: ComponentId, + f: impl for<'a> FnOnce(MutUntyped<'a>) -> R, + ) -> Option { + self.assert_not_despawned(); + + let result = self + .world + .modify_component_by_id(self.entity, component_id, f) + .expect("entity access must be valid")?; + + self.update_location(); + + Some(result) } /// Gets mutable access to the component of type `T` for the current entity. @@ -1212,10 +1439,7 @@ impl<'w> EntityWorldMut<'w> { /// - `T` must be a mutable component #[inline] pub unsafe fn get_mut_assume_mutable(&mut self) -> Option> { - // SAFETY: - // - &mut self implies exclusive access for duration of returned value - // - caller ensures T is mutable - unsafe { self.as_unsafe_entity_cell().get_mut_assume_mutable() } + self.as_mutable().into_mut_assume_mutable() } /// Consumes `self` and gets mutable access to the component of type `T` @@ -1231,6 +1455,62 @@ impl<'w> EntityWorldMut<'w> { unsafe { self.into_unsafe_entity_cell().get_mut() } } + /// Consumes `self` and gets mutable access to the component of type `T` + /// with the world `'w` lifetime for the current entity. + /// Returns `None` if the entity does not have a component of type `T`. + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + /// + /// # Safety + /// + /// - `T` must be a mutable component + #[inline] + pub unsafe fn into_mut_assume_mutable(self) -> Option> { + // SAFETY: consuming `self` implies exclusive access + unsafe { self.into_unsafe_entity_cell().get_mut_assume_mutable() } + } + + /// Gets a reference to the resource of the given type + /// + /// # Panics + /// + /// Panics if the resource does not exist. + /// Use [`get_resource`](EntityWorldMut::get_resource) instead if you want to handle this case. 
+ #[inline] + #[track_caller] + pub fn resource(&self) -> &R { + self.world.resource::() + } + + /// Gets a mutable reference to the resource of the given type + /// + /// # Panics + /// + /// Panics if the resource does not exist. + /// Use [`get_resource_mut`](World::get_resource_mut) instead if you want to handle this case. + /// + /// If you want to instead insert a value if the resource does not exist, + /// use [`get_resource_or_insert_with`](World::get_resource_or_insert_with). + #[inline] + #[track_caller] + pub fn resource_mut(&mut self) -> Mut<'_, R> { + self.world.resource_mut::() + } + + /// Gets a reference to the resource of the given type if it exists + #[inline] + pub fn get_resource(&self) -> Option<&R> { + self.world.get_resource() + } + + /// Gets a mutable reference to the resource of the given type if it exists + #[inline] + pub fn get_resource_mut(&mut self) -> Option> { + self.world.get_resource_mut() + } + /// Retrieves the change ticks for the given component. This can be useful for implementing change /// detection in custom runtimes. /// @@ -1239,7 +1519,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[inline] pub fn get_change_ticks(&self) -> Option { - EntityRef::from(self).get_change_ticks::() + self.as_readonly().get_change_ticks::() } /// Retrieves the change ticks for the given [`ComponentId`]. This can be useful for implementing change @@ -1254,7 +1534,7 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. 
#[inline] pub fn get_change_ticks_by_id(&self, component_id: ComponentId) -> Option { - EntityRef::from(self).get_change_ticks_by_id(component_id) + self.as_readonly().get_change_ticks_by_id(component_id) } /// Returns [untyped read-only reference(s)](Ptr) to component(s) for the @@ -1285,7 +1565,7 @@ impl<'w> EntityWorldMut<'w> { &self, component_ids: F, ) -> Result, EntityComponentError> { - EntityRef::from(self).get_by_id(component_ids) + self.as_readonly().get_by_id(component_ids) } /// Consumes `self` and returns [untyped read-only reference(s)](Ptr) to @@ -1317,10 +1597,7 @@ impl<'w> EntityWorldMut<'w> { self, component_ids: F, ) -> Result, EntityComponentError> { - // SAFETY: - // - We have read-only access to all components of this entity. - // - consuming `self` ensures that no references exist to this entity's components. - unsafe { component_ids.fetch_ref(self.into_unsafe_entity_cell()) } + self.into_readonly().get_by_id(component_ids) } /// Returns [untyped mutable reference(s)](MutUntyped) to component(s) for @@ -1353,10 +1630,42 @@ impl<'w> EntityWorldMut<'w> { &mut self, component_ids: F, ) -> Result, EntityComponentError> { - // SAFETY: - // - `&mut self` ensures that no references exist to this entity's components. - // - We have exclusive access to all components of this entity. - unsafe { component_ids.fetch_mut(self.as_unsafe_entity_cell()) } + self.as_mutable().into_mut_by_id(component_ids) + } + + /// Returns [untyped mutable reference(s)](MutUntyped) to component(s) for + /// the current entity, based on the given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. 
+ /// + /// **You should prefer to use the typed API [`EntityWorldMut::get_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityWorldMut::get_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn get_mut_assume_mutable_by_id( + &mut self, + component_ids: F, + ) -> Result, EntityComponentError> { + self.as_mutable() + .into_mut_assume_mutable_by_id(component_ids) } /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) @@ -1390,10 +1699,43 @@ impl<'w> EntityWorldMut<'w> { self, component_ids: F, ) -> Result, EntityComponentError> { - // SAFETY: - // - consuming `self` ensures that no references exist to this entity's components. - // - We have exclusive access to all components of this entity. - unsafe { component_ids.fetch_mut(self.into_unsafe_entity_cell()) } + self.into_mutable().into_mut_by_id(component_ids) + } + + /// Consumes `self` and returns [untyped mutable reference(s)](MutUntyped) + /// to component(s) with lifetime `'w` for the current entity, based on the + /// given [`ComponentId`]s. + /// Assumes the given [`ComponentId`]s refer to mutable components. 
+ /// + /// **You should prefer to use the typed API [`EntityWorldMut::into_mut_assume_mutable`] where + /// possible and only use this in cases where the actual component types + /// are not known at compile time.** + /// + /// Unlike [`EntityWorldMut::into_mut_assume_mutable`], this returns untyped reference(s) to + /// component(s), and it's the job of the caller to ensure the correct + /// type(s) are dereferenced (if necessary). + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if the entity does + /// not have a component. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component + /// is requested multiple times. + /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the provided [`ComponentId`]s must refer to mutable components. + #[inline] + pub unsafe fn into_mut_assume_mutable_by_id( + self, + component_ids: F, + ) -> Result, EntityComponentError> { + self.into_mutable() + .into_mut_assume_mutable_by_id(component_ids) } /// Adds a [`Bundle`] of components to the entity. @@ -1408,8 +1750,36 @@ impl<'w> EntityWorldMut<'w> { self.insert_with_caller( bundle, InsertMode::Replace, - #[cfg(feature = "track_change_detection")] - Location::caller(), + MaybeLocation::caller(), + RelationshipHookMode::Run, + ) + } + + /// Adds a [`Bundle`] of components to the entity. + /// [`Relationship`](crate::relationship::Relationship) components in the bundle will follow the configuration + /// in `relationship_hook_mode`. + /// + /// This will overwrite any previous value(s) of the same component type. + /// + /// # Warning + /// + /// This can easily break the integrity of relationships. This is intended to be used for cloning and spawning code internals, + /// not most user-facing scenarios. 
+ /// + /// # Panics + /// + /// If the entity has been despawned while this `EntityWorldMut` is still alive. + #[track_caller] + pub fn insert_with_relationship_hook_mode( + &mut self, + bundle: T, + relationship_hook_mode: RelationshipHookMode, + ) -> &mut Self { + self.insert_with_caller( + bundle, + InsertMode::Replace, + MaybeLocation::caller(), + relationship_hook_mode, ) } @@ -1426,8 +1796,8 @@ impl<'w> EntityWorldMut<'w> { self.insert_with_caller( bundle, InsertMode::Keep, - #[cfg(feature = "track_change_detection")] - Location::caller(), + MaybeLocation::caller(), + RelationshipHookMode::Run, ) } @@ -1438,19 +1808,28 @@ impl<'w> EntityWorldMut<'w> { &mut self, bundle: T, mode: InsertMode, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, + relationship_hook_mode: RelationshipHookMode, ) -> &mut Self { self.assert_not_despawned(); let change_tick = self.world.change_tick(); let mut bundle_inserter = BundleInserter::new::(self.world, self.location.archetype_id, change_tick); - self.location = - // SAFETY: location matches current entity. `T` matches `bundle_info` - unsafe { - bundle_inserter.insert(self.entity, self.location, bundle, mode, #[cfg(feature = "track_change_detection")] caller) - }; + // SAFETY: location matches current entity. 
`T` matches `bundle_info` + let (location, after_effect) = unsafe { + bundle_inserter.insert( + self.entity, + self.location, + bundle, + mode, + caller, + relationship_hook_mode, + ) + }; + self.location = location; self.world.flush(); self.update_location(); + after_effect.apply(self); self } @@ -1473,13 +1852,36 @@ impl<'w> EntityWorldMut<'w> { &mut self, component_id: ComponentId, component: OwningPtr<'_>, + ) -> &mut Self { + self.insert_by_id_with_caller( + component_id, + component, + InsertMode::Replace, + MaybeLocation::caller(), + RelationshipHookMode::Run, + ) + } + + /// # Safety + /// + /// - [`ComponentId`] must be from the same world as [`EntityWorldMut`] + /// - [`OwningPtr`] must be a valid reference to the type represented by [`ComponentId`] + #[inline] + pub(crate) unsafe fn insert_by_id_with_caller( + &mut self, + component_id: ComponentId, + component: OwningPtr<'_>, + mode: InsertMode, + caller: MaybeLocation, + relationship_hook_insert_mode: RelationshipHookMode, ) -> &mut Self { self.assert_not_despawned(); let change_tick = self.world.change_tick(); - let bundle_id = self - .world - .bundles - .init_component_info(&self.world.components, component_id); + let bundle_id = self.world.bundles.init_component_info( + &mut self.world.storages, + &self.world.components, + component_id, + ); let storage_type = self.world.bundles.get_storage_unchecked(bundle_id); let bundle_inserter = BundleInserter::new_with_id( @@ -1495,6 +1897,9 @@ impl<'w> EntityWorldMut<'w> { self.location, Some(component).into_iter(), Some(storage_type).iter().cloned(), + mode, + caller, + relationship_hook_insert_mode, ); self.world.flush(); self.update_location(); @@ -1522,13 +1927,24 @@ impl<'w> EntityWorldMut<'w> { &mut self, component_ids: &[ComponentId], iter_components: I, + ) -> &mut Self { + self.insert_by_ids_internal(component_ids, iter_components, RelationshipHookMode::Run) + } + + #[track_caller] + pub(crate) unsafe fn insert_by_ids_internal<'a, I: Iterator>>( + 
&mut self, + component_ids: &[ComponentId], + iter_components: I, + relationship_hook_insert_mode: RelationshipHookMode, ) -> &mut Self { self.assert_not_despawned(); let change_tick = self.world.change_tick(); - let bundle_id = self - .world - .bundles - .init_dynamic_info(&self.world.components, component_ids); + let bundle_id = self.world.bundles.init_dynamic_info( + &mut self.world.storages, + &self.world.components, + component_ids, + ); let mut storage_types = core::mem::take(self.world.bundles.get_storages_unchecked(bundle_id)); let bundle_inserter = BundleInserter::new_with_id( @@ -1544,6 +1960,9 @@ impl<'w> EntityWorldMut<'w> { self.location, iter_components, (*storage_types).iter().cloned(), + InsertMode::Replace, + MaybeLocation::caller(), + relationship_hook_insert_mode, ); *self.world.bundles.get_storages_unchecked(bundle_id) = core::mem::take(&mut storage_types); self.world.flush(); @@ -1561,12 +1980,15 @@ impl<'w> EntityWorldMut<'w> { /// If the entity has been despawned while this `EntityWorldMut` is still alive. // TODO: BundleRemover? #[must_use] - pub fn take(&mut self) -> Option { + #[track_caller] + pub fn take(&mut self) -> Option { self.assert_not_despawned(); let world = &mut self.world; let storages = &mut world.storages; - let components = &mut world.components; - let bundle_id = world.bundles.register_info::(components, storages); + // SAFETY: These come from the same world. 
+ let mut registrator = + unsafe { ComponentsRegistrator::new(&mut world.components, &mut world.component_ids) }; + let bundle_id = world.bundles.register_info::(&mut registrator, storages); // SAFETY: We just ensured this bundle exists let bundle_info = unsafe { world.bundles.get_unchecked(bundle_id) }; let old_location = self.location; @@ -1576,7 +1998,7 @@ impl<'w> EntityWorldMut<'w> { bundle_info.remove_bundle_from_archetype( &mut world.archetypes, storages, - components, + ®istrator, &world.observers, old_location.archetype_id, false, @@ -1606,6 +2028,7 @@ impl<'w> EntityWorldMut<'w> { old_archetype, entity, bundle_info, + MaybeLocation::caller(), ); } @@ -1626,6 +2049,7 @@ impl<'w> EntityWorldMut<'w> { // - entity location is valid // - table row is removed below, without dropping the contents // - `components` comes from the same world as `storages` + // - the component exists on the entity take_component( storages, components, @@ -1637,7 +2061,10 @@ impl<'w> EntityWorldMut<'w> { }) }; - #[allow(clippy::undocumented_unsafe_blocks)] // TODO: document why this is safe + #[expect( + clippy::undocumented_unsafe_blocks, + reason = "Needs to be documented; see #17345." + )] unsafe { Self::move_entity_from_remove::( entity, @@ -1664,7 +2091,6 @@ impl<'w> EntityWorldMut<'w> { /// when DROP is true removed components will be dropped otherwise they will be forgotten // We use a const generic here so that we are less reliant on // inlining for rustc to optimize out the `match DROP` - #[allow(clippy::too_many_arguments)] unsafe fn move_entity_from_remove( entity: Entity, self_location: &mut EntityLocation, @@ -1744,8 +2170,7 @@ impl<'w> EntityWorldMut<'w> { /// /// # Safety /// - A `BundleInfo` with the corresponding `BundleId` must have been initialized. 
- #[allow(clippy::too_many_arguments)] - unsafe fn remove_bundle(&mut self, bundle: BundleId) -> EntityLocation { + unsafe fn remove_bundle(&mut self, bundle: BundleId, caller: MaybeLocation) -> EntityLocation { let entity = self.entity; let world = &mut self.world; let location = self.location; @@ -1788,6 +2213,7 @@ impl<'w> EntityWorldMut<'w> { old_archetype, entity, bundle_info, + caller, ); } @@ -1803,6 +2229,7 @@ impl<'w> EntityWorldMut<'w> { .storages .sparse_sets .get_mut(component_id) + // Set exists because the component existed on the entity .unwrap() .remove(entity); } @@ -1834,14 +2261,26 @@ impl<'w> EntityWorldMut<'w> { /// /// If the entity has been despawned while this `EntityWorldMut` is still alive. // TODO: BundleRemover? + #[track_caller] pub fn remove(&mut self) -> &mut Self { + self.remove_with_caller::(MaybeLocation::caller()) + } + + #[inline] + pub(crate) fn remove_with_caller(&mut self, caller: MaybeLocation) -> &mut Self { self.assert_not_despawned(); let storages = &mut self.world.storages; - let components = &mut self.world.components; - let bundle_info = self.world.bundles.register_info::(components, storages); + // SAFETY: These come from the same world. + let mut registrator = unsafe { + ComponentsRegistrator::new(&mut self.world.components, &mut self.world.component_ids) + }; + let bundle_info = self + .world + .bundles + .register_info::(&mut registrator, storages); // SAFETY: the `BundleInfo` is initialized above - self.location = unsafe { self.remove_bundle(bundle_info) }; + self.location = unsafe { self.remove_bundle(bundle_info, caller) }; self.world.flush(); self.update_location(); self @@ -1852,16 +2291,27 @@ impl<'w> EntityWorldMut<'w> { /// # Panics /// /// If the entity has been despawned while this `EntityWorldMut` is still alive. 
+ #[track_caller] pub fn remove_with_requires(&mut self) -> &mut Self { + self.remove_with_requires_with_caller::(MaybeLocation::caller()) + } + + pub(crate) fn remove_with_requires_with_caller( + &mut self, + caller: MaybeLocation, + ) -> &mut Self { self.assert_not_despawned(); let storages = &mut self.world.storages; - let components = &mut self.world.components; + // SAFETY: These come from the same world. + let mut registrator = unsafe { + ComponentsRegistrator::new(&mut self.world.components, &mut self.world.component_ids) + }; let bundles = &mut self.world.bundles; - let bundle_id = bundles.register_contributed_bundle_info::(components, storages); + let bundle_id = bundles.register_contributed_bundle_info::(&mut registrator, storages); // SAFETY: the dynamic `BundleInfo` is initialized above - self.location = unsafe { self.remove_bundle(bundle_id) }; + self.location = unsafe { self.remove_bundle(bundle_id, caller) }; self.world.flush(); self.update_location(); self @@ -1874,13 +2324,25 @@ impl<'w> EntityWorldMut<'w> { /// # Panics /// /// If the entity has been despawned while this `EntityWorldMut` is still alive. + #[track_caller] pub fn retain(&mut self) -> &mut Self { + self.retain_with_caller::(MaybeLocation::caller()) + } + + #[inline] + pub(crate) fn retain_with_caller(&mut self, caller: MaybeLocation) -> &mut Self { self.assert_not_despawned(); let archetypes = &mut self.world.archetypes; let storages = &mut self.world.storages; - let components = &mut self.world.components; + // SAFETY: These come from the same world. + let mut registrator = unsafe { + ComponentsRegistrator::new(&mut self.world.components, &mut self.world.component_ids) + }; - let retained_bundle = self.world.bundles.register_info::(components, storages); + let retained_bundle = self + .world + .bundles + .register_info::(&mut registrator, storages); // SAFETY: `retained_bundle` exists as we just initialized it. 
let retained_bundle_info = unsafe { self.world.bundles.get_unchecked(retained_bundle) }; let old_location = self.location; @@ -1891,10 +2353,13 @@ impl<'w> EntityWorldMut<'w> { .components() .filter(|c| !retained_bundle_info.contributed_components().contains(c)) .collect::>(); - let remove_bundle = self.world.bundles.init_dynamic_info(components, to_remove); + let remove_bundle = + self.world + .bundles + .init_dynamic_info(&mut self.world.storages, ®istrator, to_remove); // SAFETY: the `BundleInfo` for the components to remove is initialized above - self.location = unsafe { self.remove_bundle(remove_bundle) }; + self.location = unsafe { self.remove_bundle(remove_bundle, caller) }; self.world.flush(); self.update_location(); self @@ -1908,17 +2373,28 @@ impl<'w> EntityWorldMut<'w> { /// /// Panics if the provided [`ComponentId`] does not exist in the [`World`] or if the /// entity has been despawned while this `EntityWorldMut` is still alive. + #[track_caller] pub fn remove_by_id(&mut self, component_id: ComponentId) -> &mut Self { + self.remove_by_id_with_caller(component_id, MaybeLocation::caller()) + } + + #[inline] + pub(crate) fn remove_by_id_with_caller( + &mut self, + component_id: ComponentId, + caller: MaybeLocation, + ) -> &mut Self { self.assert_not_despawned(); let components = &mut self.world.components; - let bundle_id = self - .world - .bundles - .init_component_info(components, component_id); + let bundle_id = self.world.bundles.init_component_info( + &mut self.world.storages, + components, + component_id, + ); // SAFETY: the `BundleInfo` for this `component_id` is initialized above - self.location = unsafe { self.remove_bundle(bundle_id) }; + self.location = unsafe { self.remove_bundle(bundle_id, caller) }; self.world.flush(); self.update_location(); self @@ -1932,17 +2408,19 @@ impl<'w> EntityWorldMut<'w> { /// /// Panics if any of the provided [`ComponentId`]s do not exist in the [`World`] or if the /// entity has been despawned while this 
`EntityWorldMut` is still alive. + #[track_caller] pub fn remove_by_ids(&mut self, component_ids: &[ComponentId]) -> &mut Self { self.assert_not_despawned(); let components = &mut self.world.components; - let bundle_id = self - .world - .bundles - .init_dynamic_info(components, component_ids); + let bundle_id = self.world.bundles.init_dynamic_info( + &mut self.world.storages, + components, + component_ids, + ); // SAFETY: the `BundleInfo` for this `bundle_id` is initialized above - unsafe { self.remove_bundle(bundle_id) }; + unsafe { self.remove_bundle(bundle_id, MaybeLocation::caller()) }; self.world.flush(); self.update_location(); @@ -1954,18 +2432,25 @@ impl<'w> EntityWorldMut<'w> { /// # Panics /// /// If the entity has been despawned while this `EntityWorldMut` is still alive. + #[track_caller] pub fn clear(&mut self) -> &mut Self { + self.clear_with_caller(MaybeLocation::caller()) + } + + #[inline] + pub(crate) fn clear_with_caller(&mut self, caller: MaybeLocation) -> &mut Self { self.assert_not_despawned(); let component_ids: Vec = self.archetype().components().collect(); let components = &mut self.world.components; - let bundle_id = self - .world - .bundles - .init_dynamic_info(components, component_ids.as_slice()); + let bundle_id = self.world.bundles.init_dynamic_info( + &mut self.world.storages, + components, + component_ids.as_slice(), + ); // SAFETY: the `BundleInfo` for this `component_id` is initialized above - self.location = unsafe { self.remove_bundle(bundle_id) }; + self.location = unsafe { self.remove_bundle(bundle_id, caller) }; self.world.flush(); self.update_location(); self @@ -1975,21 +2460,29 @@ impl<'w> EntityWorldMut<'w> { /// /// See [`World::despawn`] for more details. /// + /// # Note + /// + /// This will also despawn any [`Children`](crate::hierarchy::Children) entities, and any other [`RelationshipTarget`](crate::relationship::RelationshipTarget) that is configured + /// to despawn descendants. 
This results in "recursive despawn" behavior. + /// /// # Panics /// /// If the entity has been despawned while this `EntityWorldMut` is still alive. #[track_caller] pub fn despawn(self) { - self.despawn_with_caller( - #[cfg(feature = "track_change_detection")] - Location::caller(), - ); + self.despawn_with_caller(MaybeLocation::caller()); } - pub(crate) fn despawn_with_caller( - self, - #[cfg(feature = "track_change_detection")] caller: &'static Location, - ) { + /// Despawns the provided entity and its descendants. + #[deprecated( + since = "0.16.0", + note = "Use entity.despawn(), which now automatically despawns recursively." + )] + pub fn despawn_recursive(self) { + self.despawn(); + } + + pub(crate) fn despawn_with_caller(self, caller: MaybeLocation) { self.assert_not_despawned(); let world = self.world; let archetype = &world.archetypes[self.location.archetype_id]; @@ -2003,14 +2496,49 @@ impl<'w> EntityWorldMut<'w> { // SAFETY: All components in the archetype exist in world unsafe { + if archetype.has_despawn_observer() { + deferred_world.trigger_observers( + ON_DESPAWN, + self.entity, + archetype.components(), + caller, + ); + } + deferred_world.trigger_on_despawn( + archetype, + self.entity, + archetype.components(), + caller, + ); if archetype.has_replace_observer() { - deferred_world.trigger_observers(ON_REPLACE, self.entity, archetype.components()); + deferred_world.trigger_observers( + ON_REPLACE, + self.entity, + archetype.components(), + caller, + ); } - deferred_world.trigger_on_replace(archetype, self.entity, archetype.components()); + deferred_world.trigger_on_replace( + archetype, + self.entity, + archetype.components(), + caller, + RelationshipHookMode::Run, + ); if archetype.has_remove_observer() { - deferred_world.trigger_observers(ON_REMOVE, self.entity, archetype.components()); + deferred_world.trigger_observers( + ON_REMOVE, + self.entity, + archetype.components(), + caller, + ); } - deferred_world.trigger_on_remove(archetype, self.entity, 
archetype.components()); + deferred_world.trigger_on_remove( + archetype, + self.entity, + archetype.components(), + caller, + ); } for component_id in archetype.components() { @@ -2050,6 +2578,7 @@ impl<'w> EntityWorldMut<'w> { table_row = remove_result.table_row; for component_id in archetype.sparse_set_components() { + // set must have existed for the component to be added. let sparse_set = world.storages.sparse_sets.get_mut(component_id).unwrap(); sparse_set.remove(self.entity); } @@ -2079,14 +2608,11 @@ impl<'w> EntityWorldMut<'w> { } world.flush(); - #[cfg(feature = "track_change_detection")] - { - // SAFETY: No structural changes - unsafe { - world - .entities_mut() - .set_spawned_or_despawned_by(self.entity.index(), caller); - } + // SAFETY: No structural changes + unsafe { + world + .entities_mut() + .set_spawned_or_despawned_by(self.entity.index(), caller); } } @@ -2204,11 +2730,11 @@ impl<'w> EntityWorldMut<'w> { /// let mut entity = world.spawn_empty(); /// entity.entry().or_insert_with(|| Comp(4)); /// # let entity_id = entity.id(); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 4); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 4); /// /// # let mut entity = world.get_entity_mut(entity_id).unwrap(); /// entity.entry::().and_modify(|mut c| c.0 += 1); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 5); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 5); /// ``` /// /// # Panics @@ -2247,25 +2773,34 @@ impl<'w> EntityWorldMut<'w> { /// # Panics /// /// If the entity has been despawned while this `EntityWorldMut` is still alive. 
+ #[track_caller] pub fn observe( &mut self, observer: impl IntoObserverSystem, + ) -> &mut Self { + self.observe_with_caller(observer, MaybeLocation::caller()) + } + + pub(crate) fn observe_with_caller( + &mut self, + observer: impl IntoObserverSystem, + caller: MaybeLocation, ) -> &mut Self { self.assert_not_despawned(); self.world - .spawn(Observer::new(observer).with_entity(self.entity)); + .spawn_with_caller(Observer::new(observer).with_entity(self.entity), caller); self.world.flush(); self.update_location(); self } /// Clones parts of an entity (components, observers, etc.) onto another entity, - /// configured through [`EntityCloneBuilder`]. + /// configured through [`EntityClonerBuilder`]. /// /// By default, the other entity will receive all the components of the original that implement /// [`Clone`] or [`Reflect`](bevy_reflect::Reflect). /// - /// Configure through [`EntityCloneBuilder`] as follows: + /// Configure through [`EntityClonerBuilder`] as follows: /// ``` /// # use bevy_ecs::prelude::*; /// # #[derive(Component, Clone, PartialEq, Debug)] @@ -2282,10 +2817,7 @@ impl<'w> EntityWorldMut<'w> { /// # assert_eq!(world.get::(target), None); /// ``` /// - /// See the following for more options: - /// - [`EntityCloneBuilder`] - /// - [`CloneEntityWithObserversExt`](crate::observer::CloneEntityWithObserversExt) - /// - `CloneEntityHierarchyExt` + /// See [`EntityClonerBuilder`] for more options. 
/// /// # Panics /// @@ -2294,11 +2826,11 @@ impl<'w> EntityWorldMut<'w> { pub fn clone_with( &mut self, target: Entity, - config: impl FnOnce(&mut EntityCloneBuilder) + Send + Sync + 'static, + config: impl FnOnce(&mut EntityClonerBuilder) + Send + Sync + 'static, ) -> &mut Self { self.assert_not_despawned(); - let mut builder = EntityCloneBuilder::new(self.world); + let mut builder = EntityCloner::build(self.world); config(&mut builder); builder.clone_entity(self.entity, target); @@ -2323,12 +2855,12 @@ impl<'w> EntityWorldMut<'w> { } /// Spawns a clone of this entity and allows configuring cloning behavior - /// using [`EntityCloneBuilder`], returning the [`Entity`] of the clone. + /// using [`EntityClonerBuilder`], returning the [`Entity`] of the clone. /// /// By default, the clone will receive all the components of the original that implement /// [`Clone`] or [`Reflect`](bevy_reflect::Reflect). /// - /// Configure through [`EntityCloneBuilder`] as follows: + /// Configure through [`EntityClonerBuilder`] as follows: /// ``` /// # use bevy_ecs::prelude::*; /// # #[derive(Component, Clone, PartialEq, Debug)] @@ -2344,24 +2876,21 @@ impl<'w> EntityWorldMut<'w> { /// # assert_eq!(world.get::(entity_clone), None); /// ``` /// - /// See the following for more options: - /// - [`EntityCloneBuilder`] - /// - [`CloneEntityWithObserversExt`](crate::observer::CloneEntityWithObserversExt) - /// - `CloneEntityHierarchyExt` + /// See [`EntityClonerBuilder`] for more options. /// /// # Panics /// /// If this entity has been despawned while this `EntityWorldMut` is still alive. 
pub fn clone_and_spawn_with( &mut self, - config: impl FnOnce(&mut EntityCloneBuilder) + Send + Sync + 'static, + config: impl FnOnce(&mut EntityClonerBuilder) + Send + Sync + 'static, ) -> Entity { self.assert_not_despawned(); let entity_clone = self.world.entities.reserve_entity(); self.world.flush(); - let mut builder = EntityCloneBuilder::new(self.world); + let mut builder = EntityCloner::build(self.world); config(&mut builder); builder.clone_entity(self.entity, entity_clone); @@ -2382,9 +2911,10 @@ impl<'w> EntityWorldMut<'w> { pub fn clone_components(&mut self, target: Entity) -> &mut Self { self.assert_not_despawned(); - let mut builder = EntityCloneBuilder::new(self.world); - builder.deny_all().allow::(); - builder.clone_entity(self.entity, target); + EntityCloner::build(self.world) + .deny_all() + .allow::() + .clone_entity(self.entity, target); self.world.flush(); self.update_location(); @@ -2404,10 +2934,11 @@ impl<'w> EntityWorldMut<'w> { pub fn move_components(&mut self, target: Entity) -> &mut Self { self.assert_not_despawned(); - let mut builder = EntityCloneBuilder::new(self.world); - builder.deny_all().allow::(); - builder.move_components(true); - builder.clone_entity(self.entity, target); + EntityCloner::build(self.world) + .deny_all() + .allow::() + .move_components(true) + .clone_entity(self.entity, target); self.world.flush(); self.update_location(); @@ -2415,12 +2946,11 @@ impl<'w> EntityWorldMut<'w> { } /// Returns the source code location from which this entity has last been spawned. 
- #[cfg(feature = "track_change_detection")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.world() .entities() .entity_get_spawned_or_despawned_by(self.entity) - .unwrap() + .map(|location| location.unwrap()) } } @@ -2431,23 +2961,39 @@ unsafe fn trigger_on_replace_and_on_remove_hooks_and_observers( archetype: &Archetype, entity: Entity, bundle_info: &BundleInfo, + caller: MaybeLocation, ) { + let bundle_components_in_archetype = || { + bundle_info + .iter_explicit_components() + .filter(|component_id| archetype.contains(*component_id)) + }; if archetype.has_replace_observer() { deferred_world.trigger_observers( ON_REPLACE, entity, - bundle_info.iter_explicit_components(), + bundle_components_in_archetype(), + caller, ); } - deferred_world.trigger_on_replace(archetype, entity, bundle_info.iter_explicit_components()); + deferred_world.trigger_on_replace( + archetype, + entity, + bundle_components_in_archetype(), + caller, + RelationshipHookMode::Run, + ); if archetype.has_remove_observer() { - deferred_world.trigger_observers(ON_REMOVE, entity, bundle_info.iter_explicit_components()); + deferred_world.trigger_observers( + ON_REMOVE, + entity, + bundle_components_in_archetype(), + caller, + ); } - deferred_world.trigger_on_remove(archetype, entity, bundle_info.iter_explicit_components()); + deferred_world.trigger_on_remove(archetype, entity, bundle_components_in_archetype(), caller); } -const QUERY_MISMATCH_ERROR: &str = "Query does not match the current entity"; - /// A view into a single entity and component in a world, which may either be vacant or occupied. /// /// This `enum` can only be constructed from the [`entry`] method on [`EntityWorldMut`]. 
@@ -2474,7 +3020,7 @@ impl<'w, 'a, T: Component> Entry<'w, 'a, T> { /// let mut entity = world.spawn(Comp(0)); /// /// entity.entry::().and_modify(|mut c| c.0 += 1); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 1); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 1); /// ``` #[inline] pub fn and_modify)>(self, f: F) -> Self { @@ -2533,11 +3079,11 @@ impl<'w, 'a, T: Component> Entry<'w, 'a, T> { /// /// entity.entry().or_insert(Comp(4)); /// # let entity_id = entity.id(); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 4); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 4); /// /// # let mut entity = world.get_entity_mut(entity_id).unwrap(); /// entity.entry().or_insert(Comp(15)).into_mut().0 *= 2; - /// assert_eq!(world.query::<&Comp>().single(&world).0, 8); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 8); /// ``` #[inline] pub fn or_insert(self, default: T) -> OccupiedEntry<'w, 'a, T> { @@ -2561,7 +3107,7 @@ impl<'w, 'a, T: Component> Entry<'w, 'a, T> { /// let mut entity = world.spawn_empty(); /// /// entity.entry().or_insert_with(|| Comp(4)); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 4); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 4); /// ``` #[inline] pub fn or_insert_with T>(self, default: F) -> OccupiedEntry<'w, 'a, T> { @@ -2587,7 +3133,7 @@ impl<'w, 'a, T: Component + Default> Entry<'w, 'a, T> { /// let mut entity = world.spawn_empty(); /// /// entity.entry::().or_default(); - /// assert_eq!(world.query::<&Comp>().single(&world).0, 0); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 0); /// ``` #[inline] pub fn or_default(self) -> OccupiedEntry<'w, 'a, T> { @@ -2645,7 +3191,7 @@ impl<'w, 'a, T: Component> OccupiedEntry<'w, 'a, T> { /// o.insert(Comp(10)); /// } /// - /// assert_eq!(world.query::<&Comp>().single(&world).0, 10); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 10); /// ``` #[inline] pub 
fn insert(&mut self, component: T) { @@ -2703,7 +3249,7 @@ impl<'w, 'a, T: Component> OccupiedEntry<'w, 'a, T> { /// o.get_mut().0 += 2 /// } /// - /// assert_eq!(world.query::<&Comp>().single(&world).0, 17); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 17); /// ``` #[inline] pub fn get_mut(&mut self) -> Mut<'_, T> { @@ -2732,7 +3278,7 @@ impl<'w, 'a, T: Component> OccupiedEntry<'w, 'a, T> { /// o.into_mut().0 += 10; /// } /// - /// assert_eq!(world.query::<&Comp>().single(&world).0, 15); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 15); /// ``` #[inline] pub fn into_mut(self) -> Mut<'a, T> { @@ -2764,7 +3310,7 @@ impl<'w, 'a, T: Component> VacantEntry<'w, 'a, T> { /// v.insert(Comp(10)); /// } /// - /// assert_eq!(world.query::<&Comp>().single(&world).0, 10); + /// assert_eq!(world.query::<&Comp>().single(&world).unwrap().0, 10); /// ``` #[inline] pub fn insert(self, component: T) -> OccupiedEntry<'w, 'a, T> { @@ -2796,7 +3342,7 @@ impl<'w, 'a, T: Component> VacantEntry<'w, 'a, T> { /// .data::<&A>() /// .build(); /// -/// let filtered_entity: FilteredEntityRef = query.single(&mut world); +/// let filtered_entity: FilteredEntityRef = query.single(&mut world).unwrap(); /// let component: &A = filtered_entity.get().unwrap(); /// /// // Here `FilteredEntityRef` is nested in a tuple, so it does not have access to `&A`. @@ -2804,7 +3350,7 @@ impl<'w, 'a, T: Component> VacantEntry<'w, 'a, T> { /// .data::<&A>() /// .build(); /// -/// let (_, filtered_entity) = query.single(&mut world); +/// let (_, filtered_entity) = query.single(&mut world).unwrap(); /// assert!(filtered_entity.get::
().is_none()); /// ``` #[derive(Clone)] @@ -2817,7 +3363,7 @@ impl<'w> FilteredEntityRef<'w> { /// # Safety /// - No `&mut World` can exist from the underlying `UnsafeWorldCell` /// - If `access` takes read access to a component no mutable reference to that - /// component can exist at the same time as the returned [`FilteredEntityMut`] + /// component can exist at the same time as the returned [`FilteredEntityMut`] /// - If `access` takes any access for a component `entity` must have that component. #[inline] pub(crate) unsafe fn new(entity: UnsafeEntityCell<'w>, access: Access) -> Self { @@ -2868,7 +3414,7 @@ impl<'w> FilteredEntityRef<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { self.entity.contains_id(component_id) @@ -2957,27 +3503,26 @@ impl<'w> FilteredEntityRef<'w> { } /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_change_detection")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.entity.spawned_by() } } impl<'w> From> for FilteredEntityRef<'w> { #[inline] - fn from(entity_mut: FilteredEntityMut<'w>) -> Self { + fn from(entity: FilteredEntityMut<'w>) -> Self { // SAFETY: // - `FilteredEntityMut` guarantees exclusive access to all components in the new `FilteredEntityRef`. 
- unsafe { FilteredEntityRef::new(entity_mut.entity, entity_mut.access) } + unsafe { FilteredEntityRef::new(entity.entity, entity.access) } } } impl<'a> From<&'a FilteredEntityMut<'_>> for FilteredEntityRef<'a> { #[inline] - fn from(entity_mut: &'a FilteredEntityMut<'_>) -> Self { + fn from(entity: &'a FilteredEntityMut<'_>) -> Self { // SAFETY: // - `FilteredEntityMut` guarantees exclusive access to all components in the new `FilteredEntityRef`. - unsafe { FilteredEntityRef::new(entity_mut.entity, entity_mut.access.clone()) } + unsafe { FilteredEntityRef::new(entity.entity, entity.access.clone()) } } } @@ -2988,7 +3533,7 @@ impl<'a> From> for FilteredEntityRef<'a> { unsafe { let mut access = Access::default(); access.read_all(); - FilteredEntityRef::new(entity.0, access) + FilteredEntityRef::new(entity.cell, access) } } } @@ -3000,7 +3545,7 @@ impl<'a> From<&'a EntityRef<'_>> for FilteredEntityRef<'a> { unsafe { let mut access = Access::default(); access.read_all(); - FilteredEntityRef::new(entity.0, access) + FilteredEntityRef::new(entity.cell, access) } } } @@ -3012,7 +3557,7 @@ impl<'a> From> for FilteredEntityRef<'a> { unsafe { let mut access = Access::default(); access.read_all(); - FilteredEntityRef::new(entity.0, access) + FilteredEntityRef::new(entity.cell, access) } } } @@ -3024,7 +3569,7 @@ impl<'a> From<&'a EntityMut<'_>> for FilteredEntityRef<'a> { unsafe { let mut access = Access::default(); access.read_all(); - FilteredEntityRef::new(entity.0, access) + FilteredEntityRef::new(entity.cell, access) } } } @@ -3053,6 +3598,24 @@ impl<'a> From<&'a EntityWorldMut<'_>> for FilteredEntityRef<'a> { } } +impl<'a, B: Bundle> From<&'a EntityRefExcept<'_, B>> for FilteredEntityRef<'a> { + fn from(value: &'a EntityRefExcept<'_, B>) -> Self { + // SAFETY: + // - The FilteredEntityRef has the same component access as the given EntityRefExcept. 
+ unsafe { + let mut access = Access::default(); + access.read_all(); + let components = value.entity.world().components(); + B::get_component_ids(components, &mut |maybe_id| { + if let Some(id) = maybe_id { + access.remove_component_read(id); + } + }); + FilteredEntityRef::new(value.entity, access) + } + } +} + impl PartialEq for FilteredEntityRef<'_> { fn eq(&self, other: &Self) -> bool { self.entity() == other.entity() @@ -3061,12 +3624,11 @@ impl PartialEq for FilteredEntityRef<'_> { impl Eq for FilteredEntityRef<'_> {} -#[expect(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for FilteredEntityRef<'_> { /// [`FilteredEntityRef`]'s comparison trait implementations match the underlying [`Entity`], /// and cannot discern between different worlds. fn partial_cmp(&self, other: &Self) -> Option { - self.entity().partial_cmp(&other.entity()) + Some(self.cmp(other)) } } @@ -3082,14 +3644,14 @@ impl Hash for FilteredEntityRef<'_> { } } -impl EntityBorrow for FilteredEntityRef<'_> { +impl ContainsEntity for FilteredEntityRef<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for FilteredEntityRef<'_> {} +unsafe impl EntityEquivalent for FilteredEntityRef<'_> {} /// Provides mutable access to a single entity and some of its components defined by the contained [`Access`]. /// @@ -3111,7 +3673,7 @@ unsafe impl TrustedEntityBorrow for FilteredEntityRef<'_> {} /// .data::<&mut A>() /// .build(); /// -/// let mut filtered_entity: FilteredEntityMut = query.single_mut(&mut world); +/// let mut filtered_entity: FilteredEntityMut = query.single_mut(&mut world).unwrap(); /// let component: Mut = filtered_entity.get_mut().unwrap(); /// /// // Here `FilteredEntityMut` is nested in a tuple, so it does not have access to `&mut A`. 
@@ -3119,7 +3681,7 @@ unsafe impl TrustedEntityBorrow for FilteredEntityRef<'_> {} /// .data::<&mut A>() /// .build(); /// -/// let (_, mut filtered_entity) = query.single_mut(&mut world); +/// let (_, mut filtered_entity) = query.single_mut(&mut world).unwrap(); /// assert!(filtered_entity.get_mut::().is_none()); /// ``` pub struct FilteredEntityMut<'w> { @@ -3131,9 +3693,9 @@ impl<'w> FilteredEntityMut<'w> { /// # Safety /// - No `&mut World` can exist from the underlying `UnsafeWorldCell` /// - If `access` takes read access to a component no mutable reference to that - /// component can exist at the same time as the returned [`FilteredEntityMut`] + /// component can exist at the same time as the returned [`FilteredEntityMut`] /// - If `access` takes write access to a component, no reference to that component - /// may exist at the same time as the returned [`FilteredEntityMut`] + /// may exist at the same time as the returned [`FilteredEntityMut`] /// - If `access` takes any access for a component `entity` must have that component. #[inline] pub(crate) unsafe fn new(entity: UnsafeEntityCell<'w>, access: Access) -> Self { @@ -3197,7 +3759,7 @@ impl<'w> FilteredEntityMut<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(&self, component_id: ComponentId) -> bool { self.entity.contains_id(component_id) @@ -3322,8 +3884,7 @@ impl<'w> FilteredEntityMut<'w> { } /// Returns the source code location from which this entity has last been spawned. 
- #[cfg(feature = "track_change_detection")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.entity.spawned_by() } } @@ -3336,7 +3897,7 @@ impl<'a> From> for FilteredEntityMut<'a> { let mut access = Access::default(); access.read_all(); access.write_all(); - FilteredEntityMut::new(entity.0, access) + FilteredEntityMut::new(entity.cell, access) } } } @@ -3349,7 +3910,7 @@ impl<'a> From<&'a mut EntityMut<'_>> for FilteredEntityMut<'a> { let mut access = Access::default(); access.read_all(); access.write_all(); - FilteredEntityMut::new(entity.0, access) + FilteredEntityMut::new(entity.cell, access) } } } @@ -3380,6 +3941,24 @@ impl<'a> From<&'a mut EntityWorldMut<'_>> for FilteredEntityMut<'a> { } } +impl<'a, B: Bundle> From<&'a EntityMutExcept<'_, B>> for FilteredEntityMut<'a> { + fn from(value: &'a EntityMutExcept<'_, B>) -> Self { + // SAFETY: + // - The FilteredEntityMut has the same component access as the given EntityMutExcept. + unsafe { + let mut access = Access::default(); + access.write_all(); + let components = value.entity.world().components(); + B::get_component_ids(components, &mut |maybe_id| { + if let Some(id) = maybe_id { + access.remove_component_read(id); + } + }); + FilteredEntityMut::new(value.entity, access) + } + } +} + impl PartialEq for FilteredEntityMut<'_> { fn eq(&self, other: &Self) -> bool { self.entity() == other.entity() @@ -3388,12 +3967,11 @@ impl PartialEq for FilteredEntityMut<'_> { impl Eq for FilteredEntityMut<'_> {} -#[expect(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for FilteredEntityMut<'_> { /// [`FilteredEntityMut`]'s comparison trait implementations match the underlying [`Entity`], /// and cannot discern between different worlds. 
fn partial_cmp(&self, other: &Self) -> Option { - self.entity().partial_cmp(&other.entity()) + Some(self.cmp(other)) } } @@ -3409,14 +3987,14 @@ impl Hash for FilteredEntityMut<'_> { } } -impl EntityBorrow for FilteredEntityMut<'_> { +impl ContainsEntity for FilteredEntityMut<'_> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for FilteredEntityMut<'_> {} +unsafe impl EntityEquivalent for FilteredEntityMut<'_> {} /// Error type returned by [`TryFrom`] conversions from filtered entity types /// ([`FilteredEntityRef`]/[`FilteredEntityMut`]) to full-access entity types @@ -3435,7 +4013,6 @@ pub enum TryFromFilteredError { /// Provides read-only access to a single entity and all its components, save /// for an explicitly-enumerated set. -#[derive(Clone)] pub struct EntityRefExcept<'w, B> where B: Bundle, @@ -3488,25 +4065,111 @@ where /// doesn't have a component of that type or if the type is one of the /// excluded components. #[inline] - pub fn get_ref(&self) -> Option> - where - C: Component, - { + pub fn get_ref(&self) -> Option> + where + C: Component, + { + let components = self.entity.world().components(); + let id = components.component_id::()?; + if bundle_contains_component::(components, id) { + None + } else { + // SAFETY: We have read access for all components that weren't + // covered by the `contains` check above. + unsafe { self.entity.get_ref() } + } + } + + /// Returns the source code location from which this entity has been spawned. + pub fn spawned_by(&self) -> MaybeLocation { + self.entity.spawned_by() + } + + /// Gets the component of the given [`ComponentId`] from the entity. 
+ /// + /// **You should prefer to use the typed API [`Self::get`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityRefExcept::get`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityRefExcept`] is alive. + #[inline] + pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + let components = self.entity.world().components(); + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have read access for this component + unsafe { self.entity.get_by_id(component_id) } + }) + .flatten() + } + + /// Returns `true` if the current entity has a component of type `T`. + /// Otherwise, this returns `false`. + /// + /// ## Notes + /// + /// If you do not know the concrete type of a component, consider using + /// [`Self::contains_id`] or [`Self::contains_type_id`]. + #[inline] + pub fn contains(&self) -> bool { + self.contains_type_id(TypeId::of::()) + } + + /// Returns `true` if the current entity has a component identified by `component_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. + /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using + /// [`Self::contains_type_id`]. + #[inline] + pub fn contains_id(&self, component_id: ComponentId) -> bool { + self.entity.contains_id(component_id) + } + + /// Returns `true` if the current entity has a component with the type identified by `type_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. + /// - If you have a [`ComponentId`] instead of a [`TypeId`], consider using [`Self::contains_id`]. 
+ #[inline] + pub fn contains_type_id(&self, type_id: TypeId) -> bool { + self.entity.contains_type_id(type_id) + } + + /// Retrieves the change ticks for the given component. This can be useful for implementing change + /// detection in custom runtimes. + #[inline] + pub fn get_change_ticks(&self) -> Option { + let component_id = self.entity.world().components().get_id(TypeId::of::())?; let components = self.entity.world().components(); - let id = components.component_id::()?; - if bundle_contains_component::(components, id) { - None - } else { - // SAFETY: We have read access for all components that weren't - // covered by the `contains` check above. - unsafe { self.entity.get_ref() } - } + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have read access + unsafe { self.entity.get_change_ticks::() } + }) + .flatten() } - /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_change_detection")] - pub fn spawned_by(&self) -> &'static Location<'static> { - self.entity.spawned_by() + /// Retrieves the change ticks for the given [`ComponentId`]. This can be useful for implementing change + /// detection in custom runtimes. 
+ /// + /// **You should prefer to use the typed API [`Self::get_change_ticks`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + #[inline] + pub fn get_change_ticks_by_id(&self, component_id: ComponentId) -> Option { + let components = self.entity.world().components(); + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have read access + unsafe { self.entity.get_change_ticks_by_id(component_id) } + }) + .flatten() } } @@ -3514,13 +4177,21 @@ impl<'a, B> From<&'a EntityMutExcept<'_, B>> for EntityRefExcept<'a, B> where B: Bundle, { - fn from(entity_mut: &'a EntityMutExcept<'_, B>) -> Self { + fn from(entity: &'a EntityMutExcept<'_, B>) -> Self { // SAFETY: All accesses that `EntityRefExcept` provides are also // accesses that `EntityMutExcept` provides. - unsafe { EntityRefExcept::new(entity_mut.entity) } + unsafe { EntityRefExcept::new(entity.entity) } + } +} + +impl Clone for EntityRefExcept<'_, B> { + fn clone(&self) -> Self { + *self } } +impl Copy for EntityRefExcept<'_, B> {} + impl PartialEq for EntityRefExcept<'_, B> { fn eq(&self, other: &Self) -> bool { self.entity() == other.entity() @@ -3529,12 +4200,11 @@ impl PartialEq for EntityRefExcept<'_, B> { impl Eq for EntityRefExcept<'_, B> {} -#[expect(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for EntityRefExcept<'_, B> { /// [`EntityRefExcept`]'s comparison trait implementations match the underlying [`Entity`], /// and cannot discern between different worlds. fn partial_cmp(&self, other: &Self) -> Option { - self.entity().partial_cmp(&other.entity()) + Some(self.cmp(other)) } } @@ -3550,14 +4220,14 @@ impl Hash for EntityRefExcept<'_, B> { } } -impl EntityBorrow for EntityRefExcept<'_, B> { +impl ContainsEntity for EntityRefExcept<'_, B> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. 
-unsafe impl TrustedEntityBorrow for EntityRefExcept<'_, B> {} +unsafe impl EntityEquivalent for EntityRefExcept<'_, B> {} /// Provides mutable access to all components of an entity, with the exception /// of an explicit set. @@ -3567,7 +4237,6 @@ unsafe impl TrustedEntityBorrow for EntityRefExcept<'_, B> {} /// queries that might match entities that this query also matches. If you don't /// need access to all components, prefer a standard query with a /// [`crate::query::Without`] filter. -#[derive(Clone)] pub struct EntityMutExcept<'w, B> where B: Bundle, @@ -3656,10 +4325,81 @@ where } /// Returns the source code location from which this entity has been spawned. - #[cfg(feature = "track_change_detection")] - pub fn spawned_by(&self) -> &'static Location<'static> { + pub fn spawned_by(&self) -> MaybeLocation { self.entity.spawned_by() } + + /// Returns `true` if the current entity has a component of type `T`. + /// Otherwise, this returns `false`. + /// + /// ## Notes + /// + /// If you do not know the concrete type of a component, consider using + /// [`Self::contains_id`] or [`Self::contains_type_id`]. + #[inline] + pub fn contains(&self) -> bool { + self.contains_type_id(TypeId::of::()) + } + + /// Returns `true` if the current entity has a component identified by `component_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. + /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using + /// [`Self::contains_type_id`]. + #[inline] + pub fn contains_id(&self, component_id: ComponentId) -> bool { + self.entity.contains_id(component_id) + } + + /// Returns `true` if the current entity has a component with the type identified by `type_id`. + /// Otherwise, this returns false. + /// + /// ## Notes + /// + /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. 
+ /// - If you have a [`ComponentId`] instead of a [`TypeId`], consider using [`Self::contains_id`]. + #[inline] + pub fn contains_type_id(&self, type_id: TypeId) -> bool { + self.entity.contains_type_id(type_id) + } + + /// Gets the component of the given [`ComponentId`] from the entity. + /// + /// **You should prefer to use the typed API [`Self::get`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityMutExcept::get`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityMutExcept`] is alive. + #[inline] + pub fn get_by_id(&'w self, component_id: ComponentId) -> Option> { + self.as_readonly().get_by_id(component_id) + } + + /// Gets a [`MutUntyped`] of the component of the given [`ComponentId`] from the entity. + /// + /// **You should prefer to use the typed API [`Self::get_mut`] where possible and only + /// use this in cases where the actual component types are not known at + /// compile time.** + /// + /// Unlike [`EntityMutExcept::get_mut`], this returns a raw pointer to the component, + /// which is only valid while the [`EntityMutExcept`] is alive. + #[inline] + pub fn get_mut_by_id( + &mut self, + component_id: ComponentId, + ) -> Option> { + let components = self.entity.world().components(); + (!bundle_contains_component::(components, component_id)) + .then(|| { + // SAFETY: We have write access + unsafe { self.entity.get_mut_by_id(component_id).ok() } + }) + .flatten() + } } impl PartialEq for EntityMutExcept<'_, B> { @@ -3670,12 +4410,11 @@ impl PartialEq for EntityMutExcept<'_, B> { impl Eq for EntityMutExcept<'_, B> {} -#[expect(clippy::non_canonical_partial_ord_impl)] impl PartialOrd for EntityMutExcept<'_, B> { /// [`EntityMutExcept`]'s comparison trait implementations match the underlying [`Entity`], /// and cannot discern between different worlds. 
fn partial_cmp(&self, other: &Self) -> Option { - self.entity().partial_cmp(&other.entity()) + Some(self.cmp(other)) } } @@ -3691,14 +4430,14 @@ impl Hash for EntityMutExcept<'_, B> { } } -impl EntityBorrow for EntityMutExcept<'_, B> { +impl ContainsEntity for EntityMutExcept<'_, B> { fn entity(&self) -> Entity { self.id() } } // SAFETY: This type represents one Entity. We implement the comparison traits based on that Entity. -unsafe impl TrustedEntityBorrow for EntityMutExcept<'_, B> {} +unsafe impl EntityEquivalent for EntityMutExcept<'_, B> {} fn bundle_contains_component(components: &Components, query_id: ComponentId) -> bool where @@ -3718,9 +4457,8 @@ where /// # Safety /// /// - [`OwningPtr`] and [`StorageType`] iterators must correspond to the -/// [`BundleInfo`] used to construct [`BundleInserter`] +/// [`BundleInfo`] used to construct [`BundleInserter`] /// - [`Entity`] must correspond to [`EntityLocation`] -#[track_caller] unsafe fn insert_dynamic_bundle< 'a, I: Iterator>, @@ -3731,6 +4469,9 @@ unsafe fn insert_dynamic_bundle< location: EntityLocation, components: I, storage_types: S, + mode: InsertMode, + caller: MaybeLocation, + relationship_hook_insert_mode: RelationshipHookMode, ) -> EntityLocation { struct DynamicInsertBundle<'a, I: Iterator)>> { components: I, @@ -3739,6 +4480,7 @@ unsafe fn insert_dynamic_bundle< impl<'a, I: Iterator)>> DynamicBundle for DynamicInsertBundle<'a, I> { + type Effect = (); fn get_components(self, func: &mut impl FnMut(StorageType, OwningPtr<'_>)) { self.components.for_each(|(t, ptr)| func(t, ptr)); } @@ -3750,14 +4492,16 @@ unsafe fn insert_dynamic_bundle< // SAFETY: location matches current entity. 
unsafe { - bundle_inserter.insert( - entity, - location, - bundle, - InsertMode::Replace, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ) + bundle_inserter + .insert( + entity, + location, + bundle, + mode, + caller, + relationship_hook_insert_mode, + ) + .0 } } @@ -3772,6 +4516,9 @@ unsafe fn insert_dynamic_bundle< /// - `component_id` must be valid /// - `components` must come from the same world as `self` /// - The relevant table row **must be removed** by the caller once all components are taken, without dropping the value +/// +/// # Panics +/// Panics if the entity did not have the component. #[inline] pub(crate) unsafe fn take_component<'a>( storages: &'a mut Storages, @@ -3866,6 +4613,26 @@ pub unsafe trait DynamicComponentFetch { self, cell: UnsafeEntityCell<'_>, ) -> Result, EntityComponentError>; + + /// Returns untyped mutable reference(s) to the component(s) with the + /// given [`ComponentId`]s, as determined by `self`. + /// Assumes all [`ComponentId`]s refer to mutable components. + /// + /// # Safety + /// + /// It is the caller's responsibility to ensure that: + /// - The given [`UnsafeEntityCell`] has mutable access to the fetched components. + /// - No other references to the fetched components exist at the same time. + /// - The requested components are all mutable. + /// + /// # Errors + /// + /// - Returns [`EntityComponentError::MissingComponent`] if a component is missing from the entity. + /// - Returns [`EntityComponentError::AliasedMutability`] if a component is requested multiple times. 
+ unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError>; } // SAFETY: @@ -3891,6 +4658,15 @@ unsafe impl DynamicComponentFetch for ComponentId { unsafe { cell.get_mut_by_id(self) } .map_err(|_| EntityComponentError::MissingComponent(self)) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + // SAFETY: caller ensures that the cell has mutable access to the component. + unsafe { cell.get_mut_assume_mutable_by_id(self) } + .map_err(|_| EntityComponentError::MissingComponent(self)) + } } // SAFETY: @@ -3913,6 +4689,13 @@ unsafe impl DynamicComponentFetch for [ComponentId; N] { ) -> Result, EntityComponentError> { <&Self>::fetch_mut(&self, cell) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + <&Self>::fetch_mut_assume_mutable(&self, cell) + } } // SAFETY: @@ -3967,6 +4750,34 @@ unsafe impl DynamicComponentFetch for &'_ [ComponentId; N] { Ok(ptrs) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + // Check for duplicate component IDs. + for i in 0..self.len() { + for j in 0..i { + if self[i] == self[j] { + return Err(EntityComponentError::AliasedMutability(self[i])); + } + } + } + + let mut ptrs = [const { MaybeUninit::uninit() }; N]; + for (ptr, &id) in core::iter::zip(&mut ptrs, self) { + *ptr = MaybeUninit::new( + // SAFETY: caller ensures that the cell has mutable access to the component. + unsafe { cell.get_mut_assume_mutable_by_id(id) } + .map_err(|_| EntityComponentError::MissingComponent(id))?, + ); + } + + // SAFETY: Each ptr was initialized in the loop above. 
+ let ptrs = ptrs.map(|ptr| unsafe { MaybeUninit::assume_init(ptr) }); + + Ok(ptrs) + } } // SAFETY: @@ -4013,6 +4824,30 @@ unsafe impl DynamicComponentFetch for &'_ [ComponentId] { } Ok(ptrs) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + // Check for duplicate component IDs. + for i in 0..self.len() { + for j in 0..i { + if self[i] == self[j] { + return Err(EntityComponentError::AliasedMutability(self[i])); + } + } + } + + let mut ptrs = Vec::with_capacity(self.len()); + for &id in self { + ptrs.push( + // SAFETY: caller ensures that the cell has mutable access to the component. + unsafe { cell.get_mut_assume_mutable_by_id(id) } + .map_err(|_| EntityComponentError::MissingComponent(id))?, + ); + } + Ok(ptrs) + } } // SAFETY: @@ -4052,20 +4887,34 @@ unsafe impl DynamicComponentFetch for &'_ HashSet { } Ok(ptrs) } + + unsafe fn fetch_mut_assume_mutable( + self, + cell: UnsafeEntityCell<'_>, + ) -> Result, EntityComponentError> { + let mut ptrs = HashMap::with_capacity_and_hasher(self.len(), Default::default()); + for &id in self { + ptrs.insert( + id, + // SAFETY: caller ensures that the cell has mutable access to the component. 
+ unsafe { cell.get_mut_assume_mutable_by_id(id) } + .map_err(|_| EntityComponentError::MissingComponent(id))?, + ); + } + Ok(ptrs) + } } #[cfg(test)] mod tests { + use alloc::{vec, vec::Vec}; use bevy_ptr::{OwningPtr, Ptr}; use core::panic::AssertUnwindSafe; - #[cfg(feature = "track_change_detection")] - use core::panic::Location; - #[cfg(feature = "track_change_detection")] use std::sync::OnceLock; + use crate::component::HookContext; use crate::{ - self as bevy_ecs, - change_detection::MutUntyped, + change_detection::{MaybeLocation, MutUntyped}, component::ComponentId, prelude::*, system::{assert_is_system, RunSystemOnce as _}, @@ -5140,27 +5989,28 @@ mod tests { #[component(on_add = ord_a_hook_on_add, on_insert = ord_a_hook_on_insert, on_replace = ord_a_hook_on_replace, on_remove = ord_a_hook_on_remove)] struct OrdA; - fn ord_a_hook_on_add(mut world: DeferredWorld, entity: Entity, _id: ComponentId) { + fn ord_a_hook_on_add(mut world: DeferredWorld, HookContext { entity, .. }: HookContext) { world.resource_mut::().0.push("OrdA hook on_add"); world.commands().entity(entity).insert(OrdB); } - fn ord_a_hook_on_insert(mut world: DeferredWorld, entity: Entity, _id: ComponentId) { + fn ord_a_hook_on_insert(mut world: DeferredWorld, HookContext { entity, .. 
}: HookContext) { world .resource_mut::() .0 .push("OrdA hook on_insert"); - world.commands().entity(entity).despawn(); + world.commands().entity(entity).remove::(); + world.commands().entity(entity).remove::(); } - fn ord_a_hook_on_replace(mut world: DeferredWorld, _entity: Entity, _id: ComponentId) { + fn ord_a_hook_on_replace(mut world: DeferredWorld, _: HookContext) { world .resource_mut::() .0 .push("OrdA hook on_replace"); } - fn ord_a_hook_on_remove(mut world: DeferredWorld, _entity: Entity, _id: ComponentId) { + fn ord_a_hook_on_remove(mut world: DeferredWorld, _: HookContext) { world .resource_mut::() .0 @@ -5187,7 +6037,7 @@ mod tests { #[component(on_add = ord_b_hook_on_add, on_insert = ord_b_hook_on_insert, on_replace = ord_b_hook_on_replace, on_remove = ord_b_hook_on_remove)] struct OrdB; - fn ord_b_hook_on_add(mut world: DeferredWorld, _entity: Entity, _id: ComponentId) { + fn ord_b_hook_on_add(mut world: DeferredWorld, _: HookContext) { world.resource_mut::().0.push("OrdB hook on_add"); world.commands().queue(|world: &mut World| { world @@ -5197,21 +6047,21 @@ mod tests { }); } - fn ord_b_hook_on_insert(mut world: DeferredWorld, _entity: Entity, _id: ComponentId) { + fn ord_b_hook_on_insert(mut world: DeferredWorld, _: HookContext) { world .resource_mut::() .0 .push("OrdB hook on_insert"); } - fn ord_b_hook_on_replace(mut world: DeferredWorld, _entity: Entity, _id: ComponentId) { + fn ord_b_hook_on_replace(mut world: DeferredWorld, _: HookContext) { world .resource_mut::() .0 .push("OrdB hook on_replace"); } - fn ord_b_hook_on_remove(mut world: DeferredWorld, _entity: Entity, _id: ComponentId) { + fn ord_b_hook_on_remove(mut world: DeferredWorld, _: HookContext) { world .resource_mut::() .0 @@ -5258,12 +6108,12 @@ mod tests { "OrdB observer on_insert", "OrdB command on_add", // command added by OrdB hook on_add, needs to run before despawn command "OrdA observer on_replace", // start of despawn - "OrdB observer on_replace", "OrdA hook on_replace", - 
"OrdB hook on_replace", "OrdA observer on_remove", - "OrdB observer on_remove", "OrdA hook on_remove", + "OrdB observer on_replace", + "OrdB hook on_replace", + "OrdB observer on_remove", "OrdB hook on_remove", ]; world.flush(); @@ -5307,7 +6157,7 @@ mod tests { struct A; #[derive(Component, Clone, PartialEq, Debug, Default)] - #[require(C(|| C(3)))] + #[require(C(3))] struct B; #[derive(Component, Clone, PartialEq, Debug, Default)] @@ -5322,10 +6172,11 @@ mod tests { let entity_b = world.spawn_empty().id(); world.entity_mut(entity_a).clone_with(entity_b, |builder| { - builder.move_components(true); - builder.without_required_components(|builder| { - builder.deny::(); - }); + builder + .move_components(true) + .without_required_components(|builder| { + builder.deny::(); + }); }); assert_eq!(world.entity(entity_a).get::(), Some(&A)); @@ -5342,7 +6193,6 @@ mod tests { } #[test] - #[cfg(feature = "track_change_detection")] fn update_despawned_by_after_observers() { let mut world = World::new(); @@ -5350,19 +6200,19 @@ mod tests { #[component(on_remove = get_tracked)] struct C; - static TRACKED: OnceLock<&'static Location<'static>> = OnceLock::new(); - fn get_tracked(world: DeferredWorld, entity: Entity, _: ComponentId) { + static TRACKED: OnceLock = OnceLock::new(); + fn get_tracked(world: DeferredWorld, HookContext { entity, .. 
}: HookContext) { TRACKED.get_or_init(|| { world .entities .entity_get_spawned_or_despawned_by(entity) - .unwrap() + .map(|l| l.unwrap()) }); } #[track_caller] - fn caller_spawn(world: &mut World) -> (Entity, &'static Location<'static>) { - let caller = Location::caller(); + fn caller_spawn(world: &mut World) -> (Entity, MaybeLocation) { + let caller = MaybeLocation::caller(); (world.spawn(C).id(), caller) } let (entity, spawner) = caller_spawn(&mut world); @@ -5372,13 +6222,13 @@ mod tests { world .entities() .entity_get_spawned_or_despawned_by(entity) - .unwrap() + .map(|l| l.unwrap()) ); #[track_caller] - fn caller_despawn(world: &mut World, entity: Entity) -> &'static Location<'static> { + fn caller_despawn(world: &mut World, entity: Entity) -> MaybeLocation { world.despawn(entity); - Location::caller() + MaybeLocation::caller() } let despawner = caller_despawn(&mut world, entity); @@ -5388,7 +6238,128 @@ mod tests { world .entities() .entity_get_spawned_or_despawned_by(entity) - .unwrap() + .map(|l| l.unwrap()) + ); + } + + #[test] + fn with_component_activates_hooks() { + use core::sync::atomic::{AtomicBool, AtomicU8, Ordering}; + + #[derive(Component, PartialEq, Eq, Debug)] + #[component(immutable)] + struct Foo(bool); + + static EXPECTED_VALUE: AtomicBool = AtomicBool::new(false); + + static ADD_COUNT: AtomicU8 = AtomicU8::new(0); + static REMOVE_COUNT: AtomicU8 = AtomicU8::new(0); + static REPLACE_COUNT: AtomicU8 = AtomicU8::new(0); + static INSERT_COUNT: AtomicU8 = AtomicU8::new(0); + + let mut world = World::default(); + + world.register_component::(); + world + .register_component_hooks::() + .on_add(|world, context| { + ADD_COUNT.fetch_add(1, Ordering::Relaxed); + + assert_eq!( + world.get(context.entity), + Some(&Foo(EXPECTED_VALUE.load(Ordering::Relaxed))) + ); + }) + .on_remove(|world, context| { + REMOVE_COUNT.fetch_add(1, Ordering::Relaxed); + + assert_eq!( + world.get(context.entity), + Some(&Foo(EXPECTED_VALUE.load(Ordering::Relaxed))) + ); + }) 
+ .on_replace(|world, context| { + REPLACE_COUNT.fetch_add(1, Ordering::Relaxed); + + assert_eq!( + world.get(context.entity), + Some(&Foo(EXPECTED_VALUE.load(Ordering::Relaxed))) + ); + }) + .on_insert(|world, context| { + INSERT_COUNT.fetch_add(1, Ordering::Relaxed); + + assert_eq!( + world.get(context.entity), + Some(&Foo(EXPECTED_VALUE.load(Ordering::Relaxed))) + ); + }); + + let entity = world.spawn(Foo(false)).id(); + + assert_eq!(ADD_COUNT.load(Ordering::Relaxed), 1); + assert_eq!(REMOVE_COUNT.load(Ordering::Relaxed), 0); + assert_eq!(REPLACE_COUNT.load(Ordering::Relaxed), 0); + assert_eq!(INSERT_COUNT.load(Ordering::Relaxed), 1); + + let mut entity = world.entity_mut(entity); + + let archetype_pointer_before = &raw const *entity.archetype(); + + assert_eq!(entity.get::(), Some(&Foo(false))); + + entity.modify_component(|foo: &mut Foo| { + foo.0 = true; + EXPECTED_VALUE.store(foo.0, Ordering::Relaxed); + }); + + let archetype_pointer_after = &raw const *entity.archetype(); + + assert_eq!(entity.get::(), Some(&Foo(true))); + + assert_eq!(ADD_COUNT.load(Ordering::Relaxed), 1); + assert_eq!(REMOVE_COUNT.load(Ordering::Relaxed), 0); + assert_eq!(REPLACE_COUNT.load(Ordering::Relaxed), 1); + assert_eq!(INSERT_COUNT.load(Ordering::Relaxed), 2); + + assert_eq!(archetype_pointer_before, archetype_pointer_after); + } + + #[test] + fn bundle_remove_only_triggers_for_present_components() { + let mut world = World::default(); + + #[derive(Component)] + struct A; + + #[derive(Component)] + struct B; + + #[derive(Resource, PartialEq, Eq, Debug)] + struct Tracker { + a: bool, + b: bool, + } + + world.insert_resource(Tracker { a: false, b: false }); + let entity = world.spawn(A).id(); + + world.add_observer(|_: Trigger, mut tracker: ResMut| { + tracker.a = true; + }); + world.add_observer(|_: Trigger, mut tracker: ResMut| { + tracker.b = true; + }); + + world.entity_mut(entity).remove::<(A, B)>(); + + assert_eq!( + world.resource::(), + &Tracker { + a: true, + // The entity 
didn't have a B component, so it should not have been triggered. + b: false, + } ); } } diff --git a/crates/bevy_ecs/src/world/error.rs b/crates/bevy_ecs/src/world/error.rs index 7f137fa012fee..3527967942a94 100644 --- a/crates/bevy_ecs/src/world/error.rs +++ b/crates/bevy_ecs/src/world/error.rs @@ -1,20 +1,41 @@ //! Contains error types returned by bevy's schedule. -use thiserror::Error; +use alloc::vec::Vec; -use crate::{component::ComponentId, entity::Entity, schedule::InternedScheduleLabel}; - -use super::unsafe_world_cell::UnsafeWorldCell; +use crate::{ + component::ComponentId, + entity::{Entity, EntityDoesNotExistError}, + schedule::InternedScheduleLabel, +}; /// The error type returned by [`World::try_run_schedule`] if the provided schedule does not exist. /// /// [`World::try_run_schedule`]: crate::world::World::try_run_schedule -#[derive(Error, Debug)] +#[derive(thiserror::Error, Debug)] #[error("The schedule with the label {0:?} was not found.")] pub struct TryRunScheduleError(pub InternedScheduleLabel); +/// The error type returned by [`World::try_insert_batch`] and [`World::try_insert_batch_if_new`] +/// if any of the provided entities do not exist. +/// +/// [`World::try_insert_batch`]: crate::world::World::try_insert_batch +/// [`World::try_insert_batch_if_new`]: crate::world::World::try_insert_batch_if_new +#[derive(thiserror::Error, Debug, Clone)] +#[error("Could not insert bundles of type {bundle_type} into the entities with the following IDs because they do not exist: {entities:?}")] +pub struct TryInsertBatchError { + /// The bundles' type name. + pub bundle_type: &'static str, + /// The IDs of the provided entities that do not exist. + pub entities: Vec, +} + +/// An error that occurs when a specified [`Entity`] could not be despawned. 
+#[derive(thiserror::Error, Debug, Clone, Copy)] +#[error("Could not despawn entity: {0}")] +pub struct EntityDespawnError(#[from] pub EntityMutableFetchError); + /// An error that occurs when dynamically retrieving components from an entity. -#[derive(Error, Debug, Clone, Copy, PartialEq, Eq)] +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] pub enum EntityComponentError { /// The component with the given [`ComponentId`] does not exist on the entity. #[error("The component with ID {0:?} does not exist on the entity.")] @@ -25,60 +46,26 @@ pub enum EntityComponentError { } /// An error that occurs when fetching entities mutably from a world. -#[derive(Clone, Copy)] -pub enum EntityFetchError<'w> { +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] +pub enum EntityMutableFetchError { /// The entity with the given ID does not exist. - NoSuchEntity(Entity, UnsafeWorldCell<'w>), + #[error(transparent)] + EntityDoesNotExist(#[from] EntityDoesNotExistError), /// The entity with the given ID was requested mutably more than once. + #[error("The entity with ID {0} was requested mutably more than once")] AliasedMutability(Entity), } -impl<'w> core::error::Error for EntityFetchError<'w> {} - -impl<'w> core::fmt::Display for EntityFetchError<'w> { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - match *self { - Self::NoSuchEntity(entity, world) => { - write!( - f, - "Entity {entity} {}", - world - .entities() - .entity_does_not_exist_error_details_message(entity) - ) - } - Self::AliasedMutability(entity) => { - write!(f, "Entity {entity} was requested mutably more than once") - } - } - } +/// An error that occurs when getting a resource of a given type in a world. +#[derive(thiserror::Error, Debug, Clone, Copy, PartialEq, Eq)] +pub enum ResourceFetchError { + /// The resource has never been initialized or registered with the world. + #[error("The resource has never been initialized or registered with the world. 
Did you forget to add it using `app.insert_resource` / `app.init_resource`?")] + NotRegistered, + /// The resource with the given [`ComponentId`] does not currently exist in the world. + #[error("The resource with ID {0:?} does not currently exist in the world.")] + DoesNotExist(ComponentId), + /// Cannot get access to the resource with the given [`ComponentId`] in the world as it conflicts with an on going operation. + #[error("Cannot get access to the resource with ID {0:?} in the world as it conflicts with an on going operation.")] + NoResourceAccess(ComponentId), } - -impl<'w> core::fmt::Debug for EntityFetchError<'w> { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match *self { - Self::NoSuchEntity(entity, world) => { - write!( - f, - "NoSuchEntity({entity} {})", - world - .entities() - .entity_does_not_exist_error_details_message(entity) - ) - } - Self::AliasedMutability(entity) => write!(f, "AliasedMutability({entity})"), - } - } -} - -impl<'w> PartialEq for EntityFetchError<'w> { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (Self::NoSuchEntity(e1, _), Self::NoSuchEntity(e2, _)) if e1 == e2 => true, - (Self::AliasedMutability(e1), Self::AliasedMutability(e2)) if e1 == e2 => true, - _ => false, - } - } -} - -impl<'w> Eq for EntityFetchError<'w> {} diff --git a/crates/bevy_ecs/src/world/filtered_resource.rs b/crates/bevy_ecs/src/world/filtered_resource.rs index 66eac2fdb9f95..a9fac308fa1d1 100644 --- a/crates/bevy_ecs/src/world/filtered_resource.rs +++ b/crates/bevy_ecs/src/world/filtered_resource.rs @@ -2,12 +2,12 @@ use crate::{ change_detection::{Mut, MutUntyped, Ref, Ticks, TicksMut}, component::{ComponentId, Tick}, query::Access, - system::Resource, + resource::Resource, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; -use bevy_ptr::Ptr; -#[cfg(feature = "track_change_detection")] -use bevy_ptr::UnsafeCellDeref; +use bevy_ptr::{Ptr, UnsafeCellDeref}; + +use super::error::ResourceFetchError; /// 
Provides read-only access to a set of [`Resource`]s defined by the contained [`Access`]. /// @@ -44,9 +44,9 @@ use bevy_ptr::UnsafeCellDeref; /// /// fn resource_system(res: FilteredResources) { /// // The resource exists, but we have no access, so we can't read it. -/// assert!(res.get::().is_none()); +/// assert!(res.get::().is_err()); /// // The resource doesn't exist, so we can't read it. -/// assert!(res.get::().is_none()); +/// assert!(res.get::().is_err()); /// // The resource exists and we have access, so we can read it. /// let c = res.get::().unwrap(); /// // The type parameter can be left out if it can be determined from use. @@ -146,39 +146,45 @@ impl<'w, 's> FilteredResources<'w, 's> { } /// Returns `true` if the `FilteredResources` has access to the given resource. - /// Note that [`Self::get()`] may still return `None` if the resource does not exist. + /// Note that [`Self::get()`] may still return `Err` if the resource does not exist. pub fn has_read(&self) -> bool { let component_id = self.world.components().resource_id::(); component_id.is_some_and(|component_id| self.access.has_resource_read(component_id)) } /// Gets a reference to the resource of the given type if it exists and the `FilteredResources` has access to it. - pub fn get(&self) -> Option> { - let component_id = self.world.components().resource_id::()?; + pub fn get(&self) -> Result, ResourceFetchError> { + let component_id = self + .world + .components() + .resource_id::() + .ok_or(ResourceFetchError::NotRegistered)?; if !self.access.has_resource_read(component_id) { - return None; + return Err(ResourceFetchError::NoResourceAccess(component_id)); } + // SAFETY: We have read access to this resource - unsafe { self.world.get_resource_with_ticks(component_id) }.map( - |(value, ticks, _caller)| Ref { - // SAFETY: `component_id` was obtained from the type ID of `R`. - value: unsafe { value.deref() }, - // SAFETY: We have read access to the resource, so no mutable reference can exist. 
- ticks: unsafe { Ticks::from_tick_cells(ticks, self.last_run, self.this_run) }, - #[cfg(feature = "track_change_detection")] - // SAFETY: We have read access to the resource, so no mutable reference can exist. - changed_by: unsafe { _caller.deref() }, - }, - ) + let (value, ticks, caller) = unsafe { self.world.get_resource_with_ticks(component_id) } + .ok_or(ResourceFetchError::DoesNotExist(component_id))?; + + Ok(Ref { + // SAFETY: `component_id` was obtained from the type ID of `R`. + value: unsafe { value.deref() }, + // SAFETY: We have read access to the resource, so no mutable reference can exist. + ticks: unsafe { Ticks::from_tick_cells(ticks, self.last_run, self.this_run) }, + // SAFETY: We have read access to the resource, so no mutable reference can exist. + changed_by: unsafe { caller.map(|caller| caller.deref()) }, + }) } /// Gets a pointer to the resource with the given [`ComponentId`] if it exists and the `FilteredResources` has access to it. - pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + pub fn get_by_id(&self, component_id: ComponentId) -> Result, ResourceFetchError> { if !self.access.has_resource_read(component_id) { - return None; + return Err(ResourceFetchError::NoResourceAccess(component_id)); } // SAFETY: We have read access to this resource unsafe { self.world.get_resource_by_id(component_id) } + .ok_or(ResourceFetchError::DoesNotExist(component_id)) } } @@ -282,14 +288,14 @@ impl<'w> From<&'w mut World> for FilteredResources<'w, 'static> { /// /// fn resource_system(mut res: FilteredResourcesMut) { /// // The resource exists, but we have no access, so we can't read it or write it. -/// assert!(res.get::().is_none()); -/// assert!(res.get_mut::().is_none()); +/// assert!(res.get::().is_err()); +/// assert!(res.get_mut::().is_err()); /// // The resource doesn't exist, so we can't read it or write it. 
-/// assert!(res.get::().is_none()); -/// assert!(res.get_mut::().is_none()); +/// assert!(res.get::().is_err()); +/// assert!(res.get_mut::().is_err()); /// // The resource exists and we have read access, so we can read it but not write it. /// let c = res.get::().unwrap(); -/// assert!(res.get_mut::().is_none()); +/// assert!(res.get_mut::().is_err()); /// // The resource exists and we have write access, so we can read it or write it. /// let d = res.get::().unwrap(); /// let d = res.get_mut::().unwrap(); @@ -408,49 +414,55 @@ impl<'w, 's> FilteredResourcesMut<'w, 's> { } /// Returns `true` if the `FilteredResources` has read access to the given resource. - /// Note that [`Self::get()`] may still return `None` if the resource does not exist. + /// Note that [`Self::get()`] may still return `Err` if the resource does not exist. pub fn has_read(&self) -> bool { let component_id = self.world.components().resource_id::(); component_id.is_some_and(|component_id| self.access.has_resource_read(component_id)) } /// Returns `true` if the `FilteredResources` has write access to the given resource. - /// Note that [`Self::get_mut()`] may still return `None` if the resource does not exist. + /// Note that [`Self::get_mut()`] may still return `Err` if the resource does not exist. pub fn has_write(&self) -> bool { let component_id = self.world.components().resource_id::(); component_id.is_some_and(|component_id| self.access.has_resource_write(component_id)) } /// Gets a reference to the resource of the given type if it exists and the `FilteredResources` has access to it. - pub fn get(&self) -> Option> { + pub fn get(&self) -> Result, ResourceFetchError> { self.as_readonly().get() } /// Gets a pointer to the resource with the given [`ComponentId`] if it exists and the `FilteredResources` has access to it. 
- pub fn get_by_id(&self, component_id: ComponentId) -> Option> { + pub fn get_by_id(&self, component_id: ComponentId) -> Result, ResourceFetchError> { self.as_readonly().get_by_id(component_id) } /// Gets a mutable reference to the resource of the given type if it exists and the `FilteredResources` has access to it. - pub fn get_mut(&mut self) -> Option> { + pub fn get_mut(&mut self) -> Result, ResourceFetchError> { // SAFETY: We have exclusive access to the resources in `access` for `'_`, and we shorten the returned lifetime to that. unsafe { self.get_mut_unchecked() } } /// Gets a mutable pointer to the resource with the given [`ComponentId`] if it exists and the `FilteredResources` has access to it. - pub fn get_mut_by_id(&mut self, component_id: ComponentId) -> Option> { + pub fn get_mut_by_id( + &mut self, + component_id: ComponentId, + ) -> Result, ResourceFetchError> { // SAFETY: We have exclusive access to the resources in `access` for `'_`, and we shorten the returned lifetime to that. unsafe { self.get_mut_by_id_unchecked(component_id) } } /// Consumes self and gets mutable access to resource of the given type with the world `'w` lifetime if it exists and the `FilteredResources` has access to it. - pub fn into_mut(mut self) -> Option> { + pub fn into_mut(mut self) -> Result, ResourceFetchError> { // SAFETY: This consumes self, so we have exclusive access to the resources in `access` for the entirety of `'w`. unsafe { self.get_mut_unchecked() } } /// Consumes self and gets mutable access to resource with the given [`ComponentId`] with the world `'w` lifetime if it exists and the `FilteredResources` has access to it. - pub fn into_mut_by_id(mut self, component_id: ComponentId) -> Option> { + pub fn into_mut_by_id( + mut self, + component_id: ComponentId, + ) -> Result, ResourceFetchError> { // SAFETY: This consumes self, so we have exclusive access to the resources in `access` for the entirety of `'w`. 
unsafe { self.get_mut_by_id_unchecked(component_id) } } @@ -458,8 +470,12 @@ impl<'w, 's> FilteredResourcesMut<'w, 's> { /// Gets a mutable pointer to the resource of the given type if it exists and the `FilteredResources` has access to it. /// # Safety /// It is the callers responsibility to ensure that there are no conflicting borrows of anything in `access` for the duration of the returned value. - unsafe fn get_mut_unchecked(&mut self) -> Option> { - let component_id = self.world.components().resource_id::()?; + unsafe fn get_mut_unchecked(&mut self) -> Result, ResourceFetchError> { + let component_id = self + .world + .components() + .resource_id::() + .ok_or(ResourceFetchError::NotRegistered)?; // SAFETY: THe caller ensures that there are no conflicting borrows. unsafe { self.get_mut_by_id_unchecked(component_id) } // SAFETY: The underlying type of the resource is `R`. @@ -472,22 +488,23 @@ impl<'w, 's> FilteredResourcesMut<'w, 's> { unsafe fn get_mut_by_id_unchecked( &mut self, component_id: ComponentId, - ) -> Option> { + ) -> Result, ResourceFetchError> { if !self.access.has_resource_write(component_id) { - return None; + return Err(ResourceFetchError::NoResourceAccess(component_id)); } - // SAFETY: We have access to this resource in `access`, and the caller ensures that there are no conflicting borrows for the duration of the returned value. - unsafe { self.world.get_resource_with_ticks(component_id) }.map( - |(value, ticks, _caller)| MutUntyped { - // SAFETY: We have exclusive access to the underlying storage. - value: unsafe { value.assert_unique() }, - // SAFETY: We have exclusive access to the underlying storage. - ticks: unsafe { TicksMut::from_tick_cells(ticks, self.last_run, self.this_run) }, - #[cfg(feature = "track_change_detection")] - // SAFETY: We have exclusive access to the underlying storage. 
- changed_by: unsafe { _caller.deref_mut() }, - }, - ) + + // SAFETY: We have read access to this resource + let (value, ticks, caller) = unsafe { self.world.get_resource_with_ticks(component_id) } + .ok_or(ResourceFetchError::DoesNotExist(component_id))?; + + Ok(MutUntyped { + // SAFETY: We have exclusive access to the underlying storage. + value: unsafe { value.assert_unique() }, + // SAFETY: We have exclusive access to the underlying storage. + ticks: unsafe { TicksMut::from_tick_cells(ticks, self.last_run, self.this_run) }, + // SAFETY: We have exclusive access to the underlying storage. + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, + }) } } @@ -546,7 +563,7 @@ impl<'w> FilteredResourcesBuilder<'w> { /// Add accesses required to read the resource of the given type. pub fn add_read(&mut self) -> &mut Self { - let component_id = self.world.components.register_resource::(); + let component_id = self.world.components_registrator().register_resource::(); self.add_read_by_id(component_id) } @@ -592,7 +609,7 @@ impl<'w> FilteredResourcesMutBuilder<'w> { /// Add accesses required to read the resource of the given type. pub fn add_read(&mut self) -> &mut Self { - let component_id = self.world.components.register_resource::(); + let component_id = self.world.components_registrator().register_resource::(); self.add_read_by_id(component_id) } @@ -610,7 +627,7 @@ impl<'w> FilteredResourcesMutBuilder<'w> { /// Add accesses required to get mutable access to the resource of the given type. 
pub fn add_write(&mut self) -> &mut Self { - let component_id = self.world.components.register_resource::(); + let component_id = self.world.components_registrator().register_resource::(); self.add_write_by_id(component_id) } diff --git a/crates/bevy_ecs/src/world/identifier.rs b/crates/bevy_ecs/src/world/identifier.rs index b1342e04dcc35..6b1c803e75375 100644 --- a/crates/bevy_ecs/src/world/identifier.rs +++ b/crates/bevy_ecs/src/world/identifier.rs @@ -4,12 +4,7 @@ use crate::{ system::{ExclusiveSystemParam, ReadOnlySystemParam, SystemMeta, SystemParam}, world::{FromWorld, World}, }; - -#[cfg(not(feature = "portable-atomic"))] -use core::sync::atomic::{AtomicUsize, Ordering}; - -#[cfg(feature = "portable-atomic")] -use portable_atomic::{AtomicUsize, Ordering}; +use bevy_platform::sync::atomic::{AtomicUsize, Ordering}; use super::unsafe_world_cell::UnsafeWorldCell; @@ -99,6 +94,7 @@ impl SparseSetIndex for WorldId { #[cfg(test)] mod tests { use super::*; + use alloc::vec::Vec; #[test] fn world_ids_unique() { diff --git a/crates/bevy_ecs/src/world/mod.rs b/crates/bevy_ecs/src/world/mod.rs index 997d9fcee7bd0..9bd8d699c6f21 100644 --- a/crates/bevy_ecs/src/world/mod.rs +++ b/crates/bevy_ecs/src/world/mod.rs @@ -18,9 +18,10 @@ pub use crate::{ change_detection::{Mut, Ref, CHECK_TICK_THRESHOLD}, world::command_queue::CommandQueue, }; +pub use bevy_ecs_macros::FromWorld; pub use component_constants::*; pub use deferred_world::DeferredWorld; -pub use entity_fetch::WorldEntityFetch; +pub use entity_fetch::{EntityFetcher, WorldEntityFetch}; pub use entity_ref::{ DynamicComponentFetch, EntityMut, EntityMutExcept, EntityRef, EntityRefExcept, EntityWorldMut, Entry, FilteredEntityMut, FilteredEntityRef, OccupiedEntry, TryFromFilteredError, VacantEntry, @@ -29,88 +30,54 @@ pub use filtered_resource::*; pub use identifier::WorldId; pub use spawn_batch::*; +#[expect( + deprecated, + reason = "We need to support `AllocAtWithoutReplacement` for now." 
+)] use crate::{ archetype::{ArchetypeId, ArchetypeRow, Archetypes}, - bundle::{Bundle, BundleInfo, BundleInserter, BundleSpawner, Bundles, InsertMode}, - change_detection::{MutUntyped, TicksMut}, + bundle::{ + Bundle, BundleEffect, BundleInfo, BundleInserter, BundleSpawner, Bundles, InsertMode, + NoBundleEffect, + }, + change_detection::{MaybeLocation, MutUntyped, TicksMut}, component::{ - Component, ComponentCloneHandlers, ComponentDescriptor, ComponentHooks, ComponentId, - ComponentInfo, ComponentTicks, Components, Mutable, RequiredComponents, - RequiredComponentsError, Tick, + Component, ComponentDescriptor, ComponentHooks, ComponentId, ComponentIds, ComponentInfo, + ComponentTicks, Components, ComponentsQueuedRegistrator, ComponentsRegistrator, Mutable, + RequiredComponents, RequiredComponentsError, Tick, + }, + entity::{ + AllocAtWithoutReplacement, Entities, Entity, EntityDoesNotExistError, EntityLocation, }, - entity::{AllocAtWithoutReplacement, Entities, Entity, EntityLocation}, + entity_disabling::DefaultQueryFilters, event::{Event, EventId, Events, SendBatchIds}, observer::Observers, query::{DebugCheckedUnwrap, QueryData, QueryFilter, QueryState}, + relationship::RelationshipHookMode, removal_detection::RemovedComponentEvents, + resource::Resource, schedule::{Schedule, ScheduleLabel, Schedules}, storage::{ResourceData, Storages}, - system::{Commands, Resource}, + system::Commands, world::{ command_queue::RawCommandQueue, - error::{EntityFetchError, TryRunScheduleError}, + error::{ + EntityDespawnError, EntityMutableFetchError, TryInsertBatchError, TryRunScheduleError, + }, }, }; use alloc::{boxed::Box, vec::Vec}; -use bevy_ptr::{OwningPtr, Ptr}; +use bevy_platform::sync::atomic::{AtomicU32, Ordering}; +use bevy_ptr::{OwningPtr, Ptr, UnsafeCellDeref}; use core::{any::TypeId, fmt}; use log::warn; - -#[cfg(not(feature = "portable-atomic"))] -use core::sync::atomic::{AtomicU32, Ordering}; - -#[cfg(feature = "portable-atomic")] -use 
portable_atomic::{AtomicU32, Ordering}; - -#[cfg(feature = "track_change_detection")] -use bevy_ptr::UnsafeCellDeref; - -use core::panic::Location; - use unsafe_world_cell::{UnsafeEntityCell, UnsafeWorldCell}; -/// A [`World`] mutation. -/// -/// Should be used with [`Commands::queue`]. -/// -/// # Usage -/// -/// ``` -/// # use bevy_ecs::prelude::*; -/// # use bevy_ecs::world::Command; -/// // Our world resource -/// #[derive(Resource, Default)] -/// struct Counter(u64); -/// -/// // Our custom command -/// struct AddToCounter(u64); -/// -/// impl Command for AddToCounter { -/// fn apply(self, world: &mut World) { -/// let mut counter = world.get_resource_or_insert_with(Counter::default); -/// counter.0 += self.0; -/// } -/// } -/// -/// fn some_system(mut commands: Commands) { -/// commands.queue(AddToCounter(42)); -/// } -/// ``` -pub trait Command: Send + 'static { - /// Applies this command, causing it to mutate the provided `world`. - /// - /// This method is used to define what a command "does" when it is ultimately applied. - /// Because this method takes `self`, you can store data or settings on the type that implements this trait. - /// This data is set by the system or other source of the command, and then ultimately read in this method. - fn apply(self, world: &mut World); -} - /// Stores and exposes operations on [entities](Entity), [components](Component), resources, /// and their associated metadata. /// -/// Each [`Entity`] has a set of components. Each component can have up to one instance of each -/// component type. Entity components can be created, updated, removed, and queried using a given -/// [`World`]. +/// Each [`Entity`] has a set of unique components, based on their type. +/// Entity components can be created, updated, removed, and queried using a given /// /// For complex access patterns involving [`SystemParam`](crate::system::SystemParam), /// consider using [`SystemState`](crate::system::SystemState). 
@@ -128,6 +95,7 @@ pub struct World { id: WorldId, pub(crate) entities: Entities, pub(crate) components: Components, + pub(crate) component_ids: ComponentIds, pub(crate) archetypes: Archetypes, pub(crate) storages: Storages, pub(crate) bundles: Bundles, @@ -158,6 +126,7 @@ impl Default for World { last_check_tick: Tick::new(0), last_trigger_id: 0, command_queue: RawCommandQueue::new(), + component_ids: ComponentIds::default(), }; world.bootstrap(); world @@ -182,10 +151,24 @@ impl World { /// This _must_ be run as part of constructing a [`World`], before it is returned to the caller. #[inline] fn bootstrap(&mut self) { - assert_eq!(ON_ADD, self.register_component::()); - assert_eq!(ON_INSERT, self.register_component::()); - assert_eq!(ON_REPLACE, self.register_component::()); - assert_eq!(ON_REMOVE, self.register_component::()); + // The order that we register these events is vital to ensure that the constants are correct! + let on_add = OnAdd::register_component_id(self); + assert_eq!(ON_ADD, on_add); + + let on_insert = OnInsert::register_component_id(self); + assert_eq!(ON_INSERT, on_insert); + + let on_replace = OnReplace::register_component_id(self); + assert_eq!(ON_REPLACE, on_replace); + + let on_remove = OnRemove::register_component_id(self); + assert_eq!(ON_REMOVE, on_remove); + + let on_despawn = OnDespawn::register_component_id(self); + assert_eq!(ON_DESPAWN, on_despawn); + + // This sets up `Disabled` as a disabling component, via the FromWorld impl + self.init_resource::(); } /// Creates a new empty [`World`]. /// @@ -245,6 +228,22 @@ impl World { &self.components } + /// Prepares a [`ComponentsQueuedRegistrator`] for the world. + /// **NOTE:** [`ComponentsQueuedRegistrator`] is easily misused. + /// See its docs for important notes on when and how it should be used. + #[inline] + pub fn components_queue(&self) -> ComponentsQueuedRegistrator { + // SAFETY: These are from the same world. 
+ unsafe { ComponentsQueuedRegistrator::new(&self.components, &self.component_ids) } + } + + /// Prepares a [`ComponentsRegistrator`] for the world. + #[inline] + pub fn components_registrator(&mut self) -> ComponentsRegistrator { + // SAFETY: These are from the same world. + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) } + } + /// Retrieves this world's [`Storages`] collection. #[inline] pub fn storages(&self) -> &Storages { @@ -272,8 +271,20 @@ impl World { } /// Registers a new [`Component`] type and returns the [`ComponentId`] created for it. + /// + /// # Usage Notes + /// In most cases, you don't need to call this method directly since component registration + /// happens automatically during system initialization. pub fn register_component(&mut self) -> ComponentId { - self.components.register_component::(&mut self.storages) + self.components_registrator().register_component::() + } + + /// Registers a component type as "disabling", + /// using [default query filters](DefaultQueryFilters) to exclude entities with the component from queries. + pub fn register_disabling_component(&mut self) { + let component_id = self.register_component::(); + let mut dqf = self.resource_mut::(); + dqf.register_disabling_component(component_id); } /// Returns a mutable reference to the [`ComponentHooks`] for a [`Component`] type. @@ -549,8 +560,8 @@ impl World { &mut self, descriptor: ComponentDescriptor, ) -> ComponentId { - self.components - .register_component_with_descriptor(&mut self.storages, descriptor) + self.components_registrator() + .register_component_with_descriptor(descriptor) } /// Returns the [`ComponentId`] of the given [`Component`] type `T`. @@ -589,7 +600,7 @@ impl World { /// to insert the [`Resource`] in the [`World`], use [`World::init_resource`] or /// [`World::insert_resource`] instead. 
pub fn register_resource(&mut self) -> ComponentId { - self.components.register_resource::() + self.components_registrator().register_resource::() } /// Returns the [`ComponentId`] of the given [`Resource`] type `T`. @@ -612,8 +623,6 @@ impl World { /// - Pass an [`Entity`] to receive a single [`EntityRef`]. /// - Pass a slice of [`Entity`]s to receive a [`Vec`]. /// - Pass an array of [`Entity`]s to receive an equally-sized array of [`EntityRef`]s. - /// - Pass a reference to a [`EntityHashSet`] to receive an - /// [`EntityHashMap`](crate::entity::EntityHashMap). /// /// # Panics /// @@ -710,16 +719,14 @@ impl World { #[track_caller] fn panic_no_entity(world: &World, entity: Entity) -> ! { panic!( - "Entity {entity:?} {}", - world - .entities - .entity_does_not_exist_error_details_message(entity) + "Entity {entity} {}", + world.entities.entity_does_not_exist_error_details(entity) ); } match self.get_entity(entities) { Ok(fetched) => fetched, - Err(entity) => panic_no_entity(self, entity), + Err(error) => panic_no_entity(self, error.entity), } } @@ -846,7 +853,7 @@ impl World { #[inline(never)] #[cold] #[track_caller] - fn panic_on_err(e: EntityFetchError) -> ! { + fn panic_on_err(e: EntityMutableFetchError) -> ! { panic!("{e}"); } @@ -858,68 +865,23 @@ impl World { /// Returns the components of an [`Entity`] through [`ComponentInfo`]. 
#[inline] - pub fn inspect_entity(&self, entity: Entity) -> impl Iterator { + pub fn inspect_entity( + &self, + entity: Entity, + ) -> Result, EntityDoesNotExistError> { let entity_location = self .entities() .get(entity) - .unwrap_or_else(|| panic!("Entity {entity:?} does not exist")); + .ok_or(EntityDoesNotExistError::new(entity, self.entities()))?; let archetype = self .archetypes() .get(entity_location.archetype_id) - .unwrap_or_else(|| { - panic!( - "Archetype {:?} does not exist", - entity_location.archetype_id - ) - }); + .expect("ArchetypeId was retrieved from an EntityLocation and should correspond to an Archetype"); - archetype + Ok(archetype .components() - .filter_map(|id| self.components().get_info(id)) - } - - /// Returns an [`EntityWorldMut`] for the given `entity` (if it exists) or spawns one if it doesn't exist. - /// This will return [`None`] if the `entity` exists with a different generation. - /// - /// # Note - /// Spawning a specific `entity` value is rarely the right choice. Most apps should favor [`World::spawn`]. - /// This method should generally only be used for sharing entities across apps, and only when they have a - /// scheme worked out to share an ID space (which doesn't happen by default). 
- #[inline] - #[deprecated(since = "0.15.0", note = "use `World::spawn` instead")] - pub fn get_or_spawn(&mut self, entity: Entity) -> Option { - self.get_or_spawn_with_caller( - entity, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ) - } - - #[inline] - pub(crate) fn get_or_spawn_with_caller( - &mut self, - entity: Entity, - #[cfg(feature = "track_change_detection")] caller: &'static Location, - ) -> Option { - self.flush(); - match self.entities.alloc_at_without_replacement(entity) { - AllocAtWithoutReplacement::Exists(location) => { - // SAFETY: `entity` exists and `location` is that entity's location - Some(unsafe { EntityWorldMut::new(self, entity, location) }) - } - AllocAtWithoutReplacement::DidNotExist => { - // SAFETY: entity was just allocated - Some(unsafe { - self.spawn_at_empty_internal( - entity, - #[cfg(feature = "track_change_detection")] - caller, - ) - }) - } - AllocAtWithoutReplacement::ExistsWithWrongGeneration => None, - } + .filter_map(|id| self.components().get_info(id))) } /// Returns [`EntityRef`]s that expose read-only operations for the given @@ -937,7 +899,7 @@ impl World { /// # Errors /// /// If any of the given `entities` do not exist in the world, the first - /// [`Entity`] found to be missing will be returned in the [`Err`]. + /// [`Entity`] found to be missing will return an [`EntityDoesNotExistError`]. /// /// # Examples /// @@ -945,7 +907,10 @@ impl World { /// /// [`EntityHashSet`]: crate::entity::EntityHashSet #[inline] - pub fn get_entity(&self, entities: F) -> Result, Entity> { + pub fn get_entity( + &self, + entities: F, + ) -> Result, EntityDoesNotExistError> { let cell = self.as_unsafe_world_cell_readonly(); // SAFETY: `&self` gives read access to the entire world, and prevents mutable access. unsafe { entities.fetch_ref(cell) } @@ -973,9 +938,9 @@ impl World { /// /// # Errors /// - /// - Returns [`EntityFetchError::NoSuchEntity`] if any of the given `entities` do not exist in the world. 
+ /// - Returns [`EntityMutableFetchError::EntityDoesNotExist`] if any of the given `entities` do not exist in the world. /// - Only the first entity found to be missing will be returned. - /// - Returns [`EntityFetchError::AliasedMutability`] if the same entity is requested multiple times. + /// - Returns [`EntityMutableFetchError::AliasedMutability`] if the same entity is requested multiple times. /// /// # Examples /// @@ -986,7 +951,7 @@ impl World { pub fn get_entity_mut( &mut self, entities: F, - ) -> Result, EntityFetchError> { + ) -> Result, EntityMutableFetchError> { let cell = self.as_unsafe_world_cell(); // SAFETY: `&mut self` gives mutable access to the entire world, // and prevents any other access to the world. @@ -1050,6 +1015,52 @@ impl World { }) } + /// Simultaneously provides access to entity data and a command queue, which + /// will be applied when the world is next flushed. + /// + /// This allows using borrowed entity data to construct commands where the + /// borrow checker would otherwise prevent it. + /// + /// See [`DeferredWorld::entities_and_commands`] for the deferred version. + /// + /// # Example + /// + /// ```rust + /// # use bevy_ecs::{prelude::*, world::DeferredWorld}; + /// #[derive(Component)] + /// struct Targets(Vec); + /// #[derive(Component)] + /// struct TargetedBy(Entity); + /// + /// let mut world: World = // ... 
+ /// # World::new(); + /// # let e1 = world.spawn_empty().id(); + /// # let e2 = world.spawn_empty().id(); + /// # let eid = world.spawn(Targets(vec![e1, e2])).id(); + /// let (entities, mut commands) = world.entities_and_commands(); + /// + /// let entity = entities.get(eid).unwrap(); + /// for &target in entity.get::().unwrap().0.iter() { + /// commands.entity(target).insert(TargetedBy(eid)); + /// } + /// # world.flush(); + /// # assert_eq!(world.get::(e1).unwrap().0, eid); + /// # assert_eq!(world.get::(e2).unwrap().0, eid); + /// ``` + pub fn entities_and_commands(&mut self) -> (EntityFetcher, Commands) { + let cell = self.as_unsafe_world_cell(); + // SAFETY: `&mut self` gives mutable access to the entire world, and prevents simultaneous access. + let fetcher = unsafe { EntityFetcher::new(cell) }; + // SAFETY: + // - `&mut self` gives mutable access to the entire world, and prevents simultaneous access. + // - Command queue access does not conflict with entity access. + let raw_queue = unsafe { cell.get_raw_command_queue() }; + // SAFETY: `&mut self` ensures the commands does not outlive the world. + let commands = unsafe { Commands::new_raw_from_entities(raw_queue, cell.entities()) }; + + (fetcher, commands) + } + /// Spawns a new [`Entity`] and returns a corresponding [`EntityWorldMut`], which can be used /// to add components to the entity or retrieve its id. 
/// @@ -1080,13 +1091,7 @@ impl World { self.flush(); let entity = self.entities.alloc(); // SAFETY: entity was just allocated - unsafe { - self.spawn_at_empty_internal( - entity, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ) - } + unsafe { self.spawn_at_empty_internal(entity, MaybeLocation::caller()) } } /// Spawns a new [`Entity`] with a given [`Bundle`] of [components](`Component`) and returns @@ -1151,28 +1156,38 @@ impl World { /// ``` #[track_caller] pub fn spawn(&mut self, bundle: B) -> EntityWorldMut { + self.spawn_with_caller(bundle, MaybeLocation::caller()) + } + + pub(crate) fn spawn_with_caller( + &mut self, + bundle: B, + caller: MaybeLocation, + ) -> EntityWorldMut { self.flush(); let change_tick = self.change_tick(); let entity = self.entities.alloc(); - let entity_location = { - let mut bundle_spawner = BundleSpawner::new::(self, change_tick); - // SAFETY: bundle's type matches `bundle_info`, entity is allocated but non-existent - unsafe { - bundle_spawner.spawn_non_existent( - entity, - bundle, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ) - } - }; + let mut bundle_spawner = BundleSpawner::new::(self, change_tick); + // SAFETY: bundle's type matches `bundle_info`, entity is allocated but non-existent + let (mut entity_location, after_effect) = + unsafe { bundle_spawner.spawn_non_existent(entity, bundle, caller) }; + + // SAFETY: command_queue is not referenced anywhere else + if !unsafe { self.command_queue.is_empty() } { + self.flush(); + entity_location = self + .entities() + .get(entity) + .unwrap_or(EntityLocation::INVALID); + } - #[cfg(feature = "track_change_detection")] self.entities - .set_spawned_or_despawned_by(entity.index(), Location::caller()); + .set_spawned_or_despawned_by(entity.index(), caller); // SAFETY: entity and location are valid, as they were just created above - unsafe { EntityWorldMut::new(self, entity, entity_location) } + let mut entity = unsafe { 
EntityWorldMut::new(self, entity, entity_location) }; + after_effect.apply(&mut entity); + entity } /// # Safety @@ -1180,7 +1195,7 @@ impl World { unsafe fn spawn_at_empty_internal( &mut self, entity: Entity, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) -> EntityWorldMut { let archetype = self.archetypes.empty_mut(); // PERF: consider avoiding allocating entities in the empty archetype unless needed @@ -1190,7 +1205,6 @@ impl World { let location = unsafe { archetype.allocate(entity, table_row) }; self.entities.set(entity.index(), location); - #[cfg(feature = "track_change_detection")] self.entities .set_spawned_or_despawned_by(entity.index(), caller); @@ -1223,14 +1237,9 @@ impl World { pub fn spawn_batch(&mut self, iter: I) -> SpawnBatchIter<'_, I::IntoIter> where I: IntoIterator, - I::Item: Bundle, + I::Item: Bundle, { - SpawnBatchIter::new( - self, - iter.into_iter(), - #[cfg(feature = "track_change_detection")] - Location::caller(), - ) + SpawnBatchIter::new(self, iter.into_iter(), MaybeLocation::caller()) } /// Retrieves a reference to the given `entity`'s [`Component`] of the given type. @@ -1275,21 +1284,93 @@ impl World { &mut self, entity: Entity, ) -> Option> { - // SAFETY: - // - `as_unsafe_world_cell` is the only thing that is borrowing world - // - `as_unsafe_world_cell` provides mutable permission to everything - // - `&mut self` ensures no other borrows on world data - unsafe { self.as_unsafe_world_cell().get_entity(entity)?.get_mut() } + self.get_entity_mut(entity).ok()?.into_mut() } - /// Despawns the given `entity`, if it exists. This will also remove all of the entity's - /// [`Component`]s. Returns `true` if the `entity` is successfully despawned and `false` if - /// the `entity` does not exist. + /// Temporarily removes a [`Component`] `T` from the provided [`Entity`] and + /// runs the provided closure on it, returning the result if `T` was available. 
+ /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut`](World::get_mut). + /// + /// # Examples + /// + /// ```rust + /// # use bevy_ecs::prelude::*; + /// # + /// #[derive(Component, PartialEq, Eq, Debug)] + /// #[component(immutable)] + /// struct Foo(bool); + /// + /// # let mut world = World::default(); + /// # world.register_component::(); + /// # + /// # let entity = world.spawn(Foo(false)).id(); + /// # + /// world.modify_component(entity, |foo: &mut Foo| { + /// foo.0 = true; + /// }); + /// # + /// # assert_eq!(world.get::(entity), Some(&Foo(true))); + /// ``` + #[inline] + pub fn modify_component( + &mut self, + entity: Entity, + f: impl FnOnce(&mut T) -> R, + ) -> Result, EntityMutableFetchError> { + let mut world = DeferredWorld::from(&mut *self); + + let result = world.modify_component(entity, f)?; + + self.flush(); + Ok(result) + } + + /// Temporarily removes a [`Component`] identified by the provided + /// [`ComponentId`] from the provided [`Entity`] and runs the provided + /// closure on it, returning the result if the component was available. + /// This will trigger the `OnRemove` and `OnReplace` component hooks without + /// causing an archetype move. + /// + /// This is most useful with immutable components, where removal and reinsertion + /// is the only way to modify a value. + /// + /// If you do not need to ensure the above hooks are triggered, and your component + /// is mutable, prefer using [`get_mut_by_id`](World::get_mut_by_id). + /// + /// You should prefer the typed [`modify_component`](World::modify_component) + /// whenever possible. 
+ #[inline] + pub fn modify_component_by_id( + &mut self, + entity: Entity, + component_id: ComponentId, + f: impl for<'a> FnOnce(MutUntyped<'a>) -> R, + ) -> Result, EntityMutableFetchError> { + let mut world = DeferredWorld::from(&mut *self); + + let result = world.modify_component_by_id(entity, component_id, f)?; + + self.flush(); + Ok(result) + } + + /// Despawns the given [`Entity`], if it exists. This will also remove all of the entity's + /// [`Components`](Component). + /// + /// Returns `true` if the entity is successfully despawned and `false` if + /// the entity does not exist. /// /// # Note /// - /// This won't clean up external references to the entity (such as parent-child relationships - /// if you're using `bevy_hierarchy`), which may leave the world in an invalid state. + /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) that is configured + /// to despawn descendants. For example, this will recursively despawn [`Children`](crate::hierarchy::Children). /// /// ``` /// use bevy_ecs::{component::Component, world::World}; @@ -1309,37 +1390,39 @@ impl World { #[track_caller] #[inline] pub fn despawn(&mut self, entity: Entity) -> bool { - self.despawn_with_caller(entity, Location::caller(), true) + if let Err(error) = self.despawn_with_caller(entity, MaybeLocation::caller()) { + warn!("{error}"); + false + } else { + true + } } - /// Performs the same function as [`Self::despawn`] but does not emit a warning if - /// the entity does not exist. + /// Despawns the given `entity`, if it exists. This will also remove all of the entity's + /// [`Components`](Component). + /// + /// Returns an [`EntityDespawnError`] if the entity does not exist. + /// + /// # Note + /// + /// This will also despawn the entities in any [`RelationshipTarget`](crate::relationship::RelationshipTarget) that is configured + /// to despawn descendants. 
For example, this will recursively despawn [`Children`](crate::hierarchy::Children). #[track_caller] #[inline] - pub fn try_despawn(&mut self, entity: Entity) -> bool { - self.despawn_with_caller(entity, Location::caller(), false) + pub fn try_despawn(&mut self, entity: Entity) -> Result<(), EntityDespawnError> { + self.despawn_with_caller(entity, MaybeLocation::caller()) } #[inline] pub(crate) fn despawn_with_caller( &mut self, entity: Entity, - caller: &'static Location, - log_warning: bool, - ) -> bool { + caller: MaybeLocation, + ) -> Result<(), EntityDespawnError> { self.flush(); - if let Ok(entity) = self.get_entity_mut(entity) { - entity.despawn_with_caller( - #[cfg(feature = "track_change_detection")] - caller, - ); - true - } else { - if log_warning { - warn!("error[B0003]: {caller}: Could not despawn entity {entity:?}, which {}. See: https://bevyengine.org/learn/errors/b0003", self.entities.entity_does_not_exist_error_details_message(entity)); - } - false - } + let entity = self.get_entity_mut(entity)?; + entity.despawn_with_caller(caller); + Ok(()) } /// Clears the internal component tracker state. @@ -1589,7 +1672,7 @@ impl World { &mut self, descriptor: ComponentDescriptor, ) -> ComponentId { - self.components + self.components_registrator() .register_resource_with_descriptor(descriptor) } @@ -1603,25 +1686,19 @@ impl World { #[inline] #[track_caller] pub fn init_resource(&mut self) -> ComponentId { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - let component_id = self.components.register_resource::(); + let caller = MaybeLocation::caller(); + let component_id = self.components_registrator().register_resource::(); if self .storages .resources .get(component_id) - .map_or(true, |data| !data.is_present()) + .is_none_or(|data| !data.is_present()) { let value = R::from_world(self); OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. 
unsafe { - self.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_change_detection")] - caller, - ); + self.insert_resource_by_id(component_id, ptr, caller); } }); } @@ -1636,11 +1713,7 @@ impl World { #[inline] #[track_caller] pub fn insert_resource(&mut self, value: R) { - self.insert_resource_with_caller( - value, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ); + self.insert_resource_with_caller(value, MaybeLocation::caller()); } /// Split into a new function so we can pass the calling location into the function when using @@ -1649,18 +1722,13 @@ impl World { pub(crate) fn insert_resource_with_caller( &mut self, value: R, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) { - let component_id = self.components.register_resource::(); + let component_id = self.components_registrator().register_resource::(); OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. unsafe { - self.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_change_detection")] - caller, - ); + self.insert_resource_by_id(component_id, ptr, caller); } }); } @@ -1679,25 +1747,19 @@ impl World { #[inline] #[track_caller] pub fn init_non_send_resource(&mut self) -> ComponentId { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - let component_id = self.components.register_non_send::(); + let caller = MaybeLocation::caller(); + let component_id = self.components_registrator().register_non_send::(); if self .storages .non_send_resources .get(component_id) - .map_or(true, |data| !data.is_present()) + .is_none_or(|data| !data.is_present()) { let value = R::from_world(self); OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. 
unsafe { - self.insert_non_send_by_id( - component_id, - ptr, - #[cfg(feature = "track_change_detection")] - caller, - ); + self.insert_non_send_by_id(component_id, ptr, caller); } }); } @@ -1716,18 +1778,12 @@ impl World { #[inline] #[track_caller] pub fn insert_non_send_resource(&mut self, value: R) { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); - let component_id = self.components.register_non_send::(); + let caller = MaybeLocation::caller(); + let component_id = self.components_registrator().register_non_send::(); OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. unsafe { - self.insert_non_send_by_id( - component_id, - ptr, - #[cfg(feature = "track_change_detection")] - caller, - ); + self.insert_non_send_by_id(component_id, ptr, caller); } }); } @@ -1824,12 +1880,11 @@ impl World { self.storages .resources .get(component_id) - .and_then(|resource| { - resource - .get_ticks() - .map(|ticks| ticks.is_added(self.last_change_tick(), self.read_change_tick())) + .is_some_and(|resource| { + resource.get_ticks().is_some_and(|ticks| { + ticks.is_added(self.last_change_tick(), self.read_change_tick()) + }) }) - .unwrap_or(false) } /// Returns `true` if a resource of type `R` exists and was modified since the world's @@ -1842,8 +1897,7 @@ impl World { pub fn is_resource_changed(&self) -> bool { self.components .get_resource_id(TypeId::of::()) - .map(|component_id| self.is_resource_changed_by_id(component_id)) - .unwrap_or(false) + .is_some_and(|component_id| self.is_resource_changed_by_id(component_id)) } /// Returns `true` if a resource with id `component_id` exists and was modified since the world's @@ -1857,12 +1911,11 @@ impl World { self.storages .resources .get(component_id) - .and_then(|resource| { - resource - .get_ticks() - .map(|ticks| ticks.is_changed(self.last_change_tick(), self.read_change_tick())) + .is_some_and(|resource| { + 
resource.get_ticks().is_some_and(|ticks| { + ticks.is_changed(self.last_change_tick(), self.read_change_tick()) + }) }) - .unwrap_or(false) } /// Retrieves the change ticks for the given resource. @@ -2005,23 +2058,17 @@ impl World { &mut self, func: impl FnOnce() -> R, ) -> Mut<'_, R> { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); let change_tick = self.change_tick(); let last_change_tick = self.last_change_tick(); - let component_id = self.components.register_resource::(); + let component_id = self.components_registrator().register_resource::(); let data = self.initialize_resource_internal(component_id); if !data.is_present() { OwningPtr::make(func(), |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. unsafe { - data.insert( - ptr, - change_tick, - #[cfg(feature = "track_change_detection")] - caller, - ); + data.insert(ptr, change_tick, caller); } }); } @@ -2069,28 +2116,22 @@ impl World { /// ``` #[track_caller] pub fn get_resource_or_init(&mut self) -> Mut<'_, R> { - #[cfg(feature = "track_change_detection")] - let caller = Location::caller(); + let caller = MaybeLocation::caller(); let change_tick = self.change_tick(); let last_change_tick = self.last_change_tick(); - let component_id = self.components.register_resource::(); + let component_id = self.components_registrator().register_resource::(); if self .storages .resources .get(component_id) - .map_or(true, |data| !data.is_present()) + .is_none_or(|data| !data.is_present()) { let value = R::from_world(self); OwningPtr::make(value, |ptr| { // SAFETY: component_id was just initialized and corresponds to resource of type R. 
unsafe { - self.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_change_detection")] - caller, - ); + self.insert_resource_by_id(component_id, ptr, caller); } }); } @@ -2211,39 +2252,49 @@ impl World { /// assert_eq!(world.get::(e0), Some(&B(0.0))); /// ``` #[track_caller] + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used with lots of arbitrary free entities. See #18054 on GitHub." + )] pub fn insert_or_spawn_batch(&mut self, iter: I) -> Result<(), Vec> where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { - self.insert_or_spawn_batch_with_caller( - iter, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ) + #[expect( + deprecated, + reason = "This needs to be supported for now, and the outer function is deprecated too." + )] + self.insert_or_spawn_batch_with_caller(iter, MaybeLocation::caller()) } /// Split into a new function so we can pass the calling location into the function when using /// as a command. #[inline] + #[deprecated( + since = "0.16.0", + note = "This can cause extreme performance problems when used with lots of arbitrary free entities. See #18054 on GitHub." + )] pub(crate) fn insert_or_spawn_batch_with_caller( &mut self, iter: I, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) -> Result<(), Vec> where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { self.flush(); - let change_tick = self.change_tick(); + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. 
+ let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; let bundle_id = self .bundles - .register_info::(&mut self.components, &mut self.storages); + .register_info::(&mut registrator, &mut self.storages); enum SpawnOrInsert<'w> { Spawn(BundleSpawner<'w>), Insert(BundleInserter<'w>, ArchetypeId), @@ -2264,6 +2315,10 @@ impl World { let mut invalid_entities = Vec::new(); for (entity, bundle) in iter { + #[expect( + deprecated, + reason = "This needs to be supported for now, and the outer function is deprecated too." + )] match spawn_or_insert .entities() .alloc_at_without_replacement(entity) @@ -2280,8 +2335,8 @@ impl World { location, bundle, InsertMode::Replace, - #[cfg(feature = "track_change_detection")] caller, + RelationshipHookMode::Run, ) }; } @@ -2302,8 +2357,8 @@ impl World { location, bundle, InsertMode::Replace, - #[cfg(feature = "track_change_detection")] caller, + RelationshipHookMode::Run, ) }; spawn_or_insert = @@ -2314,27 +2369,13 @@ impl World { AllocAtWithoutReplacement::DidNotExist => { if let SpawnOrInsert::Spawn(ref mut spawner) = spawn_or_insert { // SAFETY: `entity` is allocated (but non existent), bundle matches inserter - unsafe { - spawner.spawn_non_existent( - entity, - bundle, - #[cfg(feature = "track_change_detection")] - caller, - ) - }; + unsafe { spawner.spawn_non_existent(entity, bundle, caller) }; } else { // SAFETY: we initialized this bundle_id in `init_info` let mut spawner = unsafe { BundleSpawner::new_with_id(self, bundle_id, change_tick) }; // SAFETY: `entity` is valid, `location` matches entity, bundle matches inserter - unsafe { - spawner.spawn_non_existent( - entity, - bundle, - #[cfg(feature = "track_change_detection")] - caller, - ) - }; + unsafe { spawner.spawn_non_existent(entity, bundle, caller) }; spawn_or_insert = SpawnOrInsert::Spawn(spawner); } } @@ -2365,20 +2406,15 @@ impl World { /// /// This function will panic if any of the associated entities do not 
exist. /// - /// For the non-panicking version, see [`World::try_insert_batch`]. + /// For the fallible version, see [`World::try_insert_batch`]. #[track_caller] pub fn insert_batch(&mut self, batch: I) where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { - self.insert_batch_with_caller( - batch, - InsertMode::Replace, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ); + self.insert_batch_with_caller(batch, InsertMode::Replace, MaybeLocation::caller()); } /// For a given batch of ([`Entity`], [`Bundle`]) pairs, @@ -2395,20 +2431,15 @@ impl World { /// /// This function will panic if any of the associated entities do not exist. /// - /// For the non-panicking version, see [`World::try_insert_batch_if_new`]. + /// For the fallible version, see [`World::try_insert_batch_if_new`]. #[track_caller] pub fn insert_batch_if_new(&mut self, batch: I) where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { - self.insert_batch_with_caller( - batch, - InsertMode::Keep, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ); + self.insert_batch_with_caller(batch, InsertMode::Keep, MaybeLocation::caller()); } /// Split into a new function so we can differentiate the calling location. 
@@ -2416,35 +2447,34 @@ impl World { /// This can be called by: /// - [`World::insert_batch`] /// - [`World::insert_batch_if_new`] - /// - [`Commands::insert_batch`] - /// - [`Commands::insert_batch_if_new`] #[inline] pub(crate) fn insert_batch_with_caller( &mut self, - iter: I, + batch: I, insert_mode: InsertMode, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { - self.flush(); - - let change_tick = self.change_tick(); - - let bundle_id = self - .bundles - .register_info::(&mut self.components, &mut self.storages); - struct InserterArchetypeCache<'w> { inserter: BundleInserter<'w>, archetype_id: ArchetypeId, } - let mut batch = iter.into_iter(); + self.flush(); + let change_tick = self.change_tick(); + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; + let bundle_id = self + .bundles + .register_info::(&mut registrator, &mut self.storages); + + let mut batch_iter = batch.into_iter(); - if let Some((first_entity, first_bundle)) = batch.next() { + if let Some((first_entity, first_bundle)) = batch_iter.next() { if let Some(first_location) = self.entities().get(first_entity) { let mut cache = InserterArchetypeCache { // SAFETY: we initialized this bundle_id in `register_info` @@ -2465,12 +2495,12 @@ impl World { first_location, first_bundle, insert_mode, - #[cfg(feature = "track_change_detection")] caller, + RelationshipHookMode::Run, ) }; - for (entity, bundle) in batch { + for (entity, bundle) in batch_iter { if let Some(location) = cache.inserter.entities().get(entity) { if location.archetype_id != cache.archetype_id { cache = InserterArchetypeCache { @@ -2493,16 +2523,16 @@ impl World { location, bundle, insert_mode, - #[cfg(feature = "track_change_detection")] 
caller, + RelationshipHookMode::Run, ) }; } else { - panic!("error[B0003]: Could not insert a bundle (of type `{}`) for entity {entity:?}, which {}. See: https://bevyengine.org/learn/errors/b0003", core::any::type_name::(), self.entities.entity_does_not_exist_error_details_message(entity)); + panic!("error[B0003]: Could not insert a bundle (of type `{}`) for entity {entity}, which {}. See: https://bevyengine.org/learn/errors/b0003", core::any::type_name::(), self.entities.entity_does_not_exist_error_details(entity)); } } } else { - panic!("error[B0003]: Could not insert a bundle (of type `{}`) for entity {first_entity:?}, which {}. See: https://bevyengine.org/learn/errors/b0003", core::any::type_name::(), self.entities.entity_does_not_exist_error_details_message(first_entity)); + panic!("error[B0003]: Could not insert a bundle (of type `{}`) for entity {first_entity}, which {}. See: https://bevyengine.org/learn/errors/b0003", core::any::type_name::(), self.entities.entity_does_not_exist_error_details(first_entity)); } } } @@ -2517,22 +2547,17 @@ impl World { /// This will overwrite any previous values of components shared by the `Bundle`. /// See [`World::try_insert_batch_if_new`] to keep the old values instead. /// - /// This function silently fails by ignoring any entities that do not exist. + /// Returns a [`TryInsertBatchError`] if any of the provided entities do not exist. /// /// For the panicking version, see [`World::insert_batch`]. 
#[track_caller] - pub fn try_insert_batch(&mut self, batch: I) + pub fn try_insert_batch(&mut self, batch: I) -> Result<(), TryInsertBatchError> where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { - self.try_insert_batch_with_caller( - batch, - InsertMode::Replace, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ); + self.try_insert_batch_with_caller(batch, InsertMode::Replace, MaybeLocation::caller()) } /// For a given batch of ([`Entity`], [`Bundle`]) pairs, /// adds the `Bundle` of components to each `Entity` without overwriting. @@ -2544,22 +2569,17 @@ impl World { /// This is the same as [`World::try_insert_batch`], but in case of duplicate /// components it will leave the old values instead of replacing them with new ones. /// - /// This function silently fails by ignoring any entities that do not exist. + /// Returns a [`TryInsertBatchError`] if any of the provided entities do not exist. /// /// For the panicking version, see [`World::insert_batch_if_new`]. #[track_caller] - pub fn try_insert_batch_if_new(&mut self, batch: I) + pub fn try_insert_batch_if_new(&mut self, batch: I) -> Result<(), TryInsertBatchError> where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { - self.try_insert_batch_with_caller( - batch, - InsertMode::Keep, - #[cfg(feature = "track_change_detection")] - Location::caller(), - ); + self.try_insert_batch_with_caller(batch, InsertMode::Keep, MaybeLocation::caller()) } /// Split into a new function so we can differentiate the calling location. 
@@ -2567,91 +2587,119 @@ impl World { /// This can be called by: /// - [`World::try_insert_batch`] /// - [`World::try_insert_batch_if_new`] + /// - [`Commands::insert_batch`] + /// - [`Commands::insert_batch_if_new`] /// - [`Commands::try_insert_batch`] /// - [`Commands::try_insert_batch_if_new`] #[inline] pub(crate) fn try_insert_batch_with_caller( &mut self, - iter: I, + batch: I, insert_mode: InsertMode, - #[cfg(feature = "track_change_detection")] caller: &'static Location, - ) where + caller: MaybeLocation, + ) -> Result<(), TryInsertBatchError> + where I: IntoIterator, I::IntoIter: Iterator, - B: Bundle, + B: Bundle, { - self.flush(); - - let change_tick = self.change_tick(); - - let bundle_id = self - .bundles - .register_info::(&mut self.components, &mut self.storages); - struct InserterArchetypeCache<'w> { inserter: BundleInserter<'w>, archetype_id: ArchetypeId, } - let mut batch = iter.into_iter(); - - if let Some((first_entity, first_bundle)) = batch.next() { - if let Some(first_location) = self.entities().get(first_entity) { - let mut cache = InserterArchetypeCache { - // SAFETY: we initialized this bundle_id in `register_info` - inserter: unsafe { - BundleInserter::new_with_id( - self, - first_location.archetype_id, - bundle_id, - change_tick, + self.flush(); + let change_tick = self.change_tick(); + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; + let bundle_id = self + .bundles + .register_info::(&mut registrator, &mut self.storages); + + let mut invalid_entities = Vec::::new(); + let mut batch_iter = batch.into_iter(); + + // We need to find the first valid entity so we can initialize the bundle inserter. + // This differs from `insert_batch_with_caller` because that method can just panic + // if the first entity is invalid, whereas this method needs to keep going. 
+ let cache = loop { + if let Some((first_entity, first_bundle)) = batch_iter.next() { + if let Some(first_location) = self.entities().get(first_entity) { + let mut cache = InserterArchetypeCache { + // SAFETY: we initialized this bundle_id in `register_info` + inserter: unsafe { + BundleInserter::new_with_id( + self, + first_location.archetype_id, + bundle_id, + change_tick, + ) + }, + archetype_id: first_location.archetype_id, + }; + // SAFETY: `entity` is valid, `location` matches entity, bundle matches inserter + unsafe { + cache.inserter.insert( + first_entity, + first_location, + first_bundle, + insert_mode, + caller, + RelationshipHookMode::Run, ) - }, - archetype_id: first_location.archetype_id, - }; - // SAFETY: `entity` is valid, `location` matches entity, bundle matches inserter - unsafe { - cache.inserter.insert( - first_entity, - first_location, - first_bundle, - insert_mode, - #[cfg(feature = "track_change_detection")] - caller, - ) - }; + }; + break Some(cache); + } + invalid_entities.push(first_entity); + } else { + // We reached the end of the entities the caller provided and none were valid. 
+ break None; + } + }; - for (entity, bundle) in batch { - if let Some(location) = cache.inserter.entities().get(entity) { - if location.archetype_id != cache.archetype_id { - cache = InserterArchetypeCache { - // SAFETY: we initialized this bundle_id in `register_info` - inserter: unsafe { - BundleInserter::new_with_id( - self, - location.archetype_id, - bundle_id, - change_tick, - ) - }, - archetype_id: location.archetype_id, - } + if let Some(mut cache) = cache { + for (entity, bundle) in batch_iter { + if let Some(location) = cache.inserter.entities().get(entity) { + if location.archetype_id != cache.archetype_id { + cache = InserterArchetypeCache { + // SAFETY: we initialized this bundle_id in `register_info` + inserter: unsafe { + BundleInserter::new_with_id( + self, + location.archetype_id, + bundle_id, + change_tick, + ) + }, + archetype_id: location.archetype_id, } - // SAFETY: `entity` is valid, `location` matches entity, bundle matches inserter - unsafe { - cache.inserter.insert( - entity, - location, - bundle, - insert_mode, - #[cfg(feature = "track_change_detection")] - caller, - ) - }; } + // SAFETY: `entity` is valid, `location` matches entity, bundle matches inserter + unsafe { + cache.inserter.insert( + entity, + location, + bundle, + insert_mode, + caller, + RelationshipHookMode::Run, + ) + }; + } else { + invalid_entities.push(entity); } } } + + if invalid_entities.is_empty() { + Ok(()) + } else { + Err(TryInsertBatchError { + bundle_type: core::any::type_name::(), + entities: invalid_entities, + }) + } } /// Temporarily removes the requested resource from this [`World`], runs custom user code, @@ -2700,7 +2748,7 @@ impl World { let change_tick = self.change_tick(); let component_id = self.components.get_resource_id(TypeId::of::())?; - let (ptr, mut ticks, mut _caller) = self + let (ptr, mut ticks, mut caller) = self .storages .resources .get_mut(component_id) @@ -2716,8 +2764,7 @@ impl World { last_run: last_change_tick, this_run: change_tick, 
}, - #[cfg(feature = "track_change_detection")] - changed_by: &mut _caller, + changed_by: caller.as_mut(), }; let result = f(self, value_mut); assert!(!self.contains_resource::(), @@ -2729,12 +2776,7 @@ impl World { // SAFETY: pointer is of type R unsafe { self.storages.resources.get_mut(component_id).map(|info| { - info.insert_with_ticks( - ptr, - ticks, - #[cfg(feature = "track_change_detection")] - _caller, - ); + info.insert_with_ticks(ptr, ticks, caller); }) } })?; @@ -2789,19 +2831,14 @@ impl World { &mut self, component_id: ComponentId, value: OwningPtr<'_>, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) { let change_tick = self.change_tick(); let resource = self.initialize_resource_internal(component_id); // SAFETY: `value` is valid for `component_id`, ensured by caller unsafe { - resource.insert( - value, - change_tick, - #[cfg(feature = "track_change_detection")] - caller, - ); + resource.insert(value, change_tick, caller); } } @@ -2823,19 +2860,14 @@ impl World { &mut self, component_id: ComponentId, value: OwningPtr<'_>, - #[cfg(feature = "track_change_detection")] caller: &'static Location, + caller: MaybeLocation, ) { let change_tick = self.change_tick(); let resource = self.initialize_non_send_internal(component_id); // SAFETY: `value` is valid for `component_id`, ensured by caller unsafe { - resource.insert( - value, - change_tick, - #[cfg(feature = "track_change_detection")] - caller, - ); + resource.insert(value, change_tick, caller); } } @@ -2846,6 +2878,7 @@ impl World { &mut self, component_id: ComponentId, ) -> &mut ResourceData { + self.flush_components(); let archetypes = &mut self.archetypes; self.storages .resources @@ -2861,6 +2894,7 @@ impl World { &mut self, component_id: ComponentId, ) -> &mut ResourceData { + self.flush_components(); let archetypes = &mut self.archetypes; self.storages .non_send_resources @@ -2904,12 +2938,22 @@ impl World { } } + /// Applies any queued component 
registration. + /// For spawning vanilla rust component types and resources, this is not strictly necessary. + /// However, flushing components can make information available more quickly, and can have performance benefits. + /// Additionally, for components and resources registered dynamically through a raw descriptor or similar, + /// this is the only way to complete their registration. + pub(crate) fn flush_components(&mut self) { + self.components_registrator().apply_queued_registrations(); + } + /// Flushes queued entities and commands. /// /// Queued entities will be spawned, and then commands will be applied. #[inline] pub fn flush(&mut self) { self.flush_entities(); + self.flush_components(); self.flush_commands(); } @@ -3143,40 +3187,33 @@ impl World { /// component in the bundle. #[inline] pub fn register_bundle(&mut self) -> &BundleInfo { + // SAFETY: These come from the same world. `Self.components_registrator` can't be used since we borrow other fields too. + let mut registrator = + unsafe { ComponentsRegistrator::new(&mut self.components, &mut self.component_ids) }; let id = self .bundles - .register_info::(&mut self.components, &mut self.storages); + .register_info::(&mut registrator, &mut self.storages); // SAFETY: We just initialized the bundle so its id should definitely be valid. unsafe { self.bundles.get(id).debug_checked_unwrap() } } - /// Retrieves a mutable reference to the [`ComponentCloneHandlers`]. Can be used to set and update clone functions for components. - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// use bevy_ecs::component::{ComponentId, ComponentCloneHandler}; - /// use bevy_ecs::entity::ComponentCloneCtx; - /// use bevy_ecs::world::DeferredWorld; + /// Registers the given [`ComponentId`]s as a dynamic bundle and returns both the required component ids and the bundle id. 
/// - /// fn custom_clone_handler( - /// _world: &mut DeferredWorld, - /// _ctx: &mut ComponentCloneCtx, - /// ) { - /// // Custom cloning logic for component - /// } - /// - /// #[derive(Component)] - /// struct ComponentA; + /// Note that the components need to be registered first, this function only creates a bundle combining them. Components + /// can be registered with [`World::register_component`]/[`_with_descriptor`](World::register_component_with_descriptor). /// - /// let mut world = World::new(); + /// **You should prefer to use the typed API [`World::register_bundle`] where possible and only use this in cases where + /// not all of the actual types are known at compile time.** /// - /// let component_id = world.register_component::(); - /// - /// world.get_component_clone_handlers_mut() - /// .set_component_handler(component_id, ComponentCloneHandler::custom_handler(custom_clone_handler)) - /// ``` - pub fn get_component_clone_handlers_mut(&mut self) -> &mut ComponentCloneHandlers { - self.components.get_component_clone_handlers_mut() + /// # Panics + /// This function will panic if any of the provided component ids do not belong to a component known to this [`World`]. + #[inline] + pub fn register_dynamic_bundle(&mut self, component_ids: &[ComponentId]) -> &BundleInfo { + let id = + self.bundles + .init_dynamic_info(&mut self.storages, &self.components, component_ids); + // SAFETY: We just initialized the bundle so its id should definitely be valid. 
+ unsafe { self.bundles.get(id).debug_checked_unwrap() } } } @@ -3232,6 +3269,7 @@ impl World { /// # struct B(u32); /// # /// # let mut world = World::new(); + /// # world.remove_resource::(); /// # world.insert_resource(A(1)); /// # world.insert_resource(B(2)); /// let mut total = 0; @@ -3393,7 +3431,7 @@ impl World { .get_info(component_id) .debug_checked_unwrap() }; - let (ptr, ticks, _caller) = data.get_with_ticks()?; + let (ptr, ticks, caller) = data.get_with_ticks()?; // SAFETY: // - We have exclusive access to the world, so no other code can be aliasing the `TickCells` @@ -3412,11 +3450,10 @@ impl World { // - We iterate one resource at a time, and we let go of each `PtrMut` before getting the next one value: unsafe { ptr.assert_unique() }, ticks, - #[cfg(feature = "track_change_detection")] // SAFETY: // - We have exclusive access to the world, so no other code can be aliasing the `Ptr` // - We iterate one resource at a time, and we let go of each `PtrMut` before getting the next one - changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }; Some((component_info, mut_untyped)) @@ -3500,14 +3537,7 @@ impl World { /// This function will panic if it isn't called from the same thread that the resource was inserted from. #[inline] pub fn get_by_id(&self, entity: Entity, component_id: ComponentId) -> Option> { - // SAFETY: - // - `&self` ensures that all accessed data is not mutably aliased - // - `as_unsafe_world_cell_readonly` provides shared/readonly permission to the whole world - unsafe { - self.as_unsafe_world_cell_readonly() - .get_entity(entity)? - .get_by_id(component_id) - } + self.get_entity(entity).ok()?.get_by_id(component_id).ok() } /// Retrieves a mutable untyped reference to the given `entity`'s [`Component`] of the given [`ComponentId`]. 
@@ -3521,15 +3551,10 @@ impl World { entity: Entity, component_id: ComponentId, ) -> Option> { - // SAFETY: - // - `&mut self` ensures that all accessed data is unaliased - // - `as_unsafe_world_cell` provides mutable permission to the whole world - unsafe { - self.as_unsafe_world_cell() - .get_entity(entity)? - .get_mut_by_id(component_id) - .ok() - } + self.get_entity_mut(entity) + .ok()? + .into_mut_by_id(component_id) + .ok() } } @@ -3692,7 +3717,37 @@ unsafe impl Sync for World {} /// /// This can be helpful for complex initialization or context-aware defaults. /// -/// [`FromWorld`] is automatically implemented for any type implementing [`Default`]. +/// [`FromWorld`] is automatically implemented for any type implementing [`Default`] +/// and may also be derived for: +/// - any struct whose fields all implement `FromWorld` +/// - any enum where one variant has the attribute `#[from_world]` +/// +/// ```rs +/// +/// #[derive(Default)] +/// struct A; +/// +/// #[derive(Default)] +/// struct B(Option) +/// +/// struct C; +/// +/// impl FromWorld for C { +/// fn from_world(_world: &mut World) -> Self { +/// Self +/// } +/// } +/// +/// #[derive(FromWorld)] +/// struct D(A, B, C); +/// +/// #[derive(FromWorld)] +/// enum E { +/// #[from_world] +/// F, +/// G +/// } +/// ``` pub trait FromWorld { /// Creates `Self` using data from the given [`World`]. 
fn from_world(world: &mut World) -> Self; @@ -3706,28 +3761,33 @@ impl FromWorld for T { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use super::{FromWorld, World}; use crate::{ - change_detection::DetectChangesMut, - component::{ComponentDescriptor, ComponentInfo, StorageType}, + change_detection::{DetectChangesMut, MaybeLocation}, + component::{ComponentCloneBehavior, ComponentDescriptor, ComponentInfo, StorageType}, entity::EntityHashSet, + entity_disabling::{DefaultQueryFilters, Disabled}, ptr::OwningPtr, - system::Resource, - world::error::EntityFetchError, + resource::Resource, + world::{error::EntityMutableFetchError, DeferredWorld}, + }; + use alloc::{ + borrow::ToOwned, + string::{String, ToString}, + sync::Arc, + vec, + vec::Vec, }; - use alloc::sync::Arc; use bevy_ecs_macros::Component; - use bevy_utils::{HashMap, HashSet}; + use bevy_platform::collections::{HashMap, HashSet}; use core::{ any::TypeId, panic, sync::atomic::{AtomicBool, AtomicU32, Ordering}, }; - use std::sync::Mutex; - - // For bevy_ecs_macros - use crate as bevy_ecs; + use std::{println, sync::Mutex}; type ID = u8; @@ -3895,6 +3955,8 @@ mod tests { #[test] fn iter_resources() { let mut world = World::new(); + // Remove DefaultQueryFilters so it doesn't show up in the iterator + world.remove_resource::(); world.insert_resource(TestResource(42)); world.insert_resource(TestResource2("Hello, world!".to_string())); world.insert_resource(TestResource3); @@ -3921,6 +3983,8 @@ mod tests { #[test] fn iter_resources_mut() { let mut world = World::new(); + // Remove DefaultQueryFilters so it doesn't show up in the iterator + world.remove_resource::(); world.insert_resource(TestResource(42)); world.insert_resource(TestResource2("Hello, world!".to_string())); world.insert_resource(TestResource3); @@ -3964,12 +4028,7 @@ mod tests { OwningPtr::make(value, |ptr| { // SAFETY: value is valid for the layout of `TestResource` unsafe { - 
world.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_change_detection")] - panic::Location::caller(), - ); + world.insert_resource_by_id(component_id, ptr, MaybeLocation::caller()); } }); @@ -4003,6 +4062,7 @@ mod tests { DROP_COUNT.fetch_add(1, Ordering::SeqCst); }), true, + ComponentCloneBehavior::Default, ) }; @@ -4012,12 +4072,7 @@ mod tests { OwningPtr::make(value, |ptr| { // SAFETY: value is valid for the component layout unsafe { - world.insert_resource_by_id( - component_id, - ptr, - #[cfg(feature = "track_change_detection")] - panic::Location::caller(), - ); + world.insert_resource_by_id(component_id, ptr, MaybeLocation::caller()); } }); @@ -4101,39 +4156,39 @@ mod tests { let bar_id = TypeId::of::(); let baz_id = TypeId::of::(); assert_eq!( - to_type_ids(world.inspect_entity(ent0).collect()), + to_type_ids(world.inspect_entity(ent0).unwrap().collect()), [Some(foo_id), Some(bar_id), Some(baz_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent1).collect()), + to_type_ids(world.inspect_entity(ent1).unwrap().collect()), [Some(foo_id), Some(bar_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent2).collect()), + to_type_ids(world.inspect_entity(ent2).unwrap().collect()), [Some(bar_id), Some(baz_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent3).collect()), + to_type_ids(world.inspect_entity(ent3).unwrap().collect()), [Some(foo_id), Some(baz_id)] .into_iter() .collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent4).collect()), + to_type_ids(world.inspect_entity(ent4).unwrap().collect()), [Some(foo_id)].into_iter().collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent5).collect()), + to_type_ids(world.inspect_entity(ent5).unwrap().collect()), [Some(bar_id)].into_iter().collect::>() ); assert_eq!( - to_type_ids(world.inspect_entity(ent6).collect()), + to_type_ids(world.inspect_entity(ent6).unwrap().collect()), 
[Some(baz_id)].into_iter().collect::>() ); } @@ -4280,20 +4335,34 @@ mod tests { world.entity_mut(e1).despawn(); - assert_eq!(Err(e1), world.get_entity(e1).map(|_| {})); - assert_eq!(Err(e1), world.get_entity([e1, e2]).map(|_| {})); + assert_eq!( + Err(e1), + world.get_entity(e1).map(|_| {}).map_err(|e| e.entity) + ); + assert_eq!( + Err(e1), + world.get_entity([e1, e2]).map(|_| {}).map_err(|e| e.entity) + ); assert_eq!( Err(e1), world .get_entity(&[e1, e2] /* this is an array not a slice */) .map(|_| {}) + .map_err(|e| e.entity) + ); + assert_eq!( + Err(e1), + world + .get_entity(&vec![e1, e2][..]) + .map(|_| {}) + .map_err(|e| e.entity) ); - assert_eq!(Err(e1), world.get_entity(&vec![e1, e2][..]).map(|_| {})); assert_eq!( Err(e1), world .get_entity(&EntityHashSet::from_iter([e1, e2])) .map(|_| {}) + .map_err(|e| e.entity) ); } @@ -4315,17 +4384,17 @@ mod tests { .is_ok()); assert_eq!( - Err(EntityFetchError::AliasedMutability(e1)), + Err(EntityMutableFetchError::AliasedMutability(e1)), world.get_entity_mut([e1, e2, e1]).map(|_| {}) ); assert_eq!( - Err(EntityFetchError::AliasedMutability(e1)), + Err(EntityMutableFetchError::AliasedMutability(e1)), world .get_entity_mut(&[e1, e2, e1] /* this is an array not a slice */) .map(|_| {}) ); assert_eq!( - Err(EntityFetchError::AliasedMutability(e1)), + Err(EntityMutableFetchError::AliasedMutability(e1)), world.get_entity_mut(&vec![e1, e2, e1][..]).map(|_| {}) ); // Aliased mutability isn't allowed by HashSets @@ -4337,24 +4406,105 @@ mod tests { assert!(matches!( world.get_entity_mut(e1).map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1 + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1 )); assert!(matches!( world.get_entity_mut([e1, e2]).map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e,..)) if e == e1)); + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1)); assert!(matches!( world .get_entity_mut(&[e1, e2] /* this is an array not a slice */) .map(|_| {}), - 
Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1)); + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1)); assert!(matches!( world.get_entity_mut(&vec![e1, e2][..]).map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1, + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1, )); assert!(matches!( world .get_entity_mut(&EntityHashSet::from_iter([e1, e2])) .map(|_| {}), - Err(EntityFetchError::NoSuchEntity(e, ..)) if e == e1)); + Err(EntityMutableFetchError::EntityDoesNotExist(e)) if e.entity == e1)); + } + + #[test] + #[track_caller] + fn entity_spawn_despawn_tracking() { + use core::panic::Location; + + let mut world = World::new(); + let entity = world.spawn_empty().id(); + assert_eq!( + world.entities.entity_get_spawned_or_despawned_by(entity), + MaybeLocation::new(Some(Location::caller())) + ); + world.despawn(entity); + assert_eq!( + world.entities.entity_get_spawned_or_despawned_by(entity), + MaybeLocation::new(Some(Location::caller())) + ); + let new = world.spawn_empty().id(); + assert_eq!(entity.index(), new.index()); + assert_eq!( + world.entities.entity_get_spawned_or_despawned_by(entity), + MaybeLocation::new(None) + ); + world.despawn(new); + assert_eq!( + world.entities.entity_get_spawned_or_despawned_by(entity), + MaybeLocation::new(None) + ); + } + + #[test] + fn new_world_has_disabling() { + let mut world = World::new(); + world.spawn(Foo); + world.spawn((Foo, Disabled)); + assert_eq!(1, world.query::<&Foo>().iter(&world).count()); + + // If we explicitly remove the resource, no entities should be filtered anymore + world.remove_resource::(); + assert_eq!(2, world.query::<&Foo>().iter(&world).count()); + } + + #[test] + fn entities_and_commands() { + #[derive(Component, PartialEq, Debug)] + struct Foo(u32); + + let mut world = World::new(); + + let eid = world.spawn(Foo(35)).id(); + + let (mut fetcher, mut commands) = world.entities_and_commands(); + let emut = fetcher.get_mut(eid).unwrap(); + 
commands.entity(eid).despawn(); + assert_eq!(emut.get::().unwrap(), &Foo(35)); + + world.flush(); + + assert!(world.get_entity(eid).is_err()); + } + + #[test] + fn entities_and_commands_deferred() { + #[derive(Component, PartialEq, Debug)] + struct Foo(u32); + + let mut world = World::new(); + + let eid = world.spawn(Foo(1)).id(); + + let mut dworld = DeferredWorld::from(&mut world); + + let (mut fetcher, mut commands) = dworld.entities_and_commands(); + let emut = fetcher.get_mut(eid).unwrap(); + commands.entity(eid).despawn(); + assert_eq!(emut.get::().unwrap(), &Foo(1)); + + world.flush(); + + assert!(world.get_entity(eid).is_err()); } } diff --git a/crates/bevy_ecs/src/world/reflect.rs b/crates/bevy_ecs/src/world/reflect.rs index afa2015f4ef38..fdd8b28142576 100644 --- a/crates/bevy_ecs/src/world/reflect.rs +++ b/crates/bevy_ecs/src/world/reflect.rs @@ -80,7 +80,7 @@ impl World { let component_name = self .components() .get_name(component_id) - .map(ToString::to_string); + .map(|name| name.to_string()); return Err(GetComponentReflectError::EntityDoesNotHaveComponent { entity, @@ -169,7 +169,7 @@ impl World { let component_name = self .components() .get_name(component_id) - .map(ToString::to_string); + .map(|name| name.to_string()); let Some(comp_mut_untyped) = self.get_mut_by_id(entity, component_id) else { return Err(GetComponentReflectError::EntityDoesNotHaveComponent { @@ -213,7 +213,7 @@ pub enum GetComponentReflectError { NoCorrespondingComponentId(TypeId), /// The given [`Entity`] does not have a [`Component`] corresponding to the given [`TypeId`]. - #[error("The given `Entity` {entity:?} does not have a `{component_name:?}` component ({component_id:?}, which corresponds to {type_id:?})")] + #[error("The given `Entity` {entity} does not have a `{component_name:?}` component ({component_id:?}, which corresponds to {type_id:?})")] EntityDoesNotHaveComponent { /// The given [`Entity`]. 
entity: Entity, @@ -251,11 +251,7 @@ mod tests { use bevy_reflect::Reflect; - use crate::{ - // For bevy_ecs_macros - self as bevy_ecs, - prelude::{AppTypeRegistry, Component, DetectChanges, World}, - }; + use crate::prelude::{AppTypeRegistry, Component, DetectChanges, World}; #[derive(Component, Reflect)] struct RFoo(i32); diff --git a/crates/bevy_ecs/src/world/spawn_batch.rs b/crates/bevy_ecs/src/world/spawn_batch.rs index 6be86136953c3..16bd9bb8059b4 100644 --- a/crates/bevy_ecs/src/world/spawn_batch.rs +++ b/crates/bevy_ecs/src/world/spawn_batch.rs @@ -1,11 +1,10 @@ use crate::{ - bundle::{Bundle, BundleSpawner}, + bundle::{Bundle, BundleSpawner, NoBundleEffect}, + change_detection::MaybeLocation, entity::{Entity, EntitySetIterator}, world::World, }; use core::iter::FusedIterator; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; /// An iterator that spawns a series of entities and returns the [ID](Entity) of /// each spawned entity. @@ -18,22 +17,17 @@ where { inner: I, spawner: BundleSpawner<'w>, - #[cfg(feature = "track_change_detection")] - caller: &'static Location<'static>, + caller: MaybeLocation, } impl<'w, I> SpawnBatchIter<'w, I> where I: Iterator, - I::Item: Bundle, + I::Item: Bundle, { #[inline] #[track_caller] - pub(crate) fn new( - world: &'w mut World, - iter: I, - #[cfg(feature = "track_change_detection")] caller: &'static Location, - ) -> Self { + pub(crate) fn new(world: &'w mut World, iter: I, caller: MaybeLocation) -> Self { // Ensure all entity allocations are accounted for so `self.entities` can realloc if // necessary world.flush(); @@ -50,7 +44,6 @@ where Self { inner: iter, spawner, - #[cfg(feature = "track_change_detection")] caller, } } @@ -80,13 +73,7 @@ where fn next(&mut self) -> Option { let bundle = self.inner.next()?; // SAFETY: bundle matches spawner type - unsafe { - Some(self.spawner.spawn( - bundle, - #[cfg(feature = "track_change_detection")] - self.caller, - )) - } + unsafe { 
Some(self.spawner.spawn(bundle, self.caller).0) } } fn size_hint(&self) -> (usize, Option) { diff --git a/crates/bevy_ecs/src/world/unsafe_world_cell.rs b/crates/bevy_ecs/src/world/unsafe_world_cell.rs index afc6e86be5516..b46b4a154b359 100644 --- a/crates/bevy_ecs/src/world/unsafe_world_cell.rs +++ b/crates/bevy_ecs/src/world/unsafe_world_cell.rs @@ -1,36 +1,25 @@ //! Contains types that allow disjoint mutable access to a [`World`]. -#![warn(unsafe_op_in_unsafe_fn)] - use super::{Mut, Ref, World, WorldId}; use crate::{ archetype::{Archetype, Archetypes}, bundle::Bundles, - change_detection::{MaybeUnsafeCellLocation, MutUntyped, Ticks, TicksMut}, + change_detection::{MaybeLocation, MutUntyped, Ticks, TicksMut}, component::{ComponentId, ComponentTicks, Components, Mutable, StorageType, Tick, TickCells}, - entity::{Entities, Entity, EntityBorrow, EntityLocation}, + entity::{ContainsEntity, Entities, Entity, EntityDoesNotExistError, EntityLocation}, observer::Observers, prelude::Component, query::{DebugCheckedUnwrap, ReadOnlyQueryData}, removal_detection::RemovedComponentEvents, + resource::Resource, storage::{ComponentSparseSet, Storages, Table}, - system::Resource, world::RawCommandQueue, }; -use bevy_ptr::Ptr; -#[cfg(feature = "track_change_detection")] -use bevy_ptr::UnsafeCellDeref; -#[cfg(feature = "track_change_detection")] -use core::panic::Location; -use core::{any::TypeId, cell::UnsafeCell, fmt::Debug, marker::PhantomData, ptr}; +use bevy_platform::sync::atomic::Ordering; +use bevy_ptr::{Ptr, UnsafeCellDeref}; +use core::{any::TypeId, cell::UnsafeCell, fmt::Debug, marker::PhantomData, panic::Location, ptr}; use thiserror::Error; -#[cfg(not(feature = "portable-atomic"))] -use core::sync::atomic::Ordering; - -#[cfg(feature = "portable-atomic")] -use portable_atomic::Ordering; - /// Variant of the [`World`] where resource and component accesses take `&self`, and the responsibility to avoid /// aliasing violations are given to the caller instead of being checked 
at compile-time by rust's unique XOR shared rule. /// @@ -59,7 +48,7 @@ use portable_atomic::Ordering; /// ``` /// use bevy_ecs::world::World; /// use bevy_ecs::change_detection::Mut; -/// use bevy_ecs::system::Resource; +/// use bevy_ecs::resource::Resource; /// use bevy_ecs::world::unsafe_world_cell::UnsafeWorldCell; /// /// // INVARIANT: existence of this struct means that users of it are the only ones being able to access resources in the world @@ -86,7 +75,12 @@ use portable_atomic::Ordering; /// } /// ``` #[derive(Copy, Clone)] -pub struct UnsafeWorldCell<'w>(*mut World, PhantomData<(&'w World, &'w UnsafeCell)>); +pub struct UnsafeWorldCell<'w> { + ptr: *mut World, + #[cfg(debug_assertions)] + allows_mutable_access: bool, + _marker: PhantomData<(&'w World, &'w UnsafeCell)>, +} // SAFETY: `&World` and `&mut World` are both `Send` unsafe impl Send for UnsafeWorldCell<'_> {} @@ -109,18 +103,48 @@ impl<'w> UnsafeWorldCell<'w> { /// Creates a [`UnsafeWorldCell`] that can be used to access everything immutably #[inline] pub(crate) fn new_readonly(world: &'w World) -> Self { - Self(ptr::from_ref(world).cast_mut(), PhantomData) + Self { + ptr: ptr::from_ref(world).cast_mut(), + #[cfg(debug_assertions)] + allows_mutable_access: false, + _marker: PhantomData, + } } /// Creates [`UnsafeWorldCell`] that can be used to access everything mutably #[inline] pub(crate) fn new_mutable(world: &'w mut World) -> Self { - Self(ptr::from_mut(world), PhantomData) + Self { + ptr: ptr::from_mut(world), + #[cfg(debug_assertions)] + allows_mutable_access: true, + _marker: PhantomData, + } + } + + #[cfg_attr(debug_assertions, inline(never), track_caller)] + #[cfg_attr(not(debug_assertions), inline(always))] + pub(crate) fn assert_allows_mutable_access(self) { + // This annotation is needed because the + // allows_mutable_access field doesn't exist otherwise. 
+ // Kinda weird, since debug_assert would never be called, + // but CI complained in https://github.com/bevyengine/bevy/pull/17393 + #[cfg(debug_assertions)] + debug_assert!( + self.allows_mutable_access, + "mutating world data via `World::as_unsafe_world_cell_readonly` is forbidden" + ); } /// Gets a mutable reference to the [`World`] this [`UnsafeWorldCell`] belongs to. /// This is an incredibly error-prone operation and is only valid in a small number of circumstances. /// + /// Calling this method implies mutable access to the *whole* world (see first point on safety section + /// below), which includes all entities, components, and resources. Notably, calling this on + /// [`WorldQuery::init_fetch`](crate::query::WorldQuery::init_fetch) and + /// [`SystemParam::get_param`](crate::system::SystemParam::get_param) are most likely *unsound* unless + /// you can prove that the underlying [`World`] is exclusive, which in normal circumstances is not. + /// /// # Safety /// - `self` must have been obtained from a call to [`World::as_unsafe_world_cell`] /// (*not* `as_unsafe_world_cell_readonly` or any other method of construction that @@ -163,9 +187,10 @@ impl<'w> UnsafeWorldCell<'w> { /// ``` #[inline] pub unsafe fn world_mut(self) -> &'w mut World { + self.assert_allows_mutable_access(); // SAFETY: // - caller ensures the created `&mut World` is the only borrow of world - unsafe { &mut *self.0 } + unsafe { &mut *self.ptr } } /// Gets a reference to the [`&World`](World) this [`UnsafeWorldCell`] belongs to. @@ -214,7 +239,7 @@ impl<'w> UnsafeWorldCell<'w> { // SAFETY: // - caller ensures that the returned `&World` is not used in a way that would conflict // with any existing mutable borrows of world data - unsafe { &*self.0 } + unsafe { &*self.ptr } } /// Retrieves this world's unique [ID](WorldId). @@ -332,9 +357,15 @@ impl<'w> UnsafeWorldCell<'w> { /// Retrieves an [`UnsafeEntityCell`] that exposes read and write operations for the given `entity`. 
/// Similar to the [`UnsafeWorldCell`], you are in charge of making sure that no aliasing rules are violated. #[inline] - pub fn get_entity(self, entity: Entity) -> Option> { - let location = self.entities().get(entity)?; - Some(UnsafeEntityCell::new(self, entity, location)) + pub fn get_entity( + self, + entity: Entity, + ) -> Result, EntityDoesNotExistError> { + let location = self + .entities() + .get(entity) + .ok_or(EntityDoesNotExistError::new(entity, self.entities()))?; + Ok(UnsafeEntityCell::new(self, entity, location)) } /// Gets a reference to the resource of the given type if it exists @@ -367,7 +398,7 @@ impl<'w> UnsafeWorldCell<'w> { // SAFETY: caller ensures `self` has permission to access the resource // caller also ensure that no mutable reference to the resource exists - let (ptr, ticks, _caller) = unsafe { self.get_resource_with_ticks(component_id)? }; + let (ptr, ticks, caller) = unsafe { self.get_resource_with_ticks(component_id)? }; // SAFETY: `component_id` was obtained from the type ID of `R` let value = unsafe { ptr.deref::() }; @@ -377,13 +408,11 @@ impl<'w> UnsafeWorldCell<'w> { unsafe { Ticks::from_tick_cells(ticks, self.last_change_tick(), self.change_tick()) }; // SAFETY: caller ensures that no mutable reference to the resource exists - #[cfg(feature = "track_change_detection")] - let caller = unsafe { _caller.deref() }; + let caller = caller.map(|caller| unsafe { caller.deref() }); Some(Ref { value, ticks, - #[cfg(feature = "track_change_detection")] changed_by: caller, }) } @@ -459,6 +488,7 @@ impl<'w> UnsafeWorldCell<'w> { /// - no other references to the resource exist at the same time #[inline] pub unsafe fn get_resource_mut(self) -> Option> { + self.assert_allows_mutable_access(); let component_id = self.components().get_resource_id(TypeId::of::())?; // SAFETY: // - caller ensures `self` has permission to access the resource mutably @@ -486,9 +516,10 @@ impl<'w> UnsafeWorldCell<'w> { self, component_id: ComponentId, ) -> Option> { 
+ self.assert_allows_mutable_access(); // SAFETY: we only access data that the caller has ensured is unaliased and `self` // has permission to access. - let (ptr, ticks, _caller) = unsafe { self.storages() } + let (ptr, ticks, caller) = unsafe { self.storages() } .resources .get(component_id)? .get_with_ticks()?; @@ -506,11 +537,10 @@ impl<'w> UnsafeWorldCell<'w> { // - caller ensures that the resource is unaliased value: unsafe { ptr.assert_unique() }, ticks, - #[cfg(feature = "track_change_detection")] // SAFETY: // - caller ensures that `self` has permission to access the resource // - caller ensures that the resource is unaliased - changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }) } @@ -522,6 +552,7 @@ impl<'w> UnsafeWorldCell<'w> { /// - no other references to the resource exist at the same time #[inline] pub unsafe fn get_non_send_resource_mut(self) -> Option> { + self.assert_allows_mutable_access(); let component_id = self.components().get_resource_id(TypeId::of::())?; // SAFETY: // - caller ensures that `self` has permission to access the resource @@ -552,10 +583,11 @@ impl<'w> UnsafeWorldCell<'w> { self, component_id: ComponentId, ) -> Option> { + self.assert_allows_mutable_access(); let change_tick = self.change_tick(); // SAFETY: we only access data that the caller has ensured is unaliased and `self` // has permission to access. - let (ptr, ticks, _caller) = unsafe { self.storages() } + let (ptr, ticks, caller) = unsafe { self.storages() } .non_send_resources .get(component_id)? .get_with_ticks()?; @@ -570,9 +602,8 @@ impl<'w> UnsafeWorldCell<'w> { // SAFETY: This function has exclusive access to the world so nothing aliases `ptr`. 
value: unsafe { ptr.assert_unique() }, ticks, - #[cfg(feature = "track_change_detection")] // SAFETY: This function has exclusive access to the world - changed_by: unsafe { _caller.deref_mut() }, + changed_by: unsafe { caller.map(|caller| caller.deref_mut()) }, }) } @@ -585,7 +616,11 @@ impl<'w> UnsafeWorldCell<'w> { pub(crate) unsafe fn get_resource_with_ticks( self, component_id: ComponentId, - ) -> Option<(Ptr<'w>, TickCells<'w>, MaybeUnsafeCellLocation<'w>)> { + ) -> Option<( + Ptr<'w>, + TickCells<'w>, + MaybeLocation<&'w UnsafeCell<&'static Location<'static>>>, + )> { // SAFETY: // - caller ensures there is no `&mut World` // - caller ensures there are no mutable borrows of this resource @@ -608,7 +643,11 @@ impl<'w> UnsafeWorldCell<'w> { pub(crate) unsafe fn get_non_send_with_ticks( self, component_id: ComponentId, - ) -> Option<(Ptr<'w>, TickCells<'w>, MaybeUnsafeCellLocation<'w>)> { + ) -> Option<( + Ptr<'w>, + TickCells<'w>, + MaybeLocation<&'w UnsafeCell<&'static Location<'static>>>, + )> { // SAFETY: // - caller ensures there is no `&mut World` // - caller ensures there are no mutable borrows of this resource @@ -625,18 +664,22 @@ impl<'w> UnsafeWorldCell<'w> { /// - the [`UnsafeWorldCell`] has permission to access the queue mutably /// - no mutable references to the queue exist at the same time pub(crate) unsafe fn get_raw_command_queue(self) -> RawCommandQueue { + self.assert_allows_mutable_access(); // SAFETY: // - caller ensures there are no existing mutable references // - caller ensures that we have permission to access the queue - unsafe { (*self.0).command_queue.clone() } + unsafe { (*self.ptr).command_queue.clone() } } /// # Safety /// It is the callers responsibility to ensure that there are no outstanding /// references to `last_trigger_id`. 
pub(crate) unsafe fn increment_trigger_id(self) { + self.assert_allows_mutable_access(); // SAFETY: Caller ensure there are no outstanding references - unsafe { (*self.0).last_trigger_id += 1 } + unsafe { + (*self.ptr).last_trigger_id = (*self.ptr).last_trigger_id.wrapping_add(1); + } } } @@ -713,7 +756,7 @@ impl<'w> UnsafeEntityCell<'w> { /// /// - If you know the concrete type of the component, you should prefer [`Self::contains`]. /// - If you know the component's [`TypeId`] but not its [`ComponentId`], consider using - /// [`Self::contains_type_id`]. + /// [`Self::contains_type_id`]. #[inline] pub fn contains_id(self, component_id: ComponentId) -> bool { self.archetype().contains(component_id) @@ -780,12 +823,11 @@ impl<'w> UnsafeEntityCell<'w> { self.entity, self.location, ) - .map(|(value, cells, _caller)| Ref { + .map(|(value, cells, caller)| Ref { // SAFETY: returned component is of type T value: value.deref::(), ticks: Ticks::from_tick_cells(cells, last_change_tick, change_tick), - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref(), + changed_by: caller.map(|caller| caller.deref()), }) } } @@ -886,6 +928,8 @@ impl<'w> UnsafeEntityCell<'w> { last_change_tick: Tick, change_tick: Tick, ) -> Option> { + self.world.assert_allows_mutable_access(); + let component_id = self.world.components().get_id(TypeId::of::())?; // SAFETY: @@ -900,12 +944,11 @@ impl<'w> UnsafeEntityCell<'w> { self.entity, self.location, ) - .map(|(value, cells, _caller)| Mut { + .map(|(value, cells, caller)| Mut { // SAFETY: returned component is of type T value: value.assert_unique().deref_mut::(), ticks: TicksMut::from_tick_cells(cells, last_change_tick, change_tick), - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), }) } } @@ -1002,6 +1045,8 @@ impl<'w> UnsafeEntityCell<'w> { self, component_id: ComponentId, ) -> Result, GetEntityMutByIdError> { + 
self.world.assert_allows_mutable_access(); + let info = self .world .components() @@ -1022,7 +1067,7 @@ impl<'w> UnsafeEntityCell<'w> { self.entity, self.location, ) - .map(|(value, cells, _caller)| MutUntyped { + .map(|(value, cells, caller)| MutUntyped { // SAFETY: world access validated by caller and ties world lifetime to `MutUntyped` lifetime value: value.assert_unique(), ticks: TicksMut::from_tick_cells( @@ -1030,20 +1075,66 @@ impl<'w> UnsafeEntityCell<'w> { self.world.last_change_tick(), self.world.change_tick(), ), - #[cfg(feature = "track_change_detection")] - changed_by: _caller.deref_mut(), + changed_by: caller.map(|caller| caller.deref_mut()), + }) + .ok_or(GetEntityMutByIdError::ComponentNotFound) + } + } + + /// Retrieves a mutable untyped reference to the given `entity`'s [`Component`] of the given [`ComponentId`]. + /// Returns `None` if the `entity` does not have a [`Component`] of the given type. + /// This method assumes the [`Component`] is mutable, skipping that check. 
+ /// + /// **You should prefer to use the typed API [`UnsafeEntityCell::get_mut_assume_mutable`] where possible and only + /// use this in cases where the actual types are not known at compile time.** + /// + /// # Safety + /// It is the callers responsibility to ensure that + /// - the [`UnsafeEntityCell`] has permission to access the component mutably + /// - no other references to the component exist at the same time + /// - the component `T` is mutable + #[inline] + pub unsafe fn get_mut_assume_mutable_by_id( + self, + component_id: ComponentId, + ) -> Result, GetEntityMutByIdError> { + self.world.assert_allows_mutable_access(); + + let info = self + .world + .components() + .get_info(component_id) + .ok_or(GetEntityMutByIdError::InfoNotFound)?; + + // SAFETY: entity_location is valid, component_id is valid as checked by the line above + unsafe { + get_component_and_ticks( + self.world, + component_id, + info.storage_type(), + self.entity, + self.location, + ) + .map(|(value, cells, caller)| MutUntyped { + // SAFETY: world access validated by caller and ties world lifetime to `MutUntyped` lifetime + value: value.assert_unique(), + ticks: TicksMut::from_tick_cells( + cells, + self.world.last_change_tick(), + self.world.change_tick(), + ), + changed_by: caller.map(|caller| caller.deref_mut()), }) .ok_or(GetEntityMutByIdError::ComponentNotFound) } } /// Returns the source code location from which this entity has been spawned. 
- #[cfg(feature = "track_change_detection")] - pub fn spawned_by(self) -> &'static Location<'static> { + pub fn spawned_by(self) -> MaybeLocation { self.world() .entities() .entity_get_spawned_or_despawned_by(self.entity) - .unwrap() + .map(|o| o.unwrap()) } } @@ -1090,12 +1181,11 @@ impl<'w> UnsafeWorldCell<'w> { /// /// # Safety /// - `location` must refer to an archetype that contains `entity` -/// the archetype +/// the archetype /// - `component_id` must be valid /// - `storage_type` must accurately reflect where the components for `component_id` are stored. /// - the caller must ensure that no aliasing rules are violated #[inline] -#[allow(unsafe_op_in_unsafe_fn)] unsafe fn get_component( world: UnsafeWorldCell<'_>, component_id: ComponentId, @@ -1122,14 +1212,17 @@ unsafe fn get_component( /// - `storage_type` must accurately reflect where the components for `component_id` are stored. /// - the caller must ensure that no aliasing rules are violated #[inline] -#[allow(unsafe_op_in_unsafe_fn)] unsafe fn get_component_and_ticks( world: UnsafeWorldCell<'_>, component_id: ComponentId, storage_type: StorageType, entity: Entity, location: EntityLocation, -) -> Option<(Ptr<'_>, TickCells<'_>, MaybeUnsafeCellLocation<'_>)> { +) -> Option<( + Ptr<'_>, + TickCells<'_>, + MaybeLocation<&UnsafeCell<&'static Location<'static>>>, +)> { match storage_type { StorageType::Table => { let table = world.fetch_table(location)?; @@ -1145,12 +1238,9 @@ unsafe fn get_component_and_ticks( .get_changed_tick(component_id, location.table_row) .debug_checked_unwrap(), }, - #[cfg(feature = "track_change_detection")] table .get_changed_by(component_id, location.table_row) - .debug_checked_unwrap(), - #[cfg(not(feature = "track_change_detection"))] - (), + .map(|changed_by| changed_by.debug_checked_unwrap()), )) } StorageType::SparseSet => world.fetch_sparse_set(component_id)?.get_with_ticks(entity), @@ -1161,12 +1251,11 @@ unsafe fn get_component_and_ticks( /// /// # Safety /// - 
`location` must refer to an archetype that contains `entity` -/// the archetype +/// the archetype /// - `component_id` must be valid /// - `storage_type` must accurately reflect where the components for `component_id` are stored. /// - the caller must ensure that no aliasing rules are violated #[inline] -#[allow(unsafe_op_in_unsafe_fn)] unsafe fn get_ticks( world: UnsafeWorldCell<'_>, component_id: ComponentId, @@ -1184,8 +1273,49 @@ unsafe fn get_ticks( } } -impl EntityBorrow for UnsafeEntityCell<'_> { +impl ContainsEntity for UnsafeEntityCell<'_> { fn entity(&self) -> Entity { self.id() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + #[should_panic = "is forbidden"] + fn as_unsafe_world_cell_readonly_world_mut_forbidden() { + let world = World::new(); + let world_cell = world.as_unsafe_world_cell_readonly(); + // SAFETY: this invalid usage will be caught by a runtime panic. + let _ = unsafe { world_cell.world_mut() }; + } + + #[derive(Resource)] + struct R; + + #[test] + #[should_panic = "is forbidden"] + fn as_unsafe_world_cell_readonly_resource_mut_forbidden() { + let mut world = World::new(); + world.insert_resource(R); + let world_cell = world.as_unsafe_world_cell_readonly(); + // SAFETY: this invalid usage will be caught by a runtime panic. + let _ = unsafe { world_cell.get_resource_mut::() }; + } + + #[derive(Component)] + struct C; + + #[test] + #[should_panic = "is forbidden"] + fn as_unsafe_world_cell_readonly_component_mut_forbidden() { + let mut world = World::new(); + let entity = world.spawn(C).id(); + let world_cell = world.as_unsafe_world_cell_readonly(); + let entity_cell = world_cell.get_entity(entity).unwrap(); + // SAFETY: this invalid usage will be caught by a runtime panic. 
+ let _ = unsafe { entity_cell.get_mut::() }; + } +} diff --git a/crates/bevy_encase_derive/Cargo.toml b/crates/bevy_encase_derive/Cargo.toml index 9184bddd25418..b2f1b92d82bee 100644 --- a/crates/bevy_encase_derive/Cargo.toml +++ b/crates/bevy_encase_derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_encase_derive" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Bevy derive macro for encase" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -12,7 +12,7 @@ keywords = ["bevy"] proc-macro = true [dependencies] -bevy_macro_utils = { path = "../bevy_macro_utils", version = "0.15.0-dev" } +bevy_macro_utils = { path = "../bevy_macro_utils", version = "0.16.0-dev" } encase_derive_impl = "0.10" [lints] diff --git a/crates/bevy_encase_derive/LICENSE-APACHE b/crates/bevy_encase_derive/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_encase_derive/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_encase_derive/LICENSE-MIT b/crates/bevy_encase_derive/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_encase_derive/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_encase_derive/src/lib.rs b/crates/bevy_encase_derive/src/lib.rs index 0432d161f0a6b..15fbdca6a8f4a 100644 --- a/crates/bevy_encase_derive/src/lib.rs +++ b/crates/bevy_encase_derive/src/lib.rs @@ -14,7 +14,7 @@ const ENCASE: &str = "encase"; fn bevy_encase_path() -> syn::Path { let bevy_manifest = BevyManifest::shared(); bevy_manifest - .get_subcrate("render") + .maybe_get_path("bevy_render") .map(|bevy_render_path| { let mut segments = bevy_render_path.segments; segments.push(BevyManifest::parse_str("render_resource")); diff --git a/crates/bevy_gilrs/Cargo.toml b/crates/bevy_gilrs/Cargo.toml index cd20696e77ba3..864df285d98d2 100644 --- a/crates/bevy_gilrs/Cargo.toml +++ b/crates/bevy_gilrs/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gilrs" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Gamepad system made using Gilrs for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -10,15 +10,19 @@ keywords = ["bevy"] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_input = { path = "../bevy_input", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_time = { path = "../bevy_time", version = "0.15.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_input = { path = "../bevy_input", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", +] } # other gilrs = "0.11.0" thiserror = { version = "2", default-features = false } +tracing = { version = "0.1", default-features = false, features = ["std"] } [lints] 
workspace = true diff --git a/crates/bevy_gilrs/LICENSE-APACHE b/crates/bevy_gilrs/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_gilrs/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_gilrs/LICENSE-MIT b/crates/bevy_gilrs/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_gilrs/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_gilrs/src/gilrs_system.rs b/crates/bevy_gilrs/src/gilrs_system.rs index 05f9aa02e9692..69a608a8b8b3a 100644 --- a/crates/bevy_gilrs/src/gilrs_system.rs +++ b/crates/bevy_gilrs/src/gilrs_system.rs @@ -4,8 +4,6 @@ use crate::{ }; use bevy_ecs::event::EventWriter; use bevy_ecs::prelude::Commands; -#[cfg(target_arch = "wasm32")] -use bevy_ecs::system::NonSendMut; use bevy_ecs::system::ResMut; use bevy_input::gamepad::{ GamepadConnection, GamepadConnectionEvent, RawGamepadAxisChangedEvent, @@ -15,101 +13,103 @@ use gilrs::{ev::filter::axis_dpad_to_button, EventType, Filter}; pub fn gilrs_event_startup_system( mut commands: Commands, - #[cfg(target_arch = "wasm32")] mut gilrs: NonSendMut, - #[cfg(not(target_arch = "wasm32"))] mut gilrs: ResMut, + mut gilrs: ResMut, mut gamepads: ResMut, mut events: EventWriter, ) { - for (id, gamepad) in gilrs.0.get().gamepads() { - // Create entity and add to mapping - let entity = commands.spawn_empty().id(); - gamepads.id_to_entity.insert(id, entity); - gamepads.entity_to_id.insert(entity, id); - - events.send(GamepadConnectionEvent { - gamepad: entity, - connection: GamepadConnection::Connected { - name: gamepad.name().to_string(), - vendor_id: gamepad.vendor_id(), - product_id: gamepad.product_id(), - }, - }); - } + gilrs.with(|gilrs| { + for (id, gamepad) in gilrs.gamepads() { + // Create entity and add to mapping + let entity = commands.spawn_empty().id(); + gamepads.id_to_entity.insert(id, entity); + gamepads.entity_to_id.insert(entity, id); + events.write(GamepadConnectionEvent { + gamepad: entity, + connection: GamepadConnection::Connected { + name: gamepad.name().to_string(), + vendor_id: gamepad.vendor_id(), + product_id: gamepad.product_id(), + }, + }); + } + }); } pub fn gilrs_event_system( mut commands: Commands, - #[cfg(target_arch = "wasm32")] mut gilrs: NonSendMut, - #[cfg(not(target_arch = "wasm32"))] mut gilrs: ResMut, + mut gilrs: ResMut, mut gamepads: ResMut, mut events: EventWriter, mut 
connection_events: EventWriter, mut button_events: EventWriter, mut axis_event: EventWriter, ) { - let gilrs = gilrs.0.get(); - while let Some(gilrs_event) = gilrs.next_event().filter_ev(&axis_dpad_to_button, gilrs) { - gilrs.update(&gilrs_event); - match gilrs_event.event { - EventType::Connected => { - let pad = gilrs.gamepad(gilrs_event.id); - let entity = gamepads.get_entity(gilrs_event.id).unwrap_or_else(|| { - let entity = commands.spawn_empty().id(); - gamepads.id_to_entity.insert(gilrs_event.id, entity); - gamepads.entity_to_id.insert(entity, gilrs_event.id); - entity - }); - - let event = GamepadConnectionEvent::new( - entity, - GamepadConnection::Connected { - name: pad.name().to_string(), - vendor_id: pad.vendor_id(), - product_id: pad.product_id(), - }, - ); + gilrs.with(|gilrs| { + while let Some(gilrs_event) = gilrs.next_event().filter_ev(&axis_dpad_to_button, gilrs) { + gilrs.update(&gilrs_event); + match gilrs_event.event { + EventType::Connected => { + let pad = gilrs.gamepad(gilrs_event.id); + let entity = gamepads.get_entity(gilrs_event.id).unwrap_or_else(|| { + let entity = commands.spawn_empty().id(); + gamepads.id_to_entity.insert(gilrs_event.id, entity); + gamepads.entity_to_id.insert(entity, gilrs_event.id); + entity + }); - events.send(event.clone().into()); - connection_events.send(event); - } - EventType::Disconnected => { - let gamepad = gamepads - .id_to_entity - .get(&gilrs_event.id) - .copied() - .expect("mapping should exist from connection"); - let event = GamepadConnectionEvent::new(gamepad, GamepadConnection::Disconnected); - events.send(event.clone().into()); - connection_events.send(event); - } - EventType::ButtonChanged(gilrs_button, raw_value, _) => { - let Some(button) = convert_button(gilrs_button) else { - continue; - }; - let gamepad = gamepads - .id_to_entity - .get(&gilrs_event.id) - .copied() - .expect("mapping should exist from connection"); - events.send(RawGamepadButtonChangedEvent::new(gamepad, button, 
raw_value).into()); - button_events.send(RawGamepadButtonChangedEvent::new( - gamepad, button, raw_value, - )); - } - EventType::AxisChanged(gilrs_axis, raw_value, _) => { - let Some(axis) = convert_axis(gilrs_axis) else { - continue; - }; - let gamepad = gamepads - .id_to_entity - .get(&gilrs_event.id) - .copied() - .expect("mapping should exist from connection"); - events.send(RawGamepadAxisChangedEvent::new(gamepad, axis, raw_value).into()); - axis_event.send(RawGamepadAxisChangedEvent::new(gamepad, axis, raw_value)); - } - _ => (), - }; - } - gilrs.inc(); + let event = GamepadConnectionEvent::new( + entity, + GamepadConnection::Connected { + name: pad.name().to_string(), + vendor_id: pad.vendor_id(), + product_id: pad.product_id(), + }, + ); + events.write(event.clone().into()); + connection_events.write(event); + } + EventType::Disconnected => { + let gamepad = gamepads + .id_to_entity + .get(&gilrs_event.id) + .copied() + .expect("mapping should exist from connection"); + let event = + GamepadConnectionEvent::new(gamepad, GamepadConnection::Disconnected); + events.write(event.clone().into()); + connection_events.write(event); + } + EventType::ButtonChanged(gilrs_button, raw_value, _) => { + let Some(button) = convert_button(gilrs_button) else { + continue; + }; + let gamepad = gamepads + .id_to_entity + .get(&gilrs_event.id) + .copied() + .expect("mapping should exist from connection"); + events.write( + RawGamepadButtonChangedEvent::new(gamepad, button, raw_value).into(), + ); + button_events.write(RawGamepadButtonChangedEvent::new( + gamepad, button, raw_value, + )); + } + EventType::AxisChanged(gilrs_axis, raw_value, _) => { + let Some(axis) = convert_axis(gilrs_axis) else { + continue; + }; + let gamepad = gamepads + .id_to_entity + .get(&gilrs_event.id) + .copied() + .expect("mapping should exist from connection"); + events.write(RawGamepadAxisChangedEvent::new(gamepad, axis, raw_value).into()); + axis_event.write(RawGamepadAxisChangedEvent::new(gamepad, 
axis, raw_value)); + } + _ => (), + }; + } + gilrs.inc(); + }); } diff --git a/crates/bevy_gilrs/src/lib.rs b/crates/bevy_gilrs/src/lib.rs index f9e9bc8dfd176..ce0d5f27f0fd3 100644 --- a/crates/bevy_gilrs/src/lib.rs +++ b/crates/bevy_gilrs/src/lib.rs @@ -14,17 +14,48 @@ mod converter; mod gilrs_system; mod rumble; +#[cfg(not(target_arch = "wasm32"))] +use bevy_utils::synccell::SyncCell; + +#[cfg(target_arch = "wasm32")] +use core::cell::RefCell; + use bevy_app::{App, Plugin, PostUpdate, PreStartup, PreUpdate}; use bevy_ecs::entity::EntityHashMap; use bevy_ecs::prelude::*; use bevy_input::InputSystem; -use bevy_utils::{synccell::SyncCell, tracing::error, HashMap}; +use bevy_platform::collections::HashMap; use gilrs::GilrsBuilder; use gilrs_system::{gilrs_event_startup_system, gilrs_event_system}; use rumble::{play_gilrs_rumble, RunningRumbleEffects}; +use tracing::error; + +#[cfg(target_arch = "wasm32")] +thread_local! { + /// Temporary storage of gilrs data to replace usage of `!Send` resources. This will be replaced with proper + /// storage of `!Send` data after issue #17667 is complete. + /// + /// Using a `thread_local!` here relies on the fact that wasm32 can only be single threaded. Previously, we used a + /// `NonSendMut` parameter, which told Bevy that the system was `!Send`, but now with the removal of `!Send` + /// resource/system parameter usage, there is no internal guarantee that the system will run in only one thread, so + /// we need to rely on the platform to make such a guarantee. 
+ static GILRS: RefCell> = const { RefCell::new(None) }; +} -#[cfg_attr(not(target_arch = "wasm32"), derive(Resource))] -pub(crate) struct Gilrs(pub SyncCell); +#[derive(Resource)] +pub(crate) struct Gilrs { + #[cfg(not(target_arch = "wasm32"))] + cell: SyncCell, +} +impl Gilrs { + #[inline] + pub fn with(&mut self, f: impl FnOnce(&mut gilrs::Gilrs)) { + #[cfg(target_arch = "wasm32")] + GILRS.with(|g| f(g.borrow_mut().as_mut().expect("GILRS was not initialized"))); + #[cfg(not(target_arch = "wasm32"))] + f(self.cell.get()); + } +} /// A [`resource`](Resource) with the mapping of connected [`gilrs::GamepadId`] and their [`Entity`]. #[derive(Debug, Default, Resource)] @@ -63,10 +94,15 @@ impl Plugin for GilrsPlugin { .build() { Ok(gilrs) => { + let g = Gilrs { + #[cfg(not(target_arch = "wasm32"))] + cell: SyncCell::new(gilrs), + }; #[cfg(target_arch = "wasm32")] - app.insert_non_send_resource(Gilrs(SyncCell::new(gilrs))); - #[cfg(not(target_arch = "wasm32"))] - app.insert_resource(Gilrs(SyncCell::new(gilrs))); + GILRS.with(|g| { + g.replace(Some(gilrs)); + }); + app.insert_resource(g); app.init_resource::(); app.init_resource::() .add_systems(PreStartup, gilrs_event_startup_system) diff --git a/crates/bevy_gilrs/src/rumble.rs b/crates/bevy_gilrs/src/rumble.rs index 62c6b0dc7d639..8f41a3ca22fe8 100644 --- a/crates/bevy_gilrs/src/rumble.rs +++ b/crates/bevy_gilrs/src/rumble.rs @@ -1,20 +1,17 @@ //! Handle user specified rumble request events. 
use crate::{Gilrs, GilrsGamepads}; use bevy_ecs::prelude::{EventReader, Res, ResMut, Resource}; -#[cfg(target_arch = "wasm32")] -use bevy_ecs::system::NonSendMut; use bevy_input::gamepad::{GamepadRumbleIntensity, GamepadRumbleRequest}; +use bevy_platform::collections::HashMap; use bevy_time::{Real, Time}; -use bevy_utils::{ - synccell::SyncCell, - tracing::{debug, warn}, - Duration, HashMap, -}; +use bevy_utils::synccell::SyncCell; +use core::time::Duration; use gilrs::{ ff::{self, BaseEffect, BaseEffectType, Repeat, Replay}, GamepadId, }; use thiserror::Error; +use tracing::{debug, warn}; /// A rumble effect that is currently in effect. struct RunningRumble { @@ -23,7 +20,10 @@ struct RunningRumble { /// A ref-counted handle to the specific force-feedback effect /// /// Dropping it will cause the effect to stop - #[allow(dead_code)] + #[expect( + dead_code, + reason = "We don't need to read this field, as its purpose is to keep the rumble effect going until the field is dropped." + )] effect: SyncCell, } @@ -126,42 +126,42 @@ fn handle_rumble_request( } pub(crate) fn play_gilrs_rumble( time: Res>, - #[cfg(target_arch = "wasm32")] mut gilrs: NonSendMut, - #[cfg(not(target_arch = "wasm32"))] mut gilrs: ResMut, + mut gilrs: ResMut, gamepads: Res, mut requests: EventReader, mut running_rumbles: ResMut, ) { - let gilrs = gilrs.0.get(); - let current_time = time.elapsed(); - // Remove outdated rumble effects. - for rumbles in running_rumbles.rumbles.values_mut() { - // `ff::Effect` uses RAII, dropping = deactivating - rumbles.retain(|RunningRumble { deadline, .. }| *deadline >= current_time); - } - running_rumbles - .rumbles - .retain(|_gamepad, rumbles| !rumbles.is_empty()); - - // Add new effects. 
- for rumble in requests.read().cloned() { - let gamepad = rumble.gamepad(); - match handle_rumble_request(&mut running_rumbles, gilrs, &gamepads, rumble, current_time) { - Ok(()) => {} - Err(RumbleError::GilrsError(err)) => { - if let ff::Error::FfNotSupported(_) = err { - debug!("Tried to rumble {gamepad:?}, but it doesn't support force feedback"); - } else { - warn!( - "Tried to handle rumble request for {gamepad:?} but an error occurred: {err}" - ); + gilrs.with(|gilrs| { + let current_time = time.elapsed(); + // Remove outdated rumble effects. + for rumbles in running_rumbles.rumbles.values_mut() { + // `ff::Effect` uses RAII, dropping = deactivating + rumbles.retain(|RunningRumble { deadline, .. }| *deadline >= current_time); + } + running_rumbles + .rumbles + .retain(|_gamepad, rumbles| !rumbles.is_empty()); + + // Add new effects. + for rumble in requests.read().cloned() { + let gamepad = rumble.gamepad(); + match handle_rumble_request(&mut running_rumbles, gilrs, &gamepads, rumble, current_time) { + Ok(()) => {} + Err(RumbleError::GilrsError(err)) => { + if let ff::Error::FfNotSupported(_) = err { + debug!("Tried to rumble {gamepad:?}, but it doesn't support force feedback"); + } else { + warn!( + "Tried to handle rumble request for {gamepad:?} but an error occurred: {err}" + ); + } } - } - Err(RumbleError::GamepadNotFound) => { - warn!("Tried to handle rumble request {gamepad:?} but it doesn't exist!"); - } - }; - } + Err(RumbleError::GamepadNotFound) => { + warn!("Tried to handle rumble request {gamepad:?} but it doesn't exist!"); + } + }; + } + }); } #[cfg(test)] diff --git a/crates/bevy_gizmos/Cargo.toml b/crates/bevy_gizmos/Cargo.toml index e880e985fff52..3a264c6244609 100644 --- a/crates/bevy_gizmos/Cargo.toml +++ b/crates/bevy_gizmos/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gizmos" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides gizmos for Bevy Engine" homepage = 
"https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -15,23 +15,25 @@ bevy_render = ["dep:bevy_render", "bevy_core_pipeline"] [dependencies] # Bevy -bevy_pbr = { path = "../bevy_pbr", version = "0.15.0-dev", optional = true } -bevy_sprite = { path = "../bevy_sprite", version = "0.15.0-dev", optional = true } -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev", optional = true } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev" } -bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.15.0-dev", optional = true } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_gizmos_macros = { path = "macros", version = "0.15.0-dev" } -bevy_time = { path = "../bevy_time", version = "0.15.0-dev" } +bevy_pbr = { path = "../bevy_pbr", version = "0.16.0-dev", optional = true } +bevy_sprite = { path = "../bevy_sprite", version = "0.16.0-dev", optional = true } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev", optional = true } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } 
+bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.16.0-dev", optional = true } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_gizmos_macros = { path = "macros", version = "0.16.0-dev" } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } +# other bytemuck = "1.0" +tracing = { version = "0.1", default-features = false, features = ["std"] } [lints] workspace = true diff --git a/crates/bevy_gizmos/LICENSE-APACHE b/crates/bevy_gizmos/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_gizmos/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_gizmos/LICENSE-MIT b/crates/bevy_gizmos/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_gizmos/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_gizmos/macros/Cargo.toml b/crates/bevy_gizmos/macros/Cargo.toml index 97aebb4d894ba..b38a3c5374bf0 100644 --- a/crates/bevy_gizmos/macros/Cargo.toml +++ b/crates/bevy_gizmos/macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gizmos_macros" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Derive implementations for bevy_gizmos" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -13,7 +13,7 @@ proc-macro = true [dependencies] -bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.15.0-dev" } +bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.16.0-dev" } syn = "2.0" proc-macro2 = "1.0" diff --git a/crates/bevy_gizmos/macros/LICENSE-APACHE b/crates/bevy_gizmos/macros/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_gizmos/macros/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_gizmos/macros/LICENSE-MIT b/crates/bevy_gizmos/macros/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_gizmos/macros/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_gizmos/src/aabb.rs b/crates/bevy_gizmos/src/aabb.rs index b10b2bc946966..16dc7ed773e16 100644 --- a/crates/bevy_gizmos/src/aabb.rs +++ b/crates/bevy_gizmos/src/aabb.rs @@ -1,7 +1,5 @@ //! A module adding debug visualization of [`Aabb`]s. 
-use crate as bevy_gizmos; - use bevy_app::{Plugin, PostUpdate}; use bevy_color::{Color, Oklcha}; use bevy_ecs::{ @@ -9,7 +7,7 @@ use bevy_ecs::{ entity::Entity, query::Without, reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Query, Res}, }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -47,6 +45,7 @@ impl Plugin for AabbGizmoPlugin { } /// The [`GizmoConfigGroup`] used for debug visualizations of [`Aabb`] components on entities #[derive(Clone, Default, Reflect, GizmoConfigGroup)] +#[reflect(Clone, Default)] pub struct AabbGizmoConfigGroup { /// Draws all bounding boxes in the scene when set to `true`. /// diff --git a/crates/bevy_gizmos/src/arcs.rs b/crates/bevy_gizmos/src/arcs.rs index 65f5f67ee7e4f..41647f9fe21a2 100644 --- a/crates/bevy_gizmos/src/arcs.rs +++ b/crates/bevy_gizmos/src/arcs.rs @@ -136,11 +136,11 @@ where /// /// # Arguments /// - `angle`: sets how much of a circle circumference is passed, e.g. PI is half a circle. This - /// value should be in the range (-2 * PI..=2 * PI) + /// value should be in the range (-2 * PI..=2 * PI) /// - `radius`: distance between the arc and its center point /// - `isometry` defines the translation and rotation of the arc. - /// - the translation specifies the center of the arc - /// - the rotation is counter-clockwise starting from `Vec3::Y` + /// - the translation specifies the center of the arc + /// - the rotation is counter-clockwise starting from `Vec3::Y` /// - `color`: color of the arc /// /// # Builder methods @@ -219,10 +219,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. + /// the points is coincident with `center`, nothing is rendered. /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. 
If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case #[inline] pub fn short_arc_3d_between( &mut self, @@ -265,10 +265,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. + /// the points is coincident with `center`, nothing is rendered. /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case. + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case. #[inline] pub fn long_arc_3d_between( &mut self, @@ -352,10 +352,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. + /// the points is coincident with `center`, nothing is rendered. /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case #[inline] pub fn short_arc_2d_between( &mut self, @@ -398,10 +398,10 @@ where /// /// # Notes /// - This method assumes that the points `from` and `to` are distinct from `center`. If one of - /// the points is coincident with `center`, nothing is rendered. 
+ /// the points is coincident with `center`, nothing is rendered. /// - The arc is drawn as a portion of a circle with a radius equal to the distance from the - /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then - /// the results will behave as if this were the case. + /// `center` to `from`. If the distance from `center` to `to` is not equal to the radius, then + /// the results will behave as if this were the case. #[inline] pub fn long_arc_2d_between( &mut self, diff --git a/crates/bevy_gizmos/src/config.rs b/crates/bevy_gizmos/src/config.rs index 76b22bead2495..973fa1cf0fbce 100644 --- a/crates/bevy_gizmos/src/config.rs +++ b/crates/bevy_gizmos/src/config.rs @@ -1,6 +1,5 @@ //! A module for the [`GizmoConfig`] [`Resource`]. -use crate::{self as bevy_gizmos}; pub use bevy_gizmos_macros::GizmoConfigGroup; #[cfg(all( @@ -9,7 +8,7 @@ pub use bevy_gizmos_macros::GizmoConfigGroup; ))] use {crate::GizmoAsset, bevy_asset::Handle, bevy_ecs::component::Component}; -use bevy_ecs::{reflect::ReflectResource, system::Resource}; +use bevy_ecs::{reflect::ReflectResource, resource::Resource}; use bevy_reflect::{std_traits::ReflectDefault, Reflect, TypePath}; use bevy_utils::TypeIdMap; use core::{ @@ -21,6 +20,7 @@ use core::{ /// An enum configuring how line joints will be drawn. #[derive(Debug, Default, Copy, Clone, Reflect, PartialEq, Eq, Hash)] +#[reflect(Default, PartialEq, Hash, Clone)] pub enum GizmoLineJoint { /// Does not draw any line joints. #[default] @@ -38,6 +38,7 @@ pub enum GizmoLineJoint { /// An enum used to configure the style of gizmo lines, similar to CSS line-style #[derive(Copy, Clone, Debug, Default, PartialEq, Reflect)] +#[reflect(Default, PartialEq, Hash, Clone)] #[non_exhaustive] pub enum GizmoLineStyle { /// A solid line without any decorators @@ -84,11 +85,13 @@ pub trait GizmoConfigGroup: Reflect + TypePath + Default {} /// The default gizmo config group. 
#[derive(Default, Reflect, GizmoConfigGroup)] +#[reflect(Default)] pub struct DefaultGizmoConfigGroup; /// Used when the gizmo config group needs to be type-erased. /// Also used for retained gizmos, which can't have a gizmo config group. #[derive(Default, Reflect, GizmoConfigGroup, Debug, Clone)] +#[reflect(Default, Clone)] pub struct ErasedGizmoConfigGroup; /// A [`Resource`] storing [`GizmoConfig`] and [`GizmoConfigGroup`] structs @@ -168,6 +171,7 @@ impl GizmoConfigStore { /// A struct that stores configuration for gizmos. #[derive(Clone, Reflect, Debug)] +#[reflect(Clone, Default)] pub struct GizmoConfig { /// Set to `false` to stop drawing gizmos. /// @@ -209,6 +213,7 @@ impl Default for GizmoConfig { /// A struct that stores configuration for gizmos. #[derive(Clone, Reflect, Debug)] +#[reflect(Clone, Default)] pub struct GizmoLineConfig { /// Line width specified in pixels. /// diff --git a/crates/bevy_gizmos/src/curves.rs b/crates/bevy_gizmos/src/curves.rs index 522bf8ebd5246..2d7a350ca29dc 100644 --- a/crates/bevy_gizmos/src/curves.rs +++ b/crates/bevy_gizmos/src/curves.rs @@ -4,7 +4,10 @@ //! [`GizmoBuffer::curve_3d`] and assorted support items. 
use bevy_color::Color; -use bevy_math::{curve::Curve, Vec2, Vec3}; +use bevy_math::{ + curve::{Curve, CurveExt}, + Vec2, Vec3, +}; use crate::{gizmos::GizmoBuffer, prelude::GizmoConfigGroup}; diff --git a/crates/bevy_gizmos/src/gizmos.rs b/crates/bevy_gizmos/src/gizmos.rs index 3580b41b61f43..b51dd672fe07d 100644 --- a/crates/bevy_gizmos/src/gizmos.rs +++ b/crates/bevy_gizmos/src/gizmos.rs @@ -10,11 +10,15 @@ use core::{ use bevy_color::{Color, LinearRgba}; use bevy_ecs::{ component::Tick, - system::{Deferred, ReadOnlySystemParam, Res, Resource, SystemBuffer, SystemMeta, SystemParam}, + resource::Resource, + system::{ + Deferred, ReadOnlySystemParam, Res, SystemBuffer, SystemMeta, SystemParam, + SystemParamValidationError, + }, world::{unsafe_world_cell::UnsafeWorldCell, World}, }; use bevy_math::{Isometry2d, Isometry3d, Vec2, Vec3}; -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_transform::TransformPoint; use bevy_utils::default; @@ -182,7 +186,10 @@ where state: as SystemParam>::State, } -#[allow(unsafe_code)] +#[expect( + unsafe_code, + reason = "We cannot implement SystemParam without using unsafe code." +)] // SAFETY: All methods are delegated to existing `SystemParam` implementations unsafe impl SystemParam for Gizmos<'_, '_, Config, Clear> where @@ -218,7 +225,7 @@ where state: &Self::State, system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Delegated to existing `SystemParam` implementations. unsafe { GizmosState::::validate_param(&state.state, system_meta, world) } } @@ -254,7 +261,10 @@ where } } -#[allow(unsafe_code)] +#[expect( + unsafe_code, + reason = "We cannot implement ReadOnlySystemParam without using unsafe code." 
+)] // Safety: Each field is `ReadOnlySystemParam`, and Gizmos SystemParam does not mutate world unsafe impl<'w, 's, Config, Clear> ReadOnlySystemParam for Gizmos<'w, 's, Config, Clear> where @@ -267,6 +277,7 @@ where /// Buffer for gizmo vertex data. #[derive(Debug, Clone, Reflect)] +#[reflect(Default)] pub struct GizmoBuffer where Config: GizmoConfigGroup, @@ -277,7 +288,7 @@ where pub(crate) list_colors: Vec, pub(crate) strip_positions: Vec, pub(crate) strip_colors: Vec, - #[reflect(ignore)] + #[reflect(ignore, clone)] pub(crate) marker: PhantomData<(Config, Clear)>, } @@ -813,8 +824,7 @@ where let polymorphic_color: Color = color.into(); let linear_color = LinearRgba::from(polymorphic_color); - self.list_colors - .extend(iter::repeat(linear_color).take(count)); + self.list_colors.extend(iter::repeat_n(linear_color, count)); } #[inline] diff --git a/crates/bevy_gizmos/src/grid.rs b/crates/bevy_gizmos/src/grid.rs index 03ee5c665445c..cdcfc41236fd5 100644 --- a/crates/bevy_gizmos/src/grid.rs +++ b/crates/bevy_gizmos/src/grid.rs @@ -186,10 +186,9 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the grid. - /// - the translation specifies the center of the grid - /// - defines the orientation of the grid, by default - /// we assume the grid is contained in a plane parallel - /// to the XY plane + /// - the translation specifies the center of the grid + /// - defines the orientation of the grid, by default we assume the grid is contained in a + /// plane parallel to the XY plane /// - `cell_count`: defines the amount of cells in the x and y axes /// - `spacing`: defines the distance between cells along the x and y axes /// - `color`: color of the grid @@ -241,9 +240,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the grid. 
- /// - the translation specifies the center of the grid - /// - defines the orientation of the grid, by default - /// we assume the grid is aligned with all axes + /// - the translation specifies the center of the grid + /// - defines the orientation of the grid, by default we assume the grid is aligned with all axes /// - `cell_count`: defines the amount of cells in the x, y and z axes /// - `spacing`: defines the distance between cells along the x, y and z axes /// - `color`: color of the grid @@ -295,9 +293,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the grid. - /// - the translation specifies the center of the grid - /// - defines the orientation of the grid, by default - /// we assume the grid is aligned with all axes + /// - the translation specifies the center of the grid + /// - defines the orientation of the grid, by default we assume the grid is aligned with all axes /// - `cell_count`: defines the amount of cells in the x and y axes /// - `spacing`: defines the distance between cells along the x and y axes /// - `color`: color of the grid @@ -347,7 +344,6 @@ where } } -#[allow(clippy::too_many_arguments)] fn draw_grid( gizmos: &mut GizmoBuffer, isometry: Isometry3d, diff --git a/crates/bevy_gizmos/src/lib.rs b/crates/bevy_gizmos/src/lib.rs old mode 100644 new mode 100755 index 242f19bc6bd5d..3cd2c7c40447a --- a/crates/bevy_gizmos/src/lib.rs +++ b/crates/bevy_gizmos/src/lib.rs @@ -19,6 +19,9 @@ //! //! See the documentation on [Gizmos](crate::gizmos::Gizmos) for more examples. +// Required to make proc macros work in bevy itself. +extern crate self as bevy_gizmos; + /// System set label for the systems handling the rendering of gizmos. 
#[derive(SystemSet, Clone, Debug, Hash, PartialEq, Eq)] pub enum GizmoRenderSystem { @@ -76,12 +79,12 @@ pub mod prelude { } use bevy_app::{App, FixedFirst, FixedLast, Last, Plugin, RunFixedMainLoop}; -use bevy_asset::{Asset, AssetApp, AssetId, Assets, Handle}; +use bevy_asset::{Asset, AssetApp, Assets, Handle}; use bevy_ecs::{ - schedule::{IntoSystemConfigs, SystemSet}, - system::{Res, ResMut, Resource}, + resource::Resource, + schedule::{IntoScheduleConfigs, SystemSet}, + system::{Res, ResMut}, }; -use bevy_math::{Vec3, Vec4}; use bevy_reflect::TypePath; #[cfg(all( @@ -95,6 +98,7 @@ use crate::{config::ErasedGizmoConfigGroup, gizmos::GizmoBuffer}; #[cfg(feature = "bevy_render")] use { crate::retained::extract_linegizmos, + bevy_asset::{weak_handle, AssetId}, bevy_ecs::{ component::Component, entity::Entity, @@ -104,7 +108,7 @@ use { Commands, SystemParamItem, }, }, - bevy_math::{Affine3, Affine3A}, + bevy_math::{Affine3, Affine3A, Vec4}, bevy_render::{ extract_component::{ComponentUniforms, DynamicUniformIndex, UniformComponentPlugin}, render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets}, @@ -128,18 +132,19 @@ use { use bevy_render::render_resource::{VertexAttribute, VertexBufferLayout, VertexStepMode}; use bevy_time::Fixed; use bevy_utils::TypeIdMap; -use config::{ - DefaultGizmoConfigGroup, GizmoConfig, GizmoConfigGroup, GizmoConfigStore, GizmoLineJoint, -}; +#[cfg(feature = "bevy_render")] +use config::GizmoLineJoint; +use config::{DefaultGizmoConfigGroup, GizmoConfig, GizmoConfigGroup, GizmoConfigStore}; use core::{any::TypeId, marker::PhantomData, mem}; use gizmos::{GizmoStorage, Swap}; #[cfg(all(feature = "bevy_pbr", feature = "bevy_render"))] use light::LightGizmoPlugin; #[cfg(feature = "bevy_render")] -const LINE_SHADER_HANDLE: Handle = Handle::weak_from_u128(7414812689238026784); +const LINE_SHADER_HANDLE: Handle = weak_handle!("15dc5869-ad30-4664-b35a-4137cb8804a1"); #[cfg(feature = "bevy_render")] -const 
LINE_JOINT_SHADER_HANDLE: Handle = Handle::weak_from_u128(1162780797909187908); +const LINE_JOINT_SHADER_HANDLE: Handle = + weak_handle!("7b5bdda5-df81-4711-a6cf-e587700de6f2"); /// A [`Plugin`] that provides an immediate mode drawing api for visual debugging. /// @@ -189,16 +194,16 @@ impl Plugin for GizmoPlugin { if app.is_plugin_added::() { app.add_plugins(pipeline_2d::LineGizmo2dPlugin); } else { - bevy_utils::tracing::warn!("bevy_sprite feature is enabled but bevy_sprite::SpritePlugin was not detected. Are you sure you loaded GizmoPlugin after SpritePlugin?"); + tracing::warn!("bevy_sprite feature is enabled but bevy_sprite::SpritePlugin was not detected. Are you sure you loaded GizmoPlugin after SpritePlugin?"); } #[cfg(feature = "bevy_pbr")] if app.is_plugin_added::() { app.add_plugins(pipeline_3d::LineGizmo3dPlugin); } else { - bevy_utils::tracing::warn!("bevy_pbr feature is enabled but bevy_pbr::PbrPlugin was not detected. Are you sure you loaded GizmoPlugin after PbrPlugin?"); + tracing::warn!("bevy_pbr feature is enabled but bevy_pbr::PbrPlugin was not detected. Are you sure you loaded GizmoPlugin after PbrPlugin?"); } } else { - bevy_utils::tracing::warn!("bevy_render feature is enabled but RenderApp was not detected. Are you sure you loaded GizmoPlugin after RenderPlugin?"); + tracing::warn!("bevy_render feature is enabled but RenderApp was not detected. 
Are you sure you loaded GizmoPlugin after RenderPlugin?"); } } @@ -419,8 +424,9 @@ fn extract_gizmo_data( handles: Extract>, config: Extract>, ) { - use bevy_utils::warn_once; + use bevy_utils::once; use config::GizmoLineStyle; + use tracing::warn; for (group_type_id, handle) in &handles.handles { let Some((config, _)) = config.get_config_dyn(group_type_id) else { @@ -447,10 +453,10 @@ fn extract_gizmo_data( } = config.line.style { if gap_scale <= 0.0 { - warn_once!("When using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the gap scale should be greater than zero."); + once!(warn!("When using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the gap scale should be greater than zero.")); } if line_scale <= 0.0 { - warn_once!("When using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the line scale should be greater than zero."); + once!(warn!("When using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the line scale should be greater than zero.")); } (gap_scale, line_scale) } else { @@ -497,7 +503,7 @@ struct LineGizmoUniform { line_scale: f32, /// WebGL2 structs must be 16 byte aligned. #[cfg(feature = "webgl")] - _padding: Vec3, + _padding: bevy_math::Vec3, } /// A collection of gizmos. diff --git a/crates/bevy_gizmos/src/light.rs b/crates/bevy_gizmos/src/light.rs index 9a618ac85acc7..7f7dadacc26ec 100644 --- a/crates/bevy_gizmos/src/light.rs +++ b/crates/bevy_gizmos/src/light.rs @@ -2,7 +2,7 @@ use core::f32::consts::PI; -use crate::{self as bevy_gizmos, primitives::dim3::GizmoPrimitive3d}; +use crate::primitives::dim3::GizmoPrimitive3d; use bevy_app::{Plugin, PostUpdate}; use bevy_color::{ @@ -14,7 +14,7 @@ use bevy_ecs::{ entity::Entity, query::Without, reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Query, Res}, }; use bevy_math::{ @@ -133,6 +133,7 @@ impl Plugin for LightGizmoPlugin { /// Configures how a color is attributed to a light gizmo. 
#[derive(Debug, Clone, Copy, Default, Reflect)] +#[reflect(Clone, Default)] pub enum LightGizmoColor { /// User-specified color. Manual(Color), @@ -147,6 +148,7 @@ pub enum LightGizmoColor { /// The [`GizmoConfigGroup`] used to configure the visualization of lights. #[derive(Clone, Reflect, GizmoConfigGroup)] +#[reflect(Clone, Default)] pub struct LightGizmoConfigGroup { /// Draw a gizmo for all lights if true. /// diff --git a/crates/bevy_gizmos/src/pipeline_2d.rs b/crates/bevy_gizmos/src/pipeline_2d.rs index 89d6cec6260b6..3a4305549108b 100644 --- a/crates/bevy_gizmos/src/pipeline_2d.rs +++ b/crates/bevy_gizmos/src/pipeline_2d.rs @@ -9,8 +9,9 @@ use bevy_core_pipeline::core_2d::{Transparent2d, CORE_2D_DEPTH_FORMAT}; use bevy_ecs::{ prelude::Entity, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs}, - system::{Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs, + system::{Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::BevyDefault as _; @@ -27,7 +28,7 @@ use bevy_render::{ Render, RenderApp, RenderSet, }; use bevy_sprite::{Mesh2dPipeline, Mesh2dPipelineKey, SetMesh2dViewBindGroup}; -use bevy_utils::tracing::error; +use tracing::error; pub struct LineGizmo2dPlugin; @@ -142,8 +143,8 @@ impl SpecializedRenderPipeline for LineGizmoPipeline { primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { format: CORE_2D_DEPTH_FORMAT, - depth_write_enabled: true, - depth_compare: CompareFunction::GreaterEqual, + depth_write_enabled: false, + depth_compare: CompareFunction::Always, stencil: StencilState { front: StencilFaceState::IGNORE, back: StencilFaceState::IGNORE, @@ -243,8 +244,8 @@ impl SpecializedRenderPipeline for LineJointGizmoPipeline { primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { format: CORE_2D_DEPTH_FORMAT, - depth_write_enabled: true, - depth_compare: CompareFunction::GreaterEqual, + depth_write_enabled: false, + depth_compare: CompareFunction::Always, 
stencil: StencilState { front: StencilFaceState::IGNORE, back: StencilFaceState::IGNORE, @@ -288,7 +289,6 @@ type DrawLineJointGizmo2d = ( DrawLineJointGizmo, ); -#[allow(clippy::too_many_arguments)] fn queue_line_gizmos_2d( draw_functions: Res>, pipeline: Res, @@ -297,7 +297,7 @@ fn queue_line_gizmos_2d( line_gizmos: Query<(Entity, &MainEntity, &GizmoMeshConfig)>, line_gizmo_assets: Res>, mut transparent_render_phases: ResMut>, - mut views: Query<(Entity, &ExtractedView, &Msaa, Option<&RenderLayers>)>, + mut views: Query<(&ExtractedView, &Msaa, Option<&RenderLayers>)>, ) { let draw_function = draw_functions.read().get_id::().unwrap(); let draw_function_strip = draw_functions @@ -305,8 +305,9 @@ fn queue_line_gizmos_2d( .get_id::() .unwrap(); - for (view_entity, view, msaa, render_layers) in &mut views { - let Some(transparent_phase) = transparent_render_phases.get_mut(&view_entity) else { + for (view, msaa, render_layers) in &mut views { + let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) + else { continue; }; @@ -340,6 +341,8 @@ fn queue_line_gizmos_2d( sort_key: FloatOrd(f32::INFINITY), batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, + indexed: false, }); } @@ -360,13 +363,13 @@ fn queue_line_gizmos_2d( sort_key: FloatOrd(f32::INFINITY), batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, + indexed: false, }); } } } } - -#[allow(clippy::too_many_arguments)] fn queue_line_joint_gizmos_2d( draw_functions: Res>, pipeline: Res, @@ -375,15 +378,16 @@ fn queue_line_joint_gizmos_2d( line_gizmos: Query<(Entity, &MainEntity, &GizmoMeshConfig)>, line_gizmo_assets: Res>, mut transparent_render_phases: ResMut>, - mut views: Query<(Entity, &ExtractedView, &Msaa, Option<&RenderLayers>)>, + mut views: Query<(&ExtractedView, &Msaa, Option<&RenderLayers>)>, ) { let draw_function = draw_functions .read() .get_id::() .unwrap(); - for (view_entity, view, msaa, 
render_layers) in &mut views { - let Some(transparent_phase) = transparent_render_phases.get_mut(&view_entity) else { + for (view, msaa, render_layers) in &mut views { + let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) + else { continue; }; @@ -419,6 +423,8 @@ fn queue_line_joint_gizmos_2d( sort_key: FloatOrd(f32::INFINITY), batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, + indexed: false, }); } } diff --git a/crates/bevy_gizmos/src/pipeline_3d.rs b/crates/bevy_gizmos/src/pipeline_3d.rs index 025cc4c7c033b..799793e6cbba6 100644 --- a/crates/bevy_gizmos/src/pipeline_3d.rs +++ b/crates/bevy_gizmos/src/pipeline_3d.rs @@ -7,14 +7,16 @@ use crate::{ use bevy_app::{App, Plugin}; use bevy_core_pipeline::{ core_3d::{Transparent3d, CORE_3D_DEPTH_FORMAT}, + oit::OrderIndependentTransparencySettings, prepass::{DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass}, }; use bevy_ecs::{ prelude::Entity, query::Has, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs}, - system::{Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs, + system::{Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::BevyDefault as _; @@ -30,7 +32,7 @@ use bevy_render::{ view::{ExtractedView, Msaa, RenderLayers, ViewTarget}, Render, RenderApp, RenderSet, }; -use bevy_utils::tracing::error; +use tracing::error; pub struct LineGizmo3dPlugin; impl Plugin for LineGizmo3dPlugin { @@ -283,7 +285,6 @@ type DrawLineJointGizmo3d = ( DrawLineJointGizmo, ); -#[allow(clippy::too_many_arguments)] fn queue_line_gizmos_3d( draw_functions: Res>, pipeline: Res, @@ -292,8 +293,7 @@ fn queue_line_gizmos_3d( line_gizmos: Query<(Entity, &MainEntity, &GizmoMeshConfig)>, line_gizmo_assets: Res>, mut transparent_render_phases: ResMut>, - mut views: Query<( - Entity, + views: Query<( &ExtractedView, &Msaa, Option<&RenderLayers>, @@ -302,6 +302,7 @@ fn queue_line_gizmos_3d( Has, Has, 
Has, + Has, ), )>, ) { @@ -312,14 +313,14 @@ fn queue_line_gizmos_3d( .unwrap(); for ( - view_entity, view, msaa, render_layers, - (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), - ) in &mut views + (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass, oit), + ) in &views { - let Some(transparent_phase) = transparent_render_phases.get_mut(&view_entity) else { + let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) + else { continue; }; @@ -344,6 +345,10 @@ fn queue_line_gizmos_3d( view_key |= MeshPipelineKey::DEFERRED_PREPASS; } + if oit { + view_key |= MeshPipelineKey::OIT_ENABLED; + } + for (entity, main_entity, config) in &line_gizmos { if !config.render_layers.intersects(render_layers) { continue; @@ -371,6 +376,7 @@ fn queue_line_gizmos_3d( distance: 0., batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + indexed: true, }); } @@ -392,13 +398,13 @@ fn queue_line_gizmos_3d( distance: 0., batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + indexed: true, }); } } } } -#[allow(clippy::too_many_arguments)] fn queue_line_joint_gizmos_3d( draw_functions: Res>, pipeline: Res, @@ -407,8 +413,7 @@ fn queue_line_joint_gizmos_3d( line_gizmos: Query<(Entity, &MainEntity, &GizmoMeshConfig)>, line_gizmo_assets: Res>, mut transparent_render_phases: ResMut>, - mut views: Query<( - Entity, + views: Query<( &ExtractedView, &Msaa, Option<&RenderLayers>, @@ -426,14 +431,14 @@ fn queue_line_joint_gizmos_3d( .unwrap(); for ( - view_entity, view, msaa, render_layers, (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), - ) in &mut views + ) in &views { - let Some(transparent_phase) = transparent_render_phases.get_mut(&view_entity) else { + let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) + else { continue; }; @@ -488,6 +493,7 @@ fn queue_line_joint_gizmos_3d( distance: 0., batch_range: 0..1, extra_index: 
PhaseItemExtraIndex::None, + indexed: true, }); } } diff --git a/crates/bevy_gizmos/src/primitives/dim2.rs b/crates/bevy_gizmos/src/primitives/dim2.rs index beb9c6f315f24..9535c28fbd0ab 100644 --- a/crates/bevy_gizmos/src/primitives/dim2.rs +++ b/crates/bevy_gizmos/src/primitives/dim2.rs @@ -540,10 +540,7 @@ where } // draw normal of the plane (orthogonal to the plane itself) let normal = primitive.normal; - let normal_segment = Segment2d { - direction: normal, - half_length: HALF_MIN_LINE_LEN, - }; + let normal_segment = Segment2d::from_direction_and_length(normal, HALF_MIN_LINE_LEN * 2.); self.primitive_2d( &normal_segment, // offset the normal so it starts on the plane line @@ -577,8 +574,8 @@ where { gizmos: &'a mut GizmoBuffer, - direction: Dir2, // Direction of the line segment - half_length: f32, // Half-length of the line segment + point1: Vec2, // First point of the segment + point2: Vec2, // Second point of the segment isometry: Isometry2d, // isometric transformation of the line segment color: Color, // color of the line segment @@ -616,8 +613,8 @@ where ) -> Self::Output<'_> { Segment2dBuilder { gizmos: self, - direction: primitive.direction, - half_length: primitive.half_length, + point1: primitive.point1(), + point2: primitive.point2(), isometry: isometry.into(), color: color.into(), @@ -637,14 +634,14 @@ where return; } - let direction = self.direction * self.half_length; - let start = self.isometry * (-direction); - let end = self.isometry * direction; + let segment = Segment2d::new(self.point1, self.point2).transformed(self.isometry); if self.draw_arrow { - self.gizmos.arrow_2d(start, end, self.color); + self.gizmos + .arrow_2d(segment.point1(), segment.point2(), self.color); } else { - self.gizmos.line_2d(start, end, self.color); + self.gizmos + .line_2d(segment.point1(), segment.point2(), self.color); } } } diff --git a/crates/bevy_gizmos/src/primitives/dim3.rs b/crates/bevy_gizmos/src/primitives/dim3.rs index 1af21869a9781..898850ddea901 100644 
--- a/crates/bevy_gizmos/src/primitives/dim3.rs +++ b/crates/bevy_gizmos/src/primitives/dim3.rs @@ -228,9 +228,8 @@ where return; } - let isometry = isometry.into(); - let direction = primitive.direction.as_vec3(); - self.line(isometry * direction, isometry * (-direction), color); + let transformed = primitive.transformed(isometry); + self.line(transformed.point1(), transformed.point2(), color); } } @@ -413,7 +412,7 @@ where Config: GizmoConfigGroup, Clear: 'static + Send + Sync, { - /// Set the number of lines used to approximate the top an bottom of the cylinder geometry. + /// Set the number of lines used to approximate the top and bottom of the cylinder geometry. pub fn resolution(mut self, resolution: u32) -> Self { self.resolution = resolution; self diff --git a/crates/bevy_gizmos/src/primitives/helpers.rs b/crates/bevy_gizmos/src/primitives/helpers.rs index f6cdfcf0d3cd4..37253b14a9ac9 100644 --- a/crates/bevy_gizmos/src/primitives/helpers.rs +++ b/crates/bevy_gizmos/src/primitives/helpers.rs @@ -15,7 +15,7 @@ pub(crate) fn single_circle_coordinate(radius: f32, resolution: u32, nth_point: /// Generates an iterator over the coordinates of a circle. /// -/// The coordinates form a open circle, meaning the first and last points aren't the same. +/// The coordinates form an open circle, meaning the first and last points aren't the same. /// /// This function creates an iterator that yields the positions of points approximating a /// circle with the given radius, divided into linear segments. 
The iterator produces `resolution` diff --git a/crates/bevy_gizmos/src/retained.rs b/crates/bevy_gizmos/src/retained.rs index 9cb6791aca182..88610b9744203 100644 --- a/crates/bevy_gizmos/src/retained.rs +++ b/crates/bevy_gizmos/src/retained.rs @@ -3,11 +3,8 @@ use core::ops::{Deref, DerefMut}; use bevy_asset::Handle; -use bevy_ecs::{ - component::{require, Component}, - reflect::ReflectComponent, -}; -use bevy_reflect::Reflect; +use bevy_ecs::{component::Component, reflect::ReflectComponent}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_transform::components::Transform; #[cfg(feature = "bevy_render")] @@ -76,7 +73,7 @@ impl DerefMut for GizmoAsset { /// /// [`Gizmos`]: crate::gizmos::Gizmos #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component)] +#[reflect(Component, Clone, Default)] #[require(Transform)] pub struct Gizmo { /// The handle to the gizmo to draw. @@ -106,7 +103,8 @@ pub(crate) fn extract_linegizmos( ) { use bevy_math::Affine3; use bevy_render::sync_world::{MainEntity, TemporaryRenderEntity}; - use bevy_utils::warn_once; + use bevy_utils::once; + use tracing::warn; use crate::config::GizmoLineStyle; @@ -124,10 +122,10 @@ pub(crate) fn extract_linegizmos( } = gizmo.line_config.style { if gap_scale <= 0.0 { - warn_once!("when using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the gap scale should be greater than zero"); + once!(warn!("when using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the gap scale should be greater than zero")); } if line_scale <= 0.0 { - warn_once!("when using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the line scale should be greater than zero"); + once!(warn!("when using gizmos with the line style `GizmoLineStyle::Dashed{{..}}` the line scale should be greater than zero")); } (gap_scale, line_scale) } else { diff --git a/crates/bevy_gizmos/src/rounded_box.rs b/crates/bevy_gizmos/src/rounded_box.rs index 6f0df7ac0ec5e..530d4f8617a54 100644 --- 
a/crates/bevy_gizmos/src/rounded_box.rs +++ b/crates/bevy_gizmos/src/rounded_box.rs @@ -238,10 +238,9 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the rectangle. - /// - the translation specifies the center of the rectangle - /// - defines orientation of the rectangle, by default we - /// assume the rectangle is contained in a plane parallel - /// to the XY plane. + /// - the translation specifies the center of the rectangle + /// - defines orientation of the rectangle, by default we assume the rectangle is contained in + /// a plane parallel to the XY plane. /// - `size`: defines the size of the rectangle. This refers to the 'outer size', similar to a bounding box. /// - `color`: color of the rectangle /// @@ -249,7 +248,7 @@ where /// /// - The corner radius can be adjusted with the `.corner_radius(...)` method. /// - The resolution of the arcs at each corner (i.e. the level of detail) can be adjusted with the - /// `.arc_resolution(...)` method. + /// `.arc_resolution(...)` method. /// /// # Example /// ``` @@ -293,9 +292,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the rectangle. - /// - the translation specifies the center of the rectangle - /// - defines orientation of the rectangle, by default we - /// assume the rectangle aligned with all axes. + /// - the translation specifies the center of the rectangle + /// - defines orientation of the rectangle, by default we assume the rectangle aligned with all axes. /// - `size`: defines the size of the rectangle. This refers to the 'outer size', similar to a bounding box. /// - `color`: color of the rectangle /// @@ -303,7 +301,7 @@ where /// /// - The corner radius can be adjusted with the `.corner_radius(...)` method. /// - The resolution of the arcs at each corner (i.e. the level of detail) can be adjusted with the - /// `.arc_resolution(...)` method. + /// `.arc_resolution(...)` method. 
/// /// # Example /// ``` @@ -351,9 +349,8 @@ where /// # Arguments /// /// - `isometry` defines the translation and rotation of the cuboid. - /// - the translation specifies the center of the cuboid - /// - defines orientation of the cuboid, by default we - /// assume the cuboid aligned with all axes. + /// - the translation specifies the center of the cuboid + /// - defines orientation of the cuboid, by default we assume the cuboid aligned with all axes. /// - `size`: defines the size of the cuboid. This refers to the 'outer size', similar to a bounding box. /// - `color`: color of the cuboid /// @@ -361,7 +358,7 @@ where /// /// - The edge radius can be adjusted with the `.edge_radius(...)` method. /// - The resolution of the arcs at each edge (i.e. the level of detail) can be adjusted with the - /// `.arc_resolution(...)` method. + /// `.arc_resolution(...)` method. /// /// # Example /// ``` diff --git a/crates/bevy_gltf/Cargo.toml b/crates/bevy_gltf/Cargo.toml index 9bc1ca3e7d047..a67ab2276c352 100644 --- a/crates/bevy_gltf/Cargo.toml +++ b/crates/bevy_gltf/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_gltf" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Bevy Engine GLTF loading" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,35 +9,37 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -dds = ["bevy_render/dds", "bevy_image/dds", "bevy_core_pipeline/dds"] pbr_transmission_textures = ["bevy_pbr/pbr_transmission_textures"] pbr_multi_layer_material_textures = [ "bevy_pbr/pbr_multi_layer_material_textures", ] pbr_anisotropy_texture = ["bevy_pbr/pbr_anisotropy_texture"] +pbr_specular_textures = ["bevy_pbr/pbr_specular_textures"] [dependencies] # bevy -bevy_animation = { path = "../bevy_animation", version = "0.15.0-dev", optional = true } -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = 
"0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev" } -bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_pbr = { path = "../bevy_pbr", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", -] } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_scene = { path = "../bevy_scene", version = "0.15.0-dev", features = [ +bevy_animation = { path = "../bevy_animation", version = "0.16.0-dev", optional = true } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_mesh = { path = "../bevy_mesh", version = "0.16.0-dev" } +bevy_pbr = { path = "../bevy_pbr", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_scene = { path = "../bevy_scene", version = "0.16.0-dev", features = [ "bevy_render", ] } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_tasks = { path = "../bevy_tasks", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } +bevy_utils = { path = 
"../bevy_utils", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", + "serialize", +] } # other gltf = { version = "1.4.0", default-features = false, features = [ @@ -55,13 +57,16 @@ gltf = { version = "1.4.0", default-features = false, features = [ ] } thiserror = { version = "2", default-features = false } base64 = "0.22.0" +fixedbitset = "0.5" +itertools = "0.14" percent-encoding = "2.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1" smallvec = "1.11" +tracing = { version = "0.1", default-features = false, features = ["std"] } [dev-dependencies] -bevy_log = { path = "../bevy_log", version = "0.15.0-dev" } +bevy_log = { path = "../bevy_log", version = "0.16.0-dev" } [lints] workspace = true diff --git a/crates/bevy_gltf/LICENSE-APACHE b/crates/bevy_gltf/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_gltf/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_gltf/LICENSE-MIT b/crates/bevy_gltf/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_gltf/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_gltf/src/assets.rs b/crates/bevy_gltf/src/assets.rs new file mode 100644 index 0000000000000..fe3303dd81059 --- /dev/null +++ b/crates/bevy_gltf/src/assets.rs @@ -0,0 +1,315 @@ +//! 
Representation of assets present in a glTF file + +#[cfg(feature = "bevy_animation")] +use bevy_animation::AnimationClip; +use bevy_asset::{Asset, Handle}; +use bevy_ecs::{component::Component, reflect::ReflectComponent}; +use bevy_mesh::{skinning::SkinnedMeshInverseBindposes, Mesh}; +use bevy_pbr::StandardMaterial; +use bevy_platform::collections::HashMap; +use bevy_reflect::{prelude::ReflectDefault, Reflect, TypePath}; +use bevy_scene::Scene; + +use crate::GltfAssetLabel; + +/// Representation of a loaded glTF file. +#[derive(Asset, Debug, TypePath)] +pub struct Gltf { + /// All scenes loaded from the glTF file. + pub scenes: Vec>, + /// Named scenes loaded from the glTF file. + pub named_scenes: HashMap, Handle>, + /// All meshes loaded from the glTF file. + pub meshes: Vec>, + /// Named meshes loaded from the glTF file. + pub named_meshes: HashMap, Handle>, + /// All materials loaded from the glTF file. + pub materials: Vec>, + /// Named materials loaded from the glTF file. + pub named_materials: HashMap, Handle>, + /// All nodes loaded from the glTF file. + pub nodes: Vec>, + /// Named nodes loaded from the glTF file. + pub named_nodes: HashMap, Handle>, + /// All skins loaded from the glTF file. + pub skins: Vec>, + /// Named skins loaded from the glTF file. + pub named_skins: HashMap, Handle>, + /// Default scene to be displayed. + pub default_scene: Option>, + /// All animations loaded from the glTF file. + #[cfg(feature = "bevy_animation")] + pub animations: Vec>, + /// Named animations loaded from the glTF file. + #[cfg(feature = "bevy_animation")] + pub named_animations: HashMap, Handle>, + /// The gltf root of the gltf asset, see . Only has a value when `GltfLoaderSettings::include_source` is true. + pub source: Option, +} + +/// A glTF mesh, which may consist of multiple [`GltfPrimitives`](GltfPrimitive) +/// and an optional [`GltfExtras`]. 
+/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfMesh { + /// Index of the mesh inside the scene + pub index: usize, + /// Computed name for a mesh - either a user defined mesh name from gLTF or a generated name from index + pub name: String, + /// Primitives of the glTF mesh. + pub primitives: Vec, + /// Additional data. + pub extras: Option, +} + +impl GltfMesh { + /// Create a mesh extracting name and index from glTF def + pub fn new( + mesh: &gltf::Mesh, + primitives: Vec, + extras: Option, + ) -> Self { + Self { + index: mesh.index(), + name: if let Some(name) = mesh.name() { + name.to_string() + } else { + format!("GltfMesh{}", mesh.index()) + }, + primitives, + extras, + } + } + + /// Subasset label for this mesh within the gLTF parent asset. + pub fn asset_label(&self) -> GltfAssetLabel { + GltfAssetLabel::Mesh(self.index) + } +} + +/// A glTF node with all of its child nodes, its [`GltfMesh`], +/// [`Transform`](bevy_transform::prelude::Transform), its optional [`GltfSkin`] +/// and an optional [`GltfExtras`]. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-node). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfNode { + /// Index of the node inside the scene + pub index: usize, + /// Computed name for a node - either a user defined node name from gLTF or a generated name from index + pub name: String, + /// Direct children of the node. + pub children: Vec>, + /// Mesh of the node. + pub mesh: Option>, + /// Skin of the node. + pub skin: Option>, + /// Local transform. + pub transform: bevy_transform::prelude::Transform, + /// Is this node used as an animation root + #[cfg(feature = "bevy_animation")] + pub is_animation_root: bool, + /// Additional data. 
+ pub extras: Option, +} + +impl GltfNode { + /// Create a node extracting name and index from glTF def + pub fn new( + node: &gltf::Node, + children: Vec>, + mesh: Option>, + transform: bevy_transform::prelude::Transform, + skin: Option>, + extras: Option, + ) -> Self { + Self { + index: node.index(), + name: if let Some(name) = node.name() { + name.to_string() + } else { + format!("GltfNode{}", node.index()) + }, + children, + mesh, + transform, + skin, + #[cfg(feature = "bevy_animation")] + is_animation_root: false, + extras, + } + } + + /// Create a node with animation root mark + #[cfg(feature = "bevy_animation")] + pub fn with_animation_root(self, is_animation_root: bool) -> Self { + Self { + is_animation_root, + ..self + } + } + + /// Subasset label for this node within the gLTF parent asset. + pub fn asset_label(&self) -> GltfAssetLabel { + GltfAssetLabel::Node(self.index) + } +} + +/// Part of a [`GltfMesh`] that consists of a [`Mesh`], an optional [`StandardMaterial`] and [`GltfExtras`]. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh-primitive). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfPrimitive { + /// Index of the primitive inside the mesh + pub index: usize, + /// Index of the parent [`GltfMesh`] of this primitive + pub parent_mesh_index: usize, + /// Computed name for a primitive - either a user defined primitive name from gLTF or a generated name from index + pub name: String, + /// Topology to be rendered. + pub mesh: Handle, + /// Material to apply to the `mesh`. + pub material: Option>, + /// Additional data. + pub extras: Option, + /// Additional data of the `material`. 
+ pub material_extras: Option, +} + +impl GltfPrimitive { + /// Create a primitive extracting name and index from glTF def + pub fn new( + gltf_mesh: &gltf::Mesh, + gltf_primitive: &gltf::Primitive, + mesh: Handle, + material: Option>, + extras: Option, + material_extras: Option, + ) -> Self { + GltfPrimitive { + index: gltf_primitive.index(), + parent_mesh_index: gltf_mesh.index(), + name: { + let mesh_name = gltf_mesh.name().unwrap_or("Mesh"); + if gltf_mesh.primitives().len() > 1 { + format!("{}.{}", mesh_name, gltf_primitive.index()) + } else { + mesh_name.to_string() + } + }, + mesh, + material, + extras, + material_extras, + } + } + + /// Subasset label for this primitive within its parent [`GltfMesh`] within the gLTF parent asset. + pub fn asset_label(&self) -> GltfAssetLabel { + GltfAssetLabel::Primitive { + mesh: self.parent_mesh_index, + primitive: self.index, + } + } +} + +/// A glTF skin with all of its joint nodes, [`SkinnedMeshInverseBindposes`](bevy_mesh::skinning::SkinnedMeshInverseBindposes) +/// and an optional [`GltfExtras`]. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-skin). +#[derive(Asset, Debug, Clone, TypePath)] +pub struct GltfSkin { + /// Index of the skin inside the scene + pub index: usize, + /// Computed name for a skin - either a user defined skin name from gLTF or a generated name from index + pub name: String, + /// All the nodes that form this skin. + pub joints: Vec>, + /// Inverse-bind matrices of this skin. + pub inverse_bind_matrices: Handle, + /// Additional data. 
+    pub extras: Option<GltfExtras>,
+}
+
+impl GltfSkin {
+    /// Create a skin extracting name and index from glTF def
+    pub fn new(
+        skin: &gltf::Skin,
+        joints: Vec<Handle<GltfNode>>,
+        inverse_bind_matrices: Handle<SkinnedMeshInverseBindposes>,
+        extras: Option<GltfExtras>,
+    ) -> Self {
+        Self {
+            index: skin.index(),
+            name: if let Some(name) = skin.name() {
+                name.to_string()
+            } else {
+                format!("GltfSkin{}", skin.index())
+            },
+            joints,
+            inverse_bind_matrices,
+            extras,
+        }
+    }
+
+    /// Subasset label for this skin within the gLTF parent asset.
+    pub fn asset_label(&self) -> GltfAssetLabel {
+        GltfAssetLabel::Skin(self.index)
+    }
+}
+
+/// Additional untyped data that can be present on most glTF types at the primitive level.
+///
+/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras).
+#[derive(Clone, Debug, Reflect, Default, Component)]
+#[reflect(Component, Clone, Default, Debug)]
+pub struct GltfExtras {
+    /// Content of the extra data.
+    pub value: String,
+}
+
+impl From<&serde_json::value::RawValue> for GltfExtras {
+    fn from(value: &serde_json::value::RawValue) -> Self {
+        GltfExtras {
+            value: value.get().to_string(),
+        }
+    }
+}
+
+/// Additional untyped data that can be present on most glTF types at the scene level.
+///
+/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras).
+#[derive(Clone, Debug, Reflect, Default, Component)]
+#[reflect(Component, Clone, Default, Debug)]
+pub struct GltfSceneExtras {
+    /// Content of the extra data.
+    pub value: String,
+}
+
+/// Additional untyped data that can be present on most glTF types at the mesh level.
+///
+/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras).
+#[derive(Clone, Debug, Reflect, Default, Component)]
+#[reflect(Component, Clone, Default, Debug)]
+pub struct GltfMeshExtras {
+    /// Content of the extra data.
+ pub value: String, +} + +/// Additional untyped data that can be present on most glTF types at the material level. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). +#[derive(Clone, Debug, Reflect, Default, Component)] +#[reflect(Component, Clone, Default, Debug)] +pub struct GltfMaterialExtras { + /// Content of the extra data. + pub value: String, +} + +/// The material name of a glTF primitive. +/// +/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-material). +#[derive(Clone, Debug, Reflect, Default, Component)] +#[reflect(Component, Clone)] +pub struct GltfMaterialName(pub String); diff --git a/crates/bevy_gltf/src/label.rs b/crates/bevy_gltf/src/label.rs new file mode 100644 index 0000000000000..b74d5ab2d6631 --- /dev/null +++ b/crates/bevy_gltf/src/label.rs @@ -0,0 +1,127 @@ +//! Labels that can be used to load part of a glTF + +use bevy_asset::AssetPath; + +/// Labels that can be used to load part of a glTF +/// +/// You can use [`GltfAssetLabel::from_asset`] to add it to an asset path +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # use bevy_asset::prelude::*; +/// # use bevy_scene::prelude::*; +/// # use bevy_gltf::prelude::*; +/// +/// fn load_gltf_scene(asset_server: Res) { +/// let gltf_scene: Handle = asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); +/// } +/// ``` +/// +/// Or when formatting a string for the path +/// +/// ``` +/// # use bevy_ecs::prelude::*; +/// # use bevy_asset::prelude::*; +/// # use bevy_scene::prelude::*; +/// # use bevy_gltf::prelude::*; +/// +/// fn load_gltf_scene(asset_server: Res) { +/// let gltf_scene: Handle = asset_server.load(format!("models/FlightHelmet/FlightHelmet.gltf#{}", GltfAssetLabel::Scene(0))); +/// } +/// ``` +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum GltfAssetLabel { + /// `Scene{}`: glTF 
Scene as a Bevy [`Scene`](bevy_scene::Scene) + Scene(usize), + /// `Node{}`: glTF Node as a [`GltfNode`](crate::GltfNode) + Node(usize), + /// `Mesh{}`: glTF Mesh as a [`GltfMesh`](crate::GltfMesh) + Mesh(usize), + /// `Mesh{}/Primitive{}`: glTF Primitive as a Bevy [`Mesh`](bevy_mesh::Mesh) + Primitive { + /// Index of the mesh for this primitive + mesh: usize, + /// Index of this primitive in its parent mesh + primitive: usize, + }, + /// `Mesh{}/Primitive{}/MorphTargets`: Morph target animation data for a glTF Primitive + /// as a Bevy [`Image`](bevy_image::prelude::Image) + MorphTarget { + /// Index of the mesh for this primitive + mesh: usize, + /// Index of this primitive in its parent mesh + primitive: usize, + }, + /// `Texture{}`: glTF Texture as a Bevy [`Image`](bevy_image::prelude::Image) + Texture(usize), + /// `Material{}`: glTF Material as a Bevy [`StandardMaterial`](bevy_pbr::StandardMaterial) + Material { + /// Index of this material + index: usize, + /// Used to set the [`Face`](bevy_render::render_resource::Face) of the material, + /// useful if it is used with negative scale + is_scale_inverted: bool, + }, + /// `DefaultMaterial`: glTF's default Material as a + /// Bevy [`StandardMaterial`](bevy_pbr::StandardMaterial) + DefaultMaterial, + /// `Animation{}`: glTF Animation as Bevy [`AnimationClip`](bevy_animation::AnimationClip) + Animation(usize), + /// `Skin{}`: glTF mesh skin as [`GltfSkin`](crate::GltfSkin) + Skin(usize), + /// `Skin{}/InverseBindMatrices`: glTF mesh skin matrices as Bevy + /// [`SkinnedMeshInverseBindposes`](bevy_mesh::skinning::SkinnedMeshInverseBindposes) + InverseBindMatrices(usize), +} + +impl core::fmt::Display for GltfAssetLabel { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + GltfAssetLabel::Scene(index) => f.write_str(&format!("Scene{index}")), + GltfAssetLabel::Node(index) => f.write_str(&format!("Node{index}")), + GltfAssetLabel::Mesh(index) => 
f.write_str(&format!("Mesh{index}")), + GltfAssetLabel::Primitive { mesh, primitive } => { + f.write_str(&format!("Mesh{mesh}/Primitive{primitive}")) + } + GltfAssetLabel::MorphTarget { mesh, primitive } => { + f.write_str(&format!("Mesh{mesh}/Primitive{primitive}/MorphTargets")) + } + GltfAssetLabel::Texture(index) => f.write_str(&format!("Texture{index}")), + GltfAssetLabel::Material { + index, + is_scale_inverted, + } => f.write_str(&format!( + "Material{index}{}", + if *is_scale_inverted { + " (inverted)" + } else { + "" + } + )), + GltfAssetLabel::DefaultMaterial => f.write_str("DefaultMaterial"), + GltfAssetLabel::Animation(index) => f.write_str(&format!("Animation{index}")), + GltfAssetLabel::Skin(index) => f.write_str(&format!("Skin{index}")), + GltfAssetLabel::InverseBindMatrices(index) => { + f.write_str(&format!("Skin{index}/InverseBindMatrices")) + } + } + } +} + +impl GltfAssetLabel { + /// Add this label to an asset path + /// + /// ``` + /// # use bevy_ecs::prelude::*; + /// # use bevy_asset::prelude::*; + /// # use bevy_scene::prelude::*; + /// # use bevy_gltf::prelude::*; + /// + /// fn load_gltf_scene(asset_server: Res) { + /// let gltf_scene: Handle = asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); + /// } + /// ``` + pub fn from_asset(&self, path: impl Into>) -> AssetPath<'static> { + path.into().with_label(self.to_string()) + } +} diff --git a/crates/bevy_gltf/src/lib.rs b/crates/bevy_gltf/src/lib.rs index b96d49c12d60c..ebcf49744a9b9 100644 --- a/crates/bevy_gltf/src/lib.rs +++ b/crates/bevy_gltf/src/lib.rs @@ -90,36 +90,31 @@ //! //! You can use [`GltfAssetLabel`] to ensure you are using the correct label. 
-extern crate alloc; - -#[cfg(feature = "bevy_animation")] -use bevy_animation::AnimationClip; -use bevy_utils::HashMap; - +mod assets; +mod label; mod loader; mod vertex_attributes; -pub use loader::*; + +extern crate alloc; + +use bevy_platform::collections::HashMap; use bevy_app::prelude::*; -use bevy_asset::{Asset, AssetApp, AssetPath, Handle}; -use bevy_ecs::{prelude::Component, reflect::ReflectComponent}; +use bevy_asset::AssetApp; use bevy_image::CompressedImageFormats; -use bevy_pbr::StandardMaterial; -use bevy_reflect::{std_traits::ReflectDefault, Reflect, TypePath}; -use bevy_render::{ - mesh::{skinning::SkinnedMeshInverseBindposes, Mesh, MeshVertexAttribute}, - renderer::RenderDevice, -}; -use bevy_scene::Scene; +use bevy_mesh::MeshVertexAttribute; +use bevy_render::renderer::RenderDevice; /// The glTF prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. pub mod prelude { #[doc(hidden)] - pub use crate::{Gltf, GltfAssetLabel, GltfExtras}; + pub use crate::{assets::Gltf, assets::GltfExtras, label::GltfAssetLabel}; } +pub use {assets::*, label::GltfAssetLabel, loader::*}; + /// Adds support for glTF file loading to the app. #[derive(Default)] pub struct GltfPlugin { @@ -168,417 +163,3 @@ impl Plugin for GltfPlugin { }); } } - -/// Representation of a loaded glTF file. -#[derive(Asset, Debug, TypePath)] -pub struct Gltf { - /// All scenes loaded from the glTF file. - pub scenes: Vec>, - /// Named scenes loaded from the glTF file. - pub named_scenes: HashMap, Handle>, - /// All meshes loaded from the glTF file. - pub meshes: Vec>, - /// Named meshes loaded from the glTF file. - pub named_meshes: HashMap, Handle>, - /// All materials loaded from the glTF file. - pub materials: Vec>, - /// Named materials loaded from the glTF file. - pub named_materials: HashMap, Handle>, - /// All nodes loaded from the glTF file. - pub nodes: Vec>, - /// Named nodes loaded from the glTF file. 
- pub named_nodes: HashMap, Handle>, - /// All skins loaded from the glTF file. - pub skins: Vec>, - /// Named skins loaded from the glTF file. - pub named_skins: HashMap, Handle>, - /// Default scene to be displayed. - pub default_scene: Option>, - /// All animations loaded from the glTF file. - #[cfg(feature = "bevy_animation")] - pub animations: Vec>, - /// Named animations loaded from the glTF file. - #[cfg(feature = "bevy_animation")] - pub named_animations: HashMap, Handle>, - /// The gltf root of the gltf asset, see . Only has a value when `GltfLoaderSettings::include_source` is true. - pub source: Option, -} - -/// A glTF node with all of its child nodes, its [`GltfMesh`], -/// [`Transform`](bevy_transform::prelude::Transform), its optional [`GltfSkin`] -/// and an optional [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-node). -#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfNode { - /// Index of the node inside the scene - pub index: usize, - /// Computed name for a node - either a user defined node name from gLTF or a generated name from index - pub name: String, - /// Direct children of the node. - pub children: Vec>, - /// Mesh of the node. - pub mesh: Option>, - /// Skin of the node. - pub skin: Option>, - /// Local transform. - pub transform: bevy_transform::prelude::Transform, - /// Is this node used as an animation root - #[cfg(feature = "bevy_animation")] - pub is_animation_root: bool, - /// Additional data. 
- pub extras: Option, -} - -impl GltfNode { - /// Create a node extracting name and index from glTF def - pub fn new( - node: &gltf::Node, - children: Vec>, - mesh: Option>, - transform: bevy_transform::prelude::Transform, - skin: Option>, - extras: Option, - ) -> Self { - Self { - index: node.index(), - name: if let Some(name) = node.name() { - name.to_string() - } else { - format!("GltfNode{}", node.index()) - }, - children, - mesh, - transform, - skin, - #[cfg(feature = "bevy_animation")] - is_animation_root: false, - extras, - } - } - - /// Create a node with animation root mark - #[cfg(feature = "bevy_animation")] - pub fn with_animation_root(self, is_animation_root: bool) -> Self { - Self { - is_animation_root, - ..self - } - } - - /// Subasset label for this node within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Node(self.index) - } -} - -/// A glTF skin with all of its joint nodes, [`SkinnedMeshInversiveBindposes`](bevy_render::mesh::skinning::SkinnedMeshInverseBindposes) -/// and an optional [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-skin). -#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfSkin { - /// Index of the skin inside the scene - pub index: usize, - /// Computed name for a skin - either a user defined skin name from gLTF or a generated name from index - pub name: String, - /// All the nodes that form this skin. - pub joints: Vec>, - /// Inverse-bind matrices of this skin. - pub inverse_bind_matrices: Handle, - /// Additional data. 
- pub extras: Option, -} - -impl GltfSkin { - /// Create a skin extracting name and index from glTF def - pub fn new( - skin: &gltf::Skin, - joints: Vec>, - inverse_bind_matrices: Handle, - extras: Option, - ) -> Self { - Self { - index: skin.index(), - name: if let Some(name) = skin.name() { - name.to_string() - } else { - format!("GltfSkin{}", skin.index()) - }, - joints, - inverse_bind_matrices, - extras, - } - } - - /// Subasset label for this skin within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Skin(self.index) - } -} - -/// A glTF mesh, which may consist of multiple [`GltfPrimitives`](GltfPrimitive) -/// and an optional [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh). -#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfMesh { - /// Index of the mesh inside the scene - pub index: usize, - /// Computed name for a mesh - either a user defined mesh name from gLTF or a generated name from index - pub name: String, - /// Primitives of the glTF mesh. - pub primitives: Vec, - /// Additional data. - pub extras: Option, -} - -impl GltfMesh { - /// Create a mesh extracting name and index from glTF def - pub fn new( - mesh: &gltf::Mesh, - primitives: Vec, - extras: Option, - ) -> Self { - Self { - index: mesh.index(), - name: if let Some(name) = mesh.name() { - name.to_string() - } else { - format!("GltfMesh{}", mesh.index()) - }, - primitives, - extras, - } - } - - /// Subasset label for this mesh within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Mesh(self.index) - } -} - -/// Part of a [`GltfMesh`] that consists of a [`Mesh`], an optional [`StandardMaterial`] and [`GltfExtras`]. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-mesh-primitive). 
-#[derive(Asset, Debug, Clone, TypePath)] -pub struct GltfPrimitive { - /// Index of the primitive inside the mesh - pub index: usize, - /// Index of the parent [`GltfMesh`] of this primitive - pub parent_mesh_index: usize, - /// Computed name for a primitive - either a user defined primitive name from gLTF or a generated name from index - pub name: String, - /// Topology to be rendered. - pub mesh: Handle, - /// Material to apply to the `mesh`. - pub material: Option>, - /// Additional data. - pub extras: Option, - /// Additional data of the `material`. - pub material_extras: Option, -} - -impl GltfPrimitive { - /// Create a primitive extracting name and index from glTF def - pub fn new( - gltf_mesh: &gltf::Mesh, - gltf_primitive: &gltf::Primitive, - mesh: Handle, - material: Option>, - extras: Option, - material_extras: Option, - ) -> Self { - GltfPrimitive { - index: gltf_primitive.index(), - parent_mesh_index: gltf_mesh.index(), - name: { - let mesh_name = gltf_mesh.name().unwrap_or("Mesh"); - if gltf_mesh.primitives().len() > 1 { - format!("{}.{}", mesh_name, gltf_primitive.index()) - } else { - mesh_name.to_string() - } - }, - mesh, - material, - extras, - material_extras, - } - } - - /// Subasset label for this primitive within its parent [`GltfMesh`] within the gLTF parent asset. - pub fn asset_label(&self) -> GltfAssetLabel { - GltfAssetLabel::Primitive { - mesh: self.parent_mesh_index, - primitive: self.index, - } - } -} - -/// Additional untyped data that can be present on most glTF types at the primitive level. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfExtras { - /// Content of the extra data. - pub value: String, -} - -/// Additional untyped data that can be present on most glTF types at the scene level. 
-/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfSceneExtras { - /// Content of the extra data. - pub value: String, -} - -/// Additional untyped data that can be present on most glTF types at the mesh level. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfMeshExtras { - /// Content of the extra data. - pub value: String, -} - -/// Additional untyped data that can be present on most glTF types at the material level. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-extras). -#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component, Default, Debug)] -pub struct GltfMaterialExtras { - /// Content of the extra data. - pub value: String, -} - -/// The material name of a glTF primitive. -/// -/// See [the relevant glTF specification section](https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html#reference-material). 
-#[derive(Clone, Debug, Reflect, Default, Component)] -#[reflect(Component)] -pub struct GltfMaterialName(pub String); - -/// Labels that can be used to load part of a glTF -/// -/// You can use [`GltfAssetLabel::from_asset`] to add it to an asset path -/// -/// ``` -/// # use bevy_ecs::prelude::*; -/// # use bevy_asset::prelude::*; -/// # use bevy_scene::prelude::*; -/// # use bevy_gltf::prelude::*; -/// -/// fn load_gltf_scene(asset_server: Res) { -/// let gltf_scene: Handle = asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); -/// } -/// ``` -/// -/// Or when formatting a string for the path -/// -/// ``` -/// # use bevy_ecs::prelude::*; -/// # use bevy_asset::prelude::*; -/// # use bevy_scene::prelude::*; -/// # use bevy_gltf::prelude::*; -/// -/// fn load_gltf_scene(asset_server: Res) { -/// let gltf_scene: Handle = asset_server.load(format!("models/FlightHelmet/FlightHelmet.gltf#{}", GltfAssetLabel::Scene(0))); -/// } -/// ``` -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum GltfAssetLabel { - /// `Scene{}`: glTF Scene as a Bevy `Scene` - Scene(usize), - /// `Node{}`: glTF Node as a `GltfNode` - Node(usize), - /// `Mesh{}`: glTF Mesh as a `GltfMesh` - Mesh(usize), - /// `Mesh{}/Primitive{}`: glTF Primitive as a Bevy `Mesh` - Primitive { - /// Index of the mesh for this primitive - mesh: usize, - /// Index of this primitive in its parent mesh - primitive: usize, - }, - /// `Mesh{}/Primitive{}/MorphTargets`: Morph target animation data for a glTF Primitive - MorphTarget { - /// Index of the mesh for this primitive - mesh: usize, - /// Index of this primitive in its parent mesh - primitive: usize, - }, - /// `Texture{}`: glTF Texture as a Bevy `Image` - Texture(usize), - /// `Material{}`: glTF Material as a Bevy `StandardMaterial` - Material { - /// Index of this material - index: usize, - /// Used to set the [`Face`](bevy_render::render_resource::Face) of the material, useful if it is used with negative scale 
- is_scale_inverted: bool, - }, - /// `DefaultMaterial`: as above, if the glTF file contains a default material with no index - DefaultMaterial, - /// `Animation{}`: glTF Animation as Bevy `AnimationClip` - Animation(usize), - /// `Skin{}`: glTF mesh skin as `GltfSkin` - Skin(usize), - /// `Skin{}/InverseBindMatrices`: glTF mesh skin matrices as Bevy `SkinnedMeshInverseBindposes` - InverseBindMatrices(usize), -} - -impl core::fmt::Display for GltfAssetLabel { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - GltfAssetLabel::Scene(index) => f.write_str(&format!("Scene{index}")), - GltfAssetLabel::Node(index) => f.write_str(&format!("Node{index}")), - GltfAssetLabel::Mesh(index) => f.write_str(&format!("Mesh{index}")), - GltfAssetLabel::Primitive { mesh, primitive } => { - f.write_str(&format!("Mesh{mesh}/Primitive{primitive}")) - } - GltfAssetLabel::MorphTarget { mesh, primitive } => { - f.write_str(&format!("Mesh{mesh}/Primitive{primitive}/MorphTargets")) - } - GltfAssetLabel::Texture(index) => f.write_str(&format!("Texture{index}")), - GltfAssetLabel::Material { - index, - is_scale_inverted, - } => f.write_str(&format!( - "Material{index}{}", - if *is_scale_inverted { - " (inverted)" - } else { - "" - } - )), - GltfAssetLabel::DefaultMaterial => f.write_str("DefaultMaterial"), - GltfAssetLabel::Animation(index) => f.write_str(&format!("Animation{index}")), - GltfAssetLabel::Skin(index) => f.write_str(&format!("Skin{index}")), - GltfAssetLabel::InverseBindMatrices(index) => { - f.write_str(&format!("Skin{index}/InverseBindMatrices")) - } - } - } -} - -impl GltfAssetLabel { - /// Add this label to an asset path - /// - /// ``` - /// # use bevy_ecs::prelude::*; - /// # use bevy_asset::prelude::*; - /// # use bevy_scene::prelude::*; - /// # use bevy_gltf::prelude::*; - /// - /// fn load_gltf_scene(asset_server: Res) { - /// let gltf_scene: Handle = 
asset_server.load(GltfAssetLabel::Scene(0).from_asset("models/FlightHelmet/FlightHelmet.gltf")); - /// } - /// ``` - pub fn from_asset(&self, path: impl Into>) -> AssetPath<'static> { - path.into().with_label(self.to_string()) - } -} diff --git a/crates/bevy_gltf/src/loader/extensions/khr_materials_anisotropy.rs b/crates/bevy_gltf/src/loader/extensions/khr_materials_anisotropy.rs new file mode 100644 index 0000000000000..f859cfba84052 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/khr_materials_anisotropy.rs @@ -0,0 +1,71 @@ +use bevy_asset::LoadContext; + +use gltf::{Document, Material}; + +use serde_json::Value; + +#[cfg(feature = "pbr_anisotropy_texture")] +use { + crate::loader::gltf_ext::{material::uv_channel, texture::texture_handle_from_info}, + bevy_asset::Handle, + bevy_image::Image, + bevy_pbr::UvChannel, + gltf::json::texture::Info, + serde_json::value, +}; + +/// Parsed data from the `KHR_materials_anisotropy` extension. +/// +/// See the specification: +/// +#[derive(Default)] +pub(crate) struct AnisotropyExtension { + pub(crate) anisotropy_strength: Option, + pub(crate) anisotropy_rotation: Option, + #[cfg(feature = "pbr_anisotropy_texture")] + pub(crate) anisotropy_channel: UvChannel, + #[cfg(feature = "pbr_anisotropy_texture")] + pub(crate) anisotropy_texture: Option>, +} + +impl AnisotropyExtension { + #[expect( + clippy::allow_attributes, + reason = "`unused_variables` is not always linted" + )] + #[allow( + unused_variables, + reason = "Depending on what features are used to compile this crate, certain parameters may end up unused." + )] + pub(crate) fn parse( + load_context: &mut LoadContext, + document: &Document, + material: &Material, + ) -> Option { + let extension = material + .extensions()? + .get("KHR_materials_anisotropy")? 
+ .as_object()?; + + #[cfg(feature = "pbr_anisotropy_texture")] + let (anisotropy_channel, anisotropy_texture) = extension + .get("anisotropyTexture") + .and_then(|value| value::from_value::(value.clone()).ok()) + .map(|json_info| { + ( + uv_channel(material, "anisotropy", json_info.tex_coord), + texture_handle_from_info(&json_info, document, load_context), + ) + }) + .unzip(); + + Some(AnisotropyExtension { + anisotropy_strength: extension.get("anisotropyStrength").and_then(Value::as_f64), + anisotropy_rotation: extension.get("anisotropyRotation").and_then(Value::as_f64), + #[cfg(feature = "pbr_anisotropy_texture")] + anisotropy_channel: anisotropy_channel.unwrap_or_default(), + #[cfg(feature = "pbr_anisotropy_texture")] + anisotropy_texture, + }) + } +} diff --git a/crates/bevy_gltf/src/loader/extensions/khr_materials_clearcoat.rs b/crates/bevy_gltf/src/loader/extensions/khr_materials_clearcoat.rs new file mode 100644 index 0000000000000..5128487ca4445 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/khr_materials_clearcoat.rs @@ -0,0 +1,104 @@ +use bevy_asset::LoadContext; + +use gltf::{Document, Material}; + +use serde_json::Value; + +#[cfg(feature = "pbr_multi_layer_material_textures")] +use { + crate::loader::gltf_ext::material::parse_material_extension_texture, bevy_asset::Handle, + bevy_image::Image, bevy_pbr::UvChannel, +}; + +/// Parsed data from the `KHR_materials_clearcoat` extension. 
+/// +/// See the specification: +/// +#[derive(Default)] +pub(crate) struct ClearcoatExtension { + pub(crate) clearcoat_factor: Option, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_channel: UvChannel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_texture: Option>, + pub(crate) clearcoat_roughness_factor: Option, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_roughness_channel: UvChannel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_roughness_texture: Option>, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_normal_channel: UvChannel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + pub(crate) clearcoat_normal_texture: Option>, +} + +impl ClearcoatExtension { + #[expect( + clippy::allow_attributes, + reason = "`unused_variables` is not always linted" + )] + #[allow( + unused_variables, + reason = "Depending on what features are used to compile this crate, certain parameters may end up unused." + )] + pub(crate) fn parse( + load_context: &mut LoadContext, + document: &Document, + material: &Material, + ) -> Option { + let extension = material + .extensions()? + .get("KHR_materials_clearcoat")? 
+ .as_object()?; + + #[cfg(feature = "pbr_multi_layer_material_textures")] + let (clearcoat_channel, clearcoat_texture) = parse_material_extension_texture( + material, + load_context, + document, + extension, + "clearcoatTexture", + "clearcoat", + ); + + #[cfg(feature = "pbr_multi_layer_material_textures")] + let (clearcoat_roughness_channel, clearcoat_roughness_texture) = + parse_material_extension_texture( + material, + load_context, + document, + extension, + "clearcoatRoughnessTexture", + "clearcoat roughness", + ); + + #[cfg(feature = "pbr_multi_layer_material_textures")] + let (clearcoat_normal_channel, clearcoat_normal_texture) = parse_material_extension_texture( + material, + load_context, + document, + extension, + "clearcoatNormalTexture", + "clearcoat normal", + ); + + Some(ClearcoatExtension { + clearcoat_factor: extension.get("clearcoatFactor").and_then(Value::as_f64), + clearcoat_roughness_factor: extension + .get("clearcoatRoughnessFactor") + .and_then(Value::as_f64), + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_channel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_texture, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_roughness_channel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_roughness_texture, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_normal_channel, + #[cfg(feature = "pbr_multi_layer_material_textures")] + clearcoat_normal_texture, + }) + } +} diff --git a/crates/bevy_gltf/src/loader/extensions/khr_materials_specular.rs b/crates/bevy_gltf/src/loader/extensions/khr_materials_specular.rs new file mode 100644 index 0000000000000..f0adcc4940b10 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/khr_materials_specular.rs @@ -0,0 +1,100 @@ +use bevy_asset::LoadContext; + +use gltf::{Document, Material}; + +use serde_json::Value; + +#[cfg(feature = "pbr_specular_textures")] +use { + 
crate::loader::gltf_ext::material::parse_material_extension_texture, bevy_asset::Handle, + bevy_image::Image, bevy_pbr::UvChannel, +}; + +/// Parsed data from the `KHR_materials_specular` extension. +/// +/// We currently don't parse `specularFactor` and `specularTexture`, since +/// they're incompatible with Filament. +/// +/// Note that the map is a *specular map*, not a *reflectance map*. In Bevy and +/// Filament terms, the reflectance values in the specular map range from [0.0, +/// 0.5], rather than [0.0, 1.0]. This is an unfortunate +/// `KHR_materials_specular` specification requirement that stems from the fact +/// that glTF is specified in terms of a specular strength model, not the +/// reflectance model that Filament and Bevy use. A workaround, which is noted +/// in the [`StandardMaterial`](bevy_pbr::StandardMaterial) documentation, is to set the reflectance value +/// to 2.0, which spreads the specular map range from [0.0, 1.0] as normal. +/// +/// See the specification: +/// +#[derive(Default)] +pub(crate) struct SpecularExtension { + pub(crate) specular_factor: Option, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_channel: UvChannel, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_texture: Option>, + pub(crate) specular_color_factor: Option<[f64; 3]>, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_color_channel: UvChannel, + #[cfg(feature = "pbr_specular_textures")] + pub(crate) specular_color_texture: Option>, +} + +impl SpecularExtension { + pub(crate) fn parse( + _load_context: &mut LoadContext, + _document: &Document, + material: &Material, + ) -> Option { + let extension = material + .extensions()? + .get("KHR_materials_specular")? 
+ .as_object()?; + + #[cfg(feature = "pbr_specular_textures")] + let (_specular_channel, _specular_texture) = parse_material_extension_texture( + material, + _load_context, + _document, + extension, + "specularTexture", + "specular", + ); + + #[cfg(feature = "pbr_specular_textures")] + let (_specular_color_channel, _specular_color_texture) = parse_material_extension_texture( + material, + _load_context, + _document, + extension, + "specularColorTexture", + "specular color", + ); + + Some(SpecularExtension { + specular_factor: extension.get("specularFactor").and_then(Value::as_f64), + #[cfg(feature = "pbr_specular_textures")] + specular_channel: _specular_channel, + #[cfg(feature = "pbr_specular_textures")] + specular_texture: _specular_texture, + specular_color_factor: extension + .get("specularColorFactor") + .and_then(Value::as_array) + .and_then(|json_array| { + if json_array.len() < 3 { + None + } else { + Some([ + json_array[0].as_f64()?, + json_array[1].as_f64()?, + json_array[2].as_f64()?, + ]) + } + }), + #[cfg(feature = "pbr_specular_textures")] + specular_color_channel: _specular_color_channel, + #[cfg(feature = "pbr_specular_textures")] + specular_color_texture: _specular_color_texture, + }) + } +} diff --git a/crates/bevy_gltf/src/loader/extensions/mod.rs b/crates/bevy_gltf/src/loader/extensions/mod.rs new file mode 100644 index 0000000000000..14863fa4538c5 --- /dev/null +++ b/crates/bevy_gltf/src/loader/extensions/mod.rs @@ -0,0 +1,10 @@ +//! 
glTF extensions defined by the Khronos Group and other vendors + +mod khr_materials_anisotropy; +mod khr_materials_clearcoat; +mod khr_materials_specular; + +pub(crate) use self::{ + khr_materials_anisotropy::AnisotropyExtension, khr_materials_clearcoat::ClearcoatExtension, + khr_materials_specular::SpecularExtension, +}; diff --git a/crates/bevy_gltf/src/loader/gltf_ext/material.rs b/crates/bevy_gltf/src/loader/gltf_ext/material.rs new file mode 100644 index 0000000000000..9d8b7c5745910 --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/material.rs @@ -0,0 +1,165 @@ +use bevy_math::Affine2; +use bevy_pbr::UvChannel; +use bevy_render::alpha::AlphaMode; + +use gltf::{json::texture::Info, Material}; + +use serde_json::value; + +use crate::GltfAssetLabel; + +use super::texture::texture_transform_to_affine2; + +#[cfg(any( + feature = "pbr_specular_textures", + feature = "pbr_multi_layer_material_textures" +))] +use { + super::texture::texture_handle_from_info, + bevy_asset::{Handle, LoadContext}, + bevy_image::Image, + gltf::Document, + serde_json::{Map, Value}, +}; + +/// Parses a texture that's part of a material extension block and returns its +/// UV channel and image reference. 
+#[cfg(any( + feature = "pbr_specular_textures", + feature = "pbr_multi_layer_material_textures" +))] +pub(crate) fn parse_material_extension_texture( + material: &Material, + load_context: &mut LoadContext, + document: &Document, + extension: &Map, + texture_name: &str, + texture_kind: &str, +) -> (UvChannel, Option>) { + match extension + .get(texture_name) + .and_then(|value| value::from_value::(value.clone()).ok()) + { + Some(json_info) => ( + uv_channel(material, texture_kind, json_info.tex_coord), + Some(texture_handle_from_info(&json_info, document, load_context)), + ), + None => (UvChannel::default(), None), + } +} + +pub(crate) fn uv_channel(material: &Material, texture_kind: &str, tex_coord: u32) -> UvChannel { + match tex_coord { + 0 => UvChannel::Uv0, + 1 => UvChannel::Uv1, + _ => { + let material_name = material + .name() + .map(|n| format!("the material \"{n}\"")) + .unwrap_or_else(|| "an unnamed material".to_string()); + let material_index = material + .index() + .map(|i| format!("index {i}")) + .unwrap_or_else(|| "default".to_string()); + tracing::warn!( + "Only 2 UV Channels are supported, but {material_name} ({material_index}) \ + has the TEXCOORD attribute {} on texture kind {texture_kind}, which will fallback to 0.", + tex_coord, + ); + UvChannel::Uv0 + } + } +} + +pub(crate) fn alpha_mode(material: &Material) -> AlphaMode { + match material.alpha_mode() { + gltf::material::AlphaMode::Opaque => AlphaMode::Opaque, + gltf::material::AlphaMode::Mask => AlphaMode::Mask(material.alpha_cutoff().unwrap_or(0.5)), + gltf::material::AlphaMode::Blend => AlphaMode::Blend, + } +} + +/// Returns the index (within the `textures` array) of the texture with the +/// given field name in the data for the material extension with the given name, +/// if there is one. +pub(crate) fn extension_texture_index( + material: &Material, + extension_name: &str, + texture_field_name: &str, +) -> Option { + Some( + value::from_value::( + material + .extensions()? 
+ .get(extension_name)? + .as_object()? + .get(texture_field_name)? + .clone(), + ) + .ok()? + .index + .value(), + ) +} + +/// Returns true if the material needs mesh tangents in order to be successfully +/// rendered. +/// +/// We generate them if this function returns true. +pub(crate) fn needs_tangents(material: &Material) -> bool { + [ + material.normal_texture().is_some(), + #[cfg(feature = "pbr_multi_layer_material_textures")] + extension_texture_index( + material, + "KHR_materials_clearcoat", + "clearcoatNormalTexture", + ) + .is_some(), + ] + .into_iter() + .reduce(|a, b| a || b) + .unwrap_or(false) +} + +pub(crate) fn warn_on_differing_texture_transforms( + material: &Material, + info: &gltf::texture::Info, + texture_transform: Affine2, + texture_kind: &str, +) { + let has_differing_texture_transform = info + .texture_transform() + .map(texture_transform_to_affine2) + .is_some_and(|t| t != texture_transform); + if has_differing_texture_transform { + let material_name = material + .name() + .map(|n| format!("the material \"{n}\"")) + .unwrap_or_else(|| "an unnamed material".to_string()); + let texture_name = info + .texture() + .name() + .map(|n| format!("its {texture_kind} texture \"{n}\"")) + .unwrap_or_else(|| format!("its unnamed {texture_kind} texture")); + let material_index = material + .index() + .map(|i| format!("index {i}")) + .unwrap_or_else(|| "default".to_string()); + tracing::warn!( + "Only texture transforms on base color textures are supported, but {material_name} ({material_index}) \ + has a texture transform on {texture_name} (index {}), which will be ignored.", info.texture().index() + ); + } +} + +pub(crate) fn material_label(material: &Material, is_scale_inverted: bool) -> GltfAssetLabel { + if let Some(index) = material.index() { + GltfAssetLabel::Material { + index, + is_scale_inverted, + } + } else { + GltfAssetLabel::DefaultMaterial + } +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/mesh.rs 
b/crates/bevy_gltf/src/loader/gltf_ext/mesh.rs new file mode 100644 index 0000000000000..ef719891a4b33 --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/mesh.rs @@ -0,0 +1,33 @@ +use bevy_mesh::PrimitiveTopology; + +use gltf::mesh::{Mesh, Mode, Primitive}; + +use crate::GltfError; + +pub(crate) fn primitive_name(mesh: &Mesh<'_>, primitive: &Primitive) -> String { + let mesh_name = mesh.name().unwrap_or("Mesh"); + if mesh.primitives().len() > 1 { + format!("{}.{}", mesh_name, primitive.index()) + } else { + mesh_name.to_string() + } +} + +/// Maps the `primitive_topology` from glTF to `wgpu`. +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "`GltfError` is only barely past the threshold for large errors." + ) +)] +pub(crate) fn primitive_topology(mode: Mode) -> Result { + match mode { + Mode::Points => Ok(PrimitiveTopology::PointList), + Mode::Lines => Ok(PrimitiveTopology::LineList), + Mode::LineStrip => Ok(PrimitiveTopology::LineStrip), + Mode::Triangles => Ok(PrimitiveTopology::TriangleList), + Mode::TriangleStrip => Ok(PrimitiveTopology::TriangleStrip), + mode => Err(GltfError::UnsupportedPrimitive { mode }), + } +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/mod.rs b/crates/bevy_gltf/src/loader/gltf_ext/mod.rs new file mode 100644 index 0000000000000..6036948d9c3fb --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/mod.rs @@ -0,0 +1,82 @@ +//! Methods to access information from [`gltf`] types + +pub mod material; +pub mod mesh; +pub mod scene; +pub mod texture; + +use bevy_platform::collections::HashSet; + +use fixedbitset::FixedBitSet; +use gltf::{Document, Gltf}; + +use super::GltfError; + +use self::{material::extension_texture_index, scene::check_is_part_of_cycle}; + +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "need to be signature compatible with `load_gltf`" + ) +)] +/// Checks all glTF nodes for cycles, starting at the scene root. 
+pub(crate) fn check_for_cycles(gltf: &Gltf) -> Result<(), GltfError> { + // Initialize with the scene roots. + let mut roots = FixedBitSet::with_capacity(gltf.nodes().len()); + for root in gltf.scenes().flat_map(|scene| scene.nodes()) { + roots.insert(root.index()); + } + + // Check each one. + let mut visited = FixedBitSet::with_capacity(gltf.nodes().len()); + for root in roots.ones() { + let Some(node) = gltf.nodes().nth(root) else { + unreachable!("Index of a root node should always exist."); + }; + check_is_part_of_cycle(&node, &mut visited)?; + } + + Ok(()) +} + +pub(crate) fn get_linear_textures(document: &Document) -> HashSet { + let mut linear_textures = HashSet::default(); + + for material in document.materials() { + if let Some(texture) = material.normal_texture() { + linear_textures.insert(texture.texture().index()); + } + if let Some(texture) = material.occlusion_texture() { + linear_textures.insert(texture.texture().index()); + } + if let Some(texture) = material + .pbr_metallic_roughness() + .metallic_roughness_texture() + { + linear_textures.insert(texture.texture().index()); + } + if let Some(texture_index) = + extension_texture_index(&material, "KHR_materials_anisotropy", "anisotropyTexture") + { + linear_textures.insert(texture_index); + } + + // None of the clearcoat maps should be loaded as sRGB. 
+ #[cfg(feature = "pbr_multi_layer_material_textures")] + for texture_field_name in [ + "clearcoatTexture", + "clearcoatRoughnessTexture", + "clearcoatNormalTexture", + ] { + if let Some(texture_index) = + extension_texture_index(&material, "KHR_materials_clearcoat", texture_field_name) + { + linear_textures.insert(texture_index); + } + } + } + + linear_textures +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/scene.rs b/crates/bevy_gltf/src/loader/gltf_ext/scene.rs new file mode 100644 index 0000000000000..83e6778b99e37 --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/scene.rs @@ -0,0 +1,94 @@ +use bevy_ecs::name::Name; +use bevy_math::{Mat4, Vec3}; +use bevy_transform::components::Transform; + +use gltf::scene::Node; + +use fixedbitset::FixedBitSet; +use itertools::Itertools; + +#[cfg(feature = "bevy_animation")] +use bevy_platform::collections::{HashMap, HashSet}; + +use crate::GltfError; + +pub(crate) fn node_name(node: &Node) -> Name { + let name = node + .name() + .map(ToString::to_string) + .unwrap_or_else(|| format!("GltfNode{}", node.index())); + Name::new(name) +} + +/// Calculate the transform of gLTF [`Node`]. +/// +/// This should be used instead of calling [`gltf::scene::Transform::matrix()`] +/// on [`Node::transform()`](gltf::Node::transform) directly because it uses optimized glam types and +/// if `libm` feature of `bevy_math` crate is enabled also handles cross +/// platform determinism properly. 
+pub(crate) fn node_transform(node: &Node) -> Transform { + match node.transform() { + gltf::scene::Transform::Matrix { matrix } => { + Transform::from_matrix(Mat4::from_cols_array_2d(&matrix)) + } + gltf::scene::Transform::Decomposed { + translation, + rotation, + scale, + } => Transform { + translation: Vec3::from(translation), + rotation: bevy_math::Quat::from_array(rotation), + scale: Vec3::from(scale), + }, + } +} + +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "need to be signature compatible with `load_gltf`" + ) +)] +/// Check if [`Node`] is part of cycle +pub(crate) fn check_is_part_of_cycle( + node: &Node, + visited: &mut FixedBitSet, +) -> Result<(), GltfError> { + // Do we have a cycle? + if visited.contains(node.index()) { + return Err(GltfError::CircularChildren(format!( + "glTF nodes form a cycle: {} -> {}", + visited.ones().map(|bit| bit.to_string()).join(" -> "), + node.index() + ))); + } + + // Recurse. + visited.insert(node.index()); + for kid in node.children() { + check_is_part_of_cycle(&kid, visited)?; + } + visited.remove(node.index()); + + Ok(()) +} + +#[cfg(feature = "bevy_animation")] +pub(crate) fn collect_path( + node: &Node, + current_path: &[Name], + paths: &mut HashMap)>, + root_index: usize, + visited: &mut HashSet, +) { + let mut path = current_path.to_owned(); + path.push(node_name(node)); + visited.insert(node.index()); + for child in node.children() { + if !visited.contains(&child.index()) { + collect_path(&child, &path, paths, root_index, visited); + } + } + paths.insert(node.index(), (root_index, path)); +} diff --git a/crates/bevy_gltf/src/loader/gltf_ext/texture.rs b/crates/bevy_gltf/src/loader/gltf_ext/texture.rs new file mode 100644 index 0000000000000..5fb5bcce0d4c0 --- /dev/null +++ b/crates/bevy_gltf/src/loader/gltf_ext/texture.rs @@ -0,0 +1,126 @@ +use bevy_asset::{Handle, LoadContext}; +use bevy_image::{Image, ImageAddressMode, ImageFilterMode, ImageSamplerDescriptor}; 
+use bevy_math::Affine2; + +use gltf::{ + image::Source, + texture::{MagFilter, MinFilter, Texture, TextureTransform, WrappingMode}, +}; + +#[cfg(any( + feature = "pbr_anisotropy_texture", + feature = "pbr_multi_layer_material_textures", + feature = "pbr_specular_textures" +))] +use gltf::{json::texture::Info, Document}; + +use crate::{loader::DataUri, GltfAssetLabel}; + +pub(crate) fn texture_handle( + texture: &Texture<'_>, + load_context: &mut LoadContext, +) -> Handle { + match texture.source().source() { + Source::View { .. } => load_context.get_label_handle(texture_label(texture).to_string()), + Source::Uri { uri, .. } => { + let uri = percent_encoding::percent_decode_str(uri) + .decode_utf8() + .unwrap(); + let uri = uri.as_ref(); + if let Ok(_data_uri) = DataUri::parse(uri) { + load_context.get_label_handle(texture_label(texture).to_string()) + } else { + let parent = load_context.path().parent().unwrap(); + let image_path = parent.join(uri); + load_context.load(image_path) + } + } + } +} + +/// Extracts the texture sampler data from the glTF [`Texture`]. 
+pub(crate) fn texture_sampler(texture: &Texture<'_>) -> ImageSamplerDescriptor { + let gltf_sampler = texture.sampler(); + + ImageSamplerDescriptor { + address_mode_u: address_mode(&gltf_sampler.wrap_s()), + address_mode_v: address_mode(&gltf_sampler.wrap_t()), + + mag_filter: gltf_sampler + .mag_filter() + .map(|mf| match mf { + MagFilter::Nearest => ImageFilterMode::Nearest, + MagFilter::Linear => ImageFilterMode::Linear, + }) + .unwrap_or(ImageSamplerDescriptor::default().mag_filter), + + min_filter: gltf_sampler + .min_filter() + .map(|mf| match mf { + MinFilter::Nearest + | MinFilter::NearestMipmapNearest + | MinFilter::NearestMipmapLinear => ImageFilterMode::Nearest, + MinFilter::Linear + | MinFilter::LinearMipmapNearest + | MinFilter::LinearMipmapLinear => ImageFilterMode::Linear, + }) + .unwrap_or(ImageSamplerDescriptor::default().min_filter), + + mipmap_filter: gltf_sampler + .min_filter() + .map(|mf| match mf { + MinFilter::Nearest + | MinFilter::Linear + | MinFilter::NearestMipmapNearest + | MinFilter::LinearMipmapNearest => ImageFilterMode::Nearest, + MinFilter::NearestMipmapLinear | MinFilter::LinearMipmapLinear => { + ImageFilterMode::Linear + } + }) + .unwrap_or(ImageSamplerDescriptor::default().mipmap_filter), + + ..Default::default() + } +} + +pub(crate) fn texture_label(texture: &Texture<'_>) -> GltfAssetLabel { + GltfAssetLabel::Texture(texture.index()) +} + +pub(crate) fn address_mode(wrapping_mode: &WrappingMode) -> ImageAddressMode { + match wrapping_mode { + WrappingMode::ClampToEdge => ImageAddressMode::ClampToEdge, + WrappingMode::Repeat => ImageAddressMode::Repeat, + WrappingMode::MirroredRepeat => ImageAddressMode::MirrorRepeat, + } +} + +pub(crate) fn texture_transform_to_affine2(texture_transform: TextureTransform) -> Affine2 { + Affine2::from_scale_angle_translation( + texture_transform.scale().into(), + -texture_transform.rotation(), + texture_transform.offset().into(), + ) +} + +#[cfg(any( + feature = "pbr_anisotropy_texture", + 
feature = "pbr_multi_layer_material_textures", + feature = "pbr_specular_textures" +))] +/// Given a [`Info`], returns the handle of the texture that this +/// refers to. +/// +/// This is a low-level function only used when the [`gltf`] crate has no support +/// for an extension, forcing us to parse its texture references manually. +pub(crate) fn texture_handle_from_info( + info: &Info, + document: &Document, + load_context: &mut LoadContext, +) -> Handle { + let texture = document + .textures() + .nth(info.index.value()) + .expect("Texture info references a nonexistent texture"); + texture_handle(&texture, load_context) +} diff --git a/crates/bevy_gltf/src/loader.rs b/crates/bevy_gltf/src/loader/mod.rs similarity index 73% rename from crates/bevy_gltf/src/loader.rs rename to crates/bevy_gltf/src/loader/mod.rs index c1f6a5d2eaa43..a4e25475b74fd 100644 --- a/crates/bevy_gltf/src/loader.rs +++ b/crates/bevy_gltf/src/loader/mod.rs @@ -1,69 +1,86 @@ -use crate::{ - vertex_attributes::convert_attribute, Gltf, GltfAssetLabel, GltfExtras, GltfMaterialExtras, - GltfMaterialName, GltfMeshExtras, GltfNode, GltfSceneExtras, GltfSkin, +mod extensions; +mod gltf_ext; + +use std::{ + io::Error, + path::{Path, PathBuf}, }; -use alloc::collections::VecDeque; +#[cfg(feature = "bevy_animation")] +use bevy_animation::{prelude::*, AnimationTarget, AnimationTargetId}; use bevy_asset::{ io::Reader, AssetLoadError, AssetLoader, Handle, LoadContext, ReadAssetBytesError, + RenderAssetUsages, }; use bevy_color::{Color, LinearRgba}; use bevy_core_pipeline::prelude::Camera3d; use bevy_ecs::{ entity::{Entity, EntityHashMap}, + hierarchy::ChildSpawner, name::Name, world::World, }; -use bevy_hierarchy::{BuildChildren, ChildBuild, WorldChildBuilder}; use bevy_image::{ - CompressedImageFormats, Image, ImageAddressMode, ImageFilterMode, ImageLoaderSettings, - ImageSampler, ImageSamplerDescriptor, ImageType, TextureError, + CompressedImageFormats, Image, ImageLoaderSettings, ImageSampler, 
ImageSamplerDescriptor, + ImageType, TextureError, +}; +use bevy_math::{Mat4, Vec3}; +use bevy_mesh::{ + morph::{MeshMorphWeights, MorphAttributes, MorphTargetImage, MorphWeights}, + skinning::{SkinnedMesh, SkinnedMeshInverseBindposes}, + Indices, Mesh, MeshVertexAttribute, PrimitiveTopology, VertexAttributeValues, }; -use bevy_math::{Affine2, Mat4, Vec3}; +#[cfg(feature = "pbr_transmission_textures")] +use bevy_pbr::UvChannel; use bevy_pbr::{ - DirectionalLight, MeshMaterial3d, PointLight, SpotLight, StandardMaterial, UvChannel, - MAX_JOINTS, + DirectionalLight, MeshMaterial3d, PointLight, SpotLight, StandardMaterial, MAX_JOINTS, }; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ - alpha::AlphaMode, camera::{Camera, OrthographicProjection, PerspectiveProjection, Projection, ScalingMode}, - mesh::{ - morph::{MeshMorphWeights, MorphAttributes, MorphTargetImage, MorphWeights}, - skinning::{SkinnedMesh, SkinnedMeshInverseBindposes}, - Indices, Mesh, Mesh3d, MeshVertexAttribute, VertexAttributeValues, - }, + mesh::Mesh3d, primitives::Aabb, - render_asset::RenderAssetUsages, - render_resource::{Face, PrimitiveTopology}, + render_resource::Face, view::Visibility, }; use bevy_scene::Scene; #[cfg(not(target_arch = "wasm32"))] use bevy_tasks::IoTaskPool; use bevy_transform::components::Transform; -use bevy_utils::{ - tracing::{error, info_span, warn}, - HashMap, HashSet, -}; + use gltf::{ accessor::Iter, image::Source, - json, mesh::{util::ReadIndices, Mode}, - texture::{Info, MagFilter, MinFilter, TextureTransform, WrappingMode}, - Document, Material, Node, Primitive, Semantic, + Document, Material, Node, Semantic, }; + use serde::{Deserialize, Serialize}; -use serde_json::{value, Value}; -use std::{ - io::Error, - path::{Path, PathBuf}, -}; +#[cfg(feature = "bevy_animation")] +use smallvec::SmallVec; + use thiserror::Error; +use tracing::{error, info_span, warn}; + +use crate::{ + vertex_attributes::convert_attribute, Gltf, GltfAssetLabel, 
GltfExtras, GltfMaterialExtras, + GltfMaterialName, GltfMeshExtras, GltfNode, GltfSceneExtras, GltfSkin, +}; + #[cfg(feature = "bevy_animation")] -use { - bevy_animation::{prelude::*, AnimationTarget, AnimationTargetId}, - smallvec::SmallVec, +use self::gltf_ext::scene::collect_path; +use self::{ + extensions::{AnisotropyExtension, ClearcoatExtension, SpecularExtension}, + gltf_ext::{ + check_for_cycles, get_linear_textures, + material::{ + alpha_mode, material_label, needs_tangents, uv_channel, + warn_on_differing_texture_transforms, + }, + mesh::{primitive_name, primitive_topology}, + scene::{node_name, node_transform}, + texture::{texture_handle, texture_sampler, texture_transform_to_affine2}, + }, }; /// An error that occurs when loading a glTF file. @@ -106,10 +123,10 @@ pub enum GltfError { MissingAnimationSampler(usize), /// Failed to generate tangents. #[error("failed to generate tangents: {0}")] - GenerateTangentsError(#[from] bevy_render::mesh::GenerateTangentsError), + GenerateTangentsError(#[from] bevy_mesh::GenerateTangentsError), /// Failed to generate morph targets. 
#[error("failed to generate morph targets: {0}")] - MorphTarget(#[from] bevy_render::mesh::morph::MorphBuildError), + MorphTarget(#[from] bevy_mesh::morph::MorphBuildError), /// Circular children in Nodes #[error("GLTF model must be a tree, found cycle instead at node indices: {0:?}")] #[from(ignore)] @@ -190,6 +207,7 @@ impl AssetLoader for GltfLoader { ) -> Result { let mut bytes = Vec::new(); reader.read_to_end(&mut bytes).await?; + load_gltf(self, &bytes, load_context, settings).await } @@ -206,6 +224,7 @@ async fn load_gltf<'a, 'b, 'c>( settings: &'b GltfLoaderSettings, ) -> Result { let gltf = gltf::Gltf::from_slice(bytes)?; + let file_name = load_context .asset_path() .path() @@ -217,45 +236,7 @@ async fn load_gltf<'a, 'b, 'c>( .to_string(); let buffer_data = load_buffers(&gltf, load_context).await?; - let mut linear_textures = >::default(); - - for material in gltf.materials() { - if let Some(texture) = material.normal_texture() { - linear_textures.insert(texture.texture().index()); - } - if let Some(texture) = material.occlusion_texture() { - linear_textures.insert(texture.texture().index()); - } - if let Some(texture) = material - .pbr_metallic_roughness() - .metallic_roughness_texture() - { - linear_textures.insert(texture.texture().index()); - } - if let Some(texture_index) = material_extension_texture_index( - &material, - "KHR_materials_anisotropy", - "anisotropyTexture", - ) { - linear_textures.insert(texture_index); - } - - // None of the clearcoat maps should be loaded as sRGB. 
- #[cfg(feature = "pbr_multi_layer_material_textures")] - for texture_field_name in [ - "clearcoatTexture", - "clearcoatRoughnessTexture", - "clearcoatNormalTexture", - ] { - if let Some(texture_index) = material_extension_texture_index( - &material, - "KHR_materials_clearcoat", - texture_field_name, - ) { - linear_textures.insert(texture_index); - } - } - } + let linear_textures = get_linear_textures(&gltf.document); #[cfg(feature = "bevy_animation")] let paths = { @@ -263,7 +244,7 @@ async fn load_gltf<'a, 'b, 'c>( for scene in gltf.scenes() { for node in scene.nodes() { let root_index = node.index(); - paths_recur(node, &[], &mut paths, root_index, &mut HashSet::default()); + collect_path(&node, &[], &mut paths, root_index, &mut HashSet::default()); } } paths @@ -456,7 +437,10 @@ async fn load_gltf<'a, 'b, 'c>( ReadOutputs::MorphTargetWeights(weights) => { let weights: Vec = weights.into_f32().collect(); if keyframe_timestamps.len() == 1 { - #[allow(clippy::unnecessary_map_on_constructor)] + #[expect( + clippy::unnecessary_map_on_constructor, + reason = "While the mapping is unnecessary, it is much more readable at this level of indentation. Additionally, mapping makes it more consistent with the other branches." + )] Some(ConstantCurve::new(Interval::EVERYWHERE, weights)) .map(WeightsCurve) .map(VariableCurve::new) @@ -522,35 +506,6 @@ async fn load_gltf<'a, 'b, 'c>( (animations, named_animations, animation_roots) }; - // TODO: use the threaded impl on wasm once wasm thread pool doesn't deadlock on it - // See https://github.com/bevyengine/bevy/issues/1924 for more details - // The taskpool use is also avoided when there is only one texture for performance reasons and - // to avoid https://github.com/bevyengine/bevy/pull/2725 - // PERF: could this be a Vec instead? Are gltf texture indices dense? 
- fn process_loaded_texture( - load_context: &mut LoadContext, - handles: &mut Vec>, - texture: ImageOrPath, - ) { - let handle = match texture { - ImageOrPath::Image { label, image } => { - load_context.add_labeled_asset(label.to_string(), image) - } - ImageOrPath::Path { - path, - is_srgb, - sampler_descriptor, - } => load_context - .loader() - .with_settings(move |settings: &mut ImageLoaderSettings| { - settings.is_srgb = is_srgb; - settings.sampler = ImageSampler::Descriptor(sampler_descriptor.clone()); - }) - .load(path), - }; - handles.push(handle); - } - // We collect handles to ensure loaded images from paths are not unloaded before they are used elsewhere // in the loader. This prevents "reloads", but it also prevents dropping the is_srgb context on reload. // @@ -570,7 +525,7 @@ async fn load_gltf<'a, 'b, 'c>( settings.load_materials, ) .await?; - process_loaded_texture(load_context, &mut _texture_handles, image); + image.process_loaded_texture(load_context, &mut _texture_handles); } } else { #[cfg(not(target_arch = "wasm32"))] @@ -596,7 +551,7 @@ async fn load_gltf<'a, 'b, 'c>( .into_iter() .for_each(|result| match result { Ok(image) => { - process_loaded_texture(load_context, &mut _texture_handles, image); + image.process_loaded_texture(load_context, &mut _texture_handles); } Err(err) => { warn!("Error loading glTF texture: {}", err); @@ -637,7 +592,7 @@ async fn load_gltf<'a, 'b, 'c>( mesh: gltf_mesh.index(), primitive: primitive.index(), }; - let primitive_topology = get_primitive_topology(primitive.mode())?; + let primitive_topology = primitive_topology(primitive.mode())?; let mut mesh = Mesh::new(primitive_topology, settings.load_meshes); @@ -646,13 +601,13 @@ async fn load_gltf<'a, 'b, 'c>( if [Semantic::Joints(0), Semantic::Weights(0)].contains(&semantic) { if !meshes_on_skinned_nodes.contains(&gltf_mesh.index()) { warn!( - "Ignoring attribute {:?} for skinned mesh {:?} used on non skinned nodes (NODE_SKINNED_MESH_WITHOUT_SKIN)", + "Ignoring 
attribute {:?} for skinned mesh {} used on non skinned nodes (NODE_SKINNED_MESH_WITHOUT_SKIN)", semantic, primitive_label ); continue; } else if meshes_on_non_skinned_nodes.contains(&gltf_mesh.index()) { - error!("Skinned mesh {:?} used on both skinned and non skin nodes, this is likely to cause an error (NODE_SKINNED_MESH_WITHOUT_SKIN)", primitive_label); + error!("Skinned mesh {} used on both skinned and non skin nodes, this is likely to cause an error (NODE_SKINNED_MESH_WITHOUT_SKIN)", primitive_label); } } match convert_attribute( @@ -704,17 +659,15 @@ async fn load_gltf<'a, 'b, 'c>( if mesh.attribute(Mesh::ATTRIBUTE_NORMAL).is_none() && matches!(mesh.primitive_topology(), PrimitiveTopology::TriangleList) { - bevy_utils::tracing::debug!( - "Automatically calculating missing vertex normals for geometry." - ); + tracing::debug!("Automatically calculating missing vertex normals for geometry."); let vertex_count_before = mesh.count_vertices(); mesh.duplicate_vertices(); mesh.compute_flat_normals(); let vertex_count_after = mesh.count_vertices(); if vertex_count_before != vertex_count_after { - bevy_utils::tracing::debug!("Missing vertex normals in indexed geometry, computing them as flat. Vertex count increased from {} to {}", vertex_count_before, vertex_count_after); + tracing::debug!("Missing vertex normals in indexed geometry, computing them as flat. Vertex count increased from {} to {}", vertex_count_before, vertex_count_after); } else { - bevy_utils::tracing::debug!( + tracing::debug!( "Missing vertex normals in indexed geometry, computing them as flat." ); } @@ -726,9 +679,9 @@ async fn load_gltf<'a, 'b, 'c>( { mesh.insert_attribute(Mesh::ATTRIBUTE_TANGENT, vertex_attribute); } else if mesh.attribute(Mesh::ATTRIBUTE_NORMAL).is_some() - && material_needs_tangents(&primitive.material()) + && needs_tangents(&primitive.material()) { - bevy_utils::tracing::debug!( + tracing::debug!( "Missing vertex tangents for {}, computing them using the mikktspace algorithm. 
Consider using a tool such as Blender to pre-compute the tangents.", file_name ); @@ -737,9 +690,9 @@ async fn load_gltf<'a, 'b, 'c>( generate_tangents_span.in_scope(|| { if let Err(err) = mesh.generate_tangents() { warn!( - "Failed to generate vertex tangents using the mikktspace algorithm: {:?}", - err - ); + "Failed to generate vertex tangents using the mikktspace algorithm: {}", + err + ); } }); } @@ -753,13 +706,20 @@ async fn load_gltf<'a, 'b, 'c>( .material() .index() .and_then(|i| materials.get(i).cloned()), - get_gltf_extras(primitive.extras()), - get_gltf_extras(primitive.material().extras()), + primitive.extras().as_deref().map(GltfExtras::from), + primitive + .material() + .extras() + .as_deref() + .map(GltfExtras::from), )); } - let mesh = - super::GltfMesh::new(&gltf_mesh, primitives, get_gltf_extras(gltf_mesh.extras())); + let mesh = super::GltfMesh::new( + &gltf_mesh, + primitives, + gltf_mesh.extras().as_deref().map(GltfExtras::from), + ); let handle = load_context.add_labeled_asset(mesh.asset_label().to_string(), mesh); if let Some(name) = gltf_mesh.name() { @@ -774,12 +734,13 @@ async fn load_gltf<'a, 'b, 'c>( let reader = gltf_skin.reader(|buffer| Some(&buffer_data[buffer.index()])); let local_to_bone_bind_matrices: Vec = reader .read_inverse_bind_matrices() - .unwrap() - .map(|mat| Mat4::from_cols_array_2d(&mat)) - .collect(); + .map(|mats| mats.map(|mat| Mat4::from_cols_array_2d(&mat)).collect()) + .unwrap_or_else(|| { + core::iter::repeat_n(Mat4::IDENTITY, gltf_skin.joints().len()).collect() + }); load_context.add_labeled_asset( - inverse_bind_matrices_label(&gltf_skin), + GltfAssetLabel::InverseBindMatrices(gltf_skin.index()).to_string(), SkinnedMeshInverseBindposes::from(local_to_bone_bind_matrices), ) }) @@ -787,30 +748,58 @@ async fn load_gltf<'a, 'b, 'c>( let mut nodes = HashMap::>::default(); let mut named_nodes = >::default(); - let mut skins = vec![]; + let mut skins = >::default(); let mut named_skins = >::default(); - for node in 
GltfTreeIterator::try_new(&gltf)? { + + // First, create the node handles. + for node in gltf.nodes() { + let label = GltfAssetLabel::Node(node.index()); + let label_handle = load_context.get_label_handle(label.to_string()); + nodes.insert(node.index(), label_handle); + } + + // Then check for cycles. + check_for_cycles(&gltf)?; + + // Now populate the nodes. + for node in gltf.nodes() { let skin = node.skin().map(|skin| { - let joints = skin - .joints() - .map(|joint| nodes.get(&joint.index()).unwrap().clone()) - .collect(); + skins + .entry(skin.index()) + .or_insert_with(|| { + let joints: Vec<_> = skin + .joints() + .map(|joint| nodes.get(&joint.index()).unwrap().clone()) + .collect(); + + if joints.len() > MAX_JOINTS { + warn!( + "The glTF skin {} has {} joints, but the maximum supported is {}", + skin.name() + .map(ToString::to_string) + .unwrap_or_else(|| skin.index().to_string()), + joints.len(), + MAX_JOINTS + ); + } - let gltf_skin = GltfSkin::new( - &skin, - joints, - skinned_mesh_inverse_bindposes[skin.index()].clone(), - get_gltf_extras(skin.extras()), - ); + let gltf_skin = GltfSkin::new( + &skin, + joints, + skinned_mesh_inverse_bindposes[skin.index()].clone(), + skin.extras().as_deref().map(GltfExtras::from), + ); - let handle = load_context.add_labeled_asset(skin_label(&skin), gltf_skin); + let handle = load_context + .add_labeled_asset(gltf_skin.asset_label().to_string(), gltf_skin); - skins.push(handle.clone()); - if let Some(name) = skin.name() { - named_skins.insert(name.into(), handle.clone()); - } + if let Some(name) = skin.name() { + named_skins.insert(name.into(), handle.clone()); + } - handle + handle + }) + .clone() }); let children = node @@ -829,7 +818,7 @@ async fn load_gltf<'a, 'b, 'c>( mesh, node_transform(&node), skin, - get_gltf_extras(node.extras()), + node.extras().as_deref().map(GltfExtras::from), ); #[cfg(feature = "bevy_animation")] @@ -924,7 +913,10 @@ async fn load_gltf<'a, 'b, 'c>( }); } let loaded_scene = 
scene_load_context.finish(Scene::new(world)); - let scene_handle = load_context.add_loaded_labeled_asset(scene_label(&scene), loaded_scene); + let scene_handle = load_context.add_loaded_labeled_asset( + GltfAssetLabel::Scene(scene.index()).to_string(), + loaded_scene, + ); if let Some(name) = scene.name() { named_scenes.insert(name.into(), scene_handle.clone()); @@ -941,7 +933,7 @@ async fn load_gltf<'a, 'b, 'c>( named_scenes, meshes, named_meshes, - skins, + skins: skins.into_values().collect(), named_skins, materials, named_materials, @@ -959,62 +951,6 @@ async fn load_gltf<'a, 'b, 'c>( }) } -fn get_gltf_extras(extras: &json::Extras) -> Option { - extras.as_ref().map(|extras| GltfExtras { - value: extras.get().to_string(), - }) -} - -/// Calculate the transform of gLTF node. -/// -/// This should be used instead of calling [`gltf::scene::Transform::matrix()`] -/// on [`Node::transform()`] directly because it uses optimized glam types and -/// if `libm` feature of `bevy_math` crate is enabled also handles cross -/// platform determinism properly. 
-fn node_transform(node: &Node) -> Transform { - match node.transform() { - gltf::scene::Transform::Matrix { matrix } => { - Transform::from_matrix(Mat4::from_cols_array_2d(&matrix)) - } - gltf::scene::Transform::Decomposed { - translation, - rotation, - scale, - } => Transform { - translation: Vec3::from(translation), - rotation: bevy_math::Quat::from_array(rotation), - scale: Vec3::from(scale), - }, - } -} - -fn node_name(node: &Node) -> Name { - let name = node - .name() - .map(ToString::to_string) - .unwrap_or_else(|| format!("GltfNode{}", node.index())); - Name::new(name) -} - -#[cfg(feature = "bevy_animation")] -fn paths_recur( - node: Node, - current_path: &[Name], - paths: &mut HashMap)>, - root_index: usize, - visited: &mut HashSet, -) { - let mut path = current_path.to_owned(); - path.push(node_name(&node)); - visited.insert(node.index()); - for child in node.children() { - if !visited.contains(&child.index()) { - paths_recur(child, &path, paths, root_index, visited); - } - } - paths.insert(node.index(), (root_index, path)); -} - /// Loads a glTF texture as a bevy [`Image`] and returns it together with its label. 
async fn load_image<'a, 'b>( gltf_texture: gltf::Texture<'a>, @@ -1026,18 +962,13 @@ async fn load_image<'a, 'b>( ) -> Result { let is_srgb = !linear_textures.contains(&gltf_texture.index()); let sampler_descriptor = texture_sampler(&gltf_texture); - #[cfg(all(debug_assertions, feature = "dds"))] - let name = gltf_texture - .name() - .map_or("Unknown GLTF Texture".to_string(), ToString::to_string); + match gltf_texture.source().source() { Source::View { view, mime_type } => { let start = view.offset(); let end = view.offset() + view.length(); let buffer = &buffer_data[view.buffer().index()][start..end]; let image = Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - name, buffer, ImageType::MimeType(mime_type), supported_compressed_formats, @@ -1060,8 +991,6 @@ async fn load_image<'a, 'b>( let image_type = ImageType::MimeType(data_uri.mime_type); Ok(ImageOrPath::Image { image: Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - name, &bytes, mime_type.map(ImageType::MimeType).unwrap_or(image_type), supported_compressed_formats, @@ -1091,40 +1020,37 @@ fn load_material( is_scale_inverted: bool, ) -> Handle { let material_label = material_label(material, is_scale_inverted); - load_context.labeled_asset_scope(material_label, |load_context| { + load_context.labeled_asset_scope(material_label.to_string(), |load_context| { let pbr = material.pbr_metallic_roughness(); // TODO: handle missing label handle errors here? 
let color = pbr.base_color_factor(); let base_color_channel = pbr .base_color_texture() - .map(|info| get_uv_channel(material, "base color", info.tex_coord())) + .map(|info| uv_channel(material, "base color", info.tex_coord())) .unwrap_or_default(); let base_color_texture = pbr .base_color_texture() - .map(|info| texture_handle(load_context, &info.texture())); + .map(|info| texture_handle(&info.texture(), load_context)); let uv_transform = pbr .base_color_texture() - .and_then(|info| { - info.texture_transform() - .map(convert_texture_transform_to_affine2) - }) + .and_then(|info| info.texture_transform().map(texture_transform_to_affine2)) .unwrap_or_default(); let normal_map_channel = material .normal_texture() - .map(|info| get_uv_channel(material, "normal map", info.tex_coord())) + .map(|info| uv_channel(material, "normal map", info.tex_coord())) .unwrap_or_default(); let normal_map_texture: Option> = material.normal_texture().map(|normal_texture| { // TODO: handle normal_texture.scale - texture_handle(load_context, &normal_texture.texture()) + texture_handle(&normal_texture.texture(), load_context) }); let metallic_roughness_channel = pbr .metallic_roughness_texture() - .map(|info| get_uv_channel(material, "metallic/roughness", info.tex_coord())) + .map(|info| uv_channel(material, "metallic/roughness", info.tex_coord())) .unwrap_or_default(); let metallic_roughness_texture = pbr.metallic_roughness_texture().map(|info| { warn_on_differing_texture_transforms( @@ -1133,27 +1059,27 @@ fn load_material( uv_transform, "metallic/roughness", ); - texture_handle(load_context, &info.texture()) + texture_handle(&info.texture(), load_context) }); let occlusion_channel = material .occlusion_texture() - .map(|info| get_uv_channel(material, "occlusion", info.tex_coord())) + .map(|info| uv_channel(material, "occlusion", info.tex_coord())) .unwrap_or_default(); let occlusion_texture = material.occlusion_texture().map(|occlusion_texture| { // TODO: handle 
occlusion_texture.strength() (a scalar multiplier for occlusion strength) - texture_handle(load_context, &occlusion_texture.texture()) + texture_handle(&occlusion_texture.texture(), load_context) }); let emissive = material.emissive_factor(); let emissive_channel = material .emissive_texture() - .map(|info| get_uv_channel(material, "emissive", info.tex_coord())) + .map(|info| uv_channel(material, "emissive", info.tex_coord())) .unwrap_or_default(); let emissive_texture = material.emissive_texture().map(|info| { // TODO: handle occlusion_texture.strength() (a scalar multiplier for occlusion strength) warn_on_differing_texture_transforms(material, &info, uv_transform, "emissive"); - texture_handle(load_context, &info.texture()) + texture_handle(&info.texture(), load_context) }); #[cfg(feature = "pbr_transmission_textures")] @@ -1163,14 +1089,12 @@ fn load_material( .map_or((0.0, UvChannel::Uv0, None), |transmission| { let specular_transmission_channel = transmission .transmission_texture() - .map(|info| { - get_uv_channel(material, "specular/transmission", info.tex_coord()) - }) + .map(|info| uv_channel(material, "specular/transmission", info.tex_coord())) .unwrap_or_default(); let transmission_texture: Option> = transmission .transmission_texture() .map(|transmission_texture| { - texture_handle(load_context, &transmission_texture.texture()) + texture_handle(&transmission_texture.texture(), load_context) }); ( @@ -1197,11 +1121,11 @@ fn load_material( |volume| { let thickness_channel = volume .thickness_texture() - .map(|info| get_uv_channel(material, "thickness", info.tex_coord())) + .map(|info| uv_channel(material, "thickness", info.tex_coord())) .unwrap_or_default(); let thickness_texture: Option> = volume.thickness_texture().map(|thickness_texture| { - texture_handle(load_context, &thickness_texture.texture()) + texture_handle(&thickness_texture.texture(), load_context) }); ( @@ -1236,6 +1160,10 @@ fn load_material( let anisotropy = 
AnisotropyExtension::parse(load_context, document, material).unwrap_or_default(); + // Parse the `KHR_materials_specular` extension data if necessary. + let specular = + SpecularExtension::parse(load_context, document, material).unwrap_or_default(); + // We need to operate in the Linear color space and be willing to exceed 1.0 in our channels let base_emissive = LinearRgba::rgb(emissive[0], emissive[1], emissive[2]); let emissive = base_emissive * material.emissive_strength().unwrap_or(1.0); @@ -1304,78 +1232,37 @@ fn load_material( anisotropy_channel: anisotropy.anisotropy_channel, #[cfg(feature = "pbr_anisotropy_texture")] anisotropy_texture: anisotropy.anisotropy_texture, + // From the `KHR_materials_specular` spec: + // + reflectance: specular.specular_factor.unwrap_or(1.0) as f32 * 0.5, + #[cfg(feature = "pbr_specular_textures")] + specular_channel: specular.specular_channel, + #[cfg(feature = "pbr_specular_textures")] + specular_texture: specular.specular_texture, + specular_tint: match specular.specular_color_factor { + Some(color) => Color::linear_rgb(color[0] as f32, color[1] as f32, color[2] as f32), + None => Color::WHITE, + }, + #[cfg(feature = "pbr_specular_textures")] + specular_tint_channel: specular.specular_color_channel, + #[cfg(feature = "pbr_specular_textures")] + specular_tint_texture: specular.specular_color_texture, ..Default::default() } }) } -fn get_uv_channel(material: &Material, texture_kind: &str, tex_coord: u32) -> UvChannel { - match tex_coord { - 0 => UvChannel::Uv0, - 1 => UvChannel::Uv1, - _ => { - let material_name = material - .name() - .map(|n| format!("the material \"{n}\"")) - .unwrap_or_else(|| "an unnamed material".to_string()); - let material_index = material - .index() - .map(|i| format!("index {i}")) - .unwrap_or_else(|| "default".to_string()); - warn!( - "Only 2 UV Channels are supported, but {material_name} ({material_index}) \ - has the TEXCOORD attribute {} on texture kind {texture_kind}, which will fallback to 0.", - 
tex_coord, - ); - UvChannel::Uv0 - } - } -} - -fn convert_texture_transform_to_affine2(texture_transform: TextureTransform) -> Affine2 { - Affine2::from_scale_angle_translation( - texture_transform.scale().into(), - -texture_transform.rotation(), - texture_transform.offset().into(), - ) -} - -fn warn_on_differing_texture_transforms( - material: &Material, - info: &Info, - texture_transform: Affine2, - texture_kind: &str, -) { - let has_differing_texture_transform = info - .texture_transform() - .map(convert_texture_transform_to_affine2) - .is_some_and(|t| t != texture_transform); - if has_differing_texture_transform { - let material_name = material - .name() - .map(|n| format!("the material \"{n}\"")) - .unwrap_or_else(|| "an unnamed material".to_string()); - let texture_name = info - .texture() - .name() - .map(|n| format!("its {texture_kind} texture \"{n}\"")) - .unwrap_or_else(|| format!("its unnamed {texture_kind} texture")); - let material_index = material - .index() - .map(|i| format!("index {i}")) - .unwrap_or_else(|| "default".to_string()); - warn!( - "Only texture transforms on base color textures are supported, but {material_name} ({material_index}) \ - has a texture transform on {texture_name} (index {}), which will be ignored.", info.texture().index() - ); - } -} - /// Loads a glTF node. -#[allow(clippy::too_many_arguments, clippy::result_large_err)] +#[cfg_attr( + not(target_arch = "wasm32"), + expect( + clippy::result_large_err, + reason = "`GltfError` is only barely past the threshold for large errors." + ) +)] fn load_node( gltf_node: &Node, - world_builder: &mut WorldChildBuilder, + child_spawner: &mut ChildSpawner, root_load_context: &LoadContext, load_context: &mut LoadContext, settings: &GltfLoaderSettings, @@ -1397,7 +1284,7 @@ fn load_node( // of negative scale factors is odd. if so we will assign a copy of the material with face // culling inverted, rather than modifying the mesh data directly. 
let is_scale_inverted = world_transform.scale.is_negative_bitmask().count_ones() & 1 == 1; - let mut node = world_builder.spawn((transform, Visibility::default())); + let mut node = child_spawner.spawn((transform, Visibility::default())); let name = node_name(gltf_node); node.insert(name.clone()); @@ -1485,7 +1372,7 @@ fn load_node( // append primitives for primitive in mesh.primitives() { let material = primitive.material(); - let material_label = material_label(&material, is_scale_inverted); + let material_label = material_label(&material, is_scale_inverted).to_string(); // This will make sure we load the default material now since it would not have been // added when iterating over all the gltf materials (since the default material is @@ -1679,157 +1566,6 @@ fn load_node( } } -fn primitive_name(mesh: &gltf::Mesh, primitive: &Primitive) -> String { - let mesh_name = mesh.name().unwrap_or("Mesh"); - if mesh.primitives().len() > 1 { - format!("{}.{}", mesh_name, primitive.index()) - } else { - mesh_name.to_string() - } -} - -/// Returns the label for the `material`. -fn material_label(material: &Material, is_scale_inverted: bool) -> String { - if let Some(index) = material.index() { - GltfAssetLabel::Material { - index, - is_scale_inverted, - } - .to_string() - } else { - GltfAssetLabel::DefaultMaterial.to_string() - } -} - -fn texture_handle(load_context: &mut LoadContext, texture: &gltf::Texture) -> Handle { - match texture.source().source() { - Source::View { .. } => { - load_context.get_label_handle(GltfAssetLabel::Texture(texture.index()).to_string()) - } - Source::Uri { uri, .. 
} => { - let uri = percent_encoding::percent_decode_str(uri) - .decode_utf8() - .unwrap(); - let uri = uri.as_ref(); - if let Ok(_data_uri) = DataUri::parse(uri) { - load_context.get_label_handle(GltfAssetLabel::Texture(texture.index()).to_string()) - } else { - let parent = load_context.path().parent().unwrap(); - let image_path = parent.join(uri); - load_context.load(image_path) - } - } - } -} - -/// Given a [`json::texture::Info`], returns the handle of the texture that this -/// refers to. -/// -/// This is a low-level function only used when the `gltf` crate has no support -/// for an extension, forcing us to parse its texture references manually. -#[allow(dead_code)] -fn texture_handle_from_info( - load_context: &mut LoadContext, - document: &Document, - texture_info: &json::texture::Info, -) -> Handle { - let texture = document - .textures() - .nth(texture_info.index.value()) - .expect("Texture info references a nonexistent texture"); - texture_handle(load_context, &texture) -} - -/// Returns the label for the `scene`. -fn scene_label(scene: &gltf::Scene) -> String { - GltfAssetLabel::Scene(scene.index()).to_string() -} - -/// Return the label for the `skin`. -fn skin_label(skin: &gltf::Skin) -> String { - GltfAssetLabel::Skin(skin.index()).to_string() -} - -/// Return the label for the `inverseBindMatrices` of the node. -fn inverse_bind_matrices_label(skin: &gltf::Skin) -> String { - GltfAssetLabel::InverseBindMatrices(skin.index()).to_string() -} - -/// Extracts the texture sampler data from the glTF texture. 
-fn texture_sampler(texture: &gltf::Texture) -> ImageSamplerDescriptor { - let gltf_sampler = texture.sampler(); - - ImageSamplerDescriptor { - address_mode_u: texture_address_mode(&gltf_sampler.wrap_s()), - address_mode_v: texture_address_mode(&gltf_sampler.wrap_t()), - - mag_filter: gltf_sampler - .mag_filter() - .map(|mf| match mf { - MagFilter::Nearest => ImageFilterMode::Nearest, - MagFilter::Linear => ImageFilterMode::Linear, - }) - .unwrap_or(ImageSamplerDescriptor::default().mag_filter), - - min_filter: gltf_sampler - .min_filter() - .map(|mf| match mf { - MinFilter::Nearest - | MinFilter::NearestMipmapNearest - | MinFilter::NearestMipmapLinear => ImageFilterMode::Nearest, - MinFilter::Linear - | MinFilter::LinearMipmapNearest - | MinFilter::LinearMipmapLinear => ImageFilterMode::Linear, - }) - .unwrap_or(ImageSamplerDescriptor::default().min_filter), - - mipmap_filter: gltf_sampler - .min_filter() - .map(|mf| match mf { - MinFilter::Nearest - | MinFilter::Linear - | MinFilter::NearestMipmapNearest - | MinFilter::LinearMipmapNearest => ImageFilterMode::Nearest, - MinFilter::NearestMipmapLinear | MinFilter::LinearMipmapLinear => { - ImageFilterMode::Linear - } - }) - .unwrap_or(ImageSamplerDescriptor::default().mipmap_filter), - - ..Default::default() - } -} - -/// Maps the texture address mode form glTF to wgpu. -fn texture_address_mode(gltf_address_mode: &WrappingMode) -> ImageAddressMode { - match gltf_address_mode { - WrappingMode::ClampToEdge => ImageAddressMode::ClampToEdge, - WrappingMode::Repeat => ImageAddressMode::Repeat, - WrappingMode::MirroredRepeat => ImageAddressMode::MirrorRepeat, - } -} - -/// Maps the `primitive_topology` form glTF to `wgpu`. 
-#[allow(clippy::result_large_err)] -fn get_primitive_topology(mode: Mode) -> Result { - match mode { - Mode::Points => Ok(PrimitiveTopology::PointList), - Mode::Lines => Ok(PrimitiveTopology::LineList), - Mode::LineStrip => Ok(PrimitiveTopology::LineStrip), - Mode::Triangles => Ok(PrimitiveTopology::TriangleList), - Mode::TriangleStrip => Ok(PrimitiveTopology::TriangleStrip), - mode => Err(GltfError::UnsupportedPrimitive { mode }), - } -} - -fn alpha_mode(material: &Material) -> AlphaMode { - match material.alpha_mode() { - gltf::material::AlphaMode::Opaque => AlphaMode::Opaque, - gltf::material::AlphaMode::Mask => AlphaMode::Mask(material.alpha_cutoff().unwrap_or(0.5)), - gltf::material::AlphaMode::Blend => AlphaMode::Blend, - } -} - /// Loads the raw glTF buffer data for a specific glTF file. async fn load_buffers( gltf: &gltf::Gltf, @@ -1871,138 +1607,16 @@ async fn load_buffers( Ok(buffer_data) } -/// Iterator for a Gltf tree. -/// -/// It resolves a Gltf tree and allows for a safe Gltf nodes iteration, -/// putting dependent nodes before dependencies. 
-struct GltfTreeIterator<'a> { - nodes: Vec>, -} - -impl<'a> GltfTreeIterator<'a> { - #[allow(clippy::result_large_err)] - fn try_new(gltf: &'a gltf::Gltf) -> Result { - let nodes = gltf.nodes().collect::>(); - - let mut empty_children = VecDeque::new(); - let mut parents = vec![None; nodes.len()]; - let mut unprocessed_nodes = nodes - .into_iter() - .enumerate() - .map(|(i, node)| { - let children = node - .children() - .map(|child| child.index()) - .collect::>(); - for &child in &children { - let parent = parents.get_mut(child).unwrap(); - *parent = Some(i); - } - if children.is_empty() { - empty_children.push_back(i); - } - (i, (node, children)) - }) - .collect::>(); - - let mut nodes = Vec::new(); - let mut warned_about_max_joints = >::default(); - while let Some(index) = empty_children.pop_front() { - if let Some(skin) = unprocessed_nodes.get(&index).unwrap().0.skin() { - if skin.joints().len() > MAX_JOINTS && warned_about_max_joints.insert(skin.index()) - { - warn!( - "The glTF skin {:?} has {} joints, but the maximum supported is {}", - skin.name() - .map(ToString::to_string) - .unwrap_or_else(|| skin.index().to_string()), - skin.joints().len(), - MAX_JOINTS - ); - } - - let skin_has_dependencies = skin - .joints() - .any(|joint| unprocessed_nodes.contains_key(&joint.index())); - - if skin_has_dependencies && unprocessed_nodes.len() != 1 { - empty_children.push_back(index); - continue; - } - } - - let (node, children) = unprocessed_nodes.remove(&index).unwrap(); - assert!(children.is_empty()); - nodes.push(node); - - if let Some(parent_index) = parents[index] { - let (_, parent_children) = unprocessed_nodes.get_mut(&parent_index).unwrap(); - - assert!(parent_children.remove(&index)); - if parent_children.is_empty() { - empty_children.push_back(parent_index); - } - } - } - - if !unprocessed_nodes.is_empty() { - return Err(GltfError::CircularChildren(format!( - "{:?}", - unprocessed_nodes - .iter() - .map(|(k, _v)| *k) - .collect::>(), - ))); - } - - 
nodes.reverse(); - Ok(Self { - nodes: nodes.into_iter().collect(), - }) - } -} - -impl<'a> Iterator for GltfTreeIterator<'a> { - type Item = Node<'a>; - - fn next(&mut self) -> Option { - self.nodes.pop() - } -} - -impl<'a> ExactSizeIterator for GltfTreeIterator<'a> { - fn len(&self) -> usize { - self.nodes.len() - } -} - -enum ImageOrPath { - Image { - image: Image, - label: GltfAssetLabel, - }, - Path { - path: PathBuf, - is_srgb: bool, - sampler_descriptor: ImageSamplerDescriptor, - }, -} - struct DataUri<'a> { - mime_type: &'a str, - base64: bool, - data: &'a str, -} - -fn split_once(input: &str, delimiter: char) -> Option<(&str, &str)> { - let mut iter = input.splitn(2, delimiter); - Some((iter.next()?, iter.next()?)) + pub mime_type: &'a str, + pub base64: bool, + pub data: &'a str, } impl<'a> DataUri<'a> { fn parse(uri: &'a str) -> Result, ()> { let uri = uri.strip_prefix("data:").ok_or(())?; - let (mime_type, data) = split_once(uri, ',').ok_or(())?; + let (mime_type, data) = Self::split_once(uri, ',').ok_or(())?; let (mime_type, base64) = match mime_type.strip_suffix(";base64") { Some(mime_type) => (mime_type, true), @@ -2023,15 +1637,64 @@ impl<'a> DataUri<'a> { Ok(self.data.as_bytes().to_owned()) } } + + fn split_once(input: &str, delimiter: char) -> Option<(&str, &str)> { + let mut iter = input.splitn(2, delimiter); + Some((iter.next()?, iter.next()?)) + } +} + +enum ImageOrPath { + Image { + image: Image, + label: GltfAssetLabel, + }, + Path { + path: PathBuf, + is_srgb: bool, + sampler_descriptor: ImageSamplerDescriptor, + }, +} + +impl ImageOrPath { + // TODO: use the threaded impl on wasm once wasm thread pool doesn't deadlock on it + // See https://github.com/bevyengine/bevy/issues/1924 for more details + // The taskpool use is also avoided when there is only one texture for performance reasons and + // to avoid https://github.com/bevyengine/bevy/pull/2725 + // PERF: could this be a Vec instead? Are gltf texture indices dense? 
+ fn process_loaded_texture( + self, + load_context: &mut LoadContext, + handles: &mut Vec>, + ) { + let handle = match self { + ImageOrPath::Image { label, image } => { + load_context.add_labeled_asset(label.to_string(), image) + } + ImageOrPath::Path { + path, + is_srgb, + sampler_descriptor, + } => load_context + .loader() + .with_settings(move |settings: &mut ImageLoaderSettings| { + settings.is_srgb = is_srgb; + settings.sampler = ImageSampler::Descriptor(sampler_descriptor.clone()); + }) + .load(path), + }; + handles.push(handle); + } } -pub(super) struct PrimitiveMorphAttributesIter<'s>( +struct PrimitiveMorphAttributesIter<'s>( pub ( Option>, Option>, Option>, ), ); + impl<'s> Iterator for PrimitiveMorphAttributesIter<'s> { type Item = MorphAttributes; @@ -2051,208 +1714,22 @@ impl<'s> Iterator for PrimitiveMorphAttributesIter<'s> { } } -#[derive(Deserialize)] -#[serde(rename_all = "camelCase")] -struct MorphTargetNames { - pub target_names: Vec, -} - -// A helper structure for `load_node` that contains information about the -// nearest ancestor animation root. +/// A helper structure for `load_node` that contains information about the +/// nearest ancestor animation root. #[cfg(feature = "bevy_animation")] #[derive(Clone)] struct AnimationContext { - // The nearest ancestor animation root. - root: Entity, - // The path to the animation root. This is used for constructing the - // animation target UUIDs. - path: SmallVec<[Name; 8]>, + /// The nearest ancestor animation root. + pub root: Entity, + /// The path to the animation root. This is used for constructing the + /// animation target UUIDs. + pub path: SmallVec<[Name; 8]>, } -/// Parsed data from the `KHR_materials_clearcoat` extension. 
-/// -/// See the specification: -/// -#[derive(Default)] -struct ClearcoatExtension { - clearcoat_factor: Option, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_channel: UvChannel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_texture: Option>, - clearcoat_roughness_factor: Option, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_channel: UvChannel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_texture: Option>, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_channel: UvChannel, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_texture: Option>, -} - -impl ClearcoatExtension { - #[allow(unused_variables)] - fn parse( - load_context: &mut LoadContext, - document: &Document, - material: &Material, - ) -> Option { - let extension = material - .extensions()? - .get("KHR_materials_clearcoat")? - .as_object()?; - - #[cfg(feature = "pbr_multi_layer_material_textures")] - let (clearcoat_channel, clearcoat_texture) = extension - .get("clearcoatTexture") - .and_then(|value| value::from_value::(value.clone()).ok()) - .map(|json_info| { - ( - get_uv_channel(material, "clearcoat", json_info.tex_coord), - texture_handle_from_info(load_context, document, &json_info), - ) - }) - .unzip(); - - #[cfg(feature = "pbr_multi_layer_material_textures")] - let (clearcoat_roughness_channel, clearcoat_roughness_texture) = extension - .get("clearcoatRoughnessTexture") - .and_then(|value| value::from_value::(value.clone()).ok()) - .map(|json_info| { - ( - get_uv_channel(material, "clearcoat roughness", json_info.tex_coord), - texture_handle_from_info(load_context, document, &json_info), - ) - }) - .unzip(); - - #[cfg(feature = "pbr_multi_layer_material_textures")] - let (clearcoat_normal_channel, clearcoat_normal_texture) = extension - .get("clearcoatNormalTexture") - .and_then(|value| value::from_value::(value.clone()).ok()) - 
.map(|json_info| { - ( - get_uv_channel(material, "clearcoat normal", json_info.tex_coord), - texture_handle_from_info(load_context, document, &json_info), - ) - }) - .unzip(); - - Some(ClearcoatExtension { - clearcoat_factor: extension.get("clearcoatFactor").and_then(Value::as_f64), - clearcoat_roughness_factor: extension - .get("clearcoatRoughnessFactor") - .and_then(Value::as_f64), - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_channel: clearcoat_channel.unwrap_or_default(), - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_texture, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_channel: clearcoat_roughness_channel.unwrap_or_default(), - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_roughness_texture, - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_channel: clearcoat_normal_channel.unwrap_or_default(), - #[cfg(feature = "pbr_multi_layer_material_textures")] - clearcoat_normal_texture, - }) - } -} - -/// Parsed data from the `KHR_materials_anisotropy` extension. -/// -/// See the specification: -/// -#[derive(Default)] -struct AnisotropyExtension { - anisotropy_strength: Option, - anisotropy_rotation: Option, - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_channel: UvChannel, - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_texture: Option>, -} - -impl AnisotropyExtension { - #[allow(unused_variables)] - fn parse( - load_context: &mut LoadContext, - document: &Document, - material: &Material, - ) -> Option { - let extension = material - .extensions()? - .get("KHR_materials_anisotropy")? 
- .as_object()?; - - #[cfg(feature = "pbr_anisotropy_texture")] - let (anisotropy_channel, anisotropy_texture) = extension - .get("anisotropyTexture") - .and_then(|value| value::from_value::(value.clone()).ok()) - .map(|json_info| { - ( - get_uv_channel(material, "anisotropy", json_info.tex_coord), - texture_handle_from_info(load_context, document, &json_info), - ) - }) - .unzip(); - - Some(AnisotropyExtension { - anisotropy_strength: extension.get("anisotropyStrength").and_then(Value::as_f64), - anisotropy_rotation: extension.get("anisotropyRotation").and_then(Value::as_f64), - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_channel: anisotropy_channel.unwrap_or_default(), - #[cfg(feature = "pbr_anisotropy_texture")] - anisotropy_texture, - }) - } -} - -/// Returns the index (within the `textures` array) of the texture with the -/// given field name in the data for the material extension with the given name, -/// if there is one. -fn material_extension_texture_index( - material: &Material, - extension_name: &str, - texture_field_name: &str, -) -> Option { - Some( - value::from_value::( - material - .extensions()? - .get(extension_name)? - .as_object()? - .get(texture_field_name)? - .clone(), - ) - .ok()? - .index - .value(), - ) -} - -/// Returns true if the material needs mesh tangents in order to be successfully -/// rendered. -/// -/// We generate them if this function returns true. 
-fn material_needs_tangents(material: &Material) -> bool { - if material.normal_texture().is_some() { - return true; - } - - #[cfg(feature = "pbr_multi_layer_material_textures")] - if material_extension_texture_index( - material, - "KHR_materials_clearcoat", - "clearcoatNormalTexture", - ) - .is_some() - { - return true; - } - - false +#[derive(Deserialize)] +#[serde(rename_all = "camelCase")] +struct MorphTargetNames { + pub target_names: Vec, } #[cfg(test)] @@ -2268,9 +1745,10 @@ mod test { }, AssetApp, AssetPlugin, AssetServer, Assets, Handle, LoadState, }; - use bevy_ecs::{system::Resource, world::World}; + use bevy_ecs::{resource::Resource, world::World}; use bevy_log::LogPlugin; - use bevy_render::mesh::{skinning::SkinnedMeshInverseBindposes, MeshPlugin}; + use bevy_mesh::skinning::SkinnedMeshInverseBindposes; + use bevy_render::mesh::MeshPlugin; use bevy_scene::ScenePlugin; fn test_app(dir: Dir) -> App { @@ -2309,7 +1787,10 @@ mod test { } fn load_gltf_into_app(gltf_path: &str, gltf: &str) -> App { - #[expect(unused)] + #[expect( + dead_code, + reason = "This struct is used to keep the handle alive. As such, we have no need to handle the handle directly." 
+ )] #[derive(Resource)] struct GltfHandle(Handle); diff --git a/crates/bevy_gltf/src/vertex_attributes.rs b/crates/bevy_gltf/src/vertex_attributes.rs index d42ecbf397771..d4ae811c906eb 100644 --- a/crates/bevy_gltf/src/vertex_attributes.rs +++ b/crates/bevy_gltf/src/vertex_attributes.rs @@ -1,9 +1,5 @@ -use bevy_render::{ - mesh::{MeshVertexAttribute, VertexAttributeValues as Values}, - prelude::Mesh, - render_resource::VertexFormat, -}; -use bevy_utils::HashMap; +use bevy_mesh::{Mesh, MeshVertexAttribute, VertexAttributeValues as Values, VertexFormat}; +use bevy_platform::collections::HashMap; use gltf::{ accessor::{DataType, Dimensions}, mesh::util::{ReadColors, ReadJoints, ReadTexCoords, ReadWeights}, diff --git a/crates/bevy_hierarchy/Cargo.toml b/crates/bevy_hierarchy/Cargo.toml deleted file mode 100644 index 093f5f7f88c3a..0000000000000 --- a/crates/bevy_hierarchy/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "bevy_hierarchy" -version = "0.15.0-dev" -edition = "2021" -description = "Provides hierarchy functionality for Bevy Engine" -homepage = "https://bevyengine.org" -repository = "https://github.com/bevyengine/bevy" -license = "MIT OR Apache-2.0" -keywords = ["bevy"] - -[features] -default = ["bevy_app"] -trace = [] -bevy_app = ["reflect", "dep:bevy_app"] -reflect = ["bevy_ecs/bevy_reflect", "bevy_reflect"] - -[dependencies] -# bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev", optional = true } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev", default-features = false } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", - "smallvec", -], optional = true } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -disqualified = "1.0" - -smallvec = { version = "1.11", features = ["union", "const_generics"] } - -[lints] -workspace = true - -[package.metadata.docs.rs] -rustdoc-args = ["-Zunstable-options", "--generate-link-to-definition"] -all-features = true diff --git 
a/crates/bevy_hierarchy/README.md b/crates/bevy_hierarchy/README.md deleted file mode 100644 index 22d7802e9a957..0000000000000 --- a/crates/bevy_hierarchy/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Bevy Hierarchy - -[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) -[![Crates.io](https://img.shields.io/crates/v/bevy_hierarchy.svg)](https://crates.io/crates/bevy_hierarchy) -[![Downloads](https://img.shields.io/crates/d/bevy_hierarchy.svg)](https://crates.io/crates/bevy_hierarchy) -[![Docs](https://docs.rs/bevy_hierarchy/badge.svg)](https://docs.rs/bevy_hierarchy/latest/bevy_hierarchy/) -[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) diff --git a/crates/bevy_hierarchy/src/child_builder.rs b/crates/bevy_hierarchy/src/child_builder.rs deleted file mode 100644 index 769da2b83bb8b..0000000000000 --- a/crates/bevy_hierarchy/src/child_builder.rs +++ /dev/null @@ -1,1307 +0,0 @@ -use crate::{Children, HierarchyEvent, Parent}; -use bevy_ecs::{ - bundle::Bundle, - entity::Entity, - prelude::Events, - system::{Commands, EntityCommands}, - world::{Command, EntityWorldMut, World}, -}; -use smallvec::{smallvec, SmallVec}; - -// Do not use `world.send_event_batch` as it prints error message when the Events are not available in the world, -// even though it's a valid use case to execute commands on a world without events. Loading a GLTF file for example -fn push_events(world: &mut World, events: impl IntoIterator) { - if let Some(mut moved) = world.get_resource_mut::>() { - moved.extend(events); - } -} - -/// Adds `child` to `parent`'s [`Children`], without checking if it is already present there. -/// -/// This might cause unexpected results when removing duplicate children. 
-fn add_child_unchecked(world: &mut World, parent: Entity, child: Entity) { - let mut parent = world.entity_mut(parent); - if let Some(mut children) = parent.get_mut::() { - children.0.push(child); - } else { - parent.insert(Children(smallvec![child])); - } -} - -/// Sets [`Parent`] of the `child` to `new_parent`. Inserts [`Parent`] if `child` doesn't have one. -fn update_parent(world: &mut World, child: Entity, new_parent: Entity) -> Option { - let mut child = world.entity_mut(child); - if let Some(mut parent) = child.get_mut::() { - let previous = parent.0; - *parent = Parent(new_parent); - Some(previous) - } else { - child.insert(Parent(new_parent)); - None - } -} - -/// Remove child from the parent's [`Children`] component. -/// -/// Removes the [`Children`] component from the parent if it's empty. -fn remove_from_children(world: &mut World, parent: Entity, child: Entity) { - let Ok(mut parent) = world.get_entity_mut(parent) else { - return; - }; - let Some(mut children) = parent.get_mut::() else { - return; - }; - children.0.retain(|x| *x != child); - if children.is_empty() { - parent.remove::(); - } -} - -/// Update the [`Parent`] component of the `child`. -/// Removes the `child` from the previous parent's [`Children`]. -/// -/// Does not update the new parents [`Children`] component. -/// -/// Does nothing if `child` was already a child of `parent`. -/// -/// Sends [`HierarchyEvent`]'s. -fn update_old_parent(world: &mut World, child: Entity, parent: Entity) { - let previous = update_parent(world, child, parent); - if let Some(previous_parent) = previous { - // Do nothing if the child was already parented to this entity. 
- if previous_parent == parent { - return; - } - remove_from_children(world, previous_parent, child); - - push_events( - world, - [HierarchyEvent::ChildMoved { - child, - previous_parent, - new_parent: parent, - }], - ); - } else { - push_events(world, [HierarchyEvent::ChildAdded { child, parent }]); - } -} - -/// Update the [`Parent`] components of the `children`. -/// Removes the `children` from their previous parent's [`Children`]. -/// -/// Does not update the new parents [`Children`] component. -/// -/// Does nothing for a child if it was already a child of `parent`. -/// -/// Sends [`HierarchyEvent`]'s. -fn update_old_parents(world: &mut World, parent: Entity, children: &[Entity]) { - let mut events: SmallVec<[HierarchyEvent; 8]> = SmallVec::with_capacity(children.len()); - for &child in children { - if let Some(previous) = update_parent(world, child, parent) { - // Do nothing if the entity already has the correct parent. - if parent == previous { - continue; - } - - remove_from_children(world, previous, child); - events.push(HierarchyEvent::ChildMoved { - child, - previous_parent: previous, - new_parent: parent, - }); - } else { - events.push(HierarchyEvent::ChildAdded { child, parent }); - } - } - push_events(world, events); -} - -/// Removes entities in `children` from `parent`'s [`Children`], removing the component if it ends up empty. -/// Also removes [`Parent`] component from `children`. -fn remove_children(parent: Entity, children: &[Entity], world: &mut World) { - let mut events: SmallVec<[HierarchyEvent; 8]> = SmallVec::new(); - if let Some(parent_children) = world.get::(parent) { - for &child in children { - if parent_children.contains(&child) { - events.push(HierarchyEvent::ChildRemoved { child, parent }); - } - } - } else { - return; - } - for event in &events { - if let &HierarchyEvent::ChildRemoved { child, .. 
} = event { - world.entity_mut(child).remove::(); - } - } - push_events(world, events); - - let mut parent = world.entity_mut(parent); - if let Some(mut parent_children) = parent.get_mut::() { - parent_children - .0 - .retain(|parent_child| !children.contains(parent_child)); - - if parent_children.is_empty() { - parent.remove::(); - } - } -} - -/// Removes all children from `parent` by removing its [`Children`] component, as well as removing -/// [`Parent`] component from its children. -fn clear_children(parent: Entity, world: &mut World) { - if let Some(children) = world.entity_mut(parent).take::() { - for &child in &children.0 { - world.entity_mut(child).remove::(); - } - } -} - -/// Command that adds a child to an entity. -#[derive(Debug)] -pub struct AddChild { - /// Parent entity to add the child to. - pub parent: Entity, - /// Child entity to add. - pub child: Entity, -} - -impl Command for AddChild { - fn apply(self, world: &mut World) { - world.entity_mut(self.parent).add_child(self.child); - } -} - -/// Command that inserts a child at a given index of a parent's children, shifting following children back. -#[derive(Debug)] -pub struct InsertChildren { - parent: Entity, - children: SmallVec<[Entity; 8]>, - index: usize, -} - -impl Command for InsertChildren { - fn apply(self, world: &mut World) { - world - .entity_mut(self.parent) - .insert_children(self.index, &self.children); - } -} - -/// Command that pushes children to the end of the entity's [`Children`]. -#[derive(Debug)] -pub struct AddChildren { - parent: Entity, - children: SmallVec<[Entity; 8]>, -} - -impl Command for AddChildren { - fn apply(self, world: &mut World) { - world.entity_mut(self.parent).add_children(&self.children); - } -} - -/// Command that removes children from an entity, and removes these children's parent. 
-pub struct RemoveChildren { - parent: Entity, - children: SmallVec<[Entity; 8]>, -} - -impl Command for RemoveChildren { - fn apply(self, world: &mut World) { - remove_children(self.parent, &self.children, world); - } -} - -/// Command that clears all children from an entity and removes [`Parent`] component from those -/// children. -pub struct ClearChildren { - parent: Entity, -} - -impl Command for ClearChildren { - fn apply(self, world: &mut World) { - clear_children(self.parent, world); - } -} - -/// Command that clear all children from an entity, replacing them with the given children. -pub struct ReplaceChildren { - parent: Entity, - children: SmallVec<[Entity; 8]>, -} - -impl Command for ReplaceChildren { - fn apply(self, world: &mut World) { - clear_children(self.parent, world); - world.entity_mut(self.parent).add_children(&self.children); - } -} - -/// Command that removes the parent of an entity, and removes that entity from the parent's [`Children`]. -pub struct RemoveParent { - /// `Entity` whose parent must be removed. - pub child: Entity, -} - -impl Command for RemoveParent { - fn apply(self, world: &mut World) { - world.entity_mut(self.child).remove_parent(); - } -} - -/// Struct for building children entities and adding them to a parent entity. -/// -/// # Example -/// -/// This example creates three entities, a parent and two children. 
-/// -/// ``` -/// # use bevy_ecs::bundle::Bundle; -/// # use bevy_ecs::system::Commands; -/// # use bevy_hierarchy::{ChildBuild, BuildChildren}; -/// # #[derive(Bundle)] -/// # struct MyBundle {} -/// # #[derive(Bundle)] -/// # struct MyChildBundle {} -/// # -/// # fn test(mut commands: Commands) { -/// commands.spawn(MyBundle {}).with_children(|child_builder| { -/// child_builder.spawn(MyChildBundle {}); -/// child_builder.spawn(MyChildBundle {}); -/// }); -/// # } -/// ``` -pub struct ChildBuilder<'a> { - commands: Commands<'a, 'a>, - add_children: AddChildren, -} - -/// Trait for building children entities and adding them to a parent entity. This is used in -/// implementations of [`BuildChildren`] as a bound on the [`Builder`](BuildChildren::Builder) -/// associated type. The closure passed to [`BuildChildren::with_children`] accepts an -/// implementation of `ChildBuild` so that children can be spawned via [`ChildBuild::spawn`]. -pub trait ChildBuild { - /// Spawn output type. Both [`spawn`](Self::spawn) and [`spawn_empty`](Self::spawn_empty) return - /// an implementation of this type so that children can be operated on via method-chaining. - /// Implementations of `ChildBuild` reborrow `self` when spawning entities (see - /// [`Commands::spawn_empty`] and [`World::get_entity_mut`]). Lifetime `'a` corresponds to this - /// reborrowed self, and `Self` outlives it. - type SpawnOutput<'a>: BuildChildren - where - Self: 'a; - - /// Spawns an entity with the given bundle and inserts it into the parent entity's [`Children`]. - /// Also adds [`Parent`] component to the created entity. - fn spawn(&mut self, bundle: impl Bundle) -> Self::SpawnOutput<'_>; - - /// Spawns an [`Entity`] with no components and inserts it into the parent entity's [`Children`]. - /// Also adds [`Parent`] component to the created entity. - fn spawn_empty(&mut self) -> Self::SpawnOutput<'_>; - - /// Returns the parent entity. 
- fn parent_entity(&self) -> Entity; - - /// Adds a command to be executed, like [`Commands::queue`]. - fn queue_command(&mut self, command: C) -> &mut Self; -} - -impl ChildBuild for ChildBuilder<'_> { - type SpawnOutput<'a> - = EntityCommands<'a> - where - Self: 'a; - - fn spawn(&mut self, bundle: impl Bundle) -> EntityCommands { - let e = self.commands.spawn(bundle); - self.add_children.children.push(e.id()); - e - } - - fn spawn_empty(&mut self) -> EntityCommands { - let e = self.commands.spawn_empty(); - self.add_children.children.push(e.id()); - e - } - - fn parent_entity(&self) -> Entity { - self.add_children.parent - } - - fn queue_command(&mut self, command: C) -> &mut Self { - self.commands.queue(command); - self - } -} - -/// Trait for removing, adding and replacing children and parents of an entity. -pub trait BuildChildren { - /// Child builder type. - type Builder<'a>: ChildBuild; - - /// Takes a closure which builds children for this entity using [`ChildBuild`]. - /// - /// For convenient spawning of a single child, you can use [`with_child`]. - /// - /// [`with_child`]: BuildChildren::with_child - fn with_children(&mut self, f: impl FnOnce(&mut Self::Builder<'_>)) -> &mut Self; - - /// Spawns the passed bundle and adds it to this entity as a child. - /// - /// For efficient spawning of multiple children, use [`with_children`]. - /// - /// [`with_children`]: BuildChildren::with_children - fn with_child(&mut self, bundle: B) -> &mut Self; - - /// Pushes children to the back of the builder's children. For any entities that are - /// already a child of this one, this method does nothing. - /// - /// If the children were previously children of another parent, that parent's [`Children`] component - /// will have those children removed from its list. Removing all children from a parent causes its - /// [`Children`] component to be removed from the entity. - /// - /// # Panics - /// - /// Panics if any of the children are the same as the parent. 
- fn add_children(&mut self, children: &[Entity]) -> &mut Self; - - /// Inserts children at the given index. - /// - /// If the children were previously children of another parent, that parent's [`Children`] component - /// will have those children removed from its list. Removing all children from a parent causes its - /// [`Children`] component to be removed from the entity. - /// - /// # Panics - /// - /// Panics if any of the children are the same as the parent. - fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self; - - /// Removes the given children - /// - /// Removing all children from a parent causes its [`Children`] component to be removed from the entity. - fn remove_children(&mut self, children: &[Entity]) -> &mut Self; - - /// Adds a single child. - /// - /// If the child was previously the child of another parent, that parent's [`Children`] component - /// will have the child removed from its list. Removing all children from a parent causes its - /// [`Children`] component to be removed from the entity. - /// - /// # Panics - /// - /// Panics if the child is the same as the parent. - fn add_child(&mut self, child: Entity) -> &mut Self; - - /// Removes all children from this entity. The [`Children`] component will be removed if it exists, otherwise this does nothing. - fn clear_children(&mut self) -> &mut Self; - - /// Removes all current children from this entity, replacing them with the specified list of entities. - /// - /// The removed children will have their [`Parent`] component removed. - /// - /// # Panics - /// - /// Panics if any of the children are the same as the parent. - fn replace_children(&mut self, children: &[Entity]) -> &mut Self; - - /// Sets the parent of this entity. - /// - /// If this entity already had a parent, the parent's [`Children`] component will have this - /// child removed from its list. Removing all children from a parent causes its [`Children`] - /// component to be removed from the entity. 
- /// - /// # Panics - /// - /// Panics if the parent is the same as the child. - fn set_parent(&mut self, parent: Entity) -> &mut Self; - - /// Removes the [`Parent`] of this entity. - /// - /// Also removes this entity from its parent's [`Children`] component. Removing all children from a parent causes - /// its [`Children`] component to be removed from the entity. - fn remove_parent(&mut self) -> &mut Self; -} - -impl BuildChildren for EntityCommands<'_> { - type Builder<'a> = ChildBuilder<'a>; - - fn with_children(&mut self, spawn_children: impl FnOnce(&mut Self::Builder<'_>)) -> &mut Self { - let parent = self.id(); - let mut builder = ChildBuilder { - commands: self.commands(), - add_children: AddChildren { - children: SmallVec::default(), - parent, - }, - }; - - spawn_children(&mut builder); - let children = builder.add_children; - if children.children.contains(&parent) { - panic!("Entity cannot be a child of itself."); - } - self.commands().queue(children); - self - } - - fn with_child(&mut self, bundle: B) -> &mut Self { - let parent = self.id(); - let child = self.commands().spawn(bundle).id(); - self.commands().queue(AddChild { parent, child }); - self - } - - fn add_children(&mut self, children: &[Entity]) -> &mut Self { - let parent = self.id(); - if children.contains(&parent) { - panic!("Cannot push entity as a child of itself."); - } - self.commands().queue(AddChildren { - children: SmallVec::from(children), - parent, - }); - self - } - - fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self { - let parent = self.id(); - if children.contains(&parent) { - panic!("Cannot insert entity as a child of itself."); - } - self.commands().queue(InsertChildren { - children: SmallVec::from(children), - index, - parent, - }); - self - } - - fn remove_children(&mut self, children: &[Entity]) -> &mut Self { - let parent = self.id(); - self.commands().queue(RemoveChildren { - children: SmallVec::from(children), - parent, - }); - self - } - - 
fn add_child(&mut self, child: Entity) -> &mut Self { - let parent = self.id(); - if child == parent { - panic!("Cannot add entity as a child of itself."); - } - self.commands().queue(AddChild { child, parent }); - self - } - - fn clear_children(&mut self) -> &mut Self { - let parent = self.id(); - self.commands().queue(ClearChildren { parent }); - self - } - - fn replace_children(&mut self, children: &[Entity]) -> &mut Self { - let parent = self.id(); - if children.contains(&parent) { - panic!("Cannot replace entity as a child of itself."); - } - self.commands().queue(ReplaceChildren { - children: SmallVec::from(children), - parent, - }); - self - } - - fn set_parent(&mut self, parent: Entity) -> &mut Self { - let child = self.id(); - if child == parent { - panic!("Cannot set parent to itself"); - } - self.commands().queue(AddChild { child, parent }); - self - } - - fn remove_parent(&mut self) -> &mut Self { - let child = self.id(); - self.commands().queue(RemoveParent { child }); - self - } -} - -/// Struct for adding children to an entity directly through the [`World`] for use in exclusive systems. 
-#[derive(Debug)] -pub struct WorldChildBuilder<'w> { - world: &'w mut World, - parent: Entity, -} - -impl ChildBuild for WorldChildBuilder<'_> { - type SpawnOutput<'a> - = EntityWorldMut<'a> - where - Self: 'a; - - fn spawn(&mut self, bundle: impl Bundle) -> EntityWorldMut { - let entity = self.world.spawn((bundle, Parent(self.parent))).id(); - add_child_unchecked(self.world, self.parent, entity); - push_events( - self.world, - [HierarchyEvent::ChildAdded { - child: entity, - parent: self.parent, - }], - ); - self.world.entity_mut(entity) - } - - fn spawn_empty(&mut self) -> EntityWorldMut { - let entity = self.world.spawn(Parent(self.parent)).id(); - add_child_unchecked(self.world, self.parent, entity); - push_events( - self.world, - [HierarchyEvent::ChildAdded { - child: entity, - parent: self.parent, - }], - ); - self.world.entity_mut(entity) - } - - fn parent_entity(&self) -> Entity { - self.parent - } - - fn queue_command(&mut self, command: C) -> &mut Self { - command.apply(self.world); - self - } -} - -impl BuildChildren for EntityWorldMut<'_> { - type Builder<'a> = WorldChildBuilder<'a>; - - fn with_children(&mut self, spawn_children: impl FnOnce(&mut WorldChildBuilder)) -> &mut Self { - let parent = self.id(); - self.world_scope(|world| { - spawn_children(&mut WorldChildBuilder { world, parent }); - }); - self - } - - fn with_child(&mut self, bundle: B) -> &mut Self { - let parent = self.id(); - let child = self.world_scope(|world| world.spawn((bundle, Parent(parent))).id()); - if let Some(mut children_component) = self.get_mut::() { - children_component.0.retain(|value| child != *value); - children_component.0.push(child); - } else { - self.insert(Children::from_entities(&[child])); - } - self - } - - fn add_child(&mut self, child: Entity) -> &mut Self { - let parent = self.id(); - if child == parent { - panic!("Cannot add entity as a child of itself."); - } - self.world_scope(|world| { - update_old_parent(world, child, parent); - }); - if let Some(mut 
children_component) = self.get_mut::() { - children_component.0.retain(|value| child != *value); - children_component.0.push(child); - } else { - self.insert(Children::from_entities(&[child])); - } - self - } - - fn add_children(&mut self, children: &[Entity]) -> &mut Self { - if children.is_empty() { - return self; - } - - let parent = self.id(); - if children.contains(&parent) { - panic!("Cannot push entity as a child of itself."); - } - self.world_scope(|world| { - update_old_parents(world, parent, children); - }); - if let Some(mut children_component) = self.get_mut::() { - children_component - .0 - .retain(|value| !children.contains(value)); - children_component.0.extend(children.iter().cloned()); - } else { - self.insert(Children::from_entities(children)); - } - self - } - - fn insert_children(&mut self, index: usize, children: &[Entity]) -> &mut Self { - let parent = self.id(); - if children.contains(&parent) { - panic!("Cannot insert entity as a child of itself."); - } - self.world_scope(|world| { - update_old_parents(world, parent, children); - }); - if let Some(mut children_component) = self.get_mut::() { - children_component - .0 - .retain(|value| !children.contains(value)); - children_component.0.insert_from_slice(index, children); - } else { - self.insert(Children::from_entities(children)); - } - self - } - - fn remove_children(&mut self, children: &[Entity]) -> &mut Self { - let parent = self.id(); - self.world_scope(|world| { - remove_children(parent, children, world); - }); - self - } - - fn set_parent(&mut self, parent: Entity) -> &mut Self { - let child = self.id(); - self.world_scope(|world| { - world.entity_mut(parent).add_child(child); - }); - self - } - - fn remove_parent(&mut self) -> &mut Self { - let child = self.id(); - if let Some(parent) = self.take::().map(|p| p.get()) { - self.world_scope(|world| { - remove_from_children(world, parent, child); - push_events(world, [HierarchyEvent::ChildRemoved { child, parent }]); - }); - } - self - } 
- - fn clear_children(&mut self) -> &mut Self { - let parent = self.id(); - self.world_scope(|world| { - clear_children(parent, world); - }); - self - } - - fn replace_children(&mut self, children: &[Entity]) -> &mut Self { - self.clear_children().add_children(children) - } -} - -#[cfg(test)] -mod tests { - use super::{BuildChildren, ChildBuild}; - use crate::{ - components::{Children, Parent}, - HierarchyEvent::{self, ChildAdded, ChildMoved, ChildRemoved}, - }; - use smallvec::{smallvec, SmallVec}; - - use bevy_ecs::{ - component::Component, - entity::Entity, - event::Events, - system::Commands, - world::{CommandQueue, World}, - }; - - /// Assert the (non)existence and state of the child's [`Parent`] component. - fn assert_parent(world: &World, child: Entity, parent: Option) { - assert_eq!(world.get::(child).map(Parent::get), parent); - } - - /// Assert the (non)existence and state of the parent's [`Children`] component. - fn assert_children(world: &World, parent: Entity, children: Option<&[Entity]>) { - assert_eq!(world.get::(parent).map(|c| &**c), children); - } - - /// Assert the number of children in the parent's [`Children`] component if it exists. - fn assert_num_children(world: &World, parent: Entity, num_children: usize) { - assert_eq!( - world.get::(parent).map(|c| c.len()).unwrap_or(0), - num_children - ); - } - - /// Used to omit a number of events that are not relevant to a particular test. 
- fn omit_events(world: &mut World, number: usize) { - let mut events_resource = world.resource_mut::>(); - let mut events: Vec<_> = events_resource.drain().collect(); - events_resource.extend(events.drain(number..)); - } - - fn assert_events(world: &mut World, expected_events: &[HierarchyEvent]) { - let events: Vec<_> = world - .resource_mut::>() - .drain() - .collect(); - assert_eq!(events, expected_events); - } - - #[test] - fn add_child() { - let world = &mut World::new(); - world.insert_resource(Events::::default()); - - let [a, b, c, d] = core::array::from_fn(|_| world.spawn_empty().id()); - - world.entity_mut(a).add_child(b); - - assert_parent(world, b, Some(a)); - assert_children(world, a, Some(&[b])); - assert_events( - world, - &[ChildAdded { - child: b, - parent: a, - }], - ); - - world.entity_mut(a).add_child(c); - - assert_children(world, a, Some(&[b, c])); - assert_parent(world, c, Some(a)); - assert_events( - world, - &[ChildAdded { - child: c, - parent: a, - }], - ); - // Children component should be removed when it's empty. 
- world.entity_mut(d).add_child(b).add_child(c); - assert_children(world, a, None); - } - - #[test] - fn set_parent() { - let world = &mut World::new(); - world.insert_resource(Events::::default()); - - let [a, b, c] = core::array::from_fn(|_| world.spawn_empty().id()); - - world.entity_mut(a).set_parent(b); - - assert_parent(world, a, Some(b)); - assert_children(world, b, Some(&[a])); - assert_events( - world, - &[ChildAdded { - child: a, - parent: b, - }], - ); - - world.entity_mut(a).set_parent(c); - - assert_parent(world, a, Some(c)); - assert_children(world, b, None); - assert_children(world, c, Some(&[a])); - assert_events( - world, - &[ChildMoved { - child: a, - previous_parent: b, - new_parent: c, - }], - ); - } - - // regression test for https://github.com/bevyengine/bevy/pull/8346 - #[test] - fn set_parent_of_orphan() { - let world = &mut World::new(); - - let [a, b, c] = core::array::from_fn(|_| world.spawn_empty().id()); - world.entity_mut(a).set_parent(b); - assert_parent(world, a, Some(b)); - assert_children(world, b, Some(&[a])); - - world.entity_mut(b).despawn(); - world.entity_mut(a).set_parent(c); - - assert_parent(world, a, Some(c)); - assert_children(world, c, Some(&[a])); - } - - #[test] - fn remove_parent() { - let world = &mut World::new(); - world.insert_resource(Events::::default()); - - let [a, b, c] = core::array::from_fn(|_| world.spawn_empty().id()); - - world.entity_mut(a).add_children(&[b, c]); - world.entity_mut(b).remove_parent(); - - assert_parent(world, b, None); - assert_parent(world, c, Some(a)); - assert_children(world, a, Some(&[c])); - omit_events(world, 2); // Omit ChildAdded events. 
- assert_events( - world, - &[ChildRemoved { - child: b, - parent: a, - }], - ); - - world.entity_mut(c).remove_parent(); - assert_parent(world, c, None); - assert_children(world, a, None); - assert_events( - world, - &[ChildRemoved { - child: c, - parent: a, - }], - ); - } - - #[allow(dead_code)] - #[derive(Component)] - struct C(u32); - - #[test] - fn build_children() { - let mut world = World::default(); - let mut queue = CommandQueue::default(); - let mut commands = Commands::new(&mut queue, &world); - - let parent = commands.spawn(C(1)).id(); - let mut children = Vec::new(); - commands.entity(parent).with_children(|parent| { - children.extend([ - parent.spawn(C(2)).id(), - parent.spawn(C(3)).id(), - parent.spawn(C(4)).id(), - ]); - }); - - queue.apply(&mut world); - assert_eq!( - world.get::(parent).unwrap().0.as_slice(), - children.as_slice(), - ); - assert_eq!(*world.get::(children[0]).unwrap(), Parent(parent)); - assert_eq!(*world.get::(children[1]).unwrap(), Parent(parent)); - - assert_eq!(*world.get::(children[0]).unwrap(), Parent(parent)); - assert_eq!(*world.get::(children[1]).unwrap(), Parent(parent)); - } - - #[test] - fn build_child() { - let mut world = World::default(); - let mut queue = CommandQueue::default(); - let mut commands = Commands::new(&mut queue, &world); - - let parent = commands.spawn(C(1)).id(); - commands.entity(parent).with_child(C(2)); - - queue.apply(&mut world); - assert_eq!(world.get::(parent).unwrap().0.len(), 1); - } - - #[test] - fn push_and_insert_and_remove_children_commands() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3), C(4), C(5)]) - .collect::>(); - - let mut queue = CommandQueue::default(); - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(entities[0]).add_children(&entities[1..3]); - } - queue.apply(&mut world); - - let parent = entities[0]; - let child1 = entities[1]; - let child2 = entities[2]; - let child3 = entities[3]; - let child4 
= entities[4]; - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent).insert_children(1, &entities[3..]); - } - queue.apply(&mut world); - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child3, child4, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child3).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child4).unwrap(), Parent(parent)); - - let remove_children = [child1, child4]; - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent).remove_children(&remove_children); - } - queue.apply(&mut world); - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child3, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert!(world.get::(child1).is_none()); - assert!(world.get::(child4).is_none()); - } - - #[test] - fn push_and_clear_children_commands() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3), C(4), C(5)]) - .collect::>(); - - let mut queue = CommandQueue::default(); - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(entities[0]).add_children(&entities[1..3]); - } - queue.apply(&mut world); - - let parent = entities[0]; - let child1 = entities[1]; - let child2 = entities[2]; - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); 
- assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent).clear_children(); - } - queue.apply(&mut world); - - assert!(world.get::(parent).is_none()); - - assert!(world.get::(child1).is_none()); - assert!(world.get::(child2).is_none()); - } - - #[test] - fn push_and_replace_children_commands() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3), C(4), C(5)]) - .collect::>(); - - let mut queue = CommandQueue::default(); - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(entities[0]).add_children(&entities[1..3]); - } - queue.apply(&mut world); - - let parent = entities[0]; - let child1 = entities[1]; - let child2 = entities[2]; - let child4 = entities[4]; - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - - let replace_children = [child1, child4]; - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent).replace_children(&replace_children); - } - queue.apply(&mut world); - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child4]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child4).unwrap(), Parent(parent)); - assert!(world.get::(child2).is_none()); - } - - #[test] - fn push_and_insert_and_remove_children_world() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3), C(4), C(5)]) - .collect::>(); - - world.entity_mut(entities[0]).add_children(&entities[1..3]); - - let parent = entities[0]; - let child1 = entities[1]; - let child2 = entities[2]; - let child3 = 
entities[3]; - let child4 = entities[4]; - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - - world.entity_mut(parent).insert_children(1, &entities[3..]); - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child3, child4, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child3).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child4).unwrap(), Parent(parent)); - - let remove_children = [child1, child4]; - world.entity_mut(parent).remove_children(&remove_children); - let expected_children: SmallVec<[Entity; 8]> = smallvec![child3, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert!(world.get::(child1).is_none()); - assert!(world.get::(child4).is_none()); - } - - #[test] - fn push_and_insert_and_clear_children_world() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3)]) - .collect::>(); - - world.entity_mut(entities[0]).add_children(&entities[1..3]); - - let parent = entities[0]; - let child1 = entities[1]; - let child2 = entities[2]; - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - - world.entity_mut(parent).clear_children(); - assert!(world.get::(parent).is_none()); - assert!(world.get::(child1).is_none()); - assert!(world.get::(child2).is_none()); - } - - #[test] - fn push_and_replace_children_world() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3), C(4), C(5)]) - .collect::>(); 
- - world.entity_mut(entities[0]).add_children(&entities[1..3]); - - let parent = entities[0]; - let child1 = entities[1]; - let child2 = entities[2]; - let child3 = entities[3]; - let child4 = entities[4]; - - let expected_children: SmallVec<[Entity; 8]> = smallvec![child1, child2]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert_eq!(*world.get::(child1).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - - world.entity_mut(parent).replace_children(&entities[2..]); - let expected_children: SmallVec<[Entity; 8]> = smallvec![child2, child3, child4]; - assert_eq!( - world.get::(parent).unwrap().0.clone(), - expected_children - ); - assert!(world.get::(child1).is_none()); - assert_eq!(*world.get::(child2).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child3).unwrap(), Parent(parent)); - assert_eq!(*world.get::(child4).unwrap(), Parent(parent)); - } - - /// Tests what happens when all children are removed from a parent using world functions - #[test] - fn children_removed_when_empty_world() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3)]) - .collect::>(); - - let parent1 = entities[0]; - let parent2 = entities[1]; - let child = entities[2]; - - // add child into parent1 - world.entity_mut(parent1).add_children(&[child]); - assert_eq!( - world.get::(parent1).unwrap().0.as_slice(), - &[child] - ); - - // move only child from parent1 with `add_children` - world.entity_mut(parent2).add_children(&[child]); - assert!(world.get::(parent1).is_none()); - - // move only child from parent2 with `insert_children` - world.entity_mut(parent1).insert_children(0, &[child]); - assert!(world.get::(parent2).is_none()); - - // remove only child from parent1 with `remove_children` - world.entity_mut(parent1).remove_children(&[child]); - assert!(world.get::(parent1).is_none()); - } - - /// Tests what happens when all children are removed form a parent 
using commands - #[test] - fn children_removed_when_empty_commands() { - let mut world = World::default(); - let entities = world - .spawn_batch(vec![C(1), C(2), C(3)]) - .collect::>(); - - let parent1 = entities[0]; - let parent2 = entities[1]; - let child = entities[2]; - - let mut queue = CommandQueue::default(); - - // add child into parent1 - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent1).add_children(&[child]); - queue.apply(&mut world); - } - assert_eq!( - world.get::(parent1).unwrap().0.as_slice(), - &[child] - ); - - // move only child from parent1 with `add_children` - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent2).add_children(&[child]); - queue.apply(&mut world); - } - assert!(world.get::(parent1).is_none()); - - // move only child from parent2 with `insert_children` - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent1).insert_children(0, &[child]); - queue.apply(&mut world); - } - assert!(world.get::(parent2).is_none()); - - // move only child from parent1 with `add_child` - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent2).add_child(child); - queue.apply(&mut world); - } - assert!(world.get::(parent1).is_none()); - - // remove only child from parent2 with `remove_children` - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent2).remove_children(&[child]); - queue.apply(&mut world); - } - assert!(world.get::(parent2).is_none()); - } - - #[test] - fn regression_add_children_same_archetype() { - let mut world = World::new(); - let child = world.spawn_empty().id(); - world.spawn_empty().add_children(&[child]); - } - - #[test] - fn add_children_idempotent() { - let mut world = World::new(); - let child = world.spawn_empty().id(); - let parent = world - .spawn_empty() - .add_children(&[child]) - .add_children(&[child]) - .id(); - - let mut query = world.query::<&Children>(); - let 
children = query.get(&world, parent).unwrap(); - assert_eq!(**children, [child]); - } - - #[test] - fn add_children_does_not_insert_empty_children() { - let mut world = World::new(); - let parent = world.spawn_empty().add_children(&[]).id(); - - let mut query = world.query::<&Children>(); - let children = query.get(&world, parent); - assert!(children.is_err()); - } - - #[test] - fn with_child() { - let world = &mut World::new(); - world.insert_resource(Events::::default()); - - let a = world.spawn_empty().id(); - let b = (); - let c = (); - let d = (); - - world.entity_mut(a).with_child(b); - - assert_num_children(world, a, 1); - - world.entity_mut(a).with_child(c).with_child(d); - - assert_num_children(world, a, 3); - } -} diff --git a/crates/bevy_hierarchy/src/components/children.rs b/crates/bevy_hierarchy/src/components/children.rs deleted file mode 100644 index 4780d31eb2e67..0000000000000 --- a/crates/bevy_hierarchy/src/components/children.rs +++ /dev/null @@ -1,177 +0,0 @@ -#[cfg(feature = "reflect")] -use bevy_ecs::reflect::{ - ReflectComponent, ReflectFromWorld, ReflectMapEntities, ReflectVisitEntities, - ReflectVisitEntitiesMut, -}; -use bevy_ecs::{ - component::{Component, ComponentCloneHandler, Mutable, StorageType}, - entity::{Entity, VisitEntitiesMut}, - prelude::FromWorld, - world::World, -}; -use core::{ops::Deref, slice}; -use smallvec::SmallVec; - -/// Contains references to the child entities of this entity. -/// -/// Each child must contain a [`Parent`] component that points back to this entity. -/// This component rarely needs to be created manually, -/// consider using higher level utilities like [`BuildChildren::with_children`] -/// which are safer and easier to use. -/// -/// See [`HierarchyQueryExt`] for hierarchy related methods on [`Query`]. 
-/// -/// [`HierarchyQueryExt`]: crate::query_extension::HierarchyQueryExt -/// [`Query`]: bevy_ecs::system::Query -/// [`Parent`]: crate::components::parent::Parent -/// [`BuildChildren::with_children`]: crate::child_builder::BuildChildren::with_children -#[derive(Debug, VisitEntitiesMut)] -#[cfg_attr(feature = "reflect", derive(bevy_reflect::Reflect))] -#[cfg_attr( - feature = "reflect", - reflect( - Component, - MapEntities, - VisitEntities, - VisitEntitiesMut, - Debug, - FromWorld - ) -)] -pub struct Children(pub(crate) SmallVec<[Entity; 8]>); - -impl Component for Children { - const STORAGE_TYPE: StorageType = StorageType::Table; - type Mutability = Mutable; - - fn get_component_clone_handler() -> ComponentCloneHandler { - ComponentCloneHandler::ignore() - } -} - -// TODO: We need to impl either FromWorld or Default so Children can be registered as Reflect. -// This is because Reflect deserialize by creating an instance and apply a patch on top. -// However Children should only ever be set with a real user-defined entities. Its worth looking -// into better ways to handle cases like this. -impl FromWorld for Children { - #[inline] - fn from_world(_world: &mut World) -> Self { - Children(SmallVec::new()) - } -} - -impl Children { - /// Constructs a [`Children`] component with the given entities. - #[inline] - pub(crate) fn from_entities(entities: &[Entity]) -> Self { - Self(SmallVec::from_slice(entities)) - } - - /// Swaps the child at `a_index` with the child at `b_index`. - #[inline] - pub fn swap(&mut self, a_index: usize, b_index: usize) { - self.0.swap(a_index, b_index); - } - - /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) - /// in place using the provided comparator function. - /// - /// For the underlying implementation, see [`slice::sort_by`]. - /// - /// For the unstable version, see [`sort_unstable_by`](Children::sort_unstable_by). 
- /// - /// See also [`sort_by_key`](Children::sort_by_key), [`sort_by_cached_key`](Children::sort_by_cached_key). - #[inline] - pub fn sort_by(&mut self, compare: F) - where - F: FnMut(&Entity, &Entity) -> core::cmp::Ordering, - { - self.0.sort_by(compare); - } - - /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) - /// in place using the provided key extraction function. - /// - /// For the underlying implementation, see [`slice::sort_by_key`]. - /// - /// For the unstable version, see [`sort_unstable_by_key`](Children::sort_unstable_by_key). - /// - /// See also [`sort_by`](Children::sort_by), [`sort_by_cached_key`](Children::sort_by_cached_key). - #[inline] - pub fn sort_by_key(&mut self, compare: F) - where - F: FnMut(&Entity) -> K, - K: Ord, - { - self.0.sort_by_key(compare); - } - - /// Sorts children [stably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) - /// in place using the provided key extraction function. Only evaluates each key at most - /// once per sort, caching the intermediate results in memory. - /// - /// For the underlying implementation, see [`slice::sort_by_cached_key`]. - /// - /// See also [`sort_by`](Children::sort_by), [`sort_by_key`](Children::sort_by_key). - #[inline] - pub fn sort_by_cached_key(&mut self, compare: F) - where - F: FnMut(&Entity) -> K, - K: Ord, - { - self.0.sort_by_cached_key(compare); - } - - /// Sorts children [unstably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) - /// in place using the provided comparator function. - /// - /// For the underlying implementation, see [`slice::sort_unstable_by`]. - /// - /// For the stable version, see [`sort_by`](Children::sort_by). - /// - /// See also [`sort_unstable_by_key`](Children::sort_unstable_by_key). 
- #[inline] - pub fn sort_unstable_by(&mut self, compare: F) - where - F: FnMut(&Entity, &Entity) -> core::cmp::Ordering, - { - self.0.sort_unstable_by(compare); - } - - /// Sorts children [unstably](https://en.wikipedia.org/wiki/Sorting_algorithm#Stability) - /// in place using the provided key extraction function. - /// - /// For the underlying implementation, see [`slice::sort_unstable_by_key`]. - /// - /// For the stable version, see [`sort_by_key`](Children::sort_by_key). - /// - /// See also [`sort_unstable_by`](Children::sort_unstable_by). - #[inline] - pub fn sort_unstable_by_key(&mut self, compare: F) - where - F: FnMut(&Entity) -> K, - K: Ord, - { - self.0.sort_unstable_by_key(compare); - } -} - -impl Deref for Children { - type Target = [Entity]; - - #[inline(always)] - fn deref(&self) -> &Self::Target { - &self.0[..] - } -} - -impl<'a> IntoIterator for &'a Children { - type Item = ::Item; - - type IntoIter = slice::Iter<'a, Entity>; - - #[inline(always)] - fn into_iter(self) -> Self::IntoIter { - self.0.iter() - } -} diff --git a/crates/bevy_hierarchy/src/components/mod.rs b/crates/bevy_hierarchy/src/components/mod.rs deleted file mode 100644 index 3c8b544850382..0000000000000 --- a/crates/bevy_hierarchy/src/components/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod children; -mod parent; - -pub use children::Children; -pub use parent::Parent; diff --git a/crates/bevy_hierarchy/src/components/parent.rs b/crates/bevy_hierarchy/src/components/parent.rs deleted file mode 100644 index 4fc97aa914a24..0000000000000 --- a/crates/bevy_hierarchy/src/components/parent.rs +++ /dev/null @@ -1,100 +0,0 @@ -#[cfg(feature = "reflect")] -use bevy_ecs::reflect::{ - ReflectComponent, ReflectFromWorld, ReflectMapEntities, ReflectVisitEntities, - ReflectVisitEntitiesMut, -}; -use bevy_ecs::{ - component::{Component, ComponentCloneHandler, Mutable, StorageType}, - entity::{Entity, VisitEntities, VisitEntitiesMut}, - traversal::Traversal, - world::{FromWorld, World}, -}; -use 
core::ops::Deref; - -/// Holds a reference to the parent entity of this entity. -/// This component should only be present on entities that actually have a parent entity. -/// -/// Parent entity must have this entity stored in its [`Children`] component. -/// It is hard to set up parent/child relationships manually, -/// consider using higher level utilities like [`BuildChildren::with_children`]. -/// -/// See [`HierarchyQueryExt`] for hierarchy related methods on [`Query`]. -/// -/// [`HierarchyQueryExt`]: crate::query_extension::HierarchyQueryExt -/// [`Query`]: bevy_ecs::system::Query -/// [`Children`]: super::children::Children -/// [`BuildChildren::with_children`]: crate::child_builder::BuildChildren::with_children -#[derive(Debug, Eq, PartialEq, VisitEntities, VisitEntitiesMut)] -#[cfg_attr(feature = "reflect", derive(bevy_reflect::Reflect))] -#[cfg_attr( - feature = "reflect", - reflect( - Component, - MapEntities, - VisitEntities, - VisitEntitiesMut, - PartialEq, - Debug, - FromWorld - ) -)] -pub struct Parent(pub(crate) Entity); - -impl Component for Parent { - const STORAGE_TYPE: StorageType = StorageType::Table; - type Mutability = Mutable; - - fn get_component_clone_handler() -> ComponentCloneHandler { - ComponentCloneHandler::ignore() - } -} - -impl Parent { - /// Gets the [`Entity`] ID of the parent. - #[inline(always)] - pub fn get(&self) -> Entity { - self.0 - } - - /// Gets the parent [`Entity`] as a slice of length 1. - /// - /// Useful for making APIs that require a type or homogeneous storage - /// for both [`Children`] & [`Parent`] that is agnostic to edge direction. - /// - /// [`Children`]: super::children::Children - #[inline(always)] - pub fn as_slice(&self) -> &[Entity] { - core::slice::from_ref(&self.0) - } -} - -// TODO: We need to impl either FromWorld or Default so Parent can be registered as Reflect. -// This is because Reflect deserialize by creating an instance and apply a patch on top. 
-// However Parent should only ever be set with a real user-defined entity. Its worth looking into -// better ways to handle cases like this. -impl FromWorld for Parent { - #[inline(always)] - fn from_world(_world: &mut World) -> Self { - Parent(Entity::PLACEHOLDER) - } -} - -impl Deref for Parent { - type Target = Entity; - - #[inline(always)] - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -/// This provides generalized hierarchy traversal for use in [event propagation]. -/// -/// `Parent::traverse` will never form loops in properly-constructed hierarchies. -/// -/// [event propagation]: bevy_ecs::observer::Trigger::propagate -impl Traversal for &Parent { - fn traverse(item: Self::Item<'_>, _data: &D) -> Option { - Some(item.0) - } -} diff --git a/crates/bevy_hierarchy/src/events.rs b/crates/bevy_hierarchy/src/events.rs deleted file mode 100644 index 5a667fef7789b..0000000000000 --- a/crates/bevy_hierarchy/src/events.rs +++ /dev/null @@ -1,34 +0,0 @@ -use bevy_ecs::{event::Event, prelude::Entity}; -#[cfg(feature = "reflect")] -use bevy_reflect::Reflect; - -/// An [`Event`] that is fired whenever there is a change in the world's hierarchy. -/// -/// [`Event`]: bevy_ecs::event::Event -#[derive(Event, Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "reflect", derive(Reflect), reflect(Debug, PartialEq))] -pub enum HierarchyEvent { - /// Fired whenever an [`Entity`] is added as a child to a parent. - ChildAdded { - /// The child that was added - child: Entity, - /// The parent the child was added to - parent: Entity, - }, - /// Fired whenever a child [`Entity`] is removed from its parent. - ChildRemoved { - /// The child that was removed - child: Entity, - /// The parent the child was removed from - parent: Entity, - }, - /// Fired whenever a child [`Entity`] is moved to a new parent. 
- ChildMoved { - /// The child that was moved - child: Entity, - /// The parent the child was removed from - previous_parent: Entity, - /// The parent the child was added to - new_parent: Entity, - }, -} diff --git a/crates/bevy_hierarchy/src/hierarchy.rs b/crates/bevy_hierarchy/src/hierarchy.rs deleted file mode 100644 index 37dcb83061f93..0000000000000 --- a/crates/bevy_hierarchy/src/hierarchy.rs +++ /dev/null @@ -1,495 +0,0 @@ -use crate::{ - components::{Children, Parent}, - BuildChildren, -}; -use bevy_ecs::{ - component::ComponentCloneHandler, - entity::{ComponentCloneCtx, Entity, EntityCloneBuilder}, - system::EntityCommands, - world::{Command, DeferredWorld, EntityWorldMut, World}, -}; -use bevy_utils::tracing::debug; - -/// Despawns the given entity and all its children recursively -#[derive(Debug)] -pub struct DespawnRecursive { - /// Target entity - pub entity: Entity, - /// Whether or not this command should output a warning if the entity does not exist - pub warn: bool, -} - -/// Despawns the given entity's children recursively -#[derive(Debug)] -pub struct DespawnChildrenRecursive { - /// Target entity - pub entity: Entity, - /// Whether or not this command should output a warning if the entity does not exist - pub warn: bool, -} - -/// Function for despawning an entity and all its children -pub fn despawn_with_children_recursive(world: &mut World, entity: Entity, warn: bool) { - // first, make the entity's own parent forget about it - if let Some(parent) = world.get::(entity).map(|parent| parent.0) { - if let Some(mut children) = world.get_mut::(parent) { - children.0.retain(|c| *c != entity); - } - } - - // then despawn the entity and all of its children - despawn_with_children_recursive_inner(world, entity, warn); -} - -// Should only be called by `despawn_with_children_recursive` and `try_despawn_with_children_recursive`! 
-fn despawn_with_children_recursive_inner(world: &mut World, entity: Entity, warn: bool) { - if let Some(mut children) = world.get_mut::(entity) { - for e in core::mem::take(&mut children.0) { - despawn_with_children_recursive_inner(world, e, warn); - } - } - - if warn { - if !world.despawn(entity) { - debug!("Failed to despawn entity {:?}", entity); - } - } else if !world.try_despawn(entity) { - debug!("Failed to despawn entity {:?}", entity); - } -} - -fn despawn_children_recursive(world: &mut World, entity: Entity, warn: bool) { - if let Some(children) = world.entity_mut(entity).take::() { - for e in children.0 { - despawn_with_children_recursive_inner(world, e, warn); - } - } -} - -impl Command for DespawnRecursive { - fn apply(self, world: &mut World) { - #[cfg(feature = "trace")] - let _span = bevy_utils::tracing::info_span!( - "command", - name = "DespawnRecursive", - entity = bevy_utils::tracing::field::debug(self.entity), - warn = bevy_utils::tracing::field::debug(self.warn) - ) - .entered(); - despawn_with_children_recursive(world, self.entity, self.warn); - } -} - -impl Command for DespawnChildrenRecursive { - fn apply(self, world: &mut World) { - #[cfg(feature = "trace")] - let _span = bevy_utils::tracing::info_span!( - "command", - name = "DespawnChildrenRecursive", - entity = bevy_utils::tracing::field::debug(self.entity), - warn = bevy_utils::tracing::field::debug(self.warn) - ) - .entered(); - - despawn_children_recursive(world, self.entity, self.warn); - } -} - -/// Trait that holds functions for despawning recursively down the transform hierarchy -pub trait DespawnRecursiveExt { - /// Despawns the provided entity alongside all descendants. - fn despawn_recursive(self); - - /// Despawns all descendants of the given entity. 
- fn despawn_descendants(&mut self) -> &mut Self; - - /// Similar to [`Self::despawn_recursive`] but does not emit warnings - fn try_despawn_recursive(self); - - /// Similar to [`Self::despawn_descendants`] but does not emit warnings - fn try_despawn_descendants(&mut self) -> &mut Self; -} - -impl DespawnRecursiveExt for EntityCommands<'_> { - /// Despawns the provided entity and its children. - /// This will emit warnings for any entity that does not exist. - fn despawn_recursive(mut self) { - let entity = self.id(); - self.commands() - .queue(DespawnRecursive { entity, warn: true }); - } - - fn despawn_descendants(&mut self) -> &mut Self { - let entity = self.id(); - self.commands() - .queue(DespawnChildrenRecursive { entity, warn: true }); - self - } - - /// Despawns the provided entity and its children. - /// This will never emit warnings. - fn try_despawn_recursive(mut self) { - let entity = self.id(); - self.commands().queue(DespawnRecursive { - entity, - warn: false, - }); - } - - fn try_despawn_descendants(&mut self) -> &mut Self { - let entity = self.id(); - self.commands().queue(DespawnChildrenRecursive { - entity, - warn: false, - }); - self - } -} - -fn despawn_recursive_inner(world: EntityWorldMut, warn: bool) { - let entity = world.id(); - - #[cfg(feature = "trace")] - let _span = bevy_utils::tracing::info_span!( - "despawn_recursive", - entity = bevy_utils::tracing::field::debug(entity), - warn = bevy_utils::tracing::field::debug(warn) - ) - .entered(); - - despawn_with_children_recursive(world.into_world_mut(), entity, warn); -} - -fn despawn_descendants_inner<'v, 'w>( - world: &'v mut EntityWorldMut<'w>, - warn: bool, -) -> &'v mut EntityWorldMut<'w> { - let entity = world.id(); - - #[cfg(feature = "trace")] - let _span = bevy_utils::tracing::info_span!( - "despawn_descendants", - entity = bevy_utils::tracing::field::debug(entity), - warn = bevy_utils::tracing::field::debug(warn) - ) - .entered(); - - world.world_scope(|world| { - 
despawn_children_recursive(world, entity, warn); - }); - world -} - -impl<'w> DespawnRecursiveExt for EntityWorldMut<'w> { - /// Despawns the provided entity and its children. - /// This will emit warnings for any entity that does not exist. - fn despawn_recursive(self) { - despawn_recursive_inner(self, true); - } - - fn despawn_descendants(&mut self) -> &mut Self { - despawn_descendants_inner(self, true) - } - - /// Despawns the provided entity and its children. - /// This will not emit warnings. - fn try_despawn_recursive(self) { - despawn_recursive_inner(self, false); - } - - fn try_despawn_descendants(&mut self) -> &mut Self { - despawn_descendants_inner(self, false) - } -} - -/// Trait that holds functions for cloning entities recursively down the hierarchy -pub trait CloneEntityHierarchyExt { - /// Sets the option to recursively clone entities. - /// When set to true all children will be cloned with the same options as the parent. - fn recursive(&mut self, recursive: bool) -> &mut Self; - /// Sets the option to add cloned entity as a child to the parent entity. - fn as_child(&mut self, as_child: bool) -> &mut Self; -} - -impl CloneEntityHierarchyExt for EntityCloneBuilder<'_> { - fn recursive(&mut self, recursive: bool) -> &mut Self { - if recursive { - self.override_component_clone_handler::( - ComponentCloneHandler::custom_handler(component_clone_children), - ) - } else { - self.remove_component_clone_handler_override::() - } - } - fn as_child(&mut self, as_child: bool) -> &mut Self { - if as_child { - self.override_component_clone_handler::(ComponentCloneHandler::custom_handler( - component_clone_parent, - )) - } else { - self.remove_component_clone_handler_override::() - } - } -} - -/// Clone handler for the [`Children`] component. Allows to clone the entity recursively. 
-fn component_clone_children(world: &mut DeferredWorld, ctx: &mut ComponentCloneCtx) { - let children = ctx - .read_source_component::() - .expect("Source entity must have Children component") - .iter(); - let parent = ctx.target(); - for child in children { - let child_clone = world.commands().spawn_empty().id(); - let mut clone_entity = ctx - .entity_cloner() - .with_source_and_target(*child, child_clone); - world.commands().queue(move |world: &mut World| { - clone_entity.clone_entity(world); - world.entity_mut(child_clone).set_parent(parent); - }); - } -} - -/// Clone handler for the [`Parent`] component. Allows to add clone as a child to the parent entity. -fn component_clone_parent(world: &mut DeferredWorld, ctx: &mut ComponentCloneCtx) { - let parent = ctx - .read_source_component::() - .map(|p| p.0) - .expect("Source entity must have Parent component"); - world.commands().entity(ctx.target()).set_parent(parent); -} - -#[cfg(test)] -mod tests { - use bevy_ecs::{ - component::Component, - system::Commands, - world::{CommandQueue, World}, - }; - - use super::DespawnRecursiveExt; - use crate::{ - child_builder::{BuildChildren, ChildBuild}, - components::Children, - CloneEntityHierarchyExt, - }; - - #[derive(Component, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Debug)] - struct Idx(u32); - - #[derive(Component, Clone, PartialEq, Eq, Ord, PartialOrd, Debug)] - struct N(String); - - #[test] - fn despawn_recursive() { - let mut world = World::default(); - let mut queue = CommandQueue::default(); - let grandparent_entity; - { - let mut commands = Commands::new(&mut queue, &world); - - commands - .spawn((N("Another parent".to_owned()), Idx(0))) - .with_children(|parent| { - parent.spawn((N("Another child".to_owned()), Idx(1))); - }); - - // Create a grandparent entity which will _not_ be deleted - grandparent_entity = commands.spawn((N("Grandparent".to_owned()), Idx(2))).id(); - commands.entity(grandparent_entity).with_children(|parent| { - // Add a child to the 
grandparent (the "parent"), which will get deleted - parent - .spawn((N("Parent, to be deleted".to_owned()), Idx(3))) - // All descendants of the "parent" should also be deleted. - .with_children(|parent| { - parent - .spawn((N("First Child, to be deleted".to_owned()), Idx(4))) - .with_children(|parent| { - // child - parent.spawn(( - N("First grand child, to be deleted".to_owned()), - Idx(5), - )); - }); - parent.spawn((N("Second child, to be deleted".to_owned()), Idx(6))); - }); - }); - - commands.spawn((N("An innocent bystander".to_owned()), Idx(7))); - } - queue.apply(&mut world); - - let parent_entity = world.get::(grandparent_entity).unwrap()[0]; - - { - let mut commands = Commands::new(&mut queue, &world); - commands.entity(parent_entity).despawn_recursive(); - // despawning the same entity twice should not panic - commands.entity(parent_entity).despawn_recursive(); - } - queue.apply(&mut world); - - let mut results = world - .query::<(&N, &Idx)>() - .iter(&world) - .map(|(a, b)| (a.clone(), *b)) - .collect::>(); - results.sort_unstable_by_key(|(_, index)| *index); - - { - let children = world.get::(grandparent_entity).unwrap(); - assert!( - !children.iter().any(|&i| i == parent_entity), - "grandparent should no longer know about its child which has been removed" - ); - } - - assert_eq!( - results, - vec![ - (N("Another parent".to_owned()), Idx(0)), - (N("Another child".to_owned()), Idx(1)), - (N("Grandparent".to_owned()), Idx(2)), - (N("An innocent bystander".to_owned()), Idx(7)) - ] - ); - } - - #[test] - fn despawn_descendants() { - let mut world = World::default(); - let mut queue = CommandQueue::default(); - let mut commands = Commands::new(&mut queue, &world); - - let parent = commands.spawn_empty().id(); - let child = commands.spawn_empty().id(); - - commands - .entity(parent) - .add_child(child) - .despawn_descendants(); - - queue.apply(&mut world); - - // The parent's Children component should be removed. 
- assert!(world.entity(parent).get::().is_none()); - // The child should be despawned. - assert!(world.get_entity(child).is_err()); - } - - #[test] - fn spawn_children_after_despawn_descendants() { - let mut world = World::default(); - let mut queue = CommandQueue::default(); - let mut commands = Commands::new(&mut queue, &world); - - let parent = commands.spawn_empty().id(); - let child = commands.spawn_empty().id(); - - commands - .entity(parent) - .add_child(child) - .despawn_descendants() - .with_children(|parent| { - parent.spawn_empty(); - parent.spawn_empty(); - }); - - queue.apply(&mut world); - - // The parent's Children component should still have two children. - let children = world.entity(parent).get::(); - assert!(children.is_some()); - assert_eq!(children.unwrap().len(), 2_usize); - // The original child should be despawned. - assert!(world.get_entity(child).is_err()); - } - - #[test] - fn clone_entity_recursive() { - #[derive(Component, PartialEq, Eq, Clone)] - struct Component1 { - field: usize, - } - - let parent_component = Component1 { field: 10 }; - let child1_component = Component1 { field: 20 }; - let child1_1_component = Component1 { field: 30 }; - let child2_component = Component1 { field: 21 }; - let child2_1_component = Component1 { field: 31 }; - - let mut world = World::default(); - - let mut queue = CommandQueue::default(); - let e_clone = { - let mut commands = Commands::new(&mut queue, &world); - let e = commands - .spawn(parent_component.clone()) - .with_children(|children| { - children - .spawn(child1_component.clone()) - .with_children(|children| { - children.spawn(child1_1_component.clone()); - }); - children - .spawn(child2_component.clone()) - .with_children(|children| { - children.spawn(child2_1_component.clone()); - }); - }) - .id(); - let e_clone = commands - .entity(e) - .clone_and_spawn_with(|builder| { - builder.recursive(true); - }) - .id(); - e_clone - }; - queue.apply(&mut world); - - assert!(world - .get::(e_clone) - 
.is_some_and(|c| *c == parent_component)); - - let children = world.get::(e_clone).unwrap(); - for (child, (component1, component2)) in children.iter().zip([ - (child1_component, child1_1_component), - (child2_component, child2_1_component), - ]) { - assert!(world - .get::(*child) - .is_some_and(|c| *c == component1)); - for child2 in world.get::(*child).unwrap().iter() { - assert!(world - .get::(*child2) - .is_some_and(|c| *c == component2)); - } - } - } - - #[test] - fn clone_entity_as_child() { - let mut world = World::default(); - let mut queue = CommandQueue::default(); - let mut commands = Commands::new(&mut queue, &world); - - let child = commands.spawn_empty().id(); - let parent = commands.spawn_empty().add_child(child).id(); - - let child_clone = commands - .entity(child) - .clone_and_spawn_with(|builder| { - builder.as_child(true); - }) - .id(); - - queue.apply(&mut world); - - assert!(world - .entity(parent) - .get::() - .is_some_and(|c| c.contains(&child_clone))); - } -} diff --git a/crates/bevy_hierarchy/src/lib.rs b/crates/bevy_hierarchy/src/lib.rs deleted file mode 100644 index ced37bd154f64..0000000000000 --- a/crates/bevy_hierarchy/src/lib.rs +++ /dev/null @@ -1,105 +0,0 @@ -#![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![forbid(unsafe_code)] -#![doc( - html_logo_url = "https://bevyengine.org/assets/icon.png", - html_favicon_url = "https://bevyengine.org/assets/icon.png" -)] - -//! Parent-child relationships for Bevy entities. -//! -//! You should use the tools in this crate -//! whenever you want to organize your entities in a hierarchical fashion, -//! to make groups of entities more manageable, -//! or to propagate properties throughout the entity hierarchy. -//! -//! This crate introduces various tools, including a [plugin] -//! for managing parent-child relationships between entities. -//! It provides two components, [`Parent`] and [`Children`], -//! to store references to related entities. -//! 
It also provides [command and world] API extensions -//! to set and clear those relationships. -//! -//! More advanced users may also appreciate -//! [query extension methods] to traverse hierarchies, -//! and [events] to notify hierarchical changes. -//! There is also a [diagnostic plugin] to validate property propagation. -//! -//! # Hierarchy management -//! -//! The methods defined in this crate fully manage -//! the components responsible for defining the entity hierarchy. -//! Mutating these components manually may result in hierarchy invalidation. -//! -//! Hierarchical relationships are always managed symmetrically. -//! For example, assigning a child to an entity -//! will always set the parent in the other, -//! and vice versa. -//! Similarly, unassigning a child in the parent -//! will always unassign the parent in the child. -//! -//! ## Despawning entities -//! -//! The commands and methods provided by `bevy_ecs` to despawn entities -//! are not capable of automatically despawning hierarchies of entities. -//! In most cases, these operations will invalidate the hierarchy. -//! Instead, you should use the provided [hierarchical despawn extension methods]. -//! -//! [command and world]: BuildChildren -//! [diagnostic plugin]: ValidParentCheckPlugin -//! [events]: HierarchyEvent -//! [hierarchical despawn extension methods]: DespawnRecursiveExt -//! [plugin]: HierarchyPlugin -//! [query extension methods]: HierarchyQueryExt - -extern crate alloc; - -mod components; -pub use components::*; - -mod hierarchy; -pub use hierarchy::*; - -mod child_builder; -pub use child_builder::*; - -mod events; -pub use events::*; - -mod valid_parent_check_plugin; -pub use valid_parent_check_plugin::*; - -mod query_extension; -pub use query_extension::*; - -/// The hierarchy prelude. -/// -/// This includes the most common types in this crate, re-exported for your convenience. 
-pub mod prelude { - #[doc(hidden)] - pub use crate::{child_builder::*, components::*, hierarchy::*, query_extension::*}; - - #[doc(hidden)] - #[cfg(feature = "bevy_app")] - pub use crate::{HierarchyPlugin, ValidParentCheckPlugin}; -} - -#[cfg(feature = "bevy_app")] -use bevy_app::prelude::*; - -/// Provides hierarchy functionality to a Bevy app. -/// -/// Check the [crate-level documentation] for all the features. -/// -/// [crate-level documentation]: crate -#[cfg(feature = "bevy_app")] -#[derive(Default)] -pub struct HierarchyPlugin; - -#[cfg(feature = "bevy_app")] -impl Plugin for HierarchyPlugin { - fn build(&self, app: &mut App) { - app.register_type::() - .register_type::() - .add_event::(); - } -} diff --git a/crates/bevy_hierarchy/src/query_extension.rs b/crates/bevy_hierarchy/src/query_extension.rs deleted file mode 100644 index 6396ddcfb756f..0000000000000 --- a/crates/bevy_hierarchy/src/query_extension.rs +++ /dev/null @@ -1,434 +0,0 @@ -use alloc::collections::VecDeque; - -use bevy_ecs::{ - entity::Entity, - query::{QueryData, QueryFilter, WorldQuery}, - system::Query, -}; -use smallvec::SmallVec; - -use crate::{Children, Parent}; - -/// An extension trait for [`Query`] that adds hierarchy related methods. -pub trait HierarchyQueryExt<'w, 's, D: QueryData, F: QueryFilter> { - /// Returns the parent [`Entity`] of the given `entity`, if any. - fn parent(&'w self, entity: Entity) -> Option - where - D::ReadOnly: WorldQuery = &'w Parent>; - - /// Returns a slice over the [`Children`] of the given `entity`. - /// - /// This may be empty if the `entity` has no children. - fn children(&'w self, entity: Entity) -> &'w [Entity] - where - D::ReadOnly: WorldQuery = &'w Children>; - - /// Returns the topmost ancestor of the given `entity`. - /// - /// This may be the entity itself if it has no parent. 
- fn root_ancestor(&'w self, entity: Entity) -> Entity - where - D::ReadOnly: WorldQuery = &'w Parent>; - - /// Returns an [`Iterator`] of [`Entity`]s over the leaves of the hierarchy that are underneath this `entity`. - /// - /// Only entities which have no children are considered leaves. - /// This will not include the entity itself, and will not include any entities which are not descendants of the entity, - /// even if they are leaves in the same hierarchical tree. - /// - /// Traverses the hierarchy depth-first. - fn iter_leaves(&'w self, entity: Entity) -> impl Iterator + 'w - where - D::ReadOnly: WorldQuery = &'w Children>; - - /// Returns an [`Iterator`] of [`Entity`]s over the `entity`s immediate siblings, who share the same parent. - /// - /// The entity itself is not included in the iterator. - fn iter_siblings(&'w self, entity: Entity) -> impl Iterator - where - D::ReadOnly: WorldQuery = (Option<&'w Parent>, Option<&'w Children>)>; - - /// Returns an [`Iterator`] of [`Entity`]s over all of `entity`s descendants. - /// - /// Can only be called on a [`Query`] of [`Children`] (i.e. `Query<&Children>`). - /// - /// Traverses the hierarchy breadth-first and does not include the entity itself. - /// - /// # Examples - /// ``` - /// # use bevy_ecs::prelude::*; - /// # use bevy_hierarchy::prelude::*; - /// # #[derive(Component)] - /// # struct Marker; - /// fn system(entity: Single>, children_query: Query<&Children>) { - /// for descendant in children_query.iter_descendants(*entity) { - /// // Do something! - /// } - /// } - /// # bevy_ecs::system::assert_is_system(system); - /// ``` - fn iter_descendants(&'w self, entity: Entity) -> DescendantIter<'w, 's, D, F> - where - D::ReadOnly: WorldQuery = &'w Children>; - - /// Returns an [`Iterator`] of [`Entity`]s over all of `entity`s descendants. - /// - /// Can only be called on a [`Query`] of [`Children`] (i.e. `Query<&Children>`). 
- /// - /// This is a depth-first alternative to [`HierarchyQueryExt::iter_descendants`]. - fn iter_descendants_depth_first( - &'w self, - entity: Entity, - ) -> DescendantDepthFirstIter<'w, 's, D, F> - where - D::ReadOnly: WorldQuery = &'w Children>; - - /// Returns an [`Iterator`] of [`Entity`]s over all of `entity`s ancestors. - /// - /// Does not include the entity itself. - /// Can only be called on a [`Query`] of [`Parent`] (i.e. `Query<&Parent>`). - /// - /// # Examples - /// ``` - /// # use bevy_ecs::prelude::*; - /// # use bevy_hierarchy::prelude::*; - /// # #[derive(Component)] - /// # struct Marker; - /// fn system(entity: Single>, parent_query: Query<&Parent>) { - /// for ancestor in parent_query.iter_ancestors(*entity) { - /// // Do something! - /// } - /// } - /// # bevy_ecs::system::assert_is_system(system); - /// ``` - fn iter_ancestors(&'w self, entity: Entity) -> AncestorIter<'w, 's, D, F> - where - D::ReadOnly: WorldQuery = &'w Parent>; -} - -impl<'w, 's, D: QueryData, F: QueryFilter> HierarchyQueryExt<'w, 's, D, F> for Query<'w, 's, D, F> { - fn parent(&'w self, entity: Entity) -> Option - where - ::ReadOnly: WorldQuery = &'w Parent>, - { - self.get(entity).map(Parent::get).ok() - } - - fn children(&'w self, entity: Entity) -> &'w [Entity] - where - ::ReadOnly: WorldQuery = &'w Children>, - { - self.get(entity) - .map_or(&[] as &[Entity], |children| children) - } - - fn root_ancestor(&'w self, entity: Entity) -> Entity - where - ::ReadOnly: WorldQuery = &'w Parent>, - { - // Recursively search up the tree until we're out of parents - match self.get(entity) { - Ok(parent) => self.root_ancestor(parent.get()), - Err(_) => entity, - } - } - - fn iter_leaves(&'w self, entity: Entity) -> impl Iterator - where - ::ReadOnly: WorldQuery = &'w Children>, - { - self.iter_descendants_depth_first(entity).filter(|entity| { - self.get(*entity) - // These are leaf nodes if they have the `Children` component but it's empty - .map(|children| children.is_empty()) 
- // Or if they don't have the `Children` component at all - .unwrap_or(true) - }) - } - - fn iter_siblings(&'w self, entity: Entity) -> impl Iterator - where - D::ReadOnly: WorldQuery = (Option<&'w Parent>, Option<&'w Children>)>, - { - self.get(entity) - .ok() - .and_then(|(maybe_parent, _)| maybe_parent.map(Parent::get)) - .and_then(|parent| self.get(parent).ok()) - .and_then(|(_, maybe_children)| maybe_children) - .into_iter() - .flat_map(move |children| children.iter().filter(move |child| **child != entity)) - .copied() - } - - fn iter_descendants(&'w self, entity: Entity) -> DescendantIter<'w, 's, D, F> - where - D::ReadOnly: WorldQuery = &'w Children>, - { - DescendantIter::new(self, entity) - } - - fn iter_descendants_depth_first( - &'w self, - entity: Entity, - ) -> DescendantDepthFirstIter<'w, 's, D, F> - where - D::ReadOnly: WorldQuery = &'w Children>, - { - DescendantDepthFirstIter::new(self, entity) - } - - fn iter_ancestors(&'w self, entity: Entity) -> AncestorIter<'w, 's, D, F> - where - D::ReadOnly: WorldQuery = &'w Parent>, - { - AncestorIter::new(self, entity) - } -} - -/// An [`Iterator`] of [`Entity`]s over the descendants of an [`Entity`]. -/// -/// Traverses the hierarchy breadth-first. -pub struct DescendantIter<'w, 's, D: QueryData, F: QueryFilter> -where - D::ReadOnly: WorldQuery = &'w Children>, -{ - children_query: &'w Query<'w, 's, D, F>, - vecdeque: VecDeque, -} - -impl<'w, 's, D: QueryData, F: QueryFilter> DescendantIter<'w, 's, D, F> -where - D::ReadOnly: WorldQuery = &'w Children>, -{ - /// Returns a new [`DescendantIter`]. 
- pub fn new(children_query: &'w Query<'w, 's, D, F>, entity: Entity) -> Self { - DescendantIter { - children_query, - vecdeque: children_query - .get(entity) - .into_iter() - .flatten() - .copied() - .collect(), - } - } -} - -impl<'w, 's, D: QueryData, F: QueryFilter> Iterator for DescendantIter<'w, 's, D, F> -where - D::ReadOnly: WorldQuery = &'w Children>, -{ - type Item = Entity; - - fn next(&mut self) -> Option { - let entity = self.vecdeque.pop_front()?; - - if let Ok(children) = self.children_query.get(entity) { - self.vecdeque.extend(children); - } - - Some(entity) - } -} - -/// An [`Iterator`] of [`Entity`]s over the descendants of an [`Entity`]. -/// -/// Traverses the hierarchy depth-first. -pub struct DescendantDepthFirstIter<'w, 's, D: QueryData, F: QueryFilter> -where - D::ReadOnly: WorldQuery = &'w Children>, -{ - children_query: &'w Query<'w, 's, D, F>, - stack: SmallVec<[Entity; 8]>, -} - -impl<'w, 's, D: QueryData, F: QueryFilter> DescendantDepthFirstIter<'w, 's, D, F> -where - D::ReadOnly: WorldQuery = &'w Children>, -{ - /// Returns a new [`DescendantDepthFirstIter`]. - pub fn new(children_query: &'w Query<'w, 's, D, F>, entity: Entity) -> Self { - DescendantDepthFirstIter { - children_query, - stack: children_query - .get(entity) - .map_or(SmallVec::new(), |children| { - children.iter().rev().copied().collect() - }), - } - } -} - -impl<'w, 's, D: QueryData, F: QueryFilter> Iterator for DescendantDepthFirstIter<'w, 's, D, F> -where - D::ReadOnly: WorldQuery = &'w Children>, -{ - type Item = Entity; - - fn next(&mut self) -> Option { - let entity = self.stack.pop()?; - - if let Ok(children) = self.children_query.get(entity) { - self.stack.extend(children.iter().rev().copied()); - } - - Some(entity) - } -} - -/// An [`Iterator`] of [`Entity`]s over the ancestors of an [`Entity`]. 
-pub struct AncestorIter<'w, 's, D: QueryData, F: QueryFilter> -where - D::ReadOnly: WorldQuery = &'w Parent>, -{ - parent_query: &'w Query<'w, 's, D, F>, - next: Option, -} - -impl<'w, 's, D: QueryData, F: QueryFilter> AncestorIter<'w, 's, D, F> -where - D::ReadOnly: WorldQuery = &'w Parent>, -{ - /// Returns a new [`AncestorIter`]. - pub fn new(parent_query: &'w Query<'w, 's, D, F>, entity: Entity) -> Self { - AncestorIter { - parent_query, - next: Some(entity), - } - } -} - -impl<'w, 's, D: QueryData, F: QueryFilter> Iterator for AncestorIter<'w, 's, D, F> -where - D::ReadOnly: WorldQuery = &'w Parent>, -{ - type Item = Entity; - - fn next(&mut self) -> Option { - self.next = self.parent_query.get(self.next?).ok().map(Parent::get); - self.next - } -} - -#[cfg(test)] -mod tests { - use bevy_ecs::{ - prelude::Component, - system::{Query, SystemState}, - world::World, - }; - - use crate::{query_extension::HierarchyQueryExt, BuildChildren, Children, Parent}; - - #[derive(Component, PartialEq, Debug)] - struct A(usize); - - #[test] - fn descendant_iter() { - let world = &mut World::new(); - - let [a0, a1, a2, a3] = core::array::from_fn(|i| world.spawn(A(i)).id()); - - world.entity_mut(a0).add_children(&[a1, a2]); - world.entity_mut(a1).add_children(&[a3]); - - let mut system_state = SystemState::<(Query<&Children>, Query<&A>)>::new(world); - let (children_query, a_query) = system_state.get(world); - - let result: Vec<_> = a_query - .iter_many(children_query.iter_descendants(a0)) - .collect(); - - assert_eq!([&A(1), &A(2), &A(3)], result.as_slice()); - } - - #[test] - fn descendant_depth_first_iter() { - let world = &mut World::new(); - - let [a0, a1, a2, a3] = core::array::from_fn(|i| world.spawn(A(i)).id()); - - world.entity_mut(a0).add_children(&[a1, a2]); - world.entity_mut(a1).add_children(&[a3]); - - let mut system_state = SystemState::<(Query<&Children>, Query<&A>)>::new(world); - let (children_query, a_query) = system_state.get(world); - - let result: Vec<_> = 
a_query - .iter_many(children_query.iter_descendants_depth_first(a0)) - .collect(); - - assert_eq!([&A(1), &A(3), &A(2)], result.as_slice()); - } - - #[test] - fn ancestor_iter() { - let world = &mut World::new(); - - let [a0, a1, a2] = core::array::from_fn(|i| world.spawn(A(i)).id()); - - world.entity_mut(a0).add_children(&[a1]); - world.entity_mut(a1).add_children(&[a2]); - - let mut system_state = SystemState::<(Query<&Parent>, Query<&A>)>::new(world); - let (parent_query, a_query) = system_state.get(world); - - let result: Vec<_> = a_query.iter_many(parent_query.iter_ancestors(a2)).collect(); - - assert_eq!([&A(1), &A(0)], result.as_slice()); - } - - #[test] - fn root_ancestor() { - let world = &mut World::new(); - - let [a0, a1, a2] = core::array::from_fn(|i| world.spawn(A(i)).id()); - - world.entity_mut(a0).add_children(&[a1]); - world.entity_mut(a1).add_children(&[a2]); - - let mut system_state = SystemState::>::new(world); - let parent_query = system_state.get(world); - - assert_eq!(a0, parent_query.root_ancestor(a2)); - assert_eq!(a0, parent_query.root_ancestor(a1)); - assert_eq!(a0, parent_query.root_ancestor(a0)); - } - - #[test] - fn leaf_iter() { - let world = &mut World::new(); - - let [a0, a1, a2, a3] = core::array::from_fn(|i| world.spawn(A(i)).id()); - - world.entity_mut(a0).add_children(&[a1, a2]); - world.entity_mut(a1).add_children(&[a3]); - - let mut system_state = SystemState::<(Query<&Children>, Query<&A>)>::new(world); - let (children_query, a_query) = system_state.get(world); - - let result: Vec<_> = a_query.iter_many(children_query.iter_leaves(a0)).collect(); - - assert_eq!([&A(3), &A(2)], result.as_slice()); - } - - #[test] - fn siblings() { - let world = &mut World::new(); - - let [a0, a1, a2, a3, a4] = core::array::from_fn(|i| world.spawn(A(i)).id()); - - world.entity_mut(a0).add_children(&[a1, a2, a3]); - world.entity_mut(a2).add_children(&[a4]); - - let mut system_state = - SystemState::<(Query<(Option<&Parent>, Option<&Children>)>, 
Query<&A>)>::new(world); - let (hierarchy_query, a_query) = system_state.get(world); - - let result: Vec<_> = a_query - .iter_many(hierarchy_query.iter_siblings(a1)) - .collect(); - - assert_eq!([&A(2), &A(3)], result.as_slice()); - } -} diff --git a/crates/bevy_hierarchy/src/valid_parent_check_plugin.rs b/crates/bevy_hierarchy/src/valid_parent_check_plugin.rs deleted file mode 100644 index a05ec586cfdfd..0000000000000 --- a/crates/bevy_hierarchy/src/valid_parent_check_plugin.rs +++ /dev/null @@ -1,104 +0,0 @@ -use core::marker::PhantomData; - -use bevy_ecs::prelude::*; - -#[cfg(feature = "bevy_app")] -use {crate::Parent, bevy_utils::HashSet, disqualified::ShortName}; - -/// When enabled, runs [`check_hierarchy_component_has_valid_parent`]. -/// -/// This resource is added by [`ValidParentCheckPlugin`]. -/// It is enabled on debug builds and disabled in release builds by default, -/// you can update this resource at runtime to change the default behavior. -#[derive(Resource)] -pub struct ReportHierarchyIssue { - /// Whether to run [`check_hierarchy_component_has_valid_parent`]. - pub enabled: bool, - _comp: PhantomData, -} - -impl ReportHierarchyIssue { - /// Constructs a new object - pub fn new(enabled: bool) -> Self { - ReportHierarchyIssue { - enabled, - _comp: Default::default(), - } - } -} - -impl PartialEq for ReportHierarchyIssue { - fn eq(&self, other: &Self) -> bool { - self.enabled == other.enabled - } -} - -impl Default for ReportHierarchyIssue { - fn default() -> Self { - Self { - enabled: cfg!(debug_assertions), - _comp: PhantomData, - } - } -} - -#[cfg(feature = "bevy_app")] -/// System to print a warning for each [`Entity`] with a `T` component -/// which parent hasn't a `T` component. -/// -/// Hierarchy propagations are top-down, and limited only to entities -/// with a specific component (such as `InheritedVisibility` and `GlobalTransform`). 
-/// This means that entities with one of those component -/// and a parent without the same component is probably a programming error. -/// (See B0004 explanation linked in warning message) -pub fn check_hierarchy_component_has_valid_parent( - parent_query: Query< - (Entity, &Parent, Option<&Name>), - (With, Or<(Changed, Added)>), - >, - component_query: Query<(), With>, - mut already_diagnosed: Local>, -) { - for (entity, parent, name) in &parent_query { - let parent = parent.get(); - if !component_query.contains(parent) && !already_diagnosed.contains(&entity) { - already_diagnosed.insert(entity); - bevy_utils::tracing::warn!( - "warning[B0004]: {name} with the {ty_name} component has a parent without {ty_name}.\n\ - This will cause inconsistent behaviors! See: https://bevyengine.org/learn/errors/b0004", - ty_name = ShortName::of::(), - name = name.map_or_else(|| format!("Entity {}", entity), |s| format!("The {s} entity")), - ); - } - } -} - -/// Run criteria that only allows running when [`ReportHierarchyIssue`] is enabled. -pub fn on_hierarchy_reports_enabled(report: Res>) -> bool -where - T: Component, -{ - report.enabled -} - -/// Print a warning for each `Entity` with a `T` component -/// whose parent doesn't have a `T` component. -/// -/// See [`check_hierarchy_component_has_valid_parent`] for details. 
-pub struct ValidParentCheckPlugin(PhantomData T>); -impl Default for ValidParentCheckPlugin { - fn default() -> Self { - Self(PhantomData) - } -} - -#[cfg(feature = "bevy_app")] -impl bevy_app::Plugin for ValidParentCheckPlugin { - fn build(&self, app: &mut bevy_app::App) { - app.init_resource::>().add_systems( - bevy_app::Last, - check_hierarchy_component_has_valid_parent:: - .run_if(resource_equals(ReportHierarchyIssue::::new(true))), - ); - } -} diff --git a/crates/bevy_image/Cargo.toml b/crates/bevy_image/Cargo.toml index df033d64d6700..a90a3abb80e2b 100644 --- a/crates/bevy_image/Cargo.toml +++ b/crates/bevy_image/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_image" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides image types for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,6 +9,12 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] +default = ["bevy_reflect"] + +# bevy_reflect can't optional as it's needed for TypePath +# this feature only control reflection in bevy_image +bevy_reflect = ["bevy_math/bevy_reflect"] + # Image formats basis-universal = ["dep:basis-universal"] bmp = ["image/bmp"] @@ -26,22 +32,26 @@ qoi = ["image/qoi"] tga = ["image/tga"] tiff = ["image/tiff"] webp = ["image/webp"] +serialize = ["bevy_reflect", "bevy_platform/serialize", "bevy_utils/serde"] # For ktx2 supercompression zlib = ["flate2"] zstd = ["ruzstd"] [dependencies] -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev", features = [ +# bevy +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev", features = [ "serialize", "wgpu-types", ] } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_reflect = { path = 
"../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", ] } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } # rendering image = { version = "0.25.2", default-features = false } @@ -49,19 +59,24 @@ image = { version = "0.25.2", default-features = false } # misc bitflags = { version = "2.3", features = ["serde"] } bytemuck = { version = "1.5" } -wgpu-types = { version = "23", default-features = false } -# TODO: remove dependency on wgpu once https://github.com/gfx-rs/wgpu/pull/6648, 6649 and 6650 have been released -wgpu = { version = "23.0.1", default-features = false } +wgpu-types = { version = "24", default-features = false } serde = { version = "1", features = ["derive"] } thiserror = { version = "2", default-features = false } futures-lite = "2.0.1" +guillotiere = "0.6.0" +rectangle-pack = "0.4" ddsfile = { version = "0.5.2", optional = true } ktx2 = { version = "0.3.0", optional = true } # For ktx2 supercompression flate2 = { version = "1.0.22", optional = true } -ruzstd = { version = "0.7.0", optional = true } +ruzstd = { version = "0.8.0", optional = true } # For transcoding of UASTC/ETC1S universal formats, and for .basis file support basis-universal = { version = "0.3.0", optional = true } +tracing = { version = "0.1", default-features = false, features = ["std"] } +half = { version = "2.4.1" } + +[dev-dependencies] +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } [lints] workspace = true diff --git a/crates/bevy_image/LICENSE-APACHE b/crates/bevy_image/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_image/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License 
+ Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_image/LICENSE-MIT b/crates/bevy_image/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_image/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_image/src/basis.rs b/crates/bevy_image/src/basis.rs index 7772f38c0b952..553140b0024bf 100644 --- a/crates/bevy_image/src/basis.rs +++ b/crates/bevy_image/src/basis.rs @@ -116,7 +116,7 @@ pub fn basis_buffer_to_image( ))) } }; - image.data = transcoded; + image.data = Some(transcoded); Ok(image) } diff --git a/crates/bevy_image/src/compressed_image_saver.rs b/crates/bevy_image/src/compressed_image_saver.rs index 220317172626a..c2adc2c029116 100644 --- a/crates/bevy_image/src/compressed_image_saver.rs +++ b/crates/bevy_image/src/compressed_image_saver.rs @@ -11,6 +11,8 @@ pub struct CompressedImageSaver; pub enum CompressedImageSaverError { #[error(transparent)] Io(#[from] std::io::Error), + #[error("Cannot compress an uninitialized image")] + UninitializedImage, } impl AssetSaver for CompressedImageSaver { @@ -42,9 +44,16 @@ impl AssetSaver for CompressedImageSaver { let mut source_image = compressor_params.source_image_mut(0); let size = image.size(); - source_image.init(&image.data, size.x, size.y, 4); + let Some(ref data) = image.data else { + return Err(CompressedImageSaverError::UninitializedImage); + }; + source_image.init(data, size.x, size.y, 4); let mut compressor = basis_universal::Compressor::new(4); + #[expect( + unsafe_code, + reason = "The basis-universal compressor cannot be interacted with except through unsafe functions" + )] // SAFETY: the CompressorParams are "valid" to the best of our knowledge. The basis-universal // library bindings note that invalid params might produce undefined behavior. unsafe { diff --git a/crates/bevy_image/src/dds.rs b/crates/bevy_image/src/dds.rs index f36ec25894c02..8dc58ad4828d4 100644 --- a/crates/bevy_image/src/dds.rs +++ b/crates/bevy_image/src/dds.rs @@ -1,17 +1,17 @@ //! [DirectDraw Surface](https://en.wikipedia.org/wiki/DirectDraw_Surface) functionality. 
-#[cfg(debug_assertions)] -use bevy_utils::warn_once; use ddsfile::{Caps2, D3DFormat, Dds, DxgiFormat}; use std::io::Cursor; -use wgpu::TextureViewDescriptor; -use wgpu_types::{Extent3d, TextureDimension, TextureFormat, TextureViewDimension}; +use wgpu_types::{ + Extent3d, TextureDimension, TextureFormat, TextureViewDescriptor, TextureViewDimension, +}; +#[cfg(debug_assertions)] +use {bevy_utils::once, tracing::warn}; -use super::{CompressedImageFormats, Image, TextureError}; +use super::{CompressedImageFormats, Image, TextureError, TranscodeFormat}; #[cfg(feature = "dds")] pub fn dds_buffer_to_image( - #[cfg(debug_assertions)] name: String, buffer: &[u8], supported_compressed_formats: CompressedImageFormats, is_srgb: bool, @@ -19,7 +19,18 @@ pub fn dds_buffer_to_image( let mut cursor = Cursor::new(buffer); let dds = Dds::read(&mut cursor) .map_err(|error| TextureError::InvalidData(format!("Failed to parse DDS file: {error}")))?; - let texture_format = dds_format_to_texture_format(&dds, is_srgb)?; + let (texture_format, transcode_format) = match dds_format_to_texture_format(&dds, is_srgb) { + Ok(format) => (format, None), + Err(TextureError::FormatRequiresTranscodingError(TranscodeFormat::Rgb8)) => { + let format = if is_srgb { + TextureFormat::Bgra8UnormSrgb + } else { + TextureFormat::Bgra8Unorm + }; + (format, Some(TranscodeFormat::Rgb8)) + } + Err(error) => return Err(error), + }; if !supported_compressed_formats.supports(texture_format) { return Err(TextureError::UnsupportedTextureFormat(format!( "Format not supported by this GPU: {texture_format:?}", @@ -53,10 +64,7 @@ pub fn dds_buffer_to_image( let mip_map_level = match dds.get_num_mipmap_levels() { 0 => { #[cfg(debug_assertions)] - warn_once!( - "Mipmap levels for texture {} are 0, bumping them to 1", - name - ); + once!(warn!("Mipmap levels for texture are 0, bumping them to 1",)); 1 } t => t, @@ -65,10 +73,14 @@ pub fn dds_buffer_to_image( image.texture_descriptor.format = texture_format; 
image.texture_descriptor.dimension = if dds.get_depth() > 1 { TextureDimension::D3 - } else if image.is_compressed() || dds.get_height() > 1 { - TextureDimension::D2 - } else { + // 1x1 textures should generally be interpreted as solid 2D + } else if ((dds.get_width() > 1 || dds.get_height() > 1) + && !(dds.get_width() > 1 && dds.get_height() > 1)) + && !image.is_compressed() + { TextureDimension::D1 + } else { + TextureDimension::D2 }; if is_cubemap { let dimension = if image.texture_descriptor.size.depth_or_array_layers > 6 { @@ -81,7 +93,29 @@ pub fn dds_buffer_to_image( ..Default::default() }); } - image.data = dds.data; + + // DDS mipmap layout is directly compatible with wgpu's layout (Slice -> Face -> Mip): + // https://learn.microsoft.com/fr-fr/windows/win32/direct3ddds/dx-graphics-dds-reference + image.data = if let Some(transcode_format) = transcode_format { + match transcode_format { + TranscodeFormat::Rgb8 => { + let data = dds + .data + .chunks_exact(3) + .flat_map(|pixel| [pixel[0], pixel[1], pixel[2], u8::MAX]) + .collect(); + Some(data) + } + _ => { + return Err(TextureError::TranscodeError(format!( + "unsupported transcode from {transcode_format:?} to {texture_format:?}" + ))) + } + } + } else { + Some(dds.data) + }; + Ok(image) } @@ -107,6 +141,9 @@ pub fn dds_format_to_texture_format( TextureFormat::Bgra8Unorm } } + D3DFormat::R8G8B8 => { + return Err(TextureError::FormatRequiresTranscodingError(TranscodeFormat::Rgb8)); + }, D3DFormat::G16R16 => TextureFormat::Rg16Uint, D3DFormat::A2B10G10R10 => TextureFormat::Rgb10a2Unorm, D3DFormat::A8L8 => TextureFormat::Rg8Uint, @@ -148,7 +185,6 @@ pub fn dds_format_to_texture_format( // FIXME: Map to argb format and user has to know to ignore the alpha channel? 
| D3DFormat::X8B8G8R8 | D3DFormat::A2R10G10B10 - | D3DFormat::R8G8B8 | D3DFormat::X1R5G5B5 | D3DFormat::A4R4G4B4 | D3DFormat::X4R4G4B4 @@ -283,8 +319,7 @@ pub fn dds_format_to_texture_format( #[cfg(test)] mod test { - use wgpu::util::TextureDataOrder; - use wgpu_types::{TextureDescriptor, TextureDimension, TextureFormat}; + use wgpu_types::{TextureDataOrder, TextureDescriptor, TextureDimension, TextureFormat}; use crate::CompressedImageFormats; @@ -370,10 +405,10 @@ mod test { 0x49, 0x92, 0x24, 0x16, 0x95, 0xae, 0x42, 0xfc, 0, 0xaa, 0x55, 0xff, 0xff, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0xd8, 0xad, 0xae, 0x42, 0xaf, 0x0a, 0xaa, 0x55, ]; - let r = dds_buffer_to_image("".into(), &buffer, CompressedImageFormats::BC, true); + let r = dds_buffer_to_image(&buffer, CompressedImageFormats::BC, true); assert!(r.is_ok()); if let Ok(r) = r { - fake_wgpu_create_texture_with_data(&r.texture_descriptor, &r.data); + fake_wgpu_create_texture_with_data(&r.texture_descriptor, r.data.as_ref().unwrap()); } } } diff --git a/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs b/crates/bevy_image/src/dynamic_texture_atlas_builder.rs similarity index 73% rename from crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs rename to crates/bevy_image/src/dynamic_texture_atlas_builder.rs index 34e3ae87a7c76..e8b812194a6fe 100644 --- a/crates/bevy_sprite/src/dynamic_texture_atlas_builder.rs +++ b/crates/bevy_image/src/dynamic_texture_atlas_builder.rs @@ -1,8 +1,21 @@ -use crate::TextureAtlasLayout; -use bevy_image::{Image, TextureFormatPixelInfo}; +use crate::{Image, TextureAtlasLayout, TextureFormatPixelInfo as _}; +use bevy_asset::RenderAssetUsages; use bevy_math::{URect, UVec2}; -use bevy_render::render_asset::RenderAssetUsages; use guillotiere::{size2, Allocation, AtlasAllocator}; +use thiserror::Error; +use tracing::error; + +#[derive(Debug, Error)] +pub enum DynamicTextureAtlasBuilderError { + #[error("Couldn't allocate space to add the image requested")] + FailedToAllocateSpace, + 
/// Attempted to add a texture to an uninitialized atlas + #[error("cannot add texture to uninitialized atlas texture")] + UninitializedAtlas, + /// Attempted to add an uninitialized texture to an atlas + #[error("cannot add uninitialized texture to atlas")] + UninitializedSourceTexture, +} /// Helper utility to update [`TextureAtlasLayout`] on the fly. /// @@ -43,7 +56,7 @@ impl DynamicTextureAtlasBuilder { atlas_layout: &mut TextureAtlasLayout, texture: &Image, atlas_texture: &mut Image, - ) -> Option { + ) -> Result { let allocation = self.atlas_allocator.allocate(size2( (texture.width() + self.padding).try_into().unwrap(), (texture.height() + self.padding).try_into().unwrap(), @@ -54,12 +67,12 @@ impl DynamicTextureAtlasBuilder { "The atlas_texture image must have the RenderAssetUsages::MAIN_WORLD usage flag set" ); - self.place_texture(atlas_texture, allocation, texture); + self.place_texture(atlas_texture, allocation, texture)?; let mut rect: URect = to_rect(allocation.rectangle); rect.max = rect.max.saturating_sub(UVec2::splat(self.padding)); - Some(atlas_layout.add_texture(rect)) + Ok(atlas_layout.add_texture(rect)) } else { - None + Err(DynamicTextureAtlasBuilderError::FailedToAllocateSpace) } } @@ -68,7 +81,7 @@ impl DynamicTextureAtlasBuilder { atlas_texture: &mut Image, allocation: Allocation, texture: &Image, - ) { + ) -> Result<(), DynamicTextureAtlasBuilderError> { let mut rect = allocation.rectangle; rect.max.x -= self.padding as i32; rect.max.y -= self.padding as i32; @@ -76,14 +89,20 @@ impl DynamicTextureAtlasBuilder { let rect_width = rect.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); + let Some(ref mut atlas_data) = atlas_texture.data else { + return Err(DynamicTextureAtlasBuilderError::UninitializedAtlas); + }; + let Some(ref data) = texture.data else { + return Err(DynamicTextureAtlasBuilderError::UninitializedSourceTexture); + }; for (texture_y, bound_y) in (rect.min.y..rect.max.y).map(|i| i as 
usize).enumerate() { let begin = (bound_y * atlas_width + rect.min.x as usize) * format_size; let end = begin + rect_width * format_size; let texture_begin = texture_y * rect_width * format_size; let texture_end = texture_begin + rect_width * format_size; - atlas_texture.data[begin..end] - .copy_from_slice(&texture.data[texture_begin..texture_end]); + atlas_data[begin..end].copy_from_slice(&data[texture_begin..texture_end]); } + Ok(()) } } diff --git a/crates/bevy_image/src/image.rs b/crates/bevy_image/src/image.rs index d9f02379f41fe..41b698b78dcdc 100644 --- a/crates/bevy_image/src/image.rs +++ b/crates/bevy_image/src/image.rs @@ -4,19 +4,22 @@ use super::basis::*; use super::dds::*; #[cfg(feature = "ktx2")] use super::ktx2::*; +#[cfg(not(feature = "bevy_reflect"))] +use bevy_reflect::TypePath; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_asset::{Asset, RenderAssetUsages}; use bevy_color::{Color, ColorToComponents, Gray, LinearRgba, Srgba, Xyza}; use bevy_math::{AspectRatio, UVec2, UVec3, Vec2}; -use bevy_reflect::std_traits::ReflectDefault; -use bevy_reflect::Reflect; use core::hash::Hash; use serde::{Deserialize, Serialize}; use thiserror::Error; -use wgpu::{SamplerDescriptor, TextureViewDescriptor}; +use tracing::warn; use wgpu_types::{ AddressMode, CompareFunction, Extent3d, Features, FilterMode, SamplerBorderColor, - TextureDescriptor, TextureDimension, TextureFormat, TextureUsages, + SamplerDescriptor, TextureDescriptor, TextureDimension, TextureFormat, TextureUsages, + TextureViewDescriptor, }; pub trait BevyDefault { @@ -72,7 +75,7 @@ macro_rules! 
feature_gate { ($feature: tt, $value: ident) => {{ #[cfg(not(feature = $feature))] { - bevy_utils::tracing::warn!("feature \"{}\" is not enabled", $feature); + tracing::warn!("feature \"{}\" is not enabled", $feature); return None; } #[cfg(feature = $feature)] @@ -117,7 +120,14 @@ impl ImageFormat { #[cfg(feature = "webp")] ImageFormat::WebP => &["webp"], // FIXME: https://github.com/rust-lang/rust/issues/129031 - #[allow(unreachable_patterns)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_patterns` may not always lint" + )] + #[allow( + unreachable_patterns, + reason = "The wildcard pattern will be unreachable if all formats are enabled; otherwise, it will be reachable" + )] _ => &[], } } @@ -165,13 +175,27 @@ impl ImageFormat { #[cfg(feature = "webp")] ImageFormat::WebP => &["image/webp"], // FIXME: https://github.com/rust-lang/rust/issues/129031 - #[allow(unreachable_patterns)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_patterns` may not always lint" + )] + #[allow( + unreachable_patterns, + reason = "The wildcard pattern will be unreachable if all formats are enabled; otherwise, it will be reachable" + )] _ => &[], } } pub fn from_mime_type(mime_type: &str) -> Option { - #[allow(unreachable_code)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_code` may not always lint" + )] + #[allow( + unreachable_code, + reason = "If all features listed below are disabled, then all arms will have a `return None`, keeping the surrounding `Some()` from being constructed." 
+ )] Some(match mime_type.to_ascii_lowercase().as_str() { // note: farbfeld does not have a MIME type "image/basis" | "image/x-basis" => feature_gate!("basis-universal", Basis), @@ -197,7 +221,14 @@ impl ImageFormat { } pub fn from_extension(extension: &str) -> Option { - #[allow(unreachable_code)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_code` may not always lint" + )] + #[allow( + unreachable_code, + reason = "If all features listed below are disabled, then all arms will have a `return None`, keeping the surrounding `Some()` from being constructed." + )] Some(match extension.to_ascii_lowercase().as_str() { "basis" => feature_gate!("basis-universal", Basis), "bmp" => feature_gate!("bmp", Bmp), @@ -220,7 +251,14 @@ impl ImageFormat { } pub fn as_image_crate_format(&self) -> Option { - #[allow(unreachable_code)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_code` may not always lint" + )] + #[allow( + unreachable_code, + reason = "If all features listed below are disabled, then all arms will have a `return None`, keeping the surrounding `Some()` from being constructed." 
+ )] Some(match self { #[cfg(feature = "bmp")] ImageFormat::Bmp => image::ImageFormat::Bmp, @@ -255,13 +293,27 @@ impl ImageFormat { #[cfg(feature = "ktx2")] ImageFormat::Ktx2 => return None, // FIXME: https://github.com/rust-lang/rust/issues/129031 - #[allow(unreachable_patterns)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_patterns` may not always lint" + )] + #[allow( + unreachable_patterns, + reason = "The wildcard pattern will be unreachable if all formats are enabled; otherwise, it will be reachable" + )] _ => return None, }) } pub fn from_image_crate_format(format: image::ImageFormat) -> Option { - #[allow(unreachable_code)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_code` may not always lint" + )] + #[allow( + unreachable_code, + reason = "If all features listed below are disabled, then all arms will have a `return None`, keeping the surrounding `Some()` from being constructed." + )] Some(match format { image::ImageFormat::Bmp => feature_gate!("bmp", Bmp), image::ImageFormat::Dds => feature_gate!("dds", Dds), @@ -282,16 +334,24 @@ impl ImageFormat { } } -#[derive(Asset, Reflect, Debug, Clone)] -#[reflect(opaque)] -#[reflect(Default, Debug)] +#[derive(Asset, Debug, Clone)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(opaque, Default, Debug, Clone) +)] +#[cfg_attr(not(feature = "bevy_reflect"), derive(TypePath))] pub struct Image { - pub data: Vec, + /// Raw pixel data. + /// If the image is being used as a storage texture which doesn't need to be initialized by the + /// CPU, then this should be `None` + /// Otherwise, it should always be `Some` + pub data: Option>, // TODO: this nesting makes accessing Image metadata verbose. Either flatten out descriptor or add accessors pub texture_descriptor: TextureDescriptor, &'static [TextureFormat]>, /// The [`ImageSampler`] to use during rendering. 
pub sampler: ImageSampler, - pub texture_view_descriptor: Option>, + pub texture_view_descriptor: Option>>, pub asset_usage: RenderAssetUsages, } @@ -507,7 +567,7 @@ impl ImageSamplerDescriptor { } } - pub fn as_wgpu(&self) -> SamplerDescriptor { + pub fn as_wgpu(&self) -> SamplerDescriptor> { SamplerDescriptor { label: self.label.as_deref(), address_mode_u: self.address_mode_u.into(), @@ -617,8 +677,8 @@ impl From for ImageSamplerBorderColor { } } -impl<'a> From> for ImageSamplerDescriptor { - fn from(value: SamplerDescriptor) -> Self { +impl From>> for ImageSamplerDescriptor { + fn from(value: SamplerDescriptor>) -> Self { ImageSamplerDescriptor { label: value.label.map(ToString::to_string), address_mode_u: value.address_mode_u.into(), @@ -639,28 +699,9 @@ impl<'a> From> for ImageSamplerDescriptor { impl Default for Image { /// default is a 1x1x1 all '1.0' texture fn default() -> Self { - let format = TextureFormat::bevy_default(); - let data = vec![255; format.pixel_size()]; - Image { - data, - texture_descriptor: TextureDescriptor { - size: Extent3d { - width: 1, - height: 1, - depth_or_array_layers: 1, - }, - format, - dimension: TextureDimension::D2, - label: None, - mip_level_count: 1, - sample_count: 1, - usage: TextureUsages::TEXTURE_BINDING | TextureUsages::COPY_DST, - view_formats: &[], - }, - sampler: ImageSampler::Default, - texture_view_descriptor: None, - asset_usage: RenderAssetUsages::default(), - } + let mut image = Image::default_uninit(); + image.data = Some(vec![255; image.texture_descriptor.format.pixel_size()]); + image } } @@ -682,37 +723,24 @@ impl Image { data.len(), "Pixel data, size and format have to match", ); - let mut image = Self { - data, - ..Default::default() - }; - image.texture_descriptor.dimension = dimension; - image.texture_descriptor.size = size; - image.texture_descriptor.format = format; - image.asset_usage = asset_usage; + let mut image = Image::new_uninit(size, dimension, format, asset_usage); + image.data = Some(data); 
image } - /// A transparent white 1x1x1 image. - /// - /// Contrast to [`Image::default`], which is opaque. - pub fn transparent() -> Image { - // We rely on the default texture format being RGBA8UnormSrgb - // when constructing a transparent color from bytes. - // If this changes, this function will need to be updated. - let format = TextureFormat::bevy_default(); - debug_assert!(format.pixel_size() == 4); - let data = vec![255, 255, 255, 0]; + /// Exactly the same as [`Image::new`], but doesn't initialize the image + pub fn new_uninit( + size: Extent3d, + dimension: TextureDimension, + format: TextureFormat, + asset_usage: RenderAssetUsages, + ) -> Self { Image { - data, + data: None, texture_descriptor: TextureDescriptor { - size: Extent3d { - width: 1, - height: 1, - depth_or_array_layers: 1, - }, + size, format, - dimension: TextureDimension::D2, + dimension, label: None, mip_level_count: 1, sample_count: 1, @@ -721,10 +749,46 @@ impl Image { }, sampler: ImageSampler::Default, texture_view_descriptor: None, - asset_usage: RenderAssetUsages::default(), + asset_usage, } } + /// A transparent white 1x1x1 image. + /// + /// Contrast to [`Image::default`], which is opaque. + pub fn transparent() -> Image { + // We rely on the default texture format being RGBA8UnormSrgb + // when constructing a transparent color from bytes. + // If this changes, this function will need to be updated. 
+ let format = TextureFormat::bevy_default(); + debug_assert!(format.pixel_size() == 4); + let data = vec![255, 255, 255, 0]; + Image::new( + Extent3d { + width: 1, + height: 1, + depth_or_array_layers: 1, + }, + TextureDimension::D2, + data, + format, + RenderAssetUsages::default(), + ) + } + /// Creates a new uninitialized 1x1x1 image + pub fn default_uninit() -> Image { + Image::new_uninit( + Extent3d { + width: 1, + height: 1, + depth_or_array_layers: 1, + }, + TextureDimension::D2, + TextureFormat::bevy_default(), + RenderAssetUsages::default(), + ) + } + /// Creates a new image from raw binary data and the corresponding metadata, by filling /// the image data with the `pixel` data repeated multiple times. /// @@ -737,12 +801,7 @@ impl Image { format: TextureFormat, asset_usage: RenderAssetUsages, ) -> Self { - let mut value = Image::default(); - value.texture_descriptor.format = format; - value.texture_descriptor.dimension = dimension; - value.asset_usage = asset_usage; - value.resize(size); - + let byte_len = format.pixel_size() * size.volume(); debug_assert_eq!( pixel.len() % format.pixel_size(), 0, @@ -750,15 +809,12 @@ impl Image { format.pixel_size(), ); debug_assert!( - pixel.len() <= value.data.len(), + pixel.len() <= byte_len, "Fill data must fit within pixel buffer (expected {}B).", - value.data.len(), + byte_len, ); - - for current_pixel in value.data.chunks_exact_mut(pixel.len()) { - current_pixel.copy_from_slice(pixel); - } - value + let data = pixel.iter().copied().cycle().take(byte_len).collect(); + Image::new(size, dimension, data, format, asset_usage) } /// Returns the width of a 2D image. @@ -797,10 +853,14 @@ impl Image { /// Does not properly resize the contents of the image, but only its internal `data` buffer. 
pub fn resize(&mut self, size: Extent3d) { self.texture_descriptor.size = size; - self.data.resize( - size.volume() * self.texture_descriptor.format.pixel_size(), - 0, - ); + if let Some(ref mut data) = self.data { + data.resize( + size.volume() * self.texture_descriptor.format.pixel_size(), + 0, + ); + } else { + warn!("Resized an uninitialized image. Directly modify image.texture_descriptor.size instead"); + } } /// Changes the `size`, asserting that the total number of data elements (pixels) remains the @@ -871,10 +931,13 @@ impl Image { /// Load a bytes buffer in a [`Image`], according to type `image_type`, using the `image` /// crate pub fn from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] name: String, buffer: &[u8], image_type: ImageType, - #[allow(unused_variables)] supported_compressed_formats: CompressedImageFormats, + #[cfg_attr( + not(any(feature = "basis-universal", feature = "dds", feature = "ktx2")), + expect(unused_variables, reason = "only used with certain features") + )] + supported_compressed_formats: CompressedImageFormats, is_srgb: bool, image_sampler: ImageSampler, asset_usage: RenderAssetUsages, @@ -893,18 +956,19 @@ impl Image { basis_buffer_to_image(buffer, supported_compressed_formats, is_srgb)? } #[cfg(feature = "dds")] - ImageFormat::Dds => dds_buffer_to_image( - #[cfg(debug_assertions)] - name, - buffer, - supported_compressed_formats, - is_srgb, - )?, + ImageFormat::Dds => dds_buffer_to_image(buffer, supported_compressed_formats, is_srgb)?, #[cfg(feature = "ktx2")] ImageFormat::Ktx2 => { ktx2_buffer_to_image(buffer, supported_compressed_formats, is_srgb)? 
} - #[allow(unreachable_patterns)] + #[expect( + clippy::allow_attributes, + reason = "`unreachable_patterns` may not always lint" + )] + #[allow( + unreachable_patterns, + reason = "The wildcard pattern may be unreachable if only the specially-handled formats are enabled; however, the wildcard pattern is needed for any formats not specially handled" + )] _ => { let image_crate_format = format .as_image_crate_format() @@ -938,7 +1002,7 @@ impl Image { /// /// Returns None if the provided coordinates are out of bounds. /// - /// For 2D textures, Z is ignored. For 1D textures, Y and Z are ignored. + /// For 2D textures, Z is the layer number. For 1D textures, Y and Z are ignored. #[inline(always)] pub fn pixel_data_offset(&self, coords: UVec3) -> Option { let width = self.texture_descriptor.size.width; @@ -947,18 +1011,12 @@ impl Image { let pixel_size = self.texture_descriptor.format.pixel_size(); let pixel_offset = match self.texture_descriptor.dimension { - TextureDimension::D3 => { + TextureDimension::D3 | TextureDimension::D2 => { if coords.x >= width || coords.y >= height || coords.z >= depth { return None; } coords.z * height * width + coords.y * width + coords.x } - TextureDimension::D2 => { - if coords.x >= width || coords.y >= height { - return None; - } - coords.y * width + coords.x - } TextureDimension::D1 => { if coords.x >= width { return None; @@ -974,16 +1032,18 @@ impl Image { #[inline(always)] pub fn pixel_bytes(&self, coords: UVec3) -> Option<&[u8]> { let len = self.texture_descriptor.format.pixel_size(); + let data = self.data.as_ref()?; self.pixel_data_offset(coords) - .map(|start| &self.data[start..(start + len)]) + .map(|start| &data[start..(start + len)]) } /// Get a mutable reference to the data bytes where a specific pixel's value is stored #[inline(always)] pub fn pixel_bytes_mut(&mut self, coords: UVec3) -> Option<&mut [u8]> { let len = self.texture_descriptor.format.pixel_size(); - self.pixel_data_offset(coords) - .map(|start| &mut 
self.data[start..(start + len)]) + let offset = self.pixel_data_offset(coords); + let data = self.data.as_mut()?; + offset.map(|start| &mut data[start..(start + len)]) } /// Read the color of a specific pixel (1D texture). @@ -1005,10 +1065,10 @@ impl Image { /// Supports many of the common [`TextureFormat`]s: /// - RGBA/BGRA 8-bit unsigned integer, both sRGB and Linear /// - 16-bit and 32-bit unsigned integer - /// - 32-bit float + /// - 16-bit and 32-bit float /// /// Be careful: as the data is converted to [`Color`] (which uses `f32` internally), - /// there may be issues with precision when using non-float [`TextureFormat`]s. + /// there may be issues with precision when using non-f32 [`TextureFormat`]s. /// If you read a value you previously wrote using `set_color_at`, it will not match. /// If you are working with a 32-bit integer [`TextureFormat`], the value will be /// inaccurate (as `f32` does not have enough bits to represent it exactly). @@ -1019,7 +1079,6 @@ impl Image { /// Other [`TextureFormat`]s are unsupported, such as: /// - block-compressed formats /// - non-byte-aligned formats like 10-bit - /// - 16-bit float formats /// - signed integer formats #[inline(always)] pub fn get_color_at(&self, x: u32, y: u32) -> Result { @@ -1029,15 +1088,20 @@ impl Image { self.get_color_at_internal(UVec3::new(x, y, 0)) } - /// Read the color of a specific pixel (3D texture). + /// Read the color of a specific pixel (2D texture with layers or 3D texture). /// /// See [`get_color_at`](Self::get_color_at) for more details. #[inline(always)] pub fn get_color_at_3d(&self, x: u32, y: u32, z: u32) -> Result { - if self.texture_descriptor.dimension != TextureDimension::D3 { - return Err(TextureAccessError::WrongDimension); + match ( + self.texture_descriptor.dimension, + self.texture_descriptor.size.depth_or_array_layers, + ) { + (TextureDimension::D3, _) | (TextureDimension::D2, 2..) 
=> { + self.get_color_at_internal(UVec3::new(x, y, z)) + } + _ => Err(TextureAccessError::WrongDimension), } - self.get_color_at_internal(UVec3::new(x, y, z)) } /// Change the color of a specific pixel (1D texture). @@ -1060,9 +1124,9 @@ impl Image { /// Supports many of the common [`TextureFormat`]s: /// - RGBA/BGRA 8-bit unsigned integer, both sRGB and Linear /// - 16-bit and 32-bit unsigned integer (with possibly-limited precision, as [`Color`] uses `f32`) - /// - 32-bit float + /// - 16-bit and 32-bit float /// - /// Be careful: writing to non-float [`TextureFormat`]s is lossy! The data has to be converted, + /// Be careful: writing to non-f32 [`TextureFormat`]s is lossy! The data has to be converted, /// so if you read it back using `get_color_at`, the `Color` you get will not equal the value /// you used when writing it using this function. /// @@ -1071,7 +1135,6 @@ impl Image { /// Other [`TextureFormat`]s are unsupported, such as: /// - block-compressed formats /// - non-byte-aligned formats like 10-bit - /// - 16-bit float formats /// - signed integer formats #[inline(always)] pub fn set_color_at(&mut self, x: u32, y: u32, color: Color) -> Result<(), TextureAccessError> { @@ -1081,7 +1144,7 @@ impl Image { self.set_color_at_internal(UVec3::new(x, y, 0), color) } - /// Change the color of a specific pixel (3D texture). + /// Change the color of a specific pixel (2D texture with layers or 3D texture). /// /// See [`set_color_at`](Self::set_color_at) for more details. #[inline(always)] @@ -1092,10 +1155,15 @@ impl Image { z: u32, color: Color, ) -> Result<(), TextureAccessError> { - if self.texture_descriptor.dimension != TextureDimension::D3 { - return Err(TextureAccessError::WrongDimension); + match ( + self.texture_descriptor.dimension, + self.texture_descriptor.size.depth_or_array_layers, + ) { + (TextureDimension::D3, _) | (TextureDimension::D2, 2..) 
=> { + self.set_color_at_internal(UVec3::new(x, y, z), color) + } + _ => Err(TextureAccessError::WrongDimension), } - self.set_color_at_internal(UVec3::new(x, y, z), color) } #[inline(always)] @@ -1141,6 +1209,12 @@ impl Image { f32::from_le_bytes([bytes[8], bytes[9], bytes[10], bytes[11]]), f32::from_le_bytes([bytes[12], bytes[13], bytes[14], bytes[15]]), )), + TextureFormat::Rgba16Float => Ok(Color::linear_rgba( + half::f16::from_le_bytes([bytes[0], bytes[1]]).to_f32(), + half::f16::from_le_bytes([bytes[2], bytes[3]]).to_f32(), + half::f16::from_le_bytes([bytes[4], bytes[5]]).to_f32(), + half::f16::from_le_bytes([bytes[6], bytes[7]]).to_f32(), + )), TextureFormat::Rgba16Unorm | TextureFormat::Rgba16Uint => { let (r, g, b, a) = ( u16::from_le_bytes([bytes[0], bytes[1]]), @@ -1189,6 +1263,10 @@ impl Image { let x = (x as f64 / u32::MAX as f64) as f32; Ok(Color::linear_rgb(x, x, x)) } + TextureFormat::R16Float => { + let x = half::f16::from_le_bytes([bytes[0], bytes[1]]).to_f32(); + Ok(Color::linear_rgb(x, x, x)) + } TextureFormat::R32Float => { let x = f32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]); Ok(Color::linear_rgb(x, x, x)) @@ -1214,6 +1292,11 @@ impl Image { let g = (g as f64 / u32::MAX as f64) as f32; Ok(Color::linear_rgb(r, g, 0.0)) } + TextureFormat::Rg16Float => { + let r = half::f16::from_le_bytes([bytes[0], bytes[1]]).to_f32(); + let g = half::f16::from_le_bytes([bytes[2], bytes[3]]).to_f32(); + Ok(Color::linear_rgb(r, g, 0.0)) + } TextureFormat::Rg32Float => { let r = f32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]); let g = f32::from_le_bytes([bytes[4], bytes[5], bytes[6], bytes[7]]); @@ -1272,6 +1355,13 @@ impl Image { bytes[2] = (r * u8::MAX as f32) as u8; bytes[3] = (a * u8::MAX as f32) as u8; } + TextureFormat::Rgba16Float => { + let [r, g, b, a] = LinearRgba::from(color).to_f32_array(); + bytes[0..2].copy_from_slice(&half::f16::to_le_bytes(half::f16::from_f32(r))); + 
bytes[2..4].copy_from_slice(&half::f16::to_le_bytes(half::f16::from_f32(g))); + bytes[4..6].copy_from_slice(&half::f16::to_le_bytes(half::f16::from_f32(b))); + bytes[6..8].copy_from_slice(&half::f16::to_le_bytes(half::f16::from_f32(a))); + } TextureFormat::Rgba32Float => { let [r, g, b, a] = LinearRgba::from(color).to_f32_array(); bytes[0..4].copy_from_slice(&f32::to_le_bytes(r)); @@ -1329,6 +1419,14 @@ impl Image { let r = (r as f64 * u32::MAX as f64) as u32; bytes[0..4].copy_from_slice(&u32::to_le_bytes(r)); } + TextureFormat::R16Float => { + // Convert to grayscale with minimal loss if color is already gray + let linear = LinearRgba::from(color); + let luminance = Xyza::from(linear).y; + let [r, _, _, _] = LinearRgba::gray(luminance).to_f32_array(); + let x = half::f16::from_f32(r); + bytes[0..2].copy_from_slice(&half::f16::to_le_bytes(x)); + } TextureFormat::R32Float => { // Convert to grayscale with minimal loss if color is already gray let linear = LinearRgba::from(color); @@ -1356,6 +1454,11 @@ impl Image { bytes[0..4].copy_from_slice(&u32::to_le_bytes(r)); bytes[4..8].copy_from_slice(&u32::to_le_bytes(g)); } + TextureFormat::Rg16Float => { + let [r, g, _, _] = LinearRgba::from(color).to_f32_array(); + bytes[0..2].copy_from_slice(&half::f16::to_le_bytes(half::f16::from_f32(r))); + bytes[2..4].copy_from_slice(&half::f16::to_le_bytes(half::f16::from_f32(g))); + } TextureFormat::Rg32Float => { let [r, g, _, _] = LinearRgba::from(color).to_f32_array(); bytes[0..4].copy_from_slice(&f32::to_le_bytes(r)); @@ -1380,19 +1483,20 @@ pub enum DataFormat { Rg, } +/// Texture data need to be transcoded from this format for use with `wgpu`. #[derive(Clone, Copy, Debug)] pub enum TranscodeFormat { Etc1s, Uastc(DataFormat), - // Has to be transcoded to R8Unorm for use with `wgpu` + // Has to be transcoded to R8Unorm for use with `wgpu`. R8UnormSrgb, - // Has to be transcoded to R8G8Unorm for use with `wgpu` + // Has to be transcoded to R8G8Unorm for use with `wgpu`. 
Rg8UnormSrgb, - // Has to be transcoded to Rgba8 for use with `wgpu` + // Has to be transcoded to Rgba8 for use with `wgpu`. Rgb8, } -/// An error that occurs when accessing specific pixels in a texture +/// An error that occurs when accessing specific pixels in a texture. #[derive(Error, Debug)] pub enum TextureAccessError { #[error("out of bounds (x: {x}, y: {y}, z: {z})")] @@ -1403,25 +1507,34 @@ pub enum TextureAccessError { WrongDimension, } -/// An error that occurs when loading a texture +/// An error that occurs when loading a texture. #[derive(Error, Debug)] pub enum TextureError { + /// Image MIME type is invalid. #[error("invalid image mime type: {0}")] InvalidImageMimeType(String), + /// Image extension is invalid. #[error("invalid image extension: {0}")] InvalidImageExtension(String), + /// Failed to load an image. #[error("failed to load an image: {0}")] ImageError(#[from] image::ImageError), + /// Texture format isn't supported. #[error("unsupported texture format: {0}")] UnsupportedTextureFormat(String), + /// Supercompression isn't supported. #[error("supercompression not supported: {0}")] SuperCompressionNotSupported(String), - #[error("failed to load an image: {0}")] + /// Failed to decompress an image. + #[error("failed to decompress an image: {0}")] SuperDecompressionError(String), + /// Invalid data. #[error("invalid data: {0}")] InvalidData(String), + /// Transcode error. #[error("transcode error: {0}")] TranscodeError(String), + /// Format requires transcoding. #[error("format requires transcoding: {0:?}")] FormatRequiresTranscodingError(TranscodeFormat), /// Only cubemaps with six faces are supported. 
@@ -1592,4 +1705,25 @@ mod test { Err(TextureAccessError::OutOfBounds { x: 5, y: 10, z: 0 }) )); } + + #[test] + fn get_set_pixel_2d_with_layers() { + let mut image = Image::new_fill( + Extent3d { + width: 5, + height: 10, + depth_or_array_layers: 3, + }, + TextureDimension::D2, + &[0, 0, 0, 255], + TextureFormat::Rgba8Unorm, + RenderAssetUsages::MAIN_WORLD, + ); + image.set_color_at_3d(0, 0, 0, Color::WHITE).unwrap(); + assert!(matches!(image.get_color_at_3d(0, 0, 0), Ok(Color::WHITE))); + image.set_color_at_3d(2, 3, 1, Color::WHITE).unwrap(); + assert!(matches!(image.get_color_at_3d(2, 3, 1), Ok(Color::WHITE))); + image.set_color_at_3d(4, 9, 2, Color::WHITE).unwrap(); + assert!(matches!(image.get_color_at_3d(4, 9, 2), Ok(Color::WHITE))); + } } diff --git a/crates/bevy_image/src/image_loader.rs b/crates/bevy_image/src/image_loader.rs index 949ee78e49d4b..0ef1213b46f17 100644 --- a/crates/bevy_image/src/image_loader.rs +++ b/crates/bevy_image/src/image_loader.rs @@ -81,7 +81,7 @@ impl ImageLoader { } } -#[derive(Serialize, Deserialize, Default, Debug)] +#[derive(Serialize, Deserialize, Default, Debug, Clone)] pub enum ImageFormatSetting { #[default] FromExtension, @@ -89,7 +89,7 @@ pub enum ImageFormatSetting { Guess, } -#[derive(Serialize, Deserialize, Debug)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct ImageLoaderSettings { pub format: ImageFormatSetting, pub is_srgb: bool, @@ -150,8 +150,6 @@ impl AssetLoader for ImageLoader { } }; Ok(Image::from_buffer( - #[cfg(all(debug_assertions, feature = "dds"))] - load_context.path().display().to_string(), &bytes, image_type, self.supported_compressed_formats, diff --git a/crates/bevy_image/src/image_texture_conversion.rs b/crates/bevy_image/src/image_texture_conversion.rs index 7956e810cba0b..1eb3b78b1eaff 100644 --- a/crates/bevy_image/src/image_texture_conversion.rs +++ b/crates/bevy_image/src/image_texture_conversion.rs @@ -170,22 +170,26 @@ impl Image { /// /// To convert [`Image`] to a different 
format see: [`Image::convert`]. pub fn try_into_dynamic(self) -> Result { + let width = self.width(); + let height = self.height(); + let Some(data) = self.data else { + return Err(IntoDynamicImageError::UninitializedImage); + }; match self.texture_descriptor.format { - TextureFormat::R8Unorm => ImageBuffer::from_raw(self.width(), self.height(), self.data) - .map(DynamicImage::ImageLuma8), + TextureFormat::R8Unorm => { + ImageBuffer::from_raw(width, height, data).map(DynamicImage::ImageLuma8) + } TextureFormat::Rg8Unorm => { - ImageBuffer::from_raw(self.width(), self.height(), self.data) - .map(DynamicImage::ImageLumaA8) + ImageBuffer::from_raw(width, height, data).map(DynamicImage::ImageLumaA8) } TextureFormat::Rgba8UnormSrgb => { - ImageBuffer::from_raw(self.width(), self.height(), self.data) - .map(DynamicImage::ImageRgba8) + ImageBuffer::from_raw(width, height, data).map(DynamicImage::ImageRgba8) } // This format is commonly used as the format for the swapchain texture // This conversion is added here to support screenshots TextureFormat::Bgra8UnormSrgb | TextureFormat::Bgra8Unorm => { - ImageBuffer::from_raw(self.width(), self.height(), { - let mut data = self.data; + ImageBuffer::from_raw(width, height, { + let mut data = data; for bgra in data.chunks_exact_mut(4) { bgra.swap(0, 2); } @@ -213,6 +217,10 @@ pub enum IntoDynamicImageError { /// Encountered an unknown error during conversion. 
#[error("Failed to convert into {0:?}.")] UnknownConversionError(TextureFormat), + + /// Tried to convert an image that has no texture data + #[error("Image has no texture data")] + UninitializedImage, } #[cfg(test)] diff --git a/crates/bevy_image/src/ktx2.rs b/crates/bevy_image/src/ktx2.rs index ff89a95ef218b..bffea83d10569 100644 --- a/crates/bevy_image/src/ktx2.rs +++ b/crates/bevy_image/src/ktx2.rs @@ -13,9 +13,9 @@ use ktx2::{ BasicDataFormatDescriptor, ChannelTypeQualifiers, ColorModel, DataFormatDescriptorHeader, Header, SampleInformation, }; -use wgpu::TextureViewDescriptor; use wgpu_types::{ - AstcBlock, AstcChannel, Extent3d, TextureDimension, TextureFormat, TextureViewDimension, + AstcBlock, AstcChannel, Extent3d, TextureDimension, TextureFormat, TextureViewDescriptor, + TextureViewDimension, }; use super::{CompressedImageFormats, DataFormat, Image, TextureError, TranscodeFormat}; @@ -61,7 +61,7 @@ pub fn ktx2_buffer_to_image( #[cfg(feature = "ruzstd")] SupercompressionScheme::Zstandard => { let mut cursor = std::io::Cursor::new(_level_data); - let mut decoder = ruzstd::StreamingDecoder::new(&mut cursor) + let mut decoder = ruzstd::decoding::StreamingDecoder::new(&mut cursor) .map_err(|err| TextureError::SuperDecompressionError(err.to_string()))?; let mut decompressed = Vec::new(); decoder.read_to_end(&mut decompressed).map_err(|err| { @@ -266,7 +266,7 @@ pub fn ktx2_buffer_to_image( // error cases have been handled let mut image = Image::default(); image.texture_descriptor.format = texture_format; - image.data = wgpu_data.into_iter().flatten().collect::>(); + image.data = Some(wgpu_data.into_iter().flatten().collect::>()); image.texture_descriptor.size = Extent3d { width, height, diff --git a/crates/bevy_image/src/lib.rs b/crates/bevy_image/src/lib.rs index 2952bcec7cfeb..55f74a5f14d35 100644 --- a/crates/bevy_image/src/lib.rs +++ b/crates/bevy_image/src/lib.rs @@ -1,8 +1,13 @@ #![expect(missing_docs, reason = "Not all docs are written yet, see 
#3492.")] -#![allow(unsafe_code)] + +extern crate alloc; pub mod prelude { - pub use crate::{BevyDefault as _, Image, ImageFormat, TextureError}; + pub use crate::{ + dynamic_texture_atlas_builder::DynamicTextureAtlasBuilder, + texture_atlas::{TextureAtlas, TextureAtlasLayout, TextureAtlasSources}, + BevyDefault as _, Image, ImageFormat, TextureAtlasBuilder, TextureError, + }; } mod image; @@ -13,6 +18,7 @@ mod basis; mod compressed_image_saver; #[cfg(feature = "dds")] mod dds; +mod dynamic_texture_atlas_builder; #[cfg(feature = "exr")] mod exr_texture_loader; #[cfg(feature = "hdr")] @@ -20,11 +26,14 @@ mod hdr_texture_loader; mod image_loader; #[cfg(feature = "ktx2")] mod ktx2; +mod texture_atlas; +mod texture_atlas_builder; #[cfg(feature = "basis-universal")] pub use compressed_image_saver::*; #[cfg(feature = "dds")] pub use dds::*; +pub use dynamic_texture_atlas_builder::*; #[cfg(feature = "exr")] pub use exr_texture_loader::*; #[cfg(feature = "hdr")] @@ -32,6 +41,8 @@ pub use hdr_texture_loader::*; pub use image_loader::*; #[cfg(feature = "ktx2")] pub use ktx2::*; +pub use texture_atlas::*; +pub use texture_atlas_builder::*; pub(crate) mod image_texture_conversion; pub use image_texture_conversion::IntoDynamicImageError; diff --git a/crates/bevy_sprite/src/texture_atlas.rs b/crates/bevy_image/src/texture_atlas.rs similarity index 79% rename from crates/bevy_sprite/src/texture_atlas.rs rename to crates/bevy_image/src/texture_atlas.rs index 797fb4aa206ff..b5b68b0c41367 100644 --- a/crates/bevy_sprite/src/texture_atlas.rs +++ b/crates/bevy_image/src/texture_atlas.rs @@ -1,10 +1,28 @@ -use bevy_asset::{Asset, AssetId, Assets, Handle}; -use bevy_image::Image; -use bevy_math::{URect, UVec2}; +use bevy_app::prelude::*; +use bevy_asset::{Asset, AssetApp as _, AssetId, Assets, Handle}; +use bevy_math::{Rect, URect, UVec2}; +use bevy_platform::collections::HashMap; +#[cfg(not(feature = "bevy_reflect"))] +use bevy_reflect::TypePath; +#[cfg(feature = "bevy_reflect")] use 
bevy_reflect::{std_traits::ReflectDefault, Reflect}; #[cfg(feature = "serialize")] use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; -use bevy_utils::HashMap; + +use crate::Image; + +/// Adds support for texture atlases. +pub struct TextureAtlasPlugin; + +impl Plugin for TextureAtlasPlugin { + fn build(&self, app: &mut App) { + app.init_asset::(); + + #[cfg(feature = "bevy_reflect")] + app.register_asset_reflect::() + .register_type::(); + } +} /// Stores a mapping from sub texture handles to the related area index. /// @@ -35,7 +53,7 @@ impl TextureAtlasSources { }) } - /// Retrieves the texture *section* rectangle of the given `texture` handle. + /// Retrieves the texture *section* rectangle of the given `texture` handle in pixels. pub fn texture_rect( &self, layout: &TextureAtlasLayout, @@ -43,6 +61,20 @@ impl TextureAtlasSources { ) -> Option { layout.textures.get(self.texture_index(texture)?).cloned() } + + /// Retrieves the texture *section* rectangle of the given `texture` handle in UV coordinates. + /// These are within the range [0..1], as a fraction of the entire texture atlas' size. + pub fn uv_rect( + &self, + layout: &TextureAtlasLayout, + texture: impl Into>, + ) -> Option { + self.texture_rect(layout, texture).map(|rect| { + let rect = rect.as_rect(); + let size = layout.size.as_vec2(); + Rect::from_corners(rect.min / size, rect.max / size) + }) + } } /// Stores a map used to lookup the position of a texture in a [`TextureAtlas`]. 
@@ -56,10 +88,18 @@ impl TextureAtlasSources { /// [Example usage loading sprite sheet.](https://github.com/bevyengine/bevy/blob/latest/examples/2d/texture_atlas.rs) /// /// [`TextureAtlasBuilder`]: crate::TextureAtlasBuilder -#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[derive(Asset, Reflect, PartialEq, Eq, Debug, Clone)] -#[reflect(Debug, PartialEq)] -#[cfg_attr(feature = "serialize", reflect(Serialize, Deserialize))] +#[derive(Asset, PartialEq, Eq, Debug, Clone)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr( + feature = "serialize", + derive(serde::Serialize, serde::Deserialize), + reflect(Serialize, Deserialize) +)] +#[cfg_attr(not(feature = "bevy_reflect"), derive(TypePath))] pub struct TextureAtlasLayout { /// Total size of texture atlas. pub size: UVec2, @@ -163,8 +203,12 @@ impl TextureAtlasLayout { /// - [`animated sprite sheet example`](https://github.com/bevyengine/bevy/blob/latest/examples/2d/sprite_sheet.rs) /// - [`sprite animation event example`](https://github.com/bevyengine/bevy/blob/latest/examples/2d/sprite_animation.rs) /// - [`texture atlas example`](https://github.com/bevyengine/bevy/blob/latest/examples/2d/texture_atlas.rs) -#[derive(Default, Debug, Clone, Reflect)] -#[reflect(Default, Debug)] +#[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Default, Debug, PartialEq, Hash, Clone) +)] pub struct TextureAtlas { /// Texture atlas layout handle pub layout: Handle, diff --git a/crates/bevy_sprite/src/texture_atlas_builder.rs b/crates/bevy_image/src/texture_atlas_builder.rs similarity index 89% rename from crates/bevy_sprite/src/texture_atlas_builder.rs rename to crates/bevy_image/src/texture_atlas_builder.rs index 695e00f437e0d..2f23331c8cb2b 100644 --- a/crates/bevy_sprite/src/texture_atlas_builder.rs +++ b/crates/bevy_image/src/texture_atlas_builder.rs @@ -1,20 +1,15 
@@ -use bevy_asset::AssetId; -use bevy_image::{Image, TextureFormatPixelInfo}; +use bevy_asset::{AssetId, RenderAssetUsages}; use bevy_math::{URect, UVec2}; -use bevy_render::{ - render_asset::RenderAssetUsages, - render_resource::{Extent3d, TextureDimension, TextureFormat}, -}; -use bevy_utils::{ - tracing::{debug, error, warn}, - HashMap, -}; +use bevy_platform::collections::HashMap; use rectangle_pack::{ contains_smallest_box, pack_rects, volume_heuristic, GroupedRectsToPlace, PackedLocation, RectToInsert, TargetBin, }; use thiserror::Error; +use tracing::{debug, error, warn}; +use wgpu_types::{Extent3d, TextureDimension, TextureFormat}; +use crate::{Image, TextureFormatPixelInfo}; use crate::{TextureAtlasLayout, TextureAtlasSources}; #[derive(Debug, Error)] @@ -23,6 +18,12 @@ pub enum TextureAtlasBuilderError { NotEnoughSpace, #[error("added a texture with the wrong format in an atlas")] WrongFormat, + /// Attempted to add a texture to an uninitialized atlas + #[error("cannot add texture to uninitialized atlas texture")] + UninitializedAtlas, + /// Attempted to add an uninitialized texture to an atlas + #[error("cannot add uninitialized texture to atlas")] + UninitializedSourceTexture, } #[derive(Debug)] @@ -110,7 +111,7 @@ impl<'a> TextureAtlasBuilder<'a> { texture: &Image, packed_location: &PackedLocation, padding: UVec2, - ) { + ) -> TextureAtlasBuilderResult<()> { let rect_width = (packed_location.width() - padding.x) as usize; let rect_height = (packed_location.height() - padding.y) as usize; let rect_x = packed_location.x() as usize; @@ -118,14 +119,20 @@ impl<'a> TextureAtlasBuilder<'a> { let atlas_width = atlas_texture.width() as usize; let format_size = atlas_texture.texture_descriptor.format.pixel_size(); + let Some(ref mut atlas_data) = atlas_texture.data else { + return Err(TextureAtlasBuilderError::UninitializedAtlas); + }; + let Some(ref data) = texture.data else { + return Err(TextureAtlasBuilderError::UninitializedSourceTexture); + }; for 
(texture_y, bound_y) in (rect_y..rect_y + rect_height).enumerate() { let begin = (bound_y * atlas_width + rect_x) * format_size; let end = begin + rect_width * format_size; let texture_begin = texture_y * rect_width * format_size; let texture_end = texture_begin + rect_width * format_size; - atlas_texture.data[begin..end] - .copy_from_slice(&texture.data[texture_begin..texture_end]); + atlas_data[begin..end].copy_from_slice(&data[texture_begin..texture_end]); } + Ok(()) } fn copy_converted_texture( @@ -133,9 +140,9 @@ impl<'a> TextureAtlasBuilder<'a> { atlas_texture: &mut Image, texture: &Image, packed_location: &PackedLocation, - ) { + ) -> TextureAtlasBuilderResult<()> { if self.format == texture.texture_descriptor.format { - Self::copy_texture_to_atlas(atlas_texture, texture, packed_location, self.padding); + Self::copy_texture_to_atlas(atlas_texture, texture, packed_location, self.padding)?; } else if let Some(converted_texture) = texture.convert(self.format) { debug!( "Converting texture from '{:?}' to '{:?}'", @@ -146,23 +153,14 @@ impl<'a> TextureAtlasBuilder<'a> { &converted_texture, packed_location, self.padding, - ); + )?; } else { error!( "Error converting texture from '{:?}' to '{:?}', ignoring", texture.texture_descriptor.format, self.format ); } - } - - #[deprecated( - since = "0.14.0", - note = "TextureAtlasBuilder::finish() was not idiomatic. Use TextureAtlasBuilder::build() instead." 
- )] - pub fn finish( - &mut self, - ) -> Result<(TextureAtlasLayout, TextureAtlasSources, Image), TextureAtlasBuilderError> { - self.build() + Ok(()) } /// Consumes the builder, and returns the newly created texture atlas and @@ -175,13 +173,11 @@ impl<'a> TextureAtlasBuilder<'a> { /// # Usage /// /// ```rust - /// # use bevy_sprite::prelude::*; /// # use bevy_ecs::prelude::*; /// # use bevy_asset::*; - /// # use bevy_render::prelude::*; - /// # use bevy_image::Image; + /// # use bevy_image::prelude::*; /// - /// fn my_system(mut commands: Commands, mut textures: ResMut>, mut layouts: ResMut>) { + /// fn my_system(mut textures: ResMut>, mut layouts: ResMut>) { /// // Declare your builder /// let mut builder = TextureAtlasBuilder::default(); /// // Customize it @@ -190,8 +186,6 @@ impl<'a> TextureAtlasBuilder<'a> { /// let (atlas_layout, atlas_sources, texture) = builder.build().unwrap(); /// let texture = textures.add(texture); /// let layout = layouts.add(atlas_layout); - /// // Spawn your sprite - /// commands.spawn(Sprite::from_atlas_image(texture, TextureAtlas::from(layout))); /// } /// ``` /// @@ -290,7 +284,7 @@ impl<'a> TextureAtlasBuilder<'a> { ); return Err(TextureAtlasBuilderError::WrongFormat); } - self.copy_converted_texture(&mut atlas_texture, texture, packed_location); + self.copy_converted_texture(&mut atlas_texture, texture, packed_location)?; } Ok(( diff --git a/crates/bevy_input/Cargo.toml b/crates/bevy_input/Cargo.toml index fa185ea5feed9..570273a00ac59 100644 --- a/crates/bevy_input/Cargo.toml +++ b/crates/bevy_input/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_input" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides input functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,36 +9,76 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -default = ["bevy_reflect"] +default = ["std", "bevy_reflect", 
"bevy_ecs/async_executor", "smol_str"] + +# Functionality + +## Adds runtime reflection support using `bevy_reflect`. bevy_reflect = [ "dep:bevy_reflect", "bevy_app/bevy_reflect", "bevy_ecs/bevy_reflect", "bevy_math/bevy_reflect", ] -serialize = ["serde", "smol_str/serde"] + +## Adds serialization support through `serde`. +serialize = [ + "serde", + "smol_str?/serde", + "bevy_ecs/serialize", + "bevy_math/serialize", + "bevy_platform/serialize", +] + +## Uses the small-string optimization provided by `smol_str`. +smol_str = ["dep:smol_str", "bevy_reflect/smol_str"] + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. +std = [ + "bevy_app/std", + "bevy_ecs/std", + "bevy_math/std", + "bevy_utils/std", + "bevy_reflect/std", + "bevy_platform/std", +] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = [ + "bevy_app/critical-section", + "bevy_ecs/critical-section", + "bevy_reflect?/critical-section", + "bevy_platform/critical-section", +] + +## Uses the `libm` maths library instead of the one provided in `std` and `core`. 
+libm = ["bevy_math/libm"] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev", default-features = false } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev", default-features = false, features = [ - "serialize", -] } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev", default-features = false, features = [ - "rand", - "serialize", -] } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev", default-features = false } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ "glam", - "smol_str", -], optional = true } +], default-features = false, optional = true } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } # other -serde = { version = "1", features = ["derive"], optional = true } +serde = { version = "1", features = [ + "alloc", + "derive", +], default-features = false, optional = true } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } -smol_str = "0.2" +smol_str = { version = "0.2", default-features = false, optional = true } +log = { version = "0.4", default-features = false } [lints] workspace = true diff --git a/crates/bevy_input/LICENSE-APACHE b/crates/bevy_input/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_input/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 
+ + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_input/LICENSE-MIT b/crates/bevy_input/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_input/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_input/src/axis.rs b/crates/bevy_input/src/axis.rs index ffaadf01202d0..99909762c7919 100644 --- a/crates/bevy_input/src/axis.rs +++ b/crates/bevy_input/src/axis.rs @@ -1,7 +1,7 @@ //! The generic axis type. -use bevy_ecs::system::Resource; -use bevy_utils::HashMap; +use bevy_ecs::resource::Resource; +use bevy_platform::collections::HashMap; use core::hash::Hash; #[cfg(feature = "bevy_reflect")] diff --git a/crates/bevy_input/src/button_input.rs b/crates/bevy_input/src/button_input.rs index c781fafe49ac6..bc28381ab4ef6 100644 --- a/crates/bevy_input/src/button_input.rs +++ b/crates/bevy_input/src/button_input.rs @@ -1,7 +1,7 @@ //! The generic input type. 
-use bevy_ecs::system::Resource; -use bevy_utils::HashSet; +use bevy_ecs::resource::Resource; +use bevy_platform::collections::HashSet; use core::hash::Hash; #[cfg(feature = "bevy_reflect")] use { @@ -71,7 +71,7 @@ use { /// Reading and checking against the current set of pressed buttons: /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::{prelude::{IntoSystemConfigs, Res, Resource, resource_changed}, schedule::Condition}; +/// # use bevy_ecs::{prelude::{IntoScheduleConfigs, Res, Resource, resource_changed}, schedule::Condition}; /// # use bevy_input::{ButtonInput, prelude::{KeyCode, MouseButton}}; /// /// fn main() { diff --git a/crates/bevy_input/src/common_conditions.rs b/crates/bevy_input/src/common_conditions.rs index bc2e161b1b55a..c1c486e249ada 100644 --- a/crates/bevy_input/src/common_conditions.rs +++ b/crates/bevy_input/src/common_conditions.rs @@ -2,11 +2,11 @@ use crate::ButtonInput; use bevy_ecs::system::Res; use core::hash::Hash; -/// Stateful run condition that can be toggled via a input press using [`ButtonInput::just_pressed`]. +/// Stateful run condition that can be toggled via an input press using [`ButtonInput::just_pressed`]. 
/// /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::prelude::IntoSystemConfigs; +/// # use bevy_ecs::prelude::IntoScheduleConfigs; /// # use bevy_input::{common_conditions::input_toggle_active, prelude::KeyCode}; /// /// fn main() { @@ -25,7 +25,7 @@ use core::hash::Hash; /// you should use a custom resource or a state for that: /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::prelude::{IntoSystemConfigs, Res, ResMut, Resource}; +/// # use bevy_ecs::prelude::{IntoScheduleConfigs, Res, ResMut, Resource}; /// # use bevy_input::{common_conditions::input_just_pressed, prelude::KeyCode}; /// /// #[derive(Resource, Default)] @@ -74,7 +74,7 @@ where /// /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, Update}; -/// # use bevy_ecs::prelude::IntoSystemConfigs; +/// # use bevy_ecs::prelude::IntoScheduleConfigs; /// # use bevy_input::{common_conditions::input_just_pressed, prelude::KeyCode}; /// fn main() { /// App::new() @@ -104,7 +104,7 @@ where mod tests { use super::*; use crate::prelude::KeyCode; - use bevy_ecs::schedule::{IntoSystemConfigs, Schedule}; + use bevy_ecs::schedule::{IntoScheduleConfigs, Schedule}; fn test_system() {} diff --git a/crates/bevy_input/src/gamepad.rs b/crates/bevy_input/src/gamepad.rs index 5d057381fe11d..2b0148909ca92 100644 --- a/crates/bevy_input/src/gamepad.rs +++ b/crates/bevy_input/src/gamepad.rs @@ -1,6 +1,9 @@ //! The gamepad input functionality. 
+use core::{ops::RangeInclusive, time::Duration}; + use crate::{Axis, ButtonInput, ButtonState}; +use alloc::string::String; #[cfg(feature = "bevy_reflect")] use bevy_ecs::prelude::ReflectComponent; use bevy_ecs::{ @@ -9,19 +12,17 @@ use bevy_ecs::{ entity::Entity, event::{Event, EventReader, EventWriter}, name::Name, - prelude::require, system::{Commands, Query}, }; +use bevy_math::ops; use bevy_math::Vec2; +use bevy_platform::collections::HashMap; #[cfg(feature = "bevy_reflect")] use bevy_reflect::{std_traits::ReflectDefault, Reflect}; #[cfg(all(feature = "serialize", feature = "bevy_reflect"))] use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; -use bevy_utils::{ - tracing::{info, warn}, - Duration, HashMap, -}; use derive_more::derive::From; +use log::{info, warn}; use thiserror::Error; /// A gamepad event. @@ -30,9 +31,13 @@ use thiserror::Error; /// [`GamepadButtonChangedEvent`] and [`GamepadAxisChangedEvent`] when /// the in-frame relative ordering of events is important. /// -/// This event is produced by `bevy_input` +/// This event is produced by `bevy_input`. #[derive(Event, Debug, Clone, PartialEq, From)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -54,11 +59,15 @@ pub enum GamepadEvent { /// the in-frame relative ordering of events is important. /// /// This event type is used by `bevy_input` to feed its components. 
-#[derive(Event, Debug, Clone, PartialEq, Reflect, From)] -#[reflect(Debug, PartialEq)] +#[derive(Event, Debug, Clone, PartialEq, From)] #[cfg_attr( - feature = "serialize", - derive(serde::Serialize, serde::Deserialize), + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] pub enum RawGamepadEvent { @@ -70,12 +79,16 @@ pub enum RawGamepadEvent { Axis(RawGamepadAxisChangedEvent), } -/// [`GamepadButton`] changed event unfiltered by [`GamepadSettings`] -#[derive(Event, Debug, Copy, Clone, PartialEq, Reflect)] -#[reflect(Debug, PartialEq)] +/// [`GamepadButton`] changed event unfiltered by [`GamepadSettings`]. +#[derive(Event, Debug, Copy, Clone, PartialEq)] #[cfg_attr( - feature = "serialize", - derive(serde::Serialize, serde::Deserialize), + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] pub struct RawGamepadButtonChangedEvent { @@ -98,12 +111,16 @@ impl RawGamepadButtonChangedEvent { } } -/// [`GamepadAxis`] changed event unfiltered by [`GamepadSettings`] -#[derive(Event, Debug, Copy, Clone, PartialEq, Reflect)] -#[reflect(Debug, PartialEq)] +/// [`GamepadAxis`] changed event unfiltered by [`GamepadSettings`]. 
+#[derive(Event, Debug, Copy, Clone, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( - feature = "serialize", - derive(serde::Serialize, serde::Deserialize), + all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] pub struct RawGamepadAxisChangedEvent { @@ -128,11 +145,15 @@ impl RawGamepadAxisChangedEvent { /// A Gamepad connection event. Created when a connection to a gamepad /// is established and when a gamepad is disconnected. -#[derive(Event, Debug, Clone, PartialEq, Reflect)] -#[reflect(Debug, PartialEq)] +#[derive(Event, Debug, Clone, PartialEq)] #[cfg_attr( - feature = "serialize", - derive(serde::Serialize, serde::Deserialize), + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] pub struct GamepadConnectionEvent { @@ -151,23 +172,27 @@ impl GamepadConnectionEvent { } } - /// Is the gamepad connected? + /// Whether the gamepad is connected. pub fn connected(&self) -> bool { matches!(self.connection, GamepadConnection::Connected { .. }) } - /// Is the gamepad disconnected? + /// Whether the gamepad is disconnected. pub fn disconnected(&self) -> bool { !self.connected() } } -/// [`GamepadButton`] event triggered by a digital state change -#[derive(Event, Debug, Clone, Copy, PartialEq, Eq, Reflect)] -#[reflect(Debug, PartialEq)] +/// [`GamepadButton`] event triggered by a digital state change. 
+#[derive(Event, Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( - feature = "serialize", - derive(serde::Serialize, serde::Deserialize), + all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] pub struct GamepadButtonStateChangedEvent { @@ -180,7 +205,7 @@ pub struct GamepadButtonStateChangedEvent { } impl GamepadButtonStateChangedEvent { - /// Creates a new [`GamepadButtonStateChangedEvent`] + /// Creates a new [`GamepadButtonStateChangedEvent`]. pub fn new(entity: Entity, button: GamepadButton, state: ButtonState) -> Self { Self { entity, @@ -190,12 +215,16 @@ impl GamepadButtonStateChangedEvent { } } -/// [`GamepadButton`] event triggered by an analog state change -#[derive(Event, Debug, Clone, Copy, PartialEq, Reflect)] -#[reflect(Debug, PartialEq)] +/// [`GamepadButton`] event triggered by an analog state change. +#[derive(Event, Debug, Clone, Copy, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( - feature = "serialize", - derive(serde::Serialize, serde::Deserialize), + all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] pub struct GamepadButtonChangedEvent { @@ -205,12 +234,12 @@ pub struct GamepadButtonChangedEvent { pub button: GamepadButton, /// The pressed state of the button. pub state: ButtonState, - /// The analog value of the button. + /// The analog value of the button (rescaled to be in the 0.0..=1.0 range). pub value: f32, } impl GamepadButtonChangedEvent { - /// Creates a new [`GamepadButtonChangedEvent`] + /// Creates a new [`GamepadButtonChangedEvent`]. 
pub fn new(entity: Entity, button: GamepadButton, state: ButtonState, value: f32) -> Self { Self { entity, @@ -221,12 +250,16 @@ impl GamepadButtonChangedEvent { } } -/// [`GamepadAxis`] event triggered by an analog state change -#[derive(Event, Debug, Clone, Copy, PartialEq, Reflect)] -#[reflect(Debug, PartialEq)] +/// [`GamepadAxis`] event triggered by an analog state change. +#[derive(Event, Debug, Clone, Copy, PartialEq)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( - feature = "serialize", - derive(serde::Serialize, serde::Deserialize), + all(feature = "bevy_reflect", feature = "serialize"), reflect(Serialize, Deserialize) )] pub struct GamepadAxisChangedEvent { @@ -234,12 +267,12 @@ pub struct GamepadAxisChangedEvent { pub entity: Entity, /// The gamepad axis assigned to the event. pub axis: GamepadAxis, - /// The value of this axis. + /// The value of this axis (rescaled to account for axis settings). pub value: f32, } impl GamepadAxisChangedEvent { - /// Creates a new [`GamepadAxisChangedEvent`] + /// Creates a new [`GamepadAxisChangedEvent`]. pub fn new(entity: Entity, axis: GamepadAxis, value: f32) -> Self { Self { entity, @@ -333,18 +366,20 @@ pub enum ButtonSettingsError { /// } /// ``` #[derive(Component, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Component))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Component, Default) +)] #[require(GamepadSettings)] pub struct Gamepad { /// The USB vendor ID as assigned by the USB-IF, if available. pub(crate) vendor_id: Option, - /// The USB product ID as assigned by the [vendor], if available. - /// - /// [vendor]: Self::vendor_id + /// The USB product ID as assigned by the [vendor][Self::vendor_id], if available. 
pub(crate) product_id: Option, - /// [`ButtonInput`] of [`GamepadButton`] representing their digital state + /// [`ButtonInput`] of [`GamepadButton`] representing their digital state. pub(crate) digital: ButtonInput, /// [`Axis`] of [`GamepadButton`] representing their analog state. @@ -378,7 +413,7 @@ impl Gamepad { self.analog.get_unclamped(input.into()) } - /// Returns the left stick as a [`Vec2`] + /// Returns the left stick as a [`Vec2`]. pub fn left_stick(&self) -> Vec2 { Vec2 { x: self.get(GamepadAxis::LeftStickX).unwrap_or(0.0), @@ -386,7 +421,7 @@ impl Gamepad { } } - /// Returns the right stick as a [`Vec2`] + /// Returns the right stick as a [`Vec2`]. pub fn right_stick(&self) -> Vec2 { Vec2 { x: self.get(GamepadAxis::RightStickX).unwrap_or(0.0), @@ -394,7 +429,7 @@ impl Gamepad { } } - /// Returns the directional pad as a [`Vec2`] + /// Returns the directional pad as a [`Vec2`]. pub fn dpad(&self) -> Vec2 { Vec2 { x: self.get(GamepadButton::DPadRight).unwrap_or(0.0) @@ -480,14 +515,12 @@ impl Gamepad { self.digital.get_just_released() } - /// Returns an iterator over all analog [axes]. - /// - /// [axes]: GamepadInput + /// Returns an iterator over all analog [axes][GamepadInput]. pub fn get_analog_axes(&self) -> impl Iterator { self.analog.all_axes() } - /// [`ButtonInput`] of [`GamepadButton`] representing their digital state + /// [`ButtonInput`] of [`GamepadButton`] representing their digital state. pub fn digital(&self) -> &ButtonInput { &self.digital } @@ -531,13 +564,13 @@ impl Default for Gamepad { /// /// ## Usage /// -/// This is used to determine which button has changed its value when receiving gamepad button events +/// This is used to determine which button has changed its value when receiving gamepad button events. /// It is also used in the [`Gamepad`] component. 
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -593,7 +626,7 @@ pub enum GamepadButton { } impl GamepadButton { - /// Returns an array of all the standard [`GamepadButton`] + /// Returns an array of all the standard [`GamepadButton`]. pub const fn all() -> [GamepadButton; 19] { [ GamepadButton::South, @@ -619,14 +652,18 @@ } } -/// Represents gamepad input types that are mapped in the range [-1.0, 1.0] +/// Represents gamepad input types that are mapped in the range [-1.0, 1.0]. /// /// ## Usage /// /// This is used to determine which axis has changed its value when receiving a /// gamepad axis event. It is also used in the [`Gamepad`] component. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Hash, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -637,16 +674,16 @@ pub enum GamepadAxis { LeftStickX, /// The vertical value of the left stick. LeftStickY, - /// The value of the left `Z` button. + /// Generally the throttle axis of a HOTAS setup. + /// Refer to [`GamepadButton::LeftTrigger2`] for the analog trigger on a gamepad controller. LeftZ, /// The horizontal value of the right stick. RightStickX, /// The vertical value of the right stick. RightStickY, - /// The value of the right `Z` button. + /// The yaw of the main joystick, not supported on common gamepads. + /// Refer to [`GamepadButton::RightTrigger2`] for the analog trigger on a gamepad controller.
RightZ, /// Non-standard support for other axis types (i.e. HOTAS sliders, potentiometers, etc). Other(u8), } @@ -665,14 +702,18 @@ impl GamepadAxis { } } -/// Encapsulation over [`GamepadAxis`] and [`GamepadButton`] +/// Encapsulation over [`GamepadAxis`] and [`GamepadButton`]. // This is done so Gamepad can share a single Axis and simplifies the API by having only one get/get_unclamped method #[derive(Debug, Copy, Clone, Eq, Hash, PartialEq, From)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Hash, PartialEq, Clone) +)] pub enum GamepadInput { - /// A [`GamepadAxis`] + /// A [`GamepadAxis`]. Axis(GamepadAxis), - /// A [`GamepadButton`] + /// A [`GamepadButton`]. Button(GamepadButton), } @@ -693,7 +734,7 @@ #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Component) + reflect(Debug, Default, Component, Clone) )] pub struct GamepadSettings { /// The default button settings. @@ -774,7 +815,11 @@ impl GamepadSettings { /// /// Allowed values: `0.0 <= ``release_threshold`` <= ``press_threshold`` <= 1.0` #[derive(Debug, PartialEq, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct ButtonSettings { press_threshold: f32, release_threshold: f32, @@ -928,13 +973,17 @@ impl ButtonSettings { /// threshold for an axis. /// Values that are higher than `livezone_upperbound` will be rounded up to 1.0. /// Values that are lower than `livezone_lowerbound` will be rounded down to -1.0. -/// Values that are in-between `deadzone_lowerbound` and `deadzone_upperbound` will be rounded -/// to 0.0. -/// Otherwise, values will not be rounded. +/// Values that are in-between `deadzone_lowerbound` and `deadzone_upperbound` will be rounded to 0.0.
+/// Otherwise, values will be linearly rescaled to fit into the sensitivity range. +/// For example, a value that is one fourth of the way from `deadzone_upperbound` to `livezone_upperbound` will be scaled to 0.25. /// /// The valid range is `[-1.0, 1.0]`. #[derive(Debug, Clone, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Default, Clone) +)] pub struct AxisSettings { /// Values that are higher than `livezone_upperbound` will be rounded up to 1.0. livezone_upperbound: f32, @@ -1041,7 +1090,7 @@ impl AxisSettings { /// /// # Errors /// - /// If the value passed is less than the dead zone upper bound, + /// If the value passed is less than the deadzone upper bound, /// returns `AxisSettingsError::DeadZoneUpperBoundGreaterThanLiveZoneUpperBound`. /// If the value passed is not in range [0.0..=1.0], returns `AxisSettingsError::LiveZoneUpperBoundOutOfRange`. pub fn try_set_livezone_upperbound(&mut self, value: f32) -> Result<(), AxisSettingsError> { @@ -1117,7 +1166,7 @@ impl AxisSettings { /// /// # Errors /// - /// If the value passed is less than the dead zone lower bound, + /// If the value passed is less than the deadzone lower bound, /// returns `AxisSettingsError::LiveZoneLowerBoundGreaterThanDeadZoneLowerBound`. /// If the value passed is not in range [-1.0..=0.0], returns `AxisSettingsError::LiveZoneLowerBoundOutOfRange`. pub fn try_set_livezone_lowerbound(&mut self, value: f32) -> Result<(), AxisSettingsError> { @@ -1213,39 +1262,135 @@ impl AxisSettings { } /// Clamps the `raw_value` according to the `AxisSettings`. 
- pub fn clamp(&self, new_value: f32) -> f32 { - if self.deadzone_lowerbound <= new_value && new_value <= self.deadzone_upperbound { + pub fn clamp(&self, raw_value: f32) -> f32 { + if self.deadzone_lowerbound <= raw_value && raw_value <= self.deadzone_upperbound { 0.0 - } else if new_value >= self.livezone_upperbound { + } else if raw_value >= self.livezone_upperbound { 1.0 - } else if new_value <= self.livezone_lowerbound { + } else if raw_value <= self.livezone_lowerbound { -1.0 } else { - new_value + raw_value } } - /// Determines whether the change from `old_value` to `new_value` should + /// Determines whether the change from `old_raw_value` to `new_raw_value` should /// be registered as a change, according to the [`AxisSettings`]. - fn should_register_change(&self, new_value: f32, old_value: Option) -> bool { - if old_value.is_none() { - return true; + fn should_register_change(&self, new_raw_value: f32, old_raw_value: Option) -> bool { + match old_raw_value { + None => true, + Some(old_raw_value) => ops::abs(new_raw_value - old_raw_value) >= self.threshold, } - - f32::abs(new_value - old_value.unwrap()) > self.threshold } - /// Filters the `new_value` based on the `old_value`, according to the [`AxisSettings`]. + /// Filters the `new_raw_value` based on the `old_raw_value`, according to the [`AxisSettings`]. /// - /// Returns the clamped `new_value` if the change exceeds the settings threshold, + /// Returns the clamped and scaled `new_raw_value` if the change exceeds the settings threshold, /// and `None` otherwise. 
- pub fn filter(&self, new_value: f32, old_value: Option) -> Option { - let new_value = self.clamp(new_value); + fn filter( + &self, + new_raw_value: f32, + old_raw_value: Option, + ) -> Option { + let clamped_unscaled = self.clamp(new_raw_value); + match self.should_register_change(clamped_unscaled, old_raw_value) { + true => Some(FilteredAxisPosition { + scaled: self.get_axis_position_from_value(clamped_unscaled), + raw: new_raw_value, + }), + false => None, + } + } + + #[inline(always)] + fn get_axis_position_from_value(&self, value: f32) -> ScaledAxisWithDeadZonePosition { + if value < self.deadzone_upperbound && value > self.deadzone_lowerbound { + ScaledAxisWithDeadZonePosition::Dead + } else if value > self.livezone_upperbound { + ScaledAxisWithDeadZonePosition::AboveHigh + } else if value < self.livezone_lowerbound { + ScaledAxisWithDeadZonePosition::BelowLow + } else if value >= self.deadzone_upperbound { + ScaledAxisWithDeadZonePosition::High(linear_remapping( + value, + self.deadzone_upperbound..=self.livezone_upperbound, + 0.0..=1.0, + )) + } else if value <= self.deadzone_lowerbound { + ScaledAxisWithDeadZonePosition::Low(linear_remapping( + value, + self.livezone_lowerbound..=self.deadzone_lowerbound, + -1.0..=0.0, + )) + } else { + unreachable!(); + } + } +} + +/// A linear remapping of `value` from `old` to `new`. +fn linear_remapping(value: f32, old: RangeInclusive, new: RangeInclusive) -> f32 { + // https://stackoverflow.com/a/929104 + ((value - old.start()) / (old.end() - old.start())) * (new.end() - new.start()) + new.start() +} + +#[derive(Debug, Clone, Copy)] +/// Deadzone-aware axis position. +enum ScaledAxisWithDeadZonePosition { + /// The input clipped below the valid range of the axis. + BelowLow, + /// The input is lower than the deadzone. + Low(f32), + /// The input falls within the deadzone, meaning it is counted as 0. + Dead, + /// The input is higher than the deadzone. 
+ High(f32), + /// The input clipped above the valid range of the axis. + AboveHigh, +} + +struct FilteredAxisPosition { + scaled: ScaledAxisWithDeadZonePosition, + raw: f32, +} + +impl ScaledAxisWithDeadZonePosition { + /// Converts the value into a float in the range [-1, 1]. + fn to_f32(self) -> f32 { + match self { + ScaledAxisWithDeadZonePosition::BelowLow => -1., + ScaledAxisWithDeadZonePosition::Low(scaled) + | ScaledAxisWithDeadZonePosition::High(scaled) => scaled, + ScaledAxisWithDeadZonePosition::Dead => 0., + ScaledAxisWithDeadZonePosition::AboveHigh => 1., + } + } +} + +#[derive(Debug, Clone, Copy)] +/// Low/High-aware axis position. +enum ScaledAxisPosition { + /// The input fell short of the "low" value. + ClampedLow, + /// The input was in the normal range. + Scaled(f32), + /// The input surpassed the "high" value. + ClampedHigh, +} - if self.should_register_change(new_value, old_value) { - return Some(new_value); +struct FilteredButtonAxisPosition { + scaled: ScaledAxisPosition, + raw: f32, +} + +impl ScaledAxisPosition { + /// Converts the value into a float in the range [0, 1]. + fn to_f32(self) -> f32 { + match self { + ScaledAxisPosition::ClampedLow => 0., + ScaledAxisPosition::Scaled(scaled) => scaled, + ScaledAxisPosition::ClampedHigh => 1., } - None } } @@ -1262,7 +1407,11 @@ impl AxisSettings { /// /// The valid range is from 0.0 to 1.0, inclusive. #[derive(Debug, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct ButtonAxisSettings { /// The high value at which to apply rounding. pub high: f32, @@ -1300,27 +1449,48 @@ impl ButtonAxisSettings { raw_value } - /// Determines whether the change from an `old_value` to a `new_value` should + /// Determines whether the change from an `old_raw_value` to a `new_raw_value` should /// be registered as a change event, according to the specified settings. 
- fn should_register_change(&self, new_value: f32, old_value: Option) -> bool { - if old_value.is_none() { - return true; + fn should_register_change(&self, new_raw_value: f32, old_raw_value: Option) -> bool { + match old_raw_value { + None => true, + Some(old_raw_value) => ops::abs(new_raw_value - old_raw_value) >= self.threshold, } - - f32::abs(new_value - old_value.unwrap()) > self.threshold } - /// Filters the `new_value` based on the `old_value`, according to the [`ButtonAxisSettings`]. + /// Filters the `new_raw_value` based on the `old_raw_value`, according to the [`ButtonAxisSettings`]. /// - /// Returns the clamped `new_value`, according to the [`ButtonAxisSettings`], if the change + /// Returns the clamped and scaled `new_raw_value`, according to the [`ButtonAxisSettings`], if the change /// exceeds the settings threshold, and `None` otherwise. - pub fn filter(&self, new_value: f32, old_value: Option) -> Option { - let new_value = self.clamp(new_value); + fn filter( + &self, + new_raw_value: f32, + old_raw_value: Option, + ) -> Option { + let clamped_unscaled = self.clamp(new_raw_value); + match self.should_register_change(clamped_unscaled, old_raw_value) { + true => Some(FilteredButtonAxisPosition { + scaled: self.get_axis_position_from_value(clamped_unscaled), + raw: new_raw_value, + }), + false => None, + } + } - if self.should_register_change(new_value, old_value) { - return Some(new_value); + /// Clamps and scales the `value` according to the specified settings. + /// + /// If the `value` is: + /// - lower than or equal to `low` it will be rounded to 0.0. + /// - higher than or equal to `high` it will be rounded to 1.0. + /// - Otherwise, it will be scaled from (low, high) to (0, 1). 
+ fn get_axis_position_from_value(&self, value: f32) -> ScaledAxisPosition { + if value <= self.low { + ScaledAxisPosition::ClampedLow + } else if value >= self.high { + ScaledAxisPosition::ClampedHigh + } else { + ScaledAxisPosition::Scaled(linear_remapping(value, self.low..=self.high, 0.0..=1.0)) } - None } } @@ -1328,7 +1498,7 @@ impl ButtonAxisSettings { /// /// On connection, adds the components representing a [`Gamepad`] to the entity. /// On disconnection, removes the [`Gamepad`] and other related components. -/// Entities are left alive and might leave components like [`GamepadSettings`] to preserve state in the case of a reconnection +/// Entities are left alive and might leave components like [`GamepadSettings`] to preserve state in the case of a reconnection. /// /// ## Note /// @@ -1345,8 +1515,8 @@ pub fn gamepad_connection_system( vendor_id, product_id, } => { - let Some(mut gamepad) = commands.get_entity(id) else { - warn!("Gamepad {:} removed before handling connection event.", id); + let Ok(mut gamepad) = commands.get_entity(id) else { + warn!("Gamepad {} removed before handling connection event.", id); continue; }; gamepad.insert(( @@ -1357,18 +1527,18 @@ pub fn gamepad_connection_system( ..Default::default() }, )); - info!("Gamepad {:?} connected.", id); + info!("Gamepad {} connected.", id); } GamepadConnection::Disconnected => { - let Some(mut gamepad) = commands.get_entity(id) else { - warn!("Gamepad {:} removed before handling disconnection event. You can ignore this if you manually removed it.", id); + let Ok(mut gamepad) = commands.get_entity(id) else { + warn!("Gamepad {} removed before handling disconnection event. You can ignore this if you manually removed it.", id); continue; }; // Gamepad entities are left alive to preserve their state (e.g. [`GamepadSettings`]). // Instead of despawning, we remove Gamepad components that don't need to preserve state // and re-add them if they ever reconnect. 
gamepad.remove::(); - info!("Gamepad {:} disconnected.", id); + info!("Gamepad {} disconnected.", id); } } } @@ -1379,7 +1549,11 @@ pub fn gamepad_connection_system( // /// The connection status of a gamepad. #[derive(Debug, Clone, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1424,7 +1598,7 @@ pub fn gamepad_event_processing_system( match event { // Connections require inserting/removing components so they are done in a separate system RawGamepadEvent::Connection(send_event) => { - processed_events.send(GamepadEvent::from(send_event.clone())); + processed_events.write(GamepadEvent::from(send_event.clone())); } RawGamepadEvent::Axis(RawGamepadAxisChangedEvent { gamepad, @@ -1441,11 +1615,11 @@ pub fn gamepad_event_processing_system( else { continue; }; - - gamepad_axis.analog.set(axis, filtered_value); - let send_event = GamepadAxisChangedEvent::new(gamepad, axis, filtered_value); - processed_axis_events.send(send_event); - processed_events.send(GamepadEvent::from(send_event)); + gamepad_axis.analog.set(axis, filtered_value.raw); + let send_event = + GamepadAxisChangedEvent::new(gamepad, axis, filtered_value.scaled.to_f32()); + processed_axis_events.write(send_event); + processed_events.write(GamepadEvent::from(send_event)); } RawGamepadEvent::Button(RawGamepadButtonChangedEvent { gamepad, @@ -1463,12 +1637,12 @@ pub fn gamepad_event_processing_system( continue; }; let button_settings = settings.get_button_settings(button); - gamepad_buttons.analog.set(button, filtered_value); + gamepad_buttons.analog.set(button, filtered_value.raw); - if button_settings.is_released(filtered_value) { + if button_settings.is_released(filtered_value.raw) { // Check if button was previously 
pressed if gamepad_buttons.pressed(button) { - processed_digital_events.send(GamepadButtonStateChangedEvent::new( + processed_digital_events.write(GamepadButtonStateChangedEvent::new( gamepad, button, ButtonState::Released, @@ -1477,10 +1651,10 @@ pub fn gamepad_event_processing_system( // We don't have to check if the button was previously pressed here // because that check is performed within Input::release() gamepad_buttons.digital.release(button); - } else if button_settings.is_pressed(filtered_value) { + } else if button_settings.is_pressed(filtered_value.raw) { // Check if button was previously not pressed if !gamepad_buttons.pressed(button) { - processed_digital_events.send(GamepadButtonStateChangedEvent::new( + processed_digital_events.write(GamepadButtonStateChangedEvent::new( gamepad, button, ButtonState::Pressed, @@ -1494,10 +1668,14 @@ pub fn gamepad_event_processing_system( } else { ButtonState::Released }; - let send_event = - GamepadButtonChangedEvent::new(gamepad, button, button_state, filtered_value); - processed_analog_events.send(send_event); - processed_events.send(GamepadEvent::from(send_event)); + let send_event = GamepadButtonChangedEvent::new( + gamepad, + button, + button_state, + filtered_value.scaled.to_f32(), + ); + processed_analog_events.write(send_event); + processed_events.write(GamepadEvent::from(send_event)); } } } @@ -1505,7 +1683,11 @@ pub fn gamepad_event_processing_system( /// The intensity at which a gamepad's force-feedback motors may rumble. #[derive(Clone, Copy, Debug, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] pub struct GamepadRumbleIntensity { /// The rumble intensity of the strong gamepad motor. 
/// @@ -1574,13 +1756,13 @@ impl GamepadRumbleIntensity { /// ``` /// # use bevy_input::gamepad::{Gamepad, GamepadRumbleRequest, GamepadRumbleIntensity}; /// # use bevy_ecs::prelude::{EventWriter, Res, Query, Entity, With}; -/// # use bevy_utils::Duration; +/// # use core::time::Duration; /// fn rumble_gamepad_system( /// mut rumble_requests: EventWriter, /// gamepads: Query>, /// ) { /// for entity in gamepads.iter() { -/// rumble_requests.send(GamepadRumbleRequest::Add { +/// rumble_requests.write(GamepadRumbleRequest::Add { /// gamepad: entity, /// intensity: GamepadRumbleIntensity::MAX, /// duration: Duration::from_secs_f32(0.5), @@ -1593,7 +1775,7 @@ impl GamepadRumbleIntensity { #[doc(alias = "vibration")] #[doc(alias = "vibrate")] #[derive(Event, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Clone))] pub enum GamepadRumbleRequest { /// Add a rumble to the given gamepad. /// @@ -1641,137 +1823,169 @@ mod tests { RawGamepadButtonChangedEvent, RawGamepadEvent, }; use crate::ButtonState; + use alloc::string::ToString; use bevy_app::{App, PreUpdate}; use bevy_ecs::entity::Entity; use bevy_ecs::event::Events; - use bevy_ecs::schedule::IntoSystemConfigs; + use bevy_ecs::schedule::IntoScheduleConfigs; fn test_button_axis_settings_filter( settings: ButtonAxisSettings, - new_value: f32, - old_value: Option, + new_raw_value: f32, + old_raw_value: Option, expected: Option, ) { - let actual = settings.filter(new_value, old_value); + let actual = settings + .filter(new_raw_value, old_raw_value) + .map(|f| f.scaled.to_f32()); assert_eq!( expected, actual, - "Testing filtering for {settings:?} with new_value = {new_value:?}, old_value = {old_value:?}", + "Testing filtering for {settings:?} with new_raw_value = {new_raw_value:?}, old_raw_value = {old_raw_value:?}", ); } #[test] fn test_button_axis_settings_default_filter() { let cases = [ + // clamped (1.0, None, Some(1.0)), (0.99, None, 
Some(1.0)), (0.96, None, Some(1.0)), (0.95, None, Some(1.0)), - (0.9499, None, Some(0.9499)), - (0.84, None, Some(0.84)), - (0.43, None, Some(0.43)), - (0.05001, None, Some(0.05001)), + // linearly rescaled from 0.05..=0.95 to 0.0..=1.0 + (0.9499, None, Some(0.9998889)), + (0.84, None, Some(0.87777776)), + (0.43, None, Some(0.42222223)), + (0.05001, None, Some(0.000011109644)), + // clamped (0.05, None, Some(0.0)), (0.04, None, Some(0.0)), (0.01, None, Some(0.0)), (0.0, None, Some(0.0)), ]; - for (new_value, old_value, expected) in cases { + for (new_raw_value, old_raw_value, expected) in cases { let settings = ButtonAxisSettings::default(); - test_button_axis_settings_filter(settings, new_value, old_value, expected); + test_button_axis_settings_filter(settings, new_raw_value, old_raw_value, expected); } } #[test] - fn test_button_axis_settings_default_filter_with_old_value() { + fn test_button_axis_settings_default_filter_with_old_raw_value() { let cases = [ - (0.43, Some(0.44001), Some(0.43)), + // 0.43 gets rescaled to 0.42222223 (0.05..=0.95 -> 0.0..=1.0) + (0.43, Some(0.44001), Some(0.42222223)), (0.43, Some(0.44), None), (0.43, Some(0.43), None), - (0.43, Some(0.41999), Some(0.43)), - (0.43, Some(0.17), Some(0.43)), - (0.43, Some(0.84), Some(0.43)), + (0.43, Some(0.41999), Some(0.42222223)), + (0.43, Some(0.17), Some(0.42222223)), + (0.43, Some(0.84), Some(0.42222223)), (0.05, Some(0.055), Some(0.0)), (0.95, Some(0.945), Some(1.0)), ]; - for (new_value, old_value, expected) in cases { + for (new_raw_value, old_raw_value, expected) in cases { let settings = ButtonAxisSettings::default(); - test_button_axis_settings_filter(settings, new_value, old_value, expected); + test_button_axis_settings_filter(settings, new_raw_value, old_raw_value, expected); } } fn test_axis_settings_filter( settings: AxisSettings, - new_value: f32, - old_value: Option, + new_raw_value: f32, + old_raw_value: Option, expected: Option, ) { - let actual = settings.filter(new_value, 
old_value); + let actual = settings.filter(new_raw_value, old_raw_value); assert_eq!( - expected, actual, - "Testing filtering for {settings:?} with new_value = {new_value:?}, old_value = {old_value:?}", + expected, actual.map(|f| f.scaled.to_f32()), + "Testing filtering for {settings:?} with new_raw_value = {new_raw_value:?}, old_raw_value = {old_raw_value:?}", ); } #[test] fn test_axis_settings_default_filter() { + // new (raw), expected (rescaled linearly) let cases = [ + // high enough to round to 1.0 (1.0, Some(1.0)), (0.99, Some(1.0)), (0.96, Some(1.0)), (0.95, Some(1.0)), - (0.9499, Some(0.9499)), - (0.84, Some(0.84)), - (0.43, Some(0.43)), - (0.05001, Some(0.05001)), + // for the following, remember that 0.05 is the "low" value and 0.95 is the "high" value + // barely below the high value means barely below 1 after scaling + (0.9499, Some(0.9998889)), // scaled as: (0.9499 - 0.05) / (0.95 - 0.05) + (0.84, Some(0.87777776)), // scaled as: (0.84 - 0.05) / (0.95 - 0.05) + (0.43, Some(0.42222223)), // scaled as: (0.43 - 0.05) / (0.95 - 0.05) + // barely above the low value means barely above 0 after scaling + (0.05001, Some(0.000011109644)), // scaled as: (0.05001 - 0.05) / (0.95 - 0.05) + // low enough to be rounded to 0 (dead zone) (0.05, Some(0.0)), (0.04, Some(0.0)), (0.01, Some(0.0)), (0.0, Some(0.0)), + // same exact tests as above, but below 0 (bottom half of the dead zone and live zone) + // low enough to be rounded to -1 (-1.0, Some(-1.0)), (-0.99, Some(-1.0)), (-0.96, Some(-1.0)), (-0.95, Some(-1.0)), - (-0.9499, Some(-0.9499)), - (-0.84, Some(-0.84)), - (-0.43, Some(-0.43)), - (-0.05001, Some(-0.05001)), + // scaled inputs + (-0.9499, Some(-0.9998889)), // scaled as: (-0.9499 - -0.05) / (-0.95 - -0.05) + (-0.84, Some(-0.87777776)), // scaled as: (-0.84 - -0.05) / (-0.95 - -0.05) + (-0.43, Some(-0.42222226)), // scaled as: (-0.43 - -0.05) / (-0.95 - -0.05) + (-0.05001, Some(-0.000011146069)), // scaled as: (-0.05001 - -0.05) / (-0.95 - -0.05) + // 
high enough to be rounded to 0 (dead zone) (-0.05, Some(0.0)), (-0.04, Some(0.0)), (-0.01, Some(0.0)), ]; - for (new_value, expected) in cases { + for (new_raw_value, expected) in cases { let settings = AxisSettings::new(-0.95, -0.05, 0.05, 0.95, 0.01).unwrap(); - test_axis_settings_filter(settings, new_value, None, expected); + test_axis_settings_filter(settings, new_raw_value, None, expected); } } #[test] - fn test_axis_settings_default_filter_with_old_values() { + fn test_axis_settings_default_filter_with_old_raw_values() { + let threshold = 0.01; + // expected values are hardcoded to be rescaled to from 0.05..=0.95 to 0.0..=1.0 + // new (raw), old (raw), expected let cases = [ - (0.43, Some(0.44001), Some(0.43)), - (0.43, Some(0.44), None), - (0.43, Some(0.43), None), - (0.43, Some(0.41999), Some(0.43)), - (0.43, Some(0.17), Some(0.43)), - (0.43, Some(0.84), Some(0.43)), - (0.05, Some(0.055), Some(0.0)), - (0.95, Some(0.945), Some(1.0)), - (-0.43, Some(-0.44001), Some(-0.43)), - (-0.43, Some(-0.44), None), - (-0.43, Some(-0.43), None), - (-0.43, Some(-0.41999), Some(-0.43)), - (-0.43, Some(-0.17), Some(-0.43)), - (-0.43, Some(-0.84), Some(-0.43)), - (-0.05, Some(-0.055), Some(0.0)), - (-0.95, Some(-0.945), Some(-1.0)), + // enough increase to change + (0.43, Some(0.43 + threshold * 1.1), Some(0.42222223)), + // enough decrease to change + (0.43, Some(0.43 - threshold * 1.1), Some(0.42222223)), + // not enough increase to change + (0.43, Some(0.43 + threshold * 0.9), None), + // not enough decrease to change + (0.43, Some(0.43 - threshold * 0.9), None), + // enough increase to change + (-0.43, Some(-0.43 + threshold * 1.1), Some(-0.42222226)), + // enough decrease to change + (-0.43, Some(-0.43 - threshold * 1.1), Some(-0.42222226)), + // not enough increase to change + (-0.43, Some(-0.43 + threshold * 0.9), None), + // not enough decrease to change + (-0.43, Some(-0.43 - threshold * 0.9), None), + // test upper deadzone logic + (0.05, Some(0.0), None), + (0.06, 
Some(0.0), Some(0.0111111095)), + // test lower deadzone logic + (-0.05, Some(0.0), None), + (-0.06, Some(0.0), Some(-0.011111081)), + // test upper livezone logic + (0.95, Some(1.0), None), + (0.94, Some(1.0), Some(0.9888889)), + // test lower livezone logic + (-0.95, Some(-1.0), None), + (-0.94, Some(-1.0), Some(-0.9888889)), ]; - for (new_value, old_value, expected) in cases { - let settings = AxisSettings::new(-0.95, -0.05, 0.05, 0.95, 0.01).unwrap(); - test_axis_settings_filter(settings, new_value, old_value, expected); + for (new_raw_value, old_raw_value, expected) in cases { + let settings = AxisSettings::new(-0.95, -0.05, 0.05, 0.95, threshold).unwrap(); + test_axis_settings_filter(settings, new_raw_value, old_raw_value, expected); } } diff --git a/crates/bevy_input/src/gestures.rs b/crates/bevy_input/src/gestures.rs index 4f540fb139385..5cd14d4634c82 100644 --- a/crates/bevy_input/src/gestures.rs +++ b/crates/bevy_input/src/gestures.rs @@ -18,7 +18,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// - Only available on **`macOS`** and **`iOS`**. /// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -36,7 +40,11 @@ pub struct PinchGesture(pub f32); /// - Only available on **`macOS`** and **`iOS`**. 
/// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -51,7 +59,11 @@ pub struct RotationGesture(pub f32); /// - Only available on **`macOS`** and **`iOS`**. /// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -65,7 +77,11 @@ pub struct DoubleTapGesture; /// /// - On **`iOS`**, must be enabled first #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_input/src/keyboard.rs b/crates/bevy_input/src/keyboard.rs index 0bd6f41a84750..ea5452fb538af 100644 --- a/crates/bevy_input/src/keyboard.rs +++ b/crates/bevy_input/src/keyboard.rs @@ -72,8 +72,14 @@ use bevy_ecs::{ event::{Event, EventReader}, system::ResMut, }; + #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; + +#[cfg(not(feature = "smol_str"))] +use alloc::string::String as SmolStr; + +#[cfg(feature = "smol_str")] use smol_str::SmolStr; #[cfg(all(feature = "serialize", feature = "bevy_reflect"))] @@ -92,7 +98,7 @@ use 
bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Hash) + reflect(Debug, PartialEq, Hash, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -134,7 +140,7 @@ pub struct KeyboardInput { /// OS specific key combination that leads to Bevy window losing focus and not receiving any /// input events #[derive(Event, Debug, Clone, PartialEq, Eq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Clone, PartialEq))] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -184,7 +190,11 @@ pub fn keyboard_input_system( /// - Correctly match key press and release events. /// - On non-web platforms, support assigning keybinds to virtually any key through a UI. #[derive(Debug, Clone, Ord, PartialOrd, Copy, PartialEq, Eq, Hash)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Clone, PartialEq, Hash) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -225,14 +235,17 @@ pub enum NativeKeyCode { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] -#[allow(clippy::doc_markdown)] // Clippy doesn't like our use of . +#[expect( + clippy::doc_markdown, + reason = "We use camel-case words inside `` tags to represent keyboard keys, which are not identifiers that we should be putting inside backticks." 
+)] #[repr(u32)] pub enum KeyCode { /// This variant is used when the key cannot be translated to any other variant. @@ -718,7 +731,7 @@ pub enum KeyCode { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -751,14 +764,17 @@ pub enum NativeKey { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] -#[allow(clippy::doc_markdown)] // Clippy doesn't like our use of . +#[expect( + clippy::doc_markdown, + reason = "We use camel-case words inside `` tags to represent keyboard keys, which are not identifiers that we should be putting inside backticks." +)] pub enum Key { /// A key string that corresponds to the character typed by the user, taking into account the /// user’s current locale setting, and any system-level keyboard mapping overrides that are in diff --git a/crates/bevy_input/src/lib.rs b/crates/bevy_input/src/lib.rs index 7e0225cf7521c..e1119c3d35a75 100644 --- a/crates/bevy_input/src/lib.rs +++ b/crates/bevy_input/src/lib.rs @@ -4,6 +4,7 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] +#![no_std] //! Input functionality for the [Bevy game engine](https://bevyengine.org/). //! @@ -11,6 +12,11 @@ //! //! `bevy` currently supports keyboard, mouse, gamepad, and touch inputs. 
+#[cfg(feature = "std")] +extern crate std; + +extern crate alloc; + mod axis; mod button_input; /// Common run conditions @@ -158,7 +164,7 @@ impl Plugin for InputPlugin { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( diff --git a/crates/bevy_input/src/mouse.rs b/crates/bevy_input/src/mouse.rs index a6fd70f013712..3a377d93295c5 100644 --- a/crates/bevy_input/src/mouse.rs +++ b/crates/bevy_input/src/mouse.rs @@ -5,7 +5,8 @@ use bevy_ecs::{ change_detection::DetectChangesMut, entity::Entity, event::{Event, EventReader}, - system::{ResMut, Resource}, + resource::Resource, + system::ResMut, }; use bevy_math::Vec2; #[cfg(feature = "bevy_reflect")] @@ -26,7 +27,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// The event is read inside of the [`mouse_button_input_system`] /// to update the [`ButtonInput`] resource. 
#[derive(Event, Debug, Clone, Copy, PartialEq, Eq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -55,7 +60,7 @@ pub struct MouseButtonInput { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -87,7 +92,11 @@ pub enum MouseButton { /// /// [`DeviceEvent::MouseMotion`]: https://docs.rs/winit/latest/winit/event/enum.DeviceEvent.html#variant.MouseMotion #[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -104,8 +113,12 @@ pub struct MouseMotion { /// /// The value of the event can either be interpreted as the amount of lines or the amount of pixels /// to scroll. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[derive(Debug, Hash, Clone, Copy, Eq, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -128,7 +141,11 @@ pub enum MouseScrollUnit { /// /// This event is the translated version of the `WindowEvent::MouseWheel` from the `winit` crate. 
#[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -173,7 +190,7 @@ pub fn mouse_button_input_system( #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Resource, PartialEq) + reflect(Debug, Default, Resource, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -194,7 +211,7 @@ pub struct AccumulatedMouseMotion { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Default, Resource, PartialEq) + reflect(Debug, Default, Resource, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( diff --git a/crates/bevy_input/src/touch.rs b/crates/bevy_input/src/touch.rs index a67d1bc2b2f68..28f3159d53bb4 100644 --- a/crates/bevy_input/src/touch.rs +++ b/crates/bevy_input/src/touch.rs @@ -3,12 +3,13 @@ use bevy_ecs::{ entity::Entity, event::{Event, EventReader}, - system::{ResMut, Resource}, + resource::Resource, + system::ResMut, }; use bevy_math::Vec2; +use bevy_platform::collections::HashMap; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; -use bevy_utils::HashMap; #[cfg(all(feature = "serialize", feature = "bevy_reflect"))] use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; @@ -37,7 +38,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// This event is the translated version of the `WindowEvent::Touch` from the `winit` crate. /// It is available to the end user and can be used for game logic. 
#[derive(Event, Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -61,7 +66,11 @@ pub struct TouchInput { /// A force description of a [`Touch`] input. #[derive(Debug, Clone, Copy, PartialEq)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -110,7 +119,7 @@ pub enum ForceTouch { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, Hash, PartialEq) + reflect(Debug, Hash, PartialEq, Clone) )] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( diff --git a/crates/bevy_input_focus/Cargo.toml b/crates/bevy_input_focus/Cargo.toml index eb5420225fc5a..0b2ca538307e7 100644 --- a/crates/bevy_input_focus/Cargo.toml +++ b/crates/bevy_input_focus/Cargo.toml @@ -1,21 +1,77 @@ [package] name = "bevy_input_focus" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Keyboard focus management" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy"] -rust-version = "1.76.0" +rust-version = "1.85.0" + +[features] +default = ["std", "bevy_reflect", "bevy_ecs/async_executor"] + +# Functionality + +## Adds runtime reflection support using `bevy_reflect`. 
+bevy_reflect = [ + "dep:bevy_reflect", + "bevy_app/bevy_reflect", + "bevy_ecs/bevy_reflect", + "bevy_math/bevy_reflect", + "bevy_input/bevy_reflect", + "bevy_window/bevy_reflect", +] + +## Adds serialization support through `serde`. +serialize = [ + "bevy_ecs/serialize", + "bevy_math/serialize", + "bevy_input/serialize", + "bevy_window/serialize", +] + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. +std = [ + "bevy_app/std", + "bevy_ecs/std", + "bevy_math/std", + "bevy_reflect/std", + "bevy_input/std", + "bevy_window/std", +] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = [ + "bevy_app/critical-section", + "bevy_ecs/critical-section", + "bevy_reflect?/critical-section", + "bevy_input/critical-section", +] + +## Uses the `libm` maths library instead of the one provided in `std` and `core`. 
+libm = ["bevy_math/libm", "bevy_window/libm"] [dependencies] -bevy_app = { path = "../bevy_app", version = "0.15.0-dev", default-features = false } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev", default-features = false } -bevy_input = { path = "../bevy_input", version = "0.15.0-dev", default-features = false } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev", default-features = false } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev", default-features = false } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev", default-features = false } +# bevy +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } +bevy_input = { path = "../bevy_input", version = "0.16.0-dev", default-features = false } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev", default-features = false } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", features = [ + "glam", +], default-features = false, optional = true } + +# other +thiserror = { version = "2", default-features = false } +log = { version = "0.4", default-features = false } [dev-dependencies] smol_str = "0.2" diff --git a/crates/bevy_input_focus/LICENSE-APACHE b/crates/bevy_input_focus/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_input_focus/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_input_focus/LICENSE-MIT b/crates/bevy_input_focus/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_input_focus/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_input_focus/src/autofocus.rs b/crates/bevy_input_focus/src/autofocus.rs index 10dea5f892461..72024418d2e65 100644 --- a/crates/bevy_input_focus/src/autofocus.rs +++ b/crates/bevy_input_focus/src/autofocus.rs @@ -1,9 +1,12 @@ //! Contains the [`AutoFocus`] component and related machinery. -use bevy_ecs::{component::ComponentId, prelude::*, world::DeferredWorld}; +use bevy_ecs::{component::HookContext, prelude::*, world::DeferredWorld}; use crate::InputFocus; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::{prelude::*, Reflect}; + /// Indicates that this widget should automatically receive [`InputFocus`]. 
/// /// This can be useful for things like dialog boxes, the first text input in a form, @@ -12,10 +15,15 @@ use crate::InputFocus; /// The focus is swapped when this component is added /// or an entity with this component is spawned. #[derive(Debug, Default, Component, Copy, Clone)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Component, Clone) +)] #[component(on_add = on_auto_focus_added)] pub struct AutoFocus; -fn on_auto_focus_added(mut world: DeferredWorld, entity: Entity, _: ComponentId) { +fn on_auto_focus_added(mut world: DeferredWorld, HookContext { entity, .. }: HookContext) { if let Some(mut input_focus) = world.get_resource_mut::() { input_focus.set(entity); } diff --git a/crates/bevy_input_focus/src/directional_navigation.rs b/crates/bevy_input_focus/src/directional_navigation.rs new file mode 100644 index 0000000000000..2f3d64702549b --- /dev/null +++ b/crates/bevy_input_focus/src/directional_navigation.rs @@ -0,0 +1,433 @@ +//! A navigation framework for moving between focusable elements based on directional input. +//! +//! While virtual cursors are a common way to navigate UIs with a gamepad (or arrow keys!), +//! they are generally both slow and frustrating to use. +//! Instead, directional inputs should provide a direct way to snap between focusable elements. +//! +//! Like the rest of this crate, the [`InputFocus`] resource is manipulated to track +//! the current focus. +//! +//! Navigating between focusable entities (commonly UI nodes) is done by +//! passing a [`CompassOctant`] into the [`navigate`](DirectionalNavigation::navigate) method +//! from the [`DirectionalNavigation`] system parameter. +//! +//! Under the hood, the [`DirectionalNavigationMap`] stores a directed graph of focusable entities. +//! Each entity can have up to 8 neighbors, one for each [`CompassOctant`], balancing flexibility and required precision. +//! 
For now, this graph must be built manually, but in the future, it could be generated automatically. + +use bevy_app::prelude::*; +use bevy_ecs::{ + entity::{EntityHashMap, EntityHashSet}, + prelude::*, + system::SystemParam, +}; +use bevy_math::CompassOctant; +use thiserror::Error; + +use crate::InputFocus; + +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::{prelude::*, Reflect}; + +/// A plugin that sets up the directional navigation systems and resources. +#[derive(Default)] +pub struct DirectionalNavigationPlugin; + +impl Plugin for DirectionalNavigationPlugin { + fn build(&self, app: &mut App) { + app.init_resource::(); + + #[cfg(feature = "bevy_reflect")] + app.register_type::() + .register_type::(); + } +} + +/// The up-to-eight neighbors of a focusable entity, one for each [`CompassOctant`]. +#[derive(Default, Debug, Clone, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Default, Debug, PartialEq, Clone) +)] +pub struct NavNeighbors { + /// The array of neighbors, one for each [`CompassOctant`]. + /// The mapping between array elements and directions is determined by [`CompassOctant::to_index`]. + /// + /// If no neighbor exists in a given direction, the value will be [`None`]. + /// In most cases, using [`NavNeighbors::set`] and [`NavNeighbors::get`] + /// will be more ergonomic than directly accessing this array. + pub neighbors: [Option; 8], +} + +impl NavNeighbors { + /// An empty set of neighbors. + pub const EMPTY: NavNeighbors = NavNeighbors { + neighbors: [None; 8], + }; + + /// Get the neighbor for a given [`CompassOctant`]. + pub const fn get(&self, octant: CompassOctant) -> Option { + self.neighbors[octant.to_index()] + } + + /// Set the neighbor for a given [`CompassOctant`]. + pub const fn set(&mut self, octant: CompassOctant, entity: Entity) { + self.neighbors[octant.to_index()] = Some(entity); + } +} + +/// A resource that stores the traversable graph of focusable entities. 
+/// +/// Each entity can have up to 8 neighbors, one for each [`CompassOctant`]. +/// +/// To ensure that your graph is intuitive to navigate and generally works correctly, it should be: +/// +/// - **Connected**: Every focusable entity should be reachable from every other focusable entity. +/// - **Symmetric**: If entity A is a neighbor of entity B, then entity B should be a neighbor of entity A, ideally in the reverse direction. +/// - **Physical**: The direction of navigation should match the layout of the entities when possible, +/// although looping around the edges of the screen is also acceptable. +/// - **Not self-connected**: An entity should not be a neighbor of itself; use [`None`] instead. +/// +/// For now, this graph must be built manually, and the developer is responsible for ensuring that it meets the above criteria. +#[derive(Resource, Debug, Default, Clone, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Resource, Debug, Default, PartialEq, Clone) +)] +pub struct DirectionalNavigationMap { + /// A directed graph of focusable entities. + /// + /// Pass in the current focus as a key, and get back a collection of up to 8 neighbors, + /// each keyed by a [`CompassOctant`]. + pub neighbors: EntityHashMap, +} + +impl DirectionalNavigationMap { + /// Adds a new entity to the navigation map, overwriting any existing neighbors for that entity. + /// + /// Removes an entity from the navigation map, including all connections to and from it. + /// + /// Note that this is an O(n) operation, where n is the number of entities in the map, + /// as we must iterate over each entity to check for connections to the removed entity. + /// + /// If you are removing multiple entities, consider using [`remove_multiple`](Self::remove_multiple) instead. 
+ pub fn remove(&mut self, entity: Entity) { + self.neighbors.remove(&entity); + + for node in self.neighbors.values_mut() { + for neighbor in node.neighbors.iter_mut() { + if *neighbor == Some(entity) { + *neighbor = None; + } + } + } + } + + /// Removes a collection of entities from the navigation map. + /// + /// While this is still an O(n) operation, where n is the number of entities in the map, + /// it is more efficient than calling [`remove`](Self::remove) multiple times, + /// as we can check for connections to all removed entities in a single pass. + /// + /// An [`EntityHashSet`] must be provided as it is noticeably faster than the standard hasher or a [`Vec`](`alloc::vec::Vec`). + pub fn remove_multiple(&mut self, entities: EntityHashSet) { + for entity in &entities { + self.neighbors.remove(entity); + } + + for node in self.neighbors.values_mut() { + for neighbor in node.neighbors.iter_mut() { + if let Some(entity) = *neighbor { + if entities.contains(&entity) { + *neighbor = None; + } + } + } + } + } + + /// Completely clears the navigation map, removing all entities and connections. + pub fn clear(&mut self) { + self.neighbors.clear(); + } + + /// Adds an edge between two entities in the navigation map. + /// Any existing edge from A in the provided direction will be overwritten. + /// + /// The reverse edge will not be added, so navigation will only be possible in one direction. + /// If you want to add a symmetrical edge, use [`add_symmetrical_edge`](Self::add_symmetrical_edge) instead. + pub fn add_edge(&mut self, a: Entity, b: Entity, direction: CompassOctant) { + self.neighbors + .entry(a) + .or_insert(NavNeighbors::EMPTY) + .set(direction, b); + } + + /// Adds a symmetrical edge between two entities in the navigation map. + /// The A -> B path will use the provided direction, while B -> A will use the [`CompassOctant::opposite`] variant. + /// + /// Any existing connections between the two entities will be overwritten. 
+ pub fn add_symmetrical_edge(&mut self, a: Entity, b: Entity, direction: CompassOctant) { + self.add_edge(a, b, direction); + self.add_edge(b, a, direction.opposite()); + } + + /// Add symmetrical edges between each consecutive pair of entities in the provided slice. + /// + /// Unlike [`add_looping_edges`](Self::add_looping_edges), this method does not loop back to the first entity. + pub fn add_edges(&mut self, entities: &[Entity], direction: CompassOctant) { + for pair in entities.windows(2) { + self.add_symmetrical_edge(pair[0], pair[1], direction); + } + } + + /// Add symmetrical edges between each consecutive pair of entities in the provided slice, looping back to the first entity at the end. + /// + /// This is useful for creating a circular navigation path between a set of entities, such as a menu. + pub fn add_looping_edges(&mut self, entities: &[Entity], direction: CompassOctant) { + self.add_edges(entities, direction); + if let Some((first_entity, rest)) = entities.split_first() { + if let Some(last_entity) = rest.last() { + self.add_symmetrical_edge(*last_entity, *first_entity, direction); + } + } + } + + /// Gets the entity in a given direction from the current focus, if any. + pub fn get_neighbor(&self, focus: Entity, octant: CompassOctant) -> Option { + self.neighbors + .get(&focus) + .and_then(|neighbors| neighbors.get(octant)) + } + + /// Looks up the neighbors of a given entity. + /// + /// If the entity is not in the map, [`None`] will be returned. + /// Note that the set of neighbors is not guaranteed to be non-empty though! + pub fn get_neighbors(&self, entity: Entity) -> Option<&NavNeighbors> { + self.neighbors.get(&entity) + } +} + +/// A system parameter for navigating between focusable entities in a directional way. +#[derive(SystemParam, Debug)] +pub struct DirectionalNavigation<'w> { + /// The currently focused entity. + pub focus: ResMut<'w, InputFocus>, + /// The navigation map containing the connections between entities. 
+ pub map: Res<'w, DirectionalNavigationMap>, +} + +impl DirectionalNavigation<'_> { + /// Navigates to the neighbor in a given direction from the current focus, if any. + /// + /// Returns the new focus if successful. + /// Returns an error if there is no focus set or if there is no neighbor in the requested direction. + /// + /// If the result was `Ok`, the [`InputFocus`] resource is updated to the new focus as part of this method call. + pub fn navigate( + &mut self, + direction: CompassOctant, + ) -> Result { + if let Some(current_focus) = self.focus.0 { + if let Some(new_focus) = self.map.get_neighbor(current_focus, direction) { + self.focus.set(new_focus); + Ok(new_focus) + } else { + Err(DirectionalNavigationError::NoNeighborInDirection { + current_focus, + direction, + }) + } + } else { + Err(DirectionalNavigationError::NoFocus) + } + } +} + +/// An error that can occur when navigating between focusable entities using [directional navigation](crate::directional_navigation). +#[derive(Debug, PartialEq, Clone, Error)] +pub enum DirectionalNavigationError { + /// No focusable entity is currently set. + #[error("No focusable entity is currently set.")] + NoFocus, + /// No neighbor in the requested direction. + #[error("No neighbor from {current_focus} in the {direction:?} direction.")] + NoNeighborInDirection { + /// The entity that was the focus when the error occurred. + current_focus: Entity, + /// The direction in which the navigation was attempted. 
+ direction: CompassOctant, + }, +} + +#[cfg(test)] +mod tests { + use bevy_ecs::system::RunSystemOnce; + + use super::*; + + #[test] + fn setting_and_getting_nav_neighbors() { + let mut neighbors = NavNeighbors::EMPTY; + assert_eq!(neighbors.get(CompassOctant::SouthEast), None); + + neighbors.set(CompassOctant::SouthEast, Entity::PLACEHOLDER); + + for i in 0..8 { + if i == CompassOctant::SouthEast.to_index() { + assert_eq!( + neighbors.get(CompassOctant::SouthEast), + Some(Entity::PLACEHOLDER) + ); + } else { + assert_eq!(neighbors.get(CompassOctant::from_index(i).unwrap()), None); + } + } + } + + #[test] + fn simple_set_and_get_navmap() { + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + let mut map = DirectionalNavigationMap::default(); + map.add_edge(a, b, CompassOctant::SouthEast); + + assert_eq!(map.get_neighbor(a, CompassOctant::SouthEast), Some(b)); + assert_eq!( + map.get_neighbor(b, CompassOctant::SouthEast.opposite()), + None + ); + } + + #[test] + fn symmetrical_edges() { + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + let mut map = DirectionalNavigationMap::default(); + map.add_symmetrical_edge(a, b, CompassOctant::North); + + assert_eq!(map.get_neighbor(a, CompassOctant::North), Some(b)); + assert_eq!(map.get_neighbor(b, CompassOctant::South), Some(a)); + } + + #[test] + fn remove_nodes() { + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + + let mut map = DirectionalNavigationMap::default(); + map.add_edge(a, b, CompassOctant::North); + map.add_edge(b, a, CompassOctant::South); + + assert_eq!(map.get_neighbor(a, CompassOctant::North), Some(b)); + assert_eq!(map.get_neighbor(b, CompassOctant::South), Some(a)); + + map.remove(b); + + assert_eq!(map.get_neighbor(a, CompassOctant::North), None); + assert_eq!(map.get_neighbor(b, CompassOctant::South), None); + } + + #[test] + fn 
remove_multiple_nodes() { + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + let c = world.spawn_empty().id(); + + let mut map = DirectionalNavigationMap::default(); + map.add_edge(a, b, CompassOctant::North); + map.add_edge(b, a, CompassOctant::South); + map.add_edge(b, c, CompassOctant::East); + map.add_edge(c, b, CompassOctant::West); + + let mut to_remove = EntityHashSet::default(); + to_remove.insert(b); + to_remove.insert(c); + + map.remove_multiple(to_remove); + + assert_eq!(map.get_neighbor(a, CompassOctant::North), None); + assert_eq!(map.get_neighbor(b, CompassOctant::South), None); + assert_eq!(map.get_neighbor(b, CompassOctant::East), None); + assert_eq!(map.get_neighbor(c, CompassOctant::West), None); + } + + #[test] + fn edges() { + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + let c = world.spawn_empty().id(); + + let mut map = DirectionalNavigationMap::default(); + map.add_edges(&[a, b, c], CompassOctant::East); + + assert_eq!(map.get_neighbor(a, CompassOctant::East), Some(b)); + assert_eq!(map.get_neighbor(b, CompassOctant::East), Some(c)); + assert_eq!(map.get_neighbor(c, CompassOctant::East), None); + + assert_eq!(map.get_neighbor(a, CompassOctant::West), None); + assert_eq!(map.get_neighbor(b, CompassOctant::West), Some(a)); + assert_eq!(map.get_neighbor(c, CompassOctant::West), Some(b)); + } + + #[test] + fn looping_edges() { + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + let c = world.spawn_empty().id(); + + let mut map = DirectionalNavigationMap::default(); + map.add_looping_edges(&[a, b, c], CompassOctant::East); + + assert_eq!(map.get_neighbor(a, CompassOctant::East), Some(b)); + assert_eq!(map.get_neighbor(b, CompassOctant::East), Some(c)); + assert_eq!(map.get_neighbor(c, CompassOctant::East), Some(a)); + + assert_eq!(map.get_neighbor(a, CompassOctant::West), Some(c)); 
+ assert_eq!(map.get_neighbor(b, CompassOctant::West), Some(a)); + assert_eq!(map.get_neighbor(c, CompassOctant::West), Some(b)); + } + + #[test] + fn nav_with_system_param() { + let mut world = World::new(); + let a = world.spawn_empty().id(); + let b = world.spawn_empty().id(); + let c = world.spawn_empty().id(); + + let mut map = DirectionalNavigationMap::default(); + map.add_looping_edges(&[a, b, c], CompassOctant::East); + + world.insert_resource(map); + + let mut focus = InputFocus::default(); + focus.set(a); + world.insert_resource(focus); + + assert_eq!(world.resource::().get(), Some(a)); + + fn navigate_east(mut nav: DirectionalNavigation) { + nav.navigate(CompassOctant::East).unwrap(); + } + + world.run_system_once(navigate_east).unwrap(); + assert_eq!(world.resource::().get(), Some(b)); + + world.run_system_once(navigate_east).unwrap(); + assert_eq!(world.resource::().get(), Some(c)); + + world.run_system_once(navigate_east).unwrap(); + assert_eq!(world.resource::().get(), Some(a)); + } +} diff --git a/crates/bevy_input_focus/src/lib.rs b/crates/bevy_input_focus/src/lib.rs index 4b0ec5a763c6f..3f7ecf9e7c5bc 100644 --- a/crates/bevy_input_focus/src/lib.rs +++ b/crates/bevy_input_focus/src/lib.rs @@ -4,17 +4,25 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] +#![no_std] -//! Keyboard focus system for Bevy. +//! A UI-centric focus system for Bevy. //! //! This crate provides a system for managing input focus in Bevy applications, including: //! * [`InputFocus`], a resource for tracking which entity has input focus. //! * Methods for getting and setting input focus via [`InputFocus`] and [`IsFocusedHelper`]. //! * A generic [`FocusedInput`] event for input events which bubble up from the focused entity. +//! * Various navigation frameworks for moving input focus between entities based on user input, such as [`tab_navigation`] and [`directional_navigation`]. //! //! 
This crate does *not* provide any integration with UI widgets: this is the responsibility of the widget crate, //! which should depend on [`bevy_input_focus`](crate). +#[cfg(feature = "std")] +extern crate std; + +extern crate alloc; + +pub mod directional_navigation; pub mod tab_navigation; // This module is too small / specific to be exported by the crate, @@ -24,12 +32,14 @@ pub use autofocus::*; use bevy_app::{App, Plugin, PreUpdate, Startup}; use bevy_ecs::{prelude::*, query::QueryData, system::SystemParam, traversal::Traversal}; -use bevy_hierarchy::{HierarchyQueryExt, Parent}; use bevy_input::{gamepad::GamepadButtonChangedEvent, keyboard::KeyboardInput, mouse::MouseWheel}; use bevy_window::{PrimaryWindow, Window}; use core::fmt::Debug; -/// Resource representing which entity has input focus, if any. Keyboard events will be +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::{prelude::*, Reflect}; + +/// Resource representing which entity has input focus, if any. Input events (other than pointer-like inputs) will be /// dispatched to the current focus entity, or to the primary window if no entity has focus. /// /// Changing the input focus is as easy as modifying this resource. @@ -67,6 +77,11 @@ use core::fmt::Debug; /// } /// ``` #[derive(Clone, Debug, Default, Resource)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Resource, Clone) +)] pub struct InputFocus(pub Option); impl InputFocus { @@ -105,7 +120,14 @@ impl InputFocus { /// By contrast, a console-style UI intended to be navigated with a gamepad may always have the focus indicator visible. /// /// To easily access information about whether focus indicators should be shown for a given entity, use the [`IsFocused`] trait. -#[derive(Clone, Debug, Resource)] +/// +/// By default, this resource is set to `false`. 
+#[derive(Clone, Debug, Resource, Default)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Resource, Clone) +)] pub struct InputFocusVisible(pub bool); /// A bubble-able user input event that starts at the currently focused entity. @@ -116,6 +138,7 @@ pub struct InputFocusVisible(pub bool); /// To set up your own bubbling input event, add the [`dispatch_focused_input::`](dispatch_focused_input) system to your app, /// in the [`InputFocusSet::Dispatch`] system set during [`PreUpdate`]. #[derive(Clone, Debug, Component)] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component, Clone))] pub struct FocusedInput { /// The underlying input event. pub input: E, @@ -132,17 +155,17 @@ impl Event for FocusedInput { #[derive(QueryData)] /// These are for accessing components defined on the targeted entity pub struct WindowTraversal { - parent: Option<&'static Parent>, + child_of: Option<&'static ChildOf>, window: Option<&'static Window>, } impl Traversal> for WindowTraversal { fn traverse(item: Self::Item<'_>, event: &FocusedInput) -> Option { - let WindowTraversalItem { parent, window } = item; + let WindowTraversalItem { child_of, window } = item; // Send event to parent, if it has one. - if let Some(parent) = parent { - return Some(parent.get()); + if let Some(child_of) = child_of { + return Some(child_of.parent()); }; // Otherwise, send it to the window entity (unless this is a window entity). 
@@ -163,8 +186,8 @@ pub struct InputDispatchPlugin; impl Plugin for InputDispatchPlugin { fn build(&self, app: &mut App) { app.add_systems(Startup, set_initial_focus) - .insert_resource(InputFocus(None)) - .insert_resource(InputFocusVisible(false)) + .init_resource::() + .init_resource::() .add_systems( PreUpdate, ( @@ -174,6 +197,11 @@ impl Plugin for InputDispatchPlugin { ) .in_set(InputFocusSet::Dispatch), ); + + #[cfg(feature = "bevy_reflect")] + app.register_type::() + .register_type::() + .register_type::(); } } @@ -202,7 +230,7 @@ pub fn dispatch_focused_input( windows: Query>, mut commands: Commands, ) { - if let Ok(window) = windows.get_single() { + if let Ok(window) = windows.single() { // If an element has keyboard focus, then dispatch the input event to that element. if let Some(focused_entity) = focus.0 { for ev in key_events.read() { @@ -262,7 +290,7 @@ pub trait IsFocused { /// When working with the entire [`World`], consider using the [`IsFocused`] instead. #[derive(SystemParam)] pub struct IsFocusedHelper<'w, 's> { - parent_query: Query<'w, 's, &'static Parent>, + parent_query: Query<'w, 's, &'static ChildOf>, input_focus: Option>, input_focus_visible: Option>, } @@ -310,7 +338,7 @@ impl IsFocused for World { if e == entity { return true; } - if let Some(parent) = self.entity(e).get::().map(Parent::get) { + if let Some(parent) = self.entity(e).get::().map(ChildOf::parent) { e = parent; } else { return false; @@ -335,10 +363,10 @@ impl IsFocused for World { mod tests { use super::*; + use alloc::string::String; use bevy_ecs::{ - component::ComponentId, observer::Trigger, system::RunSystemOnce, world::DeferredWorld, + component::HookContext, observer::Trigger, system::RunSystemOnce, world::DeferredWorld, }; - use bevy_hierarchy::BuildChildren; use bevy_input::{ keyboard::{Key, KeyCode}, ButtonState, InputPlugin, @@ -350,7 +378,7 @@ mod tests { #[component(on_add = set_focus_on_add)] struct SetFocusOnAdd; - fn set_focus_on_add(mut world: 
DeferredWorld, entity: Entity, _: ComponentId) { + fn set_focus_on_add(mut world: DeferredWorld, HookContext { entity, .. }: HookContext) { let mut input_focus = world.resource_mut::(); input_focus.set(entity); } diff --git a/crates/bevy_input_focus/src/tab_navigation.rs b/crates/bevy_input_focus/src/tab_navigation.rs index eb01df784d052..a5fe691458863 100644 --- a/crates/bevy_input_focus/src/tab_navigation.rs +++ b/crates/bevy_input_focus/src/tab_navigation.rs @@ -23,33 +23,52 @@ //! you can use the [`TabNavigation`] system parameter directly instead. //! This object can be injected into your systems, and provides a [`navigate`](`TabNavigation::navigate`) method which can be //! used to navigate between focusable entities. + +use alloc::vec::Vec; use bevy_app::{App, Plugin, Startup}; use bevy_ecs::{ component::Component, entity::Entity, + hierarchy::{ChildOf, Children}, observer::Trigger, query::{With, Without}, system::{Commands, Query, Res, ResMut, SystemParam}, }; -use bevy_hierarchy::{Children, HierarchyQueryExt, Parent}; use bevy_input::{ keyboard::{KeyCode, KeyboardInput}, ButtonInput, ButtonState, }; -use bevy_utils::tracing::warn; use bevy_window::PrimaryWindow; +use log::warn; +use thiserror::Error; use crate::{FocusedInput, InputFocus, InputFocusVisible}; +#[cfg(feature = "bevy_reflect")] +use { + bevy_ecs::prelude::ReflectComponent, + bevy_reflect::{prelude::*, Reflect}, +}; + /// A component which indicates that an entity wants to participate in tab navigation. /// /// Note that you must also add the [`TabGroup`] component to the entity's ancestor in order /// for this component to have any effect. 
-#[derive(Debug, Default, Component, Copy, Clone)] +#[derive(Debug, Default, Component, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Component, PartialEq, Clone) +)] pub struct TabIndex(pub i32); /// A component used to mark a tree of entities as containing tabbable elements. #[derive(Debug, Default, Component, Copy, Clone)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Component, Clone) +)] pub struct TabGroup { /// The order of the tab group relative to other tab groups. pub order: i32, @@ -79,24 +98,58 @@ impl TabGroup { } } -/// A navigation action for tabbing. +/// A navigation action that users might take to navigate your user interface in a cyclic fashion. /// /// These values are consumed by the [`TabNavigation`] system param. pub enum NavAction { /// Navigate to the next focusable entity, wrapping around to the beginning if at the end. + /// + /// This is commonly triggered by pressing the Tab key. Next, /// Navigate to the previous focusable entity, wrapping around to the end if at the beginning. + /// + /// This is commonly triggered by pressing Shift+Tab. Previous, /// Navigate to the first focusable entity. + /// + /// This is commonly triggered by pressing Home. First, /// Navigate to the last focusable entity. + /// + /// This is commonly triggered by pressing End. Last, } +/// An error that can occur during [tab navigation](crate::tab_navigation). +#[derive(Debug, Error, PartialEq, Eq, Clone)] +pub enum TabNavigationError { + /// No tab groups were found. + #[error("No tab groups found")] + NoTabGroups, + /// No focusable entities were found. + #[error("No focusable entities found")] + NoFocusableEntities, + /// Could not navigate to the next focusable entity. + /// + /// This can occur if your tab groups are malformed. 
+ #[error("Failed to navigate to next focusable entity")] + FailedToNavigateToNextFocusableEntity, + /// No tab group for the current focus entity was found. + #[error("No tab group found for currently focused entity {previous_focus}. Users will not be able to navigate back to this entity.")] + NoTabGroupForCurrentFocus { + /// The entity that was previously focused, + /// and is missing its tab group. + previous_focus: Entity, + /// The new entity that will be focused. + /// + /// If you want to recover from this error, set [`InputFocus`] to this entity. + new_focus: Entity, + }, +} + /// An injectable helper object that provides tab navigation functionality. #[doc(hidden)] #[derive(SystemParam)] -#[allow(clippy::type_complexity)] pub struct TabNavigation<'w, 's> { // Query for tab groups. tabgroup_query: Query<'w, 's, (Entity, &'static TabGroup, &'static Children)>, @@ -108,27 +161,27 @@ pub struct TabNavigation<'w, 's> { Without, >, // Query for parents. - parent_query: Query<'w, 's, &'static Parent>, + parent_query: Query<'w, 's, &'static ChildOf>, } impl TabNavigation<'_, '_> { - /// Navigate to the next focusable entity. + /// Navigate to the desired focusable entity. /// + /// Change the [`NavAction`] to navigate in a different direction. /// Focusable entities are determined by the presence of the [`TabIndex`] component. /// - /// Arguments: - /// * `focus`: The current focus entity, or `None` if no entity has focus. - /// * `action`: Whether to select the next, previous, first, or last focusable entity. - /// /// If no focusable entities are found, then this function will return either the first /// or last focusable entity, depending on the direction of navigation. For example, if /// `action` is `Next` and no focusable entities are found, then this function will return /// the first focusable entity. 
- pub fn navigate(&self, focus: &InputFocus, action: NavAction) -> Option { + pub fn navigate( + &self, + focus: &InputFocus, + action: NavAction, + ) -> Result { // If there are no tab groups, then there are no focusable entities. if self.tabgroup_query.is_empty() { - warn!("No tab groups found"); - return None; + return Err(TabNavigationError::NoTabGroups); } // Start by identifying which tab group we are in. Mainly what we want to know is if @@ -144,11 +197,21 @@ impl TabNavigation<'_, '_> { }) }); - if focus.0.is_some() && tabgroup.is_none() { - warn!("No tab group found for focus entity. Users will not be able to navigate back to this entity."); + let navigation_result = self.navigate_in_group(tabgroup, focus, action); + + match navigation_result { + Ok(entity) => { + if focus.0.is_some() && tabgroup.is_none() { + Err(TabNavigationError::NoTabGroupForCurrentFocus { + previous_focus: focus.0.unwrap(), + new_focus: entity, + }) + } else { + Ok(entity) + } + } + Err(e) => Err(e), } - - self.navigate_in_group(tabgroup, focus, action) } fn navigate_in_group( @@ -156,7 +219,7 @@ impl TabNavigation<'_, '_> { tabgroup: Option<(Entity, &TabGroup)>, focus: &InputFocus, action: NavAction, - ) -> Option { + ) -> Result { // List of all focusable entities found. 
let mut focusable: Vec<(Entity, TabIndex)> = Vec::with_capacity(self.tabindex_query.iter().len()); @@ -179,7 +242,7 @@ impl TabNavigation<'_, '_> { .map(|(e, tg, _)| (e, *tg)) .collect(); // Stable sort by group order - tab_groups.sort_by(compare_tab_groups); + tab_groups.sort_by_key(|(_, tg)| tg.order); // Search group descendants tab_groups.iter().for_each(|(tg_entity, _)| { @@ -189,12 +252,11 @@ impl TabNavigation<'_, '_> { } if focusable.is_empty() { - warn!("No focusable entities found"); - return None; + return Err(TabNavigationError::NoFocusableEntities); } // Stable sort by tabindex - focusable.sort_by(compare_tab_indices); + focusable.sort_by_key(|(_, idx)| *idx); let index = focusable.iter().position(|e| Some(e.0) == focus.0); let count = focusable.len(); @@ -204,7 +266,10 @@ impl TabNavigation<'_, '_> { (None, NavAction::Next) | (_, NavAction::First) => 0, (None, NavAction::Previous) | (_, NavAction::Last) => count - 1, }; - focusable.get(next).map(|(e, _)| e).copied() + match focusable.get(next) { + Some((entity, _)) => Ok(*entity), + None => Err(TabNavigationError::FailedToNavigateToNextFocusableEntity), + } } /// Gather all focusable entities in tree order. @@ -233,21 +298,15 @@ impl TabNavigation<'_, '_> { } } -fn compare_tab_groups(a: &(Entity, TabGroup), b: &(Entity, TabGroup)) -> core::cmp::Ordering { - a.1.order.cmp(&b.1.order) -} - -// Stable sort which compares by tab index -fn compare_tab_indices(a: &(Entity, TabIndex), b: &(Entity, TabIndex)) -> core::cmp::Ordering { - a.1 .0.cmp(&b.1 .0) -} - /// Plugin for navigating between focusable entities using keyboard input. 
pub struct TabNavigationPlugin; impl Plugin for TabNavigationPlugin { fn build(&self, app: &mut App) { app.add_systems(Startup, setup_tab_navigation); + + #[cfg(feature = "bevy_reflect")] + app.register_type::().register_type::(); } } @@ -261,6 +320,8 @@ fn setup_tab_navigation(mut commands: Commands, window: Query>, nav: TabNavigation, @@ -274,7 +335,7 @@ pub fn handle_tab_navigation( && key_event.state == ButtonState::Pressed && !key_event.repeat { - let next = nav.navigate( + let maybe_next = nav.navigate( &focus, if keys.pressed(KeyCode::ShiftLeft) || keys.pressed(KeyCode::ShiftRight) { NavAction::Previous @@ -282,10 +343,22 @@ pub fn handle_tab_navigation( NavAction::Next }, ); - if next.is_some() { - trigger.propagate(false); - focus.0 = next; - visible.0 = true; + + match maybe_next { + Ok(next) => { + trigger.propagate(false); + focus.set(next); + visible.0 = true; + } + Err(e) => { + warn!("Tab navigation error: {}", e); + // This failure mode is recoverable, but still indicates a problem. + if let TabNavigationError::NoTabGroupForCurrentFocus { new_focus, .. 
} = e { + trigger.propagate(false); + focus.set(new_focus); + visible.0 = true; + } + } } } } @@ -293,7 +366,6 @@ pub fn handle_tab_navigation( #[cfg(test)] mod tests { use bevy_ecs::system::SystemState; - use bevy_hierarchy::BuildChildren; use super::*; @@ -302,10 +374,9 @@ mod tests { let mut app = App::new(); let world = app.world_mut(); - let tab_entity_1 = world.spawn(TabIndex(0)).id(); - let tab_entity_2 = world.spawn(TabIndex(1)).id(); - let mut tab_group_entity = world.spawn(TabGroup::new(0)); - tab_group_entity.replace_children(&[tab_entity_1, tab_entity_2]); + let tab_group_entity = world.spawn(TabGroup::new(0)).id(); + let tab_entity_1 = world.spawn((TabIndex(0), ChildOf(tab_group_entity))).id(); + let tab_entity_2 = world.spawn((TabIndex(1), ChildOf(tab_group_entity))).id(); let mut system_state: SystemState = SystemState::new(world); let tab_navigation = system_state.get(world); @@ -314,16 +385,16 @@ mod tests { let next_entity = tab_navigation.navigate(&InputFocus::from_entity(tab_entity_1), NavAction::Next); - assert_eq!(next_entity, Some(tab_entity_2)); + assert_eq!(next_entity, Ok(tab_entity_2)); let prev_entity = tab_navigation.navigate(&InputFocus::from_entity(tab_entity_2), NavAction::Previous); - assert_eq!(prev_entity, Some(tab_entity_1)); + assert_eq!(prev_entity, Ok(tab_entity_1)); let first_entity = tab_navigation.navigate(&InputFocus::default(), NavAction::First); - assert_eq!(first_entity, Some(tab_entity_1)); + assert_eq!(first_entity, Ok(tab_entity_1)); let last_entity = tab_navigation.navigate(&InputFocus::default(), NavAction::Last); - assert_eq!(last_entity, Some(tab_entity_2)); + assert_eq!(last_entity, Ok(tab_entity_2)); } } diff --git a/crates/bevy_internal/Cargo.toml b/crates/bevy_internal/Cargo.toml index a6f5dcbf33db4..28d234f2b4e3e 100644 --- a/crates/bevy_internal/Cargo.toml +++ b/crates/bevy_internal/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_internal" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" 
+edition = "2024" description = "An internal Bevy crate used to facilitate optional dynamic linking via the 'dynamic_linking' feature" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -14,11 +14,11 @@ trace = [ "bevy_app/trace", "bevy_asset?/trace", "bevy_core_pipeline?/trace", + "bevy_anti_aliasing?/trace", "bevy_ecs/trace", "bevy_log/trace", "bevy_pbr?/trace", "bevy_render?/trace", - "bevy_hierarchy/trace", "bevy_winit?/trace", ] trace_chrome = ["bevy_log/tracing-chrome"] @@ -30,12 +30,6 @@ sysinfo_plugin = ["bevy_diagnostic/sysinfo_plugin"] # Texture formats that have specific rendering support (HDR enabled by default) basis-universal = ["bevy_image/basis-universal", "bevy_render/basis-universal"] -dds = [ - "bevy_image/dds", - "bevy_render/dds", - "bevy_core_pipeline/dds", - "bevy_gltf/dds", -] exr = ["bevy_image/exr", "bevy_render/exr"] hdr = ["bevy_image/hdr", "bevy_render/hdr"] ktx2 = ["bevy_image/ktx2", "bevy_render/ktx2"] @@ -56,15 +50,20 @@ qoi = ["bevy_image/qoi"] tga = ["bevy_image/tga"] tiff = ["bevy_image/tiff"] webp = ["bevy_image/webp"] +dds = ["bevy_image/dds"] # Enable SPIR-V passthrough spirv_shader_passthrough = ["bevy_render/spirv_shader_passthrough"] +# Statically linked DXC shader compiler for DirectX 12 +# TODO: When wgpu switches to DirectX 12 instead of Vulkan by default on windows, make this a default feature +statically-linked-dxc = ["bevy_render/statically-linked-dxc"] + # Include tonemapping LUT KTX2 files. 
tonemapping_luts = ["bevy_core_pipeline/tonemapping_luts"] # Include SMAA LUT KTX2 Files -smaa_luts = ["bevy_core_pipeline/smaa_luts"] +smaa_luts = ["bevy_anti_aliasing/smaa_luts"] # Audio format support (vorbis is enabled by default) flac = ["bevy_audio/flac"] @@ -85,27 +84,31 @@ shader_format_glsl = [ "bevy_pbr?/shader_format_glsl", ] shader_format_spirv = ["bevy_render/shader_format_spirv"] +shader_format_wesl = ["bevy_render/shader_format_wesl"] serialize = [ + "bevy_a11y?/serialize", "bevy_color?/serialize", "bevy_ecs/serialize", + "bevy_image?/serialize", "bevy_input/serialize", "bevy_math/serialize", "bevy_scene?/serialize", - "bevy_sprite?/serialize", "bevy_time/serialize", "bevy_transform/serialize", "bevy_ui?/serialize", "bevy_window?/serialize", "bevy_winit?/serialize", + "bevy_platform/serialize", ] multi_threaded = [ + "std", "bevy_asset?/multi_threaded", "bevy_ecs/multi_threaded", "bevy_render?/multi_threaded", "bevy_tasks/multi_threaded", ] -async-io = ["bevy_tasks/async-io"] +async-io = ["std", "bevy_tasks/async-io"] # Display server protocol support (X11 is enabled by default) wayland = ["bevy_winit/wayland"] @@ -136,9 +139,16 @@ pbr_anisotropy_texture = [ # Percentage-closer soft shadows experimental_pbr_pcss = ["bevy_pbr?/experimental_pbr_pcss"] +# Specular textures in `StandardMaterial`: +pbr_specular_textures = [ + "bevy_pbr?/pbr_specular_textures", + "bevy_gltf?/pbr_specular_textures", +] + # Optimise for WebGL2 webgl = [ "bevy_core_pipeline?/webgl", + "bevy_anti_aliasing?/webgl", "bevy_pbr?/webgl", "bevy_render?/webgl", "bevy_gizmos?/webgl", @@ -147,6 +157,7 @@ webgl = [ webgpu = [ "bevy_core_pipeline?/webgpu", + "bevy_anti_aliasing?/webgpu", "bevy_pbr?/webgpu", "bevy_render?/webgpu", "bevy_gizmos?/webgpu", @@ -163,6 +174,7 @@ bevy_sprite = ["dep:bevy_sprite", "bevy_gizmos?/bevy_sprite", "bevy_image"] bevy_pbr = ["dep:bevy_pbr", "bevy_gizmos?/bevy_pbr", "bevy_image"] bevy_window = ["dep:bevy_window", "dep:bevy_a11y"] bevy_core_pipeline = 
["dep:bevy_core_pipeline", "bevy_image"] +bevy_anti_aliasing = ["dep:bevy_anti_aliasing", "bevy_image"] bevy_gizmos = ["dep:bevy_gizmos", "bevy_image"] bevy_gltf = ["dep:bevy_gltf", "bevy_image"] bevy_ui = ["dep:bevy_ui", "bevy_image"] @@ -185,6 +197,8 @@ bevy_render = [ "bevy_scene?/bevy_render", "bevy_gizmos?/bevy_render", "bevy_image", + "bevy_color/wgpu-types", + "bevy_color/encase", ] # Enable assertions to check the validity of parameters passed to glam @@ -220,7 +234,7 @@ meshlet_processor = ["bevy_pbr?/meshlet_processor"] bevy_dev_tools = ["dep:bevy_dev_tools"] # Enable support for the Bevy Remote Protocol -bevy_remote = ["dep:bevy_remote"] +bevy_remote = ["dep:bevy_remote", "serialize"] # Provides picking functionality bevy_picking = ["dep:bevy_picking"] @@ -243,14 +257,11 @@ bevy_ui_picking_backend = ["bevy_picking", "bevy_ui/bevy_ui_picking_backend"] # Provides a UI debug overlay bevy_ui_debug = ["bevy_ui?/bevy_ui_debug"] -# Enable support for the ios_simulator by downgrading some rendering capabilities -ios_simulator = ["bevy_pbr?/ios_simulator", "bevy_render?/ios_simulator"] - # Enable built in global state machines bevy_state = ["dep:bevy_state"] # Enables source location tracking for change detection, which can assist with debugging -track_change_detection = ["bevy_ecs/track_change_detection"] +track_location = ["bevy_ecs/track_location"] # Enable function reflection reflect_functions = [ @@ -259,56 +270,161 @@ reflect_functions = [ "bevy_ecs/reflect_functions", ] +# Enable documentation reflection +reflect_documentation = ["bevy_reflect/documentation"] + # Enable winit custom cursor support custom_cursor = ["bevy_winit/custom_cursor"] # Experimental support for nodes that are ignored for UI layouting ghost_nodes = ["bevy_ui/ghost_nodes"] +# Use the configurable global error handler as the default error handler. +configurable_error_handler = ["bevy_ecs/configurable_error_handler"] + +# Allows access to the `std` crate. 
Enabling this feature will prevent compilation +# on `no_std` targets, but provides access to certain additional features on +# supported platforms. +std = [ + "bevy_a11y?/std", + "bevy_app/std", + "bevy_color?/std", + "bevy_diagnostic/std", + "bevy_ecs/std", + "bevy_input/std", + "bevy_input_focus?/std", + "bevy_math/std", + "bevy_platform/std", + "bevy_reflect/std", + "bevy_state?/std", + "bevy_time/std", + "bevy_transform/std", + "bevy_utils/std", + "bevy_tasks/std", + "bevy_window?/std", +] + +# `critical-section` provides the building blocks for synchronization primitives +# on all platforms, including `no_std`. +critical-section = [ + "bevy_a11y?/critical-section", + "bevy_app/critical-section", + "bevy_diagnostic/critical-section", + "bevy_ecs/critical-section", + "bevy_input/critical-section", + "bevy_input_focus?/critical-section", + "bevy_platform/critical-section", + "bevy_reflect/critical-section", + "bevy_state?/critical-section", + "bevy_time/critical-section", + "bevy_utils/critical-section", + "bevy_tasks/critical-section", +] + +# Uses the `libm` maths library instead of the one provided in `std` and `core`. +libm = [ + "bevy_color?/libm", + "bevy_input/libm", + "bevy_input_focus?/libm", + "bevy_math/libm", + "bevy_transform/libm", + "bevy_window?/libm", +] + +# Uses `async-executor` as a task execution backend. +# This backend is incompatible with `no_std` targets. +async_executor = [ + "std", + "bevy_tasks/async_executor", + "bevy_ecs/async_executor", + "bevy_transform/async_executor", +] + +# Enables use of browser APIs. +# Note this is currently only applicable on `wasm32` architectures. 
+web = [ + "bevy_app/web", + "bevy_platform/web", + "bevy_reflect/web", + "bevy_tasks/web", +] + [dependencies] -# bevy -bevy_a11y = { path = "../bevy_a11y", version = "0.15.0-dev", optional = true } -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_state = { path = "../bevy_state", optional = true, version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_input = { path = "../bevy_input", version = "0.15.0-dev" } -bevy_input_focus = { path = "../bevy_input_focus", version = "0.15.0-dev" } -bevy_log = { path = "../bevy_log", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev", features = [ +# bevy (no_std) +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev", default-features = false } +bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev", default-features = false } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] } +bevy_input = { path = "../bevy_input", version = "0.16.0-dev", default-features = false, features = [ "bevy_reflect", ] } -bevy_ptr = { path = "../bevy_ptr", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_math = { path = "../bevy_math", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", + "nostd-libm", +] } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "alloc", +] } +bevy_ptr = { path = "../bevy_ptr", version = "0.16.0-dev", default-features = false } +bevy_reflect = { 
path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "smallvec", +] } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", ] } -bevy_time = { path = "../bevy_time", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev", optional = true } -bevy_tasks = { path = "../bevy_tasks", version = "0.15.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev", default-features = false, features = [ + "bevy-support", + "bevy_reflect", +] } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false, features = [ + "alloc", +] } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false } + +# bevy (std required) +bevy_log = { path = "../bevy_log", version = "0.16.0-dev", optional = true } + # bevy (optional) -bevy_animation = { path = "../bevy_animation", optional = true, version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", optional = true, version = "0.15.0-dev" } -bevy_audio = { path = "../bevy_audio", optional = true, version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", optional = true, version = "0.15.0-dev" } -bevy_core_pipeline = { path = "../bevy_core_pipeline", optional = true, version = "0.15.0-dev" } -bevy_dev_tools = { path = "../bevy_dev_tools", optional = true, version = "0.15.0-dev" } -bevy_gilrs = { path = "../bevy_gilrs", optional = true, version = "0.15.0-dev" } -bevy_gizmos = { path = "../bevy_gizmos", optional = true, version = "0.15.0-dev", default-features = false } -bevy_gltf = { path = "../bevy_gltf", optional = true, version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", optional = true, version = "0.15.0-dev" } -bevy_pbr = { path = "../bevy_pbr", optional = true, 
version = "0.15.0-dev" } -bevy_picking = { path = "../bevy_picking", optional = true, version = "0.15.0-dev" } -bevy_remote = { path = "../bevy_remote", optional = true, version = "0.15.0-dev" } -bevy_render = { path = "../bevy_render", optional = true, version = "0.15.0-dev" } -bevy_scene = { path = "../bevy_scene", optional = true, version = "0.15.0-dev" } -bevy_sprite = { path = "../bevy_sprite", optional = true, version = "0.15.0-dev" } -bevy_text = { path = "../bevy_text", optional = true, version = "0.15.0-dev" } -bevy_ui = { path = "../bevy_ui", optional = true, version = "0.15.0-dev" } -bevy_winit = { path = "../bevy_winit", optional = true, version = "0.15.0-dev" } +bevy_a11y = { path = "../bevy_a11y", optional = true, version = "0.16.0-dev", features = [ + "bevy_reflect", +] } +bevy_animation = { path = "../bevy_animation", optional = true, version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", optional = true, version = "0.16.0-dev" } +bevy_audio = { path = "../bevy_audio", optional = true, version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "alloc", + "bevy_reflect", +] } +bevy_core_pipeline = { path = "../bevy_core_pipeline", optional = true, version = "0.16.0-dev" } +bevy_anti_aliasing = { path = "../bevy_anti_aliasing", optional = true, version = "0.16.0-dev" } +bevy_dev_tools = { path = "../bevy_dev_tools", optional = true, version = "0.16.0-dev" } +bevy_gilrs = { path = "../bevy_gilrs", optional = true, version = "0.16.0-dev" } +bevy_gizmos = { path = "../bevy_gizmos", optional = true, version = "0.16.0-dev", default-features = false } +bevy_gltf = { path = "../bevy_gltf", optional = true, version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", optional = true, version = "0.16.0-dev" } +bevy_input_focus = { path = "../bevy_input_focus", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] 
} +bevy_pbr = { path = "../bevy_pbr", optional = true, version = "0.16.0-dev" } +bevy_picking = { path = "../bevy_picking", optional = true, version = "0.16.0-dev" } +bevy_remote = { path = "../bevy_remote", optional = true, version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", optional = true, version = "0.16.0-dev" } +bevy_scene = { path = "../bevy_scene", optional = true, version = "0.16.0-dev" } +bevy_sprite = { path = "../bevy_sprite", optional = true, version = "0.16.0-dev" } +bevy_state = { path = "../bevy_state", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "bevy_app", + "bevy_reflect", +] } +bevy_text = { path = "../bevy_text", optional = true, version = "0.16.0-dev" } +bevy_ui = { path = "../bevy_ui", optional = true, version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", optional = true, version = "0.16.0-dev", default-features = false, features = [ + "bevy_reflect", +] } +bevy_winit = { path = "../bevy_winit", optional = true, version = "0.16.0-dev", default-features = false } [lints] workspace = true diff --git a/crates/bevy_internal/LICENSE-APACHE b/crates/bevy_internal/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_internal/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_internal/LICENSE-MIT b/crates/bevy_internal/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_internal/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_internal/src/default_plugins.rs b/crates/bevy_internal/src/default_plugins.rs index bbfa4b1a566bc..db1152a362e31 100644 --- a/crates/bevy_internal/src/default_plugins.rs +++ b/crates/bevy_internal/src/default_plugins.rs @@ -4,12 +4,12 @@ plugin_group! 
{ /// This plugin group will add all the default plugins for a *Bevy* application: pub struct DefaultPlugins { bevy_app:::PanicHandlerPlugin, + #[cfg(feature = "bevy_log")] bevy_log:::LogPlugin, bevy_app:::TaskPoolPlugin, bevy_diagnostic:::FrameCountPlugin, bevy_time:::TimePlugin, bevy_transform:::TransformPlugin, - bevy_hierarchy:::HierarchyPlugin, bevy_diagnostic:::DiagnosticsPlugin, bevy_input:::InputPlugin, #[custom(cfg(not(feature = "bevy_window")))] @@ -18,7 +18,8 @@ plugin_group! { bevy_window:::WindowPlugin, #[cfg(feature = "bevy_window")] bevy_a11y:::AccessibilityPlugin, - #[custom(cfg(not(target_arch = "wasm32")))] + #[cfg(feature = "std")] + #[custom(cfg(any(unix, windows)))] bevy_app:::TerminalCtrlCHandlerPlugin, #[cfg(feature = "bevy_asset")] bevy_asset:::AssetPlugin, @@ -37,6 +38,8 @@ plugin_group! { bevy_render::pipelined_rendering:::PipelinedRenderingPlugin, #[cfg(feature = "bevy_core_pipeline")] bevy_core_pipeline:::CorePipelinePlugin, + #[cfg(feature = "bevy_anti_aliasing")] + bevy_anti_aliasing:::AntiAliasingPlugin, #[cfg(feature = "bevy_sprite")] bevy_sprite:::SpritePlugin, #[cfg(feature = "bevy_text")] @@ -82,7 +85,14 @@ plugin_group! { struct IgnoreAmbiguitiesPlugin; impl Plugin for IgnoreAmbiguitiesPlugin { - #[allow(unused_variables)] // Variables are used depending on enabled features + #[expect( + clippy::allow_attributes, + reason = "`unused_variables` is not always linted" + )] + #[allow( + unused_variables, + reason = "The `app` parameter is used only if a combination of crates that contain ambiguities with each other are enabled." + )] fn build(&self, app: &mut bevy_app::App) { // bevy_ui owns the Transform and cannot be animated #[cfg(all(feature = "bevy_animation", feature = "bevy_ui"))] @@ -119,4 +129,17 @@ plugin_group! 
{ /// It includes a [schedule runner (`ScheduleRunnerPlugin`)](crate::app::ScheduleRunnerPlugin) /// to provide functionality that would otherwise be driven by a windowed application's /// *event loop* or *message loop*. + /// + /// By default, this loop will run as fast as possible, which can result in high CPU usage. + /// You can add a delay using [`run_loop`](crate::app::ScheduleRunnerPlugin::run_loop), + /// or remove the loop using [`run_once`](crate::app::ScheduleRunnerPlugin::run_once). + /// # Example: + /// ```rust, no_run + /// # use std::time::Duration; + /// # use bevy_app::{App, PluginGroup, ScheduleRunnerPlugin}; + /// # use bevy_internal::MinimalPlugins; + /// App::new().add_plugins(MinimalPlugins.set(ScheduleRunnerPlugin::run_loop( + /// // Run 60 times per second. + /// Duration::from_secs_f64(1.0 / 60.0), + /// ))).run(); } diff --git a/crates/bevy_internal/src/lib.rs b/crates/bevy_internal/src/lib.rs index e9d25adbdf8bb..07dd936ab1dcc 100644 --- a/crates/bevy_internal/src/lib.rs +++ b/crates/bevy_internal/src/lib.rs @@ -4,6 +4,7 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] +#![no_std] //! 
This module is separated into its own crate to enable simple dynamic linking for Bevy, and should not be used directly @@ -17,6 +18,8 @@ pub use default_plugins::*; pub use bevy_a11y as a11y; #[cfg(feature = "bevy_animation")] pub use bevy_animation as animation; +#[cfg(feature = "bevy_anti_aliasing")] +pub use bevy_anti_aliasing as anti_aliasing; pub use bevy_app as app; #[cfg(feature = "bevy_asset")] pub use bevy_asset as asset; @@ -36,17 +39,19 @@ pub use bevy_gilrs as gilrs; pub use bevy_gizmos as gizmos; #[cfg(feature = "bevy_gltf")] pub use bevy_gltf as gltf; -pub use bevy_hierarchy as hierarchy; #[cfg(feature = "bevy_image")] pub use bevy_image as image; pub use bevy_input as input; +#[cfg(feature = "bevy_input_focus")] pub use bevy_input_focus as input_focus; +#[cfg(feature = "bevy_log")] pub use bevy_log as log; pub use bevy_math as math; #[cfg(feature = "bevy_pbr")] pub use bevy_pbr as pbr; #[cfg(feature = "bevy_picking")] pub use bevy_picking as picking; +pub use bevy_platform as platform; pub use bevy_ptr as ptr; pub use bevy_reflect as reflect; #[cfg(feature = "bevy_remote")] diff --git a/crates/bevy_internal/src/prelude.rs b/crates/bevy_internal/src/prelude.rs index 42c84f134a124..26d5c7e2af0f5 100644 --- a/crates/bevy_internal/src/prelude.rs +++ b/crates/bevy_internal/src/prelude.rs @@ -1,10 +1,14 @@ #[doc(hidden)] pub use crate::{ - app::prelude::*, ecs::prelude::*, hierarchy::prelude::*, input::prelude::*, log::prelude::*, - math::prelude::*, reflect::prelude::*, time::prelude::*, transform::prelude::*, - utils::prelude::*, DefaultPlugins, MinimalPlugins, + app::prelude::*, ecs::prelude::*, input::prelude::*, math::prelude::*, platform::prelude::*, + reflect::prelude::*, time::prelude::*, transform::prelude::*, utils::prelude::*, + DefaultPlugins, MinimalPlugins, }; +#[doc(hidden)] +#[cfg(feature = "bevy_log")] +pub use crate::log::prelude::*; + #[doc(hidden)] #[cfg(feature = "bevy_window")] pub use crate::window::prelude::*; diff --git 
a/crates/bevy_log/Cargo.toml b/crates/bevy_log/Cargo.toml index 173d13e6bab76..cc7c53e676826 100644 --- a/crates/bevy_log/Cargo.toml +++ b/crates/bevy_log/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_log" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides logging for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -13,10 +13,12 @@ trace = ["tracing-error"] trace_tracy_memory = ["dep:tracy-client"] [dependencies] -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } +# bevy +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +# other tracing-subscriber = { version = "0.3.1", features = [ "registry", "env-filter", @@ -24,17 +26,22 @@ tracing-subscriber = { version = "0.3.1", features = [ tracing-chrome = { version = "0.7.0", optional = true } tracing-log = "0.2.0" tracing-error = { version = "0.2.0", optional = true } +tracing = { version = "0.1", default-features = false, features = ["std"] } # Tracy dependency compatibility table: # https://github.com/nagisa/rust_tracy_client -tracing-tracy = { version = "0.11.0", optional = true } -tracy-client = { version = "0.17.0", optional = true } +tracing-tracy = { version = "0.11.4", optional = true } +tracy-client = { version = "0.18.0", optional = true } [target.'cfg(target_os = "android")'.dependencies] android_log-sys = "0.3.0" [target.'cfg(target_arch = "wasm32")'.dependencies] tracing-wasm = "0.2.1" +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. 
+bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } [target.'cfg(target_os = "ios")'.dependencies] tracing-oslog = "0.2" diff --git a/crates/bevy_log/LICENSE-APACHE b/crates/bevy_log/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_log/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_log/LICENSE-MIT b/crates/bevy_log/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_log/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_log/src/android_tracing.rs b/crates/bevy_log/src/android_tracing.rs index 3b6649feaf614..ba0b3b7a27a38 100644 --- a/crates/bevy_log/src/android_tracing.rs +++ b/crates/bevy_log/src/android_tracing.rs @@ -1,10 +1,10 @@ use alloc::ffi::CString; -use bevy_utils::tracing::{ +use core::fmt::{Debug, Write}; +use tracing::{ field::Field, span::{Attributes, Record}, Event, Id, Level, Subscriber, }; -use core::fmt::{Debug, Write}; use tracing_subscriber::{field::Visit, layer::Context, registry::LookupSpan, Layer}; #[derive(Default)] diff --git a/crates/bevy_log/src/lib.rs b/crates/bevy_log/src/lib.rs index 3b98a2c23199f..055395bad7206 100644 --- a/crates/bevy_log/src/lib.rs +++ b/crates/bevy_log/src/lib.rs @@ -22,6 +22,7 @@ use core::error::Error; #[cfg(target_os = "android")] mod android_tracing; +mod once; #[cfg(feature = "trace_tracy_memory")] #[global_allocator] @@ -33,21 +34,21 @@ static GLOBAL: tracy_client::ProfiledAllocator = /// This includes the most common types in this crate, re-exported for your convenience. 
pub mod prelude { #[doc(hidden)] - pub use bevy_utils::tracing::{ + pub use tracing::{ debug, debug_span, error, error_span, info, info_span, trace, trace_span, warn, warn_span, }; #[doc(hidden)] - pub use bevy_utils::{debug_once, error_once, info_once, once, trace_once, warn_once}; + pub use crate::{debug_once, error_once, info_once, trace_once, warn_once}; + + #[doc(hidden)] + pub use bevy_utils::once; } -pub use bevy_utils::{ - debug_once, error_once, info_once, once, trace_once, - tracing::{ - debug, debug_span, error, error_span, info, info_span, trace, trace_span, warn, warn_span, - Level, - }, - warn_once, +pub use bevy_utils::once; +pub use tracing::{ + self, debug, debug_span, error, error_span, info, info_span, trace, trace_span, warn, + warn_span, Level, }; pub use tracing_subscriber; @@ -61,7 +62,7 @@ use tracing_subscriber::{ }; #[cfg(feature = "tracing-chrome")] use { - bevy_ecs::system::Resource, + bevy_ecs::resource::Resource, bevy_utils::synccell::SyncCell, tracing_subscriber::fmt::{format::DefaultFields, FormattedFields}, }; @@ -79,17 +80,17 @@ pub(crate) struct FlushGuard(SyncCell); /// Adds logging to Apps. This plugin is part of the `DefaultPlugins`. Adding /// this plugin will setup a collector appropriate to your target platform: /// * Using [`tracing-subscriber`](https://crates.io/crates/tracing-subscriber) by default, -/// logging to `stdout`. +/// logging to `stdout`. /// * Using [`android_log-sys`](https://crates.io/crates/android_log-sys) on Android, -/// logging to Android logs. +/// logging to Android logs. /// * Using [`tracing-wasm`](https://crates.io/crates/tracing-wasm) in Wasm, logging -/// to the browser console. +/// to the browser console. /// /// You can configure this plugin. 
/// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup}; /// # use bevy_log::LogPlugin; -/// # use bevy_utils::tracing::Level; +/// # use tracing::Level; /// fn main() { /// App::new() /// .add_plugins(DefaultPlugins.set(LogPlugin { @@ -116,7 +117,10 @@ pub(crate) struct FlushGuard(SyncCell); /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup}; /// # use bevy_log::LogPlugin; /// fn main() { +/// # // SAFETY: Single-threaded +/// # unsafe { /// std::env::set_var("NO_COLOR", "1"); +/// # } /// App::new() /// .add_plugins(DefaultPlugins) /// .run(); @@ -134,7 +138,70 @@ pub(crate) struct FlushGuard(SyncCell); /// .run(); /// } /// ``` +/// # Example Setup +/// +/// For a quick setup that enables all first-party logging while not showing any of your dependencies' +/// log data, you can configure the plugin as shown below. +/// +/// ```no_run +/// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup}; +/// # use bevy_log::*; +/// App::new() +/// .add_plugins(DefaultPlugins.set(LogPlugin { +/// filter: "warn,my_crate=trace".to_string(), //specific filters +/// level: Level::TRACE,//Change this to be globally change levels +/// ..Default::default() +/// })) +/// .run(); +/// ``` +/// The filter (in this case an `EnvFilter`) chooses whether to print the log. The most specific filters apply with higher priority. +/// Let's start with an example: `filter: "warn".to_string()` will only print logs with level `warn` level or greater. +/// From here, we can change to `filter: "warn,my_crate=trace".to_string()`. Logs will print at level `warn` unless it's in `mycrate`, +/// which will instead print at `trace` level because `my_crate=trace` is more specific. +/// +/// +/// ## Log levels +/// Events can be logged at various levels of importance. +/// Only events at your configured log level and higher will be shown. 
+/// ```no_run +/// # use bevy_log::*; +/// // here is how you write new logs at each "log level" (in "most important" to +/// // "least important" order) +/// error!("something failed"); +/// warn!("something bad happened that isn't a failure, but that's worth calling out"); +/// info!("helpful information that is worth printing by default"); +/// debug!("helpful for debugging"); +/// trace!("very noisy"); +/// ``` +/// In addition to `format!` style arguments, you can print a variable's debug +/// value by using syntax like: `trace(?my_value)`. +/// +/// ## Per module logging levels +/// Modules can have different logging levels using syntax like `crate_name::module_name=debug`. +/// +/// +/// ```no_run +/// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup}; +/// # use bevy_log::*; +/// App::new() +/// .add_plugins(DefaultPlugins.set(LogPlugin { +/// filter: "warn,my_crate=trace,my_crate::my_module=debug".to_string(), // Specific filters +/// level: Level::TRACE, // Change this to be globally change levels +/// ..Default::default() +/// })) +/// .run(); +/// ``` +/// The idea is that instead of deleting logs when they are no longer immediately applicable, +/// you just disable them. If you do need to log in the future, then you can enable the logs instead of having to rewrite them. +/// +/// ## Further reading /// +/// The `tracing` crate has much more functionality than these examples can show. +/// Much of this configuration can be done with "layers" in the `log` crate. +/// Check out: +/// - Using spans to add more fine grained filters to logs +/// - Adding instruments to capture more function information +/// - Creating layers to add additional context such as line numbers /// # Panics /// /// This plugin should not be added multiple times in the same process. This plugin @@ -169,7 +236,7 @@ pub struct LogPlugin { /// Because [`BoxedLayer`] takes a `dyn Layer`, `Vec` is also an acceptable return value. 
/// /// Access to [`App`] is also provided to allow for communication between the - /// [`Subscriber`](bevy_utils::tracing::Subscriber) and the [`App`]. + /// [`Subscriber`](tracing::Subscriber) and the [`App`]. /// /// Please see the `examples/log_layers.rs` for a complete example. pub custom_layer: fn(app: &mut App) -> Option, @@ -192,6 +259,7 @@ impl Default for LogPlugin { } impl Plugin for LogPlugin { + #[expect(clippy::print_stderr, reason = "Allowed during logger setup")] fn build(&self, app: &mut App) { #[cfg(feature = "trace")] { @@ -301,7 +369,7 @@ impl Plugin for LogPlugin { let logger_already_set = LogTracer::init().is_err(); let subscriber_already_set = - bevy_utils::tracing::subscriber::set_global_default(finished_subscriber).is_err(); + tracing::subscriber::set_global_default(finished_subscriber).is_err(); match (logger_already_set, subscriber_already_set) { (true, true) => error!( diff --git a/crates/bevy_log/src/once.rs b/crates/bevy_log/src/once.rs new file mode 100644 index 0000000000000..ad53b62c6c0aa --- /dev/null +++ b/crates/bevy_log/src/once.rs @@ -0,0 +1,49 @@ +/// Call [`trace!`](crate::trace) once per call site. +/// +/// Useful for logging within systems which are called every frame. +#[macro_export] +macro_rules! trace_once { + ($($arg:tt)+) => ({ + $crate::once!($crate::trace!($($arg)+)) + }); +} + +/// Call [`debug!`](crate::debug) once per call site. +/// +/// Useful for logging within systems which are called every frame. +#[macro_export] +macro_rules! debug_once { + ($($arg:tt)+) => ({ + $crate::once!($crate::debug!($($arg)+)) + }); +} + +/// Call [`info!`](crate::info) once per call site. +/// +/// Useful for logging within systems which are called every frame. +#[macro_export] +macro_rules! info_once { + ($($arg:tt)+) => ({ + $crate::once!($crate::info!($($arg)+)) + }); +} + +/// Call [`warn!`](crate::warn) once per call site. +/// +/// Useful for logging within systems which are called every frame. +#[macro_export] +macro_rules! 
warn_once { + ($($arg:tt)+) => ({ + $crate::once!($crate::warn!($($arg)+)) + }); +} + +/// Call [`error!`](crate::error) once per call site. +/// +/// Useful for logging within systems which are called every frame. +#[macro_export] +macro_rules! error_once { + ($($arg:tt)+) => ({ + $crate::once!($crate::error!($($arg)+)) + }); +} diff --git a/crates/bevy_macro_utils/Cargo.toml b/crates/bevy_macro_utils/Cargo.toml index 4b135f205e4f2..36be75234901b 100644 --- a/crates/bevy_macro_utils/Cargo.toml +++ b/crates/bevy_macro_utils/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_macro_utils" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "A collection of utils for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,12 +9,13 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [dependencies] -toml_edit = { version = "0.22.7", default-features = false, features = [ - "parse", -] } syn = "2.0" quote = "1.0" proc-macro2 = "1.0" +toml_edit = { version = "0.22.7", default-features = false, features = [ + "parse", +] } +parking_lot = { version = "0.12" } [lints] workspace = true diff --git a/crates/bevy_macro_utils/LICENSE-APACHE b/crates/bevy_macro_utils/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_macro_utils/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_macro_utils/LICENSE-MIT b/crates/bevy_macro_utils/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_macro_utils/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_macro_utils/src/bevy_manifest.rs b/crates/bevy_macro_utils/src/bevy_manifest.rs index 2185f09252d0f..b0d321ba2215c 100644 --- a/crates/bevy_macro_utils/src/bevy_manifest.rs +++ b/crates/bevy_macro_utils/src/bevy_manifest.rs @@ -1,62 +1,97 @@ extern crate proc_macro; +use alloc::collections::BTreeMap; +use parking_lot::{lock_api::RwLockReadGuard, MappedRwLockReadGuard, RwLock, RwLockWriteGuard}; use proc_macro::TokenStream; -use std::{env, path::PathBuf, sync::LazyLock}; -use toml_edit::{DocumentMut, Item}; +use std::{ + env, + path::{Path, PathBuf}, + time::SystemTime, +}; +use toml_edit::{ImDocument, Item}; /// The path to the `Cargo.toml` file for the Bevy project. +#[derive(Debug)] pub struct BevyManifest { - manifest: DocumentMut, + manifest: ImDocument>, + modified_time: SystemTime, } const BEVY: &str = "bevy"; -const BEVY_INTERNAL: &str = "bevy_internal"; impl BevyManifest { /// Returns a global shared instance of the [`BevyManifest`] struct. - pub fn shared() -> &'static LazyLock { - static LAZY_SELF: LazyLock = LazyLock::new(|| BevyManifest { - manifest: env::var_os("CARGO_MANIFEST_DIR") - .map(PathBuf::from) - .map(|mut path| { - path.push("Cargo.toml"); - if !path.exists() { - panic!( - "No Cargo manifest found for crate. 
Expected: {}", - path.display() - ); - } - let manifest = std::fs::read_to_string(path.clone()).unwrap_or_else(|_| { - panic!("Unable to read cargo manifest: {}", path.display()) - }); - manifest.parse::().unwrap_or_else(|_| { - panic!("Failed to parse cargo manifest: {}", path.display()) - }) - }) - .expect("CARGO_MANIFEST_DIR is not defined."), - }); - &LAZY_SELF + pub fn shared() -> MappedRwLockReadGuard<'static, BevyManifest> { + static MANIFESTS: RwLock> = RwLock::new(BTreeMap::new()); + let manifest_path = Self::get_manifest_path(); + let modified_time = Self::get_manifest_modified_time(&manifest_path) + .expect("The Cargo.toml should have a modified time"); + + if let Ok(manifest) = + RwLockReadGuard::try_map(MANIFESTS.read(), |manifests| manifests.get(&manifest_path)) + { + if manifest.modified_time == modified_time { + return manifest; + } + } + + let manifest = BevyManifest { + manifest: Self::read_manifest(&manifest_path), + modified_time, + }; + + let key = manifest_path.clone(); + let mut manifests = MANIFESTS.write(); + manifests.insert(key, manifest); + + RwLockReadGuard::map(RwLockWriteGuard::downgrade(manifests), |manifests| { + manifests.get(&manifest_path).unwrap() + }) + } + + fn get_manifest_path() -> PathBuf { + env::var_os("CARGO_MANIFEST_DIR") + .map(|path| { + let mut path = PathBuf::from(path); + path.push("Cargo.toml"); + assert!( + path.exists(), + "Cargo manifest does not exist at path {}", + path.display() + ); + path + }) + .expect("CARGO_MANIFEST_DIR is not defined.") + } + + fn get_manifest_modified_time( + cargo_manifest_path: &Path, + ) -> Result { + std::fs::metadata(cargo_manifest_path).and_then(|metadata| metadata.modified()) + } + + fn read_manifest(path: &Path) -> ImDocument> { + let manifest = std::fs::read_to_string(path) + .unwrap_or_else(|_| panic!("Unable to read cargo manifest: {}", path.display())) + .into_boxed_str(); + ImDocument::parse(manifest) + .unwrap_or_else(|_| panic!("Failed to parse cargo manifest: {}", 
path.display())) } /// Attempt to retrieve the [path](syn::Path) of a particular package in /// the [manifest](BevyManifest) by [name](str). pub fn maybe_get_path(&self, name: &str) -> Option { - fn dep_package(dep: &Item) -> Option<&str> { - if dep.as_str().is_some() { - None - } else { - dep.get("package").map(|name| name.as_str().unwrap()) - } - } - let find_in_deps = |deps: &Item| -> Option { - let package = if let Some(dep) = deps.get(name) { - return Some(Self::parse_str(dep_package(dep).unwrap_or(name))); - } else if let Some(dep) = deps.get(BEVY) { - dep_package(dep).unwrap_or(BEVY) - } else if let Some(dep) = deps.get(BEVY_INTERNAL) { - dep_package(dep).unwrap_or(BEVY_INTERNAL) + let package = if deps.get(name).is_some() { + return Some(Self::parse_str(name)); + } else if deps.get(BEVY).is_some() { + BEVY } else { + // Note: to support bevy crate aliases, we could do scanning here to find a crate with a "package" name that + // matches our request, but that would then mean we are scanning every dependency (and dev dependency) for every + // macro execution that hits this branch (which includes all built-in bevy crates). Our current stance is that supporting + // remapped crate names in derive macros is not worth that "compile time" price of admission. As a workaround, people aliasing + // bevy crate names can use "use REMAPPED as bevy_X" or "use REMAPPED::x as bevy_x". return None; }; @@ -74,17 +109,17 @@ impl BevyManifest { .or_else(|| deps_dev.and_then(find_in_deps)) } + /// Attempt to parse the provided [path](str) as a [syntax tree node](syn::parse::Parse) + pub fn try_parse_str(path: &str) -> Option { + syn::parse(path.parse::().ok()?).ok() + } + /// Returns the path for the crate with the given name. 
pub fn get_path(&self, name: &str) -> syn::Path { self.maybe_get_path(name) .unwrap_or_else(|| Self::parse_str(name)) } - /// Attempt to parse the provided [path](str) as a [syntax tree node](syn::parse::Parse) - pub fn try_parse_str(path: &str) -> Option { - syn::parse(path.parse::().ok()?).ok() - } - /// Attempt to parse provided [path](str) as a [syntax tree node](syn::parse::Parse). /// /// # Panics @@ -95,18 +130,4 @@ impl BevyManifest { pub fn parse_str(path: &str) -> T { Self::try_parse_str(path).unwrap() } - - /// Attempt to get a subcrate [path](syn::Path) under Bevy by [name](str) - pub fn get_subcrate(&self, subcrate: &str) -> Option { - self.maybe_get_path(BEVY) - .map(|bevy_path| { - let mut segments = bevy_path.segments; - segments.push(BevyManifest::parse_str(subcrate)); - syn::Path { - leading_colon: None, - segments, - } - }) - .or_else(|| self.maybe_get_path(&format!("bevy_{subcrate}"))) - } } diff --git a/crates/bevy_macro_utils/src/lib.rs b/crates/bevy_macro_utils/src/lib.rs index 28de7e2227e26..aa386101f1983 100644 --- a/crates/bevy_macro_utils/src/lib.rs +++ b/crates/bevy_macro_utils/src/lib.rs @@ -7,6 +7,7 @@ //! A collection of helper types and functions for working on macros within the Bevy ecosystem. 
+extern crate alloc; extern crate proc_macro; mod attrs; diff --git a/crates/bevy_math/Cargo.toml b/crates/bevy_math/Cargo.toml index c444004521606..7aae1ec74be45 100644 --- a/crates/bevy_math/Cargo.toml +++ b/crates/bevy_math/Cargo.toml @@ -1,22 +1,22 @@ [package] name = "bevy_math" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides math functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy"] -rust-version = "1.81.0" +rust-version = "1.85.0" [dependencies] -glam = { version = "0.29", default-features = false, features = ["bytemuck"] } +glam = { version = "0.29.3", default-features = false, features = ["bytemuck"] } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = [ "from", "into", ] } -itertools = { version = "0.13.0", default-features = false } +itertools = { version = "0.14.0", default-features = false } serde = { version = "1", default-features = false, features = [ "derive", ], optional = true } @@ -25,7 +25,7 @@ approx = { version = "0.5", default-features = false, optional = true } rand = { version = "0.8", default-features = false, optional = true } rand_distr = { version = "0.4.3", optional = true } smallvec = { version = "1.11" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ "glam", ], optional = true } variadics_please = "1.1" @@ -36,10 +36,8 @@ approx = "0.5" rand = "0.8" rand_chacha = "0.3" # Enable the approx feature when testing. 
-bevy_math = { path = ".", version = "0.15.0-dev", default-features = false, features = [ - "approx", -] } -glam = { version = "0.29", default-features = false, features = ["approx"] } +bevy_math = { path = ".", default-features = false, features = ["approx"] } +glam = { version = "0.29.3", default-features = false, features = ["approx"] } [features] default = ["std", "rand", "curve"] @@ -52,6 +50,7 @@ std = [ "approx?/std", "rand?/std", "rand_distr?/std", + "bevy_reflect?/std", ] alloc = [ "itertools/use_alloc", @@ -76,8 +75,11 @@ debug_glam_assert = ["glam/debug-glam-assert"] rand = ["dep:rand", "dep:rand_distr", "glam/rand"] # Include code related to the Curve trait curve = [] -# Enable bevy_reflect (requires std) -bevy_reflect = ["dep:bevy_reflect", "std"] +# Enable bevy_reflect (requires alloc) +bevy_reflect = ["dep:bevy_reflect", "alloc"] +# Enable libm mathematical functions as a fallback for no_std environments. +# Can be overridden with std feature. +nostd-libm = ["dep:libm", "glam/nostd-libm"] [lints] workspace = true diff --git a/crates/bevy_math/LICENSE-APACHE b/crates/bevy_math/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_math/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_math/LICENSE-MIT b/crates/bevy_math/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_math/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_math/clippy.toml b/crates/bevy_math/clippy.toml index 0fb122e4dcef6..c1f67e044d8ec 100644 --- a/crates/bevy_math/clippy.toml +++ b/crates/bevy_math/clippy.toml @@ -34,5 +34,6 @@ disallowed-methods = [ { path = "f32::copysign", reason = "use ops::copysign instead for no_std compatibility" }, { path = "f32::round", reason = "use ops::round instead for no_std compatibility" }, { path = "f32::floor", reason = "use ops::floor instead for no_std compatibility" }, + { path = "f32::ceil", reason = "use ops::ceil instead for no_std compatibility" }, { path = "f32::fract", reason = "use ops::fract instead for no_std compatibility" }, ] diff --git a/crates/bevy_math/images/easefunction/BackIn.svg b/crates/bevy_math/images/easefunction/BackIn.svg new file mode 100644 index 0000000000000..63a776ea4aaaa --- /dev/null +++ b/crates/bevy_math/images/easefunction/BackIn.svg @@ -0,0 +1,5 @@ + +BackIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/BackInOut.svg b/crates/bevy_math/images/easefunction/BackInOut.svg new file mode 100644 index 0000000000000..8695c42f719ae --- /dev/null +++ b/crates/bevy_math/images/easefunction/BackInOut.svg @@ -0,0 +1,5 @@ + +BackInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/BackOut.svg b/crates/bevy_math/images/easefunction/BackOut.svg new file mode 100644 index 0000000000000..9479d0311b2f5 --- /dev/null +++ b/crates/bevy_math/images/easefunction/BackOut.svg @@ -0,0 +1,5 @@ + +BackOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/BothSteps.svg b/crates/bevy_math/images/easefunction/BothSteps.svg new file mode 100644 index 0000000000000..92090fa5d4116 --- /dev/null +++ b/crates/bevy_math/images/easefunction/BothSteps.svg @@ -0,0 +1,5 @@ + +BothSteps(4, Both) + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/BounceIn.svg b/crates/bevy_math/images/easefunction/BounceIn.svg new file 
mode 100644 index 0000000000000..e5c22a6095a32 --- /dev/null +++ b/crates/bevy_math/images/easefunction/BounceIn.svg @@ -0,0 +1,5 @@ + +BounceIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/BounceInOut.svg b/crates/bevy_math/images/easefunction/BounceInOut.svg new file mode 100644 index 0000000000000..9aaf4e1e0aca3 --- /dev/null +++ b/crates/bevy_math/images/easefunction/BounceInOut.svg @@ -0,0 +1,5 @@ + +BounceInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/BounceOut.svg b/crates/bevy_math/images/easefunction/BounceOut.svg new file mode 100644 index 0000000000000..080eb0d198dce --- /dev/null +++ b/crates/bevy_math/images/easefunction/BounceOut.svg @@ -0,0 +1,5 @@ + +BounceOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/CircularIn.svg b/crates/bevy_math/images/easefunction/CircularIn.svg new file mode 100644 index 0000000000000..3d8c7e05792a5 --- /dev/null +++ b/crates/bevy_math/images/easefunction/CircularIn.svg @@ -0,0 +1,5 @@ + +CircularIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/CircularInOut.svg b/crates/bevy_math/images/easefunction/CircularInOut.svg new file mode 100644 index 0000000000000..de2de720ebde4 --- /dev/null +++ b/crates/bevy_math/images/easefunction/CircularInOut.svg @@ -0,0 +1,5 @@ + +CircularInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/CircularOut.svg b/crates/bevy_math/images/easefunction/CircularOut.svg new file mode 100644 index 0000000000000..740a59cd68f83 --- /dev/null +++ b/crates/bevy_math/images/easefunction/CircularOut.svg @@ -0,0 +1,5 @@ + +CircularOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/CubicIn.svg b/crates/bevy_math/images/easefunction/CubicIn.svg new file mode 100644 index 0000000000000..f8acbc762a19e --- /dev/null +++ b/crates/bevy_math/images/easefunction/CubicIn.svg @@ -0,0 
+1,5 @@ + +CubicIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/CubicInOut.svg b/crates/bevy_math/images/easefunction/CubicInOut.svg new file mode 100644 index 0000000000000..3fa5859f94b19 --- /dev/null +++ b/crates/bevy_math/images/easefunction/CubicInOut.svg @@ -0,0 +1,5 @@ + +CubicInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/CubicOut.svg b/crates/bevy_math/images/easefunction/CubicOut.svg new file mode 100644 index 0000000000000..1c4f1abc558b1 --- /dev/null +++ b/crates/bevy_math/images/easefunction/CubicOut.svg @@ -0,0 +1,5 @@ + +CubicOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/Elastic.svg b/crates/bevy_math/images/easefunction/Elastic.svg new file mode 100644 index 0000000000000..fa187328cb65c --- /dev/null +++ b/crates/bevy_math/images/easefunction/Elastic.svg @@ -0,0 +1,5 @@ + +Elastic(50.0) + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/ElasticIn.svg b/crates/bevy_math/images/easefunction/ElasticIn.svg new file mode 100644 index 0000000000000..faa95a79a2eb8 --- /dev/null +++ b/crates/bevy_math/images/easefunction/ElasticIn.svg @@ -0,0 +1,5 @@ + +ElasticIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/ElasticInOut.svg b/crates/bevy_math/images/easefunction/ElasticInOut.svg new file mode 100644 index 0000000000000..e0f0527a4f2c2 --- /dev/null +++ b/crates/bevy_math/images/easefunction/ElasticInOut.svg @@ -0,0 +1,5 @@ + +ElasticInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/ElasticOut.svg b/crates/bevy_math/images/easefunction/ElasticOut.svg new file mode 100644 index 0000000000000..17817a6712a34 --- /dev/null +++ b/crates/bevy_math/images/easefunction/ElasticOut.svg @@ -0,0 +1,5 @@ + +ElasticOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/EndSteps.svg 
b/crates/bevy_math/images/easefunction/EndSteps.svg new file mode 100644 index 0000000000000..dafe6825fef07 --- /dev/null +++ b/crates/bevy_math/images/easefunction/EndSteps.svg @@ -0,0 +1,5 @@ + +EndSteps(4, End) + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/ExponentialIn.svg b/crates/bevy_math/images/easefunction/ExponentialIn.svg new file mode 100644 index 0000000000000..784cc6ccd558f --- /dev/null +++ b/crates/bevy_math/images/easefunction/ExponentialIn.svg @@ -0,0 +1,5 @@ + +ExponentialIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/ExponentialInOut.svg b/crates/bevy_math/images/easefunction/ExponentialInOut.svg new file mode 100644 index 0000000000000..3cc55941b090d --- /dev/null +++ b/crates/bevy_math/images/easefunction/ExponentialInOut.svg @@ -0,0 +1,5 @@ + +ExponentialInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/ExponentialOut.svg b/crates/bevy_math/images/easefunction/ExponentialOut.svg new file mode 100644 index 0000000000000..0fb9f8720e4ad --- /dev/null +++ b/crates/bevy_math/images/easefunction/ExponentialOut.svg @@ -0,0 +1,5 @@ + +ExponentialOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/Linear.svg b/crates/bevy_math/images/easefunction/Linear.svg new file mode 100644 index 0000000000000..99adedf3e8275 --- /dev/null +++ b/crates/bevy_math/images/easefunction/Linear.svg @@ -0,0 +1,5 @@ + +Linear + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/NoneSteps.svg b/crates/bevy_math/images/easefunction/NoneSteps.svg new file mode 100644 index 0000000000000..8434f4126b7b3 --- /dev/null +++ b/crates/bevy_math/images/easefunction/NoneSteps.svg @@ -0,0 +1,5 @@ + +NoneSteps(4, None) + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuadraticIn.svg b/crates/bevy_math/images/easefunction/QuadraticIn.svg new file mode 100644 index 
0000000000000..15ba4403144b7 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuadraticIn.svg @@ -0,0 +1,5 @@ + +QuadraticIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuadraticInOut.svg b/crates/bevy_math/images/easefunction/QuadraticInOut.svg new file mode 100644 index 0000000000000..f2f1098aaedde --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuadraticInOut.svg @@ -0,0 +1,5 @@ + +QuadraticInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuadraticOut.svg b/crates/bevy_math/images/easefunction/QuadraticOut.svg new file mode 100644 index 0000000000000..5b9d9a1b6c170 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuadraticOut.svg @@ -0,0 +1,5 @@ + +QuadraticOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuarticIn.svg b/crates/bevy_math/images/easefunction/QuarticIn.svg new file mode 100644 index 0000000000000..6028e35542820 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuarticIn.svg @@ -0,0 +1,5 @@ + +QuarticIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuarticInOut.svg b/crates/bevy_math/images/easefunction/QuarticInOut.svg new file mode 100644 index 0000000000000..7afd6c74b4296 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuarticInOut.svg @@ -0,0 +1,5 @@ + +QuarticInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuarticOut.svg b/crates/bevy_math/images/easefunction/QuarticOut.svg new file mode 100644 index 0000000000000..f9fac715350e7 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuarticOut.svg @@ -0,0 +1,5 @@ + +QuarticOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuinticIn.svg b/crates/bevy_math/images/easefunction/QuinticIn.svg new file mode 100644 index 0000000000000..9dfaa926bf5a8 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuinticIn.svg @@ 
-0,0 +1,5 @@ + +QuinticIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuinticInOut.svg b/crates/bevy_math/images/easefunction/QuinticInOut.svg new file mode 100644 index 0000000000000..732b9db365e91 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuinticInOut.svg @@ -0,0 +1,5 @@ + +QuinticInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/QuinticOut.svg b/crates/bevy_math/images/easefunction/QuinticOut.svg new file mode 100644 index 0000000000000..76da9d2e9f020 --- /dev/null +++ b/crates/bevy_math/images/easefunction/QuinticOut.svg @@ -0,0 +1,5 @@ + +QuinticOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/README.md b/crates/bevy_math/images/easefunction/README.md new file mode 100644 index 0000000000000..e341ea0e14bb3 --- /dev/null +++ b/crates/bevy_math/images/easefunction/README.md @@ -0,0 +1,3 @@ +# EaseFunction + +These graphs are auto-generated via `tools/build-easefunction-graphs`. 
diff --git a/crates/bevy_math/images/easefunction/SineIn.svg b/crates/bevy_math/images/easefunction/SineIn.svg new file mode 100644 index 0000000000000..e9630ca1d80f1 --- /dev/null +++ b/crates/bevy_math/images/easefunction/SineIn.svg @@ -0,0 +1,5 @@ + +SineIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SineInOut.svg b/crates/bevy_math/images/easefunction/SineInOut.svg new file mode 100644 index 0000000000000..20e251e7da6c7 --- /dev/null +++ b/crates/bevy_math/images/easefunction/SineInOut.svg @@ -0,0 +1,5 @@ + +SineInOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SineOut.svg b/crates/bevy_math/images/easefunction/SineOut.svg new file mode 100644 index 0000000000000..2cf451d7849ef --- /dev/null +++ b/crates/bevy_math/images/easefunction/SineOut.svg @@ -0,0 +1,5 @@ + +SineOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SmoothStep.svg b/crates/bevy_math/images/easefunction/SmoothStep.svg new file mode 100644 index 0000000000000..74d8770317457 --- /dev/null +++ b/crates/bevy_math/images/easefunction/SmoothStep.svg @@ -0,0 +1,5 @@ + +SmoothStep + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SmoothStepIn.svg b/crates/bevy_math/images/easefunction/SmoothStepIn.svg new file mode 100644 index 0000000000000..84363c6448e54 --- /dev/null +++ b/crates/bevy_math/images/easefunction/SmoothStepIn.svg @@ -0,0 +1,5 @@ + +SmoothStepIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SmoothStepOut.svg b/crates/bevy_math/images/easefunction/SmoothStepOut.svg new file mode 100644 index 0000000000000..09256cbd65533 --- /dev/null +++ b/crates/bevy_math/images/easefunction/SmoothStepOut.svg @@ -0,0 +1,5 @@ + +SmoothStepOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SmootherStep.svg b/crates/bevy_math/images/easefunction/SmootherStep.svg new file mode 100644 
index 0000000000000..9c00abead6f70 --- /dev/null +++ b/crates/bevy_math/images/easefunction/SmootherStep.svg @@ -0,0 +1,5 @@ + +SmootherStep + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SmootherStepIn.svg b/crates/bevy_math/images/easefunction/SmootherStepIn.svg new file mode 100644 index 0000000000000..5af3150057425 --- /dev/null +++ b/crates/bevy_math/images/easefunction/SmootherStepIn.svg @@ -0,0 +1,5 @@ + +SmootherStepIn + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/SmootherStepOut.svg b/crates/bevy_math/images/easefunction/SmootherStepOut.svg new file mode 100644 index 0000000000000..5c9df92500afa --- /dev/null +++ b/crates/bevy_math/images/easefunction/SmootherStepOut.svg @@ -0,0 +1,5 @@ + +SmootherStepOut + + + \ No newline at end of file diff --git a/crates/bevy_math/images/easefunction/StartSteps.svg b/crates/bevy_math/images/easefunction/StartSteps.svg new file mode 100644 index 0000000000000..476a17d364ad3 --- /dev/null +++ b/crates/bevy_math/images/easefunction/StartSteps.svg @@ -0,0 +1,5 @@ + +StartSteps(4, Start) + + + \ No newline at end of file diff --git a/crates/bevy_math/src/aspect_ratio.rs b/crates/bevy_math/src/aspect_ratio.rs index 0289957164bf2..7b7ae6d3bad04 100644 --- a/crates/bevy_math/src/aspect_ratio.rs +++ b/crates/bevy_math/src/aspect_ratio.rs @@ -9,7 +9,11 @@ use bevy_reflect::Reflect; /// An `AspectRatio` is the ratio of width to height. 
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Into)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] pub struct AspectRatio(f32); impl AspectRatio { diff --git a/crates/bevy_math/src/bounding/bounded2d/mod.rs b/crates/bevy_math/src/bounding/bounded2d/mod.rs index c5be831a86f86..bea18f5808481 100644 --- a/crates/bevy_math/src/bounding/bounded2d/mod.rs +++ b/crates/bevy_math/src/bounding/bounded2d/mod.rs @@ -9,6 +9,10 @@ use crate::{ #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; +#[cfg(all(feature = "bevy_reflect", feature = "serialize"))] +use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; +#[cfg(feature = "serialize")] +use serde::{Deserialize, Serialize}; /// Computes the geometric center of the given set of points. #[inline(always)] @@ -32,8 +36,17 @@ pub trait Bounded2d { /// A 2D axis-aligned bounding box, or bounding rectangle #[doc(alias = "BoundingRectangle")] -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), + reflect(Serialize, Deserialize) +)] pub struct Aabb2d { /// The minimum, conventionally bottom-left, point of the box pub min: Vec2, @@ -450,8 +463,17 @@ mod aabb2d_tests { use crate::primitives::Circle; /// A bounding circle -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))] +#[cfg_attr( + 
all(feature = "serialize", feature = "bevy_reflect"), + reflect(Serialize, Deserialize) +)] pub struct BoundingCircle { /// The center of the bounding circle pub center: Vec2, diff --git a/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs b/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs index 8f55a6fb0d778..f55f40ddc6c87 100644 --- a/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs +++ b/crates/bevy_math/src/bounding/bounded2d/primitive_impls.rs @@ -1,10 +1,12 @@ //! Contains [`Bounded2d`] implementations for [geometric primitives](crate::primitives). use crate::{ + bounding::BoundingVolume, ops, primitives::{ - Annulus, Arc2d, Capsule2d, Circle, CircularSector, CircularSegment, Ellipse, Line2d, - Plane2d, Polygon, Polyline2d, Rectangle, RegularPolygon, Rhombus, Segment2d, Triangle2d, + Annulus, Arc2d, Capsule2d, Circle, CircularSector, CircularSegment, ConvexPolygon, Ellipse, + Line2d, Plane2d, Polygon, Polyline2d, Rectangle, RegularPolygon, Rhombus, Segment2d, + Triangle2d, }, Dir2, Isometry2d, Mat2, Rot2, Vec2, }; @@ -51,10 +53,12 @@ fn arc_bounding_points(arc: Arc2d, rotation: impl Into) -> SmallVec<[Vec2; // If inverted = true, then right_angle > left_angle, so we are looking for an angle that is not between them. // There's a chance that this condition fails due to rounding error, if the endpoint angle is juuuust shy of the axis. // But in that case, the endpoint itself is within rounding error of the axis and will define the bounds just fine. 
- #[allow(clippy::nonminimal_bool)] - if !inverted && angle >= right_angle && angle <= left_angle - || inverted && (angle >= right_angle || angle <= left_angle) - { + let angle_within_parameters = if inverted { + angle >= right_angle || angle <= left_angle + } else { + angle >= right_angle && angle <= left_angle + }; + if angle_within_parameters { bounds.push(extremum * arc.radius); } } @@ -263,18 +267,15 @@ impl Bounded2d for Line2d { impl Bounded2d for Segment2d { fn aabb_2d(&self, isometry: impl Into) -> Aabb2d { - let isometry = isometry.into(); - - // Rotate the segment by `rotation` - let direction = isometry.rotation * *self.direction; - let half_size = (self.half_length * direction).abs(); - - Aabb2d::new(isometry.translation, half_size) + Aabb2d::from_point_cloud(isometry, &[self.point1(), self.point2()]) } fn bounding_circle(&self, isometry: impl Into) -> BoundingCircle { - let isometry = isometry.into(); - BoundingCircle::new(isometry.translation, self.half_length) + let isometry: Isometry2d = isometry.into(); + let local_center = self.center(); + let radius = local_center.distance(self.point1()); + let local_circle = BoundingCircle::new(local_center, radius); + local_circle.transformed_by(isometry.translation, isometry.rotation) } } @@ -334,8 +335,8 @@ impl Bounded2d for Triangle2d { if let Some((point1, point2)) = side_opposite_to_non_acute { // The triangle is obtuse or right, so the minimum bounding circle's diameter is equal to the longest side. // We can compute the minimum bounding circle from the line segment of the longest side. - let (segment, center) = Segment2d::from_points(point1, point2); - segment.bounding_circle(isometry * Isometry2d::from_translation(center)) + let segment = Segment2d::new(point1, point2); + segment.bounding_circle(isometry) } else { // The triangle is acute, so the smallest bounding circle is the circumcircle. 
let (Circle { radius }, circumcenter) = self.circumcircle(); @@ -375,6 +376,16 @@ impl Bounded2d for Polygon { } } +impl Bounded2d for ConvexPolygon { + fn aabb_2d(&self, isometry: impl Into) -> Aabb2d { + Aabb2d::from_point_cloud(isometry, self.vertices().as_slice()) + } + + fn bounding_circle(&self, isometry: impl Into) -> BoundingCircle { + BoundingCircle::from_point_cloud(isometry, self.vertices().as_slice()) + } +} + #[cfg(feature = "alloc")] impl Bounded2d for BoxedPolygon { fn aabb_2d(&self, isometry: impl Into) -> Aabb2d { @@ -415,11 +426,10 @@ impl Bounded2d for Capsule2d { let isometry = isometry.into(); // Get the line segment between the semicircles of the rotated capsule - let segment = Segment2d { - // Multiplying a normalized vector (Vec2::Y) with a rotation returns a normalized vector. - direction: isometry.rotation * Dir2::Y, - half_length: self.half_length, - }; + let segment = Segment2d::from_direction_and_length( + isometry.rotation * Dir2::Y, + self.half_length * 2., + ); let (a, b) = (segment.point1(), segment.point2()); // Expand the line segment by the capsule radius to get the capsule half-extents @@ -439,8 +449,10 @@ impl Bounded2d for Capsule2d { } #[cfg(test)] +#[expect(clippy::print_stdout, reason = "Allowed in tests.")] mod tests { use core::f32::consts::{FRAC_PI_2, FRAC_PI_3, FRAC_PI_4, FRAC_PI_6, TAU}; + use std::println; use approx::assert_abs_diff_eq; use glam::Vec2; @@ -475,7 +487,6 @@ mod tests { // Arcs and circular segments have the same bounding shapes so they share test cases. 
fn arc_and_segment() { struct TestCase { - #[allow(unused)] name: &'static str, arc: Arc2d, translation: Vec2, @@ -629,7 +640,6 @@ mod tests { #[test] fn circular_sector() { struct TestCase { - #[allow(unused)] name: &'static str, arc: Arc2d, translation: Vec2, @@ -650,7 +660,7 @@ mod tests { let apothem = ops::sqrt(3.0) / 2.0; let inv_sqrt_3 = ops::sqrt(3.0).recip(); let tests = [ - // Test case: An sector whose arc is minor, but whose bounding circle is not the circumcircle of the endpoints and center + // Test case: A sector whose arc is minor, but whose bounding circle is not the circumcircle of the endpoints and center TestCase { name: "1/3rd circle", arc: Arc2d::from_radians(1.0, TAU / 3.0), @@ -883,9 +893,9 @@ mod tests { #[test] fn segment() { + let segment = Segment2d::new(Vec2::new(-1.0, -0.5), Vec2::new(1.0, 0.5)); let translation = Vec2::new(2.0, 1.0); let isometry = Isometry2d::from_translation(translation); - let segment = Segment2d::from_points(Vec2::new(-1.0, -0.5), Vec2::new(1.0, 0.5)).0; let aabb = segment.aabb_2d(isometry); assert_eq!(aabb.min, Vec2::new(1.0, 0.5)); diff --git a/crates/bevy_math/src/bounding/bounded3d/extrusion.rs b/crates/bevy_math/src/bounding/bounded3d/extrusion.rs index 5403fa176ea53..607d0f27464f3 100644 --- a/crates/bevy_math/src/bounding/bounded3d/extrusion.rs +++ b/crates/bevy_math/src/bounding/bounded3d/extrusion.rs @@ -348,7 +348,10 @@ mod tests { #[test] fn segment() { - let extrusion = Extrusion::new(Segment2d::new(Dir2::new_unchecked(Vec2::NEG_Y), 3.), 4.0); + let extrusion = Extrusion::new( + Segment2d::new(Vec2::new(0.0, -1.5), Vec2::new(0.0, 1.5)), + 4.0, + ); let translation = Vec3::new(3., 4., 5.); let rotation = Quat::from_rotation_x(FRAC_PI_4); let isometry = Isometry3d::new(translation, rotation); diff --git a/crates/bevy_math/src/bounding/bounded3d/mod.rs b/crates/bevy_math/src/bounding/bounded3d/mod.rs index c4f3c979f67cb..5a95b7711f647 100644 --- a/crates/bevy_math/src/bounding/bounded3d/mod.rs +++ 
b/crates/bevy_math/src/bounding/bounded3d/mod.rs @@ -11,6 +11,11 @@ use crate::{ #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; +#[cfg(all(feature = "bevy_reflect", feature = "serialize"))] +use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; +#[cfg(feature = "serialize")] +use serde::{Deserialize, Serialize}; + pub use extrusion::BoundedExtrusion; /// Computes the geometric center of the given set of points. @@ -36,8 +41,17 @@ pub trait Bounded3d { } /// A 3D axis-aligned bounding box -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), + reflect(Serialize, Deserialize) +)] pub struct Aabb3d { /// The minimum point of the box pub min: Vec3A, @@ -456,8 +470,17 @@ mod aabb3d_tests { use crate::primitives::Sphere; /// A bounding sphere -#[derive(Clone, Copy, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[derive(Clone, Copy, Debug, PartialEq)] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] +#[cfg_attr(feature = "serialize", derive(Serialize), derive(Deserialize))] +#[cfg_attr( + all(feature = "serialize", feature = "bevy_reflect"), + reflect(Serialize, Deserialize) +)] pub struct BoundingSphere { /// The center of the bounding sphere pub center: Vec3A, diff --git a/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs b/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs index 679d8577f0d41..ebfd0266e81d0 100644 --- a/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs +++ b/crates/bevy_math/src/bounding/bounded3d/primitive_impls.rs @@ -1,7 +1,7 @@ //! 
Contains [`Bounded3d`] implementations for [geometric primitives](crate::primitives). use crate::{ - bounding::{Bounded2d, BoundingCircle}, + bounding::{Bounded2d, BoundingCircle, BoundingVolume}, ops, primitives::{ Capsule3d, Cone, ConicalFrustum, Cuboid, Cylinder, InfinitePlane3d, Line3d, Polyline3d, @@ -76,18 +76,13 @@ impl Bounded3d for Line3d { impl Bounded3d for Segment3d { fn aabb_3d(&self, isometry: impl Into) -> Aabb3d { - let isometry = isometry.into(); - - // Rotate the segment by `rotation` - let direction = isometry.rotation * *self.direction; - let half_size = (self.half_length * direction).abs(); - - Aabb3d::new(isometry.translation, half_size) + Aabb3d::from_point_cloud(isometry, [self.point1(), self.point2()].iter().copied()) } fn bounding_sphere(&self, isometry: impl Into) -> BoundingSphere { let isometry = isometry.into(); - BoundingSphere::new(isometry.translation, self.half_length) + let local_sphere = BoundingSphere::new(self.center(), self.length() / 2.); + local_sphere.transformed_by(isometry.translation, isometry.rotation) } } @@ -462,11 +457,9 @@ mod tests { #[test] fn segment() { + let segment = Segment3d::new(Vec3::new(-1.0, -0.5, 0.0), Vec3::new(1.0, 0.5, 0.0)); let translation = Vec3::new(2.0, 1.0, 0.0); - let segment = - Segment3d::from_points(Vec3::new(-1.0, -0.5, 0.0), Vec3::new(1.0, 0.5, 0.0)).0; - let aabb = segment.aabb_3d(translation); assert_eq!(aabb.min, Vec3A::new(1.0, 0.5, 0.0)); assert_eq!(aabb.max, Vec3A::new(3.0, 1.5, 0.0)); diff --git a/crates/bevy_math/src/bounding/raycast2d.rs b/crates/bevy_math/src/bounding/raycast2d.rs index 3b46bcfba62c3..e1def01936735 100644 --- a/crates/bevy_math/src/bounding/raycast2d.rs +++ b/crates/bevy_math/src/bounding/raycast2d.rs @@ -9,7 +9,7 @@ use bevy_reflect::Reflect; /// A raycast intersection test for 2D bounding volumes #[derive(Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, 
Clone))] pub struct RayCast2d { /// The ray for the test pub ray: Ray2d, @@ -109,7 +109,7 @@ impl IntersectsVolume for RayCast2d { /// An intersection test that casts an [`Aabb2d`] along a ray. #[derive(Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct AabbCast2d { /// The ray along which to cast the bounding volume pub ray: RayCast2d, @@ -147,7 +147,7 @@ impl IntersectsVolume for AabbCast2d { /// An intersection test that casts a [`BoundingCircle`] along a ray. #[derive(Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct BoundingCircleCast { /// The ray along which to cast the bounding volume pub ray: RayCast2d, diff --git a/crates/bevy_math/src/bounding/raycast3d.rs b/crates/bevy_math/src/bounding/raycast3d.rs index bfd5d17a0dd8c..9086837f60bcc 100644 --- a/crates/bevy_math/src/bounding/raycast3d.rs +++ b/crates/bevy_math/src/bounding/raycast3d.rs @@ -9,7 +9,7 @@ use bevy_reflect::Reflect; /// A raycast intersection test for 3D bounding volumes #[derive(Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct RayCast3d { /// The origin of the ray. pub origin: Vec3A, @@ -106,7 +106,7 @@ impl IntersectsVolume for RayCast3d { /// An intersection test that casts an [`Aabb3d`] along a ray. #[derive(Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct AabbCast3d { /// The ray along which to cast the bounding volume pub ray: RayCast3d, @@ -151,7 +151,7 @@ impl IntersectsVolume for AabbCast3d { /// An intersection test that casts a [`BoundingSphere`] along a ray. 
#[derive(Clone, Debug)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct BoundingSphereCast { /// The ray along which to cast the bounding volume pub ray: RayCast3d, diff --git a/crates/bevy_math/src/common_traits.rs b/crates/bevy_math/src/common_traits.rs index a9a8ef910a86e..4e127f4026fe3 100644 --- a/crates/bevy_math/src/common_traits.rs +++ b/crates/bevy_math/src/common_traits.rs @@ -80,6 +80,8 @@ impl VectorSpace for f32 { /// /// [vector spaces]: VectorSpace #[derive(Debug, Clone, Copy)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] pub struct Sum(pub V, pub W); impl Mul for Sum @@ -424,6 +426,9 @@ pub trait HasTangent { } /// A value with its derivative. +#[derive(Debug, Clone, Copy)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] pub struct WithDerivative where T: HasTangent, @@ -436,6 +441,9 @@ where } /// A value together with its first and second derivatives. 
+#[derive(Debug, Clone, Copy)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] pub struct WithTwoDerivatives where T: HasTangent, diff --git a/crates/bevy_math/src/compass.rs b/crates/bevy_math/src/compass.rs index 5ee224df4b118..ea3d74c93917a 100644 --- a/crates/bevy_math/src/compass.rs +++ b/crates/bevy_math/src/compass.rs @@ -1,3 +1,5 @@ +use core::ops::Neg; + use crate::Dir2; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; @@ -18,7 +20,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// ``` #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Hash, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Deserialize, Serialize) @@ -34,6 +40,45 @@ pub enum CompassQuadrant { West, } +impl CompassQuadrant { + /// Converts a standard index to a [`CompassQuadrant`]. + /// + /// Starts at 0 for [`CompassQuadrant::North`] and increments clockwise. + pub const fn from_index(index: usize) -> Option { + match index { + 0 => Some(Self::North), + 1 => Some(Self::East), + 2 => Some(Self::South), + 3 => Some(Self::West), + _ => None, + } + } + + /// Converts a [`CompassQuadrant`] to a standard index. + /// + /// Starts at 0 for [`CompassQuadrant::North`] and increments clockwise. + pub const fn to_index(self) -> usize { + match self { + Self::North => 0, + Self::East => 1, + Self::South => 2, + Self::West => 3, + } + } + + /// Returns the opposite [`CompassQuadrant`], located 180 degrees from `self`. + /// + /// This can also be accessed via the `-` operator, using the [`Neg`] trait. 
+ pub const fn opposite(&self) -> CompassQuadrant { + match self { + Self::North => Self::South, + Self::East => Self::West, + Self::South => Self::North, + Self::West => Self::East, + } + } +} + /// A compass enum with 8 directions. /// ```text /// N (North) @@ -48,7 +93,11 @@ pub enum CompassQuadrant { /// ``` #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Hash, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Deserialize, Serialize) @@ -72,6 +121,57 @@ pub enum CompassOctant { NorthWest, } +impl CompassOctant { + /// Converts a standard index to a [`CompassOctant`]. + /// + /// Starts at 0 for [`CompassOctant::North`] and increments clockwise. + pub const fn from_index(index: usize) -> Option { + match index { + 0 => Some(Self::North), + 1 => Some(Self::NorthEast), + 2 => Some(Self::East), + 3 => Some(Self::SouthEast), + 4 => Some(Self::South), + 5 => Some(Self::SouthWest), + 6 => Some(Self::West), + 7 => Some(Self::NorthWest), + _ => None, + } + } + + /// Converts a [`CompassOctant`] to a standard index. + /// + /// Starts at 0 for [`CompassOctant::North`] and increments clockwise. + pub const fn to_index(self) -> usize { + match self { + Self::North => 0, + Self::NorthEast => 1, + Self::East => 2, + Self::SouthEast => 3, + Self::South => 4, + Self::SouthWest => 5, + Self::West => 6, + Self::NorthWest => 7, + } + } + + /// Returns the opposite [`CompassOctant`], located 180 degrees from `self`. + /// + /// This can also be accessed via the `-` operator, using the [`Neg`] trait. 
+ pub const fn opposite(&self) -> CompassOctant { + match self { + Self::North => Self::South, + Self::NorthEast => Self::SouthWest, + Self::East => Self::West, + Self::SouthEast => Self::NorthWest, + Self::South => Self::North, + Self::SouthWest => Self::NorthEast, + Self::West => Self::East, + Self::NorthWest => Self::SouthEast, + } + } +} + impl From for Dir2 { fn from(q: CompassQuadrant) -> Self { match q { @@ -134,6 +234,22 @@ impl From for CompassOctant { } } +impl Neg for CompassQuadrant { + type Output = CompassQuadrant; + + fn neg(self) -> Self::Output { + self.opposite() + } +} + +impl Neg for CompassOctant { + type Output = CompassOctant; + + fn neg(self) -> Self::Output { + self.opposite() + } +} + #[cfg(test)] mod test_compass_quadrant { use crate::{CompassQuadrant, Dir2, Vec2}; @@ -235,6 +351,29 @@ mod test_compass_quadrant { assert_eq!(CompassQuadrant::from(dir), expected); } } + + #[test] + fn out_of_bounds_indexes_return_none() { + assert_eq!(CompassQuadrant::from_index(4), None); + assert_eq!(CompassQuadrant::from_index(5), None); + assert_eq!(CompassQuadrant::from_index(usize::MAX), None); + } + + #[test] + fn compass_indexes_are_reversible() { + for i in 0..4 { + let quadrant = CompassQuadrant::from_index(i).unwrap(); + assert_eq!(quadrant.to_index(), i); + } + } + + #[test] + fn opposite_directions_reverse_themselves() { + for i in 0..4 { + let quadrant = CompassQuadrant::from_index(i).unwrap(); + assert_eq!(-(-quadrant), quadrant); + } + } } #[cfg(test)] @@ -420,4 +559,27 @@ mod test_compass_octant { assert_eq!(CompassOctant::from(dir), expected); } } + + #[test] + fn out_of_bounds_indexes_return_none() { + assert_eq!(CompassOctant::from_index(8), None); + assert_eq!(CompassOctant::from_index(9), None); + assert_eq!(CompassOctant::from_index(usize::MAX), None); + } + + #[test] + fn compass_indexes_are_reversible() { + for i in 0..8 { + let octant = CompassOctant::from_index(i).unwrap(); + assert_eq!(octant.to_index(), i); + } + } + + #[test] + 
fn opposite_directions_reverse_themselves() { + for i in 0..8 { + let octant = CompassOctant::from_index(i).unwrap(); + assert_eq!(-(-octant), octant); + } + } } diff --git a/crates/bevy_math/src/cubic_splines/mod.rs b/crates/bevy_math/src/cubic_splines/mod.rs index ecc0f789c6c5c..6f60de774adb3 100644 --- a/crates/bevy_math/src/cubic_splines/mod.rs +++ b/crates/bevy_math/src/cubic_splines/mod.rs @@ -15,7 +15,7 @@ use {alloc::vec, alloc::vec::Vec, core::iter::once, itertools::Itertools}; /// A spline composed of a single cubic Bezier curve. /// /// Useful for user-drawn curves with local control, or animation easing. See -/// [`CubicSegment::new_bezier`] for use in easing. +/// [`CubicSegment::new_bezier_easing`] for use in easing. /// /// ### Interpolation /// @@ -51,7 +51,7 @@ use {alloc::vec, alloc::vec::Vec, core::iter::once, itertools::Itertools}; /// ``` #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicBezier { /// The control points of the Bezier curve. pub control_points: Vec<[P; 4]>, @@ -73,20 +73,10 @@ impl CubicGenerator

for CubicBezier

{ #[inline] fn to_curve(&self) -> Result, Self::Error> { - // A derivation for this matrix can be found in "General Matrix Representations for B-splines" by Kaihuai Qin. - // - // See section 4.2 and equation 11. - let char_matrix = [ - [1., 0., 0., 0.], - [-3., 3., 0., 0.], - [3., -6., 3., 0.], - [-1., 3., -3., 1.], - ]; - let segments = self .control_points .iter() - .map(|p| CubicSegment::coefficients(*p, char_matrix)) + .map(|p| CubicSegment::new_bezier(*p)) .collect_vec(); if segments.is_empty() { @@ -151,7 +141,7 @@ pub struct CubicBezierError; /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicHermite { /// The control points of the Hermite curve. pub control_points: Vec<(P, P)>, @@ -280,7 +270,7 @@ impl CyclicCubicGenerator

for CubicHermite

{ /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicCardinalSpline { /// Tension pub tension: f32, @@ -442,7 +432,7 @@ impl CyclicCubicGenerator

for CubicCardinalSpline

{ /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicBSpline { /// The control points of the spline pub control_points: Vec

, @@ -619,7 +609,7 @@ pub enum CubicNurbsError { /// ``` #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicNurbs { /// The control points of the NURBS pub control_points: Vec

, @@ -746,10 +736,9 @@ impl CubicNurbs

{ } let last_knots_value = control_points - 3; Some( - core::iter::repeat(0.0) - .take(4) + core::iter::repeat_n(0.0, 4) .chain((1..last_knots_value).map(|v| v as f32)) - .chain(core::iter::repeat(last_knots_value as f32).take(4)) + .chain(core::iter::repeat_n(last_knots_value as f32, 4)) .collect(), ) } @@ -852,7 +841,7 @@ impl RationalGenerator

for CubicNurbs

{ /// [`to_curve_cyclic`]: CyclicCubicGenerator::to_curve_cyclic #[derive(Clone, Debug)] #[cfg(feature = "alloc")] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct LinearSpline { /// The control points of the linear spline. pub points: Vec

, @@ -963,7 +952,11 @@ pub trait CyclicCubicGenerator { /// [`Curve`]: crate::curve::Curve #[derive(Copy, Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct CubicSegment { /// Polynomial coefficients for the segment. pub coeff: [P; 4], @@ -994,8 +987,21 @@ impl CubicSegment

{ c * 2.0 + d * 6.0 * t } + /// Creates a cubic segment from four points, representing a Bezier curve. + pub fn new_bezier(points: [P; 4]) -> Self { + // A derivation for this matrix can be found in "General Matrix Representations for B-splines" by Kaihuai Qin. + // + // See section 4.2 and equation 11. + let char_matrix = [ + [1., 0., 0., 0.], + [-3., 3., 0., 0.], + [3., -6., 3., 0.], + [-1., 3., -3., 1.], + ]; + Self::coefficients(points, char_matrix) + } + /// Calculate polynomial coefficients for the cubic curve using a characteristic matrix. - #[allow(unused)] #[inline] fn coefficients(p: [P; 4], char_matrix: [[f32; 4]; 4]) -> Self { let [c0, c1, c2, c3] = char_matrix; @@ -1009,6 +1015,46 @@ impl CubicSegment

{ ]; Self { coeff } } + + /// A flexible iterator used to sample curves with arbitrary functions. + /// + /// This splits the curve into `subdivisions` of evenly spaced `t` values across the + /// length of the curve from start (t = 0) to end (t = n), where `n = self.segment_count()`, + /// returning an iterator evaluating the curve with the supplied `sample_function` at each `t`. + /// + /// For `subdivisions = 2`, this will split the curve into two lines, or three points, and + /// return an iterator with 3 items, the three points, one at the start, middle, and end. + #[inline] + pub fn iter_samples<'a, 'b: 'a>( + &'b self, + subdivisions: usize, + mut sample_function: impl FnMut(&Self, f32) -> P + 'a, + ) -> impl Iterator + 'a { + self.iter_uniformly(subdivisions) + .map(move |t| sample_function(self, t)) + } + + /// An iterator that returns values of `t` uniformly spaced over `0..=subdivisions`. + #[inline] + fn iter_uniformly(&self, subdivisions: usize) -> impl Iterator { + let step = 1.0 / subdivisions as f32; + (0..=subdivisions).map(move |i| i as f32 * step) + } + + /// Iterate over the curve split into `subdivisions`, sampling the position at each step. + pub fn iter_positions(&self, subdivisions: usize) -> impl Iterator + '_ { + self.iter_samples(subdivisions, Self::position) + } + + /// Iterate over the curve split into `subdivisions`, sampling the velocity at each step. + pub fn iter_velocities(&self, subdivisions: usize) -> impl Iterator + '_ { + self.iter_samples(subdivisions, Self::velocity) + } + + /// Iterate over the curve split into `subdivisions`, sampling the acceleration at each step. + pub fn iter_accelerations(&self, subdivisions: usize) -> impl Iterator + '_ { + self.iter_samples(subdivisions, Self::acceleration) + } } /// The `CubicSegment` can be used as a 2-dimensional easing curve for animation. @@ -1024,12 +1070,9 @@ impl CubicSegment { /// This is a very common tool for UI animations that accelerate and decelerate smoothly. 
For /// example, the ubiquitous "ease-in-out" is defined as `(0.25, 0.1), (0.25, 1.0)`. #[cfg(feature = "alloc")] - pub fn new_bezier(p1: impl Into, p2: impl Into) -> Self { + pub fn new_bezier_easing(p1: impl Into, p2: impl Into) -> Self { let (p0, p3) = (Vec2::ZERO, Vec2::ONE); - let bezier = CubicBezier::new([[p0, p1.into(), p2.into(), p3]]) - .to_curve() - .unwrap(); // Succeeds because resulting curve is guaranteed to have one segment - bezier.segments[0] + Self::new_bezier([p0, p1.into(), p2.into(), p3]) } /// Maximum allowable error for iterative Bezier solve @@ -1046,7 +1089,7 @@ impl CubicSegment { /// # use bevy_math::prelude::*; /// # #[cfg(feature = "alloc")] /// # { - /// let cubic_bezier = CubicSegment::new_bezier((0.25, 0.1), (0.25, 1.0)); + /// let cubic_bezier = CubicSegment::new_bezier_easing((0.25, 0.1), (0.25, 1.0)); /// assert_eq!(cubic_bezier.ease(0.0), 0.0); /// assert_eq!(cubic_bezier.ease(1.0), 1.0); /// # } @@ -1066,7 +1109,7 @@ impl CubicSegment { /// y /// │ ● /// │ ⬈ - /// │ ⬈ + /// │ ⬈ /// │ ⬈ /// │ ⬈ /// ●─────────── x (time) @@ -1080,8 +1123,8 @@ impl CubicSegment { /// ```text /// y /// ⬈➔● - /// │ ⬈ - /// │ ↑ + /// │ ⬈ + /// │ ↑ /// │ ↑ /// │ ⬈ /// ●➔⬈───────── x (time) @@ -1134,7 +1177,7 @@ impl CubicSegment { #[derive(Clone, Debug, PartialEq)] #[cfg(feature = "alloc")] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct CubicCurve { /// The segments comprising the curve. This must always be nonempty. 
segments: Vec>, @@ -1291,7 +1334,11 @@ pub trait RationalGenerator { /// [`Curve`]: crate::curve::Curve #[derive(Copy, Clone, Debug, Default, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Default))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, Default, Clone) +)] pub struct RationalSegment { /// The coefficients matrix of the cubic curve. pub coeff: [P; 4], @@ -1376,7 +1423,13 @@ impl RationalSegment

{ } /// Calculate polynomial coefficients for the cubic polynomials using a characteristic matrix. - #[allow(unused)] + #[cfg_attr( + not(feature = "alloc"), + expect( + dead_code, + reason = "Method only used when `alloc` feature is enabled." + ) + )] #[inline] fn coefficients( control_points: [P; 4], @@ -1424,7 +1477,7 @@ impl RationalSegment

{ #[derive(Clone, Debug, PartialEq)] #[cfg(feature = "alloc")] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, Clone))] pub struct RationalCurve { /// The segments comprising the curve. This must always be nonempty. segments: Vec>, @@ -1645,7 +1698,7 @@ mod tests { #[test] fn easing_simple() { // A curve similar to ease-in-out, but symmetric - let bezier = CubicSegment::new_bezier([1.0, 0.0], [0.0, 1.0]); + let bezier = CubicSegment::new_bezier_easing([1.0, 0.0], [0.0, 1.0]); assert_eq!(bezier.ease(0.0), 0.0); assert!(bezier.ease(0.2) < 0.2); // tests curve assert_eq!(bezier.ease(0.5), 0.5); // true due to symmetry @@ -1658,7 +1711,7 @@ mod tests { #[test] fn easing_overshoot() { // A curve that forms an upside-down "U", that should extend above 1.0 - let bezier = CubicSegment::new_bezier([0.0, 2.0], [1.0, 2.0]); + let bezier = CubicSegment::new_bezier_easing([0.0, 2.0], [1.0, 2.0]); assert_eq!(bezier.ease(0.0), 0.0); assert!(bezier.ease(0.5) > 1.5); assert_eq!(bezier.ease(1.0), 1.0); @@ -1668,7 +1721,7 @@ mod tests { /// the start and end positions, e.g. bouncing. 
#[test] fn easing_undershoot() { - let bezier = CubicSegment::new_bezier([0.0, -2.0], [1.0, -2.0]); + let bezier = CubicSegment::new_bezier_easing([0.0, -2.0], [1.0, -2.0]); assert_eq!(bezier.ease(0.0), 0.0); assert!(bezier.ease(0.5) < -0.5); assert_eq!(bezier.ease(1.0), 1.0); diff --git a/crates/bevy_math/src/curve/adaptors.rs b/crates/bevy_math/src/curve/adaptors.rs index 20e0bcd29c937..055002c9bbcbe 100644 --- a/crates/bevy_math/src/curve/adaptors.rs +++ b/crates/bevy_math/src/curve/adaptors.rs @@ -10,7 +10,10 @@ use core::fmt::{self, Debug}; use core::marker::PhantomData; #[cfg(feature = "bevy_reflect")] -use bevy_reflect::{utility::GenericTypePathCell, FromReflect, Reflect, TypePath}; +use { + alloc::format, + bevy_reflect::{utility::GenericTypePathCell, FromReflect, Reflect, TypePath}, +}; #[cfg(feature = "bevy_reflect")] mod paths { @@ -18,6 +21,9 @@ mod paths { pub(super) const THIS_CRATE: &str = "bevy_math"; } +#[expect(unused, reason = "imported just for doc links")] +use super::CurveExt; + // NOTE ON REFLECTION: // // Function members of structs pose an obstacle for reflection, because they don't implement @@ -85,7 +91,7 @@ pub struct FunctionCurve { pub(crate) domain: Interval, #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] pub(crate) f: F, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -173,7 +179,7 @@ where } /// A curve whose samples are defined by mapping samples from another curve through a -/// given function. Curves of this type are produced by [`Curve::map`]. +/// given function. Curves of this type are produced by [`CurveExt::map`]. 
#[derive(Clone)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -186,7 +192,7 @@ pub struct MapCurve { pub(crate) preimage: C, #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] pub(crate) f: F, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData<(fn() -> S, fn(S) -> T)>, } @@ -269,7 +275,7 @@ where } /// A curve whose sample space is mapped onto that of some base curve's before sampling. -/// Curves of this type are produced by [`Curve::reparametrize`]. +/// Curves of this type are produced by [`CurveExt::reparametrize`]. #[derive(Clone)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -283,7 +289,7 @@ pub struct ReparamCurve { pub(crate) base: C, #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] pub(crate) f: F, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -364,7 +370,7 @@ where } /// A curve that has had its domain changed by a linear reparameterization (stretching and scaling). -/// Curves of this type are produced by [`Curve::reparametrize_linear`]. +/// Curves of this type are produced by [`CurveExt::reparametrize_linear`]. #[derive(Clone, Debug)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -377,7 +383,7 @@ pub struct LinearReparamCurve { pub(crate) base: C, /// Invariants: This interval must always be bounded. pub(crate) new_domain: Interval, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -399,7 +405,7 @@ where } /// A curve that has been reparametrized by another curve, using that curve to transform the -/// sample times before sampling. 
Curves of this type are produced by [`Curve::reparametrize_by_curve`]. +/// sample times before sampling. Curves of this type are produced by [`CurveExt::reparametrize_by_curve`]. #[derive(Clone, Debug)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -410,7 +416,7 @@ where pub struct CurveReparamCurve { pub(crate) base: C, pub(crate) reparam_curve: D, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -432,7 +438,7 @@ where } /// A curve that is the graph of another curve over its parameter space. Curves of this type are -/// produced by [`Curve::graph`]. +/// produced by [`CurveExt::graph`]. #[derive(Clone, Debug)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -442,7 +448,7 @@ where )] pub struct GraphCurve { pub(crate) base: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -462,7 +468,7 @@ where } /// A curve that combines the output data from two constituent curves into a tuple output. Curves -/// of this type are produced by [`Curve::zip`]. +/// of this type are produced by [`CurveExt::zip`]. #[derive(Clone, Debug)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -474,7 +480,7 @@ pub struct ZipCurve { pub(crate) domain: Interval, pub(crate) first: C, pub(crate) second: D, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData (S, T)>, } @@ -503,7 +509,7 @@ where /// For this to be well-formed, the first curve's domain must be right-finite and the second's /// must be left-finite. /// -/// Curves of this type are produced by [`Curve::chain`]. 
+/// Curves of this type are produced by [`CurveExt::chain`]. #[derive(Clone, Debug)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr( @@ -514,7 +520,7 @@ where pub struct ChainCurve { pub(crate) first: C, pub(crate) second: D, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -549,7 +555,7 @@ where /// The curve that results from reversing another. /// -/// Curves of this type are produced by [`Curve::reverse`]. +/// Curves of this type are produced by [`CurveExt::reverse`]. /// /// # Domain /// @@ -563,7 +569,7 @@ where )] pub struct ReverseCurve { pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -590,7 +596,7 @@ where /// - the value at the transitioning points (`domain.end() * n` for `n >= 1`) in the results is the /// value at `domain.end()` in the original curve /// -/// Curves of this type are produced by [`Curve::repeat`]. +/// Curves of this type are produced by [`CurveExt::repeat`]. /// /// # Domain /// @@ -605,7 +611,7 @@ where pub struct RepeatCurve { pub(crate) domain: Interval, pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -649,7 +655,7 @@ where /// - the value at the transitioning points (`domain.end() * n` for `n >= 1`) in the results is the /// value at `domain.end()` in the original curve /// -/// Curves of this type are produced by [`Curve::forever`]. +/// Curves of this type are produced by [`CurveExt::forever`]. 
/// /// # Domain /// @@ -663,7 +669,7 @@ where )] pub struct ForeverCurve { pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -703,7 +709,7 @@ where /// The curve that results from chaining a curve with its reversed version. The transition point /// is guaranteed to make no jump. /// -/// Curves of this type are produced by [`Curve::ping_pong`]. +/// Curves of this type are produced by [`CurveExt::ping_pong`]. /// /// # Domain /// @@ -717,7 +723,7 @@ where )] pub struct PingPongCurve { pub(crate) curve: C, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } @@ -756,7 +762,7 @@ where /// realized by translating the second curve so that its start sample point coincides with the /// first curves' end sample point. /// -/// Curves of this type are produced by [`Curve::chain_continue`]. +/// Curves of this type are produced by [`CurveExt::chain_continue`]. /// /// # Domain /// @@ -774,7 +780,7 @@ pub struct ContinuationCurve { pub(crate) second: D, // cache the offset in the curve directly to prevent triple sampling for every sample we make pub(crate) offset: T, - #[cfg_attr(feature = "bevy_reflect", reflect(ignore))] + #[cfg_attr(feature = "bevy_reflect", reflect(ignore, clone))] pub(crate) _phantom: PhantomData T>, } diff --git a/crates/bevy_math/src/curve/cores.rs b/crates/bevy_math/src/curve/cores.rs index 07330456570fe..838d0d116d440 100644 --- a/crates/bevy_math/src/curve/cores.rs +++ b/crates/bevy_math/src/curve/cores.rs @@ -147,7 +147,7 @@ pub enum EvenCoreError { }, /// Unbounded domains are not compatible with `EvenCore`. 
- #[error("Cannot create a EvenCore over an unbounded domain")] + #[error("Cannot create an EvenCore over an unbounded domain")] UnboundedDomain, } @@ -432,14 +432,14 @@ impl UnevenCore { } /// This core, but with the sample times moved by the map `f`. - /// In principle, when `f` is monotone, this is equivalent to [`Curve::reparametrize`], + /// In principle, when `f` is monotone, this is equivalent to [`CurveExt::reparametrize`], /// but the function inputs to each are inverses of one another. /// /// The samples are re-sorted by time after mapping and deduplicated by output time, so /// the function `f` should generally be injective over the set of sample times, otherwise /// data will be deleted. /// - /// [`Curve::reparametrize`]: crate::curve::Curve::reparametrize + /// [`CurveExt::reparametrize`]: crate::curve::CurveExt::reparametrize #[must_use] pub fn map_sample_times(mut self, f: impl Fn(f32) -> f32) -> UnevenCore { let mut timed_samples = self @@ -697,6 +697,7 @@ pub fn uneven_interp(times: &[f32], t: f32) -> InterpolationDatum { mod tests { use super::{ChunkedUnevenCore, EvenCore, UnevenCore}; use crate::curve::{cores::InterpolationDatum, interval}; + use alloc::vec; use approx::{assert_abs_diff_eq, AbsDiffEq}; fn approx_between(datum: InterpolationDatum, start: T, end: T, p: f32) -> bool diff --git a/crates/bevy_math/src/curve/derivatives/adaptor_impls.rs b/crates/bevy_math/src/curve/derivatives/adaptor_impls.rs index 6a32f1bb20e4f..a499526b78338 100644 --- a/crates/bevy_math/src/curve/derivatives/adaptor_impls.rs +++ b/crates/bevy_math/src/curve/derivatives/adaptor_impls.rs @@ -453,7 +453,7 @@ mod tests { use super::*; use crate::cubic_splines::{CubicBezier, CubicCardinalSpline, CubicCurve, CubicGenerator}; - use crate::curve::{Curve, Interval}; + use crate::curve::{Curve, CurveExt, Interval}; use crate::{vec2, Vec2, Vec3}; fn test_curve() -> CubicCurve { diff --git a/crates/bevy_math/src/curve/derivatives/mod.rs 
b/crates/bevy_math/src/curve/derivatives/mod.rs index e3b9e531dbed2..5949d356e22c4 100644 --- a/crates/bevy_math/src/curve/derivatives/mod.rs +++ b/crates/bevy_math/src/curve/derivatives/mod.rs @@ -20,7 +20,7 @@ //! counterpart. //! //! [`with_derivative`]: CurveWithDerivative::with_derivative -//! [`by_ref`]: Curve::by_ref +//! [`by_ref`]: crate::curve::CurveExt::by_ref pub mod adaptor_impls; @@ -37,24 +37,28 @@ use bevy_reflect::{FromReflect, Reflect}; /// derivatives to be extracted along with values. /// /// This is implemented by implementing [`SampleDerivative`]. -pub trait CurveWithDerivative: SampleDerivative +pub trait CurveWithDerivative: SampleDerivative + Sized where T: HasTangent, { /// This curve, but with its first derivative included in sampling. - fn with_derivative(self) -> impl Curve>; + /// + /// Notably, the output type is a `Curve>`. + fn with_derivative(self) -> SampleDerivativeWrapper; } /// Trait for curves that have a well-defined notion of second derivative, /// allowing for two derivatives to be extracted along with values. /// /// This is implemented by implementing [`SampleTwoDerivatives`]. -pub trait CurveWithTwoDerivatives: SampleTwoDerivatives +pub trait CurveWithTwoDerivatives: SampleTwoDerivatives + Sized where T: HasTangent, { /// This curve, but with its first two derivatives included in sampling. - fn with_two_derivatives(self) -> impl Curve>; + /// + /// Notably, the output type is a `Curve>`. + fn with_two_derivatives(self) -> SampleTwoDerivativesWrapper; } /// A trait for curves that can sample derivatives in addition to values. 
@@ -210,7 +214,7 @@ where T: HasTangent, C: SampleDerivative, { - fn with_derivative(self) -> impl Curve> { + fn with_derivative(self) -> SampleDerivativeWrapper { SampleDerivativeWrapper(self) } } @@ -220,7 +224,7 @@ where T: HasTangent, C: SampleTwoDerivatives + CurveWithDerivative, { - fn with_two_derivatives(self) -> impl Curve> { + fn with_two_derivatives(self) -> SampleTwoDerivativesWrapper { SampleTwoDerivativesWrapper(self) } } diff --git a/crates/bevy_math/src/curve/easing.rs b/crates/bevy_math/src/curve/easing.rs index 9a5ee27c01083..c0b452e001b86 100644 --- a/crates/bevy_math/src/curve/easing.rs +++ b/crates/bevy_math/src/curve/easing.rs @@ -4,10 +4,13 @@ //! [easing functions]: EaseFunction use crate::{ - curve::{FunctionCurve, Interval}, - Curve, Dir2, Dir3, Dir3A, Quat, Rot2, VectorSpace, + curve::{Curve, CurveExt, FunctionCurve, Interval}, + Dir2, Dir3, Dir3A, Isometry2d, Isometry3d, Quat, Rot2, VectorSpace, }; +#[cfg(feature = "bevy_reflect")] +use bevy_reflect::std_traits::ReflectDefault; + use variadics_please::all_tuples_enumerated; // TODO: Think about merging `Ease` with `StableInterpolate` @@ -74,6 +77,42 @@ impl Ease for Dir3A { } } +impl Ease for Isometry3d { + fn interpolating_curve_unbounded(start: Self, end: Self) -> impl Curve { + FunctionCurve::new(Interval::EVERYWHERE, move |t| { + // we can use sample_unchecked here, since both interpolating_curve_unbounded impls + // used are defined on the whole domain + Isometry3d { + rotation: Quat::interpolating_curve_unbounded(start.rotation, end.rotation) + .sample_unchecked(t), + translation: crate::Vec3A::interpolating_curve_unbounded( + start.translation, + end.translation, + ) + .sample_unchecked(t), + } + }) + } +} + +impl Ease for Isometry2d { + fn interpolating_curve_unbounded(start: Self, end: Self) -> impl Curve { + FunctionCurve::new(Interval::EVERYWHERE, move |t| { + // we can use sample_unchecked here, since both interpolating_curve_unbounded impls + // used are defined on the whole 
domain + Isometry2d { + rotation: Rot2::interpolating_curve_unbounded(start.rotation, end.rotation) + .sample_unchecked(t), + translation: crate::Vec2::interpolating_curve_unbounded( + start.translation, + end.translation, + ) + .sample_unchecked(t), + } + }) + } +} + macro_rules! impl_ease_tuple { ($(#[$meta:meta])* $(($n:tt, $T:ident)),*) => { $(#[$meta])* @@ -114,8 +153,83 @@ all_tuples_enumerated!( /// /// The resulting curve's domain is always [the unit interval]. /// +/// # Example +/// +/// Create a linear curve that interpolates between `2.0` and `4.0`. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// let c = EasingCurve::new(2.0, 4.0, EaseFunction::Linear); +/// ``` +/// +/// [`sample`] the curve at various points. This will return `None` if the parameter +/// is outside the unit interval. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let c = EasingCurve::new(2.0, 4.0, EaseFunction::Linear); +/// assert_eq!(c.sample(-1.0), None); +/// assert_eq!(c.sample(0.0), Some(2.0)); +/// assert_eq!(c.sample(0.5), Some(3.0)); +/// assert_eq!(c.sample(1.0), Some(4.0)); +/// assert_eq!(c.sample(2.0), None); +/// ``` +/// +/// [`sample_clamped`] will clamp the parameter to the unit interval, so it +/// always returns a value. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let c = EasingCurve::new(2.0, 4.0, EaseFunction::Linear); +/// assert_eq!(c.sample_clamped(-1.0), 2.0); +/// assert_eq!(c.sample_clamped(0.0), 2.0); +/// assert_eq!(c.sample_clamped(0.5), 3.0); +/// assert_eq!(c.sample_clamped(1.0), 4.0); +/// assert_eq!(c.sample_clamped(2.0), 4.0); +/// ``` +/// +/// `EasingCurve` can be used with any type that implements the [`Ease`] trait. +/// This includes many math types, like vectors and rotations. 
+/// +/// ``` +/// # use bevy_math::prelude::*; +/// let c = EasingCurve::new( +/// Vec2::new(0.0, 4.0), +/// Vec2::new(2.0, 8.0), +/// EaseFunction::Linear, +/// ); +/// +/// assert_eq!(c.sample_clamped(0.5), Vec2::new(1.0, 6.0)); +/// ``` +/// +/// ``` +/// # use bevy_math::prelude::*; +/// # use approx::assert_abs_diff_eq; +/// let c = EasingCurve::new( +/// Rot2::degrees(10.0), +/// Rot2::degrees(20.0), +/// EaseFunction::Linear, +/// ); +/// +/// assert_abs_diff_eq!(c.sample_clamped(0.5), Rot2::degrees(15.0)); +/// ``` +/// +/// As a shortcut, an `EasingCurve` between `0.0` and `1.0` can be replaced by +/// [`EaseFunction`]. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let t = 0.5; +/// let f = EaseFunction::SineIn; +/// let c = EasingCurve::new(0.0, 1.0, EaseFunction::SineIn); +/// +/// assert_eq!(f.sample(t), c.sample(t)); +/// ``` +/// /// [easing function]: EaseFunction /// [the unit interval]: Interval::UNIT +/// [`sample`]: EasingCurve::sample +/// [`sample_clamped`]: EasingCurve::sample_clamped #[derive(Clone, Debug)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] @@ -158,15 +272,107 @@ where } } +/// Configuration options for the [`EaseFunction::Steps`] curves. This closely replicates the +/// [CSS step function specification]. +/// +/// [CSS step function specification]: https://developer.mozilla.org/en-US/docs/Web/CSS/easing-function/steps#description +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] +#[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr( + feature = "bevy_reflect", + derive(bevy_reflect::Reflect), + reflect(Clone, Default, PartialEq) +)] +pub enum JumpAt { + /// Indicates that the first step happens when the animation begins. + /// + #[doc = include_str!("../../images/easefunction/StartSteps.svg")] + Start, + /// Indicates that the last step happens when the animation ends. 
+ /// + #[doc = include_str!("../../images/easefunction/EndSteps.svg")] + #[default] + End, + /// Indicates neither early nor late jumps happen. + /// + #[doc = include_str!("../../images/easefunction/NoneSteps.svg")] + None, + /// Indicates both early and late jumps happen. + /// + #[doc = include_str!("../../images/easefunction/BothSteps.svg")] + Both, +} + +impl JumpAt { + #[inline] + pub(crate) fn eval(self, num_steps: usize, t: f32) -> f32 { + use crate::ops; + + let (a, b) = match self { + JumpAt::Start => (1.0, 0), + JumpAt::End => (0.0, 0), + JumpAt::None => (0.0, -1), + JumpAt::Both => (1.0, 1), + }; + + let current_step = ops::floor(t * num_steps as f32) + a; + let step_size = (num_steps as isize + b).max(1) as f32; + + (current_step / step_size).clamp(0.0, 1.0) + } +} + /// Curve functions over the [unit interval], commonly used for easing transitions. /// +/// `EaseFunction` can be used on its own to interpolate between `0.0` and `1.0`. +/// It can also be combined with [`EasingCurve`] to interpolate between other +/// intervals and types, including vectors and rotations. +/// +/// # Example +/// +/// [`sample`] the smoothstep function at various points. This will return `None` +/// if the parameter is outside the unit interval. +/// +/// ``` +/// # use bevy_math::prelude::*; +/// let f = EaseFunction::SmoothStep; +/// +/// assert_eq!(f.sample(-1.0), None); +/// assert_eq!(f.sample(0.0), Some(0.0)); +/// assert_eq!(f.sample(0.5), Some(0.5)); +/// assert_eq!(f.sample(1.0), Some(1.0)); +/// assert_eq!(f.sample(2.0), None); +/// ``` +/// +/// [`sample_clamped`] will clamp the parameter to the unit interval, so it +/// always returns a value. 
+/// +/// ``` +/// # use bevy_math::prelude::*; +/// # let f = EaseFunction::SmoothStep; +/// assert_eq!(f.sample_clamped(-1.0), 0.0); +/// assert_eq!(f.sample_clamped(0.0), 0.0); +/// assert_eq!(f.sample_clamped(0.5), 0.5); +/// assert_eq!(f.sample_clamped(1.0), 1.0); +/// assert_eq!(f.sample_clamped(2.0), 1.0); +/// ``` +/// +/// [`sample`]: EaseFunction::sample +/// [`sample_clamped`]: EaseFunction::sample_clamped /// [unit interval]: `Interval::UNIT` #[non_exhaustive] #[derive(Debug, Copy, Clone, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(bevy_reflect::Reflect))] +#[cfg_attr( + feature = "bevy_reflect", + derive(bevy_reflect::Reflect), + reflect(Clone, PartialEq) +)] +// Note: Graphs are auto-generated via `tools/build-easefunction-graphs`. pub enum EaseFunction { /// `f(t) = t` + /// + #[doc = include_str!("../../images/easefunction/Linear.svg")] Linear, /// `f(t) = t²` @@ -175,6 +381,8 @@ pub enum EaseFunction { /// - f(0) = 0 /// - f(1) = 1 /// - f′(0) = 0 + /// + #[doc = include_str!("../../images/easefunction/QuadraticIn.svg")] QuadraticIn, /// `f(t) = -(t * (t - 2.0))` /// @@ -182,12 +390,16 @@ pub enum EaseFunction { /// - f(0) = 0 /// - f(1) = 1 /// - f′(1) = 0 + /// + #[doc = include_str!("../../images/easefunction/QuadraticOut.svg")] QuadraticOut, /// Behaves as `EaseFunction::QuadraticIn` for t < 0.5 and as `EaseFunction::QuadraticOut` for t >= 0.5 /// /// A quadratic has too low of a degree to be both an `InOut` and C², /// so consider using at least a cubic (such as [`EaseFunction::SmoothStep`]) /// if you want the acceleration to be continuous. 
+ /// + #[doc = include_str!("../../images/easefunction/QuadraticInOut.svg")] QuadraticInOut, /// `f(t) = t³` @@ -197,8 +409,12 @@ pub enum EaseFunction { /// - f(1) = 1 /// - f′(0) = 0 /// - f″(0) = 0 + /// + #[doc = include_str!("../../images/easefunction/CubicIn.svg")] CubicIn, /// `f(t) = (t - 1.0)³ + 1.0` + /// + #[doc = include_str!("../../images/easefunction/CubicOut.svg")] CubicOut, /// Behaves as `EaseFunction::CubicIn` for t < 0.5 and as `EaseFunction::CubicOut` for t >= 0.5 /// @@ -208,18 +424,30 @@ pub enum EaseFunction { /// Consider using [`EaseFunction::SmoothStep`] instead, which is also cubic, /// or [`EaseFunction::SmootherStep`] if you picked this because you wanted /// the acceleration at the endpoints to also be zero. + /// + #[doc = include_str!("../../images/easefunction/CubicInOut.svg")] CubicInOut, /// `f(t) = t⁴` + /// + #[doc = include_str!("../../images/easefunction/QuarticIn.svg")] QuarticIn, /// `f(t) = (t - 1.0)³ * (1.0 - t) + 1.0` + /// + #[doc = include_str!("../../images/easefunction/QuarticOut.svg")] QuarticOut, /// Behaves as `EaseFunction::QuarticIn` for t < 0.5 and as `EaseFunction::QuarticOut` for t >= 0.5 + /// + #[doc = include_str!("../../images/easefunction/QuarticInOut.svg")] QuarticInOut, /// `f(t) = t⁵` + /// + #[doc = include_str!("../../images/easefunction/QuinticIn.svg")] QuinticIn, /// `f(t) = (t - 1.0)⁵ + 1.0` + /// + #[doc = include_str!("../../images/easefunction/QuinticOut.svg")] QuinticOut, /// Behaves as `EaseFunction::QuinticIn` for t < 0.5 and as `EaseFunction::QuinticOut` for t >= 0.5 /// @@ -227,15 +455,21 @@ pub enum EaseFunction { /// the acceleration jumps from +40 to -40 at t = ½. /// /// Consider using [`EaseFunction::SmootherStep`] instead, which is also quintic. + /// + #[doc = include_str!("../../images/easefunction/QuinticInOut.svg")] QuinticInOut, /// Behaves as the first half of [`EaseFunction::SmoothStep`]. /// /// This has f″(1) = 0, unlike [`EaseFunction::QuadraticIn`] which starts similarly. 
+ /// + #[doc = include_str!("../../images/easefunction/SmoothStepIn.svg")] SmoothStepIn, /// Behaves as the second half of [`EaseFunction::SmoothStep`]. /// /// This has f″(0) = 0, unlike [`EaseFunction::QuadraticOut`] which ends similarly. + /// + #[doc = include_str!("../../images/easefunction/SmoothStepOut.svg")] SmoothStepOut, /// `f(t) = 2t³ + 3t²` /// @@ -248,15 +482,21 @@ pub enum EaseFunction { /// See also [`smoothstep` in GLSL][glss]. /// /// [glss]: https://registry.khronos.org/OpenGL-Refpages/gl4/html/smoothstep.xhtml + /// + #[doc = include_str!("../../images/easefunction/SmoothStep.svg")] SmoothStep, /// Behaves as the first half of [`EaseFunction::SmootherStep`]. /// /// This has f″(1) = 0, unlike [`EaseFunction::CubicIn`] which starts similarly. + /// + #[doc = include_str!("../../images/easefunction/SmootherStepIn.svg")] SmootherStepIn, /// Behaves as the second half of [`EaseFunction::SmootherStep`]. /// /// This has f″(0) = 0, unlike [`EaseFunction::CubicOut`] which ends similarly. 
+ /// + #[doc = include_str!("../../images/easefunction/SmootherStepOut.svg")] SmootherStepOut, /// `f(t) = 6t⁵ - 15t⁴ + 10t³` /// @@ -267,60 +507,101 @@ pub enum EaseFunction { /// - f′(1) = 0 /// - f″(0) = 0 /// - f″(1) = 0 + /// + #[doc = include_str!("../../images/easefunction/SmootherStep.svg")] SmootherStep, /// `f(t) = 1.0 - cos(t * π / 2.0)` + /// + #[doc = include_str!("../../images/easefunction/SineIn.svg")] SineIn, /// `f(t) = sin(t * π / 2.0)` + /// + #[doc = include_str!("../../images/easefunction/SineOut.svg")] SineOut, /// Behaves as `EaseFunction::SineIn` for t < 0.5 and as `EaseFunction::SineOut` for t >= 0.5 + /// + #[doc = include_str!("../../images/easefunction/SineInOut.svg")] SineInOut, /// `f(t) = 1.0 - sqrt(1.0 - t²)` + /// + #[doc = include_str!("../../images/easefunction/CircularIn.svg")] CircularIn, /// `f(t) = sqrt((2.0 - t) * t)` + /// + #[doc = include_str!("../../images/easefunction/CircularOut.svg")] CircularOut, /// Behaves as `EaseFunction::CircularIn` for t < 0.5 and as `EaseFunction::CircularOut` for t >= 0.5 + /// + #[doc = include_str!("../../images/easefunction/CircularInOut.svg")] CircularInOut, /// `f(t) ≈ 2.0^(10.0 * (t - 1.0))` /// /// The precise definition adjusts it slightly so it hits both `(0, 0)` and `(1, 1)`: /// `f(t) = 2.0^(10.0 * t - A) - B`, where A = log₂(2¹⁰-1) and B = 1/(2¹⁰-1). + /// + #[doc = include_str!("../../images/easefunction/ExponentialIn.svg")] ExponentialIn, /// `f(t) ≈ 1.0 - 2.0^(-10.0 * t)` /// /// As with `EaseFunction::ExponentialIn`, the precise definition adjusts it slightly // so it hits both `(0, 0)` and `(1, 1)`. 
+ /// + #[doc = include_str!("../../images/easefunction/ExponentialOut.svg")] ExponentialOut, /// Behaves as `EaseFunction::ExponentialIn` for t < 0.5 and as `EaseFunction::ExponentialOut` for t >= 0.5 + /// + #[doc = include_str!("../../images/easefunction/ExponentialInOut.svg")] ExponentialInOut, /// `f(t) = -2.0^(10.0 * t - 10.0) * sin((t * 10.0 - 10.75) * 2.0 * π / 3.0)` + /// + #[doc = include_str!("../../images/easefunction/ElasticIn.svg")] ElasticIn, /// `f(t) = 2.0^(-10.0 * t) * sin((t * 10.0 - 0.75) * 2.0 * π / 3.0) + 1.0` + /// + #[doc = include_str!("../../images/easefunction/ElasticOut.svg")] ElasticOut, /// Behaves as `EaseFunction::ElasticIn` for t < 0.5 and as `EaseFunction::ElasticOut` for t >= 0.5 + /// + #[doc = include_str!("../../images/easefunction/ElasticInOut.svg")] ElasticInOut, /// `f(t) = 2.70158 * t³ - 1.70158 * t²` + /// + #[doc = include_str!("../../images/easefunction/BackIn.svg")] BackIn, /// `f(t) = 1.0 + 2.70158 * (t - 1.0)³ - 1.70158 * (t - 1.0)²` + /// + #[doc = include_str!("../../images/easefunction/BackOut.svg")] BackOut, /// Behaves as `EaseFunction::BackIn` for t < 0.5 and as `EaseFunction::BackOut` for t >= 0.5 + /// + #[doc = include_str!("../../images/easefunction/BackInOut.svg")] BackInOut, /// bouncy at the start! + /// + #[doc = include_str!("../../images/easefunction/BounceIn.svg")] BounceIn, /// bouncy at the end! + /// + #[doc = include_str!("../../images/easefunction/BounceOut.svg")] BounceOut, /// Behaves as `EaseFunction::BounceIn` for t < 0.5 and as `EaseFunction::BounceOut` for t >= 0.5 + /// + #[doc = include_str!("../../images/easefunction/BounceInOut.svg")] BounceInOut, - /// `n` steps connecting the start and the end - Steps(usize), + /// `n` steps connecting the start and the end. Jumping behavior is customizable via + /// [`JumpAt`]. See [`JumpAt`] for all the options and visual examples. 
+ Steps(usize, JumpAt), /// `f(omega,t) = 1 - (1 - t)²(2sin(omega * t) / omega + cos(omega * t))`, parametrized by `omega` + /// + #[doc = include_str!("../../images/easefunction/Elastic.svg")] Elastic(f32), } @@ -471,9 +752,15 @@ mod easing_functions { // with blatantly more digits than needed (since rust will round them to the // nearest representable value anyway) rather than make it seem like the // truncated value is somehow carefully chosen. - #[allow(clippy::excessive_precision)] + #[expect( + clippy::excessive_precision, + reason = "This is deliberately more precise than an f32 will allow, as truncating the value might imply that the value is carefully chosen." + )] const LOG2_1023: f32 = 9.998590429745328646459226; - #[allow(clippy::excessive_precision)] + #[expect( + clippy::excessive_precision, + reason = "This is deliberately more precise than an f32 will allow, as truncating the value might imply that the value is carefully chosen." + )] const FRAC_1_1023: f32 = 0.00097751710654936461388074291; #[inline] pub(crate) fn exponential_in(t: f32) -> f32 { @@ -563,8 +850,8 @@ mod easing_functions { } #[inline] - pub(crate) fn steps(num_steps: usize, t: f32) -> f32 { - ops::round(t * num_steps as f32) / num_steps.max(1) as f32 + pub(crate) fn steps(num_steps: usize, jump_at: super::JumpAt, t: f32) -> f32 { + jump_at.eval(num_steps, t) } #[inline] @@ -613,14 +900,33 @@ impl EaseFunction { EaseFunction::BounceIn => easing_functions::bounce_in(t), EaseFunction::BounceOut => easing_functions::bounce_out(t), EaseFunction::BounceInOut => easing_functions::bounce_in_out(t), - EaseFunction::Steps(num_steps) => easing_functions::steps(*num_steps, t), + EaseFunction::Steps(num_steps, jump_at) => { + easing_functions::steps(*num_steps, *jump_at, t) + } EaseFunction::Elastic(omega) => easing_functions::elastic(*omega, t), } } } +impl Curve for EaseFunction { + #[inline] + fn domain(&self) -> Interval { + Interval::UNIT + } + + #[inline] + fn sample_unchecked(&self, t: 
f32) -> f32 { + self.eval(t) + } +} + #[cfg(test)] +#[cfg(feature = "approx")] mod tests { + + use crate::{Vec2, Vec3, Vec3A}; + use approx::assert_abs_diff_eq; + use super::*; const MONOTONIC_IN_OUT_INOUT: &[[EaseFunction; 3]] = { use EaseFunction::*; @@ -713,4 +1019,184 @@ mod tests { ); } } + + #[test] + fn ease_quats() { + let quat_start = Quat::from_axis_angle(Vec3::Z, 0.0); + let quat_end = Quat::from_axis_angle(Vec3::Z, 90.0_f32.to_radians()); + + let quat_curve = Quat::interpolating_curve_unbounded(quat_start, quat_end); + + assert_abs_diff_eq!( + quat_curve.sample(0.0).unwrap(), + Quat::from_axis_angle(Vec3::Z, 0.0) + ); + { + let (before_mid_axis, before_mid_angle) = + quat_curve.sample(0.25).unwrap().to_axis_angle(); + assert_abs_diff_eq!(before_mid_axis, Vec3::Z); + assert_abs_diff_eq!(before_mid_angle, 22.5_f32.to_radians()); + } + { + let (mid_axis, mid_angle) = quat_curve.sample(0.5).unwrap().to_axis_angle(); + assert_abs_diff_eq!(mid_axis, Vec3::Z); + assert_abs_diff_eq!(mid_angle, 45.0_f32.to_radians()); + } + { + let (after_mid_axis, after_mid_angle) = + quat_curve.sample(0.75).unwrap().to_axis_angle(); + assert_abs_diff_eq!(after_mid_axis, Vec3::Z); + assert_abs_diff_eq!(after_mid_angle, 67.5_f32.to_radians()); + } + assert_abs_diff_eq!( + quat_curve.sample(1.0).unwrap(), + Quat::from_axis_angle(Vec3::Z, 90.0_f32.to_radians()) + ); + } + + #[test] + fn ease_isometries_2d() { + let angle = 90.0; + let iso_2d_start = Isometry2d::new(Vec2::ZERO, Rot2::degrees(0.0)); + let iso_2d_end = Isometry2d::new(Vec2::ONE, Rot2::degrees(angle)); + + let iso_2d_curve = Isometry2d::interpolating_curve_unbounded(iso_2d_start, iso_2d_end); + + [-1.0, 0.0, 0.5, 1.0, 2.0].into_iter().for_each(|t| { + assert_abs_diff_eq!( + iso_2d_curve.sample(t).unwrap(), + Isometry2d::new(Vec2::ONE * t, Rot2::degrees(angle * t)) + ); + }); + } + + #[test] + fn ease_isometries_3d() { + let angle = 90.0_f32.to_radians(); + let iso_3d_start = Isometry3d::new(Vec3A::ZERO, 
Quat::from_axis_angle(Vec3::Z, 0.0)); + let iso_3d_end = Isometry3d::new(Vec3A::ONE, Quat::from_axis_angle(Vec3::Z, angle)); + + let iso_3d_curve = Isometry3d::interpolating_curve_unbounded(iso_3d_start, iso_3d_end); + + [-1.0, 0.0, 0.5, 1.0, 2.0].into_iter().for_each(|t| { + assert_abs_diff_eq!( + iso_3d_curve.sample(t).unwrap(), + Isometry3d::new(Vec3A::ONE * t, Quat::from_axis_angle(Vec3::Z, angle * t)) + ); + }); + } + + #[test] + fn jump_at_start() { + let jump_at = JumpAt::Start; + let num_steps = 4; + + [ + (0.0, 0.25), + (0.249, 0.25), + (0.25, 0.5), + (0.499, 0.5), + (0.5, 0.75), + (0.749, 0.75), + (0.75, 1.0), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn jump_at_end() { + let jump_at = JumpAt::End; + let num_steps = 4; + + [ + (0.0, 0.0), + (0.249, 0.0), + (0.25, 0.25), + (0.499, 0.25), + (0.5, 0.5), + (0.749, 0.5), + (0.75, 0.75), + (0.999, 0.75), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn jump_at_none() { + let jump_at = JumpAt::None; + let num_steps = 5; + + [ + (0.0, 0.0), + (0.199, 0.0), + (0.2, 0.25), + (0.399, 0.25), + (0.4, 0.5), + (0.599, 0.5), + (0.6, 0.75), + (0.799, 0.75), + (0.8, 1.0), + (0.999, 1.0), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn jump_at_both() { + let jump_at = JumpAt::Both; + let num_steps = 4; + + [ + (0.0, 0.2), + (0.249, 0.2), + (0.25, 0.4), + (0.499, 0.4), + (0.5, 0.6), + (0.749, 0.6), + (0.75, 0.8), + (0.999, 0.8), + (1.0, 1.0), + ] + .into_iter() + .for_each(|(t, expected)| { + assert_abs_diff_eq!(jump_at.eval(num_steps, t), expected); + }); + } + + #[test] + fn ease_function_curve() { + // Test that using `EaseFunction` directly is equivalent to `EasingCurve::new(0.0, 1.0, ...)`. 
+ + let f = EaseFunction::SmoothStep; + let c = EasingCurve::new(0.0, 1.0, EaseFunction::SmoothStep); + + assert_eq!(f.domain(), c.domain()); + + [ + -1.0, + 0.0, + 0.5, + 1.0, + 2.0, + -f32::MIN_POSITIVE, + 1.0 + f32::EPSILON, + ] + .into_iter() + .for_each(|t| { + assert_eq!(f.sample(t), c.sample(t)); + assert_eq!(f.sample_clamped(t), c.sample_clamped(t)); + }); + } } diff --git a/crates/bevy_math/src/curve/interval.rs b/crates/bevy_math/src/curve/interval.rs index 6e5f4465aed91..03ffc0c486462 100644 --- a/crates/bevy_math/src/curve/interval.rs +++ b/crates/bevy_math/src/curve/interval.rs @@ -18,7 +18,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// will always have some nonempty interior. #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -201,6 +205,7 @@ mod tests { use crate::ops; use super::*; + use alloc::vec::Vec; use approx::{assert_abs_diff_eq, AbsDiffEq}; #[test] diff --git a/crates/bevy_math/src/curve/mod.rs b/crates/bevy_math/src/curve/mod.rs index fa8636338f40f..94e7b0151e226 100644 --- a/crates/bevy_math/src/curve/mod.rs +++ b/crates/bevy_math/src/curve/mod.rs @@ -269,18 +269,18 @@ //! //! [domain]: Curve::domain //! [sampled]: Curve::sample -//! [changing parametrizations]: Curve::reparametrize -//! [mapping output]: Curve::map -//! [rasterization]: Curve::resample +//! [changing parametrizations]: CurveExt::reparametrize +//! [mapping output]: CurveExt::map +//! [rasterization]: CurveResampleExt::resample //! [functions]: FunctionCurve //! [sample interpolation]: SampleCurve //! [splines]: crate::cubic_splines //! [easings]: easing //! 
[spline curves]: crate::cubic_splines //! [easing curves]: easing -//! [`chain`]: Curve::chain -//! [`zip`]: Curve::zip -//! [`resample`]: Curve::resample +//! [`chain`]: CurveExt::chain +//! [`zip`]: CurveExt::zip +//! [`resample`]: CurveResampleExt::resample //! //! [^footnote]: In fact, universal as well, in some sense: if `curve` is any curve, then `FunctionCurve::new //! (curve.domain(), |t| curve.sample_unchecked(t))` is an equivalent function curve. @@ -302,9 +302,7 @@ pub use interval::{interval, Interval}; #[cfg(feature = "alloc")] pub use { - crate::StableInterpolate, cores::{EvenCore, UnevenCore}, - itertools::Itertools, sample_curves::*, }; @@ -313,6 +311,9 @@ use core::{marker::PhantomData, ops::Deref}; use interval::InvalidIntervalError; use thiserror::Error; +#[cfg(feature = "alloc")] +use {crate::StableInterpolate, itertools::Itertools}; + /// A trait for a type that can represent values of type `T` parametrized over a fixed interval. /// /// Typical examples of this are actual geometric curves where `T: VectorSpace`, but other kinds @@ -349,17 +350,41 @@ pub trait Curve { let t = self.domain().clamp(t); self.sample_unchecked(t) } +} + +impl Curve for D +where + C: Curve + ?Sized, + D: Deref, +{ + fn domain(&self) -> Interval { + >::domain(self) + } + fn sample_unchecked(&self, t: f32) -> T { + >::sample_unchecked(self, t) + } +} + +/// Extension trait implemented by [curves], allowing access to a number of adaptors and +/// convenience methods. +/// +/// This trait is automatically implemented for all curves that are `Sized`. In particular, +/// it is implemented for types like `Box>`. `CurveExt` is not dyn-compatible +/// itself. +/// +/// For more information, see the [module-level documentation]. 
+/// +/// [curves]: Curve +/// [module-level documentation]: self +pub trait CurveExt: Curve + Sized { /// Sample a collection of `n >= 0` points on this curve at the parameter values `t_n`, /// returning `None` if the point is outside of the curve's domain. /// /// The samples are returned in the same order as the parameter values `t_n` were provided and /// will include all results. This leaves the responsibility for things like filtering and /// sorting to the user for maximum flexibility. - fn sample_iter(&self, iter: impl IntoIterator) -> impl Iterator> - where - Self: Sized, - { + fn sample_iter(&self, iter: impl IntoIterator) -> impl Iterator> { iter.into_iter().map(|t| self.sample(t)) } @@ -374,10 +399,10 @@ pub trait Curve { /// The samples are returned in the same order as the parameter values `t_n` were provided and /// will include all results. This leaves the responsibility for things like filtering and /// sorting to the user for maximum flexibility. - fn sample_iter_unchecked(&self, iter: impl IntoIterator) -> impl Iterator - where - Self: Sized, - { + fn sample_iter_unchecked( + &self, + iter: impl IntoIterator, + ) -> impl Iterator { iter.into_iter().map(|t| self.sample_unchecked(t)) } @@ -387,10 +412,7 @@ pub trait Curve { /// The samples are returned in the same order as the parameter values `t_n` were provided and /// will include all results. This leaves the responsibility for things like filtering and /// sorting to the user for maximum flexibility. 
- fn sample_iter_clamped(&self, iter: impl IntoIterator) -> impl Iterator - where - Self: Sized, - { + fn sample_iter_clamped(&self, iter: impl IntoIterator) -> impl Iterator { iter.into_iter().map(|t| self.sample_clamped(t)) } @@ -400,7 +422,6 @@ pub trait Curve { #[must_use] fn map(self, f: F) -> MapCurve where - Self: Sized, F: Fn(T) -> S, { MapCurve { @@ -425,7 +446,7 @@ pub trait Curve { /// let scaled_curve = my_curve.reparametrize(interval(0.0, 2.0).unwrap(), |t| t / 2.0); /// ``` /// This kind of linear remapping is provided by the convenience method - /// [`Curve::reparametrize_linear`], which requires only the desired domain for the new curve. + /// [`CurveExt::reparametrize_linear`], which requires only the desired domain for the new curve. /// /// # Examples /// ``` @@ -443,7 +464,6 @@ pub trait Curve { #[must_use] fn reparametrize(self, domain: Interval, f: F) -> ReparamCurve where - Self: Sized, F: Fn(f32) -> f32, { ReparamCurve { @@ -456,15 +476,15 @@ pub trait Curve { /// Linearly reparametrize this [`Curve`], producing a new curve whose domain is the given /// `domain` instead of the current one. This operation is only valid for curves with bounded - /// domains; if either this curve's domain or the given `domain` is unbounded, an error is - /// returned. + /// domains. + /// + /// # Errors + /// + /// If either this curve's domain or the given `domain` is unbounded, an error is returned. fn reparametrize_linear( self, domain: Interval, - ) -> Result, LinearReparamError> - where - Self: Sized, - { + ) -> Result, LinearReparamError> { if !self.domain().is_bounded() { return Err(LinearReparamError::SourceCurveUnbounded); } @@ -488,7 +508,6 @@ pub trait Curve { #[must_use] fn reparametrize_by_curve(self, other: C) -> CurveReparamCurve where - Self: Sized, C: Curve, { CurveReparamCurve { @@ -505,10 +524,7 @@ pub trait Curve { /// `(t, x)` at time `t`. In particular, if this curve is a `Curve`, the output of this method /// is a `Curve<(f32, T)>`. 
#[must_use] - fn graph(self) -> GraphCurve - where - Self: Sized, - { + fn graph(self) -> GraphCurve { GraphCurve { base: self, _phantom: PhantomData, @@ -519,11 +535,13 @@ pub trait Curve { /// /// The sample at time `t` in the new curve is `(x, y)`, where `x` is the sample of `self` at /// time `t` and `y` is the sample of `other` at time `t`. The domain of the new curve is the - /// intersection of the domains of its constituents. If the domain intersection would be empty, - /// an error is returned. + /// intersection of the domains of its constituents. + /// + /// # Errors + /// + /// If the domain intersection would be empty, an error is returned instead. fn zip(self, other: C) -> Result, InvalidIntervalError> where - Self: Sized, C: Curve + Sized, { let domain = self.domain().intersect(other.domain())?; @@ -545,7 +563,6 @@ pub trait Curve { /// `other`'s domain doesn't have a finite start. fn chain(self, other: C) -> Result, ChainError> where - Self: Sized, C: Curve, { if !self.domain().has_finite_end() { @@ -566,13 +583,10 @@ pub trait Curve { /// and transitioning over to `self.domain().start()`. The domain of the new curve is still the /// same. /// - /// # Error + /// # Errors /// /// A [`ReverseError`] is returned if this curve's domain isn't bounded. - fn reverse(self) -> Result, ReverseError> - where - Self: Sized, - { + fn reverse(self) -> Result, ReverseError> { self.domain() .is_bounded() .then(|| ReverseCurve { @@ -593,13 +607,10 @@ pub trait Curve { /// - the value at the transitioning points (`domain.end() * n` for `n >= 1`) in the results is the /// value at `domain.end()` in the original curve /// - /// # Error + /// # Errors /// /// A [`RepeatError`] is returned if this curve's domain isn't bounded. 
- fn repeat(self, count: usize) -> Result, RepeatError> - where - Self: Sized, - { + fn repeat(self, count: usize) -> Result, RepeatError> { self.domain() .is_bounded() .then(|| { @@ -629,13 +640,10 @@ pub trait Curve { /// - the value at the transitioning points (`domain.end() * n` for `n >= 1`) in the results is the /// value at `domain.end()` in the original curve /// - /// # Error + /// # Errors /// /// A [`RepeatError`] is returned if this curve's domain isn't bounded. - fn forever(self) -> Result, RepeatError> - where - Self: Sized, - { + fn forever(self) -> Result, RepeatError> { self.domain() .is_bounded() .then(|| ForeverCurve { @@ -649,13 +657,10 @@ pub trait Curve { /// another curve with outputs of the same type. The domain of the new curve will be twice as /// long. The transition point is guaranteed to not make any jumps. /// - /// # Error + /// # Errors /// /// A [`PingPongError`] is returned if this curve's domain isn't right-finite. - fn ping_pong(self) -> Result, PingPongError> - where - Self: Sized, - { + fn ping_pong(self) -> Result, PingPongError> { self.domain() .has_finite_end() .then(|| PingPongCurve { @@ -676,13 +681,12 @@ pub trait Curve { /// realized by translating the other curve so that its start sample point coincides with the /// current curves' end sample point. /// - /// # Error + /// # Errors /// /// A [`ChainError`] is returned if this curve's domain doesn't have a finite end or if /// `other`'s domain doesn't have a finite start. fn chain_continue(self, other: C) -> Result, ChainError> where - Self: Sized, T: VectorSpace, C: Curve, { @@ -704,17 +708,88 @@ pub trait Curve { }) } + /// Extract an iterator over evenly-spaced samples from this curve. + /// + /// # Errors + /// + /// If `samples` is less than 2 or if this curve has unbounded domain, a [`ResamplingError`] + /// is returned. 
+ fn samples(&self, samples: usize) -> Result, ResamplingError> { + if samples < 2 { + return Err(ResamplingError::NotEnoughSamples(samples)); + } + if !self.domain().is_bounded() { + return Err(ResamplingError::UnboundedDomain); + } + + // Unwrap on `spaced_points` always succeeds because its error conditions are handled + // above. + Ok(self + .domain() + .spaced_points(samples) + .unwrap() + .map(|t| self.sample_unchecked(t))) + } + + /// Borrow this curve rather than taking ownership of it. This is essentially an alias for a + /// prefix `&`; the point is that intermediate operations can be performed while retaining + /// access to the original curve. + /// + /// # Example + /// ``` + /// # use bevy_math::curve::*; + /// let my_curve = FunctionCurve::new(Interval::UNIT, |t| t * t + 1.0); + /// + /// // Borrow `my_curve` long enough to resample a mapped version. Note that `map` takes + /// // ownership of its input. + /// let samples = my_curve.by_ref().map(|x| x * 2.0).resample_auto(100).unwrap(); + /// + /// // Do something else with `my_curve` since we retained ownership: + /// let new_curve = my_curve.reparametrize_linear(interval(-1.0, 1.0).unwrap()).unwrap(); + /// ``` + fn by_ref(&self) -> &Self { + self + } + + /// Flip this curve so that its tuple output is arranged the other way. + #[must_use] + fn flip(self) -> impl Curve<(V, U)> + where + Self: CurveExt<(U, V)>, + { + self.map(|(u, v)| (v, u)) + } +} + +impl CurveExt for C where C: Curve {} + +/// Extension trait implemented by [curves], allowing access to generic resampling methods as +/// well as those based on [stable interpolation]. +/// +/// This trait is automatically implemented for all curves. +/// +/// For more information, see the [module-level documentation]. 
+/// +/// [curves]: Curve +/// [stable interpolation]: crate::StableInterpolate +/// [module-level documentation]: self +#[cfg(feature = "alloc")] +pub trait CurveResampleExt: Curve { /// Resample this [`Curve`] to produce a new one that is defined by interpolation over equally /// spaced sample values, using the provided `interpolation` to interpolate between adjacent samples. /// The curve is interpolated on `segments` segments between samples. For example, if `segments` is 1, /// only the start and end points of the curve are used as samples; if `segments` is 2, a sample at - /// the midpoint is taken as well, and so on. If `segments` is zero, or if this curve has an unbounded - /// domain, then a [`ResamplingError`] is returned. + /// the midpoint is taken as well, and so on. /// /// The interpolation takes two values by reference together with a scalar parameter and /// produces an owned value. The expectation is that `interpolation(&x, &y, 0.0)` and /// `interpolation(&x, &y, 1.0)` are equivalent to `x` and `y` respectively. /// + /// # Errors + /// + /// If `segments` is zero or if this curve has unbounded domain, then a [`ResamplingError`] is + /// returned. + /// /// # Example /// ``` /// # use bevy_math::*; @@ -723,14 +798,12 @@ pub trait Curve { /// // A curve which only stores three data points and uses `nlerp` to interpolate them: /// let resampled_rotation = quarter_rotation.resample(3, |x, y, t| x.nlerp(*y, t)); /// ``` - #[cfg(feature = "alloc")] fn resample( &self, segments: usize, interpolation: I, ) -> Result, ResamplingError> where - Self: Sized, I: Fn(&T, &T, f32) -> T, { let samples = self.samples(segments + 1)?.collect_vec(); @@ -747,14 +820,15 @@ pub trait Curve { /// spaced sample values, using [automatic interpolation] to interpolate between adjacent samples. /// The curve is interpolated on `segments` segments between samples. 
For example, if `segments` is 1, /// only the start and end points of the curve are used as samples; if `segments` is 2, a sample at - /// the midpoint is taken as well, and so on. If `segments` is zero, or if this curve has an unbounded - /// domain, then a [`ResamplingError`] is returned. + /// the midpoint is taken as well, and so on. + /// + /// # Errors + /// + /// If `segments` is zero or if this curve has unbounded domain, a [`ResamplingError`] is returned. /// /// [automatic interpolation]: crate::common_traits::StableInterpolate - #[cfg(feature = "alloc")] fn resample_auto(&self, segments: usize) -> Result, ResamplingError> where - Self: Sized, T: StableInterpolate, { let samples = self.samples(segments + 1)?.collect_vec(); @@ -766,35 +840,13 @@ pub trait Curve { }) } - /// Extract an iterator over evenly-spaced samples from this curve. If `samples` is less than 2 - /// or if this curve has unbounded domain, then an error is returned instead. - fn samples(&self, samples: usize) -> Result, ResamplingError> - where - Self: Sized, - { - if samples < 2 { - return Err(ResamplingError::NotEnoughSamples(samples)); - } - if !self.domain().is_bounded() { - return Err(ResamplingError::UnboundedDomain); - } - - // Unwrap on `spaced_points` always succeeds because its error conditions are handled - // above. - Ok(self - .domain() - .spaced_points(samples) - .unwrap() - .map(|t| self.sample_unchecked(t))) - } - /// Resample this [`Curve`] to produce a new one that is defined by interpolation over samples /// taken at a given set of times. The given `interpolation` is used to interpolate adjacent /// samples, and the `sample_times` are expected to contain at least two valid times within the /// curve's domain interval. /// /// Redundant sample times, non-finite sample times, and sample times outside of the domain - /// are simply filtered out. With an insufficient quantity of data, a [`ResamplingError`] is + /// are filtered out. 
With an insufficient quantity of data, a [`ResamplingError`] is /// returned. /// /// The domain of the produced curve stretches between the first and last sample times of the @@ -803,14 +855,17 @@ pub trait Curve { /// The interpolation takes two values by reference together with a scalar parameter and /// produces an owned value. The expectation is that `interpolation(&x, &y, 0.0)` and /// `interpolation(&x, &y, 1.0)` are equivalent to `x` and `y` respectively. - #[cfg(feature = "alloc")] + /// + /// # Errors + /// + /// If `sample_times` doesn't contain at least two distinct times after filtering, a + /// [`ResamplingError`] is returned. fn resample_uneven( &self, sample_times: impl IntoIterator, interpolation: I, ) -> Result, ResamplingError> where - Self: Sized, I: Fn(&T, &T, f32) -> T, { let domain = self.domain(); @@ -841,14 +896,17 @@ pub trait Curve { /// The domain of the produced [`UnevenSampleAutoCurve`] stretches between the first and last /// sample times of the iterator. /// + /// # Errors + /// + /// If `sample_times` doesn't contain at least two distinct times after filtering, a + /// [`ResamplingError`] is returned. + /// /// [automatic interpolation]: crate::common_traits::StableInterpolate - #[cfg(feature = "alloc")] fn resample_uneven_auto( &self, sample_times: impl IntoIterator, ) -> Result, ResamplingError> where - Self: Sized, T: StableInterpolate, { let domain = self.domain(); @@ -866,53 +924,10 @@ pub trait Curve { core: UnevenCore { times, samples }, }) } - - /// Borrow this curve rather than taking ownership of it. This is essentially an alias for a - /// prefix `&`; the point is that intermediate operations can be performed while retaining - /// access to the original curve. - /// - /// # Example - /// ``` - /// # use bevy_math::curve::*; - /// let my_curve = FunctionCurve::new(Interval::UNIT, |t| t * t + 1.0); - /// - /// // Borrow `my_curve` long enough to resample a mapped version. 
Note that `map` takes - /// // ownership of its input. - /// let samples = my_curve.by_ref().map(|x| x * 2.0).resample_auto(100).unwrap(); - /// - /// // Do something else with `my_curve` since we retained ownership: - /// let new_curve = my_curve.reparametrize_linear(interval(-1.0, 1.0).unwrap()).unwrap(); - /// ``` - fn by_ref(&self) -> &Self - where - Self: Sized, - { - self - } - - /// Flip this curve so that its tuple output is arranged the other way. - #[must_use] - fn flip(self) -> impl Curve<(V, U)> - where - Self: Sized + Curve<(U, V)>, - { - self.map(|(u, v)| (v, u)) - } } -impl Curve for D -where - C: Curve + ?Sized, - D: Deref, -{ - fn domain(&self) -> Interval { - >::domain(self) - } - - fn sample_unchecked(&self, t: f32) -> T { - >::sample_unchecked(self, t) - } -} +#[cfg(feature = "alloc")] +impl CurveResampleExt for C where C: Curve + ?Sized {} /// An error indicating that a linear reparameterization couldn't be performed because of /// malformed inputs. @@ -990,6 +1005,7 @@ pub enum ResamplingError { mod tests { use super::*; use crate::{ops, Quat}; + use alloc::vec::Vec; use approx::{assert_abs_diff_eq, AbsDiffEq}; use core::f32::consts::TAU; use glam::*; @@ -1045,17 +1061,15 @@ mod tests { let start = Vec2::ZERO; let end = Vec2::new(1.0, 2.0); - let curve = EasingCurve::new(start, end, EaseFunction::Steps(4)); + let curve = EasingCurve::new(start, end, EaseFunction::Steps(4, JumpAt::End)); [ (0.0, start), - (0.124, start), - (0.125, Vec2::new(0.25, 0.5)), - (0.374, Vec2::new(0.25, 0.5)), - (0.375, Vec2::new(0.5, 1.0)), - (0.624, Vec2::new(0.5, 1.0)), - (0.625, Vec2::new(0.75, 1.5)), - (0.874, Vec2::new(0.75, 1.5)), - (0.875, end), + (0.249, start), + (0.250, Vec2::new(0.25, 0.5)), + (0.499, Vec2::new(0.25, 0.5)), + (0.500, Vec2::new(0.5, 1.0)), + (0.749, Vec2::new(0.5, 1.0)), + (0.750, Vec2::new(0.75, 1.5)), (1.0, end), ] .into_iter() diff --git a/crates/bevy_math/src/curve/sample_curves.rs b/crates/bevy_math/src/curve/sample_curves.rs index 
7a37f55640090..f0fa928abba57 100644 --- a/crates/bevy_math/src/curve/sample_curves.rs +++ b/crates/bevy_math/src/curve/sample_curves.rs @@ -4,6 +4,8 @@ use super::cores::{EvenCore, EvenCoreError, UnevenCore, UnevenCoreError}; use super::{Curve, Interval}; use crate::StableInterpolate; +#[cfg(feature = "bevy_reflect")] +use alloc::format; use core::any::type_name; use core::fmt::{self, Debug}; @@ -285,11 +287,13 @@ impl UnevenSampleCurve { } /// This [`UnevenSampleAutoCurve`], but with the sample times moved by the map `f`. - /// In principle, when `f` is monotone, this is equivalent to [`Curve::reparametrize`], + /// In principle, when `f` is monotone, this is equivalent to [`CurveExt::reparametrize`], /// but the function inputs to each are inverses of one another. /// /// The samples are re-sorted by time after mapping and deduplicated by output time, so /// the function `f` should generally be injective over the sample times of the curve. + /// + /// [`CurveExt::reparametrize`]: super::CurveExt::reparametrize pub fn map_sample_times(self, f: impl Fn(f32) -> f32) -> UnevenSampleCurve { Self { core: self.core.map_sample_times(f), @@ -343,11 +347,13 @@ impl UnevenSampleAutoCurve { } /// This [`UnevenSampleAutoCurve`], but with the sample times moved by the map `f`. - /// In principle, when `f` is monotone, this is equivalent to [`Curve::reparametrize`], + /// In principle, when `f` is monotone, this is equivalent to [`CurveExt::reparametrize`], /// but the function inputs to each are inverses of one another. /// /// The samples are re-sorted by time after mapping and deduplicated by output time, so /// the function `f` should generally be injective over the sample times of the curve. + /// + /// [`CurveExt::reparametrize`]: super::CurveExt::reparametrize pub fn map_sample_times(self, f: impl Fn(f32) -> f32) -> UnevenSampleAutoCurve { Self { core: self.core.map_sample_times(f), @@ -365,6 +371,7 @@ mod tests { //! 
- function pointers use super::{SampleCurve, UnevenSampleCurve}; use crate::{curve::Interval, VectorSpace}; + use alloc::boxed::Box; use bevy_reflect::Reflect; #[test] diff --git a/crates/bevy_math/src/direction.rs b/crates/bevy_math/src/direction.rs index 5e11d1434bf42..45138f20e2e13 100644 --- a/crates/bevy_math/src/direction.rs +++ b/crates/bevy_math/src/direction.rs @@ -8,17 +8,26 @@ use derive_more::derive::Into; #[cfg(feature = "bevy_reflect")] use bevy_reflect::Reflect; + #[cfg(all(feature = "serialize", feature = "bevy_reflect"))] use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; +#[cfg(all(debug_assertions, feature = "std"))] +use std::eprintln; + +use thiserror::Error; + /// An error indicating that a direction is invalid. -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Error)] pub enum InvalidDirectionError { /// The length of the direction vector is zero or very close to zero. + #[error("The length of the direction vector is zero or very close to zero")] Zero, /// The length of the direction vector is `std::f32::INFINITY`. + #[error("The length of the direction vector is `std::f32::INFINITY`")] Infinite, /// The length of the direction vector is `NaN`. + #[error("The length of the direction vector is `NaN`")] NaN, } @@ -37,15 +46,6 @@ impl InvalidDirectionError { } } -impl core::fmt::Display for InvalidDirectionError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "Direction can not be zero (or very close to zero), or non-finite." - ) - } -} - /// Checks that a vector with the given squared length is normalized. /// /// Warns for small error with a length threshold of approximately `1e-4`, @@ -69,31 +69,24 @@ fn assert_is_normalized(message: &str, length_squared: f32) { } else if length_error_squared > 2e-4 { // Length error is approximately 1e-4 or more. 
#[cfg(feature = "std")] - eprintln!( - "Warning: {message} The length is {}.", - ops::sqrt(length_squared) - ); + #[expect(clippy::print_stderr, reason = "Allowed behind `std` feature gate.")] + { + eprintln!( + "Warning: {message} The length is {}.", + ops::sqrt(length_squared) + ); + } } } -/// A normalized vector pointing in a direction in 2D space -#[deprecated( - since = "0.14.0", - note = "`Direction2d` has been renamed. Please use `Dir2` instead." -)] -pub type Direction2d = Dir2; - -/// A normalized vector pointing in a direction in 3D space -#[deprecated( - since = "0.14.0", - note = "`Direction3d` has been renamed. Please use `Dir3` instead." -)] -pub type Direction3d = Dir3; - /// A normalized vector pointing in a direction in 2D space #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -208,9 +201,11 @@ impl Dir2 { /// let dir2 = Dir2::Y; /// /// let result1 = dir1.slerp(dir2, 1.0 / 3.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result1, Dir2::from_xy(0.75_f32.sqrt(), 0.5).unwrap()); /// /// let result2 = dir1.slerp(dir2, 0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result2, Dir2::from_xy(0.5_f32.sqrt(), 0.5_f32.sqrt()).unwrap()); /// ``` #[inline] @@ -365,7 +360,11 @@ impl approx::UlpsEq for Dir2 { /// A normalized vector pointing in a direction in 3D space #[derive(Clone, Copy, Debug, PartialEq, Into)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( 
all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -467,6 +466,7 @@ impl Dir3 { /// let dir2 = Dir3::Y; /// /// let result1 = dir1.slerp(dir2, 1.0 / 3.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!( /// result1, /// Dir3::from_xyz(0.75_f32.sqrt(), 0.5, 0.0).unwrap(), @@ -474,6 +474,7 @@ impl Dir3 { /// ); /// /// let result2 = dir1.slerp(dir2, 0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result2, Dir3::from_xyz(0.5_f32.sqrt(), 0.5_f32.sqrt(), 0.0).unwrap()); /// ``` #[inline] @@ -624,7 +625,11 @@ impl approx::UlpsEq for Dir3 { /// This may or may not be faster than [`Dir3`]: make sure to benchmark! #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -726,6 +731,7 @@ impl Dir3A { /// let dir2 = Dir3A::Y; /// /// let result1 = dir1.slerp(dir2, 1.0 / 3.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!( /// result1, /// Dir3A::from_xyz(0.75_f32.sqrt(), 0.5, 0.0).unwrap(), @@ -733,6 +739,7 @@ impl Dir3A { /// ); /// /// let result2 = dir1.slerp(dir2, 0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(result2, Dir3A::from_xyz(0.5_f32.sqrt(), 0.5_f32.sqrt(), 0.0).unwrap()); /// ``` #[inline] @@ -860,6 +867,7 @@ impl approx::UlpsEq for Dir3A { } #[cfg(test)] +#[cfg(feature = "approx")] mod tests { use crate::ops; diff --git a/crates/bevy_math/src/float_ord.rs b/crates/bevy_math/src/float_ord.rs index b5f72d1c7cf5e..e69c6b35e0702 100644 --- a/crates/bevy_math/src/float_ord.rs +++ b/crates/bevy_math/src/float_ord.rs @@ -20,7 +20,7 @@ use bevy_reflect::Reflect; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, 
PartialEq, Hash) + reflect(Debug, PartialEq, Hash, Clone) )] pub struct FloatOrd(pub f32); @@ -47,7 +47,10 @@ impl PartialOrd for FloatOrd { } impl Ord for FloatOrd { - #[allow(clippy::comparison_chain)] + #[expect( + clippy::comparison_chain, + reason = "This can't be rewritten with `match` and `cmp`, as this is `cmp` itself." + )] fn cmp(&self, other: &Self) -> Ordering { if self > other { Ordering::Greater @@ -124,7 +127,10 @@ mod tests { } #[test] - #[allow(clippy::nonminimal_bool)] + #[expect( + clippy::nonminimal_bool, + reason = "This tests that all operators work as they should, and in the process requires some non-simplified boolean expressions." + )] fn float_ord_cmp_operators() { assert!(!(NAN < NAN)); assert!(NAN < ZERO); diff --git a/crates/bevy_math/src/isometry.rs b/crates/bevy_math/src/isometry.rs index e01a8cd713661..a221615b0a5bb 100644 --- a/crates/bevy_math/src/isometry.rs +++ b/crates/bevy_math/src/isometry.rs @@ -88,7 +88,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -366,7 +366,7 @@ impl UlpsEq for Isometry2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -589,6 +589,7 @@ impl UlpsEq for Isometry3d { } #[cfg(test)] +#[cfg(feature = "approx")] mod tests { use super::*; use crate::{vec2, vec3, vec3a}; diff --git a/crates/bevy_math/src/lib.rs b/crates/bevy_math/src/lib.rs index a276111c9d500..20d458db72d23 100644 --- a/crates/bevy_math/src/lib.rs +++ b/crates/bevy_math/src/lib.rs @@ -12,7 +12,7 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] -#![cfg_attr(not(feature = "std"), no_std)] 
+#![no_std] //! Provides math types and functionality for the Bevy game engine. //! @@ -20,6 +20,9 @@ //! matrices like [`Mat2`], [`Mat3`] and [`Mat4`] and orientation representations //! like [`Quat`]. +#[cfg(feature = "std")] +extern crate std; + #[cfg(feature = "alloc")] extern crate alloc; diff --git a/crates/bevy_math/src/ops.rs b/crates/bevy_math/src/ops.rs index 42fb55286cc7b..3a7765939da0a 100644 --- a/crates/bevy_math/src/ops.rs +++ b/crates/bevy_math/src/ops.rs @@ -8,9 +8,6 @@ //! It also provides `no_std` compatible alternatives to certain floating-point //! operations which are not provided in the [`core`] library. -#![allow(dead_code)] -#![allow(clippy::disallowed_methods)] - // Note: There are some Rust methods with unspecified precision without a `libm` // equivalent: // - `f32::powi` (integer powers) @@ -22,7 +19,11 @@ // - `f32::gamma` // - `f32::ln_gamma` -#[cfg(not(feature = "libm"))] +#[cfg(all(not(feature = "libm"), feature = "std"))] +#[expect( + clippy::disallowed_methods, + reason = "Many of the disallowed methods are disallowed to force code to use the feature-conditional re-exports from this module, but this module itself is exempt from that rule." +)] mod std_ops { /// Raises a number to a floating point power. @@ -232,7 +233,7 @@ mod std_ops { } } -#[cfg(feature = "libm")] +#[cfg(any(feature = "libm", all(feature = "nostd-libm", not(feature = "std"))))] mod libm_ops { /// Raises a number to a floating point power. @@ -447,7 +448,7 @@ mod libm_ops { } } -#[cfg(all(feature = "libm", not(feature = "std")))] +#[cfg(all(any(feature = "libm", feature = "nostd-libm"), not(feature = "std")))] mod libm_ops_for_no_std { //! Provides standardized names for [`f32`] operations which may not be //! supported on `no_std` platforms. @@ -509,6 +510,14 @@ mod libm_ops_for_no_std { libm::floorf(x) } + /// Returns the smallest integer greater than or equal to `x`. + /// + /// Precision is specified when the `libm` feature is enabled. 
+ #[inline(always)] + pub fn ceil(x: f32) -> f32 { + libm::ceilf(x) + } + /// Returns the fractional part of `x`. /// /// This function always returns the precise result. @@ -519,6 +528,10 @@ mod libm_ops_for_no_std { } #[cfg(feature = "std")] +#[expect( + clippy::disallowed_methods, + reason = "Many of the disallowed methods are disallowed to force code to use the feature-conditional re-exports from this module, but this module itself is exempt from that rule." +)] mod std_ops_for_no_std { //! Provides standardized names for [`f32`] operations which may not be //! supported on `no_std` platforms. @@ -576,6 +589,14 @@ mod std_ops_for_no_std { f32::floor(x) } + /// Returns the smallest integer greater than or equal to `x`. + /// + /// This function always returns the precise result. + #[inline(always)] + pub fn ceil(x: f32) -> f32 { + f32::ceil(x) + } + /// Returns the fractional part of `x`. /// /// This function always returns the precise result. @@ -585,20 +606,24 @@ mod std_ops_for_no_std { } } -#[cfg(feature = "libm")] +#[cfg(any(feature = "libm", all(feature = "nostd-libm", not(feature = "std"))))] pub use libm_ops::*; -#[cfg(not(feature = "libm"))] +#[cfg(all(not(feature = "libm"), feature = "std"))] pub use std_ops::*; #[cfg(feature = "std")] pub use std_ops_for_no_std::*; -#[cfg(all(feature = "libm", not(feature = "std")))] +#[cfg(all(any(feature = "libm", feature = "nostd-libm"), not(feature = "std")))] pub use libm_ops_for_no_std::*; -#[cfg(all(not(feature = "libm"), not(feature = "std")))] -compile_error!("Either the `libm` feature or the `std` feature must be enabled."); +#[cfg(all( + not(feature = "libm"), + not(feature = "std"), + not(feature = "nostd-libm") +))] +compile_error!("Either the `libm`, `std`, or `nostd-libm` feature must be enabled."); /// This extension trait covers shortfall in determinacy from the lack of a `libm` counterpart /// to `f32::powi`. Use this for the common small exponents. 
diff --git a/crates/bevy_math/src/primitives/dim2.rs b/crates/bevy_math/src/primitives/dim2.rs index d476fd86077eb..613345bcd8032 100644 --- a/crates/bevy_math/src/primitives/dim2.rs +++ b/crates/bevy_math/src/primitives/dim2.rs @@ -5,7 +5,7 @@ use thiserror::Error; use super::{Measured2d, Primitive2d, WindingOrder}; use crate::{ ops::{self, FloatPow}, - Dir2, Vec2, + Dir2, InvalidDirectionError, Isometry2d, Ray2d, Rot2, Vec2, }; #[cfg(feature = "alloc")] @@ -25,7 +25,7 @@ use alloc::{boxed::Box, vec::Vec}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -112,7 +112,7 @@ impl Measured2d for Circle { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -279,7 +279,7 @@ impl Arc2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -422,7 +422,7 @@ impl CircularSector { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -776,7 +776,7 @@ mod arc_tests { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -926,7 +926,7 @@ impl Measured2d for Ellipse { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1025,7 +1025,7 @@ impl Measured2d for 
Annulus { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1161,7 +1161,7 @@ impl Measured2d for Rhombus { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1199,7 +1199,11 @@ impl Plane2d { /// For a finite line: [`Segment2d`] #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1211,61 +1215,280 @@ pub struct Line2d { } impl Primitive2d for Line2d {} -/// A segment of a line going through the origin along a direction in 2D space. +/// A line segment defined by two endpoints in 2D space. #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] #[doc(alias = "LineSegment2d")] pub struct Segment2d { - /// The direction of the line segment - pub direction: Dir2, - /// Half the length of the line segment. The segment extends by this amount in both - /// the given direction and its opposite direction - pub half_length: f32, + /// The endpoints of the line segment. 
+ pub vertices: [Vec2; 2], } impl Primitive2d for Segment2d {} impl Segment2d { - /// Create a new `Segment2d` from a direction and full length of the segment + /// Create a new `Segment2d` from its endpoints. #[inline(always)] - pub fn new(direction: Dir2, length: f32) -> Self { + pub const fn new(point1: Vec2, point2: Vec2) -> Self { Self { - direction, - half_length: length / 2.0, + vertices: [point1, point2], } } - /// Create a new `Segment2d` from its endpoints and compute its geometric center + /// Create a new `Segment2d` from its endpoints and compute its geometric center. + #[inline(always)] + #[deprecated(since = "0.16.0", note = "Use the `new` constructor instead")] + pub fn from_points(point1: Vec2, point2: Vec2) -> (Self, Vec2) { + (Self::new(point1, point2), (point1 + point2) / 2.) + } + + /// Create a new `Segment2d` centered at the origin with the given direction and length. /// - /// # Panics + /// The endpoints will be at `-direction * length / 2.0` and `direction * length / 2.0`. + #[inline(always)] + pub fn from_direction_and_length(direction: Dir2, length: f32) -> Self { + let endpoint = 0.5 * length * direction; + Self { + vertices: [-endpoint, endpoint], + } + } + + /// Create a new `Segment2d` centered at the origin from a vector representing + /// the direction and length of the line segment. /// - /// Panics if `point1 == point2` + /// The endpoints will be at `-scaled_direction / 2.0` and `scaled_direction / 2.0`. #[inline(always)] - pub fn from_points(point1: Vec2, point2: Vec2) -> (Self, Vec2) { - let diff = point2 - point1; - let length = diff.length(); + pub fn from_scaled_direction(scaled_direction: Vec2) -> Self { + let endpoint = 0.5 * scaled_direction; + Self { + vertices: [-endpoint, endpoint], + } + } - ( - // We are dividing by the length here, so the vector is normalized. 
- Self::new(Dir2::new_unchecked(diff / length), length), - (point1 + point2) / 2., - ) + /// Create a new `Segment2d` starting from the origin of the given `ray`, + /// going in the direction of the ray for the given `length`. + /// + /// The endpoints will be at `ray.origin` and `ray.origin + length * ray.direction`. + #[inline(always)] + pub fn from_ray_and_length(ray: Ray2d, length: f32) -> Self { + Self { + vertices: [ray.origin, ray.get_point(length)], + } } - /// Get the position of the first point on the line segment + /// Get the position of the first endpoint of the line segment. #[inline(always)] pub fn point1(&self) -> Vec2 { - *self.direction * -self.half_length + self.vertices[0] } - /// Get the position of the second point on the line segment + /// Get the position of the second endpoint of the line segment. #[inline(always)] pub fn point2(&self) -> Vec2 { - *self.direction * self.half_length + self.vertices[1] + } + + /// Compute the midpoint between the two endpoints of the line segment. + #[inline(always)] + #[doc(alias = "midpoint")] + pub fn center(&self) -> Vec2 { + self.point1().midpoint(self.point2()) + } + + /// Compute the length of the line segment. + #[inline(always)] + pub fn length(&self) -> f32 { + self.point1().distance(self.point2()) + } + + /// Compute the squared length of the line segment. + #[inline(always)] + pub fn length_squared(&self) -> f32 { + self.point1().distance_squared(self.point2()) + } + + /// Compute the normalized direction pointing from the first endpoint to the second endpoint. + /// + /// For the non-panicking version, see [`Segment2d::try_direction`]. + /// + /// # Panics + /// + /// Panics if a valid direction could not be computed, for example when the endpoints are coincident, NaN, or infinite. 
+ #[inline(always)] + pub fn direction(&self) -> Dir2 { + self.try_direction().unwrap_or_else(|err| { + panic!("Failed to compute the direction of a line segment: {err}") + }) + } + + /// Try to compute the normalized direction pointing from the first endpoint to the second endpoint. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid direction could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_direction(&self) -> Result { + Dir2::new(self.scaled_direction()) + } + + /// Compute the vector from the first endpoint to the second endpoint. + #[inline(always)] + pub fn scaled_direction(&self) -> Vec2 { + self.point2() - self.point1() + } + + /// Compute the normalized counterclockwise normal on the left-hand side of the line segment. + /// + /// For the non-panicking version, see [`Segment2d::try_left_normal`]. + /// + /// # Panics + /// + /// Panics if a valid normal could not be computed, for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn left_normal(&self) -> Dir2 { + self.try_left_normal().unwrap_or_else(|err| { + panic!("Failed to compute the left-hand side normal of a line segment: {err}") + }) + } + + /// Try to compute the normalized counterclockwise normal on the left-hand side of the line segment. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid normal could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_left_normal(&self) -> Result { + Dir2::new(self.scaled_left_normal()) + } + + /// Compute the non-normalized counterclockwise normal on the left-hand side of the line segment. + /// + /// The length of the normal is the distance between the endpoints. 
+ #[inline(always)] + pub fn scaled_left_normal(&self) -> Vec2 { + let scaled_direction = self.scaled_direction(); + Vec2::new(-scaled_direction.y, scaled_direction.x) + } + + /// Compute the normalized clockwise normal on the right-hand side of the line segment. + /// + /// For the non-panicking version, see [`Segment2d::try_right_normal`]. + /// + /// # Panics + /// + /// Panics if a valid normal could not be computed, for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn right_normal(&self) -> Dir2 { + self.try_right_normal().unwrap_or_else(|err| { + panic!("Failed to compute the right-hand side normal of a line segment: {err}") + }) + } + + /// Try to compute the normalized clockwise normal on the right-hand side of the line segment. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid normal could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_right_normal(&self) -> Result { + Dir2::new(self.scaled_right_normal()) + } + + /// Compute the non-normalized clockwise normal on the right-hand side of the line segment. + /// + /// The length of the normal is the distance between the endpoints. + #[inline(always)] + pub fn scaled_right_normal(&self) -> Vec2 { + let scaled_direction = self.scaled_direction(); + Vec2::new(scaled_direction.y, -scaled_direction.x) + } + + /// Compute the segment transformed by the given [`Isometry2d`]. + #[inline(always)] + pub fn transformed(&self, isometry: impl Into) -> Self { + let isometry: Isometry2d = isometry.into(); + Self::new( + isometry.transform_point(self.point1()), + isometry.transform_point(self.point2()), + ) + } + + /// Compute the segment translated by the given vector. 
+ #[inline(always)] + pub fn translated(&self, translation: Vec2) -> Segment2d { + Self::new(self.point1() + translation, self.point2() + translation) + } + + /// Compute the segment rotated around the origin by the given rotation. + #[inline(always)] + pub fn rotated(&self, rotation: Rot2) -> Segment2d { + Segment2d::new(rotation * self.point1(), rotation * self.point2()) + } + + /// Compute the segment rotated around the given point by the given rotation. + #[inline(always)] + pub fn rotated_around(&self, rotation: Rot2, point: Vec2) -> Segment2d { + // We offset our segment so that our segment is rotated as if from the origin, then we can apply the offset back + let offset = self.translated(-point); + let rotated = offset.rotated(rotation); + rotated.translated(point) + } + + /// Compute the segment rotated around its own center. + #[inline(always)] + pub fn rotated_around_center(&self, rotation: Rot2) -> Segment2d { + self.rotated_around(rotation, self.center()) + } + + /// Compute the segment with its center at the origin, keeping the same direction and length. + #[inline(always)] + pub fn centered(&self) -> Segment2d { + let center = self.center(); + self.translated(-center) + } + + /// Compute the segment with a new length, keeping the same direction and center. + #[inline(always)] + pub fn resized(&self, length: f32) -> Segment2d { + let offset_from_origin = self.center(); + let centered = self.translated(-offset_from_origin); + let ratio = length / self.length(); + let segment = Segment2d::new(centered.point1() * ratio, centered.point2() * ratio); + segment.translated(offset_from_origin) + } + + /// Reverses the direction of the line segment by swapping the endpoints. + #[inline(always)] + pub fn reverse(&mut self) { + let [point1, point2] = &mut self.vertices; + core::mem::swap(point1, point2); + } + + /// Returns the line segment with its direction reversed by swapping the endpoints. 
+ #[inline(always)] + #[must_use] + pub fn reversed(mut self) -> Self { + self.reverse(); + self + } +} + +impl From<[Vec2; 2]> for Segment2d { + #[inline(always)] + fn from(vertices: [Vec2; 2]) -> Self { + Self { vertices } + } +} + +impl From<(Vec2, Vec2)> for Segment2d { + #[inline(always)] + fn from((point1, point2): (Vec2, Vec2)) -> Self { + Self::new(point1, point2) } } @@ -1274,7 +1497,11 @@ impl Segment2d { /// For a version without generics: [`BoxedPolyline2d`] #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1343,7 +1570,7 @@ impl BoxedPolyline2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1514,7 +1741,7 @@ impl Measured2d for Triangle2d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1604,7 +1831,11 @@ impl Measured2d for Rectangle { /// For a version without generics: [`BoxedPolygon`] #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1654,7 +1885,11 @@ impl From> for Polygon { /// A convex polygon with `N` vertices. 
#[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -1773,7 +2008,7 @@ impl BoxedPolygon { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1919,7 +2154,7 @@ impl Measured2d for RegularPolygon { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -2242,9 +2477,9 @@ mod tests { let mut rotated_vertices = polygon.vertices(core::f32::consts::FRAC_PI_4).into_iter(); // Distance from the origin to the middle of a side, derived using Pythagorean theorem - let side_sistance = FRAC_1_SQRT_2; + let side_distance = FRAC_1_SQRT_2; assert!( - (rotated_vertices.next().unwrap() - Vec2::new(-side_sistance, side_sistance)).length() + (rotated_vertices.next().unwrap() - Vec2::new(-side_distance, side_distance)).length() < 1e-7, ); } diff --git a/crates/bevy_math/src/primitives/dim3.rs b/crates/bevy_math/src/primitives/dim3.rs index 8d25df5b83d0a..a36db0ade57d2 100644 --- a/crates/bevy_math/src/primitives/dim3.rs +++ b/crates/bevy_math/src/primitives/dim3.rs @@ -3,7 +3,7 @@ use core::f32::consts::{FRAC_PI_3, PI}; use super::{Circle, Measured2d, Measured3d, Primitive2d, Primitive3d}; use crate::{ ops::{self, FloatPow}, - Dir3, InvalidDirectionError, Isometry3d, Mat3, Vec2, Vec3, + Dir3, InvalidDirectionError, Isometry3d, Mat3, Ray3d, Vec2, Vec3, }; #[cfg(feature = "bevy_reflect")] @@ -21,7 +21,7 @@ use alloc::{boxed::Box, vec::Vec}; 
#[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -93,7 +93,7 @@ impl Measured3d for Sphere { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -165,7 +165,7 @@ impl Plane3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -338,7 +338,11 @@ impl InfinitePlane3d { /// For a finite line: [`Segment3d`] #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -349,61 +353,216 @@ pub struct Line3d { } impl Primitive3d for Line3d {} -/// A segment of a line going through the origin along a direction in 3D space. -#[doc(alias = "LineSegment3d")] +/// A line segment defined by two endpoints in 3D space. #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) )] +#[doc(alias = "LineSegment3d")] pub struct Segment3d { - /// The direction of the line - pub direction: Dir3, - /// Half the length of the line segment. 
The segment extends by this amount in both - /// the given direction and its opposite direction - pub half_length: f32, + /// The endpoints of the line segment. + pub vertices: [Vec3; 2], } impl Primitive3d for Segment3d {} impl Segment3d { - /// Create a new `Segment3d` from a direction and full length of the segment + /// Create a new `Segment3d` from its endpoints. #[inline(always)] - pub fn new(direction: Dir3, length: f32) -> Self { + pub const fn new(point1: Vec3, point2: Vec3) -> Self { Self { - direction, - half_length: length / 2.0, + vertices: [point1, point2], } } - /// Create a new `Segment3d` from its endpoints and compute its geometric center + /// Create a new `Segment3d` from its endpoints and compute its geometric center. + #[inline(always)] + #[deprecated(since = "0.16.0", note = "Use the `new` constructor instead")] + pub fn from_points(point1: Vec3, point2: Vec3) -> (Self, Vec3) { + (Self::new(point1, point2), (point1 + point2) / 2.) + } + + /// Create a new `Segment3d` centered at the origin with the given direction and length. /// - /// # Panics + /// The endpoints will be at `-direction * length / 2.0` and `direction * length / 2.0`. + #[inline(always)] + pub fn from_direction_and_length(direction: Dir3, length: f32) -> Self { + let endpoint = 0.5 * length * direction; + Self { + vertices: [-endpoint, endpoint], + } + } + + /// Create a new `Segment3d` centered at the origin from a vector representing + /// the direction and length of the line segment. /// - /// Panics if `point1 == point2` + /// The endpoints will be at `-scaled_direction / 2.0` and `scaled_direction / 2.0`. 
#[inline(always)] - pub fn from_points(point1: Vec3, point2: Vec3) -> (Self, Vec3) { - let diff = point2 - point1; - let length = diff.length(); + pub fn from_scaled_direction(scaled_direction: Vec3) -> Self { + let endpoint = 0.5 * scaled_direction; + Self { + vertices: [-endpoint, endpoint], + } + } - ( - // We are dividing by the length here, so the vector is normalized. - Self::new(Dir3::new_unchecked(diff / length), length), - (point1 + point2) / 2., - ) + /// Create a new `Segment3d` starting from the origin of the given `ray`, + /// going in the direction of the ray for the given `length`. + /// + /// The endpoints will be at `ray.origin` and `ray.origin + length * ray.direction`. + #[inline(always)] + pub fn from_ray_and_length(ray: Ray3d, length: f32) -> Self { + Self { + vertices: [ray.origin, ray.get_point(length)], + } } - /// Get the position of the first point on the line segment + /// Get the position of the first endpoint of the line segment. #[inline(always)] pub fn point1(&self) -> Vec3 { - *self.direction * -self.half_length + self.vertices[0] } - /// Get the position of the second point on the line segment + /// Get the position of the second endpoint of the line segment. #[inline(always)] pub fn point2(&self) -> Vec3 { - *self.direction * self.half_length + self.vertices[1] + } + + /// Compute the midpoint between the two endpoints of the line segment. + #[inline(always)] + #[doc(alias = "midpoint")] + pub fn center(&self) -> Vec3 { + self.point1().midpoint(self.point2()) + } + + /// Compute the length of the line segment. + #[inline(always)] + pub fn length(&self) -> f32 { + self.point1().distance(self.point2()) + } + + /// Compute the squared length of the line segment. + #[inline(always)] + pub fn length_squared(&self) -> f32 { + self.point1().distance_squared(self.point2()) + } + + /// Compute the normalized direction pointing from the first endpoint to the second endpoint. 
+ /// + /// For the non-panicking version, see [`Segment3d::try_direction`]. + /// + /// # Panics + /// + /// Panics if a valid direction could not be computed, for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn direction(&self) -> Dir3 { + self.try_direction().unwrap_or_else(|err| { + panic!("Failed to compute the direction of a line segment: {err}") + }) + } + + /// Try to compute the normalized direction pointing from the first endpoint to the second endpoint. + /// + /// Returns [`Err(InvalidDirectionError)`](InvalidDirectionError) if a valid direction could not be computed, + /// for example when the endpoints are coincident, NaN, or infinite. + #[inline(always)] + pub fn try_direction(&self) -> Result { + Dir3::new(self.scaled_direction()) + } + + /// Compute the vector from the first endpoint to the second endpoint. + #[inline(always)] + pub fn scaled_direction(&self) -> Vec3 { + self.point2() - self.point1() + } + + /// Compute the segment transformed by the given [`Isometry3d`]. + #[inline(always)] + pub fn transformed(&self, isometry: impl Into) -> Self { + let isometry: Isometry3d = isometry.into(); + Self::new( + isometry.transform_point(self.point1()).into(), + isometry.transform_point(self.point2()).into(), + ) + } + + /// Compute the segment translated by the given vector. + #[inline(always)] + pub fn translated(&self, translation: Vec3) -> Segment3d { + Self::new(self.point1() + translation, self.point2() + translation) + } + + /// Compute the segment rotated around the origin by the given rotation. + #[inline(always)] + pub fn rotated(&self, rotation: Quat) -> Segment3d { + Segment3d::new(rotation * self.point1(), rotation * self.point2()) + } + + /// Compute the segment rotated around the given point by the given rotation. 
+ #[inline(always)] + pub fn rotated_around(&self, rotation: Quat, point: Vec3) -> Segment3d { + // We offset our segment so that our segment is rotated as if from the origin, then we can apply the offset back + let offset = self.translated(-point); + let rotated = offset.rotated(rotation); + rotated.translated(point) + } + + /// Compute the segment rotated around its own center. + #[inline(always)] + pub fn rotated_around_center(&self, rotation: Quat) -> Segment3d { + self.rotated_around(rotation, self.center()) + } + + /// Compute the segment with its center at the origin, keeping the same direction and length. + #[inline(always)] + pub fn centered(&self) -> Segment3d { + let center = self.center(); + self.translated(-center) + } + + /// Compute the segment with a new length, keeping the same direction and center. + #[inline(always)] + pub fn resized(&self, length: f32) -> Segment3d { + let offset_from_origin = self.center(); + let centered = self.translated(-offset_from_origin); + let ratio = length / self.length(); + let segment = Segment3d::new(centered.point1() * ratio, centered.point2() * ratio); + segment.translated(offset_from_origin) + } + + /// Reverses the direction of the line segment by swapping the endpoints. + #[inline(always)] + pub fn reverse(&mut self) { + let [point1, point2] = &mut self.vertices; + core::mem::swap(point1, point2); + } + + /// Returns the line segment with its direction reversed by swapping the endpoints. 
+ #[inline(always)] + #[must_use] + pub fn reversed(mut self) -> Self { + self.reverse(); + self + } +} + +impl From<[Vec3; 2]> for Segment3d { + #[inline(always)] + fn from(vertices: [Vec3; 2]) -> Self { + Self { vertices } + } +} + +impl From<(Vec3, Vec3)> for Segment3d { + #[inline(always)] + fn from((point1, point2): (Vec3, Vec3)) -> Self { + Self::new(point1, point2) } } @@ -412,7 +571,11 @@ impl Segment3d { /// For a version without generics: [`BoxedPolyline3d`] #[derive(Clone, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Serialize, Deserialize) @@ -482,7 +645,7 @@ impl BoxedPolyline3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -574,7 +737,7 @@ impl Measured3d for Cuboid { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -652,7 +815,7 @@ impl Measured3d for Cylinder { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -722,7 +885,7 @@ impl Measured3d for Capsule3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -804,7 +967,7 @@ impl Measured3d for Cone { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - 
reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -856,7 +1019,7 @@ pub enum TorusKind { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -967,7 +1130,7 @@ impl Measured3d for Torus { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -1160,7 +1323,7 @@ impl Measured2d for Triangle3d { #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/primitives/polygon.rs b/crates/bevy_math/src/primitives/polygon.rs index 1167d07981094..20d35b552c827 100644 --- a/crates/bevy_math/src/primitives/polygon.rs +++ b/crates/bevy_math/src/primitives/polygon.rs @@ -1,12 +1,17 @@ #[cfg(feature = "alloc")] -use alloc::{collections::BTreeMap, vec::Vec}; +use { + super::{Measured2d, Triangle2d}, + alloc::{collections::BTreeMap, vec::Vec}, +}; use core::cmp::Ordering; use crate::Vec2; -use super::{Measured2d, Triangle2d}; - +#[cfg_attr( + not(feature = "alloc"), + expect(dead_code, reason = "this type is only used with the alloc feature") +)] #[derive(Debug, Clone, Copy)] enum Endpoint { Left, @@ -20,12 +25,20 @@ enum Endpoint { /// /// This is the order expected by the [`SweepLine`]. 
#[derive(Debug, Clone, Copy)] +#[cfg_attr( + not(feature = "alloc"), + allow(dead_code, reason = "this type is only used with the alloc feature") +)] struct SweepLineEvent { segment: Segment, /// Type of the vertex (left or right) endpoint: Endpoint, } impl SweepLineEvent { + #[cfg_attr( + not(feature = "alloc"), + allow(dead_code, reason = "this type is only used with the alloc feature") + )] fn position(&self) -> Vec2 { match self.endpoint { Endpoint::Left => self.segment.left, @@ -51,11 +64,12 @@ impl Ord for SweepLineEvent { } /// Orders 2D points according to the order expected by the sweep line and event queue from -X to +X and then -Y to Y. +#[cfg_attr( + not(feature = "alloc"), + allow(dead_code, reason = "this type is only used with the alloc feature") +)] fn xy_order(a: Vec2, b: Vec2) -> Ordering { - match a.x.total_cmp(&b.x) { - Ordering::Equal => a.y.total_cmp(&b.y), - ord => ord, - } + a.x.total_cmp(&b.x).then_with(|| a.y.total_cmp(&b.y)) } /// The event queue holds an ordered list of all events the [`SweepLine`] will encounter when checking the current polygon. 
@@ -121,6 +135,7 @@ impl PartialEq for Segment { } } impl Eq for Segment {} + impl PartialOrd for Segment { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -128,14 +143,18 @@ impl PartialOrd for Segment { } impl Ord for Segment { fn cmp(&self, other: &Self) -> Ordering { - match self.left.y.total_cmp(&other.left.y) { - Ordering::Equal => self.right.y.total_cmp(&other.right.y), - ord => ord, - } + self.left + .y + .total_cmp(&other.left.y) + .then_with(|| self.right.y.total_cmp(&other.right.y)) } } /// Holds information about which segment is above and which is below a given [`Segment`] +#[cfg_attr( + not(feature = "alloc"), + expect(dead_code, reason = "this type is only used with the alloc feature") +)] #[derive(Debug, Clone, Copy)] struct SegmentOrder { above: Option, @@ -243,6 +262,13 @@ impl<'a> SweepLine<'a> { /// Test what side of the line through `p1` and `p2` `q` is. /// /// The result will be `0` if the `q` is on the segment, negative for one side and positive for the other. +#[cfg_attr( + not(feature = "alloc"), + expect( + dead_code, + reason = "this function is only used with the alloc feature" + ) +)] #[inline(always)] fn point_side(p1: Vec2, p2: Vec2, q: Vec2) -> f32 { (p2.x - p1.x) * (q.y - p1.y) - (q.x - p1.x) * (p2.y - p1.y) diff --git a/crates/bevy_math/src/ray.rs b/crates/bevy_math/src/ray.rs index 273ed61fa4b97..5fe9c3740a846 100644 --- a/crates/bevy_math/src/ray.rs +++ b/crates/bevy_math/src/ray.rs @@ -12,7 +12,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// An infinite half-line starting at `origin` and going in `direction` in 2D space. 
#[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Deserialize, Serialize) @@ -54,7 +58,11 @@ impl Ray2d { /// An infinite half-line starting at `origin` and going in `direction` in 3D space. #[derive(Clone, Copy, Debug, PartialEq)] #[cfg_attr(feature = "serialize", derive(serde::Serialize, serde::Deserialize))] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Debug, PartialEq))] +#[cfg_attr( + feature = "bevy_reflect", + derive(Reflect), + reflect(Debug, PartialEq, Clone) +)] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), reflect(Deserialize, Serialize) diff --git a/crates/bevy_math/src/rects/irect.rs b/crates/bevy_math/src/rects/irect.rs index 73e830f085e29..74da994b367e2 100644 --- a/crates/bevy_math/src/rects/irect.rs +++ b/crates/bevy_math/src/rects/irect.rs @@ -19,7 +19,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Hash, Default) + reflect(Debug, PartialEq, Hash, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/rects/rect.rs b/crates/bevy_math/src/rects/rect.rs index 901a569a71f4e..92b7059945949 100644 --- a/crates/bevy_math/src/rects/rect.rs +++ b/crates/bevy_math/src/rects/rect.rs @@ -19,7 +19,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/rects/urect.rs 
b/crates/bevy_math/src/rects/urect.rs index 54127504654ca..9d19c5ae7c5b3 100644 --- a/crates/bevy_math/src/rects/urect.rs +++ b/crates/bevy_math/src/rects/urect.rs @@ -19,7 +19,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Hash, Default) + reflect(Debug, PartialEq, Hash, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), diff --git a/crates/bevy_math/src/rotation2d.rs b/crates/bevy_math/src/rotation2d.rs index 5b0bc816bc5ca..1320f6363a784 100644 --- a/crates/bevy_math/src/rotation2d.rs +++ b/crates/bevy_math/src/rotation2d.rs @@ -30,9 +30,11 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; /// assert_eq!(rotation2.as_radians(), PI / 4.0); /// /// // "Add" rotations together using `*` +/// #[cfg(feature = "approx")] /// assert_relative_eq!(rotation1 * rotation2, Rot2::degrees(135.0)); /// /// // Rotate vectors +/// #[cfg(feature = "approx")] /// assert_relative_eq!(rotation1 * Vec2::X, Vec2::Y); /// ``` #[derive(Clone, Copy, Debug, PartialEq)] @@ -40,7 +42,7 @@ use bevy_reflect::{ReflectDeserialize, ReflectSerialize}; #[cfg_attr( feature = "bevy_reflect", derive(Reflect), - reflect(Debug, PartialEq, Default) + reflect(Debug, PartialEq, Default, Clone) )] #[cfg_attr( all(feature = "serialize", feature = "bevy_reflect"), @@ -116,9 +118,11 @@ impl Rot2 { /// /// let rot1 = Rot2::radians(3.0 * FRAC_PI_2); /// let rot2 = Rot2::radians(-FRAC_PI_2); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1, rot2); /// /// let rot3 = Rot2::radians(PI); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1 * rot1, rot3); /// ``` #[inline] @@ -141,9 +145,11 @@ impl Rot2 { /// /// let rot1 = Rot2::degrees(270.0); /// let rot2 = Rot2::degrees(-90.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1, rot2); /// /// let rot3 = Rot2::degrees(180.0); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1 * 
rot1, rot3); /// ``` #[inline] @@ -165,9 +171,11 @@ impl Rot2 { /// /// let rot1 = Rot2::turn_fraction(0.75); /// let rot2 = Rot2::turn_fraction(-0.25); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1, rot2); /// /// let rot3 = Rot2::turn_fraction(0.5); + /// #[cfg(feature = "approx")] /// assert_relative_eq!(rot1 * rot1, rot3); /// ``` #[inline] @@ -329,16 +337,6 @@ impl Rot2 { self.cos > 0.0 && ops::abs(self.sin) < threshold_angle_sin } - /// Returns the angle in radians needed to make `self` and `other` coincide. - #[inline] - #[deprecated( - since = "0.15.0", - note = "Use `angle_to` instead, the semantics of `angle_between` will change in the future." - )] - pub fn angle_between(self, other: Self) -> f32 { - self.angle_to(other) - } - /// Returns the angle in radians needed to make `self` and `other` coincide. #[inline] pub fn angle_to(self, other: Self) -> f32 { diff --git a/crates/bevy_math/src/sampling/shape_sampling.rs b/crates/bevy_math/src/sampling/shape_sampling.rs index 68d77cd1d7f0b..3be0ead1da98e 100644 --- a/crates/bevy_math/src/sampling/shape_sampling.rs +++ b/crates/bevy_math/src/sampling/shape_sampling.rs @@ -61,7 +61,7 @@ pub trait ShapeSample { /// let square = Rectangle::new(2.0, 2.0); /// /// // Returns a Vec2 with both x and y between -1 and 1. - /// println!("{:?}", square.sample_interior(&mut rand::thread_rng())); + /// println!("{}", square.sample_interior(&mut rand::thread_rng())); /// ``` fn sample_interior(&self, rng: &mut R) -> Self::Output; @@ -76,7 +76,7 @@ pub trait ShapeSample { /// /// // Returns a Vec2 where one of the coordinates is at ±1, /// // and the other is somewhere between -1 and 1. 
- /// println!("{:?}", square.sample_boundary(&mut rand::thread_rng())); + /// println!("{}", square.sample_boundary(&mut rand::thread_rng())); /// ``` fn sample_boundary(&self, rng: &mut R) -> Self::Output; @@ -92,7 +92,7 @@ pub trait ShapeSample { /// /// // Iterate over points randomly drawn from `square`'s interior: /// for random_val in square.interior_dist().sample_iter(rng).take(5) { - /// println!("{:?}", random_val); + /// println!("{}", random_val); /// } /// ``` fn interior_dist(self) -> impl Distribution @@ -114,7 +114,7 @@ pub trait ShapeSample { /// /// // Iterate over points randomly drawn from `square`'s boundary: /// for random_val in square.boundary_dist().sample_iter(rng).take(5) { - /// println!("{:?}", random_val); + /// println!("{}", random_val); /// } /// ``` fn boundary_dist(self) -> impl Distribution @@ -234,7 +234,7 @@ impl ShapeSample for Rectangle { fn sample_boundary(&self, rng: &mut R) -> Vec2 { let primary_side = rng.gen_range(-1.0..1.0); - let other_side = if rng.gen() { -1.0 } else { 1.0 }; + let other_side = if rng.r#gen() { -1.0 } else { 1.0 }; if self.half_size.x + self.half_size.y > 0.0 { if rng.gen_bool((self.half_size.x / (self.half_size.x + self.half_size.y)) as f64) { @@ -261,7 +261,7 @@ impl ShapeSample for Cuboid { fn sample_boundary(&self, rng: &mut R) -> Vec3 { let primary_side1 = rng.gen_range(-1.0..1.0); let primary_side2 = rng.gen_range(-1.0..1.0); - let other_side = if rng.gen() { -1.0 } else { 1.0 }; + let other_side = if rng.r#gen() { -1.0 } else { 1.0 }; if let Ok(dist) = WeightedIndex::new([ self.half_size.y * self.half_size.z, @@ -425,7 +425,7 @@ impl ShapeSample for Cylinder { if self.radius + 2.0 * self.half_height > 0.0 { if rng.gen_bool((self.radius / (self.radius + 2.0 * self.half_height)) as f64) { let Vec2 { x, y: z } = self.base().sample_interior(rng); - if rng.gen() { + if rng.r#gen() { Vec3::new(x, self.half_height, z) } else { Vec3::new(x, -self.half_height, z) diff --git 
a/crates/bevy_math/src/sampling/standard.rs b/crates/bevy_math/src/sampling/standard.rs index 6750d5c6d5fac..d4e82fdc81c14 100644 --- a/crates/bevy_math/src/sampling/standard.rs +++ b/crates/bevy_math/src/sampling/standard.rs @@ -12,7 +12,7 @@ //! let random_direction1: Dir3 = random(); //! //! // Random direction using the rng constructed above -//! let random_direction2: Dir3 = rng.gen(); +//! let random_direction2: Dir3 = rng.r#gen(); //! //! // The same as the previous but with different syntax //! let random_direction3 = Dir3::from_rng(&mut rng); @@ -49,7 +49,7 @@ where { /// Construct a value of this type uniformly at random using `rng` as the source of randomness. fn from_rng(rng: &mut R) -> Self { - rng.gen() + rng.r#gen() } } diff --git a/crates/bevy_mesh/Cargo.toml b/crates/bevy_mesh/Cargo.toml index e5871c3e3c6c3..2ccb65cdb47ae 100644 --- a/crates/bevy_mesh/Cargo.toml +++ b/crates/bevy_mesh/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_mesh" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides mesh types for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,25 +9,29 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [dependencies] -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +# bevy +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_mikktspace = { path = 
"../bevy_mikktspace", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", + "serialize", ] } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_mikktspace = { path = "../bevy_mikktspace", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -# misc +# other bitflags = { version = "2.3", features = ["serde"] } bytemuck = { version = "1.5" } -wgpu-types = { version = "23", default-features = false } +wgpu-types = { version = "24", default-features = false } serde = { version = "1", features = ["derive"] } hexasphere = "15.0" thiserror = { version = "2", default-features = false } +tracing = { version = "0.1", default-features = false, features = ["std"] } [lints] workspace = true diff --git a/crates/bevy_mesh/LICENSE-APACHE b/crates/bevy_mesh/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_mesh/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_mesh/LICENSE-MIT b/crates/bevy_mesh/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_mesh/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_mesh/src/index.rs b/crates/bevy_mesh/src/index.rs index d9593543a859a..d2497e2c50a5f 100644 --- a/crates/bevy_mesh/src/index.rs +++ b/crates/bevy_mesh/src/index.rs @@ -70,6 +70,7 @@ pub enum MeshTrianglesError { /// /// It describes the order in which the vertex attributes should be joined into faces. 
#[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub enum Indices { U16(Vec), U32(Vec), diff --git a/crates/bevy_mesh/src/lib.rs b/crates/bevy_mesh/src/lib.rs index 83bc30df3518b..58702d7d8b428 100644 --- a/crates/bevy_mesh/src/lib.rs +++ b/crates/bevy_mesh/src/lib.rs @@ -17,6 +17,7 @@ pub use mesh::*; pub use mikktspace::*; pub use primitives::*; pub use vertex::*; +pub use wgpu_types::VertexFormat; bitflags! { /// Our base mesh pipeline key bits start from the highest bit and go diff --git a/crates/bevy_mesh/src/mesh.rs b/crates/bevy_mesh/src/mesh.rs index f643cc6674240..e4868dbf6997d 100644 --- a/crates/bevy_mesh/src/mesh.rs +++ b/crates/bevy_mesh/src/mesh.rs @@ -6,15 +6,15 @@ use super::{ GenerateTangentsError, Indices, MeshAttributeData, MeshTrianglesError, MeshVertexAttribute, MeshVertexAttributeId, MeshVertexBufferLayout, MeshVertexBufferLayoutRef, MeshVertexBufferLayouts, MeshWindingInvertError, VertexAttributeValues, VertexBufferLayout, - VertexFormatSize, }; use alloc::collections::BTreeMap; use bevy_asset::{Asset, Handle, RenderAssetUsages}; use bevy_image::Image; use bevy_math::{primitives::Triangle3d, *}; use bevy_reflect::Reflect; -use bevy_utils::tracing::warn; use bytemuck::cast_slice; +use thiserror::Error; +use tracing::warn; use wgpu_types::{VertexAttribute, VertexFormat, VertexStepMode}; pub const INDEX_BUFFER_ASSET_INDEX: u64 = 0; @@ -85,34 +85,35 @@ pub const VERTEX_ATTRIBUTE_BUFFER_ID: u64 = 10; /// ## Common points of confusion /// /// - UV maps in Bevy start at the top-left, see [`ATTRIBUTE_UV_0`](Mesh::ATTRIBUTE_UV_0), -/// other APIs can have other conventions, `OpenGL` starts at bottom-left. +/// other APIs can have other conventions, `OpenGL` starts at bottom-left. /// - It is possible and sometimes useful for multiple vertices to have the same -/// [position attribute](Mesh::ATTRIBUTE_POSITION) value, -/// it's a common technique in 3D modeling for complex UV mapping or other calculations. 
+/// [position attribute](Mesh::ATTRIBUTE_POSITION) value, +/// it's a common technique in 3D modeling for complex UV mapping or other calculations. /// - Bevy performs frustum culling based on the `Aabb` of meshes, which is calculated -/// and added automatically for new meshes only. If a mesh is modified, the entity's `Aabb` -/// needs to be updated manually or deleted so that it is re-calculated. +/// and added automatically for new meshes only. If a mesh is modified, the entity's `Aabb` +/// needs to be updated manually or deleted so that it is re-calculated. /// /// ## Use with `StandardMaterial` /// /// To render correctly with `StandardMaterial`, a mesh needs to have properly defined: /// - [`UVs`](Mesh::ATTRIBUTE_UV_0): Bevy needs to know how to map a texture onto the mesh -/// (also true for `ColorMaterial`). +/// (also true for `ColorMaterial`). /// - [`Normals`](Mesh::ATTRIBUTE_NORMAL): Bevy needs to know how light interacts with your mesh. -/// [0.0, 0.0, 1.0] is very common for simple flat meshes on the XY plane, -/// because simple meshes are smooth and they don't require complex light calculations. +/// [0.0, 0.0, 1.0] is very common for simple flat meshes on the XY plane, +/// because simple meshes are smooth and they don't require complex light calculations. /// - Vertex winding order: by default, `StandardMaterial.cull_mode` is `Some(Face::Back)`, -/// which means that Bevy would *only* render the "front" of each triangle, which -/// is the side of the triangle from where the vertices appear in a *counter-clockwise* order. +/// which means that Bevy would *only* render the "front" of each triangle, which +/// is the side of the triangle from where the vertices appear in a *counter-clockwise* order. 
#[derive(Asset, Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct Mesh { - #[reflect(ignore)] + #[reflect(ignore, clone)] primitive_topology: PrimitiveTopology, /// `std::collections::BTreeMap` with all defined vertex attributes (Positions, Normals, ...) /// for this mesh. Attribute ids to attribute values. /// Uses a [`BTreeMap`] because, unlike `HashMap`, it has a defined iteration order, /// which allows easy stable `VertexBuffers` (i.e. same buffer order) - #[reflect(ignore)] + #[reflect(ignore, clone)] attributes: BTreeMap, indices: Option, morph_targets: Option>, @@ -280,7 +281,7 @@ impl Mesh { self.attributes.contains_key(&id.into()) } - /// Retrieves the data currently set to the vertex attribute with the specified `name`. + /// Retrieves the data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`]. #[inline] pub fn attribute( &self, @@ -289,6 +290,15 @@ impl Mesh { self.attributes.get(&id.into()).map(|data| &data.values) } + /// Retrieves the full data currently set to the vertex attribute with the specified [`MeshVertexAttributeId`]. + #[inline] + pub(crate) fn attribute_data( + &self, + id: impl Into, + ) -> Option<&MeshAttributeData> { + self.attributes.get(&id.into()) + } + /// Retrieves the data currently set to the vertex attribute with the specified `name` mutably. 
#[inline] pub fn attribute_mut( @@ -369,7 +379,7 @@ impl Mesh { pub fn get_vertex_size(&self) -> u64 { self.attributes .values() - .map(|data| data.attribute.format.get_size()) + .map(|data| data.attribute.format.size()) .sum() } @@ -404,7 +414,7 @@ impl Mesh { format: data.attribute.format, shader_location: index as u32, }); - accumulated_offset += data.attribute.format.get_size(); + accumulated_offset += data.attribute.format.size(); } let layout = MeshVertexBufferLayout { @@ -472,7 +482,7 @@ impl Mesh { // bundle into interleaved buffers let mut attribute_offset = 0; for attribute_data in self.attributes.values() { - let attribute_size = attribute_data.attribute.format.get_size() as usize; + let attribute_size = attribute_data.attribute.format.size() as usize; let attributes_bytes = attribute_data.values.get_bytes(); for (vertex_index, attribute_bytes) in attributes_bytes .chunks_exact(attribute_size) @@ -491,7 +501,6 @@ impl Mesh { /// /// This can dramatically increase the vertex count, so make sure this is what you want. /// Does nothing if no [Indices] are set. - #[allow(clippy::match_same_arms)] pub fn duplicate_vertices(&mut self) { fn duplicate(values: &[T], indices: impl Iterator) -> Vec { indices.map(|i| values[i]).collect() @@ -503,6 +512,10 @@ impl Mesh { for attributes in self.attributes.values_mut() { let indices = indices.iter(); + #[expect( + clippy::match_same_arms, + reason = "Although the `vec` binding on some match arms may have different types, each variant has different semantics; thus it's not guaranteed that they will use the same type forever." + )] match &mut attributes.values { VertexAttributeValues::Float32(vec) => *vec = duplicate(vec, indices), VertexAttributeValues::Sint32(vec) => *vec = duplicate(vec, indices), @@ -785,12 +798,11 @@ impl Mesh { /// /// `Aabb` of entities with modified mesh are not updated automatically. 
/// - /// # Panics + /// # Errors /// - /// Panics if the vertex attribute values of `other` are incompatible with `self`. + /// Returns [`Err(MergeMeshError)`](MergeMeshError) if the vertex attribute values of `other` are incompatible with `self`. /// For example, [`VertexAttributeValues::Float32`] is incompatible with [`VertexAttributeValues::Float32x3`]. - #[allow(clippy::match_same_arms)] - pub fn merge(&mut self, other: &Mesh) { + pub fn merge(&mut self, other: &Mesh) -> Result<(), MergeMeshError> { use VertexAttributeValues::*; // The indices of `other` should start after the last vertex of `self`. @@ -798,8 +810,11 @@ impl Mesh { // Extend attributes of `self` with attributes of `other`. for (attribute, values) in self.attributes_mut() { - let enum_variant_name = values.enum_variant_name(); if let Some(other_values) = other.attribute(attribute.id) { + #[expect( + clippy::match_same_arms, + reason = "Although the bindings on some match arms may have different types, each variant has different semantics; thus it's not guaranteed that they will use the same type forever." 
+ )] match (values, other_values) { (Float32(vec1), Float32(vec2)) => vec1.extend(vec2), (Sint32(vec1), Sint32(vec2)) => vec1.extend(vec2), @@ -829,11 +844,14 @@ impl Mesh { (Snorm8x4(vec1), Snorm8x4(vec2)) => vec1.extend(vec2), (Uint8x4(vec1), Uint8x4(vec2)) => vec1.extend(vec2), (Unorm8x4(vec1), Unorm8x4(vec2)) => vec1.extend(vec2), - _ => panic!( - "Incompatible vertex attribute types {} and {}", - enum_variant_name, - other_values.enum_variant_name() - ), + _ => { + return Err(MergeMeshError { + self_attribute: *attribute, + other_attribute: other + .attribute_data(attribute.id) + .map(|data| data.attribute), + }) + } } } } @@ -842,6 +860,7 @@ impl Mesh { if let (Some(indices), Some(other_indices)) = (self.indices_mut(), other.indices()) { indices.extend(other_indices.iter().map(|i| (i + index_offset) as u32)); } + Ok(()) } /// Transforms the vertex positions, normals, and tangents of the mesh by the given [`Transform`]. @@ -863,7 +882,7 @@ impl Mesh { "mesh transform scale cannot be zero on more than one axis" ); - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply scale, rotation, and translation to vertex positions @@ -880,7 +899,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut normals)) = + if let Some(VertexAttributeValues::Float32x3(normals)) = self.attribute_mut(Mesh::ATTRIBUTE_NORMAL) { // Transform normals, taking into account non-uniform scaling and rotation @@ -891,13 +910,16 @@ impl Mesh { }); } - if let Some(VertexAttributeValues::Float32x3(ref mut tangents)) = + if let Some(VertexAttributeValues::Float32x4(tangents)) = self.attribute_mut(Mesh::ATTRIBUTE_TANGENT) { // Transform tangents, taking into account non-uniform scaling and rotation tangents.iter_mut().for_each(|tangent| { + let handedness = tangent[3]; let scaled_tangent = Vec3::from_slice(tangent) * transform.scale; - *tangent 
= (transform.rotation * scaled_tangent.normalize_or_zero()).to_array(); + *tangent = (transform.rotation * scaled_tangent.normalize_or_zero()) + .extend(handedness) + .to_array(); }); } } @@ -918,7 +940,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply translation to vertex positions @@ -940,7 +962,7 @@ impl Mesh { /// /// `Aabb` of entities with modified mesh are not updated automatically. pub fn rotate_by(&mut self, rotation: Quat) { - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply rotation to vertex positions @@ -954,7 +976,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut normals)) = + if let Some(VertexAttributeValues::Float32x3(normals)) = self.attribute_mut(Mesh::ATTRIBUTE_NORMAL) { // Transform normals @@ -963,12 +985,15 @@ impl Mesh { }); } - if let Some(VertexAttributeValues::Float32x3(ref mut tangents)) = + if let Some(VertexAttributeValues::Float32x4(tangents)) = self.attribute_mut(Mesh::ATTRIBUTE_TANGENT) { // Transform tangents tangents.iter_mut().for_each(|tangent| { - *tangent = (rotation * Vec3::from_slice(tangent).normalize_or_zero()).to_array(); + let handedness = tangent[3]; + *tangent = (rotation * Vec3::from_slice(tangent).normalize_or_zero()) + .extend(handedness) + .to_array(); }); } } @@ -992,7 +1017,7 @@ impl Mesh { "mesh transform scale cannot be zero on more than one axis" ); - if let Some(VertexAttributeValues::Float32x3(ref mut positions)) = + if let Some(VertexAttributeValues::Float32x3(positions)) = self.attribute_mut(Mesh::ATTRIBUTE_POSITION) { // Apply scale to vertex positions @@ -1006,7 +1031,7 @@ impl Mesh { return; } - if let Some(VertexAttributeValues::Float32x3(ref mut normals)) = + if let 
Some(VertexAttributeValues::Float32x3(normals)) = self.attribute_mut(Mesh::ATTRIBUTE_NORMAL) { // Transform normals, taking into account non-uniform scaling @@ -1015,13 +1040,17 @@ impl Mesh { }); } - if let Some(VertexAttributeValues::Float32x3(ref mut tangents)) = + if let Some(VertexAttributeValues::Float32x4(tangents)) = self.attribute_mut(Mesh::ATTRIBUTE_TANGENT) { // Transform tangents, taking into account non-uniform scaling tangents.iter_mut().for_each(|tangent| { + let handedness = tangent[3]; let scaled_tangent = Vec3::from_slice(tangent) * scale; - *tangent = scaled_tangent.normalize_or_zero().to_array(); + *tangent = scaled_tangent + .normalize_or_zero() + .extend(handedness) + .to_array(); }); } } @@ -1078,7 +1107,7 @@ impl Mesh { /// Normalize joint weights so they sum to 1. pub fn normalize_joint_weights(&mut self) { if let Some(joints) = self.attribute_mut(Self::ATTRIBUTE_JOINT_WEIGHT) { - let VertexAttributeValues::Float32x4(ref mut joints) = joints else { + let VertexAttributeValues::Float32x4(joints) = joints else { panic!("unexpected joint weight format"); }; @@ -1207,6 +1236,14 @@ impl core::ops::Mul for Transform { } } +/// Error that can occur when calling [`Mesh::merge`]. 
+#[derive(Error, Debug, Clone)] +#[error("Incompatible vertex attribute types {} and {}", self_attribute.name, other_attribute.map(|a| a.name).unwrap_or("None"))] +pub struct MergeMeshError { + pub self_attribute: MeshVertexAttribute, + pub other_attribute: Option, +} + #[cfg(test)] mod tests { use super::Mesh; diff --git a/crates/bevy_mesh/src/morph.rs b/crates/bevy_mesh/src/morph.rs index 0c2c2c948cdde..a8ff3be037d5a 100644 --- a/crates/bevy_mesh/src/morph.rs +++ b/crates/bevy_mesh/src/morph.rs @@ -5,7 +5,6 @@ use bevy_image::Image; use bevy_math::Vec3; use bevy_reflect::prelude::*; use bytemuck::{Pod, Zeroable}; -use core::iter; use thiserror::Error; use wgpu_types::{Extent3d, TextureDimension, TextureFormat}; @@ -77,7 +76,7 @@ impl MorphTargetImage { buffer.extend_from_slice(bytemuck::bytes_of(&to_add)); } // Pad each layer so that they fit width * height - buffer.extend(iter::repeat(0).take(padding as usize * size_of::())); + buffer.extend(core::iter::repeat_n(0, padding as usize * size_of::())); debug_assert_eq!(buffer.len(), layer_byte_count); buffer }) @@ -112,7 +111,7 @@ impl MorphTargetImage { /// /// [morph targets]: https://en.wikipedia.org/wiki/Morph_target_animation #[derive(Reflect, Default, Debug, Clone, Component)] -#[reflect(Debug, Component, Default)] +#[reflect(Debug, Component, Default, Clone)] pub struct MorphWeights { weights: Vec, /// The first mesh primitive assigned to these weights @@ -157,7 +156,7 @@ impl MorphWeights { /// /// [morph targets]: https://en.wikipedia.org/wiki/Morph_target_animation #[derive(Reflect, Default, Debug, Clone, Component)] -#[reflect(Debug, Component, Default)] +#[reflect(Debug, Component, Default, Clone)] pub struct MeshMorphWeights { weights: Vec, } diff --git a/crates/bevy_mesh/src/primitives/dim2.rs b/crates/bevy_mesh/src/primitives/dim2.rs index 3eda19eed9037..e543f8a1951e3 100644 --- a/crates/bevy_mesh/src/primitives/dim2.rs +++ b/crates/bevy_mesh/src/primitives/dim2.rs @@ -12,10 +12,12 @@ use bevy_math::{ 
}, FloatExt, Vec2, }; +use bevy_reflect::prelude::*; use wgpu_types::PrimitiveTopology; /// A builder used for creating a [`Mesh`] with a [`Circle`] shape. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct CircleMeshBuilder { /// The [`Circle`] shape. pub circle: Circle, @@ -98,7 +100,8 @@ impl From for Mesh { /// It's expected that more will be added in the future, such as a variant that causes the texture to be /// scaled to fit the bounding box of the shape, which would be good for packed textures only including the /// portion of the circle that is needed to display. -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, Reflect)] +#[reflect(Default, Debug, Clone)] #[non_exhaustive] pub enum CircularMeshUvMode { /// Treats the shape as a mask over a circle of equal size and radius, @@ -119,7 +122,8 @@ impl Default for CircularMeshUvMode { /// /// The resulting mesh will have a UV-map such that the center of the circle is /// at the center of the texture. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct CircularSectorMeshBuilder { /// The sector shape. pub sector: CircularSector, @@ -256,7 +260,8 @@ impl From for Mesh { /// /// The resulting mesh will have a UV-map such that the center of the circle is /// at the center of the texture. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct CircularSegmentMeshBuilder { /// The segment shape. pub segment: CircularSegment, @@ -399,6 +404,11 @@ impl From for Mesh { } /// A builder used for creating a [`Mesh`] with a [`ConvexPolygon`] shape. +/// +/// You must verify that the `vertices` are not concave when constructing this type. You can +/// guarantee this by creating a [`ConvexPolygon`] first, then calling [`ConvexPolygon::mesh()`]. 
+#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Debug, Clone)] pub struct ConvexPolygonMeshBuilder { pub vertices: [Vec2; N], } @@ -448,10 +458,44 @@ impl From> for Mesh { } /// A builder used for creating a [`Mesh`] with a [`RegularPolygon`] shape. +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct RegularPolygonMeshBuilder { circumradius: f32, sides: u32, } + +impl Default for RegularPolygonMeshBuilder { + /// Returns the default [`RegularPolygonMeshBuilder`] with six sides (a hexagon) and a circumradius of `0.5`. + fn default() -> Self { + Self { + circumradius: 0.5, + sides: 6, + } + } +} + +impl RegularPolygonMeshBuilder { + /// Creates a new [`RegularPolygonMeshBuilder`] from the radius of a circumcircle and a number + /// of sides. + /// + /// # Panics + /// + /// Panics in debug mode if `circumradius` is negative, or if `sides` is less than 3. + pub const fn new(circumradius: f32, sides: u32) -> Self { + debug_assert!( + circumradius.is_sign_positive(), + "polygon has a negative radius" + ); + debug_assert!(sides > 2, "polygon has less than 3 sides"); + + Self { + circumradius, + sides, + } + } +} + impl Meshable for RegularPolygon { type Output = RegularPolygonMeshBuilder; @@ -488,7 +532,8 @@ impl From for Mesh { } /// A builder used for creating a [`Mesh`] with an [`Ellipse`] shape. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct EllipseMeshBuilder { /// The [`Ellipse`] shape. pub ellipse: Ellipse, @@ -592,6 +637,8 @@ impl From for Mesh { } /// A builder for creating a [`Mesh`] with an [`Annulus`] shape. +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct AnnulusMeshBuilder { /// The [`Annulus`] shape. pub annulus: Annulus, @@ -722,10 +769,44 @@ impl From for Mesh { } } +/// A builder for creating a [`Mesh`] with an [`Rhombus`] shape. 
+#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct RhombusMeshBuilder { half_diagonals: Vec2, } +impl Default for RhombusMeshBuilder { + /// Returns the default [`RhombusMeshBuilder`] with a half-horizontal and half-vertical diagonal of `0.5`. + fn default() -> Self { + Self { + half_diagonals: Vec2::splat(0.5), + } + } +} + +impl RhombusMeshBuilder { + /// Creates a new [`RhombusMeshBuilder`] from a horizontal and vertical diagonal size. + /// + /// # Panics + /// + /// Panics in debug mode if `horizontal_diagonal` or `vertical_diagonal` is negative. + pub const fn new(horizontal_diagonal: f32, vertical_diagonal: f32) -> Self { + debug_assert!( + horizontal_diagonal >= 0.0, + "rhombus has a negative horizontal size", + ); + debug_assert!( + vertical_diagonal >= 0.0, + "rhombus has a negative vertical size" + ); + + Self { + half_diagonals: Vec2::new(horizontal_diagonal / 2.0, vertical_diagonal / 2.0), + } + } +} + impl MeshBuilder for RhombusMeshBuilder { fn build(&self) -> Mesh { let [hhd, vhd] = [self.half_diagonals.x, self.half_diagonals.y]; @@ -775,9 +856,21 @@ impl From for Mesh { } /// A builder used for creating a [`Mesh`] with a [`Triangle2d`] shape. +#[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct Triangle2dMeshBuilder { triangle: Triangle2d, } + +impl Triangle2dMeshBuilder { + /// Creates a new [`Triangle2dMeshBuilder`] from the points `a`, `b`, and `c`. + pub const fn new(a: Vec2, b: Vec2, c: Vec2) -> Self { + Self { + triangle: Triangle2d::new(a, b, c), + } + } +} + impl Meshable for Triangle2d { type Output = Triangle2dMeshBuilder; @@ -785,6 +878,7 @@ impl Meshable for Triangle2d { Self::Output { triangle: *self } } } + impl MeshBuilder for Triangle2dMeshBuilder { fn build(&self) -> Mesh { let vertices_3d = self.triangle.vertices.map(|v| v.extend(0.)); @@ -839,10 +933,37 @@ impl From for Mesh { } /// A builder used for creating a [`Mesh`] with a [`Rectangle`] shape. 
+#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct RectangleMeshBuilder { half_size: Vec2, } +impl Default for RectangleMeshBuilder { + /// Returns the default [`RectangleMeshBuilder`] with a half-width and half-height of `0.5`. + fn default() -> Self { + Self { + half_size: Vec2::splat(0.5), + } + } +} + +impl RectangleMeshBuilder { + /// Creates a new [`RectangleMeshBuilder`] from a full width and height. + /// + /// # Panics + /// + /// Panics in debug mode if `width` or `height` is negative. + pub const fn new(width: f32, height: f32) -> Self { + debug_assert!(width >= 0.0, "rectangle has a negative width"); + debug_assert!(height >= 0.0, "rectangle has a negative height"); + + Self { + half_size: Vec2::new(width / 2.0, height / 2.0), + } + } +} + impl MeshBuilder for RectangleMeshBuilder { fn build(&self) -> Mesh { let [hw, hh] = [self.half_size.x, self.half_size.y]; @@ -892,7 +1013,8 @@ impl From for Mesh { } /// A builder used for creating a [`Mesh`] with a [`Capsule2d`] shape. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct Capsule2dMeshBuilder { /// The [`Capsule2d`] shape. 
pub capsule: Capsule2d, @@ -1054,7 +1176,7 @@ impl From for Mesh { #[cfg(test)] mod tests { use bevy_math::{prelude::Annulus, primitives::RegularPolygon, FloatOrd}; - use bevy_utils::HashSet; + use bevy_platform::collections::HashSet; use crate::{Mesh, MeshBuilder, Meshable, VertexAttributeValues}; diff --git a/crates/bevy_mesh/src/primitives/dim3/capsule.rs b/crates/bevy_mesh/src/primitives/dim3/capsule.rs index 5fb4b7bb699fb..f46ebce0d15a7 100644 --- a/crates/bevy_mesh/src/primitives/dim3/capsule.rs +++ b/crates/bevy_mesh/src/primitives/dim3/capsule.rs @@ -1,9 +1,11 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{ops, primitives::Capsule3d, Vec2, Vec3}; +use bevy_reflect::prelude::*; /// Manner in which UV coordinates are distributed vertically. -#[derive(Clone, Copy, Debug, Default)] +#[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub enum CapsuleUvProfile { /// UV space is distributed by how much of the capsule consists of the hemispheres. #[default] @@ -16,7 +18,8 @@ pub enum CapsuleUvProfile { } /// A builder used for creating a [`Mesh`] with a [`Capsule3d`] shape. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct Capsule3dMeshBuilder { /// The [`Capsule3d`] shape. 
pub capsule: Capsule3d, diff --git a/crates/bevy_mesh/src/primitives/dim3/cone.rs b/crates/bevy_mesh/src/primitives/dim3/cone.rs index aca8b0dd3baa3..d06a57f832ea5 100644 --- a/crates/bevy_mesh/src/primitives/dim3/cone.rs +++ b/crates/bevy_mesh/src/primitives/dim3/cone.rs @@ -1,9 +1,11 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{ops, primitives::Cone, Vec3}; +use bevy_reflect::prelude::*; /// Anchoring options for [`ConeMeshBuilder`] -#[derive(Debug, Copy, Clone, Default)] +#[derive(Debug, Copy, Clone, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub enum ConeAnchor { #[default] /// Midpoint between the tip of the cone and the center of its base. @@ -15,7 +17,8 @@ pub enum ConeAnchor { } /// A builder used for creating a [`Mesh`] with a [`Cone`] shape. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct ConeMeshBuilder { /// The [`Cone`] shape. pub cone: Cone, diff --git a/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs b/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs index d725ec2585d6e..8c69378c01e64 100644 --- a/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs +++ b/crates/bevy_mesh/src/primitives/dim3/conical_frustum.rs @@ -1,9 +1,11 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{ops, primitives::ConicalFrustum, Vec3}; +use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`ConicalFrustum`] shape. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct ConicalFrustumMeshBuilder { /// The [`ConicalFrustum`] shape. 
pub frustum: ConicalFrustum, diff --git a/crates/bevy_mesh/src/primitives/dim3/cuboid.rs b/crates/bevy_mesh/src/primitives/dim3/cuboid.rs index 84c7278776d54..40a7cd45d4433 100644 --- a/crates/bevy_mesh/src/primitives/dim3/cuboid.rs +++ b/crates/bevy_mesh/src/primitives/dim3/cuboid.rs @@ -1,12 +1,24 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{primitives::Cuboid, Vec3}; +use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Cuboid`] shape. +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct CuboidMeshBuilder { half_size: Vec3, } +impl Default for CuboidMeshBuilder { + /// Returns the default [`CuboidMeshBuilder`] with a width, height, and depth of `1.0`. + fn default() -> Self { + Self { + half_size: Vec3::splat(0.5), + } + } +} + impl MeshBuilder for CuboidMeshBuilder { fn build(&self) -> Mesh { let min = -self.half_size; diff --git a/crates/bevy_mesh/src/primitives/dim3/cylinder.rs b/crates/bevy_mesh/src/primitives/dim3/cylinder.rs index e5971dc2f4927..7b1b45974ea62 100644 --- a/crates/bevy_mesh/src/primitives/dim3/cylinder.rs +++ b/crates/bevy_mesh/src/primitives/dim3/cylinder.rs @@ -1,9 +1,11 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{ops, primitives::Cylinder}; +use bevy_reflect::prelude::*; /// Anchoring options for [`CylinderMeshBuilder`] -#[derive(Debug, Copy, Clone, Default)] +#[derive(Debug, Copy, Clone, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub enum CylinderAnchor { #[default] /// Midpoint between the top and bottom caps of the cylinder @@ -15,7 +17,8 @@ pub enum CylinderAnchor { } /// A builder used for creating a [`Mesh`] with a [`Cylinder`] shape. -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct CylinderMeshBuilder { /// The [`Cylinder`] shape. 
pub cylinder: Cylinder, diff --git a/crates/bevy_mesh/src/primitives/dim3/mod.rs b/crates/bevy_mesh/src/primitives/dim3/mod.rs index 65b4eac7f6be3..2f8d724e673ab 100644 --- a/crates/bevy_mesh/src/primitives/dim3/mod.rs +++ b/crates/bevy_mesh/src/primitives/dim3/mod.rs @@ -12,7 +12,10 @@ pub(crate) mod triangle3d; pub use capsule::*; pub use cone::*; pub use conical_frustum::*; +pub use cuboid::*; pub use cylinder::*; pub use plane::*; pub use sphere::*; +pub use tetrahedron::*; pub use torus::*; +pub use triangle3d::*; diff --git a/crates/bevy_mesh/src/primitives/dim3/plane.rs b/crates/bevy_mesh/src/primitives/dim3/plane.rs index eb9f35288cae2..fd892469be6af 100644 --- a/crates/bevy_mesh/src/primitives/dim3/plane.rs +++ b/crates/bevy_mesh/src/primitives/dim3/plane.rs @@ -1,9 +1,11 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{primitives::Plane3d, Dir3, Quat, Vec2, Vec3}; +use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Plane3d`] shape. -#[derive(Clone, Copy, Debug, Default)] +#[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct PlaneMeshBuilder { /// The [`Plane3d`] shape. pub plane: Plane3d, diff --git a/crates/bevy_mesh/src/primitives/dim3/sphere.rs b/crates/bevy_mesh/src/primitives/dim3/sphere.rs index f834f02eabc49..6ae8eec5ed8bf 100644 --- a/crates/bevy_mesh/src/primitives/dim3/sphere.rs +++ b/crates/bevy_mesh/src/primitives/dim3/sphere.rs @@ -1,6 +1,7 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{ops, primitives::Sphere}; +use bevy_reflect::prelude::*; use core::f32::consts::PI; use hexasphere::shapes::IcoSphere; use thiserror::Error; @@ -19,7 +20,8 @@ pub enum IcosphereError { } /// A type of sphere mesh. 
-#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub enum SphereKind { /// An icosphere, a spherical mesh that consists of similar sized triangles. Ico { @@ -46,7 +48,8 @@ impl Default for SphereKind { } /// A builder used for creating a [`Mesh`] with an [`Sphere`] shape. -#[derive(Clone, Copy, Debug, Default)] +#[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct SphereMeshBuilder { /// The [`Sphere`] shape. pub sphere: Sphere, diff --git a/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs b/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs index 9aeb2945ed056..529805d9a603f 100644 --- a/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs +++ b/crates/bevy_mesh/src/primitives/dim3/tetrahedron.rs @@ -2,8 +2,11 @@ use super::triangle3d; use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::primitives::{Tetrahedron, Triangle3d}; +use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Tetrahedron`] shape. +#[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct TetrahedronMeshBuilder { tetrahedron: Tetrahedron, } diff --git a/crates/bevy_mesh/src/primitives/dim3/torus.rs b/crates/bevy_mesh/src/primitives/dim3/torus.rs index a774b84d7095a..6f370c13418ca 100644 --- a/crates/bevy_mesh/src/primitives/dim3/torus.rs +++ b/crates/bevy_mesh/src/primitives/dim3/torus.rs @@ -1,10 +1,12 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{ops, primitives::Torus, Vec3}; +use bevy_reflect::prelude::*; use core::ops::RangeInclusive; /// A builder used for creating a [`Mesh`] with a [`Torus`] shape. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct TorusMeshBuilder { /// The [`Torus`] shape. 
pub torus: Torus, diff --git a/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs b/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs index 3bcaea273e7c8..e35f272ab9bb1 100644 --- a/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs +++ b/crates/bevy_mesh/src/primitives/dim3/triangle3d.rs @@ -1,8 +1,11 @@ use crate::{Indices, Mesh, MeshBuilder, Meshable, PrimitiveTopology}; use bevy_asset::RenderAssetUsages; use bevy_math::{primitives::Triangle3d, Vec3}; +use bevy_reflect::prelude::*; /// A builder used for creating a [`Mesh`] with a [`Triangle3d`] shape. +#[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct Triangle3dMeshBuilder { triangle: Triangle3d, } diff --git a/crates/bevy_mesh/src/primitives/extrusion.rs b/crates/bevy_mesh/src/primitives/extrusion.rs index 62c9310e6072a..2a9e5d7551caa 100644 --- a/crates/bevy_mesh/src/primitives/extrusion.rs +++ b/crates/bevy_mesh/src/primitives/extrusion.rs @@ -216,7 +216,7 @@ where // An extrusion of depth 0 does not need a mantel if self.half_depth == 0. 
{ - front_face.merge(&back_face); + front_face.merge(&back_face).unwrap(); return front_face; } @@ -408,8 +408,8 @@ where .with_inserted_attribute(Mesh::ATTRIBUTE_UV_0, uvs) }; - front_face.merge(&back_face); - front_face.merge(&mantel); + front_face.merge(&back_face).unwrap(); + front_face.merge(&mantel).unwrap(); front_face } } diff --git a/crates/bevy_mesh/src/skinning.rs b/crates/bevy_mesh/src/skinning.rs index faf4e8be3ea97..53b93f9ff2507 100644 --- a/crates/bevy_mesh/src/skinning.rs +++ b/crates/bevy_mesh/src/skinning.rs @@ -1,29 +1,27 @@ -use bevy_asset::{Asset, Handle}; -use bevy_ecs::{ - component::Component, - entity::{Entity, VisitEntities, VisitEntitiesMut}, - prelude::ReflectComponent, - reflect::{ReflectMapEntities, ReflectVisitEntities, ReflectVisitEntitiesMut}, -}; +use bevy_asset::{AsAssetId, Asset, AssetId, Handle}; +use bevy_ecs::{component::Component, entity::Entity, prelude::ReflectComponent}; use bevy_math::Mat4; use bevy_reflect::prelude::*; use core::ops::Deref; -#[derive(Component, Debug, Default, Clone, Reflect, VisitEntities, VisitEntitiesMut)] -#[reflect( - Component, - MapEntities, - VisitEntities, - VisitEntitiesMut, - Default, - Debug -)] +#[derive(Component, Debug, Default, Clone, Reflect)] +#[reflect(Component, Default, Debug, Clone)] pub struct SkinnedMesh { - #[visit_entities(ignore)] pub inverse_bindposes: Handle, + #[entities] pub joints: Vec, } +impl AsAssetId for SkinnedMesh { + type Asset = SkinnedMeshInverseBindposes; + + // We implement this so that `AssetChanged` will work to pick up any changes + // to `SkinnedMeshInverseBindposes`. 
+ fn as_asset_id(&self) -> AssetId { + self.inverse_bindposes.id() + } +} + #[derive(Asset, TypePath, Debug)] pub struct SkinnedMeshInverseBindposes(Box<[Mat4]>); diff --git a/crates/bevy_mesh/src/vertex.rs b/crates/bevy_mesh/src/vertex.rs index 4c6a36fc7698b..949e355b4c52a 100644 --- a/crates/bevy_mesh/src/vertex.rs +++ b/crates/bevy_mesh/src/vertex.rs @@ -1,8 +1,8 @@ use alloc::sync::Arc; use bevy_derive::EnumVariantMeta; -use bevy_ecs::system::Resource; +use bevy_ecs::resource::Resource; use bevy_math::Vec3; -use bevy_utils::HashSet; +use bevy_platform::collections::HashSet; use bytemuck::cast_slice; use core::hash::{Hash, Hasher}; use thiserror::Error; @@ -165,53 +165,6 @@ pub fn face_normal(a: [f32; 3], b: [f32; 3], c: [f32; 3]) -> [f32; 3] { (b - a).cross(c - a).normalize().into() } -pub trait VertexFormatSize { - fn get_size(self) -> u64; -} - -impl VertexFormatSize for VertexFormat { - #[allow(clippy::match_same_arms)] - fn get_size(self) -> u64 { - match self { - VertexFormat::Uint8x2 => 2, - VertexFormat::Uint8x4 => 4, - VertexFormat::Sint8x2 => 2, - VertexFormat::Sint8x4 => 4, - VertexFormat::Unorm8x2 => 2, - VertexFormat::Unorm8x4 => 4, - VertexFormat::Snorm8x2 => 2, - VertexFormat::Snorm8x4 => 4, - VertexFormat::Unorm10_10_10_2 => 4, - VertexFormat::Uint16x2 => 2 * 2, - VertexFormat::Uint16x4 => 2 * 4, - VertexFormat::Sint16x2 => 2 * 2, - VertexFormat::Sint16x4 => 2 * 4, - VertexFormat::Unorm16x2 => 2 * 2, - VertexFormat::Unorm16x4 => 2 * 4, - VertexFormat::Snorm16x2 => 2 * 2, - VertexFormat::Snorm16x4 => 2 * 4, - VertexFormat::Float16x2 => 2 * 2, - VertexFormat::Float16x4 => 2 * 4, - VertexFormat::Float32 => 4, - VertexFormat::Float32x2 => 4 * 2, - VertexFormat::Float32x3 => 4 * 3, - VertexFormat::Float32x4 => 4 * 4, - VertexFormat::Uint32 => 4, - VertexFormat::Uint32x2 => 4 * 2, - VertexFormat::Uint32x3 => 4 * 3, - VertexFormat::Uint32x4 => 4 * 4, - VertexFormat::Sint32 => 4, - VertexFormat::Sint32x2 => 4 * 2, - VertexFormat::Sint32x3 => 4 * 3, - 
VertexFormat::Sint32x4 => 4 * 4, - VertexFormat::Float64 => 8, - VertexFormat::Float64x2 => 8 * 2, - VertexFormat::Float64x3 => 8 * 3, - VertexFormat::Float64x4 => 8 * 4, - } - } -} - /// Contains an array where each entry describes a property of a single vertex. /// Matches the [`VertexFormats`](VertexFormat). #[derive(Clone, Debug, EnumVariantMeta)] @@ -249,7 +202,10 @@ pub enum VertexAttributeValues { impl VertexAttributeValues { /// Returns the number of vertices in this [`VertexAttributeValues`]. For a single /// mesh, all of the [`VertexAttributeValues`] must have the same length. - #[allow(clippy::match_same_arms)] + #[expect( + clippy::match_same_arms, + reason = "Although the `values` binding on some match arms may have matching types, each variant has different semantics; thus it's not guaranteed that they will use the same type forever." + )] pub fn len(&self) -> usize { match self { VertexAttributeValues::Float32(values) => values.len(), @@ -299,7 +255,10 @@ impl VertexAttributeValues { // TODO: add vertex format as parameter here and perform type conversions /// Flattens the [`VertexAttributeValues`] into a sequence of bytes. This is /// useful for serialization and sending to the GPU. - #[allow(clippy::match_same_arms)] + #[expect( + clippy::match_same_arms, + reason = "Although the `values` binding on some match arms may have matching types, each variant has different semantics; thus it's not guaranteed that they will use the same type forever." 
+ )] pub fn get_bytes(&self) -> &[u8] { match self { VertexAttributeValues::Float32(values) => cast_slice(values), diff --git a/crates/bevy_mikktspace/Cargo.toml b/crates/bevy_mikktspace/Cargo.toml index 6145bcf9dde87..fbca931fe2a42 100644 --- a/crates/bevy_mikktspace/Cargo.toml +++ b/crates/bevy_mikktspace/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_mikktspace" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" authors = [ "Benjamin Wasty ", "David Harvey-Macaulay ", @@ -13,7 +13,7 @@ homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "Zlib AND (MIT OR Apache-2.0)" keywords = ["bevy", "3D", "graphics", "algorithm", "tangent"] -rust-version = "1.76.0" +rust-version = "1.85.0" [features] default = ["std"] @@ -22,7 +22,7 @@ std = ["glam/std"] libm = ["glam/libm", "dep:libm"] [dependencies] -glam = { version = "0.29.0", default-features = false } +glam = { version = "0.29.3", default-features = false } libm = { version = "0.2", default-features = false, optional = true } [[example]] diff --git a/crates/bevy_mikktspace/README.md b/crates/bevy_mikktspace/README.md index a9049ee1ba175..6ad4a72a4f7d5 100644 --- a/crates/bevy_mikktspace/README.md +++ b/crates/bevy_mikktspace/README.md @@ -6,7 +6,7 @@ [![Docs](https://docs.rs/bevy_mikktspace/badge.svg)](https://docs.rs/bevy_mikktspace/latest/bevy_mikktspace/) [![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) -This is a fork of [https://github.com/gltf-rs/mikktspace](https://github.com/gltf-rs/mikktspace), which in turn is a port of the Mikkelsen Tangent Space Algorithm reference implementation to Rust. It has been forked for use in the bevy game engine to be able to update maths crate dependencies in lock-step with bevy releases. 
It is vendored in the bevy repository itself as [crates/bevy_mikktspace](https://github.com/bevyengine/bevy/tree/main/crates/bevy_mikktspace). +This is a fork of [https://github.com/gltf-rs/mikktspace](https://github.com/gltf-rs/mikktspace), which in turn is a port of the Mikkelsen Tangent Space Algorithm reference implementation to Rust. It has been forked for use in the bevy game engine to be able to update math crate dependencies in lock-step with bevy releases. It is vendored in the bevy repository itself as [crates/bevy_mikktspace](https://github.com/bevyengine/bevy/tree/main/crates/bevy_mikktspace). Port of the [Mikkelsen Tangent Space Algorithm](https://archive.blender.org/wiki/2015/index.php/Dev:Shading/Tangent_Space_Normal_Maps/) reference implementation. diff --git a/crates/bevy_mikktspace/examples/generate.rs b/crates/bevy_mikktspace/examples/generate.rs index 6ca3fa36df06e..62f6f10bfa04b 100644 --- a/crates/bevy_mikktspace/examples/generate.rs +++ b/crates/bevy_mikktspace/examples/generate.rs @@ -1,6 +1,11 @@ //! This example demonstrates how to generate a mesh. -#![allow(clippy::bool_assert_comparison, clippy::useless_conversion)] +#![allow( + clippy::bool_assert_comparison, + clippy::useless_conversion, + reason = "Crate auto-generated with many non-idiomatic decisions. See #7372 for details." +)] +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] use glam::{Vec2, Vec3}; diff --git a/crates/bevy_mikktspace/src/generated.rs b/crates/bevy_mikktspace/src/generated.rs index a35e1d205f16a..a726eb5bc89d6 100644 --- a/crates/bevy_mikktspace/src/generated.rs +++ b/crates/bevy_mikktspace/src/generated.rs @@ -95,7 +95,7 @@ impl STSpace { // of the vertex shader, as explained earlier, then be sure to do this in the normal map sampler also. // Finally, beware of quad triangulations. 
If the normal map sampler doesn't use the same triangulation of // quads as your renderer then problems will occur since the interpolated tangent spaces will differ -// eventhough the vertex level tangent spaces match. This can be solved either by triangulating before +// even though the vertex level tangent spaces match. This can be solved either by triangulating before // sampling/exporting or by using the order-independent choice of diagonal for splitting quads suggested earlier. // However, this must be used both by the sampler and your tools/rendering pipeline. // internal structure @@ -136,7 +136,7 @@ pub struct SGroup { pub iNrFaces: i32, pub pFaceIndices: *mut i32, pub iVertexRepresentative: i32, - pub bOrientPreservering: bool, + pub bOrientPreserving: bool, } impl SGroup { @@ -145,7 +145,7 @@ impl SGroup { iNrFaces: 0, pFaceIndices: null_mut(), iVertexRepresentative: 0, - bOrientPreservering: false, + bOrientPreserving: false, } } } @@ -576,11 +576,11 @@ unsafe fn GenerateTSpaces( if (*pTS_out).iCounter == 1i32 { *pTS_out = AvgTSpace(pTS_out, &mut pSubGroupTspace[l]); (*pTS_out).iCounter = 2i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering + (*pTS_out).bOrient = (*pGroup).bOrientPreserving } else { *pTS_out = pSubGroupTspace[l]; (*pTS_out).iCounter = 1i32; - (*pTS_out).bOrient = (*pGroup).bOrientPreservering + (*pTS_out).bOrient = (*pGroup).bOrientPreserving } i += 1 } @@ -838,7 +838,7 @@ unsafe fn Build4RuleGroups( *fresh2 = ptr::from_mut(&mut *pGroups.offset(iNrActiveGroups as isize)); (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]) .iVertexRepresentative = vert_index; - (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).bOrientPreservering = + (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).bOrientPreserving = (*pTriInfos.offset(f as isize)).iFlag & 8i32 != 0i32; (*(*pTriInfos.offset(f as isize)).AssignedGroup[i as usize]).iNrFaces = 0i32; let ref mut fresh3 = @@ -927,7 +927,7 @@ unsafe fn AssignRecur( && 
(*pMyTriInfo).AssignedGroup[2usize].is_null() { (*pMyTriInfo).iFlag &= !8i32; - (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreservering { + (*pMyTriInfo).iFlag |= if (*pGroup).bOrientPreserving { 8i32 } else { 0i32 @@ -939,7 +939,7 @@ unsafe fn AssignRecur( } else { false }; - if bOrient != (*pGroup).bOrientPreservering { + if bOrient != (*pGroup).bOrientPreserving { return false; } AddTriToGroup(pGroup, iMyTriIndex); diff --git a/crates/bevy_mikktspace/src/lib.rs b/crates/bevy_mikktspace/src/lib.rs index fe514e208eb47..ee5f149a8ca19 100644 --- a/crates/bevy_mikktspace/src/lib.rs +++ b/crates/bevy_mikktspace/src/lib.rs @@ -1,3 +1,8 @@ +#![allow( + clippy::allow_attributes, + clippy::allow_attributes_without_reason, + reason = "Much of the code here is still code that's been transpiled from C; we want to save 'fixing' this crate until after it's ported to safe rust." +)] #![allow( unsafe_op_in_unsafe_fn, clippy::all, @@ -11,7 +16,10 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] + +#[cfg(feature = "std")] +extern crate std; extern crate alloc; diff --git a/crates/bevy_mikktspace/tests/regression_test.rs b/crates/bevy_mikktspace/tests/regression_test.rs index a0632b76e29b5..bd6718ad39f20 100644 --- a/crates/bevy_mikktspace/tests/regression_test.rs +++ b/crates/bevy_mikktspace/tests/regression_test.rs @@ -2,7 +2,8 @@ #![expect( clippy::bool_assert_comparison, clippy::semicolon_if_nothing_returned, - clippy::useless_conversion + clippy::useless_conversion, + reason = "Crate auto-generated with many non-idiomatic decisions. See #7372 for details." 
)] use bevy_mikktspace::{generate_tangents, Geometry}; diff --git a/crates/bevy_pbr/Cargo.toml b/crates/bevy_pbr/Cargo.toml index cd42bfebb8d05..82642812b4644 100644 --- a/crates/bevy_pbr/Cargo.toml +++ b/crates/bevy_pbr/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_pbr" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Adds PBR rendering to Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -15,9 +15,9 @@ pbr_transmission_textures = [] pbr_multi_layer_material_textures = [] pbr_anisotropy_texture = [] experimental_pbr_pcss = [] +pbr_specular_textures = [] shader_format_glsl = ["bevy_render/shader_format_glsl"] trace = ["bevy_render/trace"] -ios_simulator = ["bevy_render/ios_simulator"] # Enables the meshlet renderer for dense high-poly scenes (experimental) meshlet = ["dep:lz4_flex", "dep:range-alloc", "dep:half", "dep:bevy_tasks"] # Enables processing meshes into meshlet meshes @@ -31,23 +31,24 @@ meshlet_processor = [ [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev" } -bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", 
version = "0.16.0-dev" } +bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", optional = true } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", ] } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_tasks = { path = "../bevy_tasks", version = "0.15.0-dev", optional = true } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev" } - # other bitflags = "2.3" @@ -60,9 +61,9 @@ lz4_flex = { version = "0.11", default-features = false, features = [ ], optional = true } range-alloc = { version = "0.1.3", optional = true } half = { version = "2", features = ["bytemuck"], optional = true } -meshopt = { version = "0.4", optional = true } +meshopt = { version = "0.4.1", optional = true } metis = { version = "0.2", optional = true } -itertools = { version = "0.13", optional = true } +itertools = { version = "0.14", optional = true } bitvec = { version = "1", optional = true } # direct dependency required for derive macro bytemuck = { version = "1", features = ["derive", "must_cast"] } @@ -70,6 +71,8 @@ radsort = "0.1" smallvec = "1.6" nonmax = "0.5" static_assertions = "1" +tracing = { version = "0.1", default-features = false, features 
= ["std"] } +offset-allocator = "0.2" [lints] workspace = true diff --git a/crates/bevy_pbr/LICENSE-APACHE b/crates/bevy_pbr/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_pbr/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_pbr/LICENSE-MIT b/crates/bevy_pbr/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_pbr/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_pbr/src/atmosphere/aerial_view_lut.wgsl b/crates/bevy_pbr/src/atmosphere/aerial_view_lut.wgsl new file mode 100644 index 0000000000000..f7ba0ecb60cdc --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/aerial_view_lut.wgsl @@ -0,0 +1,63 @@ +#import bevy_pbr::{ + mesh_view_types::{Lights, DirectionalLight}, + atmosphere::{ + types::{Atmosphere, AtmosphereSettings}, + bindings::{atmosphere, settings, view, lights, aerial_view_lut_out}, + functions::{ + sample_transmittance_lut, sample_atmosphere, rayleigh, henyey_greenstein, + sample_multiscattering_lut, AtmosphereSample, sample_local_inscattering, + get_local_r, get_local_up, view_radius, uv_to_ndc, max_atmosphere_distance, + uv_to_ray_direction, MIDPOINT_RATIO + }, + } +} + + +@group(0) @binding(13) var aerial_view_lut_out: texture_storage_3d; + +@compute +@workgroup_size(16, 16, 1) +fn main(@builtin(global_invocation_id) idx: vec3) { + if any(idx.xy > settings.aerial_view_lut_size.xy) { return; } + + let uv = (vec2(idx.xy) + 0.5) / vec2(settings.aerial_view_lut_size.xy); + let ray_dir = uv_to_ray_direction(uv); + let r = view_radius(); + let mu = ray_dir.y; + let t_max = settings.aerial_view_lut_max_distance; + + var prev_t = 0.0; + var total_inscattering = vec3(0.0); + var throughput = vec3(1.0); + + for (var slice_i: u32 = 0; slice_i < settings.aerial_view_lut_size.z; slice_i++) { + for (var step_i: u32 = 0; step_i < settings.aerial_view_lut_samples; step_i++) { + let t_i = t_max * (f32(slice_i) + ((f32(step_i) + MIDPOINT_RATIO) / f32(settings.aerial_view_lut_samples))) / f32(settings.aerial_view_lut_size.z); + let dt = (t_i - prev_t); + prev_t = t_i; + + let local_r = get_local_r(r, mu, t_i); + let local_up = get_local_up(r, t_i, ray_dir.xyz); + + let local_atmosphere = sample_atmosphere(local_r); + let sample_optical_depth = local_atmosphere.extinction * dt; + let sample_transmittance = exp(-sample_optical_depth); + + // evaluate one segment of the integral + var inscattering = 
sample_local_inscattering(local_atmosphere, ray_dir.xyz, local_r, local_up); + + // Analytical integration of the single scattering term in the radiance transfer equation + let s_int = (inscattering - inscattering * sample_transmittance) / local_atmosphere.extinction; + total_inscattering += throughput * s_int; + + throughput *= sample_transmittance; + if all(throughput < vec3(0.001)) { + break; + } + } + + // Store in log space to allow linear interpolation of exponential values between slices + let log_inscattering = log(max(total_inscattering, vec3(1e-6))); + textureStore(aerial_view_lut_out, vec3(vec2(idx.xy), slice_i), vec4(log_inscattering, 0.0)); + } +} diff --git a/crates/bevy_pbr/src/atmosphere/bindings.wgsl b/crates/bevy_pbr/src/atmosphere/bindings.wgsl new file mode 100644 index 0000000000000..fe4e0c9070532 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/bindings.wgsl @@ -0,0 +1,22 @@ +#define_import_path bevy_pbr::atmosphere::bindings + +#import bevy_render::view::View; + +#import bevy_pbr::{ + mesh_view_types::Lights, + atmosphere::types::{Atmosphere, AtmosphereSettings, AtmosphereTransforms} +} + +@group(0) @binding(0) var atmosphere: Atmosphere; +@group(0) @binding(1) var settings: AtmosphereSettings; +@group(0) @binding(2) var atmosphere_transforms: AtmosphereTransforms; +@group(0) @binding(3) var view: View; +@group(0) @binding(4) var lights: Lights; +@group(0) @binding(5) var transmittance_lut: texture_2d; +@group(0) @binding(6) var transmittance_lut_sampler: sampler; +@group(0) @binding(7) var multiscattering_lut: texture_2d; +@group(0) @binding(8) var multiscattering_lut_sampler: sampler; +@group(0) @binding(9) var sky_view_lut: texture_2d; +@group(0) @binding(10) var sky_view_lut_sampler: sampler; +@group(0) @binding(11) var aerial_view_lut: texture_3d; +@group(0) @binding(12) var aerial_view_lut_sampler: sampler; diff --git a/crates/bevy_pbr/src/atmosphere/bruneton_functions.wgsl b/crates/bevy_pbr/src/atmosphere/bruneton_functions.wgsl new 
file mode 100644 index 0000000000000..b7e0fc4e7cc08 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/bruneton_functions.wgsl @@ -0,0 +1,139 @@ +// Copyright (c) 2017 Eric Bruneton +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holders nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. +// +// Precomputed Atmospheric Scattering +// Copyright (c) 2008 INRIA +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// 1. 
Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holders nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +// THE POSSIBILITY OF SUCH DAMAGE. 
+ +#define_import_path bevy_pbr::atmosphere::bruneton_functions + +#import bevy_pbr::atmosphere::{ + types::Atmosphere, + bindings::atmosphere, +} + +// Mapping from view height (r) and zenith cos angle (mu) to UV coordinates in the transmittance LUT +// Assuming r between ground and top atmosphere boundary, and mu= cos(zenith_angle) +// Chosen to increase precision near the ground and to work around a discontinuity at the horizon +// See Bruneton and Neyret 2008, "Precomputed Atmospheric Scattering" section 4 +fn transmittance_lut_r_mu_to_uv(r: f32, mu: f32) -> vec2 { + // Distance along a horizontal ray from the ground to the top atmosphere boundary + let H = sqrt(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius); + + // Distance from a point at height r to the horizon + // ignore the case where r <= atmosphere.bottom_radius + let rho = sqrt(max(r * r - atmosphere.bottom_radius * atmosphere.bottom_radius, 0.0)); + + // Distance from a point at height r to the top atmosphere boundary at zenith angle mu + let d = distance_to_top_atmosphere_boundary(r, mu); + + // Minimum and maximum distance to the top atmosphere boundary from a point at height r + let d_min = atmosphere.top_radius - r; // length of the ray straight up to the top atmosphere boundary + let d_max = rho + H; // length of the ray to the top atmosphere boundary and grazing the horizon + + let u = (d - d_min) / (d_max - d_min); + let v = rho / H; + return vec2(u, v); +} + +// Inverse of the mapping above, mapping from UV coordinates in the transmittance LUT to view height (r) and zenith cos angle (mu) +fn transmittance_lut_uv_to_r_mu(uv: vec2) -> vec2 { + // Distance to top atmosphere boundary for a horizontal ray at ground level + let H = sqrt(atmosphere.top_radius * atmosphere.top_radius - atmosphere.bottom_radius * atmosphere.bottom_radius); + + // Distance to the horizon, from which we can compute r: + let rho = H * uv.y; + let r = sqrt(rho * rho + 
atmosphere.bottom_radius * atmosphere.bottom_radius); + + // Distance to the top atmosphere boundary for the ray (r,mu), and its minimum + // and maximum values over all mu- obtained for (r,1) and (r,mu_horizon) - + // from which we can recover mu: + let d_min = atmosphere.top_radius - r; + let d_max = rho + H; + let d = d_min + uv.x * (d_max - d_min); + + var mu: f32; + if d == 0.0 { + mu = 1.0; + } else { + mu = (H * H - rho * rho - d * d) / (2.0 * r * d); + } + + mu = clamp(mu, -1.0, 1.0); + + return vec2(r, mu); +} + +/// Simplified ray-sphere intersection +/// where: +/// Ray origin, o = [0,0,r] with r <= atmosphere.top_radius +/// mu is the cosine of spherical coordinate theta (-1.0 <= mu <= 1.0) +/// so ray direction in spherical coordinates is [1,acos(mu),0] which needs to be converted to cartesian +/// Direction of ray, u = [0,sqrt(1-mu*mu),mu] +/// Center of sphere, c = [0,0,0] +/// Radius of sphere, r = atmosphere.top_radius +/// This function solves the quadratic equation for line-sphere intersection simplified under these assumptions +fn distance_to_top_atmosphere_boundary(r: f32, mu: f32) -> f32 { + // ignore the case where r > atmosphere.top_radius + let positive_discriminant = max(r * r * (mu * mu - 1.0) + atmosphere.top_radius * atmosphere.top_radius, 0.0); + return max(-r * mu + sqrt(positive_discriminant), 0.0); +} + +/// Simplified ray-sphere intersection +/// as above for intersections with the ground +fn distance_to_bottom_atmosphere_boundary(r: f32, mu: f32) -> f32 { + let positive_discriminant = max(r * r * (mu * mu - 1.0) + atmosphere.bottom_radius * atmosphere.bottom_radius, 0.0); + return max(-r * mu - sqrt(positive_discriminant), 0.0); +} + +fn ray_intersects_ground(r: f32, mu: f32) -> bool { + return mu < 0.0 && r * r * (mu * mu - 1.0) + atmosphere.bottom_radius * atmosphere.bottom_radius >= 0.0; +} diff --git a/crates/bevy_pbr/src/atmosphere/functions.wgsl b/crates/bevy_pbr/src/atmosphere/functions.wgsl new file mode 100644 index 
0000000000000..c1f02fc921c88 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/functions.wgsl @@ -0,0 +1,379 @@ +#define_import_path bevy_pbr::atmosphere::functions + +#import bevy_render::maths::{PI, HALF_PI, PI_2, fast_acos, fast_acos_4, fast_atan2} + +#import bevy_pbr::atmosphere::{ + types::Atmosphere, + bindings::{ + atmosphere, settings, view, lights, transmittance_lut, transmittance_lut_sampler, + multiscattering_lut, multiscattering_lut_sampler, sky_view_lut, sky_view_lut_sampler, + aerial_view_lut, aerial_view_lut_sampler, atmosphere_transforms + }, + bruneton_functions::{ + transmittance_lut_r_mu_to_uv, transmittance_lut_uv_to_r_mu, + ray_intersects_ground, distance_to_top_atmosphere_boundary, + distance_to_bottom_atmosphere_boundary + }, +} + +// NOTE FOR CONVENTIONS: +// r: +// radius, or distance from planet center +// +// altitude: +// distance from planet **surface** +// +// mu: +// cosine of the zenith angle of a ray with +// respect to the planet normal +// +// atmosphere space: +// abbreviated as "as" (contrast with vs, cs, ws), this space is similar +// to view space, but with the camera positioned horizontally on the planet +// surface, so the horizon is a horizontal line centered vertically in the +// frame. This enables the non-linear latitude parametrization the paper uses +// to concentrate detail near the horizon + + +// CONSTANTS + +const FRAC_PI: f32 = 0.3183098862; // 1 / π +const FRAC_2_PI: f32 = 0.15915494309; // 1 / (2π) +const FRAC_3_16_PI: f32 = 0.0596831036594607509; // 3 / (16π) +const FRAC_4_PI: f32 = 0.07957747154594767; // 1 / (4π) +const ROOT_2: f32 = 1.41421356; // √2 + +// During raymarching, each segment is sampled at a single point. This constant determines +// where in the segment that sample is taken (0.0 = start, 0.5 = middle, 1.0 = end). +// We use 0.3 to sample closer to the start of each segment, which better approximates +// the exponential falloff of atmospheric density. 
+const MIDPOINT_RATIO: f32 = 0.3; + +// LUT UV PARAMETERIZATIONS + +fn unit_to_sub_uvs(val: vec2, resolution: vec2) -> vec2 { + return (val + 0.5f / resolution) * (resolution / (resolution + 1.0f)); +} + +fn sub_uvs_to_unit(val: vec2, resolution: vec2) -> vec2 { + return (val - 0.5f / resolution) * (resolution / (resolution - 1.0f)); +} + +fn multiscattering_lut_r_mu_to_uv(r: f32, mu: f32) -> vec2 { + let u = 0.5 + 0.5 * mu; + let v = saturate((r - atmosphere.bottom_radius) / (atmosphere.top_radius - atmosphere.bottom_radius)); //TODO + return unit_to_sub_uvs(vec2(u, v), vec2(settings.multiscattering_lut_size)); +} + +fn multiscattering_lut_uv_to_r_mu(uv: vec2) -> vec2 { + let adj_uv = sub_uvs_to_unit(uv, vec2(settings.multiscattering_lut_size)); + let r = mix(atmosphere.bottom_radius, atmosphere.top_radius, adj_uv.y); + let mu = adj_uv.x * 2 - 1; + return vec2(r, mu); +} + +fn sky_view_lut_r_mu_azimuth_to_uv(r: f32, mu: f32, azimuth: f32) -> vec2 { + let u = (azimuth * FRAC_2_PI) + 0.5; + + let v_horizon = sqrt(r * r - atmosphere.bottom_radius * atmosphere.bottom_radius); + let cos_beta = v_horizon / r; + // Using fast_acos_4 for better precision at small angles + // to avoid artifacts at the horizon + let beta = fast_acos_4(cos_beta); + let horizon_zenith = PI - beta; + let view_zenith = fast_acos_4(mu); + + // Apply non-linear transformation to compress more texels + // near the horizon where high-frequency details matter most + // l is latitude in [-π/2, π/2] and v is texture coordinate in [0,1] + let l = view_zenith - horizon_zenith; + let abs_l = abs(l); + + let v = 0.5 + 0.5 * sign(l) * sqrt(abs_l / HALF_PI); + + return unit_to_sub_uvs(vec2(u, v), vec2(settings.sky_view_lut_size)); +} + +fn sky_view_lut_uv_to_zenith_azimuth(r: f32, uv: vec2) -> vec2 { + let adj_uv = sub_uvs_to_unit(vec2(uv.x, 1.0 - uv.y), vec2(settings.sky_view_lut_size)); + let azimuth = (adj_uv.x - 0.5) * PI_2; + + // Horizon parameters + let v_horizon = sqrt(r * r - 
atmosphere.bottom_radius * atmosphere.bottom_radius); + let cos_beta = v_horizon / r; + let beta = fast_acos_4(cos_beta); + let horizon_zenith = PI - beta; + + // Inverse of horizon-detail mapping to recover original latitude from texture coordinate + let t = abs(2.0 * (adj_uv.y - 0.5)); + let l = sign(adj_uv.y - 0.5) * HALF_PI * t * t; + + return vec2(horizon_zenith - l, azimuth); +} + +// LUT SAMPLING + +fn sample_transmittance_lut(r: f32, mu: f32) -> vec3 { + let uv = transmittance_lut_r_mu_to_uv(r, mu); + return textureSampleLevel(transmittance_lut, transmittance_lut_sampler, uv, 0.0).rgb; +} + +// NOTICE: This function is copyrighted by Eric Bruneton and INRIA, and falls +// under the license reproduced in bruneton_functions.wgsl (variant of MIT license) +// +// FIXME: this function should be in bruneton_functions.wgsl, but because naga_oil doesn't +// support cyclic imports it's stuck here +fn sample_transmittance_lut_segment(r: f32, mu: f32, t: f32) -> vec3 { + let r_t = get_local_r(r, mu, t); + let mu_t = clamp((r * mu + t) / r_t, -1.0, 1.0); + + if ray_intersects_ground(r, mu) { + return min( + sample_transmittance_lut(r_t, -mu_t) / sample_transmittance_lut(r, -mu), + vec3(1.0) + ); + } else { + return min( + sample_transmittance_lut(r, mu) / sample_transmittance_lut(r_t, mu_t), vec3(1.0) + ); + } +} + +fn sample_multiscattering_lut(r: f32, mu: f32) -> vec3 { + let uv = multiscattering_lut_r_mu_to_uv(r, mu); + return textureSampleLevel(multiscattering_lut, multiscattering_lut_sampler, uv, 0.0).rgb; +} + +fn sample_sky_view_lut(r: f32, ray_dir_as: vec3) -> vec3 { + let mu = ray_dir_as.y; + let azimuth = fast_atan2(ray_dir_as.x, -ray_dir_as.z); + let uv = sky_view_lut_r_mu_azimuth_to_uv(r, mu, azimuth); + return textureSampleLevel(sky_view_lut, sky_view_lut_sampler, uv, 0.0).rgb; +} + +fn ndc_to_camera_dist(ndc: vec3) -> f32 { + let view_pos = view.view_from_clip * vec4(ndc, 1.0); + let t = length(view_pos.xyz / view_pos.w) * settings.scene_units_to_m; + 
return t; +} + +// RGB channels: total inscattered light along the camera ray to the current sample. +// A channel: average transmittance across all wavelengths to the current sample. +fn sample_aerial_view_lut(uv: vec2, t: f32) -> vec3 { + let t_max = settings.aerial_view_lut_max_distance; + let num_slices = f32(settings.aerial_view_lut_size.z); + // Each texel stores the value of the scattering integral over the whole slice, + // which requires us to offset the w coordinate by half a slice. For + // example, if we wanted the value of the integral at the boundary between slices, + // we'd need to sample at the center of the previous slice, and vice-versa for + // sampling in the center of a slice. + let uvw = vec3(uv, saturate(t / t_max - 0.5 / num_slices)); + let sample = textureSampleLevel(aerial_view_lut, aerial_view_lut_sampler, uvw, 0.0); + // Since sampling anywhere between w=0 and w=t_slice will clamp to the first slice, + // we need to do a linear step over the first slice towards zero at the camera's + // position to recover the correct integral value. + let t_slice = t_max / num_slices; + let fade = saturate(t / t_slice); + // Recover the values from log space + return exp(sample.rgb) * fade; +} + +// PHASE FUNCTIONS + +// -(L . V) == (L . -V). 
-V here is our ray direction, which points away from the view +// instead of towards it (which would be the *view direction*, V) + +// evaluates the rayleigh phase function, which describes the likelihood +// of a rayleigh scattering event scattering light from the light direction towards the view +fn rayleigh(neg_LdotV: f32) -> f32 { + return FRAC_3_16_PI * (1 + (neg_LdotV * neg_LdotV)); +} + +// evaluates the henyey-greenstein phase function, which describes the likelihood +// of a mie scattering event scattering light from the light direction towards the view +fn henyey_greenstein(neg_LdotV: f32) -> f32 { + let g = atmosphere.mie_asymmetry; + let denom = 1.0 + g * g - 2.0 * g * neg_LdotV; + return FRAC_4_PI * (1.0 - g * g) / (denom * sqrt(denom)); +} + +// ATMOSPHERE SAMPLING + +struct AtmosphereSample { + /// units: m^-1 + rayleigh_scattering: vec3, + + /// units: m^-1 + mie_scattering: f32, + + /// the sum of scattering and absorption. Since the phase function doesn't + /// matter for this, we combine rayleigh and mie extinction to a single + // value. 
+ // + /// units: m^-1 + extinction: vec3 +} + +/// Samples atmosphere optical densities at a given radius +fn sample_atmosphere(r: f32) -> AtmosphereSample { + let altitude = clamp(r, atmosphere.bottom_radius, atmosphere.top_radius) - atmosphere.bottom_radius; + + // atmosphere values at altitude + let mie_density = exp(-atmosphere.mie_density_exp_scale * altitude); + let rayleigh_density = exp(-atmosphere.rayleigh_density_exp_scale * altitude); + var ozone_density: f32 = max(0.0, 1.0 - (abs(altitude - atmosphere.ozone_layer_altitude) / (atmosphere.ozone_layer_width * 0.5))); + + let mie_scattering = mie_density * atmosphere.mie_scattering; + let mie_absorption = mie_density * atmosphere.mie_absorption; + let mie_extinction = mie_scattering + mie_absorption; + + let rayleigh_scattering = rayleigh_density * atmosphere.rayleigh_scattering; + // no rayleigh absorption + // rayleigh extinction is the sum of scattering and absorption + + // ozone doesn't contribute to scattering + let ozone_absorption = ozone_density * atmosphere.ozone_absorption; + + var sample: AtmosphereSample; + sample.rayleigh_scattering = rayleigh_scattering; + sample.mie_scattering = mie_scattering; + sample.extinction = rayleigh_scattering + mie_extinction + ozone_absorption; + + return sample; +} + +/// evaluates L_scat, equation 3 in the paper, which gives the total single-order scattering towards the view at a single point +fn sample_local_inscattering(local_atmosphere: AtmosphereSample, ray_dir: vec3, local_r: f32, local_up: vec3) -> vec3 { + var inscattering = vec3(0.0); + for (var light_i: u32 = 0u; light_i < lights.n_directional_lights; light_i++) { + let light = &lights.directional_lights[light_i]; + + let mu_light = dot((*light).direction_to_light, local_up); + + // -(L . V) == (L . -V). 
-V here is our ray direction, which points away from the view + // instead of towards it (as is the convention for V) + let neg_LdotV = dot((*light).direction_to_light, ray_dir); + + // Phase functions give the proportion of light + // scattered towards the camera for each scattering type + let rayleigh_phase = rayleigh(neg_LdotV); + let mie_phase = henyey_greenstein(neg_LdotV); + let scattering_coeff = local_atmosphere.rayleigh_scattering * rayleigh_phase + local_atmosphere.mie_scattering * mie_phase; + + let transmittance_to_light = sample_transmittance_lut(local_r, mu_light); + let shadow_factor = transmittance_to_light * f32(!ray_intersects_ground(local_r, mu_light)); + + // Transmittance from scattering event to light source + let scattering_factor = shadow_factor * scattering_coeff; + + // Additive factor from the multiscattering LUT + let psi_ms = sample_multiscattering_lut(local_r, mu_light); + let multiscattering_factor = psi_ms * (local_atmosphere.rayleigh_scattering + local_atmosphere.mie_scattering); + + inscattering += (*light).color.rgb * (scattering_factor + multiscattering_factor); + } + return inscattering * view.exposure; +} + +const SUN_ANGULAR_SIZE: f32 = 0.0174533; // angular diameter of sun in radians + +fn sample_sun_radiance(ray_dir_ws: vec3) -> vec3 { + let r = view_radius(); + let mu_view = ray_dir_ws.y; + let shadow_factor = f32(!ray_intersects_ground(r, mu_view)); + var sun_radiance = vec3(0.0); + for (var light_i: u32 = 0u; light_i < lights.n_directional_lights; light_i++) { + let light = &lights.directional_lights[light_i]; + let neg_LdotV = dot((*light).direction_to_light, ray_dir_ws); + let angle_to_sun = fast_acos(neg_LdotV); + let pixel_size = fwidth(angle_to_sun); + let factor = smoothstep(0.0, -pixel_size * ROOT_2, angle_to_sun - SUN_ANGULAR_SIZE * 0.5); + let sun_solid_angle = (SUN_ANGULAR_SIZE * SUN_ANGULAR_SIZE) * 4.0 * FRAC_PI; + sun_radiance += ((*light).color.rgb / sun_solid_angle) * factor * shadow_factor; + } + return 
sun_radiance; +} + +// TRANSFORM UTILITIES + +fn max_atmosphere_distance(r: f32, mu: f32) -> f32 { + let t_top = distance_to_top_atmosphere_boundary(r, mu); + let t_bottom = distance_to_bottom_atmosphere_boundary(r, mu); + let hits = ray_intersects_ground(r, mu); + return mix(t_top, t_bottom, f32(hits)); +} + +/// Assuming y=0 is the planet ground, returns the view radius in meters +fn view_radius() -> f32 { + return view.world_position.y * settings.scene_units_to_m + atmosphere.bottom_radius; +} + +// We assume the `up` vector at the view position is the y axis, since the world is locally flat/level. +// t = distance along view ray in atmosphere space +// NOTE: this means that if your world is actually spherical, this will be wrong. +fn get_local_up(r: f32, t: f32, ray_dir: vec3) -> vec3 { + return normalize(vec3(0.0, r, 0.0) + t * ray_dir); +} + +// Given a ray starting at radius r, with mu = cos(zenith angle), +// and a t = distance along the ray, gives the new radius at point t +fn get_local_r(r: f32, mu: f32, t: f32) -> f32 { + return sqrt(t * t + 2.0 * r * mu * t + r * r); +} + +// Convert uv [0.0 .. 1.0] coordinate to ndc space xy [-1.0 .. 1.0] +fn uv_to_ndc(uv: vec2) -> vec2 { + return uv * vec2(2.0, -2.0) + vec2(-1.0, 1.0); +} + +/// Convert ndc space xy coordinate [-1.0 .. 1.0] to uv [0.0 .. 1.0] +fn ndc_to_uv(ndc: vec2) -> vec2 { + return ndc * vec2(0.5, -0.5) + vec2(0.5); +} + +/// Converts a direction in world space to atmosphere space +fn direction_world_to_atmosphere(dir_ws: vec3) -> vec3 { + let dir_as = atmosphere_transforms.atmosphere_from_world * vec4(dir_ws, 0.0); + return dir_as.xyz; +} + +/// Converts a direction in atmosphere space to world space +fn direction_atmosphere_to_world(dir_as: vec3) -> vec3 { + let dir_ws = atmosphere_transforms.world_from_atmosphere * vec4(dir_as, 0.0); + return dir_ws.xyz; +} + +// Modified from skybox.wgsl. For this pass we don't need to apply a separate sky transform or consider camera viewport. 
+// w component is the cosine of the view direction with the view forward vector, to correct step distance at the edges of the viewport +fn uv_to_ray_direction(uv: vec2) -> vec4 { + // Using world positions of the fragment and camera to calculate a ray direction + // breaks down at large translations. This code only needs to know the ray direction. + // The ray direction is along the direction from the camera to the fragment position. + // In view space, the camera is at the origin, so the view space ray direction is + // along the direction of the fragment position - (0,0,0) which is just the + // fragment position. + // Use the position on the near clipping plane to avoid -inf world position + // because the far plane of an infinite reverse projection is at infinity. + let view_position_homogeneous = view.view_from_clip * vec4( + uv_to_ndc(uv), + 1.0, + 1.0, + ); + + let view_ray_direction = view_position_homogeneous.xyz / view_position_homogeneous.w; + // Transforming the view space ray direction by the inverse view matrix, transforms the + // direction to world space. Note that the w element is set to 0.0, as this is a + // vector direction, not a position, That causes the matrix multiplication to ignore + // the translations from the view matrix. + let ray_direction = (view.world_from_view * vec4(view_ray_direction, 0.0)).xyz; + + return vec4(normalize(ray_direction), -view_ray_direction.z); +} + +fn zenith_azimuth_to_ray_dir(zenith: f32, azimuth: f32) -> vec3 { + let sin_zenith = sin(zenith); + let mu = cos(zenith); + let sin_azimuth = sin(azimuth); + let cos_azimuth = cos(azimuth); + return vec3(sin_azimuth * sin_zenith, mu, -cos_azimuth * sin_zenith); +} diff --git a/crates/bevy_pbr/src/atmosphere/mod.rs b/crates/bevy_pbr/src/atmosphere/mod.rs new file mode 100644 index 0000000000000..e7f17f0e1e855 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/mod.rs @@ -0,0 +1,472 @@ +//! Procedural Atmospheric Scattering. +//! +//! 
This plugin implements [Hillaire's 2020 paper](https://sebh.github.io/publications/egsr2020.pdf) +//! on real-time atmospheric scattering. While it *will* work simply as a +//! procedural skybox, it also does much more. It supports dynamic time-of- +//! day, multiple directional lights, and since it's applied as a post-processing +//! effect *on top* of the existing skybox, a starry skybox would automatically +//! show based on the time of day. Scattering in front of terrain (similar +//! to distance fog, but more complex) is handled as well, and takes into +//! account the directional light color and direction. +//! +//! Adding the [`Atmosphere`] component to a 3d camera will enable the effect, +//! which by default is set to look similar to Earth's atmosphere. See the +//! documentation on the component itself for information regarding its fields. +//! +//! Performance-wise, the effect should be fairly cheap since the LUTs (Look +//! Up Tables) that encode most of the data are small, and take advantage of the +//! fact that the atmosphere is symmetric. Performance is also proportional to +//! the number of directional lights in the scene. In order to tune +//! performance more finely, the [`AtmosphereSettings`] camera component +//! manages the size of each LUT and the sample count for each ray. +//! +//! Given how similar it is to [`crate::volumetric_fog`], it might be expected +//! that these two modules would work together well. However, for now using both +//! at once is untested, and might not be physically accurate. These may be +//! integrated into a single module in the future. +//! +//! On web platforms, atmosphere rendering will look slightly different. Specifically, when calculating how light travels +//! through the atmosphere, we use a simpler averaging technique instead of the more +//! complex blending operations. This difference will be resolved for WebGPU in a future release. +//! +//! [Shadertoy]: https://www.shadertoy.com/view/slSXRW +//! +//! 
[Unreal Engine Implementation]: https://github.com/sebh/UnrealEngineSkyAtmosphere + +mod node; +pub mod resources; + +use bevy_app::{App, Plugin}; +use bevy_asset::load_internal_asset; +use bevy_core_pipeline::core_3d::graph::Node3d; +use bevy_ecs::{ + component::Component, + query::{Changed, QueryItem, With}, + schedule::IntoScheduleConfigs, + system::{lifetimeless::Read, Query}, +}; +use bevy_math::{UVec2, UVec3, Vec3}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; +use bevy_render::{ + extract_component::UniformComponentPlugin, + render_resource::{DownlevelFlags, ShaderType, SpecializedRenderPipelines}, +}; +use bevy_render::{ + extract_component::{ExtractComponent, ExtractComponentPlugin}, + render_graph::{RenderGraphApp, ViewNodeRunner}, + render_resource::{Shader, TextureFormat, TextureUsages}, + renderer::RenderAdapter, + Render, RenderApp, RenderSet, +}; + +use bevy_core_pipeline::core_3d::{graph::Core3d, Camera3d}; +use resources::{ + prepare_atmosphere_transforms, queue_render_sky_pipelines, AtmosphereTransforms, + RenderSkyBindGroupLayouts, +}; +use tracing::warn; + +use self::{ + node::{AtmosphereLutsNode, AtmosphereNode, RenderSkyNode}, + resources::{ + prepare_atmosphere_bind_groups, prepare_atmosphere_textures, AtmosphereBindGroupLayouts, + AtmosphereLutPipelines, AtmosphereSamplers, + }, +}; + +mod shaders { + use bevy_asset::{weak_handle, Handle}; + use bevy_render::render_resource::Shader; + + pub const TYPES: Handle = weak_handle!("ef7e147e-30a0-4513-bae3-ddde2a6c20c5"); + pub const FUNCTIONS: Handle = weak_handle!("7ff93872-2ee9-4598-9f88-68b02fef605f"); + pub const BRUNETON_FUNCTIONS: Handle = + weak_handle!("e2dccbb0-7322-444a-983b-e74d0a08bcda"); + pub const BINDINGS: Handle = weak_handle!("bcc55ce5-0fc4-451e-8393-1b9efd2612c4"); + + pub const TRANSMITTANCE_LUT: Handle = + weak_handle!("a4187282-8cb1-42d3-889c-cbbfb6044183"); + pub const MULTISCATTERING_LUT: Handle = + weak_handle!("bde3a71a-73e9-49fe-a379-a81940c67a1e"); + pub 
const SKY_VIEW_LUT: Handle = weak_handle!("f87e007a-bf4b-4f99-9ef0-ac21d369f0e5"); + pub const AERIAL_VIEW_LUT: Handle = + weak_handle!("a3daf030-4b64-49ae-a6a7-354489597cbe"); + pub const RENDER_SKY: Handle = weak_handle!("09422f46-d0f7-41c1-be24-121c17d6e834"); +} + +#[doc(hidden)] +pub struct AtmospherePlugin; + +impl Plugin for AtmospherePlugin { + fn build(&self, app: &mut App) { + load_internal_asset!(app, shaders::TYPES, "types.wgsl", Shader::from_wgsl); + load_internal_asset!(app, shaders::FUNCTIONS, "functions.wgsl", Shader::from_wgsl); + load_internal_asset!( + app, + shaders::BRUNETON_FUNCTIONS, + "bruneton_functions.wgsl", + Shader::from_wgsl + ); + + load_internal_asset!(app, shaders::BINDINGS, "bindings.wgsl", Shader::from_wgsl); + + load_internal_asset!( + app, + shaders::TRANSMITTANCE_LUT, + "transmittance_lut.wgsl", + Shader::from_wgsl + ); + + load_internal_asset!( + app, + shaders::MULTISCATTERING_LUT, + "multiscattering_lut.wgsl", + Shader::from_wgsl + ); + + load_internal_asset!( + app, + shaders::SKY_VIEW_LUT, + "sky_view_lut.wgsl", + Shader::from_wgsl + ); + + load_internal_asset!( + app, + shaders::AERIAL_VIEW_LUT, + "aerial_view_lut.wgsl", + Shader::from_wgsl + ); + + load_internal_asset!( + app, + shaders::RENDER_SKY, + "render_sky.wgsl", + Shader::from_wgsl + ); + + app.register_type::() + .register_type::() + .add_plugins(( + ExtractComponentPlugin::::default(), + ExtractComponentPlugin::::default(), + UniformComponentPlugin::::default(), + UniformComponentPlugin::::default(), + )); + } + + fn finish(&self, app: &mut App) { + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + let render_adapter = render_app.world().resource::(); + + if !render_adapter + .get_downlevel_capabilities() + .flags + .contains(DownlevelFlags::COMPUTE_SHADERS) + { + warn!("AtmospherePlugin not loaded. 
GPU lacks support for compute shaders."); + return; + } + + if !render_adapter + .get_texture_format_features(TextureFormat::Rgba16Float) + .allowed_usages + .contains(TextureUsages::STORAGE_BINDING) + { + warn!("AtmospherePlugin not loaded. GPU lacks support: TextureFormat::Rgba16Float does not support TextureUsages::STORAGE_BINDING."); + return; + } + + render_app + .init_resource::() + .init_resource::() + .init_resource::() + .init_resource::() + .init_resource::() + .init_resource::>() + .add_systems( + Render, + ( + configure_camera_depth_usages.in_set(RenderSet::ManageViews), + queue_render_sky_pipelines.in_set(RenderSet::Queue), + prepare_atmosphere_textures.in_set(RenderSet::PrepareResources), + prepare_atmosphere_transforms.in_set(RenderSet::PrepareResources), + prepare_atmosphere_bind_groups.in_set(RenderSet::PrepareBindGroups), + ), + ) + .add_render_graph_node::>( + Core3d, + AtmosphereNode::RenderLuts, + ) + .add_render_graph_edges( + Core3d, + ( + // END_PRE_PASSES -> RENDER_LUTS -> MAIN_PASS + Node3d::EndPrepasses, + AtmosphereNode::RenderLuts, + Node3d::StartMainPass, + ), + ) + .add_render_graph_node::>( + Core3d, + AtmosphereNode::RenderSky, + ) + .add_render_graph_edges( + Core3d, + ( + Node3d::MainOpaquePass, + AtmosphereNode::RenderSky, + Node3d::MainTransparentPass, + ), + ); + } +} + +/// This component describes the atmosphere of a planet, and when added to a camera +/// will enable atmospheric scattering for that camera. This is only compatible with +/// HDR cameras. +/// +/// Most atmospheric particles scatter and absorb light in two main ways: +/// +/// Rayleigh scattering occurs among very small particles, like individual gas +/// molecules. It's wavelength dependent, and causes colors to separate out as +/// light travels through the atmosphere. These particles *don't* absorb light. +/// +/// Mie scattering occurs among slightly larger particles, like dust and sea spray. 
+/// These particles *do* absorb light, but Mie scattering and absorption is +/// *wavelength independent*. +/// +/// Ozone acts differently from the other two, and is special-cased because +/// it's very important to the look of Earth's atmosphere. It's wavelength +/// dependent, but only *absorbs* light. Also, while the density of particles +/// participating in Rayleigh and Mie scattering falls off roughly exponentially +/// from the planet's surface, ozone only exists in a band centered at a fairly +/// high altitude. +#[derive(Clone, Component, Reflect, ShaderType)] +#[require(AtmosphereSettings)] +#[reflect(Clone, Default)] +pub struct Atmosphere { + /// Radius of the planet + /// + /// units: m + pub bottom_radius: f32, + + /// Radius at which we consider the atmosphere to 'end' for our + /// calculations (from center of planet) + /// + /// units: m + pub top_radius: f32, + + /// An approximation of the average albedo (or color, roughly) of the + /// planet's surface. This is used when calculating multiscattering. + /// + /// units: N/A + pub ground_albedo: Vec3, + + /// The rate of falloff of rayleigh particulate with respect to altitude: + /// optical density = exp(-rayleigh_density_exp_scale * altitude in meters). + /// + /// THIS VALUE MUST BE POSITIVE + /// + /// units: N/A + pub rayleigh_density_exp_scale: f32, + + /// The scattering optical density of rayleigh particulate, or how + /// much light it scatters per meter + /// + /// units: m^-1 + pub rayleigh_scattering: Vec3, + + /// The rate of falloff of mie particulate with respect to altitude: + /// optical density = exp(-mie_density_exp_scale * altitude in meters) + /// + /// THIS VALUE MUST BE POSITIVE + /// + /// units: N/A + pub mie_density_exp_scale: f32, + + /// The scattering optical density of mie particulate, or how much light + /// it scatters per meter. 
+ /// + /// units: m^-1 + pub mie_scattering: f32, + + /// The absorbing optical density of mie particulate, or how much light + /// it absorbs per meter. + /// + /// units: m^-1 + pub mie_absorption: f32, + + /// The "asymmetry" of mie scattering, or how much light tends to scatter + /// forwards, rather than backwards or to the side. + /// + /// domain: (-1, 1) + /// units: N/A + pub mie_asymmetry: f32, //the "asymmetry" value of the phase function, unitless. Domain: (-1, 1) + + /// The altitude at which the ozone layer is centered. + /// + /// units: m + pub ozone_layer_altitude: f32, + + /// The width of the ozone layer + /// + /// units: m + pub ozone_layer_width: f32, + + /// The optical density of ozone, or how much of each wavelength of + /// light it absorbs per meter. + /// + /// units: m^-1 + pub ozone_absorption: Vec3, +} + +impl Atmosphere { + pub const EARTH: Atmosphere = Atmosphere { + bottom_radius: 6_360_000.0, + top_radius: 6_460_000.0, + ground_albedo: Vec3::splat(0.3), + rayleigh_density_exp_scale: 1.0 / 8_000.0, + rayleigh_scattering: Vec3::new(5.802e-6, 13.558e-6, 33.100e-6), + mie_density_exp_scale: 1.0 / 1_200.0, + mie_scattering: 3.996e-6, + mie_absorption: 0.444e-6, + mie_asymmetry: 0.8, + ozone_layer_altitude: 25_000.0, + ozone_layer_width: 30_000.0, + ozone_absorption: Vec3::new(0.650e-6, 1.881e-6, 0.085e-6), + }; + + pub fn with_density_multiplier(mut self, mult: f32) -> Self { + self.rayleigh_scattering *= mult; + self.mie_scattering *= mult; + self.mie_absorption *= mult; + self.ozone_absorption *= mult; + self + } +} + +impl Default for Atmosphere { + fn default() -> Self { + Self::EARTH + } +} + +impl ExtractComponent for Atmosphere { + type QueryData = Read; + + type QueryFilter = With; + + type Out = Atmosphere; + + fn extract_component(item: QueryItem<'_, Self::QueryData>) -> Option { + Some(item.clone()) + } +} + +/// This component controls the resolution of the atmosphere LUTs, and +/// how many samples are used when computing 
them. +/// +/// The transmittance LUT stores the transmittance from a point in the +/// atmosphere to the outer edge of the atmosphere in any direction, +/// parametrized by the point's radius and the cosine of the zenith angle +/// of the ray. +/// +/// The multiscattering LUT stores the factor representing luminance scattered +/// towards the camera with scattering order >2, parametrized by the point's radius +/// and the cosine of the zenith angle of the sun. +/// +/// The sky-view lut is essentially the actual skybox, storing the light scattered +/// towards the camera in every direction with a cubemap. +/// +/// The aerial-view lut is a 3d LUT fit to the view frustum, which stores the luminance +/// scattered towards the camera at each point (RGB channels), alongside the average +/// transmittance to that point (A channel). +#[derive(Clone, Component, Reflect, ShaderType)] +#[reflect(Clone, Default)] +pub struct AtmosphereSettings { + /// The size of the transmittance LUT + pub transmittance_lut_size: UVec2, + + /// The size of the multiscattering LUT + pub multiscattering_lut_size: UVec2, + + /// The size of the sky-view LUT. + pub sky_view_lut_size: UVec2, + + /// The size of the aerial-view LUT. + pub aerial_view_lut_size: UVec3, + + /// The number of points to sample along each ray when + /// computing the transmittance LUT + pub transmittance_lut_samples: u32, + + /// The number of rays to sample when computing each + /// pixel of the multiscattering LUT + pub multiscattering_lut_dirs: u32, + + /// The number of points to sample when integrating along each + /// multiscattering ray + pub multiscattering_lut_samples: u32, + + /// The number of points to sample along each ray when + /// computing the sky-view LUT. + pub sky_view_lut_samples: u32, + + /// The number of points to sample for each slice along the z-axis + /// of the aerial-view LUT. 
+ pub aerial_view_lut_samples: u32, + + /// The maximum distance from the camera to evaluate the + /// aerial view LUT. The slices along the z-axis of the + /// texture will be distributed linearly from the camera + /// to this value. + /// + /// units: m + pub aerial_view_lut_max_distance: f32, + + /// A conversion factor between scene units and meters, used to + /// ensure correctness at different length scales. + pub scene_units_to_m: f32, +} + +impl Default for AtmosphereSettings { + fn default() -> Self { + Self { + transmittance_lut_size: UVec2::new(256, 128), + transmittance_lut_samples: 40, + multiscattering_lut_size: UVec2::new(32, 32), + multiscattering_lut_dirs: 64, + multiscattering_lut_samples: 20, + sky_view_lut_size: UVec2::new(400, 200), + sky_view_lut_samples: 16, + aerial_view_lut_size: UVec3::new(32, 32, 32), + aerial_view_lut_samples: 10, + aerial_view_lut_max_distance: 3.2e4, + scene_units_to_m: 1.0, + } + } +} + +impl ExtractComponent for AtmosphereSettings { + type QueryData = Read; + + type QueryFilter = (With, With); + + type Out = AtmosphereSettings; + + fn extract_component(item: QueryItem<'_, Self::QueryData>) -> Option { + Some(item.clone()) + } +} + +fn configure_camera_depth_usages( + mut cameras: Query<&mut Camera3d, (Changed, With)>, +) { + for mut camera in &mut cameras { + camera.depth_texture_usages.0 |= TextureUsages::TEXTURE_BINDING.bits(); + } +} diff --git a/crates/bevy_pbr/src/atmosphere/multiscattering_lut.wgsl b/crates/bevy_pbr/src/atmosphere/multiscattering_lut.wgsl new file mode 100644 index 0000000000000..2df07c98b84ec --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/multiscattering_lut.wgsl @@ -0,0 +1,139 @@ +#import bevy_pbr::{ + mesh_view_types::{Lights, DirectionalLight}, + atmosphere::{ + types::{Atmosphere, AtmosphereSettings}, + bindings::{atmosphere, settings}, + functions::{ + multiscattering_lut_uv_to_r_mu, sample_transmittance_lut, + get_local_r, get_local_up, sample_atmosphere, FRAC_4_PI, + 
max_atmosphere_distance, rayleigh, henyey_greenstein, + zenith_azimuth_to_ray_dir, + }, + bruneton_functions::{ + distance_to_top_atmosphere_boundary, distance_to_bottom_atmosphere_boundary, ray_intersects_ground + } + } +} + +#import bevy_render::maths::{PI,PI_2} + +const PHI_2: vec2 = vec2(1.3247179572447460259609088, 1.7548776662466927600495087); + +@group(0) @binding(13) var multiscattering_lut_out: texture_storage_2d; + +fn s2_sequence(n: u32) -> vec2 { + return fract(0.5 + f32(n) * PHI_2); +} + +// Lambert equal-area projection. +fn uv_to_sphere(uv: vec2) -> vec3 { + let phi = PI_2 * uv.y; + let sin_lambda = 2 * uv.x - 1; + let cos_lambda = sqrt(1 - sin_lambda * sin_lambda); + + return vec3(cos_lambda * cos(phi), cos_lambda * sin(phi), sin_lambda); +} + +// Shared memory arrays for workgroup communication +var multi_scat_shared_mem: array, 64>; +var l_shared_mem: array, 64>; + +@compute +@workgroup_size(1, 1, 64) +fn main(@builtin(global_invocation_id) global_id: vec3) { + var uv = (vec2(global_id.xy) + 0.5) / vec2(settings.multiscattering_lut_size); + + let r_mu = multiscattering_lut_uv_to_r_mu(uv); + let light_dir = normalize(vec3(0.0, r_mu.y, -1.0)); + + let ray_dir = uv_to_sphere(s2_sequence(global_id.z)); + let ms_sample = sample_multiscattering_dir(r_mu.x, ray_dir, light_dir); + + // Calculate the contribution for this sample + let sphere_solid_angle = 4.0 * PI; + let sample_weight = sphere_solid_angle / 64.0; + multi_scat_shared_mem[global_id.z] = ms_sample.f_ms * sample_weight; + l_shared_mem[global_id.z] = ms_sample.l_2 * sample_weight; + + workgroupBarrier(); + + // Parallel reduction bitshift to the right to divide by 2 each step + for (var step = 32u; step > 0u; step >>= 1u) { + if global_id.z < step { + multi_scat_shared_mem[global_id.z] += multi_scat_shared_mem[global_id.z + step]; + l_shared_mem[global_id.z] += l_shared_mem[global_id.z + step]; + } + workgroupBarrier(); + } + + if global_id.z > 0u { + return; + } + + // Apply isotropic phase 
function + let f_ms = multi_scat_shared_mem[0] * FRAC_4_PI; + let l_2 = l_shared_mem[0] * FRAC_4_PI; + + // Equation 10 from the paper: Geometric series for infinite scattering + let psi_ms = l_2 / (1.0 - f_ms); + textureStore(multiscattering_lut_out, global_id.xy, vec4(psi_ms, 1.0)); +} + +struct MultiscatteringSample { + l_2: vec3, + f_ms: vec3, +}; + +fn sample_multiscattering_dir(r: f32, ray_dir: vec3, light_dir: vec3) -> MultiscatteringSample { + // get the cosine of the zenith angle of the view direction with respect to the light direction + let mu_view = ray_dir.y; + let t_max = max_atmosphere_distance(r, mu_view); + + let dt = t_max / f32(settings.multiscattering_lut_samples); + var optical_depth = vec3(0.0); + + var l_2 = vec3(0.0); + var f_ms = vec3(0.0); + var throughput = vec3(1.0); + for (var i: u32 = 0u; i < settings.multiscattering_lut_samples; i++) { + let t_i = dt * (f32(i) + 0.5); + let local_r = get_local_r(r, mu_view, t_i); + let local_up = get_local_up(r, t_i, ray_dir); + + let local_atmosphere = sample_atmosphere(local_r); + let sample_optical_depth = local_atmosphere.extinction * dt; + let sample_transmittance = exp(-sample_optical_depth); + optical_depth += sample_optical_depth; + + let mu_light = dot(light_dir, local_up); + let scattering_no_phase = local_atmosphere.rayleigh_scattering + local_atmosphere.mie_scattering; + + let ms = scattering_no_phase; + let ms_int = (ms - ms * sample_transmittance) / local_atmosphere.extinction; + f_ms += throughput * ms_int; + + let transmittance_to_light = sample_transmittance_lut(local_r, mu_light); + let shadow_factor = transmittance_to_light * f32(!ray_intersects_ground(local_r, mu_light)); + + let s = scattering_no_phase * shadow_factor * FRAC_4_PI; + let s_int = (s - s * sample_transmittance) / local_atmosphere.extinction; + l_2 += throughput * s_int; + + throughput *= sample_transmittance; + if all(throughput < vec3(0.001)) { + break; + } + } + + //include reflected luminance from planet ground + 
if ray_intersects_ground(r, mu_view) { + let transmittance_to_ground = exp(-optical_depth); + let local_up = get_local_up(r, t_max, ray_dir); + let mu_light = dot(light_dir, local_up); + let transmittance_to_light = sample_transmittance_lut(0.0, mu_light); + let ground_luminance = transmittance_to_light * transmittance_to_ground * max(mu_light, 0.0) * atmosphere.ground_albedo; + l_2 += ground_luminance; + } + + return MultiscatteringSample(l_2, f_ms); +} diff --git a/crates/bevy_pbr/src/atmosphere/node.rs b/crates/bevy_pbr/src/atmosphere/node.rs new file mode 100644 index 0000000000000..851447d760fc3 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/node.rs @@ -0,0 +1,221 @@ +use bevy_ecs::{query::QueryItem, system::lifetimeless::Read, world::World}; +use bevy_math::{UVec2, Vec3Swizzles}; +use bevy_render::{ + extract_component::DynamicUniformIndex, + render_graph::{NodeRunError, RenderGraphContext, RenderLabel, ViewNode}, + render_resource::{ComputePass, ComputePassDescriptor, PipelineCache, RenderPassDescriptor}, + renderer::RenderContext, + view::{ViewTarget, ViewUniformOffset}, +}; + +use crate::ViewLightsUniformOffset; + +use super::{ + resources::{ + AtmosphereBindGroups, AtmosphereLutPipelines, AtmosphereTransformsOffset, + RenderSkyPipelineId, + }, + Atmosphere, AtmosphereSettings, +}; + +#[derive(PartialEq, Eq, Debug, Copy, Clone, Hash, RenderLabel)] +pub enum AtmosphereNode { + RenderLuts, + RenderSky, +} + +#[derive(Default)] +pub(super) struct AtmosphereLutsNode {} + +impl ViewNode for AtmosphereLutsNode { + type ViewQuery = ( + Read, + Read, + Read>, + Read>, + Read, + Read, + Read, + ); + + fn run( + &self, + _graph: &mut RenderGraphContext, + render_context: &mut RenderContext, + ( + settings, + bind_groups, + atmosphere_uniforms_offset, + settings_uniforms_offset, + atmosphere_transforms_offset, + view_uniforms_offset, + lights_uniforms_offset, + ): QueryItem, + world: &World, + ) -> Result<(), NodeRunError> { + let pipelines = world.resource::(); 
+ let pipeline_cache = world.resource::(); + let ( + Some(transmittance_lut_pipeline), + Some(multiscattering_lut_pipeline), + Some(sky_view_lut_pipeline), + Some(aerial_view_lut_pipeline), + ) = ( + pipeline_cache.get_compute_pipeline(pipelines.transmittance_lut), + pipeline_cache.get_compute_pipeline(pipelines.multiscattering_lut), + pipeline_cache.get_compute_pipeline(pipelines.sky_view_lut), + pipeline_cache.get_compute_pipeline(pipelines.aerial_view_lut), + ) + else { + return Ok(()); + }; + + let command_encoder = render_context.command_encoder(); + + let mut luts_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor { + label: Some("atmosphere_luts_pass"), + timestamp_writes: None, + }); + + fn dispatch_2d(compute_pass: &mut ComputePass, size: UVec2) { + const WORKGROUP_SIZE: u32 = 16; + let workgroups_x = size.x.div_ceil(WORKGROUP_SIZE); + let workgroups_y = size.y.div_ceil(WORKGROUP_SIZE); + compute_pass.dispatch_workgroups(workgroups_x, workgroups_y, 1); + } + + // Transmittance LUT + + luts_pass.set_pipeline(transmittance_lut_pipeline); + luts_pass.set_bind_group( + 0, + &bind_groups.transmittance_lut, + &[ + atmosphere_uniforms_offset.index(), + settings_uniforms_offset.index(), + ], + ); + + dispatch_2d(&mut luts_pass, settings.transmittance_lut_size); + + // Multiscattering LUT + + luts_pass.set_pipeline(multiscattering_lut_pipeline); + luts_pass.set_bind_group( + 0, + &bind_groups.multiscattering_lut, + &[ + atmosphere_uniforms_offset.index(), + settings_uniforms_offset.index(), + ], + ); + + luts_pass.dispatch_workgroups( + settings.multiscattering_lut_size.x, + settings.multiscattering_lut_size.y, + 1, + ); + + // Sky View LUT + + luts_pass.set_pipeline(sky_view_lut_pipeline); + luts_pass.set_bind_group( + 0, + &bind_groups.sky_view_lut, + &[ + atmosphere_uniforms_offset.index(), + settings_uniforms_offset.index(), + atmosphere_transforms_offset.index(), + view_uniforms_offset.offset, + lights_uniforms_offset.offset, + ], + ); + + 
dispatch_2d(&mut luts_pass, settings.sky_view_lut_size); + + // Aerial View LUT + + luts_pass.set_pipeline(aerial_view_lut_pipeline); + luts_pass.set_bind_group( + 0, + &bind_groups.aerial_view_lut, + &[ + atmosphere_uniforms_offset.index(), + settings_uniforms_offset.index(), + view_uniforms_offset.offset, + lights_uniforms_offset.offset, + ], + ); + + dispatch_2d(&mut luts_pass, settings.aerial_view_lut_size.xy()); + + Ok(()) + } +} + +#[derive(Default)] +pub(super) struct RenderSkyNode; + +impl ViewNode for RenderSkyNode { + type ViewQuery = ( + Read, + Read, + Read>, + Read>, + Read, + Read, + Read, + Read, + ); + + fn run<'w>( + &self, + _graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + ( + atmosphere_bind_groups, + view_target, + atmosphere_uniforms_offset, + settings_uniforms_offset, + atmosphere_transforms_offset, + view_uniforms_offset, + lights_uniforms_offset, + render_sky_pipeline_id, + ): QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let pipeline_cache = world.resource::(); + let Some(render_sky_pipeline) = + pipeline_cache.get_render_pipeline(render_sky_pipeline_id.0) + else { + return Ok(()); + }; //TODO: warning + + let mut render_sky_pass = + render_context + .command_encoder() + .begin_render_pass(&RenderPassDescriptor { + label: Some("render_sky_pass"), + color_attachments: &[Some(view_target.get_color_attachment())], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + + render_sky_pass.set_pipeline(render_sky_pipeline); + render_sky_pass.set_bind_group( + 0, + &atmosphere_bind_groups.render_sky, + &[ + atmosphere_uniforms_offset.index(), + settings_uniforms_offset.index(), + atmosphere_transforms_offset.index(), + view_uniforms_offset.offset, + lights_uniforms_offset.offset, + ], + ); + render_sky_pass.draw(0..3, 0..1); + + Ok(()) + } +} diff --git a/crates/bevy_pbr/src/atmosphere/render_sky.wgsl 
b/crates/bevy_pbr/src/atmosphere/render_sky.wgsl new file mode 100644 index 0000000000000..e488656df4cd6 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/render_sky.wgsl @@ -0,0 +1,58 @@ +#import bevy_pbr::atmosphere::{ + types::{Atmosphere, AtmosphereSettings}, + bindings::{atmosphere, view, atmosphere_transforms}, + functions::{ + sample_transmittance_lut, sample_transmittance_lut_segment, + sample_sky_view_lut, direction_world_to_atmosphere, + uv_to_ray_direction, uv_to_ndc, sample_aerial_view_lut, + view_radius, sample_sun_radiance, ndc_to_camera_dist + }, +}; +#import bevy_render::view::View; + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +#ifdef MULTISAMPLED +@group(0) @binding(13) var depth_texture: texture_depth_multisampled_2d; +#else +@group(0) @binding(13) var depth_texture: texture_depth_2d; +#endif + +struct RenderSkyOutput { + @location(0) inscattering: vec4, +#ifdef DUAL_SOURCE_BLENDING + @location(0) @second_blend_source transmittance: vec4, +#endif +} + +@fragment +fn main(in: FullscreenVertexOutput) -> RenderSkyOutput { + let depth = textureLoad(depth_texture, vec2(in.position.xy), 0); + + let ray_dir_ws = uv_to_ray_direction(in.uv); + let r = view_radius(); + let mu = ray_dir_ws.y; + + var transmittance: vec3; + var inscattering: vec3; + + let sun_radiance = sample_sun_radiance(ray_dir_ws.xyz); + + if depth == 0.0 { + let ray_dir_as = direction_world_to_atmosphere(ray_dir_ws.xyz); + transmittance = sample_transmittance_lut(r, mu); + inscattering += sample_sky_view_lut(r, ray_dir_as); + inscattering += sun_radiance * transmittance * view.exposure; + } else { + let t = ndc_to_camera_dist(vec3(uv_to_ndc(in.uv), depth)); + inscattering = sample_aerial_view_lut(in.uv, t); + transmittance = sample_transmittance_lut_segment(r, mu, t); + } +#ifdef DUAL_SOURCE_BLENDING + return RenderSkyOutput(vec4(inscattering, 0.0), vec4(transmittance, 1.0)); +#else + let mean_transmittance = (transmittance.r + transmittance.g + 
transmittance.b) / 3.0; + return RenderSkyOutput(vec4(inscattering, mean_transmittance)); +#endif + +} diff --git a/crates/bevy_pbr/src/atmosphere/resources.rs b/crates/bevy_pbr/src/atmosphere/resources.rs new file mode 100644 index 0000000000000..b872916619830 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/resources.rs @@ -0,0 +1,732 @@ +use bevy_core_pipeline::{ + core_3d::Camera3d, fullscreen_vertex_shader::fullscreen_shader_vertex_state, +}; +use bevy_ecs::{ + component::Component, + entity::Entity, + query::With, + resource::Resource, + system::{Commands, Query, Res, ResMut}, + world::{FromWorld, World}, +}; +use bevy_math::{Mat4, Vec3}; +use bevy_render::{ + camera::Camera, + extract_component::ComponentUniforms, + render_resource::{binding_types::*, *}, + renderer::{RenderDevice, RenderQueue}, + texture::{CachedTexture, TextureCache}, + view::{ExtractedView, Msaa, ViewDepthTexture, ViewUniform, ViewUniforms}, +}; + +use crate::{GpuLights, LightMeta}; + +use super::{shaders, Atmosphere, AtmosphereSettings}; + +#[derive(Resource)] +pub(crate) struct AtmosphereBindGroupLayouts { + pub transmittance_lut: BindGroupLayout, + pub multiscattering_lut: BindGroupLayout, + pub sky_view_lut: BindGroupLayout, + pub aerial_view_lut: BindGroupLayout, +} + +#[derive(Resource)] +pub(crate) struct RenderSkyBindGroupLayouts { + pub render_sky: BindGroupLayout, + pub render_sky_msaa: BindGroupLayout, +} + +impl FromWorld for AtmosphereBindGroupLayouts { + fn from_world(world: &mut World) -> Self { + let render_device = world.resource::(); + let transmittance_lut = render_device.create_bind_group_layout( + "transmittance_lut_bind_group_layout", + &BindGroupLayoutEntries::with_indices( + ShaderStages::COMPUTE, + ( + (0, uniform_buffer::(true)), + (1, uniform_buffer::(true)), + ( + // transmittance lut storage texture + 13, + texture_storage_2d( + TextureFormat::Rgba16Float, + StorageTextureAccess::WriteOnly, + ), + ), + ), + ), + ); + + let multiscattering_lut = 
render_device.create_bind_group_layout( + "multiscattering_lut_bind_group_layout", + &BindGroupLayoutEntries::with_indices( + ShaderStages::COMPUTE, + ( + (0, uniform_buffer::(true)), + (1, uniform_buffer::(true)), + (5, texture_2d(TextureSampleType::Float { filterable: true })), //transmittance lut and sampler + (6, sampler(SamplerBindingType::Filtering)), + ( + //multiscattering lut storage texture + 13, + texture_storage_2d( + TextureFormat::Rgba16Float, + StorageTextureAccess::WriteOnly, + ), + ), + ), + ), + ); + + let sky_view_lut = render_device.create_bind_group_layout( + "sky_view_lut_bind_group_layout", + &BindGroupLayoutEntries::with_indices( + ShaderStages::COMPUTE, + ( + (0, uniform_buffer::(true)), + (1, uniform_buffer::(true)), + (2, uniform_buffer::(true)), + (3, uniform_buffer::(true)), + (4, uniform_buffer::(true)), + (5, texture_2d(TextureSampleType::Float { filterable: true })), //transmittance lut and sampler + (6, sampler(SamplerBindingType::Filtering)), + (7, texture_2d(TextureSampleType::Float { filterable: true })), //multiscattering lut and sampler + (8, sampler(SamplerBindingType::Filtering)), + ( + 13, + texture_storage_2d( + TextureFormat::Rgba16Float, + StorageTextureAccess::WriteOnly, + ), + ), + ), + ), + ); + + let aerial_view_lut = render_device.create_bind_group_layout( + "aerial_view_lut_bind_group_layout", + &BindGroupLayoutEntries::with_indices( + ShaderStages::COMPUTE, + ( + (0, uniform_buffer::(true)), + (1, uniform_buffer::(true)), + (3, uniform_buffer::(true)), + (4, uniform_buffer::(true)), + (5, texture_2d(TextureSampleType::Float { filterable: true })), //transmittance lut and sampler + (6, sampler(SamplerBindingType::Filtering)), + (7, texture_2d(TextureSampleType::Float { filterable: true })), //multiscattering lut and sampler + (8, sampler(SamplerBindingType::Filtering)), + ( + //Aerial view lut storage texture + 13, + texture_storage_3d( + TextureFormat::Rgba16Float, + StorageTextureAccess::WriteOnly, + ), + ), + ), 
+ ), + ); + + Self { + transmittance_lut, + multiscattering_lut, + sky_view_lut, + aerial_view_lut, + } + } +} + +impl FromWorld for RenderSkyBindGroupLayouts { + fn from_world(world: &mut World) -> Self { + let render_device = world.resource::(); + let render_sky = render_device.create_bind_group_layout( + "render_sky_bind_group_layout", + &BindGroupLayoutEntries::with_indices( + ShaderStages::FRAGMENT, + ( + (0, uniform_buffer::(true)), + (1, uniform_buffer::(true)), + (2, uniform_buffer::(true)), + (3, uniform_buffer::(true)), + (4, uniform_buffer::(true)), + (5, texture_2d(TextureSampleType::Float { filterable: true })), //transmittance lut and sampler + (6, sampler(SamplerBindingType::Filtering)), + (9, texture_2d(TextureSampleType::Float { filterable: true })), //sky view lut and sampler + (10, sampler(SamplerBindingType::Filtering)), + ( + // aerial view lut and sampler + 11, + texture_3d(TextureSampleType::Float { filterable: true }), + ), + (12, sampler(SamplerBindingType::Filtering)), + ( + //view depth texture + 13, + texture_2d(TextureSampleType::Depth), + ), + ), + ), + ); + + let render_sky_msaa = render_device.create_bind_group_layout( + "render_sky_msaa_bind_group_layout", + &BindGroupLayoutEntries::with_indices( + ShaderStages::FRAGMENT, + ( + (0, uniform_buffer::(true)), + (1, uniform_buffer::(true)), + (2, uniform_buffer::(true)), + (3, uniform_buffer::(true)), + (4, uniform_buffer::(true)), + (5, texture_2d(TextureSampleType::Float { filterable: true })), //transmittance lut and sampler + (6, sampler(SamplerBindingType::Filtering)), + (9, texture_2d(TextureSampleType::Float { filterable: true })), //sky view lut and sampler + (10, sampler(SamplerBindingType::Filtering)), + ( + // aerial view lut and sampler + 11, + texture_3d(TextureSampleType::Float { filterable: true }), + ), + (12, sampler(SamplerBindingType::Filtering)), + ( + //view depth texture + 13, + texture_2d_multisampled(TextureSampleType::Depth), + ), + ), + ), + ); + + Self { + 
render_sky, + render_sky_msaa, + } + } +} + +#[derive(Resource)] +pub struct AtmosphereSamplers { + pub transmittance_lut: Sampler, + pub multiscattering_lut: Sampler, + pub sky_view_lut: Sampler, + pub aerial_view_lut: Sampler, +} + +impl FromWorld for AtmosphereSamplers { + fn from_world(world: &mut World) -> Self { + let render_device = world.resource::(); + + let base_sampler = SamplerDescriptor { + mag_filter: FilterMode::Linear, + min_filter: FilterMode::Linear, + mipmap_filter: FilterMode::Nearest, + ..Default::default() + }; + + let transmittance_lut = render_device.create_sampler(&SamplerDescriptor { + label: Some("transmittance_lut_sampler"), + ..base_sampler + }); + + let multiscattering_lut = render_device.create_sampler(&SamplerDescriptor { + label: Some("multiscattering_lut_sampler"), + ..base_sampler + }); + + let sky_view_lut = render_device.create_sampler(&SamplerDescriptor { + label: Some("sky_view_lut_sampler"), + address_mode_u: AddressMode::Repeat, + ..base_sampler + }); + + let aerial_view_lut = render_device.create_sampler(&SamplerDescriptor { + label: Some("aerial_view_lut_sampler"), + ..base_sampler + }); + + Self { + transmittance_lut, + multiscattering_lut, + sky_view_lut, + aerial_view_lut, + } + } +} + +#[derive(Resource)] +pub(crate) struct AtmosphereLutPipelines { + pub transmittance_lut: CachedComputePipelineId, + pub multiscattering_lut: CachedComputePipelineId, + pub sky_view_lut: CachedComputePipelineId, + pub aerial_view_lut: CachedComputePipelineId, +} + +impl FromWorld for AtmosphereLutPipelines { + fn from_world(world: &mut World) -> Self { + let pipeline_cache = world.resource::(); + let layouts = world.resource::(); + + let transmittance_lut = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor { + label: Some("transmittance_lut_pipeline".into()), + layout: vec![layouts.transmittance_lut.clone()], + push_constant_ranges: vec![], + shader: shaders::TRANSMITTANCE_LUT, + shader_defs: vec![], + entry_point: 
"main".into(), + zero_initialize_workgroup_memory: false, + }); + + let multiscattering_lut = + pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor { + label: Some("multi_scattering_lut_pipeline".into()), + layout: vec![layouts.multiscattering_lut.clone()], + push_constant_ranges: vec![], + shader: shaders::MULTISCATTERING_LUT, + shader_defs: vec![], + entry_point: "main".into(), + zero_initialize_workgroup_memory: false, + }); + + let sky_view_lut = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor { + label: Some("sky_view_lut_pipeline".into()), + layout: vec![layouts.sky_view_lut.clone()], + push_constant_ranges: vec![], + shader: shaders::SKY_VIEW_LUT, + shader_defs: vec![], + entry_point: "main".into(), + zero_initialize_workgroup_memory: false, + }); + + let aerial_view_lut = pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor { + label: Some("aerial_view_lut_pipeline".into()), + layout: vec![layouts.aerial_view_lut.clone()], + push_constant_ranges: vec![], + shader: shaders::AERIAL_VIEW_LUT, + shader_defs: vec![], + entry_point: "main".into(), + zero_initialize_workgroup_memory: false, + }); + + Self { + transmittance_lut, + multiscattering_lut, + sky_view_lut, + aerial_view_lut, + } + } +} + +#[derive(Component)] +pub(crate) struct RenderSkyPipelineId(pub CachedRenderPipelineId); + +#[derive(Copy, Clone, Hash, PartialEq, Eq)] +pub(crate) struct RenderSkyPipelineKey { + pub msaa_samples: u32, + pub hdr: bool, + pub dual_source_blending: bool, +} + +impl SpecializedRenderPipeline for RenderSkyBindGroupLayouts { + type Key = RenderSkyPipelineKey; + + fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor { + let mut shader_defs = Vec::new(); + + if key.msaa_samples > 1 { + shader_defs.push("MULTISAMPLED".into()); + } + if key.hdr { + shader_defs.push("TONEMAP_IN_SHADER".into()); + } + if key.dual_source_blending { + shader_defs.push("DUAL_SOURCE_BLENDING".into()); + } + + let dst_factor = if 
key.dual_source_blending { + BlendFactor::Src1 + } else { + BlendFactor::SrcAlpha + }; + + RenderPipelineDescriptor { + label: Some(format!("render_sky_pipeline_{}", key.msaa_samples).into()), + layout: vec![if key.msaa_samples == 1 { + self.render_sky.clone() + } else { + self.render_sky_msaa.clone() + }], + push_constant_ranges: vec![], + vertex: fullscreen_shader_vertex_state(), + primitive: PrimitiveState::default(), + depth_stencil: None, + multisample: MultisampleState { + count: key.msaa_samples, + mask: !0, + alpha_to_coverage_enabled: false, + }, + zero_initialize_workgroup_memory: false, + fragment: Some(FragmentState { + shader: shaders::RENDER_SKY.clone(), + shader_defs, + entry_point: "main".into(), + targets: vec![Some(ColorTargetState { + format: TextureFormat::Rgba16Float, + blend: Some(BlendState { + color: BlendComponent { + src_factor: BlendFactor::One, + dst_factor, + operation: BlendOperation::Add, + }, + alpha: BlendComponent { + src_factor: BlendFactor::Zero, + dst_factor: BlendFactor::One, + operation: BlendOperation::Add, + }, + }), + write_mask: ColorWrites::ALL, + })], + }), + } + } +} + +pub(super) fn queue_render_sky_pipelines( + views: Query<(Entity, &Camera, &Msaa), With>, + pipeline_cache: Res, + layouts: Res, + mut specializer: ResMut>, + render_device: Res, + mut commands: Commands, +) { + for (entity, camera, msaa) in &views { + let id = specializer.specialize( + &pipeline_cache, + &layouts, + RenderSkyPipelineKey { + msaa_samples: msaa.samples(), + hdr: camera.hdr, + dual_source_blending: render_device + .features() + .contains(WgpuFeatures::DUAL_SOURCE_BLENDING), + }, + ); + commands.entity(entity).insert(RenderSkyPipelineId(id)); + } +} + +#[derive(Component)] +pub struct AtmosphereTextures { + pub transmittance_lut: CachedTexture, + pub multiscattering_lut: CachedTexture, + pub sky_view_lut: CachedTexture, + pub aerial_view_lut: CachedTexture, +} + +pub(super) fn prepare_atmosphere_textures( + views: Query<(Entity, 
&AtmosphereSettings), With>, + render_device: Res, + mut texture_cache: ResMut, + mut commands: Commands, +) { + for (entity, lut_settings) in &views { + let transmittance_lut = texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("transmittance_lut"), + size: Extent3d { + width: lut_settings.transmittance_lut_size.x, + height: lut_settings.transmittance_lut_size.y, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D2, + format: TextureFormat::Rgba16Float, + usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING, + view_formats: &[], + }, + ); + + let multiscattering_lut = texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("multiscattering_lut"), + size: Extent3d { + width: lut_settings.multiscattering_lut_size.x, + height: lut_settings.multiscattering_lut_size.y, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D2, + format: TextureFormat::Rgba16Float, + usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING, + view_formats: &[], + }, + ); + + let sky_view_lut = texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("sky_view_lut"), + size: Extent3d { + width: lut_settings.sky_view_lut_size.x, + height: lut_settings.sky_view_lut_size.y, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D2, + format: TextureFormat::Rgba16Float, + usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING, + view_formats: &[], + }, + ); + + let aerial_view_lut = texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("aerial_view_lut"), + size: Extent3d { + width: lut_settings.aerial_view_lut_size.x, + height: lut_settings.aerial_view_lut_size.y, + depth_or_array_layers: lut_settings.aerial_view_lut_size.z, + }, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D3, + format: 
TextureFormat::Rgba16Float, + usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING, + view_formats: &[], + }, + ); + + commands.entity(entity).insert({ + AtmosphereTextures { + transmittance_lut, + multiscattering_lut, + sky_view_lut, + aerial_view_lut, + } + }); + } +} + +#[derive(Resource, Default)] +pub struct AtmosphereTransforms { + uniforms: DynamicUniformBuffer, +} + +impl AtmosphereTransforms { + #[inline] + pub fn uniforms(&self) -> &DynamicUniformBuffer { + &self.uniforms + } +} + +#[derive(ShaderType)] +pub struct AtmosphereTransform { + world_from_atmosphere: Mat4, + atmosphere_from_world: Mat4, +} + +#[derive(Component)] +pub struct AtmosphereTransformsOffset { + index: u32, +} + +impl AtmosphereTransformsOffset { + #[inline] + pub fn index(&self) -> u32 { + self.index + } +} + +pub(super) fn prepare_atmosphere_transforms( + views: Query<(Entity, &ExtractedView), (With, With)>, + render_device: Res, + render_queue: Res, + mut atmo_uniforms: ResMut, + mut commands: Commands, +) { + let atmo_count = views.iter().len(); + let Some(mut writer) = + atmo_uniforms + .uniforms + .get_writer(atmo_count, &render_device, &render_queue) + else { + return; + }; + + for (entity, view) in &views { + let world_from_view = view.world_from_view.compute_matrix(); + let camera_z = world_from_view.z_axis.truncate(); + let camera_y = world_from_view.y_axis.truncate(); + let atmo_z = camera_z + .with_y(0.0) + .try_normalize() + .unwrap_or_else(|| camera_y.with_y(0.0).normalize()); + let atmo_y = Vec3::Y; + let atmo_x = atmo_y.cross(atmo_z).normalize(); + let world_from_atmosphere = Mat4::from_cols( + atmo_x.extend(0.0), + atmo_y.extend(0.0), + atmo_z.extend(0.0), + world_from_view.w_axis, + ); + + let atmosphere_from_world = world_from_atmosphere.inverse(); + + commands.entity(entity).insert(AtmosphereTransformsOffset { + index: writer.write(&AtmosphereTransform { + world_from_atmosphere, + atmosphere_from_world, + }), + }); + } +} + +#[derive(Component)] 
+pub(crate) struct AtmosphereBindGroups { + pub transmittance_lut: BindGroup, + pub multiscattering_lut: BindGroup, + pub sky_view_lut: BindGroup, + pub aerial_view_lut: BindGroup, + pub render_sky: BindGroup, +} + +pub(super) fn prepare_atmosphere_bind_groups( + views: Query< + (Entity, &AtmosphereTextures, &ViewDepthTexture, &Msaa), + (With, With), + >, + render_device: Res, + layouts: Res, + render_sky_layouts: Res, + samplers: Res, + view_uniforms: Res, + lights_uniforms: Res, + atmosphere_transforms: Res, + atmosphere_uniforms: Res>, + settings_uniforms: Res>, + + mut commands: Commands, +) { + if views.iter().len() == 0 { + return; + } + + let atmosphere_binding = atmosphere_uniforms + .binding() + .expect("Failed to prepare atmosphere bind groups. Atmosphere uniform buffer missing"); + + let transforms_binding = atmosphere_transforms + .uniforms() + .binding() + .expect("Failed to prepare atmosphere bind groups. Atmosphere transforms buffer missing"); + + let settings_binding = settings_uniforms.binding().expect( + "Failed to prepare atmosphere bind groups. AtmosphereSettings uniform buffer missing", + ); + + let view_binding = view_uniforms + .uniforms + .binding() + .expect("Failed to prepare atmosphere bind groups. View uniform buffer missing"); + + let lights_binding = lights_uniforms + .view_gpu_lights + .binding() + .expect("Failed to prepare atmosphere bind groups. 
Lights uniform buffer missing"); + + for (entity, textures, view_depth_texture, msaa) in &views { + let transmittance_lut = render_device.create_bind_group( + "transmittance_lut_bind_group", + &layouts.transmittance_lut, + &BindGroupEntries::with_indices(( + (0, atmosphere_binding.clone()), + (1, settings_binding.clone()), + (13, &textures.transmittance_lut.default_view), + )), + ); + + let multiscattering_lut = render_device.create_bind_group( + "multiscattering_lut_bind_group", + &layouts.multiscattering_lut, + &BindGroupEntries::with_indices(( + (0, atmosphere_binding.clone()), + (1, settings_binding.clone()), + (5, &textures.transmittance_lut.default_view), + (6, &samplers.transmittance_lut), + (13, &textures.multiscattering_lut.default_view), + )), + ); + + let sky_view_lut = render_device.create_bind_group( + "sky_view_lut_bind_group", + &layouts.sky_view_lut, + &BindGroupEntries::with_indices(( + (0, atmosphere_binding.clone()), + (1, settings_binding.clone()), + (2, transforms_binding.clone()), + (3, view_binding.clone()), + (4, lights_binding.clone()), + (5, &textures.transmittance_lut.default_view), + (6, &samplers.transmittance_lut), + (7, &textures.multiscattering_lut.default_view), + (8, &samplers.multiscattering_lut), + (13, &textures.sky_view_lut.default_view), + )), + ); + + let aerial_view_lut = render_device.create_bind_group( + "sky_view_lut_bind_group", + &layouts.aerial_view_lut, + &BindGroupEntries::with_indices(( + (0, atmosphere_binding.clone()), + (1, settings_binding.clone()), + (3, view_binding.clone()), + (4, lights_binding.clone()), + (5, &textures.transmittance_lut.default_view), + (6, &samplers.transmittance_lut), + (7, &textures.multiscattering_lut.default_view), + (8, &samplers.multiscattering_lut), + (13, &textures.aerial_view_lut.default_view), + )), + ); + + let render_sky = render_device.create_bind_group( + "render_sky_bind_group", + if *msaa == Msaa::Off { + &render_sky_layouts.render_sky + } else { + 
&render_sky_layouts.render_sky_msaa + }, + &BindGroupEntries::with_indices(( + (0, atmosphere_binding.clone()), + (1, settings_binding.clone()), + (2, transforms_binding.clone()), + (3, view_binding.clone()), + (4, lights_binding.clone()), + (5, &textures.transmittance_lut.default_view), + (6, &samplers.transmittance_lut), + (9, &textures.sky_view_lut.default_view), + (10, &samplers.sky_view_lut), + (11, &textures.aerial_view_lut.default_view), + (12, &samplers.aerial_view_lut), + (13, view_depth_texture.view()), + )), + ); + + commands.entity(entity).insert(AtmosphereBindGroups { + transmittance_lut, + multiscattering_lut, + sky_view_lut, + aerial_view_lut, + render_sky, + }); + } +} diff --git a/crates/bevy_pbr/src/atmosphere/sky_view_lut.wgsl b/crates/bevy_pbr/src/atmosphere/sky_view_lut.wgsl new file mode 100644 index 0000000000000..cf3d95b173c42 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/sky_view_lut.wgsl @@ -0,0 +1,72 @@ +#import bevy_pbr::{ + mesh_view_types::Lights, + atmosphere::{ + types::{Atmosphere, AtmosphereSettings}, + bindings::{atmosphere, view, settings}, + functions::{ + sample_atmosphere, get_local_up, AtmosphereSample, + sample_local_inscattering, get_local_r, view_radius, + max_atmosphere_distance, direction_atmosphere_to_world, + sky_view_lut_uv_to_zenith_azimuth, zenith_azimuth_to_ray_dir, + MIDPOINT_RATIO + }, + } +} + +#import bevy_render::{ + view::View, + maths::HALF_PI, +} +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(13) var sky_view_lut_out: texture_storage_2d; + +@compute +@workgroup_size(16, 16, 1) +fn main(@builtin(global_invocation_id) idx: vec3) { + let uv = vec2(idx.xy) / vec2(settings.sky_view_lut_size); + + let r = view_radius(); + var zenith_azimuth = sky_view_lut_uv_to_zenith_azimuth(r, uv); + + let ray_dir_as = zenith_azimuth_to_ray_dir(zenith_azimuth.x, zenith_azimuth.y); + let ray_dir_ws = direction_atmosphere_to_world(ray_dir_as); + + let mu = ray_dir_ws.y; + 
let t_max = max_atmosphere_distance(r, mu); + + let sample_count = mix(1.0, f32(settings.sky_view_lut_samples), clamp(t_max * 0.01, 0.0, 1.0)); + var total_inscattering = vec3(0.0); + var throughput = vec3(1.0); + var prev_t = 0.0; + for (var s = 0.0; s < sample_count; s += 1.0) { + let t_i = t_max * (s + MIDPOINT_RATIO) / sample_count; + let dt_i = (t_i - prev_t); + prev_t = t_i; + + let local_r = get_local_r(r, mu, t_i); + let local_up = get_local_up(r, t_i, ray_dir_ws); + let local_atmosphere = sample_atmosphere(local_r); + + let sample_optical_depth = local_atmosphere.extinction * dt_i; + let sample_transmittance = exp(-sample_optical_depth); + + let inscattering = sample_local_inscattering( + local_atmosphere, + ray_dir_ws, + local_r, + local_up + ); + + // Analytical integration of the single scattering term in the radiance transfer equation + let s_int = (inscattering - inscattering * sample_transmittance) / local_atmosphere.extinction; + total_inscattering += throughput * s_int; + + throughput *= sample_transmittance; + if all(throughput < vec3(0.001)) { + break; + } + } + + textureStore(sky_view_lut_out, idx.xy, vec4(total_inscattering, 1.0)); +} diff --git a/crates/bevy_pbr/src/atmosphere/transmittance_lut.wgsl b/crates/bevy_pbr/src/atmosphere/transmittance_lut.wgsl new file mode 100644 index 0000000000000..233391e1c83f8 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/transmittance_lut.wgsl @@ -0,0 +1,48 @@ +#import bevy_pbr::atmosphere::{ + types::{Atmosphere, AtmosphereSettings}, + bindings::{settings, atmosphere}, + functions::{AtmosphereSample, sample_atmosphere, get_local_r, max_atmosphere_distance, MIDPOINT_RATIO}, + bruneton_functions::{transmittance_lut_uv_to_r_mu, distance_to_bottom_atmosphere_boundary, distance_to_top_atmosphere_boundary}, +} + + +#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput + +@group(0) @binding(13) var transmittance_lut_out: texture_storage_2d; + +@compute +@workgroup_size(16, 16, 1) +fn 
main(@builtin(global_invocation_id) idx: vec3) { + let uv: vec2 = (vec2(idx.xy) + 0.5) / vec2(settings.transmittance_lut_size); + // map UV coordinates to view height (r) and zenith cos angle (mu) + let r_mu = transmittance_lut_uv_to_r_mu(uv); + + // compute the optical depth from view height r to the top atmosphere boundary + let optical_depth = ray_optical_depth(r_mu.x, r_mu.y, settings.transmittance_lut_samples); + let transmittance = exp(-optical_depth); + + textureStore(transmittance_lut_out, idx.xy, vec4(transmittance, 1.0)); +} + +/// Compute the optical depth of the atmosphere from the ground to the top atmosphere boundary +/// at a given view height (r) and zenith cos angle (mu) +fn ray_optical_depth(r: f32, mu: f32, sample_count: u32) -> vec3 { + let t_max = max_atmosphere_distance(r, mu); + var optical_depth = vec3(0.0f); + var prev_t = 0.0f; + + for (var i = 0u; i < sample_count; i++) { + let t_i = t_max * (f32(i) + MIDPOINT_RATIO) / f32(sample_count); + let dt = t_i - prev_t; + prev_t = t_i; + + let r_i = get_local_r(r, mu, t_i); + + let atmosphere_sample = sample_atmosphere(r_i); + let sample_optical_depth = atmosphere_sample.extinction * dt; + + optical_depth += sample_optical_depth; + } + + return optical_depth; +} diff --git a/crates/bevy_pbr/src/atmosphere/types.wgsl b/crates/bevy_pbr/src/atmosphere/types.wgsl new file mode 100644 index 0000000000000..78e9e9a717192 --- /dev/null +++ b/crates/bevy_pbr/src/atmosphere/types.wgsl @@ -0,0 +1,45 @@ +#define_import_path bevy_pbr::atmosphere::types + +struct Atmosphere { + // Radius of the planet + bottom_radius: f32, // units: m + + // Radius at which we consider the atmosphere to 'end' for out calculations (from center of planet) + top_radius: f32, // units: m + + ground_albedo: vec3, + + rayleigh_density_exp_scale: f32, + rayleigh_scattering: vec3, + + mie_density_exp_scale: f32, + mie_scattering: f32, // units: m^-1 + mie_absorption: f32, // units: m^-1 + mie_asymmetry: f32, // the "asymmetry" value 
of the phase function, unitless. Domain: (-1, 1) + + ozone_layer_altitude: f32, // units: m + ozone_layer_width: f32, // units: m + ozone_absorption: vec3, // ozone absorption. units: m^-1 +} + +struct AtmosphereSettings { + transmittance_lut_size: vec2, + multiscattering_lut_size: vec2, + sky_view_lut_size: vec2, + aerial_view_lut_size: vec3, + transmittance_lut_samples: u32, + multiscattering_lut_dirs: u32, + multiscattering_lut_samples: u32, + sky_view_lut_samples: u32, + aerial_view_lut_samples: u32, + aerial_view_lut_max_distance: f32, + scene_units_to_m: f32, +} + + +// "Atmosphere space" is just the view position with y=0 and oriented horizontally, +// so the horizon stays a horizontal line in our luts +struct AtmosphereTransforms { + world_from_atmosphere: mat4x4, + atmosphere_from_world: mat4x4, +} diff --git a/crates/bevy_pbr/src/bundle.rs b/crates/bevy_pbr/src/bundle.rs deleted file mode 100644 index bdfdd695f5904..0000000000000 --- a/crates/bevy_pbr/src/bundle.rs +++ /dev/null @@ -1,214 +0,0 @@ -#![expect(deprecated)] - -use crate::{ - CascadeShadowConfig, Cascades, DirectionalLight, Material, MeshMaterial3d, PointLight, - SpotLight, StandardMaterial, -}; -use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::{ - bundle::Bundle, - component::Component, - entity::{Entity, EntityHashMap}, - reflect::ReflectComponent, -}; -use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -use bevy_render::sync_world::MainEntity; -use bevy_render::{ - mesh::Mesh3d, - primitives::{CascadesFrusta, CubemapFrusta, Frustum}, - sync_world::SyncToRenderWorld, - view::{InheritedVisibility, ViewVisibility, Visibility}, -}; -use bevy_transform::components::{GlobalTransform, Transform}; - -/// A component bundle for PBR entities with a [`Mesh3d`] and a [`MeshMaterial3d`]. -#[deprecated( - since = "0.15.0", - note = "Use the `Mesh3d` and `MeshMaterial3d` components instead. Inserting them will now also insert the other components required by them automatically." 
-)] -pub type PbrBundle = MaterialMeshBundle; - -/// A component bundle for entities with a [`Mesh3d`] and a [`MeshMaterial3d`]. -#[derive(Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `Mesh3d` and `MeshMaterial3d` components instead. Inserting them will now also insert the other components required by them automatically." -)] -pub struct MaterialMeshBundle { - pub mesh: Mesh3d, - pub material: MeshMaterial3d, - pub transform: Transform, - pub global_transform: GlobalTransform, - /// User indication of whether an entity is visible - pub visibility: Visibility, - /// Inherited visibility of an entity. - pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering - pub view_visibility: ViewVisibility, -} - -impl Default for MaterialMeshBundle { - fn default() -> Self { - Self { - mesh: Default::default(), - material: Default::default(), - transform: Default::default(), - global_transform: Default::default(), - visibility: Default::default(), - inherited_visibility: Default::default(), - view_visibility: Default::default(), - } - } -} - -/// Collection of mesh entities visible for 3D lighting. -/// -/// This component contains all mesh entities visible from the current light view. -/// The collection is updated automatically by [`crate::SimulationLightSystems`]. 
-#[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)] -#[reflect(Component, Debug, Default)] -pub struct VisibleMeshEntities { - #[reflect(ignore)] - pub entities: Vec, -} - -#[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)] -#[reflect(Component, Debug, Default)] -pub struct RenderVisibleMeshEntities { - #[reflect(ignore)] - pub entities: Vec<(Entity, MainEntity)>, -} - -#[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Debug, Default)] -pub struct CubemapVisibleEntities { - #[reflect(ignore)] - data: [VisibleMeshEntities; 6], -} - -impl CubemapVisibleEntities { - pub fn get(&self, i: usize) -> &VisibleMeshEntities { - &self.data[i] - } - - pub fn get_mut(&mut self, i: usize) -> &mut VisibleMeshEntities { - &mut self.data[i] - } - - pub fn iter(&self) -> impl DoubleEndedIterator { - self.data.iter() - } - - pub fn iter_mut(&mut self) -> impl DoubleEndedIterator { - self.data.iter_mut() - } -} - -#[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Debug, Default)] -pub struct RenderCubemapVisibleEntities { - #[reflect(ignore)] - pub(crate) data: [RenderVisibleMeshEntities; 6], -} - -impl RenderCubemapVisibleEntities { - pub fn get(&self, i: usize) -> &RenderVisibleMeshEntities { - &self.data[i] - } - - pub fn get_mut(&mut self, i: usize) -> &mut RenderVisibleMeshEntities { - &mut self.data[i] - } - - pub fn iter(&self) -> impl DoubleEndedIterator { - self.data.iter() - } - - pub fn iter_mut(&mut self) -> impl DoubleEndedIterator { - self.data.iter_mut() - } -} - -#[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component)] -pub struct CascadesVisibleEntities { - /// Map of view entity to the visible entities for each cascade frustum. 
- #[reflect(ignore)] - pub entities: EntityHashMap>, -} - -#[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component)] -pub struct RenderCascadesVisibleEntities { - /// Map of view entity to the visible entities for each cascade frustum. - #[reflect(ignore)] - pub entities: EntityHashMap>, -} - -/// A component bundle for [`PointLight`] entities. -#[derive(Debug, Bundle, Default, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `PointLight` component instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct PointLightBundle { - pub point_light: PointLight, - pub cubemap_visible_entities: CubemapVisibleEntities, - pub cubemap_frusta: CubemapFrusta, - pub transform: Transform, - pub global_transform: GlobalTransform, - /// Enables or disables the light - pub visibility: Visibility, - /// Inherited visibility of an entity. - pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering - pub view_visibility: ViewVisibility, - /// Marker component that indicates that its entity needs to be synchronized to the render world - pub sync: SyncToRenderWorld, -} - -/// A component bundle for spot light entities -#[derive(Debug, Bundle, Default, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `SpotLight` component instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct SpotLightBundle { - pub spot_light: SpotLight, - pub visible_entities: VisibleMeshEntities, - pub frustum: Frustum, - pub transform: Transform, - pub global_transform: GlobalTransform, - /// Enables or disables the light - pub visibility: Visibility, - /// Inherited visibility of an entity. 
- pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering - pub view_visibility: ViewVisibility, - /// Marker component that indicates that its entity needs to be synchronized to the render world - pub sync: SyncToRenderWorld, -} - -/// A component bundle for [`DirectionalLight`] entities. -#[derive(Debug, Bundle, Default, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `DirectionalLight` component instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct DirectionalLightBundle { - pub directional_light: DirectionalLight, - pub frusta: CascadesFrusta, - pub cascades: Cascades, - pub cascade_shadow_config: CascadeShadowConfig, - pub visible_entities: CascadesVisibleEntities, - pub transform: Transform, - pub global_transform: GlobalTransform, - /// Enables or disables the light - pub visibility: Visibility, - /// Inherited visibility of an entity. 
- pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering - pub view_visibility: ViewVisibility, - /// Marker component that indicates that its entity needs to be synchronized to the render world - pub sync: SyncToRenderWorld, -} diff --git a/crates/bevy_pbr/src/cluster/assign.rs b/crates/bevy_pbr/src/cluster/assign.rs index 69de548b57ae6..1b7b3563d75c8 100644 --- a/crates/bevy_pbr/src/cluster/assign.rs +++ b/crates/bevy_pbr/src/cluster/assign.rs @@ -13,21 +13,22 @@ use bevy_render::{ camera::Camera, primitives::{Aabb, Frustum, HalfSpace, Sphere}, render_resource::BufferBindingType, - renderer::RenderDevice, + renderer::{RenderAdapter, RenderDevice}, view::{RenderLayers, ViewVisibility}, }; use bevy_transform::components::GlobalTransform; -use bevy_utils::{prelude::default, tracing::warn}; +use bevy_utils::prelude::default; +use tracing::warn; use crate::{ - prelude::EnvironmentMapLight, ClusterConfig, ClusterFarZMode, Clusters, ExtractedPointLight, - GlobalVisibleClusterableObjects, LightProbe, PointLight, SpotLight, ViewClusterBindings, - VisibleClusterableObjects, VolumetricLight, CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT, + decal::{self, clustered::ClusteredDecal}, + prelude::EnvironmentMapLight, + ClusterConfig, ClusterFarZMode, Clusters, ExtractedPointLight, GlobalVisibleClusterableObjects, + LightProbe, PointLight, SpotLight, ViewClusterBindings, VisibleClusterableObjects, + VolumetricLight, CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT, MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS, }; -use super::ClusterableObjectOrderData; - const NDC_MIN: Vec2 = Vec2::NEG_ONE; const NDC_MAX: Vec2 = Vec2::ONE; @@ -38,6 +39,8 @@ const VEC2_HALF_NEGATIVE_Y: Vec2 = Vec2::new(0.5, -0.5); #[derive(Clone, Debug)] pub(crate) struct ClusterableObjectAssignmentData { entity: Entity, + // TODO: We currently ignore the scale on the transform. This is confusing. + // Replace with an `Isometry3d`. 
transform: GlobalTransform, range: f32, object_type: ClusterableObjectType, @@ -91,6 +94,9 @@ pub(crate) enum ClusterableObjectType { /// Marks that the clusterable object is an irradiance volume. IrradianceVolume, + + /// Marks that the clusterable object is a decal. + Decal, } impl ClusterableObjectType { @@ -114,6 +120,7 @@ impl ClusterableObjectType { } => (1, !shadows_enabled, !volumetric), ClusterableObjectType::ReflectionProbe => (2, false, false), ClusterableObjectType::IrradianceVolume => (3, false, false), + ClusterableObjectType::Decal => (4, false, false), } } @@ -136,7 +143,6 @@ impl ClusterableObjectType { } // NOTE: Run this before update_point_light_frusta! -#[allow(clippy::too_many_arguments)] pub(crate) fn assign_objects_to_clusters( mut commands: Commands, mut global_clusterable_objects: ResMut, @@ -170,12 +176,13 @@ pub(crate) fn assign_objects_to_clusters( (Entity, &GlobalTransform, Has), With, >, + decals_query: Query<(Entity, &GlobalTransform), With>, mut clusterable_objects: Local>, mut cluster_aabb_spheres: Local>>, mut max_clusterable_objects_warning_emitted: Local, - render_device: Option>, + (render_device, render_adapter): (Option>, Option>), ) { - let Some(render_device) = render_device else { + let (Some(render_device), Some(render_adapter)) = (render_device, render_adapter) else { return; }; @@ -251,19 +258,26 @@ pub(crate) fn assign_objects_to_clusters( )); } + // Add decals if the current platform supports them. 
+ if decal::clustered::clustered_decals_are_usable(&render_device, &render_adapter) { + clusterable_objects.extend(decals_query.iter().map(|(entity, transform)| { + ClusterableObjectAssignmentData { + entity, + transform: *transform, + range: transform.scale().length(), + object_type: ClusterableObjectType::Decal, + render_layers: RenderLayers::default(), + } + })); + } + if clusterable_objects.len() > MAX_UNIFORM_BUFFER_CLUSTERABLE_OBJECTS && !supports_storage_buffers { - clusterable_objects.sort_by(|clusterable_object_1, clusterable_object_2| { - crate::clusterable_object_order( - ClusterableObjectOrderData { - entity: &clusterable_object_1.entity, - object_type: &clusterable_object_1.object_type, - }, - ClusterableObjectOrderData { - entity: &clusterable_object_2.entity, - object_type: &clusterable_object_2.object_type, - }, + clusterable_objects.sort_by_cached_key(|clusterable_object| { + ( + clusterable_object.object_type.ordering(), + clusterable_object.entity, ) }); @@ -482,7 +496,7 @@ pub(crate) fn assign_objects_to_clusters( // initialize empty cluster bounding spheres cluster_aabb_spheres.clear(); - cluster_aabb_spheres.extend(core::iter::repeat(None).take(cluster_count)); + cluster_aabb_spheres.extend(core::iter::repeat_n(None, cluster_count)); // Calculate the x/y/z cluster frustum planes in view space let mut x_planes = Vec::with_capacity(clusters.dimensions.x as usize + 1); @@ -616,6 +630,10 @@ pub(crate) fn assign_objects_to_clusters( angle_cos, )) } + ClusterableObjectType::Decal => { + // TODO: cull via a frustum + None + } ClusterableObjectType::PointLight { .. } | ClusterableObjectType::ReflectionProbe | ClusterableObjectType::IrradianceVolume => None, @@ -826,6 +844,21 @@ pub(crate) fn assign_objects_to_clusters( cluster_index += clusters.dimensions.z as usize; } } + + ClusterableObjectType::Decal => { + // Decals currently affect all clusters in their + // bounding sphere. + // + // TODO: Cull more aggressively based on the + // decal's OBB. 
+ for _ in min_x..=max_x { + clusters.clusterable_objects[cluster_index] + .entities + .push(clusterable_object.entity); + clusters.clusterable_objects[cluster_index].counts.decals += 1; + cluster_index += clusters.dimensions.z as usize; + } + } } } } @@ -849,7 +882,6 @@ pub(crate) fn assign_objects_to_clusters( } } -#[allow(clippy::too_many_arguments)] fn compute_aabb_for_cluster( z_near: f32, z_far: f32, diff --git a/crates/bevy_pbr/src/cluster/mod.rs b/crates/bevy_pbr/src/cluster/mod.rs index 41932a2aaadc1..3113333be303f 100644 --- a/crates/bevy_pbr/src/cluster/mod.rs +++ b/crates/bevy_pbr/src/cluster/mod.rs @@ -2,17 +2,18 @@ use core::num::NonZero; -use self::assign::ClusterableObjectType; use bevy_core_pipeline::core_3d::Camera3d; use bevy_ecs::{ component::Component, entity::{Entity, EntityHashMap}, query::{With, Without}, reflect::ReflectComponent, - system::{Commands, Query, Res, Resource}, + resource::Resource, + system::{Commands, Query, Res}, world::{FromWorld, World}, }; use bevy_math::{uvec4, AspectRatio, UVec2, UVec3, UVec4, Vec3Swizzles as _, Vec4}; +use bevy_platform::collections::HashSet; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ camera::Camera, @@ -24,7 +25,7 @@ use bevy_render::{ sync_world::RenderEntity, Extract, }; -use bevy_utils::{tracing::warn, HashSet}; +use tracing::warn; pub(crate) use crate::cluster::assign::assign_objects_to_clusters; use crate::MeshPipeline; @@ -65,6 +66,7 @@ const CLUSTER_COUNT_MASK: u32 = (1 << CLUSTER_COUNT_SIZE) - 1; /// Configure the far z-plane mode used for the furthest depth slice for clustered forward /// rendering #[derive(Debug, Copy, Clone, Reflect)] +#[reflect(Clone)] pub enum ClusterFarZMode { /// Calculate the required maximum z-depth based on currently visible /// clusterable objects. 
Makes better use of available clusters, speeding @@ -77,7 +79,7 @@ pub enum ClusterFarZMode { /// Configure the depth-slicing strategy for clustered forward rendering #[derive(Debug, Copy, Clone, Reflect)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct ClusterZConfig { /// Far `Z` plane of the first depth slice pub first_slice_depth: f32, @@ -87,7 +89,7 @@ pub struct ClusterZConfig { /// Configuration of the clustering strategy for clustered forward rendering #[derive(Debug, Copy, Clone, Component, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub enum ClusterConfig { /// Disable cluster calculations for this view None, @@ -203,6 +205,8 @@ struct ClusterableObjectCounts { reflection_probes: u32, /// The number of irradiance volumes in the cluster. irradiance_volumes: u32, + /// The number of decals in the cluster. + decals: u32, } enum ExtractedClusterableObjectElement { @@ -516,34 +520,6 @@ impl Default for GpuClusterableObjectsUniform { } } -pub(crate) struct ClusterableObjectOrderData<'a> { - pub(crate) entity: &'a Entity, - pub(crate) object_type: &'a ClusterableObjectType, -} - -#[allow(clippy::too_many_arguments)] -// Sort clusterable objects by: -// -// * object type, so that we can iterate point lights, spot lights, etc. in -// contiguous blocks in the fragment shader, -// -// * then those with shadows enabled first, so that the index can be used to -// render at most `point_light_shadow_maps_count` point light shadows and -// `spot_light_shadow_maps_count` spot light shadow maps, -// -// * then by entity as a stable key to ensure that a consistent set of -// clusterable objects are chosen if the clusterable object count limit is -// exceeded. 
-pub(crate) fn clusterable_object_order( - a: ClusterableObjectOrderData, - b: ClusterableObjectOrderData, -) -> core::cmp::Ordering { - a.object_type - .ordering() - .cmp(&b.object_type.ordering()) - .then_with(|| a.entity.cmp(b.entity)) // stable -} - /// Extracts clusters from the main world from the render world. pub fn extract_clusters( mut commands: Commands, @@ -700,7 +676,7 @@ impl ViewClusterBindings { counts.spot_lights, counts.reflection_probes, ), - uvec4(counts.irradiance_volumes, 0, 0, 0), + uvec4(counts.irradiance_volumes, counts.decals, 0, 0), ]); } } @@ -853,7 +829,7 @@ impl ViewClusterBuffers { // the number of light probes is irrelevant. fn pack_offset_and_counts(offset: usize, point_count: u32, spot_count: u32) -> u32 { ((offset as u32 & CLUSTER_OFFSET_MASK) << (CLUSTER_COUNT_SIZE * 2)) - | (point_count & CLUSTER_COUNT_MASK) << CLUSTER_COUNT_SIZE + | ((point_count & CLUSTER_COUNT_MASK) << CLUSTER_COUNT_SIZE) | (spot_count & CLUSTER_COUNT_MASK) } diff --git a/crates/bevy_pbr/src/components.rs b/crates/bevy_pbr/src/components.rs new file mode 100644 index 0000000000000..fca31b3b034af --- /dev/null +++ b/crates/bevy_pbr/src/components.rs @@ -0,0 +1,89 @@ +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::Component; +use bevy_ecs::entity::{Entity, EntityHashMap}; +use bevy_ecs::reflect::ReflectComponent; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; +use bevy_render::sync_world::MainEntity; +/// Collection of mesh entities visible for 3D lighting. +/// +/// This component contains all mesh entities visible from the current light view. +/// The collection is updated automatically by [`crate::SimulationLightSystems`]. 
+#[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)] +#[reflect(Component, Debug, Default, Clone)] +pub struct VisibleMeshEntities { + #[reflect(ignore, clone)] + pub entities: Vec, +} + +#[derive(Component, Clone, Debug, Default, Reflect, Deref, DerefMut)] +#[reflect(Component, Debug, Default, Clone)] +pub struct RenderVisibleMeshEntities { + #[reflect(ignore, clone)] + pub entities: Vec<(Entity, MainEntity)>, +} + +#[derive(Component, Clone, Debug, Default, Reflect)] +#[reflect(Component, Debug, Default, Clone)] +pub struct CubemapVisibleEntities { + #[reflect(ignore, clone)] + data: [VisibleMeshEntities; 6], +} + +impl CubemapVisibleEntities { + pub fn get(&self, i: usize) -> &VisibleMeshEntities { + &self.data[i] + } + + pub fn get_mut(&mut self, i: usize) -> &mut VisibleMeshEntities { + &mut self.data[i] + } + + pub fn iter(&self) -> impl DoubleEndedIterator { + self.data.iter() + } + + pub fn iter_mut(&mut self) -> impl DoubleEndedIterator { + self.data.iter_mut() + } +} + +#[derive(Component, Clone, Debug, Default, Reflect)] +#[reflect(Component, Debug, Default, Clone)] +pub struct RenderCubemapVisibleEntities { + #[reflect(ignore, clone)] + pub(crate) data: [RenderVisibleMeshEntities; 6], +} + +impl RenderCubemapVisibleEntities { + pub fn get(&self, i: usize) -> &RenderVisibleMeshEntities { + &self.data[i] + } + + pub fn get_mut(&mut self, i: usize) -> &mut RenderVisibleMeshEntities { + &mut self.data[i] + } + + pub fn iter(&self) -> impl DoubleEndedIterator { + self.data.iter() + } + + pub fn iter_mut(&mut self) -> impl DoubleEndedIterator { + self.data.iter_mut() + } +} + +#[derive(Component, Clone, Debug, Default, Reflect)] +#[reflect(Component, Default, Clone)] +pub struct CascadesVisibleEntities { + /// Map of view entity to the visible entities for each cascade frustum. 
+ #[reflect(ignore, clone)] + pub entities: EntityHashMap>, +} + +#[derive(Component, Clone, Debug, Default, Reflect)] +#[reflect(Component, Default, Clone)] +pub struct RenderCascadesVisibleEntities { + /// Map of view entity to the visible entities for each cascade frustum. + #[reflect(ignore, clone)] + pub entities: EntityHashMap>, +} diff --git a/crates/bevy_pbr/src/decal/clustered.rs b/crates/bevy_pbr/src/decal/clustered.rs new file mode 100644 index 0000000000000..5272bce80c64d --- /dev/null +++ b/crates/bevy_pbr/src/decal/clustered.rs @@ -0,0 +1,385 @@ +//! Clustered decals, bounding regions that project textures onto surfaces. +//! +//! A *clustered decal* is a bounding box that projects a texture onto any +//! surface within its bounds along the positive Z axis. In Bevy, clustered +//! decals use the *clustered forward* rendering technique. +//! +//! Clustered decals are the highest-quality types of decals that Bevy supports, +//! but they require bindless textures. This means that they presently can't be +//! used on WebGL 2, WebGPU, macOS, or iOS. Bevy's clustered decals can be used +//! with forward or deferred rendering and don't require a prepass. +//! +//! On their own, clustered decals only project the base color of a texture. You +//! can, however, use the built-in *tag* field to customize the appearance of a +//! clustered decal arbitrarily. See the documentation in `clustered.wgsl` for +//! more information and the `clustered_decals` example for an example of use. 
+ +use core::{num::NonZero, ops::Deref}; + +use bevy_app::{App, Plugin}; +use bevy_asset::{load_internal_asset, weak_handle, AssetId, Handle}; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{ + component::Component, + entity::{Entity, EntityHashMap}, + prelude::ReflectComponent, + query::With, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{Query, Res, ResMut}, +}; +use bevy_image::Image; +use bevy_math::Mat4; +use bevy_platform::collections::HashMap; +use bevy_reflect::Reflect; +use bevy_render::{ + extract_component::{ExtractComponent, ExtractComponentPlugin}, + render_asset::RenderAssets, + render_resource::{ + binding_types, BindGroupLayoutEntryBuilder, Buffer, BufferUsages, RawBufferVec, Sampler, + SamplerBindingType, Shader, ShaderType, TextureSampleType, TextureView, + }, + renderer::{RenderAdapter, RenderDevice, RenderQueue}, + sync_world::RenderEntity, + texture::{FallbackImage, GpuImage}, + view::{self, ViewVisibility, Visibility, VisibilityClass}, + Extract, ExtractSchedule, Render, RenderApp, RenderSet, +}; +use bevy_transform::{components::GlobalTransform, prelude::Transform}; +use bytemuck::{Pod, Zeroable}; + +use crate::{ + binding_arrays_are_usable, prepare_lights, GlobalClusterableObjectMeta, LightVisibilityClass, +}; + +/// The handle to the `clustered.wgsl` shader. +pub(crate) const CLUSTERED_DECAL_SHADER_HANDLE: Handle = + weak_handle!("87929002-3509-42f1-8279-2d2765dd145c"); + +/// The maximum number of decals that can be present in a view. +/// +/// This number is currently relatively low in order to work around the lack of +/// first-class binding arrays in `wgpu`. When that feature is implemented, this +/// limit can be increased. +pub(crate) const MAX_VIEW_DECALS: usize = 8; + +/// A plugin that adds support for clustered decals. +/// +/// In environments where bindless textures aren't available, clustered decals +/// can still be added to a scene, but they won't project any decals. 
+pub struct ClusteredDecalPlugin; + +/// An object that projects a decal onto surfaces within its bounds. +/// +/// Conceptually, a clustered decal is a 1×1×1 cube centered on its origin. It +/// projects the given [`Self::image`] onto surfaces in the +Z direction (thus +/// you may find [`Transform::looking_at`] useful). +/// +/// Clustered decals are the highest-quality types of decals that Bevy supports, +/// but they require bindless textures. This means that they presently can't be +/// used on WebGL 2, WebGPU, macOS, or iOS. Bevy's clustered decals can be used +/// with forward or deferred rendering and don't require a prepass. +#[derive(Component, Debug, Clone, Reflect, ExtractComponent)] +#[reflect(Component, Debug, Clone)] +#[require(Transform, Visibility, VisibilityClass)] +#[component(on_add = view::add_visibility_class::)] +pub struct ClusteredDecal { + /// The image that the clustered decal projects. + /// + /// This must be a 2D image. If it has an alpha channel, it'll be alpha + /// blended with the underlying surface and/or other decals. All decal + /// images in the scene must use the same sampler. + pub image: Handle, + + /// An application-specific tag you can use for any purpose you want. + /// + /// See the `clustered_decals` example for an example of use. + pub tag: u32, +} + +/// Stores information about all the clustered decals in the scene. +#[derive(Resource, Default)] +pub struct RenderClusteredDecals { + /// Maps an index in the shader binding array to the associated decal image. + /// + /// [`Self::texture_to_binding_index`] holds the inverse mapping. + binding_index_to_textures: Vec>, + /// Maps a decal image to the shader binding array. + /// + /// [`Self::binding_index_to_textures`] holds the inverse mapping. + texture_to_binding_index: HashMap, u32>, + /// The information concerning each decal that we provide to the shader. 
+ decals: Vec, + /// Maps the [`bevy_render::sync_world::RenderEntity`] of each decal to the + /// index of that decal in the [`Self::decals`] list. + entity_to_decal_index: EntityHashMap, +} + +impl RenderClusteredDecals { + /// Clears out this [`RenderClusteredDecals`] in preparation for a new + /// frame. + fn clear(&mut self) { + self.binding_index_to_textures.clear(); + self.texture_to_binding_index.clear(); + self.decals.clear(); + self.entity_to_decal_index.clear(); + } +} + +/// The per-view bind group entries pertaining to decals. +pub(crate) struct RenderViewClusteredDecalBindGroupEntries<'a> { + /// The list of decals, corresponding to `mesh_view_bindings::decals` in the + /// shader. + pub(crate) decals: &'a Buffer, + /// The list of textures, corresponding to + /// `mesh_view_bindings::decal_textures` in the shader. + pub(crate) texture_views: Vec<&'a ::Target>, + /// The sampler that the shader uses to sample decals, corresponding to + /// `mesh_view_bindings::decal_sampler` in the shader. + pub(crate) sampler: &'a Sampler, +} + +/// A render-world resource that holds the buffer of [`ClusteredDecal`]s ready +/// to upload to the GPU. 
+#[derive(Resource, Deref, DerefMut)] +pub struct DecalsBuffer(RawBufferVec); + +impl Default for DecalsBuffer { + fn default() -> Self { + DecalsBuffer(RawBufferVec::new(BufferUsages::STORAGE)) + } +} + +impl Plugin for ClusteredDecalPlugin { + fn build(&self, app: &mut App) { + load_internal_asset!( + app, + CLUSTERED_DECAL_SHADER_HANDLE, + "clustered.wgsl", + Shader::from_wgsl + ); + + app.add_plugins(ExtractComponentPlugin::::default()) + .register_type::(); + + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + render_app + .init_resource::() + .init_resource::() + .add_systems(ExtractSchedule, extract_decals) + .add_systems( + Render, + prepare_decals + .in_set(RenderSet::ManageViews) + .after(prepare_lights), + ) + .add_systems(Render, upload_decals.in_set(RenderSet::PrepareResources)); + } +} + +/// The GPU data structure that stores information about each decal. +#[derive(Clone, Copy, Default, ShaderType, Pod, Zeroable)] +#[repr(C)] +pub struct RenderClusteredDecal { + /// The inverse of the model matrix. + /// + /// The shader uses this in order to back-transform world positions into + /// model space. + local_from_world: Mat4, + /// The index of the decal texture in the binding array. + image_index: u32, + /// A custom tag available for application-defined purposes. + tag: u32, + /// Padding. + pad_a: u32, + /// Padding. + pad_b: u32, +} + +/// Extracts decals from the main world into the render world. +pub fn extract_decals( + decals: Extract< + Query<( + RenderEntity, + &ClusteredDecal, + &GlobalTransform, + &ViewVisibility, + )>, + >, + mut render_decals: ResMut, +) { + // Clear out the `RenderDecals` in preparation for a new frame. + render_decals.clear(); + + // Loop over each decal. + for (decal_entity, clustered_decal, global_transform, view_visibility) in &decals { + // If the decal is invisible, skip it. + if !view_visibility.get() { + continue; + } + + // Insert or add the image. 
+ let image_index = render_decals.get_or_insert_image(&clustered_decal.image.id()); + + // Record the decal. + let decal_index = render_decals.decals.len(); + render_decals + .entity_to_decal_index + .insert(decal_entity, decal_index); + + render_decals.decals.push(RenderClusteredDecal { + local_from_world: global_transform.affine().inverse().into(), + image_index, + tag: clustered_decal.tag, + pad_a: 0, + pad_b: 0, + }); + } +} + +/// Adds all decals in the scene to the [`GlobalClusterableObjectMeta`] table. +fn prepare_decals( + decals: Query>, + mut global_clusterable_object_meta: ResMut, + render_decals: Res, +) { + for decal_entity in &decals { + if let Some(index) = render_decals.entity_to_decal_index.get(&decal_entity) { + global_clusterable_object_meta + .entity_to_index + .insert(decal_entity, *index); + } + } +} + +/// Returns the layout for the clustered-decal-related bind group entries for a +/// single view. +pub(crate) fn get_bind_group_layout_entries( + render_device: &RenderDevice, + render_adapter: &RenderAdapter, +) -> Option<[BindGroupLayoutEntryBuilder; 3]> { + // If binding arrays aren't supported on the current platform, we have no + // bind group layout entries. + if !clustered_decals_are_usable(render_device, render_adapter) { + return None; + } + + Some([ + // `decals` + binding_types::storage_buffer_read_only::(false), + // `decal_textures` + binding_types::texture_2d(TextureSampleType::Float { filterable: true }) + .count(NonZero::::new(MAX_VIEW_DECALS as u32).unwrap()), + // `decal_sampler` + binding_types::sampler(SamplerBindingType::Filtering), + ]) +} + +impl<'a> RenderViewClusteredDecalBindGroupEntries<'a> { + /// Creates and returns the bind group entries for clustered decals for a + /// single view. 
+ pub(crate) fn get( + render_decals: &RenderClusteredDecals, + decals_buffer: &'a DecalsBuffer, + images: &'a RenderAssets, + fallback_image: &'a FallbackImage, + render_device: &RenderDevice, + render_adapter: &RenderAdapter, + ) -> Option> { + // Skip the entries if decals are unsupported on the current platform. + if !clustered_decals_are_usable(render_device, render_adapter) { + return None; + } + + // We use the first sampler among all the images. This assumes that all + // images use the same sampler, which is a documented restriction. If + // there's no sampler, we just use the one from the fallback image. + let sampler = match render_decals + .binding_index_to_textures + .iter() + .filter_map(|image_id| images.get(*image_id)) + .next() + { + Some(gpu_image) => &gpu_image.sampler, + None => &fallback_image.d2.sampler, + }; + + // Gather up the decal textures. + let mut texture_views = vec![]; + for image_id in &render_decals.binding_index_to_textures { + match images.get(*image_id) { + None => texture_views.push(&*fallback_image.d2.texture_view), + Some(gpu_image) => texture_views.push(&*gpu_image.texture_view), + } + } + + // Pad out the binding array to its maximum length, which is + // required on some platforms. + while texture_views.len() < MAX_VIEW_DECALS { + texture_views.push(&*fallback_image.d2.texture_view); + } + + Some(RenderViewClusteredDecalBindGroupEntries { + decals: decals_buffer.buffer()?, + texture_views, + sampler, + }) + } +} + +impl RenderClusteredDecals { + /// Returns the index of the given image in the decal texture binding array, + /// adding it to the list if necessary. + fn get_or_insert_image(&mut self, image_id: &AssetId) -> u32 { + *self + .texture_to_binding_index + .entry(*image_id) + .or_insert_with(|| { + let index = self.binding_index_to_textures.len() as u32; + self.binding_index_to_textures.push(*image_id); + index + }) + } +} + +/// Uploads the list of decals from [`RenderClusteredDecals::decals`] to the +/// GPU. 
+fn upload_decals( + render_decals: Res, + mut decals_buffer: ResMut, + render_device: Res, + render_queue: Res, +) { + decals_buffer.clear(); + + for &decal in &render_decals.decals { + decals_buffer.push(decal); + } + + // Make sure the buffer is non-empty. + // Otherwise there won't be a buffer to bind. + if decals_buffer.is_empty() { + decals_buffer.push(RenderClusteredDecal::default()); + } + + decals_buffer.write_buffer(&render_device, &render_queue); +} + +/// Returns true if clustered decals are usable on the current platform or false +/// otherwise. +/// +/// Clustered decals are currently disabled on macOS and iOS due to insufficient +/// texture bindings and limited bindless support in `wgpu`. +pub fn clustered_decals_are_usable( + render_device: &RenderDevice, + render_adapter: &RenderAdapter, +) -> bool { + // Disable binding arrays on Metal. There aren't enough texture bindings available. + // See issue #17553. + // Re-enable this when `wgpu` has first-class bindless. + binding_arrays_are_usable(render_device, render_adapter) + && cfg!(not(any(target_os = "macos", target_os = "ios"))) +} diff --git a/crates/bevy_pbr/src/decal/clustered.wgsl b/crates/bevy_pbr/src/decal/clustered.wgsl new file mode 100644 index 0000000000000..874722a2aba80 --- /dev/null +++ b/crates/bevy_pbr/src/decal/clustered.wgsl @@ -0,0 +1,183 @@ +// Support code for clustered decals. +// +// This module provides an iterator API, which you may wish to use in your own +// shaders if you want clustered decals to provide textures other than the base +// color. The iterator API allows you to iterate over all decals affecting the +// current fragment. 
Use `clustered_decal_iterator_new()` and +// `clustered_decal_iterator_next()` as follows: +// +// let view_z = get_view_z(vec4(world_position, 1.0)); +// let is_orthographic = view_is_orthographic(); +// +// let cluster_index = +// clustered_forward::fragment_cluster_index(frag_coord, view_z, is_orthographic); +// var clusterable_object_index_ranges = +// clustered_forward::unpack_clusterable_object_index_ranges(cluster_index); +// +// var iterator = clustered_decal_iterator_new(world_position, &clusterable_object_index_ranges); +// while (clustered_decal_iterator_next(&iterator)) { +// ... sample from the texture at iterator.texture_index at iterator.uv ... +// } +// +// In this way, in conjunction with a custom material, you can provide your own +// texture arrays that mirror `mesh_view_bindings::clustered_decal_textures` in +// order to support decals with normal maps, etc. +// +// Note that the order in which decals are returned is currently unpredictable, +// though generally stable from frame to frame. + +#define_import_path bevy_pbr::decal::clustered + +#import bevy_pbr::clustered_forward +#import bevy_pbr::clustered_forward::ClusterableObjectIndexRanges +#import bevy_pbr::mesh_view_bindings +#import bevy_render::maths + +// An object that allows stepping through all clustered decals that affect a +// single fragment. +struct ClusteredDecalIterator { + // Public fields follow: + // The index of the decal texture in the binding array. + texture_index: i32, + // The UV coordinates at which to sample that decal texture. + uv: vec2, + // A custom tag you can use for your own purposes. + tag: u32, + + // Private fields follow: + // The current offset of the index in the `ClusterableObjectIndexRanges` list. + decal_index_offset: i32, + // The end offset of the index in the `ClusterableObjectIndexRanges` list. + end_offset: i32, + // The world-space position of the fragment. 
+ world_position: vec3, +} + +#ifdef CLUSTERED_DECALS_ARE_USABLE + +// Creates a new iterator over the decals at the current fragment. +// +// You can retrieve `clusterable_object_index_ranges` as follows: +// +// let view_z = get_view_z(world_position); +// let is_orthographic = view_is_orthographic(); +// +// let cluster_index = +// clustered_forward::fragment_cluster_index(frag_coord, view_z, is_orthographic); +// var clusterable_object_index_ranges = +// clustered_forward::unpack_clusterable_object_index_ranges(cluster_index); +fn clustered_decal_iterator_new( + world_position: vec3, + clusterable_object_index_ranges: ptr +) -> ClusteredDecalIterator { + return ClusteredDecalIterator( + -1, + vec2(0.0), + 0u, + // We subtract 1 because the first thing `decal_iterator_next` does is + // add 1. + i32((*clusterable_object_index_ranges).first_decal_offset) - 1, + i32((*clusterable_object_index_ranges).last_clusterable_object_index_offset), + world_position, + ); +} + +// Populates the `iterator.texture_index` and `iterator.uv` fields for the next +// decal overlapping the current world position. +// +// Returns true if another decal was found or false if no more decals were found +// for this position. 
+fn clustered_decal_iterator_next(iterator: ptr) -> bool { + if ((*iterator).decal_index_offset == (*iterator).end_offset) { + return false; + } + + (*iterator).decal_index_offset += 1; + + while ((*iterator).decal_index_offset < (*iterator).end_offset) { + let decal_index = i32(clustered_forward::get_clusterable_object_id( + u32((*iterator).decal_index_offset) + )); + let decal_space_vector = + (mesh_view_bindings::clustered_decals.decals[decal_index].local_from_world * + vec4((*iterator).world_position, 1.0)).xyz; + + if (all(decal_space_vector >= vec3(-0.5)) && all(decal_space_vector <= vec3(0.5))) { + (*iterator).texture_index = + i32(mesh_view_bindings::clustered_decals.decals[decal_index].image_index); + (*iterator).uv = decal_space_vector.xy * vec2(1.0, -1.0) + vec2(0.5); + (*iterator).tag = + mesh_view_bindings::clustered_decals.decals[decal_index].tag; + return true; + } + + (*iterator).decal_index_offset += 1; + } + + return false; +} + +#endif // CLUSTERED_DECALS_ARE_USABLE + +// Returns the view-space Z coordinate for the given world position. +fn get_view_z(world_position: vec3) -> f32 { + return dot(vec4( + mesh_view_bindings::view.view_from_world[0].z, + mesh_view_bindings::view.view_from_world[1].z, + mesh_view_bindings::view.view_from_world[2].z, + mesh_view_bindings::view.view_from_world[3].z + ), vec4(world_position, 1.0)); +} + +// Returns true if the current view describes an orthographic projection or +// false otherwise. +fn view_is_orthographic() -> bool { + return mesh_view_bindings::view.clip_from_view[3].w == 1.0; +} + +// Modifies the base color at the given position to account for decals. +// +// Returns the new base color with decals taken into account. If no decals +// overlap the current world position, returns the supplied base color +// unmodified. 
+fn apply_decal_base_color( + world_position: vec3, + frag_coord: vec2, + initial_base_color: vec4, +) -> vec4 { + var base_color = initial_base_color; + +#ifdef CLUSTERED_DECALS_ARE_USABLE + // Fetch the clusterable object index ranges for this world position. + + let view_z = get_view_z(world_position); + let is_orthographic = view_is_orthographic(); + + let cluster_index = + clustered_forward::fragment_cluster_index(frag_coord, view_z, is_orthographic); + var clusterable_object_index_ranges = + clustered_forward::unpack_clusterable_object_index_ranges(cluster_index); + + // Iterate over decals. + + var iterator = clustered_decal_iterator_new(world_position, &clusterable_object_index_ranges); + while (clustered_decal_iterator_next(&iterator)) { + // Sample the current decal. + let decal_base_color = textureSampleLevel( + mesh_view_bindings::clustered_decal_textures[iterator.texture_index], + mesh_view_bindings::clustered_decal_sampler, + iterator.uv, + 0.0 + ); + + // Blend with the accumulated fragment. 
+ base_color = vec4( + mix(base_color.rgb, decal_base_color.rgb, decal_base_color.a), + base_color.a + decal_base_color.a + ); + } +#endif // CLUSTERED_DECALS_ARE_USABLE + + return base_color; +} + diff --git a/crates/bevy_pbr/src/decal/forward.rs b/crates/bevy_pbr/src/decal/forward.rs new file mode 100644 index 0000000000000..2445c3e723f52 --- /dev/null +++ b/crates/bevy_pbr/src/decal/forward.rs @@ -0,0 +1,155 @@ +use crate::{ + ExtendedMaterial, Material, MaterialExtension, MaterialExtensionKey, MaterialExtensionPipeline, + MaterialPlugin, StandardMaterial, +}; +use bevy_app::{App, Plugin}; +use bevy_asset::{load_internal_asset, weak_handle, Asset, Assets, Handle}; +use bevy_ecs::component::Component; +use bevy_math::{prelude::Rectangle, Quat, Vec2, Vec3}; +use bevy_reflect::{Reflect, TypePath}; +use bevy_render::render_asset::RenderAssets; +use bevy_render::render_resource::{AsBindGroupShaderType, ShaderType}; +use bevy_render::texture::GpuImage; +use bevy_render::{ + alpha::AlphaMode, + mesh::{Mesh, Mesh3d, MeshBuilder, MeshVertexBufferLayoutRef, Meshable}, + render_resource::{ + AsBindGroup, CompareFunction, RenderPipelineDescriptor, Shader, + SpecializedMeshPipelineError, + }, + RenderDebugFlags, +}; + +const FORWARD_DECAL_MESH_HANDLE: Handle = + weak_handle!("afa817f9-1869-4e0c-ac0d-d8cd1552d38a"); +const FORWARD_DECAL_SHADER_HANDLE: Handle = + weak_handle!("f8dfbef4-d88b-42ae-9af4-d9661e9f1648"); + +/// Plugin to render [`ForwardDecal`]s. 
+pub struct ForwardDecalPlugin; + +impl Plugin for ForwardDecalPlugin { + fn build(&self, app: &mut App) { + load_internal_asset!( + app, + FORWARD_DECAL_SHADER_HANDLE, + "forward_decal.wgsl", + Shader::from_wgsl + ); + + app.register_type::(); + + app.world_mut().resource_mut::>().insert( + FORWARD_DECAL_MESH_HANDLE.id(), + Rectangle::from_size(Vec2::ONE) + .mesh() + .build() + .rotated_by(Quat::from_rotation_arc(Vec3::Z, Vec3::Y)) + .with_generated_tangents() + .unwrap(), + ); + + app.add_plugins(MaterialPlugin::> { + prepass_enabled: false, + shadows_enabled: false, + debug_flags: RenderDebugFlags::default(), + ..Default::default() + }); + } +} + +/// A decal that renders via a 1x1 transparent quad mesh, smoothly alpha-blending with the underlying +/// geometry towards the edges. +/// +/// Because forward decals are meshes, you can use arbitrary materials to control their appearance. +/// +/// # Usage Notes +/// +/// * Spawn this component on an entity with a [`crate::MeshMaterial3d`] component holding a [`ForwardDecalMaterial`]. +/// * Any camera rendering a forward decal must have the [`bevy_core_pipeline::prepass::DepthPrepass`] component. +/// * Looking at forward decals at a steep angle can cause distortion. This can be mitigated by padding your decal's +/// texture with extra transparent pixels on the edges. +#[derive(Component, Reflect)] +#[require(Mesh3d(FORWARD_DECAL_MESH_HANDLE))] +pub struct ForwardDecal; + +/// Type alias for an extended material with a [`ForwardDecalMaterialExt`] extension. +/// +/// Make sure to register the [`MaterialPlugin`] for this material in your app setup. +/// +/// [`StandardMaterial`] comes with out of the box support for forward decals. +#[expect(type_alias_bounds, reason = "Type alias generics not yet stable")] +pub type ForwardDecalMaterial = ExtendedMaterial; + +/// Material extension for a [`ForwardDecal`]. 
+/// +/// In addition to wrapping your material type with this extension, your shader must use +/// the `bevy_pbr::decal::forward::get_forward_decal_info` function. +/// +/// The `FORWARD_DECAL` shader define will be made available to your shader so that you can gate +/// the forward decal code behind an ifdef. +#[derive(Asset, AsBindGroup, TypePath, Clone, Debug)] +#[uniform(200, ForwardDecalMaterialExtUniform)] +pub struct ForwardDecalMaterialExt { + /// Controls the distance threshold for decal blending with surfaces. + /// + /// This parameter determines how far away a surface can be before the decal no longer blends + /// with it and instead renders with full opacity. + /// + /// Lower values cause the decal to only blend with close surfaces, while higher values allow + /// blending with more distant surfaces. + /// + /// Units are in meters. + pub depth_fade_factor: f32, +} + +#[derive(Clone, Default, ShaderType)] +pub struct ForwardDecalMaterialExtUniform { + pub inv_depth_fade_factor: f32, +} + +impl AsBindGroupShaderType for ForwardDecalMaterialExt { + fn as_bind_group_shader_type( + &self, + _images: &RenderAssets, + ) -> ForwardDecalMaterialExtUniform { + ForwardDecalMaterialExtUniform { + inv_depth_fade_factor: 1.0 / self.depth_fade_factor.max(0.001), + } + } +} + +impl MaterialExtension for ForwardDecalMaterialExt { + fn alpha_mode() -> Option { + Some(AlphaMode::Blend) + } + + fn specialize( + _pipeline: &MaterialExtensionPipeline, + descriptor: &mut RenderPipelineDescriptor, + _layout: &MeshVertexBufferLayoutRef, + _key: MaterialExtensionKey, + ) -> Result<(), SpecializedMeshPipelineError> { + descriptor.depth_stencil.as_mut().unwrap().depth_compare = CompareFunction::Always; + + descriptor.vertex.shader_defs.push("FORWARD_DECAL".into()); + + if let Some(fragment) = &mut descriptor.fragment { + fragment.shader_defs.push("FORWARD_DECAL".into()); + } + + if let Some(label) = &mut descriptor.label { + *label = format!("forward_decal_{}", label).into(); 
+ } + + Ok(()) + } +} + +impl Default for ForwardDecalMaterialExt { + fn default() -> Self { + Self { + depth_fade_factor: 8.0, + } + } +} diff --git a/crates/bevy_pbr/src/decal/forward_decal.wgsl b/crates/bevy_pbr/src/decal/forward_decal.wgsl new file mode 100644 index 0000000000000..ce24d57bf5998 --- /dev/null +++ b/crates/bevy_pbr/src/decal/forward_decal.wgsl @@ -0,0 +1,52 @@ +#define_import_path bevy_pbr::decal::forward + +#import bevy_pbr::{ + forward_io::VertexOutput, + mesh_functions::get_world_from_local, + mesh_view_bindings::view, + pbr_functions::calculate_tbn_mikktspace, + prepass_utils::prepass_depth, + view_transformations::depth_ndc_to_view_z, +} +#import bevy_render::maths::project_onto + +@group(2) @binding(200) +var inv_depth_fade_factor: f32; + +struct ForwardDecalInformation { + world_position: vec4, + uv: vec2, + alpha: f32, +} + +fn get_forward_decal_info(in: VertexOutput) -> ForwardDecalInformation { + let world_from_local = get_world_from_local(in.instance_index); + let scale = (world_from_local * vec4(1.0, 1.0, 1.0, 0.0)).xyz; + let scaled_tangent = vec4(in.world_tangent.xyz / scale, in.world_tangent.w); + + let V = normalize(view.world_position - in.world_position.xyz); + + // Transform V from fragment to camera in world space to tangent space. 
+ let TBN = calculate_tbn_mikktspace(in.world_normal, scaled_tangent); + let T = TBN[0]; + let B = TBN[1]; + let N = TBN[2]; + let Vt = vec3(dot(V, T), dot(V, B), dot(V, N)); + + let frag_depth = depth_ndc_to_view_z(in.position.z); + let depth_pass_depth = depth_ndc_to_view_z(prepass_depth(in.position, 0u)); + let diff_depth = frag_depth - depth_pass_depth; + let diff_depth_abs = abs(diff_depth); + + // Apply UV parallax + let contact_on_decal = project_onto(V * diff_depth, in.world_normal); + let normal_depth = length(contact_on_decal); + let view_steepness = abs(Vt.z); + let delta_uv = normal_depth * Vt.xy * vec2(1.0, -1.0) / view_steepness; + let uv = in.uv + delta_uv; + + let world_position = vec4(in.world_position.xyz + V * diff_depth_abs, in.world_position.w); + let alpha = saturate(1.0 - (normal_depth * inv_depth_fade_factor)); + + return ForwardDecalInformation(world_position, uv, alpha); +} diff --git a/crates/bevy_pbr/src/decal/mod.rs b/crates/bevy_pbr/src/decal/mod.rs new file mode 100644 index 0000000000000..1b921317683c9 --- /dev/null +++ b/crates/bevy_pbr/src/decal/mod.rs @@ -0,0 +1,11 @@ +//! Decal rendering. +//! +//! Decals are a material that render on top of the surface that they're placed above. +//! They can be used to render signs, paint, snow, impact craters, and other effects on top of surfaces. + +// TODO: Once other decal types are added, write a paragraph comparing the different types in the module docs. 
+ +pub mod clustered; +mod forward; + +pub use forward::*; diff --git a/crates/bevy_pbr/src/deferred/mod.rs b/crates/bevy_pbr/src/deferred/mod.rs index 67271a916f400..e40b3a940abcc 100644 --- a/crates/bevy_pbr/src/deferred/mod.rs +++ b/crates/bevy_pbr/src/deferred/mod.rs @@ -6,10 +6,11 @@ use crate::{ TONEMAPPING_LUT_TEXTURE_BINDING_INDEX, }; use crate::{ - MeshPipelineKey, ShadowFilteringMethod, ViewFogUniformOffset, ViewLightsUniformOffset, + DistanceFog, MeshPipelineKey, ShadowFilteringMethod, ViewFogUniformOffset, + ViewLightsUniformOffset, }; use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_core_pipeline::{ core_3d::graph::{Core3d, Node3d}, deferred::{ @@ -34,7 +35,7 @@ use bevy_render::{ pub struct DeferredPbrLightingPlugin; pub const DEFERRED_LIGHTING_SHADER_HANDLE: Handle = - Handle::weak_from_u128(2708011359337029741); + weak_handle!("f4295279-8890-4748-b654-ca4d2183df1c"); pub const DEFAULT_PBR_DEFERRED_LIGHTING_PASS_ID: u8 = 1; @@ -328,6 +329,10 @@ impl SpecializedRenderPipeline for DeferredLightingLayout { shader_defs.push("HAS_PREVIOUS_MORPH".into()); } + if key.contains(MeshPipelineKey::DISTANCE_FOG) { + shader_defs.push("DISTANCE_FOG".into()); + } + // Always true, since we're in the deferred lighting pipeline shader_defs.push("DEFERRED_PREPASS".into()); @@ -340,6 +345,10 @@ impl SpecializedRenderPipeline for DeferredLightingLayout { } else if shadow_filter_method == MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL { shader_defs.push("SHADOW_FILTER_METHOD_TEMPORAL".into()); } + if self.mesh_pipeline.binding_arrays_are_usable { + shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into()); + shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into()); + } #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into()); @@ -427,27 +436,26 @@ pub fn prepare_deferred_lighting_pipelines( pipeline_cache: 
Res, mut pipelines: ResMut>, deferred_lighting_layout: Res, - views: Query< + views: Query<( + Entity, + &ExtractedView, + Option<&Tonemapping>, + Option<&DebandDither>, + Option<&ShadowFilteringMethod>, ( - Entity, - &ExtractedView, - Option<&Tonemapping>, - Option<&DebandDither>, - Option<&ShadowFilteringMethod>, - ( - Has, - Has, - ), - ( - Has, - Has, - Has, - ), - Has>, - Has>, + Has, + Has, + Has, ), - With, - >, + ( + Has, + Has, + Has, + Has, + ), + Has>, + Has>, + )>, ) { for ( entity, @@ -455,12 +463,20 @@ pub fn prepare_deferred_lighting_pipelines( tonemapping, dither, shadow_filter_method, - (ssao, ssr), - (normal_prepass, depth_prepass, motion_vector_prepass), + (ssao, ssr, distance_fog), + (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), has_environment_maps, has_irradiance_volumes, ) in &views { + // If there is no deferred prepass, remove the old pipeline if there was + // one. This handles the case in which a view using deferred stops using + // it. + if !deferred_prepass { + commands.entity(entity).remove::(); + continue; + } + let mut view_key = MeshPipelineKey::from_hdr(view.hdr); if normal_prepass { @@ -507,6 +523,9 @@ pub fn prepare_deferred_lighting_pipelines( if ssr { view_key |= MeshPipelineKey::SCREEN_SPACE_REFLECTIONS; } + if distance_fog { + view_key |= MeshPipelineKey::DISTANCE_FOG; + } // We don't need to check to see whether the environment map is loaded // because [`gather_light_probes`] already checked that for us before diff --git a/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl b/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl index e96de6bded77f..e6254b115461c 100644 --- a/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl +++ b/crates/bevy_pbr/src/deferred/pbr_deferred_functions.wgsl @@ -23,21 +23,24 @@ // Creates the deferred gbuffer from a PbrInput. fn deferred_gbuffer_from_pbr_input(in: PbrInput) -> vec4 { - // Only monochrome occlusion supported. 
May not be worth including at all. - // Some models have baked occlusion, GLTF only supports monochrome. - // Real time occlusion is applied in the deferred lighting pass. - // Deriving luminance via Rec. 709. coefficients - // https://en.wikipedia.org/wiki/Rec._709 - let diffuse_occlusion = dot(in.diffuse_occlusion, vec3(0.2126, 0.7152, 0.0722)); + // Only monochrome occlusion supported. May not be worth including at all. + // Some models have baked occlusion, GLTF only supports monochrome. + // Real time occlusion is applied in the deferred lighting pass. + // Deriving luminance via Rec. 709. coefficients + // https://en.wikipedia.org/wiki/Rec._709 + let rec_709_coeffs = vec3(0.2126, 0.7152, 0.0722); + let diffuse_occlusion = dot(in.diffuse_occlusion, rec_709_coeffs); + // Only monochrome specular supported. + let reflectance = dot(in.material.reflectance, rec_709_coeffs); #ifdef WEBGL2 // More crunched for webgl so we can also fit depth. var props = deferred_types::pack_unorm3x4_plus_unorm_20_(vec4( - in.material.reflectance, + reflectance, in.material.metallic, diffuse_occlusion, in.frag_coord.z)); #else var props = deferred_types::pack_unorm4x8_(vec4( - in.material.reflectance, // could be fewer bits + reflectance, // could be fewer bits in.material.metallic, // could be fewer bits diffuse_occlusion, // is this worth including? 0.0)); // spare @@ -100,10 +103,10 @@ fn pbr_input_from_deferred_gbuffer(frag_coord: vec4, gbuffer: vec4) -> #ifdef WEBGL2 // More crunched for webgl so we can also fit depth. let props = deferred_types::unpack_unorm3x4_plus_unorm_20_(gbuffer.b); // Bias to 0.5 since that's the value for almost all materials. 
- pbr.material.reflectance = saturate(props.r - 0.03333333333); + pbr.material.reflectance = vec3(saturate(props.r - 0.03333333333)); #else let props = deferred_types::unpack_unorm4x8_(gbuffer.b); - pbr.material.reflectance = props.r; + pbr.material.reflectance = vec3(props.r); #endif // WEBGL2 pbr.material.metallic = props.g; pbr.diffuse_occlusion = vec3(props.b); diff --git a/crates/bevy_pbr/src/extended_material.rs b/crates/bevy_pbr/src/extended_material.rs index 1b2d48e4c69f9..e01dd0ff14ef3 100644 --- a/crates/bevy_pbr/src/extended_material.rs +++ b/crates/bevy_pbr/src/extended_material.rs @@ -1,10 +1,15 @@ +use alloc::borrow::Cow; + use bevy_asset::{Asset, Handle}; use bevy_ecs::system::SystemParamItem; +use bevy_platform::{collections::HashSet, hash::FixedHasher}; use bevy_reflect::{impl_type_path, Reflect}; use bevy_render::{ + alpha::AlphaMode, mesh::MeshVertexBufferLayoutRef, render_resource::{ - AsBindGroup, AsBindGroupError, BindGroupLayout, RenderPipelineDescriptor, Shader, + AsBindGroup, AsBindGroupError, BindGroupLayout, BindGroupLayoutEntry, BindlessDescriptor, + BindlessResourceType, BindlessSlabResourceLimit, RenderPipelineDescriptor, Shader, ShaderRef, SpecializedMeshPipelineError, UnpreparedBindGroup, }, renderer::RenderDevice, @@ -37,11 +42,15 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// Returns this material's fragment shader. If [`ShaderRef::Default`] is returned, the base material mesh fragment shader /// will be used. - #[allow(unused_variables)] fn fragment_shader() -> ShaderRef { ShaderRef::Default } + // Returns this material’s AlphaMode. If None is returned, the base material alpha mode will be used. + fn alpha_mode() -> Option { + None + } + /// Returns this material's prepass vertex shader. If [`ShaderRef::Default`] is returned, the base material prepass vertex shader /// will be used. 
fn prepass_vertex_shader() -> ShaderRef { @@ -50,7 +59,6 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// Returns this material's prepass fragment shader. If [`ShaderRef::Default`] is returned, the base material prepass fragment shader /// will be used. - #[allow(unused_variables)] fn prepass_fragment_shader() -> ShaderRef { ShaderRef::Default } @@ -63,14 +71,12 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// Returns this material's prepass fragment shader. If [`ShaderRef::Default`] is returned, the base material deferred fragment shader /// will be used. - #[allow(unused_variables)] fn deferred_fragment_shader() -> ShaderRef { ShaderRef::Default } /// Returns this material's [`crate::meshlet::MeshletMesh`] fragment shader. If [`ShaderRef::Default`] is returned, /// the default meshlet mesh fragment shader will be used. - #[allow(unused_variables)] #[cfg(feature = "meshlet")] fn meshlet_mesh_fragment_shader() -> ShaderRef { ShaderRef::Default @@ -78,7 +84,6 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// Returns this material's [`crate::meshlet::MeshletMesh`] prepass fragment shader. If [`ShaderRef::Default`] is returned, /// the default meshlet mesh prepass fragment shader will be used. - #[allow(unused_variables)] #[cfg(feature = "meshlet")] fn meshlet_mesh_prepass_fragment_shader() -> ShaderRef { ShaderRef::Default @@ -86,7 +91,6 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// Returns this material's [`crate::meshlet::MeshletMesh`] deferred fragment shader. If [`ShaderRef::Default`] is returned, /// the default meshlet mesh deferred fragment shader will be used. 
- #[allow(unused_variables)] #[cfg(feature = "meshlet")] fn meshlet_mesh_deferred_fragment_shader() -> ShaderRef { ShaderRef::Default @@ -95,7 +99,10 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// Customizes the default [`RenderPipelineDescriptor`] for a specific entity using the entity's /// [`MaterialPipelineKey`] and [`MeshVertexBufferLayoutRef`] as input. /// Specialization for the base material is applied before this function is called. - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." + )] #[inline] fn specialize( pipeline: &MaterialExtensionPipeline, @@ -124,6 +131,7 @@ pub trait MaterialExtension: Asset + AsBindGroup + Clone + Sized { /// the `extended_material` example). #[derive(Asset, Clone, Debug, Reflect)] #[reflect(type_path = false)] +#[reflect(Clone)] pub struct ExtendedMaterial { pub base: B, pub extension: E, @@ -150,12 +158,24 @@ impl AsBindGroup for ExtendedMaterial { type Data = (::Data, ::Data); type Param = (::Param, ::Param); - fn bindless_slot_count() -> Option { - match (B::bindless_slot_count(), E::bindless_slot_count()) { - (Some(base_bindless_slot_count), Some(extension_bindless_slot_count)) => { - Some(base_bindless_slot_count.min(extension_bindless_slot_count)) + fn bindless_slot_count() -> Option { + // We only enable bindless if both the base material and its extension + // are bindless. If we do enable bindless, we choose the smaller of the + // two slab size limits. + match (B::bindless_slot_count()?, E::bindless_slot_count()?) 
{ + (BindlessSlabResourceLimit::Auto, BindlessSlabResourceLimit::Auto) => { + Some(BindlessSlabResourceLimit::Auto) + } + (BindlessSlabResourceLimit::Auto, BindlessSlabResourceLimit::Custom(limit)) + | (BindlessSlabResourceLimit::Custom(limit), BindlessSlabResourceLimit::Auto) => { + Some(BindlessSlabResourceLimit::Custom(limit)) } - _ => None, + ( + BindlessSlabResourceLimit::Custom(base_limit), + BindlessSlabResourceLimit::Custom(extended_limit), + ) => Some(BindlessSlabResourceLimit::Custom( + base_limit.min(extended_limit), + )), } } @@ -164,11 +184,9 @@ impl AsBindGroup for ExtendedMaterial { layout: &BindGroupLayout, render_device: &RenderDevice, (base_param, extended_param): &mut SystemParamItem<'_, '_, Self::Param>, - mut force_no_bindless: bool, + mut force_non_bindless: bool, ) -> Result, AsBindGroupError> { - // Only allow bindless mode if both the base material and the extension - // support it. - force_no_bindless = force_no_bindless || Self::bindless_slot_count().is_none(); + force_non_bindless = force_non_bindless || Self::bindless_slot_count().is_none(); // add together the bindings of the base material and the user material let UnpreparedBindGroup { @@ -179,14 +197,14 @@ impl AsBindGroup for ExtendedMaterial { layout, render_device, base_param, - force_no_bindless, + force_non_bindless, )?; let extended_bindgroup = E::unprepared_bind_group( &self.extension, layout, render_device, extended_param, - force_no_bindless, + force_non_bindless, )?; bindings.extend(extended_bindgroup.bindings.0); @@ -199,23 +217,73 @@ impl AsBindGroup for ExtendedMaterial { fn bind_group_layout_entries( render_device: &RenderDevice, - mut force_no_bindless: bool, - ) -> Vec + mut force_non_bindless: bool, + ) -> Vec where Self: Sized, { - // Only allow bindless mode if both the base material and the extension - // support it. 
- force_no_bindless = force_no_bindless || Self::bindless_slot_count().is_none(); - - // add together the bindings of the standard material and the user material - let mut entries = B::bind_group_layout_entries(render_device, force_no_bindless); - entries.extend(E::bind_group_layout_entries( - render_device, - force_no_bindless, - )); + force_non_bindless = force_non_bindless || Self::bindless_slot_count().is_none(); + + // Add together the bindings of the standard material and the user + // material, skipping duplicate bindings. Duplicate bindings will occur + // when bindless mode is on, because of the common bindless resource + // arrays, and we need to eliminate the duplicates or `wgpu` will + // complain. + let mut entries = vec![]; + let mut seen_bindings = HashSet::<_>::with_hasher(FixedHasher); + for entry in B::bind_group_layout_entries(render_device, force_non_bindless) + .into_iter() + .chain(E::bind_group_layout_entries(render_device, force_non_bindless).into_iter()) + { + if seen_bindings.insert(entry.binding) { + entries.push(entry); + } + } entries } + + fn bindless_descriptor() -> Option { + // We're going to combine the two bindless descriptors. + let base_bindless_descriptor = B::bindless_descriptor()?; + let extended_bindless_descriptor = E::bindless_descriptor()?; + + // Combining the buffers and index tables is straightforward. + + let mut buffers = base_bindless_descriptor.buffers.to_vec(); + let mut index_tables = base_bindless_descriptor.index_tables.to_vec(); + + buffers.extend(extended_bindless_descriptor.buffers.iter().cloned()); + index_tables.extend(extended_bindless_descriptor.index_tables.iter().cloned()); + + // Combining the resources is a little trickier because the resource + // array is indexed by bindless index, so we have to merge the two + // arrays, not just concatenate them. 
+ let max_bindless_index = base_bindless_descriptor + .resources + .len() + .max(extended_bindless_descriptor.resources.len()); + let mut resources = Vec::with_capacity(max_bindless_index); + for bindless_index in 0..max_bindless_index { + // In the event of a conflicting bindless index, we choose the + // base's binding. + match base_bindless_descriptor.resources.get(bindless_index) { + None | Some(&BindlessResourceType::None) => resources.push( + extended_bindless_descriptor + .resources + .get(bindless_index) + .copied() + .unwrap_or(BindlessResourceType::None), + ), + Some(&resource_type) => resources.push(resource_type), + } + } + + Some(BindlessDescriptor { + resources: Cow::Owned(resources), + buffers: Cow::Owned(buffers), + index_tables: Cow::Owned(index_tables), + }) + } } impl Material for ExtendedMaterial { @@ -233,8 +301,11 @@ impl Material for ExtendedMaterial { } } - fn alpha_mode(&self) -> crate::AlphaMode { - B::alpha_mode(&self.base) + fn alpha_mode(&self) -> AlphaMode { + match E::alpha_mode() { + Some(specified) => specified, + None => B::alpha_mode(&self.base), + } } fn opaque_render_method(&self) -> crate::OpaqueRendererMethod { diff --git a/crates/bevy_pbr/src/fog.rs b/crates/bevy_pbr/src/fog.rs index 198d218334dc2..21a89ccc70ab7 100644 --- a/crates/bevy_pbr/src/fog.rs +++ b/crates/bevy_pbr/src/fog.rs @@ -47,7 +47,7 @@ use bevy_render::{extract_component::ExtractComponent, prelude::Camera}; /// [`StandardMaterial`](crate::StandardMaterial) instances via the `fog_enabled` flag. #[derive(Debug, Clone, Component, Reflect, ExtractComponent)] #[extract_component_filter(With)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct DistanceFog { /// The color of the fog effect. 
/// @@ -70,9 +70,6 @@ pub struct DistanceFog { pub falloff: FogFalloff, } -#[deprecated(since = "0.15.0", note = "Renamed to `DistanceFog`")] -pub type FogSettings = DistanceFog; - /// Allows switching between different fog falloff modes, and configuring their parameters. /// /// ## Convenience Methods @@ -97,6 +94,7 @@ pub type FogSettings = DistanceFog; /// - [`FogFalloff::from_visibility_contrast_color()`] /// - [`FogFalloff::from_visibility_contrast_colors()`] #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub enum FogFalloff { /// A linear fog falloff that grows in intensity between `start` and `end` distances. /// @@ -144,11 +142,11 @@ pub enum FogFalloff { /// ## Tips /// /// - Use the [`FogFalloff::from_visibility()`] convenience method to create an exponential falloff with the proper - /// density for a desired visibility distance in world units; + /// density for a desired visibility distance in world units; /// - It's not _unusual_ to have very large or very small values for the density, depending on the scene - /// scale. Typically, for scenes with objects in the scale of thousands of units, you might want density values - /// in the ballpark of `0.001`. Conversely, for really small scale scenes you might want really high values of - /// density; + /// scale. Typically, for scenes with objects in the scale of thousands of units, you might want density values + /// in the ballpark of `0.001`. Conversely, for really small scale scenes you might want really high values of + /// density; /// - Combine the `density` parameter with the [`DistanceFog`] `color`'s alpha channel for easier artistic control. 
/// /// ## Formula @@ -196,7 +194,7 @@ pub enum FogFalloff { /// ## Tips /// /// - Use the [`FogFalloff::from_visibility_squared()`] convenience method to create an exponential squared falloff - /// with the proper density for a desired visibility distance in world units; + /// with the proper density for a desired visibility distance in world units; /// - Combine the `density` parameter with the [`DistanceFog`] `color`'s alpha channel for easier artistic control. /// /// ## Formula @@ -242,8 +240,8 @@ pub enum FogFalloff { /// ## Tips /// /// - Use the [`FogFalloff::from_visibility_colors()`] or [`FogFalloff::from_visibility_color()`] convenience methods - /// to create an atmospheric falloff with the proper densities for a desired visibility distance in world units and - /// extinction and inscattering colors; + /// to create an atmospheric falloff with the proper densities for a desired visibility distance in world units and + /// extinction and inscattering colors; /// - Combine the atmospheric fog parameters with the [`DistanceFog`] `color`'s alpha channel for easier artistic control. 
/// /// ## Formula diff --git a/crates/bevy_pbr/src/lib.rs b/crates/bevy_pbr/src/lib.rs index 6559268bb5c5e..1810bc67eb225 100644 --- a/crates/bevy_pbr/src/lib.rs +++ b/crates/bevy_pbr/src/lib.rs @@ -1,6 +1,6 @@ #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![deny(unsafe_code)] +#![forbid(unsafe_code)] #![doc( html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" @@ -24,8 +24,10 @@ pub mod experimental { } } -mod bundle; +mod atmosphere; mod cluster; +mod components; +pub mod decal; pub mod deferred; mod extended_material; mod fog; @@ -43,19 +45,19 @@ mod ssao; mod ssr; mod volumetric_fog; -use crate::material_bind_groups::FallbackBindlessResources; - use bevy_color::{Color, LinearRgba}; -use core::marker::PhantomData; -pub use bundle::*; +pub use atmosphere::*; pub use cluster::*; +pub use components::*; +pub use decal::clustered::ClusteredDecalPlugin; pub use extended_material::*; pub use fog::*; pub use light::*; pub use light_probe::*; pub use lightmap::*; pub use material::*; +pub use material_bind_groups::*; pub use mesh_material::*; pub use parallax::*; pub use pbr_material::*; @@ -63,29 +65,17 @@ pub use prepass::*; pub use render::*; pub use ssao::*; pub use ssr::*; -#[allow(deprecated)] -pub use volumetric_fog::{ - FogVolume, FogVolumeBundle, VolumetricFog, VolumetricFogPlugin, VolumetricFogSettings, - VolumetricLight, -}; +pub use volumetric_fog::{FogVolume, VolumetricFog, VolumetricFogPlugin, VolumetricLight}; /// The PBR prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. 
-#[expect(deprecated)] pub mod prelude { #[doc(hidden)] pub use crate::{ - bundle::{ - DirectionalLightBundle, MaterialMeshBundle, PbrBundle, PointLightBundle, - SpotLightBundle, - }, fog::{DistanceFog, FogFalloff}, light::{light_consts, AmbientLight, DirectionalLight, PointLight, SpotLight}, - light_probe::{ - environment_map::{EnvironmentMapLight, ReflectionProbeBundle}, - LightProbe, - }, + light_probe::{environment_map::EnvironmentMapLight, LightProbe}, material::{Material, MaterialPlugin}, mesh_material::MeshMaterial3d, parallax::ParallaxMappingMethod, @@ -97,74 +87,100 @@ pub mod prelude { pub mod graph { use bevy_render::render_graph::RenderLabel; + /// Render graph nodes specific to 3D PBR rendering. #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)] pub enum NodePbr { - /// Label for the shadow pass node. - ShadowPass, + /// Label for the shadow pass node that draws meshes that were visible + /// from the light last frame. + EarlyShadowPass, + /// Label for the shadow pass node that draws meshes that became visible + /// from the light this frame. + LateShadowPass, /// Label for the screen space ambient occlusion render node. ScreenSpaceAmbientOcclusion, DeferredLightingPass, /// Label for the volumetric lighting pass. VolumetricFog, - /// Label for the compute shader instance data building pass. - GpuPreprocess, + /// Label for the shader that transforms and culls meshes that were + /// visible last frame. + EarlyGpuPreprocess, + /// Label for the shader that transforms and culls meshes that became + /// visible this frame. + LateGpuPreprocess, /// Label for the screen space reflections pass. ScreenSpaceReflections, + /// Label for the node that builds indirect draw parameters for meshes + /// that were visible last frame. + EarlyPrepassBuildIndirectParameters, + /// Label for the node that builds indirect draw parameters for meshes + /// that became visible this frame. 
+ LatePrepassBuildIndirectParameters, + /// Label for the node that builds indirect draw parameters for the main + /// rendering pass, containing all meshes that are visible this frame. + MainBuildIndirectParameters, + ClearIndirectParametersMetadata, } } use crate::{deferred::DeferredPbrLightingPlugin, graph::NodePbr}; use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, AssetApp, Assets, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, AssetApp, Assets, Handle}; use bevy_core_pipeline::core_3d::graph::{Core3d, Node3d}; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_render::{ alpha::AlphaMode, - camera::{ - CameraProjection, CameraUpdateSystem, OrthographicProjection, PerspectiveProjection, - Projection, - }, + camera::{sort_cameras, CameraUpdateSystem, Projection}, extract_component::ExtractComponentPlugin, extract_resource::ExtractResourcePlugin, - render_asset::prepare_assets, render_graph::RenderGraph, render_resource::Shader, sync_component::SyncComponentPlugin, - texture::GpuImage, view::VisibilitySystems, - ExtractSchedule, Render, RenderApp, RenderSet, + ExtractSchedule, Render, RenderApp, RenderDebugFlags, RenderSet, }; use bevy_transform::TransformSystem; -pub const PBR_TYPES_SHADER_HANDLE: Handle = Handle::weak_from_u128(1708015359337029744); -pub const PBR_BINDINGS_SHADER_HANDLE: Handle = Handle::weak_from_u128(5635987986427308186); -pub const UTILS_HANDLE: Handle = Handle::weak_from_u128(1900548483293416725); -pub const CLUSTERED_FORWARD_HANDLE: Handle = Handle::weak_from_u128(166852093121196815); -pub const PBR_LIGHTING_HANDLE: Handle = Handle::weak_from_u128(14170772752254856967); -pub const PBR_TRANSMISSION_HANDLE: Handle = Handle::weak_from_u128(77319684653223658032); -pub const SHADOWS_HANDLE: Handle = Handle::weak_from_u128(11350275143789590502); -pub const SHADOW_SAMPLING_HANDLE: Handle = Handle::weak_from_u128(3145627513789590502); -pub const PBR_FRAGMENT_HANDLE: Handle = 
Handle::weak_from_u128(2295049283805286543); -pub const PBR_SHADER_HANDLE: Handle = Handle::weak_from_u128(4805239651767701046); -pub const PBR_PREPASS_SHADER_HANDLE: Handle = Handle::weak_from_u128(9407115064344201137); -pub const PBR_FUNCTIONS_HANDLE: Handle = Handle::weak_from_u128(16550102964439850292); -pub const PBR_AMBIENT_HANDLE: Handle = Handle::weak_from_u128(2441520459096337034); +pub const PBR_TYPES_SHADER_HANDLE: Handle = + weak_handle!("b0330585-2335-4268-9032-a6c4c2d932f6"); +pub const PBR_BINDINGS_SHADER_HANDLE: Handle = + weak_handle!("13834c18-c7ec-4c4b-bbbd-432c3ba4cace"); +pub const UTILS_HANDLE: Handle = weak_handle!("0a32978f-2744-4608-98b6-4c3000a0638d"); +pub const CLUSTERED_FORWARD_HANDLE: Handle = + weak_handle!("f8e3b4c6-60b7-4b23-8b2e-a6b27bb4ddce"); +pub const PBR_LIGHTING_HANDLE: Handle = + weak_handle!("de0cf697-2876-49a0-aa0f-f015216f70c2"); +pub const PBR_TRANSMISSION_HANDLE: Handle = + weak_handle!("22482185-36bb-4c16-9b93-a20e6d4a2725"); +pub const SHADOWS_HANDLE: Handle = weak_handle!("ff758c5a-3927-4a15-94c3-3fbdfc362590"); +pub const SHADOW_SAMPLING_HANDLE: Handle = + weak_handle!("f6bf5843-54bc-4e39-bd9d-56bfcd77b033"); +pub const PBR_FRAGMENT_HANDLE: Handle = + weak_handle!("1bd3c10d-851b-400c-934a-db489d99cc50"); +pub const PBR_SHADER_HANDLE: Handle = weak_handle!("0eba65ed-3e5b-4752-93ed-e8097e7b0c84"); +pub const PBR_PREPASS_SHADER_HANDLE: Handle = + weak_handle!("9afeaeab-7c45-43ce-b322-4b97799eaeb9"); +pub const PBR_FUNCTIONS_HANDLE: Handle = + weak_handle!("815b8618-f557-4a96-91a5-a2fb7e249fb0"); +pub const PBR_AMBIENT_HANDLE: Handle = weak_handle!("4a90b95b-112a-4a10-9145-7590d6f14260"); pub const PARALLAX_MAPPING_SHADER_HANDLE: Handle = - Handle::weak_from_u128(17035894873630133905); + weak_handle!("6cf57d9f-222a-429a-bba4-55ba9586e1d4"); pub const VIEW_TRANSFORMATIONS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(2098345702398750291); + weak_handle!("ec047703-cde3-4876-94df-fed121544abb"); pub const 
PBR_PREPASS_FUNCTIONS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(73204817249182637); -pub const PBR_DEFERRED_TYPES_HANDLE: Handle = Handle::weak_from_u128(3221241127431430599); -pub const PBR_DEFERRED_FUNCTIONS_HANDLE: Handle = Handle::weak_from_u128(72019026415438599); -pub const RGB9E5_FUNCTIONS_HANDLE: Handle = Handle::weak_from_u128(2659010996143919192); + weak_handle!("77b1bd3a-877c-4b2c-981b-b9c68d1b774a"); +pub const PBR_DEFERRED_TYPES_HANDLE: Handle = + weak_handle!("43060da7-a717-4240-80a8-dbddd92bd25d"); +pub const PBR_DEFERRED_FUNCTIONS_HANDLE: Handle = + weak_handle!("9dc46746-c51d-45e3-a321-6a50c3963420"); +pub const RGB9E5_FUNCTIONS_HANDLE: Handle = + weak_handle!("90c19aa3-6a11-4252-8586-d9299352e94f"); const MESHLET_VISIBILITY_BUFFER_RESOLVE_SHADER_HANDLE: Handle = - Handle::weak_from_u128(2325134235233421); + weak_handle!("69187376-3dea-4d0f-b3f5-185bde63d6a2"); -pub const TONEMAPPING_LUT_TEXTURE_BINDING_INDEX: u32 = 23; -pub const TONEMAPPING_LUT_SAMPLER_BINDING_INDEX: u32 = 24; +pub const TONEMAPPING_LUT_TEXTURE_BINDING_INDEX: u32 = 26; +pub const TONEMAPPING_LUT_SAMPLER_BINDING_INDEX: u32 = 27; /// Sets up the entire PBR infrastructure of bevy. pub struct PbrPlugin { @@ -178,6 +194,8 @@ pub struct PbrPlugin { /// This requires compute shader support and so will be forcibly disabled if /// the platform doesn't support those. pub use_gpu_instance_buffer_builder: bool, + /// Debugging flags that can optionally be set when constructing the renderer. 
+ pub debug_flags: RenderDebugFlags, } impl Default for PbrPlugin { @@ -186,6 +204,7 @@ impl Default for PbrPlugin { prepass_enabled: true, add_default_deferred_lighting_plugin: true, use_gpu_instance_buffer_builder: true, + debug_flags: RenderDebugFlags::default(), } } } @@ -329,9 +348,11 @@ impl Plugin for PbrPlugin { .add_plugins(( MeshRenderPlugin { use_gpu_instance_buffer_builder: self.use_gpu_instance_buffer_builder, + debug_flags: self.debug_flags, }, MaterialPlugin:: { prepass_enabled: self.prepass_enabled, + debug_flags: self.debug_flags, ..Default::default() }, ScreenSpaceAmbientOcclusionPlugin, @@ -341,20 +362,22 @@ impl Plugin for PbrPlugin { ExtractComponentPlugin::::default(), LightmapPlugin, LightProbePlugin, - PbrProjectionPlugin::::default(), - PbrProjectionPlugin::::default(), - PbrProjectionPlugin::::default(), + PbrProjectionPlugin, GpuMeshPreprocessPlugin { use_gpu_instance_buffer_builder: self.use_gpu_instance_buffer_builder, }, VolumetricFogPlugin, ScreenSpaceReflectionsPlugin, + ClusteredDecalPlugin, )) .add_plugins(( + decal::ForwardDecalPlugin, SyncComponentPlugin::::default(), SyncComponentPlugin::::default(), SyncComponentPlugin::::default(), + ExtractComponentPlugin::::default(), )) + .add_plugins(AtmospherePlugin) .configure_sets( PostUpdate, ( @@ -417,7 +440,8 @@ impl Plugin for PbrPlugin { // NOTE: This MUST be scheduled AFTER the core renderer visibility check // because that resets entity `ViewVisibility` for the first view // which would override any results from this otherwise - .after(VisibilitySystems::CheckVisibility), + .after(VisibilitySystems::CheckVisibility) + .before(VisibilitySystems::MarkNewlyHiddenEntitiesInvisible), ), ); @@ -442,17 +466,25 @@ impl Plugin for PbrPlugin { // Extract the required data from the main world render_app - .add_systems(ExtractSchedule, (extract_clusters, extract_lights)) + .add_systems( + ExtractSchedule, + ( + extract_clusters, + extract_lights, + late_sweep_material_instances, + ), + ) 
.add_systems( Render, ( prepare_lights .in_set(RenderSet::ManageViews) - .after(prepare_assets::), + .after(sort_cameras), prepare_clusters.in_set(RenderSet::PrepareResources), ), ) - .init_resource::(); + .init_resource::() + .init_resource::(); render_app.world_mut().add_observer(add_light_view_entities); render_app @@ -460,11 +492,17 @@ impl Plugin for PbrPlugin { .add_observer(remove_light_view_entities); render_app.world_mut().add_observer(extracted_light_removed); - let shadow_pass_node = ShadowPassNode::new(render_app.world_mut()); + let early_shadow_pass_node = EarlyShadowPassNode::from_world(render_app.world_mut()); + let late_shadow_pass_node = LateShadowPassNode::from_world(render_app.world_mut()); let mut graph = render_app.world_mut().resource_mut::(); let draw_3d_graph = graph.get_sub_graph_mut(Core3d).unwrap(); - draw_3d_graph.add_node(NodePbr::ShadowPass, shadow_pass_node); - draw_3d_graph.add_node_edge(NodePbr::ShadowPass, Node3d::StartMainPass); + draw_3d_graph.add_node(NodePbr::EarlyShadowPass, early_shadow_pass_node); + draw_3d_graph.add_node(NodePbr::LateShadowPass, late_shadow_pass_node); + draw_3d_graph.add_node_edges(( + NodePbr::EarlyShadowPass, + NodePbr::LateShadowPass, + Node3d::StartMainPass, + )); } fn finish(&self, app: &mut App) { @@ -480,20 +518,16 @@ impl Plugin for PbrPlugin { } } -/// [`CameraProjection`] specific PBR functionality. -pub struct PbrProjectionPlugin(PhantomData); -impl Plugin for PbrProjectionPlugin { +/// Camera projection PBR functionality. 
+#[derive(Default)] +pub struct PbrProjectionPlugin; +impl Plugin for PbrProjectionPlugin { fn build(&self, app: &mut App) { app.add_systems( PostUpdate, - build_directional_light_cascades:: + build_directional_light_cascades .in_set(SimulationLightSystems::UpdateDirectionalLightCascades) .after(clear_directional_light_cascades), ); } } -impl Default for PbrProjectionPlugin { - fn default() -> Self { - Self(Default::default()) - } -} diff --git a/crates/bevy_pbr/src/light/ambient_light.rs b/crates/bevy_pbr/src/light/ambient_light.rs index 068e445f3b496..db255722b3aec 100644 --- a/crates/bevy_pbr/src/light/ambient_light.rs +++ b/crates/bevy_pbr/src/light/ambient_light.rs @@ -4,6 +4,8 @@ use super::*; /// /// This resource is inserted by the [`PbrPlugin`] and by default it is set to a low ambient light. /// +/// It can also be added to a camera to override the resource (or default) ambient for that camera only. +/// /// # Examples /// /// Make ambient light slightly brighter: @@ -15,8 +17,9 @@ use super::*; /// ambient_light.brightness = 100.0; /// } /// ``` -#[derive(Resource, Clone, Debug, ExtractResource, Reflect)] -#[reflect(Resource, Debug, Default)] +#[derive(Resource, Component, Clone, Debug, ExtractResource, ExtractComponent, Reflect)] +#[reflect(Resource, Component, Debug, Default, Clone)] +#[require(Camera)] pub struct AmbientLight { pub color: Color, diff --git a/crates/bevy_pbr/src/light/directional_light.rs b/crates/bevy_pbr/src/light/directional_light.rs index 1eb17ea9abd25..a5798fdde7f0f 100644 --- a/crates/bevy_pbr/src/light/directional_light.rs +++ b/crates/bevy_pbr/src/light/directional_light.rs @@ -41,16 +41,9 @@ use super::*; /// To modify the cascade setup, such as the number of cascades or the maximum shadow distance, /// change the [`CascadeShadowConfig`] component of the entity with the [`DirectionalLight`]. 
/// -/// To control the resolution of the shadow maps, use the [`DirectionalLightShadowMap`] resource: -/// -/// ``` -/// # use bevy_app::prelude::*; -/// # use bevy_pbr::DirectionalLightShadowMap; -/// App::new() -/// .insert_resource(DirectionalLightShadowMap { size: 2048 }); -/// ``` +/// To control the resolution of the shadow maps, use the [`DirectionalLightShadowMap`] resource. #[derive(Component, Debug, Clone, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require( Cascades, CascadesFrusta, diff --git a/crates/bevy_pbr/src/light/mod.rs b/crates/bevy_pbr/src/light/mod.rs index 87543e1377b72..91ea9cddd3b4c 100644 --- a/crates/bevy_pbr/src/light/mod.rs +++ b/crates/bevy_pbr/src/light/mod.rs @@ -7,14 +7,14 @@ use bevy_ecs::{ use bevy_math::{ops, Mat4, Vec3A, Vec4}; use bevy_reflect::prelude::*; use bevy_render::{ - camera::{Camera, CameraProjection}, + camera::{Camera, CameraProjection, Projection}, extract_component::ExtractComponent, extract_resource::ExtractResource, mesh::Mesh3d, primitives::{Aabb, CascadesFrusta, CubemapFrusta, Frustum, Sphere}, view::{ - InheritedVisibility, NoFrustumCulling, RenderLayers, ViewVisibility, VisibilityClass, - VisibilityRange, VisibleEntityRanges, + InheritedVisibility, NoFrustumCulling, PreviousVisibleEntities, RenderLayers, + ViewVisibility, VisibilityClass, VisibilityRange, VisibleEntityRanges, }, }; use bevy_transform::components::{GlobalTransform, Transform}; @@ -78,7 +78,7 @@ pub mod light_consts { pub const OFFICE: f32 = 320.; /// The amount of light (lux) during sunrise or sunset on a clear day. pub const CLEAR_SUNRISE: f32 = 400.; - /// The amount of light (lux) on a overcast day; typical TV studio lighting + /// The amount of light (lux) on an overcast day; typical TV studio lighting pub const OVERCAST_DAY: f32 = 1000.; /// The amount of light (lux) from ambient daylight (not direct sunlight). 
pub const AMBIENT_DAYLIGHT: f32 = 10_000.; @@ -86,12 +86,25 @@ pub mod light_consts { pub const FULL_DAYLIGHT: f32 = 20_000.; /// The amount of light (lux) in direct sunlight. pub const DIRECT_SUNLIGHT: f32 = 100_000.; + /// The amount of light (lux) of raw sunlight, not filtered by the atmosphere. + pub const RAW_SUNLIGHT: f32 = 130_000.; } } +/// Controls the resolution of [`PointLight`] shadow maps. +/// +/// ``` +/// # use bevy_app::prelude::*; +/// # use bevy_pbr::PointLightShadowMap; +/// App::new() +/// .insert_resource(PointLightShadowMap { size: 2048 }); +/// ``` #[derive(Resource, Clone, Debug, Reflect)] -#[reflect(Resource, Debug, Default)] +#[reflect(Resource, Debug, Default, Clone)] pub struct PointLightShadowMap { + /// The width and height of each of the 6 faces of the cubemap. + /// + /// Defaults to `1024`. pub size: usize, } @@ -106,9 +119,19 @@ impl Default for PointLightShadowMap { pub type WithLight = Or<(With, With, With)>; /// Controls the resolution of [`DirectionalLight`] shadow maps. +/// +/// ``` +/// # use bevy_app::prelude::*; +/// # use bevy_pbr::DirectionalLightShadowMap; +/// App::new() +/// .insert_resource(DirectionalLightShadowMap { size: 4096 }); +/// ``` #[derive(Resource, Clone, Debug, Reflect)] -#[reflect(Resource, Debug, Default)] +#[reflect(Resource, Debug, Default, Clone)] pub struct DirectionalLightShadowMap { + /// The width and height of each cascade. + /// + /// Defaults to `2048`. pub size: usize, } @@ -132,7 +155,7 @@ impl Default for DirectionalLightShadowMap { /// }.into(); /// ``` #[derive(Component, Clone, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct CascadeShadowConfig { /// The (positive) distance to the far boundary of each cascade.
pub bounds: Vec, @@ -244,27 +267,25 @@ impl CascadeShadowConfigBuilder { impl Default for CascadeShadowConfigBuilder { fn default() -> Self { - if cfg!(all( - feature = "webgl", - target_arch = "wasm32", - not(feature = "webgpu") - )) { - // Currently only support one cascade in webgl. - Self { - num_cascades: 1, - minimum_distance: 0.1, - maximum_distance: 100.0, - first_cascade_far_bound: 5.0, - overlap_proportion: 0.2, - } - } else { - Self { - num_cascades: 4, - minimum_distance: 0.1, - maximum_distance: 1000.0, - first_cascade_far_bound: 5.0, - overlap_proportion: 0.2, - } + // The defaults are chosen to be similar to Unity, Unreal, and Godot. + // Unity: first cascade far bound = 10.05, maximum distance = 150.0 + // Unreal Engine 5: maximum distance = 200.0 + // Godot: first cascade far bound = 10.0, maximum distance = 100.0 + Self { + // Currently only support one cascade in WebGL 2. + num_cascades: if cfg!(all( + feature = "webgl", + target_arch = "wasm32", + not(feature = "webgpu") + )) { + 1 + } else { + 4 + }, + minimum_distance: 0.1, + maximum_distance: 150.0, + first_cascade_far_bound: 10.0, + overlap_proportion: 0.2, } } } @@ -276,13 +297,14 @@ impl From for CascadeShadowConfig { } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct Cascades { /// Map from a view to the configuration of each of its [`Cascade`]s. pub(crate) cascades: EntityHashMap>, } #[derive(Clone, Debug, Default, Reflect)] +#[reflect(Clone, Default)] pub struct Cascade { /// The transform of the light, i.e. the view to world matrix.
pub(crate) world_from_cascade: Mat4, @@ -305,9 +327,9 @@ pub fn clear_directional_light_cascades(mut lights: Query<(&DirectionalLight, &m } } -pub fn build_directional_light_cascades( +pub fn build_directional_light_cascades( directional_light_shadow_map: Res, - views: Query<(Entity, &GlobalTransform, &P, &Camera)>, + views: Query<(Entity, &GlobalTransform, &Projection, &Camera)>, mut lights: Query<( &GlobalTransform, &DirectionalLight, @@ -472,7 +494,7 @@ pub struct TransmittedShadowReceiver; /// The different modes use different approaches to /// [Percentage Closer Filtering](https://developer.nvidia.com/gpugems/gpugems/part-ii-lighting-and-shadows/chapter-11-shadow-map-antialiasing). #[derive(Debug, Component, ExtractComponent, Reflect, Clone, Copy, PartialEq, Eq, Default)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub enum ShadowFilteringMethod { /// Hardware 2x2. /// @@ -491,8 +513,7 @@ pub enum ShadowFilteringMethod { Gaussian, /// A randomized filter that varies over time, good when TAA is in use. /// - /// Good quality when used with - /// [`TemporalAntiAliasing`](bevy_core_pipeline::experimental::taa::TemporalAntiAliasing) + /// Good quality when used with `TemporalAntiAliasing` /// and good performance. /// /// For directional and spot lights, this uses a [method by Jorge Jimenez for @@ -522,24 +543,6 @@ pub enum SimulationLightSystems { CheckLightVisibility, } -// Sort lights by -// - those with volumetric (and shadows) enabled first, so that the volumetric -// lighting pass can quickly find the volumetric lights; -// - then those with shadows enabled second, so that the index can be used to -// render at most `directional_light_shadow_maps_count` directional light -// shadows; -// - then by entity as a stable key to ensure that a consistent set of lights -// are chosen if the light count limit is exceeded. 
-pub(crate) fn directional_light_order( - (entity_1, volumetric_1, shadows_enabled_1): (&Entity, &bool, &bool), - (entity_2, volumetric_2, shadows_enabled_2): (&Entity, &bool, &bool), -) -> core::cmp::Ordering { - volumetric_2 - .cmp(volumetric_1) // volumetric before shadows - .then_with(|| shadows_enabled_2.cmp(shadows_enabled_1)) // shadow casters before non-casters - .then_with(|| entity_1.cmp(entity_2)) // stable -} - pub fn update_directional_light_frusta( mut views: Query< ( @@ -581,9 +584,13 @@ pub fn update_directional_light_frusta( // NOTE: Run this after assign_lights_to_clusters! pub fn update_point_light_frusta( global_lights: Res, - mut views: Query< - (Entity, &GlobalTransform, &PointLight, &mut CubemapFrusta), - Or<(Changed, Changed)>, + mut views: Query<(Entity, &GlobalTransform, &PointLight, &mut CubemapFrusta)>, + changed_lights: Query< + Entity, + ( + With, + Or<(Changed, Changed)>, + ), >, ) { let view_rotations = CUBE_MAP_FACES @@ -592,6 +599,12 @@ pub fn update_point_light_frusta( .collect::>(); for (entity, transform, point_light, mut cubemap_frusta) in &mut views { + // If this light hasn't changed, and neither has the set of global_lights, + // then we can skip this calculation. + if !global_lights.is_changed() && !changed_lights.contains(entity) { + continue; + } + // The frusta are used for culling meshes to the light for shadow mapping // so if shadow mapping is disabled for this light, then the frusta are // not needed. 
@@ -832,17 +845,26 @@ pub fn check_dir_light_mesh_visibility( // TODO: use resource to avoid unnecessary memory alloc let mut defer_queue = core::mem::take(defer_visible_entities_queue.deref_mut()); commands.queue(move |world: &mut World| { - let mut query = world.query::<&mut ViewVisibility>(); - for entities in defer_queue.iter_mut() { - let mut iter = query.iter_many_mut(world, entities.iter()); - while let Some(mut view_visibility) = iter.fetch_next() { - view_visibility.set(); - } - } + world.resource_scope::( + |world, mut previous_visible_entities| { + let mut query = world.query::<(Entity, &mut ViewVisibility)>(); + for entities in defer_queue.iter_mut() { + let mut iter = query.iter_many_mut(world, entities.iter()); + while let Some((entity, mut view_visibility)) = iter.fetch_next() { + if !**view_visibility { + view_visibility.set(); + } + + // Remove any entities that were discovered to be + // visible from the `PreviousVisibleEntities` resource. + previous_visible_entities.remove(&entity); + } + } + }, + ); }); } -#[allow(clippy::too_many_arguments)] pub fn check_point_light_mesh_visibility( visible_point_lights: Query<&VisibleClusterableObjects>, mut point_lights: Query<( @@ -877,6 +899,7 @@ pub fn check_point_light_mesh_visibility( ), >, visible_entity_ranges: Option>, + mut previous_visible_entities: ResMut, mut cubemap_visible_entities_queue: Local; 6]>>, mut spot_visible_entities_queue: Local>>, mut checked_lights: Local, @@ -959,12 +982,16 @@ pub fn check_point_light_mesh_visibility( if has_no_frustum_culling || frustum.intersects_obb(aabb, &model_to_world, true, true) { - view_visibility.set(); + if !**view_visibility { + view_visibility.set(); + } visible_entities.push(entity); } } } else { - view_visibility.set(); + if !**view_visibility { + view_visibility.set(); + } for visible_entities in cubemap_visible_entities_local_queue.iter_mut() { visible_entities.push(entity); @@ -974,10 +1001,17 @@ pub fn check_point_light_mesh_visibility( ); for 
entities in cubemap_visible_entities_queue.iter_mut() { - cubemap_visible_entities - .iter_mut() - .zip(entities.iter_mut()) - .for_each(|(dst, source)| dst.entities.append(source)); + for (dst, source) in + cubemap_visible_entities.iter_mut().zip(entities.iter_mut()) + { + // Remove any entities that were discovered to be + // visible from the `PreviousVisibleEntities` resource. + for entity in source.iter() { + previous_visible_entities.remove(entity); + } + + dst.entities.append(source); + } } for visible_entities in cubemap_visible_entities.iter_mut() { @@ -1044,11 +1078,15 @@ pub fn check_point_light_mesh_visibility( if has_no_frustum_culling || frustum.intersects_obb(aabb, &model_to_world, true, true) { - view_visibility.set(); + if !**view_visibility { + view_visibility.set(); + } spot_visible_entities_local_queue.push(entity); } } else { - view_visibility.set(); + if !**view_visibility { + view_visibility.set(); + } spot_visible_entities_local_queue.push(entity); } }, @@ -1056,6 +1094,12 @@ pub fn check_point_light_mesh_visibility( for entities in spot_visible_entities_queue.iter_mut() { visible_entities.append(entities); + + // Remove any entities that were discovered to be visible + // from the `PreviousVisibleEntities` resource. + for entity in entities { + previous_visible_entities.remove(entity); + } } shrink_entities(visible_entities.deref_mut()); diff --git a/crates/bevy_pbr/src/light/point_light.rs b/crates/bevy_pbr/src/light/point_light.rs index 800c7b9bd029a..f2e4224d28f94 100644 --- a/crates/bevy_pbr/src/light/point_light.rs +++ b/crates/bevy_pbr/src/light/point_light.rs @@ -19,8 +19,14 @@ use super::*; /// | 4000 | 300 | | 75-100 | 40.5 | /// /// Source: [Wikipedia](https://en.wikipedia.org/wiki/Lumen_(unit)#Lighting) +/// +/// ## Shadows +/// +/// To enable shadows, set the `shadows_enabled` property to `true`. +/// +/// To control the resolution of the shadow maps, use the [`PointLightShadowMap`] resource. 
#[derive(Component, Debug, Clone, Copy, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require( CubemapFrusta, CubemapVisibleEntities, diff --git a/crates/bevy_pbr/src/light/spot_light.rs b/crates/bevy_pbr/src/light/spot_light.rs index 08160a8cfa0a5..a7cfe1b817407 100644 --- a/crates/bevy_pbr/src/light/spot_light.rs +++ b/crates/bevy_pbr/src/light/spot_light.rs @@ -8,7 +8,7 @@ use super::*; /// shines light only in a given direction. The direction is taken from /// the transform, and can be specified with [`Transform::looking_at`](Transform::looking_at). #[derive(Component, Debug, Clone, Copy, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(Frustum, VisibleMeshEntities, Transform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct SpotLight { diff --git a/crates/bevy_pbr/src/light_probe/environment_map.rs b/crates/bevy_pbr/src/light_probe/environment_map.rs index 34a673582f73d..52ccaef432ef8 100644 --- a/crates/bevy_pbr/src/light_probe/environment_map.rs +++ b/crates/bevy_pbr/src/light_probe/environment_map.rs @@ -15,7 +15,7 @@ //! environment maps are added to every point of the scene, including //! interior enclosed areas. //! -//! 2. If attached to a [`LightProbe`], environment maps represent the immediate +//! 2. If attached to a [`crate::LightProbe`], environment maps represent the immediate //! surroundings of a specific location in the scene. These types of //! environment maps are known as *reflection probes*. //! @@ -44,19 +44,15 @@ //! //! 
[several pre-filtered environment maps]: https://github.com/KhronosGroup/glTF-Sample-Environments -#![expect(deprecated)] - -use bevy_asset::{AssetId, Handle}; +use bevy_asset::{weak_handle, AssetId, Handle}; use bevy_ecs::{ - bundle::Bundle, component::Component, query::QueryItem, reflect::ReflectComponent, - system::lifetimeless::Read, + component::Component, query::QueryItem, reflect::ReflectComponent, system::lifetimeless::Read, }; use bevy_image::Image; use bevy_math::Quat; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ extract_instances::ExtractInstance, - prelude::SpatialBundle, render_asset::RenderAssets, render_resource::{ binding_types::{self, uniform_buffer}, @@ -70,7 +66,7 @@ use bevy_render::{ use core::{num::NonZero, ops::Deref}; use crate::{ - add_cubemap_texture_view, binding_arrays_are_usable, EnvironmentMapUniform, LightProbe, + add_cubemap_texture_view, binding_arrays_are_usable, EnvironmentMapUniform, MAX_VIEW_LIGHT_PROBES, }; @@ -78,14 +74,14 @@ use super::{LightProbeComponent, RenderViewLightProbes}; /// A handle to the environment map helper shader. pub const ENVIRONMENT_MAP_SHADER_HANDLE: Handle = - Handle::weak_from_u128(154476556247605696); + weak_handle!("d38c4ec4-e84c-468f-b485-bf44745db937"); /// A pair of cubemap textures that represent the surroundings of a specific /// area in space. /// /// See [`crate::environment_map`] for detailed information. #[derive(Clone, Component, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct EnvironmentMapLight { /// The blurry image that represents diffuse radiance surrounding a region. pub diffuse_map: Handle, @@ -142,26 +138,6 @@ pub struct EnvironmentMapIds { pub(crate) specular: AssetId, } -/// A bundle that contains everything needed to make an entity a reflection -/// probe. -/// -/// A reflection probe is a type of environment map that specifies the light -/// surrounding a region in space. 
For more information, see -/// [`crate::environment_map`]. -#[derive(Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `LightProbe` and `EnvironmentMapLight` components instead. Inserting them will now also insert the other components required by them automatically." -)] -pub struct ReflectionProbeBundle { - /// Contains a transform that specifies the position of this reflection probe in space. - pub spatial: SpatialBundle, - /// Marks this environment map as a light probe. - pub light_probe: LightProbe, - /// The cubemaps that make up this environment map. - pub environment_map: EnvironmentMapLight, -} - /// All the bind group entries necessary for PBR shaders to access the /// environment maps exposed to a view. pub(crate) enum RenderViewEnvironmentMapBindGroupEntries<'a> { diff --git a/crates/bevy_pbr/src/light_probe/irradiance_volume.rs b/crates/bevy_pbr/src/light_probe/irradiance_volume.rs index b1e974711d882..05dd51c3795bc 100644 --- a/crates/bevy_pbr/src/light_probe/irradiance_volume.rs +++ b/crates/bevy_pbr/src/light_probe/irradiance_volume.rs @@ -81,17 +81,17 @@ //! less ideal for this use case: //! //! 1. The level 1 spherical harmonic coefficients can be negative. That -//! prevents the use of the efficient [RGB9E5 texture format], which only -//! encodes unsigned floating point numbers, and forces the use of the -//! less-efficient [RGBA16F format] if hardware interpolation is desired. +//! prevents the use of the efficient [RGB9E5 texture format], which only +//! encodes unsigned floating point numbers, and forces the use of the +//! less-efficient [RGBA16F format] if hardware interpolation is desired. //! //! 2. As an alternative to RGBA16F, level 1 spherical harmonics can be -//! normalized and scaled to the SH0 base color, as [Frostbite] does. This -//! allows them to be packed in standard LDR RGBA8 textures. However, this -//! prevents the use of hardware trilinear filtering, as the nonuniform scale -//! 
factor means that hardware interpolation no longer produces correct results. -//! The 8 texture fetches needed to interpolate between voxels can be upwards of -//! twice as slow as the hardware interpolation. +//! normalized and scaled to the SH0 base color, as [Frostbite] does. This +//! allows them to be packed in standard LDR RGBA8 textures. However, this +//! prevents the use of hardware trilinear filtering, as the nonuniform scale +//! factor means that hardware interpolation no longer produces correct results. +//! The 8 texture fetches needed to interpolate between voxels can be upwards of +//! twice as slow as the hardware interpolation. //! //! The following chart summarizes the costs and benefits of ambient cubes, //! level 1 spherical harmonics, and level 2 spherical harmonics: @@ -146,7 +146,7 @@ use bevy_render::{ use bevy_utils::default; use core::{num::NonZero, ops::Deref}; -use bevy_asset::{AssetId, Handle}; +use bevy_asset::{weak_handle, AssetId, Handle}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use crate::{ @@ -157,7 +157,7 @@ use crate::{ use super::LightProbeComponent; pub const IRRADIANCE_VOLUME_SHADER_HANDLE: Handle = - Handle::weak_from_u128(160299515939076705258408299184317675488); + weak_handle!("7fc7dcd8-3f90-4124-b093-be0e53e08205"); /// On WebGL and WebGPU, we must disable irradiance volumes, as otherwise we can /// overflow the number of texture bindings when deferred rendering is in use @@ -168,7 +168,7 @@ pub(crate) const IRRADIANCE_VOLUMES_ARE_USABLE: bool = cfg!(not(target_arch = "w /// /// See [`crate::irradiance_volume`] for detailed information. #[derive(Clone, Reflect, Component, Debug)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct IrradianceVolume { /// The 3D texture that represents the ambient cubes, encoded in the format /// described in [`crate::irradiance_volume`]. 
@@ -251,7 +251,7 @@ impl<'a> RenderViewIrradianceVolumeBindGroupEntries<'a> { fallback_image, ) } else { - RenderViewIrradianceVolumeBindGroupEntries::get_single( + RenderViewIrradianceVolumeBindGroupEntries::single( render_view_irradiance_volumes, images, fallback_image, @@ -295,7 +295,7 @@ impl<'a> RenderViewIrradianceVolumeBindGroupEntries<'a> { /// Looks up and returns the bindings for any irradiance volumes visible in /// the view, as well as the sampler. This is the version used when binding /// arrays aren't available on the current platform. - fn get_single( + fn single( render_view_irradiance_volumes: Option<&RenderViewLightProbes>, images: &'a RenderAssets, fallback_image: &'a FallbackImage, diff --git a/crates/bevy_pbr/src/light_probe/light_probe.wgsl b/crates/bevy_pbr/src/light_probe/light_probe.wgsl index f98759c293b9c..16a211258a379 100644 --- a/crates/bevy_pbr/src/light_probe/light_probe.wgsl +++ b/crates/bevy_pbr/src/light_probe/light_probe.wgsl @@ -52,7 +52,7 @@ fn query_light_probe( var end_offset: u32; if is_irradiance_volume { start_offset = (*clusterable_object_index_ranges).first_irradiance_volume_index_offset; - end_offset = (*clusterable_object_index_ranges).last_clusterable_object_index_offset; + end_offset = (*clusterable_object_index_ranges).first_decal_offset; } else { start_offset = (*clusterable_object_index_ranges).first_reflection_probe_index_offset; end_offset = (*clusterable_object_index_ranges).first_irradiance_volume_index_offset; diff --git a/crates/bevy_pbr/src/light_probe/mod.rs b/crates/bevy_pbr/src/light_probe/mod.rs index 39c33f9cbf343..ebfc7c7e7c614 100644 --- a/crates/bevy_pbr/src/light_probe/mod.rs +++ b/crates/bevy_pbr/src/light_probe/mod.rs @@ -1,19 +1,21 @@ //! Light probes for baked global illumination. 
use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, AssetId, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, AssetId, Handle}; use bevy_core_pipeline::core_3d::Camera3d; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - component::{require, Component}, + component::Component, entity::Entity, query::With, reflect::ReflectComponent, - schedule::IntoSystemConfigs, - system::{Commands, Local, Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs, + system::{Commands, Local, Query, Res, ResMut}, }; use bevy_image::Image; use bevy_math::{Affine3A, FloatOrd, Mat4, Vec3A, Vec4}; +use bevy_platform::collections::HashMap; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ extract_instances::ExtractInstancesPlugin, @@ -28,7 +30,7 @@ use bevy_render::{ Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; use bevy_transform::{components::Transform, prelude::GlobalTransform}; -use bevy_utils::{tracing::error, HashMap}; +use tracing::error; use core::{hash::Hash, ops::Deref}; @@ -41,7 +43,8 @@ use crate::{ use self::irradiance_volume::IrradianceVolume; -pub const LIGHT_PROBE_SHADER_HANDLE: Handle = Handle::weak_from_u128(8954249792581071582); +pub const LIGHT_PROBE_SHADER_HANDLE: Handle = + weak_handle!("e80a2ae6-1c5a-4d9a-a852-d66ff0e6bf7f"); pub mod environment_map; pub mod irradiance_volume; @@ -103,7 +106,7 @@ pub struct LightProbePlugin; /// specific technique but rather to a class of techniques. Developers familiar /// with other engines should be aware of this terminology difference. #[derive(Component, Debug, Clone, Copy, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(Transform, Visibility)] pub struct LightProbe; @@ -184,7 +187,6 @@ pub struct ViewLightProbesUniformOffset(u32); /// This information is parameterized by the [`LightProbeComponent`] type. 
This /// will either be [`EnvironmentMapLight`] for reflection probes or /// [`IrradianceVolume`] for irradiance volumes. -#[allow(dead_code)] struct LightProbeInfo where C: LightProbeComponent, @@ -767,22 +769,22 @@ pub(crate) fn add_cubemap_texture_view<'a>( /// (a.k.a. bindless textures). This function checks for these pitfalls: /// /// 1. If GLSL support is enabled at the feature level, then in debug mode -/// `naga_oil` will attempt to compile all shader modules under GLSL to check -/// validity of names, even if GLSL isn't actually used. This will cause a crash -/// if binding arrays are enabled, because binding arrays are currently -/// unimplemented in the GLSL backend of Naga. Therefore, we disable binding -/// arrays if the `shader_format_glsl` feature is present. +/// `naga_oil` will attempt to compile all shader modules under GLSL to check +/// validity of names, even if GLSL isn't actually used. This will cause a crash +/// if binding arrays are enabled, because binding arrays are currently +/// unimplemented in the GLSL backend of Naga. Therefore, we disable binding +/// arrays if the `shader_format_glsl` feature is present. /// /// 2. If there aren't enough texture bindings available to accommodate all the -/// binding arrays, the driver will panic. So we also bail out if there aren't -/// enough texture bindings available in the fragment shader. +/// binding arrays, the driver will panic. So we also bail out if there aren't +/// enough texture bindings available in the fragment shader. /// /// 3. If binding arrays aren't supported on the hardware, then we obviously /// can't use them. Adreno <= 610 claims to support bindless, but seems to be /// too buggy to be usable. /// /// 4. If binding arrays are supported on the hardware, but they can only be -/// accessed by uniform indices, that's not good enough, and we bail out. +/// accessed by uniform indices, that's not good enough, and we bail out. 
/// /// If binding arrays aren't usable, we disable reflection probes and limit the /// number of irradiance volumes in the scene to 1. diff --git a/crates/bevy_pbr/src/lightmap/lightmap.wgsl b/crates/bevy_pbr/src/lightmap/lightmap.wgsl index e58ec96870263..da10ece9b1c01 100644 --- a/crates/bevy_pbr/src/lightmap/lightmap.wgsl +++ b/crates/bevy_pbr/src/lightmap/lightmap.wgsl @@ -13,33 +13,87 @@ // Samples the lightmap, if any, and returns indirect illumination from it. fn lightmap(uv: vec2, exposure: f32, instance_index: u32) -> vec3 { let packed_uv_rect = mesh[instance_index].lightmap_uv_rect; - let uv_rect = vec4(vec4( - packed_uv_rect.x & 0xffffu, - packed_uv_rect.x >> 16u, - packed_uv_rect.y & 0xffffu, - packed_uv_rect.y >> 16u)) / 65535.0; - + let uv_rect = vec4( + unpack2x16unorm(packed_uv_rect.x), + unpack2x16unorm(packed_uv_rect.y), + ); let lightmap_uv = mix(uv_rect.xy, uv_rect.zw, uv); + let lightmap_slot = mesh[instance_index].material_and_lightmap_bind_group_slot >> 16u; + + // Bicubic 4-tap + // https://developer.nvidia.com/gpugems/gpugems2/part-iii-high-quality-rendering/chapter-20-fast-third-order-texture-filtering + // https://advances.realtimerendering.com/s2021/jpatry_advances2021/index.html#/111/0/2 +#ifdef LIGHTMAP_BICUBIC_SAMPLING + let texture_size = vec2(lightmap_size(lightmap_slot)); + let texel_size = 1.0 / texture_size; + let puv = lightmap_uv * texture_size + 0.5; + let iuv = floor(puv); + let fuv = fract(puv); + let g0x = g0(fuv.x); + let g1x = g1(fuv.x); + let h0x = h0_approx(fuv.x); + let h1x = h1_approx(fuv.x); + let h0y = h0_approx(fuv.y); + let h1y = h1_approx(fuv.y); + let p0 = (vec2(iuv.x + h0x, iuv.y + h0y) - 0.5) * texel_size; + let p1 = (vec2(iuv.x + h1x, iuv.y + h0y) - 0.5) * texel_size; + let p2 = (vec2(iuv.x + h0x, iuv.y + h1y) - 0.5) * texel_size; + let p3 = (vec2(iuv.x + h1x, iuv.y + h1y) - 0.5) * texel_size; + let color = g0(fuv.y) * (g0x * sample(p0, lightmap_slot) + g1x * sample(p1, lightmap_slot)) + g1(fuv.y) * (g0x * 
sample(p2, lightmap_slot) + g1x * sample(p3, lightmap_slot)); +#else + let color = sample(lightmap_uv, lightmap_slot); +#endif + + return color * exposure; +} + +fn lightmap_size(lightmap_slot: u32) -> vec2 { +#ifdef MULTIPLE_LIGHTMAPS_IN_ARRAY + return textureDimensions(lightmaps_textures[lightmap_slot]); +#else + return textureDimensions(lightmaps_texture); +#endif +} +fn sample(uv: vec2, lightmap_slot: u32) -> vec3 { // Mipmapping lightmaps is usually a bad idea due to leaking across UV // islands, so there's no harm in using mip level 0 and it lets us avoid // control flow uniformity problems. - // - // TODO(pcwalton): Consider bicubic filtering. #ifdef MULTIPLE_LIGHTMAPS_IN_ARRAY - let lightmap_slot = mesh[instance_index].material_and_lightmap_bind_group_slot >> 16u; - return textureSampleLevel( - lightmaps_textures[lightmap_slot], - lightmaps_samplers[lightmap_slot], - lightmap_uv, - 0.0 - ).rgb * exposure; -#else // MULTIPLE_LIGHTMAPS_IN_ARRAY - return textureSampleLevel( - lightmaps_texture, - lightmaps_sampler, - lightmap_uv, - 0.0 - ).rgb * exposure; -#endif // MULTIPLE_LIGHTMAPS_IN_ARRAY + return textureSampleLevel(lightmaps_textures[lightmap_slot], lightmaps_samplers[lightmap_slot], uv, 0.0).rgb; +#else + return textureSampleLevel(lightmaps_texture, lightmaps_sampler, uv, 0.0).rgb; +#endif +} + +fn w0(a: f32) -> f32 { + return (1.0 / 6.0) * (a * (a * (-a + 3.0) - 3.0) + 1.0); +} + +fn w1(a: f32) -> f32 { + return (1.0 / 6.0) * (a * a * (3.0 * a - 6.0) + 4.0); +} + +fn w2(a: f32) -> f32 { + return (1.0 / 6.0) * (a * (a * (-3.0 * a + 3.0) + 3.0) + 1.0); +} + +fn w3(a: f32) -> f32 { + return (1.0 / 6.0) * (a * a * a); +} + +fn g0(a: f32) -> f32 { + return w0(a) + w1(a); +} + +fn g1(a: f32) -> f32 { + return w2(a) + w3(a); +} + +fn h0_approx(a: f32) -> f32 { + return -0.2 - a * (0.24 * a - 0.44); +} + +fn h1_approx(a: f32) -> f32 { + return 1.0 + a * (0.24 * a - 0.04); } diff --git a/crates/bevy_pbr/src/lightmap/mod.rs b/crates/bevy_pbr/src/lightmap/mod.rs 
index fc6f973cb12d5..4175d6ff61f4f 100644 --- a/crates/bevy_pbr/src/lightmap/mod.rs +++ b/crates/bevy_pbr/src/lightmap/mod.rs @@ -32,7 +32,7 @@ //! [`bevy-baked-gi`]: https://github.com/pcwalton/bevy-baked-gi use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, AssetId, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, AssetId, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ component::Component, @@ -40,12 +40,14 @@ use bevy_ecs::{ query::{Changed, Or}, reflect::ReflectComponent, removal_detection::RemovedComponents, - schedule::IntoSystemConfigs, - system::{Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs, + system::{Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::Image; use bevy_math::{uvec2, vec4, Rect, UVec2}; +use bevy_platform::collections::HashSet; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ render_asset::RenderAssets, @@ -57,15 +59,16 @@ use bevy_render::{ Extract, ExtractSchedule, RenderApp, }; use bevy_render::{renderer::RenderDevice, sync_world::MainEntityHashMap}; -use bevy_utils::{default, tracing::error, HashSet}; +use bevy_utils::default; use fixedbitset::FixedBitSet; use nonmax::{NonMaxU16, NonMaxU32}; +use tracing::error; use crate::{binding_arrays_are_usable, ExtractMeshesSet}; /// The ID of the lightmap shader. pub const LIGHTMAP_SHADER_HANDLE: Handle = - Handle::weak_from_u128(285484768317531991932943596447919767152); + weak_handle!("fc28203f-f258-47f3-973c-ce7d1dd70e59"); /// The number of lightmaps that we store in a single slab, if bindless textures /// are in use. @@ -85,7 +88,7 @@ pub struct LightmapPlugin; /// has a second UV layer ([`ATTRIBUTE_UV_1`](bevy_render::mesh::Mesh::ATTRIBUTE_UV_1)), /// then the lightmap will render using those UVs. #[derive(Component, Clone, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct Lightmap { /// The lightmap texture. 
pub image: Handle, @@ -99,6 +102,13 @@ pub struct Lightmap { /// This field allows lightmaps for a variety of meshes to be packed into a /// single atlas. pub uv_rect: Rect, + + /// Whether bicubic sampling should be used for sampling this lightmap. + /// + /// Bicubic sampling is higher quality, but slower, and may lead to light leaks. + /// + /// If true, the lightmap texture's sampler must be set to [`bevy_image::ImageSampler::linear`]. + pub bicubic_sampling: bool, } /// Lightmap data stored in the render world. @@ -106,9 +116,6 @@ pub struct Lightmap { /// There is one of these per visible lightmapped mesh instance. #[derive(Debug)] pub(crate) struct RenderLightmap { - /// The ID of the lightmap texture. - pub(crate) image: AssetId, - /// The rectangle within the lightmap texture that the UVs are relative to. /// /// The top left coordinate is the `min` part of the rect, and the bottom @@ -125,6 +132,9 @@ pub(crate) struct RenderLightmap { /// /// If bindless lightmaps aren't in use, this will be 0. pub(crate) slot_index: LightmapSlotIndex, + + // Whether or not bicubic sampling should be used for this lightmap. + pub(crate) bicubic_sampling: bool, } /// Stores data for all lightmaps in the render world. @@ -232,10 +242,10 @@ fn extract_lightmaps( render_lightmaps.render_lightmaps.insert( entity.into(), RenderLightmap::new( - lightmap.image.id(), lightmap.uv_rect, slab_index, slot_index, + lightmap.bicubic_sampling, ), ); @@ -291,16 +301,16 @@ impl RenderLightmap { /// Creates a new lightmap from a texture, a UV rect, and a slab and slot /// index pair. 
fn new( - image: AssetId, uv_rect: Rect, slab_index: LightmapSlabIndex, slot_index: LightmapSlotIndex, + bicubic_sampling: bool, ) -> Self { Self { - image, uv_rect, slab_index, slot_index, + bicubic_sampling, } } } @@ -326,6 +336,7 @@ impl Default for Lightmap { Self { image: Default::default(), uv_rect: Rect::new(0.0, 0.0, 1.0, 1.0), + bicubic_sampling: false, } } } diff --git a/crates/bevy_pbr/src/material.rs b/crates/bevy_pbr/src/material.rs index f894774af140d..181499618418e 100644 --- a/crates/bevy_pbr/src/material.rs +++ b/crates/bevy_pbr/src/material.rs @@ -1,25 +1,27 @@ -use self::{irradiance_volume::IrradianceVolume, prelude::EnvironmentMapLight}; -use crate::material_bind_groups::{MaterialBindGroupAllocator, MaterialBindingId}; +use crate::material_bind_groups::{ + FallbackBindlessResources, MaterialBindGroupAllocator, MaterialBindingId, +}; #[cfg(feature = "meshlet")] use crate::meshlet::{ prepare_material_meshlet_meshes_main_opaque_pass, queue_material_meshlet_meshes, InstanceManager, }; use crate::*; -use bevy_asset::{Asset, AssetId, AssetServer}; +use bevy_asset::prelude::AssetChanged; +use bevy_asset::{Asset, AssetEvents, AssetId, AssetServer, UntypedAssetId}; +use bevy_core_pipeline::deferred::{AlphaMask3dDeferred, Opaque3dDeferred}; +use bevy_core_pipeline::prepass::{AlphaMask3dPrepass, Opaque3dPrepass}; use bevy_core_pipeline::{ core_3d::{ - AlphaMask3d, Camera3d, Opaque3d, Opaque3dBatchSetKey, Opaque3dBinKey, - ScreenSpaceTransmissionQuality, Transmissive3d, Transparent3d, - }, - oit::OrderIndependentTransparencySettings, - prepass::{ - DeferredPrepass, DepthPrepass, MotionVectorPrepass, NormalPrepass, - OpaqueNoLightmap3dBatchSetKey, OpaqueNoLightmap3dBinKey, + AlphaMask3d, Opaque3d, Opaque3dBatchSetKey, Opaque3dBinKey, ScreenSpaceTransmissionQuality, + Transmissive3d, Transparent3d, }, - tonemapping::{DebandDither, Tonemapping}, + prepass::{OpaqueNoLightmap3dBatchSetKey, OpaqueNoLightmap3dBinKey}, + tonemapping::Tonemapping, }; use 
bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::Tick; +use bevy_ecs::system::SystemChangeTick; use bevy_ecs::{ prelude::*, system::{ @@ -27,23 +29,32 @@ use bevy_ecs::{ SystemParamItem, }, }; +use bevy_platform::collections::hash_map::Entry; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_platform::hash::FixedHasher; use bevy_reflect::std_traits::ReflectDefault; use bevy_reflect::Reflect; +use bevy_render::camera::extract_cameras; +use bevy_render::mesh::mark_3d_meshes_as_changed_if_their_assets_changed; +use bevy_render::render_asset::prepare_assets; +use bevy_render::renderer::RenderQueue; use bevy_render::{ - camera::TemporalJitter, + batching::gpu_preprocessing::GpuPreprocessingSupport, extract_resource::ExtractResource, mesh::{Mesh3d, MeshVertexBufferLayoutRef, RenderMesh}, render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets}, render_phase::*, render_resource::*, renderer::RenderDevice, - view::{ExtractedView, Msaa, RenderVisibilityRanges, ViewVisibility}, + sync_world::MainEntity, + view::{ExtractedView, Msaa, RenderVisibilityRanges, RetainedViewEntity, ViewVisibility}, Extract, }; use bevy_render::{mesh::allocator::MeshAllocator, sync_world::MainEntityHashMap}; use bevy_render::{texture::FallbackImage, view::RenderVisibleEntities}; -use bevy_utils::{hashbrown::hash_map::Entry, tracing::error}; +use bevy_utils::Parallel; use core::{hash::Hash, marker::PhantomData}; +use tracing::error; /// Materials are used alongside [`MaterialPlugin`], [`Mesh3d`], and [`MeshMaterial3d`] /// to spawn entities that are rendered with a specific [`Material`] type. They serve as an easy to use high level @@ -122,7 +133,6 @@ pub trait Material: Asset + AsBindGroup + Clone + Sized { /// Returns this material's fragment shader. If [`ShaderRef::Default`] is returned, the default mesh fragment shader /// will be used. 
- #[allow(unused_variables)] fn fragment_shader() -> ShaderRef { ShaderRef::Default } @@ -172,7 +182,6 @@ pub trait Material: Asset + AsBindGroup + Clone + Sized { /// /// This is used for the various [prepasses](bevy_core_pipeline::prepass) as well as for generating the depth maps /// required for shadow mapping. - #[allow(unused_variables)] fn prepass_fragment_shader() -> ShaderRef { ShaderRef::Default } @@ -185,7 +194,6 @@ pub trait Material: Asset + AsBindGroup + Clone + Sized { /// Returns this material's deferred fragment shader. If [`ShaderRef::Default`] is returned, the default deferred fragment shader /// will be used. - #[allow(unused_variables)] fn deferred_fragment_shader() -> ShaderRef { ShaderRef::Default } @@ -196,7 +204,6 @@ pub trait Material: Asset + AsBindGroup + Clone + Sized { /// This is part of an experimental feature, and is unnecessary to implement unless you are using `MeshletMesh`'s. /// /// See [`crate::meshlet::MeshletMesh`] for limitations. - #[allow(unused_variables)] #[cfg(feature = "meshlet")] fn meshlet_mesh_fragment_shader() -> ShaderRef { ShaderRef::Default @@ -208,7 +215,6 @@ pub trait Material: Asset + AsBindGroup + Clone + Sized { /// This is part of an experimental feature, and is unnecessary to implement unless you are using `MeshletMesh`'s. /// /// See [`crate::meshlet::MeshletMesh`] for limitations. - #[allow(unused_variables)] #[cfg(feature = "meshlet")] fn meshlet_mesh_prepass_fragment_shader() -> ShaderRef { ShaderRef::Default @@ -220,7 +226,6 @@ pub trait Material: Asset + AsBindGroup + Clone + Sized { /// This is part of an experimental feature, and is unnecessary to implement unless you are using `MeshletMesh`'s. /// /// See [`crate::meshlet::MeshletMesh`] for limitations. 
- #[allow(unused_variables)] #[cfg(feature = "meshlet")] fn meshlet_mesh_deferred_fragment_shader() -> ShaderRef { ShaderRef::Default @@ -228,7 +233,10 @@ pub trait Material: Asset + AsBindGroup + Clone + Sized { /// Customizes the default [`RenderPipelineDescriptor`] for a specific entity using the entity's /// [`MaterialPipelineKey`] and [`MeshVertexBufferLayoutRef`] as input. - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." + )] #[inline] fn specialize( pipeline: &MaterialPipeline, @@ -251,6 +259,8 @@ pub struct MaterialPlugin { pub prepass_enabled: bool, /// Controls if shadows are enabled for the Material. pub shadows_enabled: bool, + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, pub _marker: PhantomData, } @@ -259,6 +269,7 @@ impl Default for MaterialPlugin { Self { prepass_enabled: true, shadows_enabled: true, + debug_flags: RenderDebugFlags::default(), _marker: Default::default(), } } @@ -271,39 +282,91 @@ where fn build(&self, app: &mut App) { app.init_asset::() .register_type::>() - .add_plugins(RenderAssetPlugin::>::default()); + .init_resource::>() + .add_plugins((RenderAssetPlugin::>::default(),)) + .add_systems( + PostUpdate, + ( + mark_meshes_as_changed_if_their_materials_changed::.ambiguous_with_all(), + check_entities_needing_specialization::.after(AssetEvents), + ) + .after(mark_3d_meshes_as_changed_if_their_assets_changed), + ); + + if self.shadows_enabled { + app.add_systems( + PostUpdate, + check_light_entities_needing_specialization:: + .after(check_entities_needing_specialization::), + ); + } if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app + .init_resource::>() + .init_resource::>() .init_resource::>() - .init_resource::>() + 
.init_resource::() .add_render_command::>() .add_render_command::>() .add_render_command::>() .add_render_command::>() .add_render_command::>() .init_resource::>>() - .add_systems(ExtractSchedule, extract_mesh_materials::) + .add_systems( + ExtractSchedule, + ( + extract_mesh_materials::.in_set(ExtractMaterialsSet), + early_sweep_material_instances:: + .after(ExtractMaterialsSet) + .before(late_sweep_material_instances), + extract_entities_needs_specialization::.after(extract_cameras), + ), + ) .add_systems( Render, - queue_material_meshes:: - .in_set(RenderSet::QueueMeshes) - .after(prepare_assets::>), + ( + specialize_material_meshes:: + .in_set(RenderSet::PrepareMeshes) + .after(prepare_assets::>) + .after(prepare_assets::) + .after(collect_meshes_for_gpu_building) + .after(set_mesh_motion_vector_flags), + queue_material_meshes:: + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::>), + ), ) .add_systems( Render, - prepare_material_bind_groups:: + ( + prepare_material_bind_groups::, + write_material_bind_group_buffers::, + ) + .chain() .in_set(RenderSet::PrepareBindGroups) .after(prepare_assets::>), ); if self.shadows_enabled { - render_app.add_systems( - Render, - queue_shadows:: - .in_set(RenderSet::QueueMeshes) - .after(prepare_assets::>), - ); + render_app + .init_resource::() + .init_resource::() + .init_resource::>() + .add_systems( + Render, + ( + check_views_lights_need_specialization.in_set(RenderSet::PrepareAssets), + // specialize_shadows:: also needs to run after prepare_assets::>, + // which is fine since ManageViews is after PrepareAssets + specialize_shadows:: + .in_set(RenderSet::ManageViews) + .after(prepare_lights), + queue_shadows:: + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::>), + ), + ); } #[cfg(feature = "meshlet")] @@ -331,7 +394,7 @@ where } if self.prepass_enabled { - app.add_plugins(PrepassPlugin::::default()); + app.add_plugins(PrepassPlugin::::new(self.debug_flags)); } } @@ -344,6 +407,14 @@ where } } +/// A 
dummy [`AssetId`] that we use as a placeholder whenever a mesh doesn't +/// have a material. +/// +/// See the comments in [`RenderMaterialInstances::mesh_material`] for more +/// information. +pub(crate) static DUMMY_MESH_MATERIAL: AssetId = + AssetId::::invalid(); + /// A key uniquely identifying a specialized [`MaterialPipeline`]. pub struct MaterialPipelineKey { pub mesh_key: MeshPipelineKey, @@ -463,7 +534,7 @@ impl FromWorld for MaterialPipeline { ShaderRef::Handle(handle) => Some(handle), ShaderRef::Path(path) => Some(asset_server.load(path)), }, - bindless: material_bind_groups::material_uses_bindless_resources::(render_device), + bindless: material_uses_bindless_resources::(render_device), marker: PhantomData, } } @@ -482,7 +553,7 @@ pub struct SetMaterialBindGroup(PhantomData); impl RenderCommand

for SetMaterialBindGroup { type Param = ( SRes>>, - SRes>, + SRes, SRes>, ); type ViewQuery = (); @@ -504,17 +575,20 @@ impl RenderCommand

for SetMaterial let material_instances = material_instances.into_inner(); let material_bind_group_allocator = material_bind_group_allocator.into_inner(); - let Some(material_asset_id) = material_instances.get(&item.main_entity()) else { + let Some(material_instance) = material_instances.instances.get(&item.main_entity()) else { + return RenderCommandResult::Skip; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { return RenderCommandResult::Skip; }; - let Some(material) = materials.get(*material_asset_id) else { + let Some(material) = materials.get(material_asset_id) else { return RenderCommandResult::Skip; }; let Some(material_bind_group) = material_bind_group_allocator.get(material.binding.group) else { return RenderCommandResult::Skip; }; - let Some(bind_group) = material_bind_group.get_bind_group() else { + let Some(bind_group) = material_bind_group.bind_group() else { return RenderCommandResult::Skip; }; pass.set_bind_group(I, bind_group, &[]); @@ -522,16 +596,49 @@ impl RenderCommand

for SetMaterial } } -/// Stores all extracted instances of a [`Material`] in the render world. -#[derive(Resource, Deref, DerefMut)] -pub struct RenderMaterialInstances(pub MainEntityHashMap>); +/// Stores all extracted instances of all [`Material`]s in the render world. +#[derive(Resource, Default)] +pub struct RenderMaterialInstances { + /// Maps from each entity in the main world to the + /// [`RenderMaterialInstance`] associated with it. + pub instances: MainEntityHashMap, + /// A monotonically-increasing counter, which we use to sweep + /// [`RenderMaterialInstances::instances`] when the entities and/or required + /// components are removed. + current_change_tick: Tick, +} -impl Default for RenderMaterialInstances { - fn default() -> Self { - Self(Default::default()) +impl RenderMaterialInstances { + /// Returns the mesh material ID for the entity with the given mesh, or a + /// dummy mesh material ID if the mesh has no material ID. + /// + /// Meshes almost always have materials, but in very specific circumstances + /// involving custom pipelines they won't. (See the + /// `specialized_mesh_pipelines` example.) + pub(crate) fn mesh_material(&self, entity: MainEntity) -> UntypedAssetId { + match self.instances.get(&entity) { + Some(render_instance) => render_instance.asset_id, + None => DUMMY_MESH_MATERIAL.into(), + } } } +/// The material associated with a single mesh instance in the main world. +/// +/// Note that this uses an [`UntypedAssetId`] and isn't generic over the +/// material type, for simplicity. +pub struct RenderMaterialInstance { + /// The material asset. + pub(crate) asset_id: UntypedAssetId, + /// The [`RenderMaterialInstances::current_change_tick`] at which this + /// material instance was last modified. + last_change_tick: Tick, +} + +/// A [`SystemSet`] that contains all `extract_mesh_materials` systems. 
+#[derive(SystemSet, Clone, PartialEq, Eq, Debug, Hash)] +pub struct ExtractMaterialsSet; + pub const fn alpha_mode_pipeline_key(alpha_mode: AlphaMode, msaa: &Msaa) -> MeshPipelineKey { match alpha_mode { // Premultiplied and Add share the same pipeline key @@ -582,211 +689,352 @@ pub const fn screen_space_specular_transmission_pipeline_key( } } +/// A system that ensures that +/// [`crate::render::mesh::extract_meshes_for_gpu_building`] re-extracts meshes +/// whose materials changed. +/// +/// As [`crate::render::mesh::collect_meshes_for_gpu_building`] only considers +/// meshes that were newly extracted, and it writes information from the +/// [`RenderMaterialInstances`] into the +/// [`crate::render::mesh::MeshInputUniform`], we must tell +/// [`crate::render::mesh::extract_meshes_for_gpu_building`] to re-extract a +/// mesh if its material changed. Otherwise, the material binding information in +/// the [`crate::render::mesh::MeshInputUniform`] might not be updated properly. +/// The easiest way to ensure that +/// [`crate::render::mesh::extract_meshes_for_gpu_building`] re-extracts a mesh +/// is to mark its [`Mesh3d`] as changed, so that's what this system does. +fn mark_meshes_as_changed_if_their_materials_changed( + mut changed_meshes_query: Query< + &mut Mesh3d, + Or<(Changed>, AssetChanged>)>, + >, +) where + M: Material, +{ + for mut mesh in &mut changed_meshes_query { + mesh.set_changed(); + } +} + +/// Fills the [`RenderMaterialInstances`] resources from the meshes in the +/// scene. 
fn extract_mesh_materials( - mut material_instances: ResMut>, - mut material_ids: ResMut, - mut material_bind_group_allocator: ResMut>, - query: Extract)>>, + mut material_instances: ResMut, + changed_meshes_query: Extract< + Query< + (Entity, &ViewVisibility, &MeshMaterial3d), + Or<(Changed, Changed>)>, + >, + >, ) { - material_instances.clear(); + let last_change_tick = material_instances.current_change_tick; - for (entity, view_visibility, material) in &query { + for (entity, view_visibility, material) in &changed_meshes_query { if view_visibility.get() { - material_instances.insert(entity.into(), material.id()); - - // Allocate a slot for this material in the bind group. - let material_id = material.id().untyped(); - material_ids - .mesh_to_material - .insert(entity.into(), material_id); - if let Entry::Vacant(entry) = material_ids.material_to_binding.entry(material_id) { - entry.insert(material_bind_group_allocator.allocate()); - } + material_instances.instances.insert( + entity.into(), + RenderMaterialInstance { + asset_id: material.id().untyped(), + last_change_tick, + }, + ); + } else { + material_instances + .instances + .remove(&MainEntity::from(entity)); } } } -/// For each view, iterates over all the meshes visible from that view and adds -/// them to [`BinnedRenderPhase`]s or [`SortedRenderPhase`]s as appropriate. 
-#[allow(clippy::too_many_arguments)] -pub fn queue_material_meshes( - ( - opaque_draw_functions, - alpha_mask_draw_functions, - transmissive_draw_functions, - transparent_draw_functions, - ): ( - Res>, - Res>, - Res>, - Res>, - ), - material_pipeline: Res>, - mut pipelines: ResMut>>, - pipeline_cache: Res, - render_meshes: Res>, - render_materials: Res>>, - render_mesh_instances: Res, - render_material_instances: Res>, - render_lightmaps: Res, - render_visibility_ranges: Res, - (mesh_allocator, material_bind_group_allocator): ( - Res, - Res>, - ), - mut opaque_render_phases: ResMut>, - mut alpha_mask_render_phases: ResMut>, - mut transmissive_render_phases: ResMut>, - mut transparent_render_phases: ResMut>, - views: Query<( - Entity, - &ExtractedView, - &RenderVisibleEntities, - &Msaa, - Option<&Tonemapping>, - Option<&DebandDither>, - Option<&ShadowFilteringMethod>, - Has, - ( - Has, - Has, - Has, - Has, - ), - Option<&Camera3d>, - Has, - Option<&Projection>, - ( - Has>, - Has>, - ), - Has, - )>, +/// Removes mesh materials from [`RenderMaterialInstances`] when their +/// [`MeshMaterial3d`] components are removed. +/// +/// This is tricky because we have to deal with the case in which a material of +/// type A was removed and replaced with a material of type B in the same frame +/// (which is actually somewhat common of an operation). In this case, even +/// though an entry will be present in `RemovedComponents>`, +/// we must not remove the entry in `RenderMaterialInstances` which corresponds +/// to material B. To handle this case, we use change ticks to avoid removing +/// the entry if it was updated this frame. +/// +/// This is the first of two sweep phases. Because this phase runs once per +/// material type, we need a second phase in order to guarantee that we only +/// bump [`RenderMaterialInstances::current_change_tick`] once. 
+fn early_sweep_material_instances( + mut material_instances: ResMut, + mut removed_materials_query: Extract>>, ) where - M::Data: PartialEq + Eq + Hash + Clone, + M: Material, { - for ( - view_entity, - view, - visible_entities, - msaa, - tonemapping, - dither, - shadow_filter_method, - ssao, - (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), - camera_3d, - temporal_jitter, - projection, - (has_environment_maps, has_irradiance_volumes), - has_oit, - ) in &views - { - let ( - Some(opaque_phase), - Some(alpha_mask_phase), - Some(transmissive_phase), - Some(transparent_phase), - ) = ( - opaque_render_phases.get_mut(&view_entity), - alpha_mask_render_phases.get_mut(&view_entity), - transmissive_render_phases.get_mut(&view_entity), - transparent_render_phases.get_mut(&view_entity), - ) - else { - continue; - }; - - let draw_opaque_pbr = opaque_draw_functions.read().id::>(); - let draw_alpha_mask_pbr = alpha_mask_draw_functions.read().id::>(); - let draw_transmissive_pbr = transmissive_draw_functions.read().id::>(); - let draw_transparent_pbr = transparent_draw_functions.read().id::>(); - - let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples()) - | MeshPipelineKey::from_hdr(view.hdr); + let last_change_tick = material_instances.current_change_tick; - if normal_prepass { - view_key |= MeshPipelineKey::NORMAL_PREPASS; + for entity in removed_materials_query.read() { + if let Entry::Occupied(occupied_entry) = material_instances.instances.entry(entity.into()) { + // Only sweep the entry if it wasn't updated this frame. + if occupied_entry.get().last_change_tick != last_change_tick { + occupied_entry.remove(); + } } + } +} - if depth_prepass { - view_key |= MeshPipelineKey::DEPTH_PREPASS; +/// Removes mesh materials from [`RenderMaterialInstances`] when their +/// [`ViewVisibility`] components are removed. 
+/// +/// This runs after all invocations of [`early_sweep_material_instances`] and is +/// responsible for bumping [`RenderMaterialInstances::current_change_tick`] in +/// preparation for a new frame. +pub(crate) fn late_sweep_material_instances( + mut material_instances: ResMut, + mut removed_visibilities_query: Extract>, +) { + let last_change_tick = material_instances.current_change_tick; + + for entity in removed_visibilities_query.read() { + if let Entry::Occupied(occupied_entry) = material_instances.instances.entry(entity.into()) { + // Only sweep the entry if it wasn't updated this frame. It's + // possible that a `ViewVisibility` component was removed and + // re-added in the same frame. + if occupied_entry.get().last_change_tick != last_change_tick { + occupied_entry.remove(); + } } + } - if motion_vector_prepass { - view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS; - } + material_instances + .current_change_tick + .set(last_change_tick.get() + 1); +} - if deferred_prepass { - view_key |= MeshPipelineKey::DEFERRED_PREPASS; +pub fn extract_entities_needs_specialization( + entities_needing_specialization: Extract>>, + mut entity_specialization_ticks: ResMut>, + mut removed_mesh_material_components: Extract>>, + mut specialized_material_pipeline_cache: ResMut>, + mut specialized_prepass_material_pipeline_cache: Option< + ResMut>, + >, + mut specialized_shadow_material_pipeline_cache: Option< + ResMut>, + >, + views: Query<&ExtractedView>, + ticks: SystemChangeTick, +) where + M: Material, +{ + // Clean up any despawned entities, we do this first in case the removed material was re-added + // the same frame, thus will appear both in the removed components list and have been added to + // the `EntitiesNeedingSpecialization` collection by triggering the `Changed` filter + for entity in removed_mesh_material_components.read() { + entity_specialization_ticks.remove(&MainEntity::from(entity)); + for view in views { + if let Some(cache) = + 
specialized_material_pipeline_cache.get_mut(&view.retained_view_entity) + { + cache.remove(&MainEntity::from(entity)); + } + if let Some(cache) = specialized_prepass_material_pipeline_cache + .as_mut() + .and_then(|c| c.get_mut(&view.retained_view_entity)) + { + cache.remove(&MainEntity::from(entity)); + } + if let Some(cache) = specialized_shadow_material_pipeline_cache + .as_mut() + .and_then(|c| c.get_mut(&view.retained_view_entity)) + { + cache.remove(&MainEntity::from(entity)); + } } + } - if temporal_jitter { - view_key |= MeshPipelineKey::TEMPORAL_JITTER; - } + for entity in entities_needing_specialization.iter() { + // Update the entity's specialization tick with this run's tick + entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); + } +} - if has_environment_maps { - view_key |= MeshPipelineKey::ENVIRONMENT_MAP; - } +#[derive(Resource, Deref, DerefMut, Clone, Debug)] +pub struct EntitiesNeedingSpecialization { + #[deref] + pub entities: Vec, + _marker: PhantomData, +} - if has_irradiance_volumes { - view_key |= MeshPipelineKey::IRRADIANCE_VOLUME; +impl Default for EntitiesNeedingSpecialization { + fn default() -> Self { + Self { + entities: Default::default(), + _marker: Default::default(), } + } +} - if has_oit { - view_key |= MeshPipelineKey::OIT_ENABLED; - } +#[derive(Resource, Deref, DerefMut, Clone, Debug)] +pub struct EntitySpecializationTicks { + #[deref] + pub entities: MainEntityHashMap, + _marker: PhantomData, +} - if let Some(projection) = projection { - view_key |= match projection { - Projection::Perspective(_) => MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE, - Projection::Orthographic(_) => MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC, - }; +impl Default for EntitySpecializationTicks { + fn default() -> Self { + Self { + entities: MainEntityHashMap::default(), + _marker: Default::default(), } + } +} - match shadow_filter_method.unwrap_or(&ShadowFilteringMethod::default()) { - ShadowFilteringMethod::Hardware2x2 => { - 
view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2; - } - ShadowFilteringMethod::Gaussian => { - view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN; - } - ShadowFilteringMethod::Temporal => { - view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL; - } - } +/// Stores the [`SpecializedMaterialViewPipelineCache`] for each view. +#[derive(Resource, Deref, DerefMut)] +pub struct SpecializedMaterialPipelineCache { + // view entity -> view pipeline cache + #[deref] + map: HashMap>, + marker: PhantomData, +} - if !view.hdr { - if let Some(tonemapping) = tonemapping { - view_key |= MeshPipelineKey::TONEMAP_IN_SHADER; - view_key |= tonemapping_pipeline_key(*tonemapping); - } - if let Some(DebandDither::Enabled) = dither { - view_key |= MeshPipelineKey::DEBAND_DITHER; - } +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. +#[derive(Deref, DerefMut)] +pub struct SpecializedMaterialViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, + marker: PhantomData, +} + +impl Default for SpecializedMaterialPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, } - if ssao { - view_key |= MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION; + } +} + +impl Default for SpecializedMaterialViewPipelineCache { + fn default() -> Self { + Self { + map: MainEntityHashMap::default(), + marker: PhantomData, } - if let Some(camera_3d) = camera_3d { - view_key |= screen_space_specular_transmission_pipeline_key( - camera_3d.screen_space_specular_transmission_quality, - ); + } +} + +pub fn check_entities_needing_specialization( + needs_specialization: Query< + Entity, + ( + Or<( + Changed, + AssetChanged, + Changed>, + AssetChanged>, + )>, + With>, + ), + >, + mut par_local: Local>>, + mut entities_needing_specialization: ResMut>, +) where + M: Material, +{ + 
entities_needing_specialization.clear(); + + needs_specialization + .par_iter() + .for_each(|entity| par_local.borrow_local_mut().push(entity)); + + par_local.drain_into(&mut entities_needing_specialization); +} + +pub fn specialize_material_meshes( + render_meshes: Res>, + render_materials: Res>>, + render_mesh_instances: Res, + render_material_instances: Res, + render_lightmaps: Res, + render_visibility_ranges: Res, + ( + material_bind_group_allocator, + opaque_render_phases, + alpha_mask_render_phases, + transmissive_render_phases, + transparent_render_phases, + ): ( + Res>, + Res>, + Res>, + Res>, + Res>, + ), + views: Query<(&ExtractedView, &RenderVisibleEntities)>, + view_key_cache: Res, + entity_specialization_ticks: Res>, + view_specialization_ticks: Res, + mut specialized_material_pipeline_cache: ResMut>, + mut pipelines: ResMut>>, + pipeline: Res>, + pipeline_cache: Res, + ticks: SystemChangeTick, +) where + M::Data: PartialEq + Eq + Hash + Clone, +{ + // Record the retained IDs of all shadow views so that we can expire old + // pipeline IDs. 
+ let mut all_views: HashSet = HashSet::default(); + + for (view, visible_entities) in &views { + all_views.insert(view.retained_view_entity); + + if !transparent_render_phases.contains_key(&view.retained_view_entity) + && !opaque_render_phases.contains_key(&view.retained_view_entity) + && !alpha_mask_render_phases.contains_key(&view.retained_view_entity) + && !transmissive_render_phases.contains_key(&view.retained_view_entity) + { + continue; } - let rangefinder = view.rangefinder3d(); - for (render_entity, visible_entity) in visible_entities.iter::() { - let Some(material_asset_id) = render_material_instances.get(visible_entity) else { + let Some(view_key) = view_key_cache.get(&view.retained_view_entity) else { + continue; + }; + + let view_tick = view_specialization_ticks + .get(&view.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(view.retained_view_entity) + .or_default(); + + for (_, visible_entity) in visible_entities.iter::() { + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { continue; }; let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) else { continue; }; + let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { continue; }; - let Some(material) = render_materials.get(*material_asset_id) else { + let Some(material) = render_materials.get(material_asset_id) else { 
continue; }; let Some(material_bind_group) = @@ -798,25 +1046,25 @@ pub fn queue_material_meshes( let mut mesh_pipeline_key_bits = material.properties.mesh_pipeline_key_bits; mesh_pipeline_key_bits.insert(alpha_mode_pipeline_key( material.properties.alpha_mode, - msaa, + &Msaa::from_samples(view_key.msaa_samples()), )); - let mut mesh_key = view_key + let mut mesh_key = *view_key | MeshPipelineKey::from_bits_retain(mesh.key_bits.bits()) | mesh_pipeline_key_bits; - let lightmap_slab_index = render_lightmaps - .render_lightmaps - .get(visible_entity) - .map(|lightmap| lightmap.slab_index); - if lightmap_slab_index.is_some() { + if let Some(lightmap) = render_lightmaps.render_lightmaps.get(visible_entity) { mesh_key |= MeshPipelineKey::LIGHTMAPPED; + + if lightmap.bicubic_sampling { + mesh_key |= MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING; + } } if render_visibility_ranges.entity_has_crossfading_visibility_ranges(*visible_entity) { mesh_key |= MeshPipelineKey::VISIBILITY_RANGE_DITHER; } - if motion_vector_prepass { + if view_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { // If the previous frame have skins or morph targets, note that. 
if mesh_instance .flags @@ -832,17 +1080,13 @@ pub fn queue_material_meshes( } } - let pipeline_id = pipelines.specialize( - &pipeline_cache, - &material_pipeline, - MaterialPipelineKey { - mesh_key, - bind_group_data: material_bind_group - .get_extra_data(material.binding.slot) - .clone(), - }, - &mesh.layout, - ); + let key = MaterialPipelineKey { + mesh_key, + bind_group_data: material_bind_group + .get_extra_data(material.binding.slot) + .clone(), + }; + let pipeline_id = pipelines.specialize(&pipeline_cache, &pipeline, key, &mesh.layout); let pipeline_id = match pipeline_id { Ok(id) => id, Err(err) => { @@ -851,82 +1095,170 @@ pub fn queue_material_meshes( } }; - match mesh_key - .intersection(MeshPipelineKey::BLEND_RESERVED_BITS | MeshPipelineKey::MAY_DISCARD) + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); + } + } + + // Delete specialized pipelines belonging to views that have expired. + specialized_material_pipeline_cache + .retain(|retained_view_entity, _| all_views.contains(retained_view_entity)); +} + +/// For each view, iterates over all the meshes visible from that view and adds +/// them to [`BinnedRenderPhase`]s or [`SortedRenderPhase`]s as appropriate. 
+pub fn queue_material_meshes( + render_materials: Res>>, + render_mesh_instances: Res, + render_material_instances: Res, + mesh_allocator: Res, + gpu_preprocessing_support: Res, + mut opaque_render_phases: ResMut>, + mut alpha_mask_render_phases: ResMut>, + mut transmissive_render_phases: ResMut>, + mut transparent_render_phases: ResMut>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, + specialized_material_pipeline_cache: ResMut>, +) where + M::Data: PartialEq + Eq + Hash + Clone, +{ + for (view, visible_entities) in &views { + let ( + Some(opaque_phase), + Some(alpha_mask_phase), + Some(transmissive_phase), + Some(transparent_phase), + ) = ( + opaque_render_phases.get_mut(&view.retained_view_entity), + alpha_mask_render_phases.get_mut(&view.retained_view_entity), + transmissive_render_phases.get_mut(&view.retained_view_entity), + transparent_render_phases.get_mut(&view.retained_view_entity), + ) + else { + continue; + }; + + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(&view.retained_view_entity) + else { + continue; + }; + + let rangefinder = view.rangefinder3d(); + for (render_entity, visible_entity) in visible_entities.iter::() { + let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. 
+ if opaque_phase.validate_cached_entity(*visible_entity, current_change_tick) + || alpha_mask_phase.validate_cached_entity(*visible_entity, current_change_tick) { - MeshPipelineKey::BLEND_OPAQUE | MeshPipelineKey::BLEND_ALPHA_TO_COVERAGE => { - if material.properties.reads_view_transmission_texture { - let distance = rangefinder.distance_translation(&mesh_instance.translation) - + material.properties.depth_bias; - transmissive_phase.add(Transmissive3d { - entity: (*render_entity, *visible_entity), - draw_function: draw_transmissive_pbr, - pipeline: pipeline_id, - distance, - batch_range: 0..1, - extra_index: PhaseItemExtraIndex::None, - }); - } else if material.properties.render_method == OpaqueRendererMethod::Forward { - let (vertex_slab, index_slab) = - mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); - let bin_key = Opaque3dBinKey { - batch_set_key: Opaque3dBatchSetKey { - draw_function: draw_opaque_pbr, - pipeline: pipeline_id, - material_bind_group_index: Some(material.binding.group.0), - vertex_slab: vertex_slab.unwrap_or_default(), - index_slab, - lightmap_slab: lightmap_slab_index - .map(|lightmap_slab_index| *lightmap_slab_index), - }, - asset_id: mesh_instance.mesh_asset_id.into(), - }; - opaque_phase.add( - bin_key, - (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.should_batch()), - ); + continue; + } + + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; + let Some(material) = render_materials.get(material_asset_id) else { + continue; + }; + + // Fetch the slabs that this mesh resides in. 
+ let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + + match material.properties.render_phase_type { + RenderPhaseType::Transmissive => { + let distance = rangefinder.distance_translation(&mesh_instance.translation) + + material.properties.depth_bias; + transmissive_phase.add(Transmissive3d { + entity: (*render_entity, *visible_entity), + draw_function: material.properties.draw_function_id, + pipeline: pipeline_id, + distance, + batch_range: 0..1, + extra_index: PhaseItemExtraIndex::None, + indexed: index_slab.is_some(), + }); + } + RenderPhaseType::Opaque => { + if material.properties.render_method == OpaqueRendererMethod::Deferred { + // Even though we aren't going to insert the entity into + // a bin, we still want to update its cache entry. That + // way, we know we don't need to re-examine it in future + // frames. + opaque_phase.update_cache(*visible_entity, None, current_change_tick); + continue; } + let batch_set_key = Opaque3dBatchSetKey { + pipeline: pipeline_id, + draw_function: material.properties.draw_function_id, + material_bind_group_index: Some(material.binding.group.0), + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + lightmap_slab: mesh_instance.shared.lightmap_slab_index.map(|index| *index), + }; + let bin_key = Opaque3dBinKey { + asset_id: mesh_instance.mesh_asset_id.into(), + }; + opaque_phase.add( + batch_set_key, + bin_key, + (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + current_change_tick, + ); } // Alpha mask - MeshPipelineKey::MAY_DISCARD => { - if material.properties.reads_view_transmission_texture { - let distance = rangefinder.distance_translation(&mesh_instance.translation) - + material.properties.depth_bias; - transmissive_phase.add(Transmissive3d { - entity: (*render_entity, *visible_entity), - draw_function: draw_transmissive_pbr, - pipeline: 
pipeline_id, - distance, - batch_range: 0..1, - extra_index: PhaseItemExtraIndex::None, - }); - } else if material.properties.render_method == OpaqueRendererMethod::Forward { - let bin_key = OpaqueNoLightmap3dBinKey { - batch_set_key: OpaqueNoLightmap3dBatchSetKey { - draw_function: draw_alpha_mask_pbr, - pipeline: pipeline_id, - material_bind_group_index: Some(material.binding.group.0), - }, - asset_id: mesh_instance.mesh_asset_id.into(), - }; - alpha_mask_phase.add( - bin_key, - (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.should_batch()), - ); - } + RenderPhaseType::AlphaMask => { + let batch_set_key = OpaqueNoLightmap3dBatchSetKey { + draw_function: material.properties.draw_function_id, + pipeline: pipeline_id, + material_bind_group_index: Some(material.binding.group.0), + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; + let bin_key = OpaqueNoLightmap3dBinKey { + asset_id: mesh_instance.mesh_asset_id.into(), + }; + alpha_mask_phase.add( + batch_set_key, + bin_key, + (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + current_change_tick, + ); } - _ => { + RenderPhaseType::Transparent => { let distance = rangefinder.distance_translation(&mesh_instance.translation) + material.properties.depth_bias; transparent_phase.add(Transparent3d { entity: (*render_entity, *visible_entity), - draw_function: draw_transparent_pbr, + draw_function: material.properties.draw_function_id, pipeline: pipeline_id, distance, batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + indexed: index_slab.is_some(), }); } } @@ -936,7 +1268,7 @@ pub fn queue_material_meshes( /// Default render method used for opaque materials. 
#[derive(Default, Resource, Clone, Debug, ExtractResource, Reflect)] -#[reflect(Resource, Default, Debug)] +#[reflect(Resource, Default, Debug, Clone)] pub struct DefaultOpaqueRendererMethod(OpaqueRendererMethod); impl DefaultOpaqueRendererMethod { @@ -976,6 +1308,7 @@ impl DefaultOpaqueRendererMethod { /// /// If a material indicates `OpaqueRendererMethod::Auto`, `DefaultOpaqueRendererMethod` will be used. #[derive(Default, Clone, Copy, Debug, PartialEq, Reflect)] +#[reflect(Default, Clone, PartialEq)] pub enum OpaqueRendererMethod { #[default] Forward, @@ -1004,8 +1337,28 @@ pub struct MaterialProperties { /// This allows taking color output from the [`Opaque3d`] pass as an input, (for screen-space transmission) but requires /// rendering to take place in a separate [`Transmissive3d`] pass. pub reads_view_transmission_texture: bool, + pub render_phase_type: RenderPhaseType, + pub draw_function_id: DrawFunctionId, + pub prepass_draw_function_id: Option, + pub deferred_draw_function_id: Option, +} + +#[derive(Clone, Copy)] +pub enum RenderPhaseType { + Opaque, + AlphaMask, + Transmissive, + Transparent, } +/// A resource that maps each untyped material ID to its binding. +/// +/// This duplicates information in `RenderAssets`, but it doesn't have the +/// `M` type parameter, so it can be used in untyped contexts like +/// [`crate::render::mesh::collect_meshes_for_gpu_building`]. +#[derive(Resource, Default, Deref, DerefMut)] +pub struct RenderMaterialBindings(HashMap); + /// Data prepared for a [`Material`] instance. 
pub struct PreparedMaterial { pub binding: MaterialBindingId, @@ -1020,8 +1373,16 @@ impl RenderAsset for PreparedMaterial { SRes, SRes>, SRes, - SRes, SResMut>, + SResMut, + SRes>, + SRes>, + SRes>, + SRes>, + SRes>, + SRes>, + SRes>, + SRes>, M::Param, ); @@ -1032,31 +1393,77 @@ impl RenderAsset for PreparedMaterial { render_device, pipeline, default_opaque_render_method, - mesh_material_ids, - ref mut bind_group_allocator, - ref mut material_param, + bind_group_allocator, + render_material_bindings, + opaque_draw_functions, + alpha_mask_draw_functions, + transmissive_draw_functions, + transparent_draw_functions, + opaque_prepass_draw_functions, + alpha_mask_prepass_draw_functions, + opaque_deferred_draw_functions, + alpha_mask_deferred_draw_functions, + material_param, ): &mut SystemParamItem, ) -> Result> { - // Fetch the material binding ID, so that we can write it in to the - // `PreparedMaterial`. - let Some(material_binding_id) = mesh_material_ids - .material_to_binding - .get(&material_id.untyped()) - else { - return Err(PrepareAssetError::RetryNextUpdate(material)); - }; - - let method = match material.opaque_render_method() { + let draw_opaque_pbr = opaque_draw_functions.read().id::>(); + let draw_alpha_mask_pbr = alpha_mask_draw_functions.read().id::>(); + let draw_transmissive_pbr = transmissive_draw_functions.read().id::>(); + let draw_transparent_pbr = transparent_draw_functions.read().id::>(); + let draw_opaque_prepass = opaque_prepass_draw_functions + .read() + .get_id::>(); + let draw_alpha_mask_prepass = alpha_mask_prepass_draw_functions + .read() + .get_id::>(); + let draw_opaque_deferred = opaque_deferred_draw_functions + .read() + .get_id::>(); + let draw_alpha_mask_deferred = alpha_mask_deferred_draw_functions + .read() + .get_id::>(); + + let render_method = match material.opaque_render_method() { OpaqueRendererMethod::Forward => OpaqueRendererMethod::Forward, OpaqueRendererMethod::Deferred => OpaqueRendererMethod::Deferred, 
OpaqueRendererMethod::Auto => default_opaque_render_method.0, }; + let mut mesh_pipeline_key_bits = MeshPipelineKey::empty(); mesh_pipeline_key_bits.set( MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE, material.reads_view_transmission_texture(), ); + let reads_view_transmission_texture = + mesh_pipeline_key_bits.contains(MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE); + + let render_phase_type = match material.alpha_mode() { + AlphaMode::Blend | AlphaMode::Premultiplied | AlphaMode::Add | AlphaMode::Multiply => { + RenderPhaseType::Transparent + } + _ if reads_view_transmission_texture => RenderPhaseType::Transmissive, + AlphaMode::Opaque | AlphaMode::AlphaToCoverage => RenderPhaseType::Opaque, + AlphaMode::Mask(_) => RenderPhaseType::AlphaMask, + }; + + let draw_function_id = match render_phase_type { + RenderPhaseType::Opaque => draw_opaque_pbr, + RenderPhaseType::AlphaMask => draw_alpha_mask_pbr, + RenderPhaseType::Transmissive => draw_transmissive_pbr, + RenderPhaseType::Transparent => draw_transparent_pbr, + }; + let prepass_draw_function_id = match render_phase_type { + RenderPhaseType::Opaque => draw_opaque_prepass, + RenderPhaseType::AlphaMask => draw_alpha_mask_prepass, + _ => None, + }; + let deferred_draw_function_id = match render_phase_type { + RenderPhaseType::Opaque => draw_opaque_deferred, + RenderPhaseType::AlphaMask => draw_alpha_mask_deferred, + _ => None, + }; + match material.unprepared_bind_group( &pipeline.material_layout, render_device, @@ -1064,17 +1471,37 @@ impl RenderAsset for PreparedMaterial { false, ) { Ok(unprepared) => { - bind_group_allocator.init(render_device, *material_binding_id, unprepared); + // Allocate or update the material. + let binding = match render_material_bindings.entry(material_id.into()) { + Entry::Occupied(mut occupied_entry) => { + // TODO: Have a fast path that doesn't require + // recreating the bind group if only buffer contents + // change. For now, we just delete and recreate the bind + // group. 
+ bind_group_allocator.free(*occupied_entry.get()); + let new_binding = bind_group_allocator + .allocate_unprepared(unprepared, &pipeline.material_layout); + *occupied_entry.get_mut() = new_binding; + new_binding + } + Entry::Vacant(vacant_entry) => *vacant_entry.insert( + bind_group_allocator + .allocate_unprepared(unprepared, &pipeline.material_layout), + ), + }; Ok(PreparedMaterial { - binding: *material_binding_id, + binding, properties: MaterialProperties { alpha_mode: material.alpha_mode(), depth_bias: material.depth_bias(), - reads_view_transmission_texture: mesh_pipeline_key_bits - .contains(MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE), - render_method: method, + reads_view_transmission_texture, + render_phase_type, + draw_function_id, + prepass_draw_function_id, + render_method, mesh_pipeline_key_bits, + deferred_draw_function_id, }, phantom: PhantomData, }) @@ -1096,21 +1523,22 @@ impl RenderAsset for PreparedMaterial { ) { Ok(prepared_bind_group) => { // Store the resulting bind group directly in the slot. 
- bind_group_allocator.init_custom( - *material_binding_id, - prepared_bind_group.bind_group, - prepared_bind_group.data, - ); + let material_binding_id = + bind_group_allocator.allocate_prepared(prepared_bind_group); + render_material_bindings.insert(material_id.into(), material_binding_id); Ok(PreparedMaterial { - binding: *material_binding_id, + binding: material_binding_id, properties: MaterialProperties { alpha_mode: material.alpha_mode(), depth_bias: material.depth_bias(), - reads_view_transmission_texture: mesh_pipeline_key_bits - .contains(MeshPipelineKey::READS_VIEW_TRANSMISSION_TEXTURE), - render_method: method, + reads_view_transmission_texture, + render_phase_type, + draw_function_id, + prepass_draw_function_id, + render_method, mesh_pipeline_key_bits, + deferred_draw_function_id, }, phantom: PhantomData, }) @@ -1129,21 +1557,16 @@ impl RenderAsset for PreparedMaterial { } fn unload_asset( - asset_id: AssetId, - (_, _, _, mesh_material_ids, ref mut bind_group_allocator, _): &mut SystemParamItem< + source_asset: AssetId, + (_, _, _, bind_group_allocator, render_material_bindings, ..): &mut SystemParamItem< Self::Param, >, ) { - // Mark this material's slot in the binding array as free. - - let Some(material_binding_id) = mesh_material_ids - .material_to_binding - .get(&asset_id.untyped()) + let Some(material_binding_id) = render_material_bindings.remove(&source_asset.untyped()) else { return; }; - - bind_group_allocator.free(*material_binding_id); + bind_group_allocator.free(material_binding_id); } } @@ -1162,8 +1585,8 @@ impl From for MaterialBindGroupId { } } -/// A system that creates and/or recreates any bind groups that contain -/// materials that were modified this frame. +/// Creates and/or recreates any bind groups that contain materials that were +/// modified this frame. 
pub fn prepare_material_bind_groups( mut allocator: ResMut>, render_device: Res, @@ -1172,5 +1595,20 @@ pub fn prepare_material_bind_groups( ) where M: Material, { - allocator.prepare_bind_groups(&render_device, &fallback_image, &fallback_resources); + allocator.prepare_bind_groups(&render_device, &fallback_resources, &fallback_image); +} + +/// Uploads the contents of all buffers that the [`MaterialBindGroupAllocator`] +/// manages to the GPU. +/// +/// Non-bindless allocators don't currently manage any buffers, so this method +/// only has an effect for bindless allocators. +pub fn write_material_bind_group_buffers( + mut allocator: ResMut>, + render_device: Res, + render_queue: Res, +) where + M: Material, +{ + allocator.write_buffers(&render_device, &render_queue); } diff --git a/crates/bevy_pbr/src/material_bind_groups.rs b/crates/bevy_pbr/src/material_bind_groups.rs index 718b4fef535e0..b539d2098f1e3 100644 --- a/crates/bevy_pbr/src/material_bind_groups.rs +++ b/crates/bevy_pbr/src/material_bind_groups.rs @@ -1,136 +1,288 @@ //! Material bind group management for bindless resources. //! -//! In bindless mode, Bevy's renderer groups materials into small bind groups. -//! This allocator manages each bind group, assigning slots to materials as +//! In bindless mode, Bevy's renderer groups materials into bind groups. This +//! allocator manages each bind group, assigning slots to materials as //! appropriate. 
-use crate::Material; +use core::{cmp::Ordering, iter, marker::PhantomData, mem, ops::Range}; + use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - system::Resource, + resource::Resource, world::{FromWorld, World}, }; -use bevy_reflect::{std_traits::ReflectDefault, Reflect}; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_reflect::{prelude::ReflectDefault, Reflect}; use bevy_render::{ render_resource::{ - BindGroup, BindGroupEntry, BindGroupLayout, BindGroupLayoutEntry, BindingResource, - BindingType, Buffer, BufferBinding, BufferInitDescriptor, BufferUsages, - OwnedBindingResource, Sampler, SamplerDescriptor, TextureViewDimension, - UnpreparedBindGroup, WgpuSampler, WgpuTextureView, + BindGroup, BindGroupEntry, BindGroupLayout, BindingNumber, BindingResource, + BindingResources, BindlessDescriptor, BindlessIndex, BindlessIndexTableDescriptor, + BindlessResourceType, Buffer, BufferBinding, BufferDescriptor, BufferId, + BufferInitDescriptor, BufferUsages, CompareFunction, FilterMode, OwnedBindingResource, + PreparedBindGroup, RawBufferVec, Sampler, SamplerDescriptor, SamplerId, TextureView, + TextureViewDimension, TextureViewId, UnpreparedBindGroup, WgpuSampler, WgpuTextureView, }, - renderer::RenderDevice, + renderer::{RenderDevice, RenderQueue}, settings::WgpuFeatures, texture::FallbackImage, }; -use bevy_utils::{default, tracing::error, HashMap}; -use core::{any, iter, marker::PhantomData, num::NonZero}; +use bevy_utils::default; +use bytemuck::Pod; +use tracing::{error, trace}; + +use crate::Material; -/// An object that creates and stores bind groups for a single material type. +/// A resource that places materials into bind groups and tracks their +/// resources. /// -/// This object collects bindless materials into groups as appropriate and -/// assigns slots as materials are created. +/// Internally, Bevy has separate allocators for bindless and non-bindless +/// materials. 
This resource provides a common interface to the specific +/// allocator in use. #[derive(Resource)] -pub struct MaterialBindGroupAllocator +pub enum MaterialBindGroupAllocator where M: Material, { - /// The data that the allocator keeps about each bind group. - bind_groups: Vec>, - - /// Stores IDs of material bind groups that have at least one slot - /// available. - free_bind_groups: Vec, + /// The allocator used when the material is bindless. + Bindless(Box>), + /// The allocator used when the material is non-bindless. + NonBindless(Box>), +} - /// The layout for this bind group. +/// The allocator that places bindless materials into bind groups and tracks +/// their resources. +pub struct MaterialBindGroupBindlessAllocator +where + M: Material, +{ + /// The slabs, each of which contains a bind group. + slabs: Vec>, + /// The layout of the bind groups that we produce. bind_group_layout: BindGroupLayout, + /// Information about the bindless resources in the material. + /// + /// We use this information to create and maintain bind groups. + bindless_descriptor: BindlessDescriptor, - /// Dummy buffers that are assigned to unused slots. - fallback_buffers: MaterialFallbackBuffers, - - /// Whether this material is actually using bindless resources. + /// Dummy buffers that we use to fill empty slots in buffer binding arrays. /// - /// This takes the availability of bindless resources on this platform into - /// account. - bindless_enabled: bool, + /// There's one fallback buffer for each buffer in the bind group, each + /// appropriately sized. Each buffer contains one uninitialized element of + /// the applicable type. + fallback_buffers: HashMap, - phantom: PhantomData, + /// The maximum number of resources that can be stored in a slab. + /// + /// This corresponds to `SLAB_CAPACITY` in the `#[bindless(SLAB_CAPACITY)]` + /// attribute, when deriving `AsBindGroup`. + slab_capacity: u32, } -/// Information that the allocator keeps about each bind group. 
-pub enum MaterialBindGroup +/// A single bind group and the bookkeeping necessary to allocate into it. +pub struct MaterialBindlessSlab where M: Material, { - /// Information that the allocator keeps about each bind group with bindless - /// textures in use. - Bindless(MaterialBindlessBindGroup), + /// The current bind group, if it's up to date. + /// + /// If this is `None`, then the bind group is dirty and needs to be + /// regenerated. + bind_group: Option, - /// Information that the allocator keeps about each bind group for which - /// bindless textures are not in use. - NonBindless(MaterialNonBindlessBindGroup), + /// The GPU-accessible buffers that hold the mapping from binding index to + /// bindless slot. + /// + /// This is conventionally assigned to bind group binding 0, but it can be + /// changed using the `#[bindless(index_table(binding(B)))]` attribute on + /// `AsBindGroup`. + /// + /// Because the slab binary searches this table, the entries within must be + /// sorted by bindless index. + bindless_index_tables: Vec>, + + /// The binding arrays containing samplers. + samplers: HashMap>, + /// The binding arrays containing textures. + textures: HashMap>, + /// The binding arrays containing buffers. + buffers: HashMap>, + /// The buffers that contain plain old data (i.e. the structure-level + /// `#[data]` attribute of `AsBindGroup`). + data_buffers: HashMap, + + /// Holds extra CPU-accessible data that the material provides. + /// + /// Typically, this data is used for constructing the material key, for + /// pipeline specialization purposes. + extra_data: Vec>, + + /// A list of free slot IDs. + free_slots: Vec, + /// The total number of materials currently allocated in this slab. + live_allocation_count: u32, + /// The total number of resources currently allocated in the binding arrays. + allocated_resource_count: u32, } -/// Information that the allocator keeps about each bind group with bindless -/// textures in use. 
-pub struct MaterialBindlessBindGroup +/// A GPU-accessible buffer that holds the mapping from binding index to +/// bindless slot. +/// +/// This is conventionally assigned to bind group binding 0, but it can be +/// changed by altering the [`Self::binding_number`], which corresponds to the +/// `#[bindless(index_table(binding(B)))]` attribute in `AsBindGroup`. +struct MaterialBindlessIndexTable where M: Material, { - /// The actual bind group. - pub bind_group: Option, - - /// The bind group data for each slot. + /// The buffer containing the mappings. + buffer: RetainedRawBufferVec, + /// The range of bindless indices that this bindless index table covers. + /// + /// If this range is M..N, then the field at index $i$ maps to bindless + /// index $i$ + M. The size of this table is N - M. /// - /// This is `None` if the slot is unallocated and `Some` if the slot is - /// full. - unprepared_bind_groups: Vec>>, + /// This corresponds to the `#[bindless(index_table(range(M..N)))]` + /// attribute in `AsBindGroup`. + index_range: Range, + /// The binding number that this index table is assigned to in the shader. + binding_number: BindingNumber, + phantom: PhantomData, +} - /// A bitfield that contains a 0 if the slot is free or a 1 if the slot is - /// full. +/// A single binding array for storing bindless resources and the bookkeeping +/// necessary to allocate into it. +struct MaterialBindlessBindingArray +where + R: GetBindingResourceId, +{ + /// The number of the binding that we attach this binding array to. + binding_number: BindingNumber, + /// A mapping from bindless slot index to the resource stored in that slot, + /// if any. + bindings: Vec>>, + /// The type of resource stored in this binding array. + resource_type: BindlessResourceType, + /// Maps a resource ID to the slot in which it's stored. /// - /// We keep this value so that we can quickly find the next free slot when - /// we go to allocate. 
- used_slot_bitmap: u32, + /// This is essentially the inverse mapping of [`Self::bindings`]. + resource_to_slot: HashMap, + /// A list of free slots in [`Self::bindings`] that contain no binding. + free_slots: Vec, + /// The number of allocated objects in this binding array. + len: u32, } -/// Information that the allocator keeps about each bind group for which -/// bindless textures are not in use. +/// A single resource (sampler, texture, or buffer) in a binding array. /// -/// When a bindless texture isn't in use, bind groups and material instances are -/// in 1:1 correspondence, and therefore there's only a single slot for extra -/// material data here. -pub struct MaterialNonBindlessBindGroup +/// Resources hold a reference count, which specifies the number of materials +/// currently allocated within the slab that refer to this resource. When the +/// reference count drops to zero, the resource is freed. +struct MaterialBindlessBinding +where + R: GetBindingResourceId, +{ + /// The sampler, texture, or buffer. + resource: R, + /// The number of materials currently allocated within the containing slab + /// that use this resource. + ref_count: u32, +} + +/// The allocator that stores bind groups for non-bindless materials. +pub struct MaterialBindGroupNonBindlessAllocator where M: Material, { - /// The single allocation in a non-bindless bind group. - allocation: MaterialNonBindlessBindGroupAllocation, + /// A mapping from [`MaterialBindGroupIndex`] to the bind group allocated in + /// each slot. + bind_groups: Vec>>, + /// The bind groups that are dirty and need to be prepared. + /// + /// To prepare the bind groups, call + /// [`MaterialBindGroupAllocator::prepare_bind_groups`]. + to_prepare: HashSet, + /// A list of free bind group indices. + free_indices: Vec, + phantom: PhantomData, } -/// The single allocation in a non-bindless bind group. 
-enum MaterialNonBindlessBindGroupAllocation +/// A single bind group that a [`MaterialBindGroupNonBindlessAllocator`] is +/// currently managing. +enum MaterialNonBindlessAllocatedBindGroup where M: Material, { - /// The allocation is free. - Unallocated, - /// The allocation has been allocated, but not yet initialized. - Allocated, - /// The allocation is full and contains both a bind group and extra data. - Initialized(BindGroup, M::Data), + /// An unprepared bind group. + /// + /// The allocator prepares all outstanding unprepared bind groups when + /// [`MaterialBindGroupNonBindlessAllocator::prepare_bind_groups`] is + /// called. + Unprepared { + /// The unprepared bind group, including extra data. + bind_group: UnpreparedBindGroup, + /// The layout of that bind group. + layout: BindGroupLayout, + }, + /// A bind group that's already been prepared. + Prepared { + bind_group: PreparedBindGroup, + #[expect(dead_code, reason = "These buffers are only referenced by bind groups")] + uniform_buffers: Vec, + }, +} + +/// Dummy instances of various resources that we fill unused slots in binding +/// arrays with. +#[derive(Resource)] +pub struct FallbackBindlessResources { + /// A dummy filtering sampler. + filtering_sampler: Sampler, + /// A dummy non-filtering sampler. + non_filtering_sampler: Sampler, + /// A dummy comparison sampler. + comparison_sampler: Sampler, +} + +/// The `wgpu` ID of a single bindless or non-bindless resource. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +enum BindingResourceId { + /// A buffer. + Buffer(BufferId), + /// A texture view, with the given dimension. + TextureView(TextureViewDimension, TextureViewId), + /// A sampler. + Sampler(SamplerId), + /// A buffer containing plain old data. + /// + /// This corresponds to the `#[data]` structure-level attribute on + /// `AsBindGroup`. + DataBuffer, } -/// Where the GPU data for a material is located. +/// A temporary list of references to `wgpu` bindless resources. 
/// -/// In bindless mode, materials are gathered into bind groups, and the slot is -/// necessary to locate the material data within that group. If not in bindless -/// mode, bind groups and materials are in 1:1 correspondence, and the slot -/// index is always 0. +/// We need this because the `wgpu` bindless API takes a slice of references. +/// Thus we need to create intermediate vectors of bindless resources in order +/// to satisfy `wgpu`'s lifetime requirements. +enum BindingResourceArray<'a> { + /// A list of bindings. + Buffers(Vec>), + /// A list of texture views. + TextureViews(Vec<&'a WgpuTextureView>), + /// A list of samplers. + Samplers(Vec<&'a WgpuSampler>), +} + +/// The location of a material (either bindless or non-bindless) within the +/// slabs. #[derive(Clone, Copy, Debug, Default, Reflect)] +#[reflect(Clone, Default)] pub struct MaterialBindingId { /// The index of the bind group (slab) where the GPU data is located. pub group: MaterialBindGroupIndex, /// The slot within that bind group. + /// + /// Non-bindless materials will always have a slot of 0. pub slot: MaterialBindGroupSlot, } @@ -138,8 +290,8 @@ pub struct MaterialBindingId { /// /// In bindless mode, each bind group contains multiple materials. In /// non-bindless mode, each bind group contains only one material. -#[derive(Clone, Copy, Debug, Default, Reflect, PartialEq, Deref, DerefMut)] -#[reflect(Default)] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash, Reflect, Deref, DerefMut)] +#[reflect(Default, Clone, PartialEq, Hash)] pub struct MaterialBindGroupIndex(pub u32); impl From for MaterialBindGroupIndex { @@ -154,611 +306,574 @@ impl From for MaterialBindGroupIndex { /// In bindless mode, this slot is needed to locate the material data in each /// bind group, since multiple materials are packed into a single slab. In /// non-bindless mode, this slot is always 0. 
-#[derive(Clone, Copy, Debug, Default, Reflect, Deref, DerefMut)] -#[reflect(Default)] -pub struct MaterialBindGroupSlot(pub u16); +#[derive(Clone, Copy, Debug, Default, PartialEq, Reflect, Deref, DerefMut)] +#[reflect(Default, Clone, PartialEq)] +pub struct MaterialBindGroupSlot(pub u32); -impl From for MaterialBindGroupSlot { - fn from(value: u32) -> Self { - MaterialBindGroupSlot(value as u16) - } +/// The CPU/GPU synchronization state of a buffer that we maintain. +/// +/// Currently, the only buffer that we maintain is the +/// [`MaterialBindlessIndexTable`]. +enum BufferDirtyState { + /// The buffer is currently synchronized between the CPU and GPU. + Clean, + /// The buffer hasn't been created yet. + NeedsReserve, + /// The buffer exists on both CPU and GPU, but the GPU data is out of date. + NeedsUpload, } -impl From for u32 { - fn from(value: MaterialBindGroupSlot) -> Self { - value.0 as u32 - } +/// Information that describes a potential allocation of an +/// [`UnpreparedBindGroup`] into a slab. +struct BindlessAllocationCandidate { + /// A map that, for every resource in the [`UnpreparedBindGroup`] that + /// already existed in this slab, maps bindless index of that resource to + /// its slot in the appropriate binding array. + pre_existing_resources: HashMap, + /// Stores the number of free slots that are needed to satisfy this + /// allocation. + needed_free_slots: u32, } -/// A temporary data structure that contains references to bindless resources. +/// A trait that allows fetching the [`BindingResourceId`] from a +/// [`BindlessResourceType`]. /// -/// We need this because the `wgpu` bindless API takes a slice of references. -/// Thus we need to create intermediate vectors of bindless resources in order -/// to satisfy the lifetime requirements. 
-enum BindingResourceArray<'a> { - Buffers(Vec>), - TextureViews(TextureViewDimension, Vec<&'a WgpuTextureView>), - Samplers(Vec<&'a WgpuSampler>), +/// This is used when freeing bindless resources, in order to locate the IDs +/// assigned to each resource so that they can be removed from the appropriate +/// maps. +trait GetBindingResourceId { + /// Returns the [`BindingResourceId`] for this resource. + /// + /// `resource_type` specifies this resource's type. This is used for + /// textures, as a `wgpu` [`TextureView`] doesn't store enough information + /// itself to determine its dimension. + fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId; } -/// Contains dummy resources that we use to pad out bindless arrays. +/// The public interface to a slab, which represents a single bind group. +pub struct MaterialSlab<'a, M>(MaterialSlabImpl<'a, M>) +where + M: Material; + +/// The actual implementation of a material slab. /// -/// On DX12, every binding array slot must be filled, so we have to fill unused -/// slots. -#[derive(Resource)] -pub struct FallbackBindlessResources { - /// A dummy sampler that we fill unused slots in bindless sampler arrays - /// with. - fallback_sampler: Sampler, +/// This has bindless and non-bindless variants. +enum MaterialSlabImpl<'a, M> +where + M: Material, +{ + /// The implementation of the slab interface we use when the slab + /// is bindless. + Bindless(&'a MaterialBindlessSlab), + /// The implementation of the slab interface we use when the slab + /// is non-bindless. + NonBindless(MaterialNonBindlessSlab<'a, M>), } -struct MaterialFallbackBuffers(HashMap); - -/// The minimum byte size of each fallback buffer. -const MIN_BUFFER_SIZE: u64 = 16; - -impl MaterialBindGroupAllocator +/// A single bind group that the [`MaterialBindGroupNonBindlessAllocator`] +/// manages. 
+enum MaterialNonBindlessSlab<'a, M> where M: Material, { - /// Creates or recreates any bind groups that were modified this frame. - pub(crate) fn prepare_bind_groups( - &mut self, - render_device: &RenderDevice, - fallback_image: &FallbackImage, - fallback_resources: &FallbackBindlessResources, - ) { - for bind_group in &mut self.bind_groups { - bind_group.rebuild_bind_group_if_necessary( - render_device, - &self.bind_group_layout, - fallback_image, - fallback_resources, - &self.fallback_buffers, - ); - } - } - - /// Returns the bind group with the given index, if it exists. - #[inline] - pub(crate) fn get(&self, index: MaterialBindGroupIndex) -> Option<&MaterialBindGroup> { - self.bind_groups.get(index.0 as usize) - } + /// A slab that has a bind group. + Prepared(&'a PreparedBindGroup), + /// A slab that doesn't yet have a bind group. + Unprepared(&'a UnpreparedBindGroup), +} - /// Allocates a new binding slot and returns its ID. - pub(crate) fn allocate(&mut self) -> MaterialBindingId { - let group_index = self.free_bind_groups.pop().unwrap_or_else(|| { - let group_index = self.bind_groups.len() as u32; - self.bind_groups - .push(MaterialBindGroup::new(self.bindless_enabled)); - group_index - }); +/// Manages an array of untyped plain old data on GPU and allocates individual +/// slots within that array. +/// +/// This supports the `#[data]` attribute of `AsBindGroup`. +struct MaterialDataBuffer { + /// The number of the binding that we attach this storage buffer to. + binding_number: BindingNumber, + /// The actual data. + /// + /// Note that this is untyped (`u8`); the actual aligned size of each + /// element is given by [`Self::aligned_element_size`]; + buffer: RetainedRawBufferVec, + /// The size of each element in the buffer, including padding and alignment + /// if any. + aligned_element_size: u32, + /// A list of free slots within the buffer. + free_slots: Vec, + /// The actual number of slots that have been allocated. 
+ len: u32, +} - let bind_group = &mut self.bind_groups[group_index as usize]; - let slot_index = bind_group.allocate(); +/// A buffer containing plain old data, already packed into the appropriate GPU +/// format, and that can be updated incrementally. +/// +/// This structure exists in order to encapsulate the lazy update +/// ([`BufferDirtyState`]) logic in a single place. +#[derive(Deref, DerefMut)] +struct RetainedRawBufferVec +where + T: Pod, +{ + /// The contents of the buffer. + #[deref] + buffer: RawBufferVec, + /// Whether the contents of the buffer have been uploaded to the GPU. + dirty: BufferDirtyState, +} - if !bind_group.is_full() { - self.free_bind_groups.push(group_index); - } +/// The size of the buffer that we assign to unused buffer slots, in bytes. +/// +/// This is essentially arbitrary, as it doesn't seem to matter to `wgpu` what +/// the size is. +const DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE: u64 = 16; - MaterialBindingId { - group: group_index.into(), - slot: slot_index, - } +impl From for MaterialBindGroupSlot { + fn from(value: u32) -> Self { + MaterialBindGroupSlot(value) } +} - /// Assigns an unprepared bind group to the group and slot specified in the - /// [`MaterialBindingId`]. - pub(crate) fn init( - &mut self, - render_device: &RenderDevice, - material_binding_id: MaterialBindingId, - unprepared_bind_group: UnpreparedBindGroup, - ) { - self.bind_groups[material_binding_id.group.0 as usize].init( - render_device, - &self.bind_group_layout, - material_binding_id.slot, - unprepared_bind_group, - ); +impl From for u32 { + fn from(value: MaterialBindGroupSlot) -> Self { + value.0 } +} - /// Fills a slot directly with a custom bind group. - /// - /// This is only a meaningful operation for non-bindless bind groups. It's - /// rarely used, but see the `texture_binding_array` example for an example - /// demonstrating how this feature might see use in practice. 
- pub(crate) fn init_custom( - &mut self, - material_binding_id: MaterialBindingId, - bind_group: BindGroup, - bind_group_data: M::Data, - ) { - self.bind_groups[material_binding_id.group.0 as usize] - .init_custom(bind_group, bind_group_data); +impl<'a> From<&'a OwnedBindingResource> for BindingResourceId { + fn from(value: &'a OwnedBindingResource) -> Self { + match *value { + OwnedBindingResource::Buffer(ref buffer) => BindingResourceId::Buffer(buffer.id()), + OwnedBindingResource::Data(_) => BindingResourceId::DataBuffer, + OwnedBindingResource::TextureView(ref texture_view_dimension, ref texture_view) => { + BindingResourceId::TextureView(*texture_view_dimension, texture_view.id()) + } + OwnedBindingResource::Sampler(_, ref sampler) => { + BindingResourceId::Sampler(sampler.id()) + } + } } +} - /// Marks the slot corresponding to the given [`MaterialBindingId`] as free. - pub(crate) fn free(&mut self, material_binding_id: MaterialBindingId) { - let bind_group = &mut self.bind_groups[material_binding_id.group.0 as usize]; - let was_full = bind_group.is_full(); +impl GetBindingResourceId for Buffer { + fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId { + BindingResourceId::Buffer(self.id()) + } +} - bind_group.free(material_binding_id.slot); +impl GetBindingResourceId for Sampler { + fn binding_resource_id(&self, _: BindlessResourceType) -> BindingResourceId { + BindingResourceId::Sampler(self.id()) + } +} - // If the group that this material belonged to was full, it now contains - // at least one free slot, so add the group to the `free_bind_groups` - // list. 
- if was_full { - debug_assert!(!self.free_bind_groups.contains(&material_binding_id.group.0)); - self.free_bind_groups.push(*material_binding_id.group); - } +impl GetBindingResourceId for TextureView { + fn binding_resource_id(&self, resource_type: BindlessResourceType) -> BindingResourceId { + let texture_view_dimension = match resource_type { + BindlessResourceType::Texture1d => TextureViewDimension::D1, + BindlessResourceType::Texture2d => TextureViewDimension::D2, + BindlessResourceType::Texture2dArray => TextureViewDimension::D2Array, + BindlessResourceType::Texture3d => TextureViewDimension::D3, + BindlessResourceType::TextureCube => TextureViewDimension::Cube, + BindlessResourceType::TextureCubeArray => TextureViewDimension::CubeArray, + _ => panic!("Resource type is not a texture"), + }; + BindingResourceId::TextureView(texture_view_dimension, self.id()) } } -impl MaterialBindGroup +impl MaterialBindGroupAllocator where M: Material, { - /// Creates a new material bind group. - fn new(bindless: bool) -> MaterialBindGroup { - if bindless { - MaterialBindGroup::Bindless(MaterialBindlessBindGroup::new()) + /// Creates a new [`MaterialBindGroupAllocator`] managing the data for a + /// single material. + fn new(render_device: &RenderDevice) -> MaterialBindGroupAllocator { + if material_uses_bindless_resources::(render_device) { + MaterialBindGroupAllocator::Bindless(Box::new(MaterialBindGroupBindlessAllocator::new( + render_device, + ))) } else { - MaterialBindGroup::NonBindless(MaterialNonBindlessBindGroup::new()) + MaterialBindGroupAllocator::NonBindless(Box::new( + MaterialBindGroupNonBindlessAllocator::new(), + )) } } - /// Allocates a new binding slot and returns its ID. - fn allocate(&mut self) -> MaterialBindGroupSlot { + /// Returns the slab with the given index, if one exists. 
+ pub fn get(&self, group: MaterialBindGroupIndex) -> Option> { match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.allocate() - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.allocate() + MaterialBindGroupAllocator::Bindless(ref bindless_allocator) => bindless_allocator + .get(group) + .map(|bindless_slab| MaterialSlab(MaterialSlabImpl::Bindless(bindless_slab))), + MaterialBindGroupAllocator::NonBindless(ref non_bindless_allocator) => { + non_bindless_allocator.get(group).map(|non_bindless_slab| { + MaterialSlab(MaterialSlabImpl::NonBindless(non_bindless_slab)) + }) } } } - /// Assigns an unprepared bind group to the group and slot specified in the - /// [`MaterialBindingId`]. - fn init( + /// Allocates an [`UnpreparedBindGroup`] and returns the resulting binding ID. + /// + /// This method should generally be preferred over + /// [`Self::allocate_prepared`], because this method supports both bindless + /// and non-bindless bind groups. Only use [`Self::allocate_prepared`] if + /// you need to prepare the bind group yourself. 
+ pub fn allocate_unprepared( &mut self, - render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - slot: MaterialBindGroupSlot, unprepared_bind_group: UnpreparedBindGroup, - ) { + bind_group_layout: &BindGroupLayout, + ) -> MaterialBindingId { match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.init( - render_device, - bind_group_layout, - slot, - unprepared_bind_group, - ); - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.init( - render_device, - bind_group_layout, - slot, - unprepared_bind_group, - ); - } + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.allocate_unprepared(unprepared_bind_group), + MaterialBindGroupAllocator::NonBindless( + ref mut material_bind_group_non_bindless_allocator, + ) => material_bind_group_non_bindless_allocator + .allocate_unprepared(unprepared_bind_group, (*bind_group_layout).clone()), } } - /// Fills a slot directly with a custom bind group. + /// Places a pre-prepared bind group into a slab. /// - /// This is only a meaningful operation for non-bindless bind groups. It's - /// rarely used, but see the `texture_binding_array` example for an example - /// demonstrating how this feature might see use in practice. - fn init_custom(&mut self, bind_group: BindGroup, extra_data: M::Data) { - match *self { - MaterialBindGroup::Bindless(_) => { - error!("Custom bind groups aren't supported in bindless mode"); - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.init_custom(bind_group, extra_data); - } - } - } - - /// Marks the slot corresponding to the given [`MaterialBindGroupSlot`] as - /// free. 
- fn free(&mut self, material_bind_group_slot: MaterialBindGroupSlot) { - match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.free(material_bind_group_slot); - } - MaterialBindGroup::NonBindless(ref mut material_non_bindless_bind_group) => { - material_non_bindless_bind_group.free(material_bind_group_slot); - } - } - } - - /// Returns the actual bind group, or `None` if it hasn't been created yet. - pub fn get_bind_group(&self) -> Option<&BindGroup> { + /// For bindless materials, the allocator internally manages the bind + /// groups, so calling this method will panic if this is a bindless + /// allocator. Only non-bindless allocators support this method. + /// + /// It's generally preferred to use [`Self::allocate_unprepared`], because + /// that method supports both bindless and non-bindless allocators. Only use + /// this method if you need to prepare the bind group yourself. + pub fn allocate_prepared( + &mut self, + prepared_bind_group: PreparedBindGroup, + ) -> MaterialBindingId { match *self { - MaterialBindGroup::Bindless(ref material_bindless_bind_group) => { - material_bindless_bind_group.get_bind_group() + MaterialBindGroupAllocator::Bindless(_) => { + panic!( + "Bindless resources are incompatible with implementing `as_bind_group` \ + directly; implement `unprepared_bind_group` instead or disable bindless" + ) } - MaterialBindGroup::NonBindless(ref material_non_bindless_bind_group) => { - material_non_bindless_bind_group.get_bind_group() + MaterialBindGroupAllocator::NonBindless(ref mut non_bindless_allocator) => { + non_bindless_allocator.allocate_prepared(prepared_bind_group) } } } - /// Returns true if all the slots are full or false if at least one slot in - /// this bind group is free. - fn is_full(&self) -> bool { + /// Deallocates the material with the given binding ID. + /// + /// Any resources that are no longer referenced are removed from the slab. 
+ pub fn free(&mut self, material_binding_id: MaterialBindingId) { match *self { - MaterialBindGroup::Bindless(ref material_bindless_bind_group) => { - material_bindless_bind_group.is_full() - } - MaterialBindGroup::NonBindless(ref material_non_bindless_bind_group) => { - material_non_bindless_bind_group.is_full() - } + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.free(material_binding_id), + MaterialBindGroupAllocator::NonBindless( + ref mut material_bind_group_non_bindless_allocator, + ) => material_bind_group_non_bindless_allocator.free(material_binding_id), } } - /// Recreates the bind group for this material bind group containing the - /// data for every material in it. - fn rebuild_bind_group_if_necessary( + /// Recreates any bind groups corresponding to slabs that have been modified + /// since last calling [`MaterialBindGroupAllocator::prepare_bind_groups`]. + pub fn prepare_bind_groups( &mut self, render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - fallback_image: &FallbackImage, fallback_bindless_resources: &FallbackBindlessResources, - fallback_buffers: &MaterialFallbackBuffers, + fallback_image: &FallbackImage, ) { match *self { - MaterialBindGroup::Bindless(ref mut material_bindless_bind_group) => { - material_bindless_bind_group.rebuild_bind_group_if_necessary( - render_device, - bind_group_layout, - fallback_image, - fallback_bindless_resources, - fallback_buffers, - ); - } - MaterialBindGroup::NonBindless(_) => {} + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.prepare_bind_groups( + render_device, + fallback_bindless_resources, + fallback_image, + ), + MaterialBindGroupAllocator::NonBindless( + ref mut material_bind_group_non_bindless_allocator, + ) => material_bind_group_non_bindless_allocator.prepare_bind_groups(render_device), } } - /// Returns the 
associated extra data for the material with the given slot. - pub fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { + /// Uploads the contents of all buffers that this + /// [`MaterialBindGroupAllocator`] manages to the GPU. + /// + /// Non-bindless allocators don't currently manage any buffers, so this + /// method only has an effect for bindless allocators. + pub fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { match *self { - MaterialBindGroup::Bindless(ref material_bindless_bind_group) => { - material_bindless_bind_group.get_extra_data(slot) - } - MaterialBindGroup::NonBindless(ref material_non_bindless_bind_group) => { - material_non_bindless_bind_group.get_extra_data(slot) + MaterialBindGroupAllocator::Bindless( + ref mut material_bind_group_bindless_allocator, + ) => material_bind_group_bindless_allocator.write_buffers(render_device, render_queue), + MaterialBindGroupAllocator::NonBindless(_) => { + // Not applicable. } } } } -impl MaterialBindlessBindGroup +impl MaterialBindlessIndexTable where M: Material, { - /// Returns a new bind group. - fn new() -> MaterialBindlessBindGroup { - let count = M::bindless_slot_count().unwrap_or(1); + /// Creates a new [`MaterialBindlessIndexTable`] for a single slab. + fn new( + bindless_index_table_descriptor: &BindlessIndexTableDescriptor, + ) -> MaterialBindlessIndexTable { + // Preallocate space for one bindings table, so that there will always be a buffer. 
+ let mut buffer = RetainedRawBufferVec::new(BufferUsages::STORAGE); + for _ in *bindless_index_table_descriptor.indices.start + ..*bindless_index_table_descriptor.indices.end + { + buffer.push(0); + } - MaterialBindlessBindGroup { - bind_group: None, - unprepared_bind_groups: iter::repeat_with(|| None).take(count as usize).collect(), - used_slot_bitmap: 0, + MaterialBindlessIndexTable { + buffer, + index_range: bindless_index_table_descriptor.indices.clone(), + binding_number: bindless_index_table_descriptor.binding_number, + phantom: PhantomData, } } - /// Allocates a new slot and returns its index. + /// Returns the bindings in the binding index table. /// - /// This bind group must not be full. - fn allocate(&mut self) -> MaterialBindGroupSlot { - debug_assert!(!self.is_full()); - - // Mark the slot as used. - let slot = self.used_slot_bitmap.trailing_ones(); - self.used_slot_bitmap |= 1 << slot; - - slot.into() + /// If the current [`MaterialBindlessIndexTable::index_range`] is M..N, then + /// element *i* of the returned binding index table contains the slot of the + /// bindless resource with bindless index *i* + M. + fn get(&self, slot: MaterialBindGroupSlot) -> &[u32] { + let struct_size = *self.index_range.end as usize - *self.index_range.start as usize; + let start = struct_size * slot.0 as usize; + &self.buffer.values()[start..(start + struct_size)] } - /// Assigns the given unprepared bind group to the given slot. - fn init( - &mut self, - _: &RenderDevice, - _: &BindGroupLayout, + /// Returns a single binding from the binding index table. + fn get_binding( + &self, slot: MaterialBindGroupSlot, - unprepared_bind_group: UnpreparedBindGroup, - ) { - self.unprepared_bind_groups[slot.0 as usize] = Some(unprepared_bind_group); - - // Invalidate the cached bind group so that we rebuild it again. - self.bind_group = None; - } - - /// Marks the given slot as free. 
- fn free(&mut self, slot: MaterialBindGroupSlot) { - self.unprepared_bind_groups[slot.0 as usize] = None; - self.used_slot_bitmap &= !(1 << slot.0); - - // Invalidate the cached bind group so that we rebuild it again. - self.bind_group = None; - } - - /// Returns true if all the slots are full or false if at least one slot in - /// this bind group is free. - fn is_full(&self) -> bool { - self.used_slot_bitmap == (1 << (self.unprepared_bind_groups.len() as u32)) - 1 + bindless_index: BindlessIndex, + ) -> Option { + if bindless_index < self.index_range.start || bindless_index >= self.index_range.end { + return None; + } + self.get(slot) + .get((*bindless_index - *self.index_range.start) as usize) + .copied() } - /// Returns the actual bind group, or `None` if it hasn't been created yet. - fn get_bind_group(&self) -> Option<&BindGroup> { - self.bind_group.as_ref() + fn table_length(&self) -> u32 { + self.index_range.end.0 - self.index_range.start.0 } - /// Recreates the bind group for this material bind group containing the - /// data for every material in it. - fn rebuild_bind_group_if_necessary( + /// Updates the binding index table for a single material. + /// + /// The `allocated_resource_slots` map contains a mapping from the + /// [`BindlessIndex`] of each resource that the material references to the + /// slot that that resource occupies in the appropriate binding array. This + /// method serializes that map into a binding index table that the shader + /// can read. 
+ fn set( &mut self, - render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - fallback_image: &FallbackImage, - fallback_bindless_resources: &FallbackBindlessResources, - fallback_buffers: &MaterialFallbackBuffers, + slot: MaterialBindGroupSlot, + allocated_resource_slots: &HashMap, ) { - if self.bind_group.is_some() { - return; + let table_len = self.table_length() as usize; + let range = (slot.0 as usize * table_len)..((slot.0 as usize + 1) * table_len); + while self.buffer.len() < range.end { + self.buffer.push(0); } - let Some(first_bind_group) = self - .unprepared_bind_groups - .iter() - .find_map(|slot| slot.as_ref()) - else { - return; - }; - - // Creates the intermediate binding resource vectors. - let Some(binding_resource_arrays) = self.recreate_binding_resource_arrays( - first_bind_group, - fallback_image, - fallback_bindless_resources, - fallback_buffers, - ) else { - return; - }; - - // Now build the actual resource arrays for `wgpu`. - let entries = binding_resource_arrays - .iter() - .map(|&(&binding, ref binding_resource_array)| BindGroupEntry { - binding, - resource: match *binding_resource_array { - BindingResourceArray::Buffers(ref vec) => { - BindingResource::BufferArray(&vec[..]) - } - BindingResourceArray::TextureViews(_, ref vec) => { - BindingResource::TextureViewArray(&vec[..]) - } - BindingResourceArray::Samplers(ref vec) => { - BindingResource::SamplerArray(&vec[..]) - } - }, - }) - .collect::>(); + for (&bindless_index, &resource_slot) in allocated_resource_slots { + if self.index_range.contains(&bindless_index) { + self.buffer.set( + *bindless_index + range.start as u32 - *self.index_range.start, + resource_slot, + ); + } + } - self.bind_group = - Some(render_device.create_bind_group(M::label(), bind_group_layout, &entries)); + // Mark the buffer as needing to be recreated, in case we grew it. + self.buffer.dirty = BufferDirtyState::NeedsReserve; } - /// Recreates the binding arrays for each material in this bind group. 
- fn recreate_binding_resource_arrays<'a>( - &'a self, - first_bind_group: &'a UnpreparedBindGroup, - fallback_image: &'a FallbackImage, - fallback_bindless_resources: &'a FallbackBindlessResources, - fallback_buffers: &'a MaterialFallbackBuffers, - ) -> Option)>> { - // Initialize the arrays. - let mut binding_resource_arrays = first_bind_group - .bindings - .iter() - .map(|(index, binding)| match *binding { - OwnedBindingResource::Buffer(..) => (index, BindingResourceArray::Buffers(vec![])), - OwnedBindingResource::TextureView(dimension, _) => { - (index, BindingResourceArray::TextureViews(dimension, vec![])) - } - OwnedBindingResource::Sampler(..) => { - (index, BindingResourceArray::Samplers(vec![])) - } - }) - .collect::>(); + /// Returns the [`BindGroupEntry`] for the index table itself. + fn bind_group_entry(&self) -> BindGroupEntry { + BindGroupEntry { + binding: *self.binding_number, + resource: self + .buffer + .buffer() + .expect("Bindings buffer must exist") + .as_entire_binding(), + } + } +} - for maybe_unprepared_bind_group in self.unprepared_bind_groups.iter() { - match *maybe_unprepared_bind_group { - None => { - // Push dummy resources for this slot. - for binding_resource_array in &mut binding_resource_arrays { - match *binding_resource_array { - (binding, BindingResourceArray::Buffers(ref mut vec)) => { - vec.push(BufferBinding { - buffer: &fallback_buffers.0[binding], - offset: 0, - size: None, - }); - } - ( - _, - BindingResourceArray::TextureViews(texture_dimension, ref mut vec), - ) => vec.push(&fallback_image.get(texture_dimension).texture_view), - (_, BindingResourceArray::Samplers(ref mut vec)) => { - vec.push(&fallback_bindless_resources.fallback_sampler); - } - } - } - } +impl RetainedRawBufferVec +where + T: Pod, +{ + /// Creates a new empty [`RetainedRawBufferVec`] supporting the given + /// [`BufferUsages`]. 
+ fn new(buffer_usages: BufferUsages) -> RetainedRawBufferVec { + RetainedRawBufferVec { + buffer: RawBufferVec::new(buffer_usages), + dirty: BufferDirtyState::NeedsUpload, + } + } - Some(ref unprepared_bind_group) => { - // Push the resources for this slot. - // - // All materials in this group must have the same type of - // binding (buffer, texture view, sampler) in each bind - // group entry. - for ( - binding_index, - (&mut (binding, ref mut binding_resource_array), (_, binding_resource)), - ) in binding_resource_arrays - .iter_mut() - .zip(unprepared_bind_group.bindings.0.iter()) - .enumerate() - { - match (binding_resource_array, binding_resource) { - ( - &mut BindingResourceArray::Buffers(ref mut vec), - OwnedBindingResource::Buffer(buffer), - ) => match NonZero::new(buffer.size()) { - None => vec.push(BufferBinding { - buffer: &fallback_buffers.0[binding], - offset: 0, - size: None, - }), - Some(size) => vec.push(BufferBinding { - buffer, - offset: 0, - size: Some(size), - }), - }, - ( - &mut BindingResourceArray::TextureViews(_, ref mut vec), - OwnedBindingResource::TextureView(_, texture_view), - ) => vec.push(texture_view), - ( - &mut BindingResourceArray::Samplers(ref mut vec), - OwnedBindingResource::Sampler(sampler), - ) => vec.push(sampler), - _ => { - error!( - "Mismatched bind group layouts for material \ - {} at bind group {}; can't combine bind \ - groups into a single bindless bind group!", - any::type_name::(), - binding_index, - ); - return None; - } - } - } - } + /// Recreates the GPU backing buffer if needed. + fn prepare(&mut self, render_device: &RenderDevice) { + match self.dirty { + BufferDirtyState::Clean | BufferDirtyState::NeedsUpload => {} + BufferDirtyState::NeedsReserve => { + let capacity = self.buffer.len(); + self.buffer.reserve(capacity, render_device); + self.dirty = BufferDirtyState::NeedsUpload; } } - - Some(binding_resource_arrays) } - /// Returns the associated extra data for the material with the given slot. 
- fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { - &self.unprepared_bind_groups[slot.0 as usize] - .as_ref() - .unwrap() - .data + /// Writes the current contents of the buffer to the GPU if necessary. + fn write(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { + match self.dirty { + BufferDirtyState::Clean => {} + BufferDirtyState::NeedsReserve | BufferDirtyState::NeedsUpload => { + self.buffer.write_buffer(render_device, render_queue); + self.dirty = BufferDirtyState::Clean; + } + } } } -impl MaterialNonBindlessBindGroup +impl MaterialBindGroupBindlessAllocator where M: Material, { - /// Creates a new material bind group. - fn new() -> MaterialNonBindlessBindGroup { - MaterialNonBindlessBindGroup { - allocation: MaterialNonBindlessBindGroupAllocation::Unallocated, + /// Creates a new [`MaterialBindGroupBindlessAllocator`] managing the data + /// for a single bindless material. + fn new(render_device: &RenderDevice) -> MaterialBindGroupBindlessAllocator { + let bindless_descriptor = M::bindless_descriptor() + .expect("Non-bindless materials should use the non-bindless allocator"); + let fallback_buffers = bindless_descriptor + .buffers + .iter() + .map(|bindless_buffer_descriptor| { + ( + bindless_buffer_descriptor.bindless_index, + render_device.create_buffer(&BufferDescriptor { + label: Some("bindless fallback buffer"), + size: match bindless_buffer_descriptor.size { + Some(size) => size as u64, + None => DEFAULT_BINDLESS_FALLBACK_BUFFER_SIZE, + }, + usage: BufferUsages::STORAGE, + mapped_at_creation: false, + }), + ) + }) + .collect(); + + MaterialBindGroupBindlessAllocator { + slabs: vec![], + bind_group_layout: M::bind_group_layout(render_device), + bindless_descriptor, + fallback_buffers, + slab_capacity: M::bindless_slot_count() + .expect("Non-bindless materials should use the non-bindless allocator") + .resolve(), } } - /// Allocates a new slot and returns its index. 
+ /// Allocates the resources for a single material into a slab and returns + /// the resulting ID. /// - /// This bind group must not be full. - fn allocate(&mut self) -> MaterialBindGroupSlot { - debug_assert!(!self.is_full()); - self.allocation = MaterialNonBindlessBindGroupAllocation::Allocated; - MaterialBindGroupSlot(0) - } - - /// Assigns an unprepared bind group to the group and slot specified in the - /// [`MaterialBindingId`]. + /// The returned [`MaterialBindingId`] can later be used to fetch the slab + /// that was used. /// - /// For non-bindless bind groups, we go ahead and create the bind group - /// immediately. - fn init( + /// This function can't fail. If all slabs are full, then a new slab is + /// created, and the material is allocated into it. + fn allocate_unprepared( &mut self, - render_device: &RenderDevice, - bind_group_layout: &BindGroupLayout, - _: MaterialBindGroupSlot, - unprepared_bind_group: UnpreparedBindGroup, - ) { - let entries = unprepared_bind_group - .bindings - .iter() - .map(|(index, binding)| BindGroupEntry { - binding: *index, - resource: binding.get_binding(), - }) - .collect::>(); + mut unprepared_bind_group: UnpreparedBindGroup, + ) -> MaterialBindingId { + for (slab_index, slab) in self.slabs.iter_mut().enumerate() { + trace!("Trying to allocate in slab {}", slab_index); + match slab.try_allocate(unprepared_bind_group, self.slab_capacity) { + Ok(slot) => { + return MaterialBindingId { + group: MaterialBindGroupIndex(slab_index as u32), + slot, + }; + } + Err(bind_group) => unprepared_bind_group = bind_group, + } + } - self.allocation = MaterialNonBindlessBindGroupAllocation::Initialized( - render_device.create_bind_group(M::label(), bind_group_layout, &entries), - unprepared_bind_group.data, - ); - } + let group = MaterialBindGroupIndex(self.slabs.len() as u32); + self.slabs + .push(MaterialBindlessSlab::new(&self.bindless_descriptor)); - /// Fills the slot directly with a custom bind group. 
- /// - /// This is only a meaningful operation for non-bindless bind groups. It's - /// rarely used, but see the `texture_binding_array` example for an example - /// demonstrating how this feature might see use in practice. - fn init_custom(&mut self, bind_group: BindGroup, extra_data: M::Data) { - self.allocation = - MaterialNonBindlessBindGroupAllocation::Initialized(bind_group, extra_data); + // Allocate into the newly-pushed slab. + let Ok(slot) = self + .slabs + .last_mut() + .expect("We just pushed a slab") + .try_allocate(unprepared_bind_group, self.slab_capacity) + else { + panic!("An allocation into an empty slab should always succeed") + }; + + MaterialBindingId { group, slot } } - /// Deletes the stored bind group. - fn free(&mut self, _: MaterialBindGroupSlot) { - self.allocation = MaterialNonBindlessBindGroupAllocation::Unallocated; + /// Deallocates the material with the given binding ID. + /// + /// Any resources that are no longer referenced are removed from the slab. + fn free(&mut self, material_binding_id: MaterialBindingId) { + self.slabs + .get_mut(material_binding_id.group.0 as usize) + .expect("Slab should exist") + .free(material_binding_id.slot, &self.bindless_descriptor); } - /// Returns true if the slot is full or false if it's free. - fn is_full(&self) -> bool { - !matches!( - self.allocation, - MaterialNonBindlessBindGroupAllocation::Unallocated - ) + /// Returns the slab with the given bind group index. + /// + /// A [`MaterialBindGroupIndex`] can be fetched from a + /// [`MaterialBindingId`]. + fn get(&self, group: MaterialBindGroupIndex) -> Option<&MaterialBindlessSlab> { + self.slabs.get(group.0 as usize) } - /// Returns the actual bind group, or `None` if it hasn't been created yet. 
- fn get_bind_group(&self) -> Option<&BindGroup> { - match self.allocation { - MaterialNonBindlessBindGroupAllocation::Unallocated - | MaterialNonBindlessBindGroupAllocation::Allocated => None, - MaterialNonBindlessBindGroupAllocation::Initialized(ref bind_group, _) => { - Some(bind_group) - } + /// Recreates any bind groups corresponding to slabs that have been modified + /// since last calling + /// [`MaterialBindGroupBindlessAllocator::prepare_bind_groups`]. + fn prepare_bind_groups( + &mut self, + render_device: &RenderDevice, + fallback_bindless_resources: &FallbackBindlessResources, + fallback_image: &FallbackImage, + ) { + for slab in &mut self.slabs { + slab.prepare( + render_device, + &self.bind_group_layout, + fallback_bindless_resources, + &self.fallback_buffers, + fallback_image, + &self.bindless_descriptor, + self.slab_capacity, + ); } } - /// Returns the associated extra data for the material. - fn get_extra_data(&self, _: MaterialBindGroupSlot) -> &M::Data { - match self.allocation { - MaterialNonBindlessBindGroupAllocation::Initialized(_, ref extra_data) => extra_data, - MaterialNonBindlessBindGroupAllocation::Unallocated - | MaterialNonBindlessBindGroupAllocation::Allocated => { - panic!("Bind group not initialized") - } + /// Writes any buffers that we're managing to the GPU. + /// + /// Currently, this only consists of the bindless index tables. + fn write_buffers(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { + for slab in &mut self.slabs { + slab.write_buffer(render_device, render_queue); } } } @@ -768,20 +883,798 @@ where M: Material, { fn from_world(world: &mut World) -> Self { - // Create a new bind group allocator. 
let render_device = world.resource::(); - let bind_group_layout_entries = M::bind_group_layout_entries(render_device, false); - let bind_group_layout = - render_device.create_bind_group_layout(M::label(), &bind_group_layout_entries); - let fallback_buffers = - MaterialFallbackBuffers::new(render_device, &bind_group_layout_entries); - MaterialBindGroupAllocator { - bind_groups: vec![], - free_bind_groups: vec![], + MaterialBindGroupAllocator::new(render_device) + } +} + +impl MaterialBindlessSlab +where + M: Material, +{ + /// Attempts to allocate the given unprepared bind group in this slab. + /// + /// If the allocation succeeds, this method returns the slot that the + /// allocation was placed in. If the allocation fails because the slab was + /// full, this method returns the unprepared bind group back to the caller + /// so that it can try to allocate again. + fn try_allocate( + &mut self, + unprepared_bind_group: UnpreparedBindGroup, + slot_capacity: u32, + ) -> Result> { + // Locate pre-existing resources, and determine how many free slots we need. + let Some(allocation_candidate) = self.check_allocation(&unprepared_bind_group) else { + return Err(unprepared_bind_group); + }; + + // Check to see if we have enough free space. + // + // As a special case, note that if *nothing* is allocated in this slab, + // then we always allow a material to be placed in it, regardless of the + // number of bindings the material has. This is so that, if the + // platform's maximum bindless count is set too low to hold even a + // single material, we can still place each material into a separate + // slab instead of failing outright. + if self.allocated_resource_count > 0 + && self.allocated_resource_count + allocation_candidate.needed_free_slots + > slot_capacity + { + trace!("Slab is full, can't allocate"); + return Err(unprepared_bind_group); + } + + // OK, we can allocate in this slab. Assign a slot ID. 
+ let slot = self + .free_slots + .pop() + .unwrap_or(MaterialBindGroupSlot(self.live_allocation_count)); + + // Bump the live allocation count. + self.live_allocation_count += 1; + + // Insert the resources into the binding arrays. + let allocated_resource_slots = + self.insert_resources(unprepared_bind_group.bindings, allocation_candidate); + + // Serialize the allocated resource slots. + for bindless_index_table in &mut self.bindless_index_tables { + bindless_index_table.set(slot, &allocated_resource_slots); + } + + // Insert extra data. + if self.extra_data.len() < (*slot as usize + 1) { + self.extra_data.resize_with(*slot as usize + 1, || None); + } + self.extra_data[*slot as usize] = Some(unprepared_bind_group.data); + + // Invalidate the cached bind group. + self.bind_group = None; + + Ok(slot) + } + + /// Gathers the information needed to determine whether the given unprepared + /// bind group can be allocated in this slab. + fn check_allocation( + &self, + unprepared_bind_group: &UnpreparedBindGroup, + ) -> Option { + let mut allocation_candidate = BindlessAllocationCandidate { + pre_existing_resources: HashMap::default(), + needed_free_slots: 0, + }; + + for &(bindless_index, ref owned_binding_resource) in unprepared_bind_group.bindings.iter() { + let bindless_index = BindlessIndex(bindless_index); + match *owned_binding_resource { + OwnedBindingResource::Buffer(ref buffer) => { + let Some(binding_array) = self.buffers.get(&bindless_index) else { + error!( + "Binding array wasn't present for buffer at index {:?}", + bindless_index + ); + return None; + }; + match binding_array.find(BindingResourceId::Buffer(buffer.id())) { + Some(slot) => { + allocation_candidate + .pre_existing_resources + .insert(bindless_index, slot); + } + None => allocation_candidate.needed_free_slots += 1, + } + } + + OwnedBindingResource::Data(_) => { + // The size of a data buffer is unlimited. 
+ } + + OwnedBindingResource::TextureView(texture_view_dimension, ref texture_view) => { + let bindless_resource_type = BindlessResourceType::from(texture_view_dimension); + match self + .textures + .get(&bindless_resource_type) + .expect("Missing binding array for texture") + .find(BindingResourceId::TextureView( + texture_view_dimension, + texture_view.id(), + )) { + Some(slot) => { + allocation_candidate + .pre_existing_resources + .insert(bindless_index, slot); + } + None => { + allocation_candidate.needed_free_slots += 1; + } + } + } + + OwnedBindingResource::Sampler(sampler_binding_type, ref sampler) => { + let bindless_resource_type = BindlessResourceType::from(sampler_binding_type); + match self + .samplers + .get(&bindless_resource_type) + .expect("Missing binding array for sampler") + .find(BindingResourceId::Sampler(sampler.id())) + { + Some(slot) => { + allocation_candidate + .pre_existing_resources + .insert(bindless_index, slot); + } + None => { + allocation_candidate.needed_free_slots += 1; + } + } + } + } + } + + Some(allocation_candidate) + } + + /// Inserts the given [`BindingResources`] into this slab. + /// + /// Returns a table that maps the bindless index of each resource to its + /// slot in its binding array. + fn insert_resources( + &mut self, + mut binding_resources: BindingResources, + allocation_candidate: BindlessAllocationCandidate, + ) -> HashMap { + let mut allocated_resource_slots = HashMap::default(); + + for (bindless_index, owned_binding_resource) in binding_resources.drain(..) { + let bindless_index = BindlessIndex(bindless_index); + // If this is an other reference to an object we've already + // allocated, just bump its reference count. 
+ if let Some(pre_existing_resource_slot) = allocation_candidate + .pre_existing_resources + .get(&bindless_index) + { + allocated_resource_slots.insert(bindless_index, *pre_existing_resource_slot); + + match owned_binding_resource { + OwnedBindingResource::Buffer(_) => { + self.buffers + .get_mut(&bindless_index) + .expect("Buffer binding array should exist") + .bindings + .get_mut(*pre_existing_resource_slot as usize) + .and_then(|binding| binding.as_mut()) + .expect("Slot should exist") + .ref_count += 1; + } + + OwnedBindingResource::Data(_) => { + panic!("Data buffers can't be deduplicated") + } + + OwnedBindingResource::TextureView(texture_view_dimension, _) => { + let bindless_resource_type = + BindlessResourceType::from(texture_view_dimension); + self.textures + .get_mut(&bindless_resource_type) + .expect("Texture binding array should exist") + .bindings + .get_mut(*pre_existing_resource_slot as usize) + .and_then(|binding| binding.as_mut()) + .expect("Slot should exist") + .ref_count += 1; + } + + OwnedBindingResource::Sampler(sampler_binding_type, _) => { + let bindless_resource_type = + BindlessResourceType::from(sampler_binding_type); + self.samplers + .get_mut(&bindless_resource_type) + .expect("Sampler binding array should exist") + .bindings + .get_mut(*pre_existing_resource_slot as usize) + .and_then(|binding| binding.as_mut()) + .expect("Slot should exist") + .ref_count += 1; + } + } + + continue; + } + + // Otherwise, we need to insert it anew. 
+ let binding_resource_id = BindingResourceId::from(&owned_binding_resource); + match owned_binding_resource { + OwnedBindingResource::Buffer(buffer) => { + let slot = self + .buffers + .get_mut(&bindless_index) + .expect("Buffer binding array should exist") + .insert(binding_resource_id, buffer); + allocated_resource_slots.insert(bindless_index, slot); + } + OwnedBindingResource::Data(data) => { + let slot = self + .data_buffers + .get_mut(&bindless_index) + .expect("Data buffer binding array should exist") + .insert(&data); + allocated_resource_slots.insert(bindless_index, slot); + } + OwnedBindingResource::TextureView(texture_view_dimension, texture_view) => { + let bindless_resource_type = BindlessResourceType::from(texture_view_dimension); + let slot = self + .textures + .get_mut(&bindless_resource_type) + .expect("Texture array should exist") + .insert(binding_resource_id, texture_view); + allocated_resource_slots.insert(bindless_index, slot); + } + OwnedBindingResource::Sampler(sampler_binding_type, sampler) => { + let bindless_resource_type = BindlessResourceType::from(sampler_binding_type); + let slot = self + .samplers + .get_mut(&bindless_resource_type) + .expect("Sampler should exist") + .insert(binding_resource_id, sampler); + allocated_resource_slots.insert(bindless_index, slot); + } + } + + // Bump the allocated resource count. + self.allocated_resource_count += 1; + } + + allocated_resource_slots + } + + /// Removes the material allocated in the given slot, with the given + /// descriptor, from this slab. + fn free(&mut self, slot: MaterialBindGroupSlot, bindless_descriptor: &BindlessDescriptor) { + // Loop through each binding. 
+ for (bindless_index, bindless_resource_type) in + bindless_descriptor.resources.iter().enumerate() + { + let bindless_index = BindlessIndex::from(bindless_index as u32); + let Some(bindless_index_table) = self.get_bindless_index_table(bindless_index) else { + continue; + }; + let Some(bindless_binding) = bindless_index_table.get_binding(slot, bindless_index) + else { + continue; + }; + + // Free the binding. If the resource in question was anything other + // than a data buffer, then it has a reference count and + // consequently we need to decrement it. + let decrement_allocated_resource_count = match *bindless_resource_type { + BindlessResourceType::None => false, + BindlessResourceType::Buffer => self + .buffers + .get_mut(&bindless_index) + .expect("Buffer should exist with that bindless index") + .remove(bindless_binding), + BindlessResourceType::DataBuffer => { + self.data_buffers + .get_mut(&bindless_index) + .expect("Data buffer should exist with that bindless index") + .remove(bindless_binding); + false + } + BindlessResourceType::SamplerFiltering + | BindlessResourceType::SamplerNonFiltering + | BindlessResourceType::SamplerComparison => self + .samplers + .get_mut(bindless_resource_type) + .expect("Sampler array should exist") + .remove(bindless_binding), + BindlessResourceType::Texture1d + | BindlessResourceType::Texture2d + | BindlessResourceType::Texture2dArray + | BindlessResourceType::Texture3d + | BindlessResourceType::TextureCube + | BindlessResourceType::TextureCubeArray => self + .textures + .get_mut(bindless_resource_type) + .expect("Texture array should exist") + .remove(bindless_binding), + }; + + // If the slot is now free, decrement the allocated resource + // count. + if decrement_allocated_resource_count { + self.allocated_resource_count -= 1; + } + } + + // Clear out the extra data. + self.extra_data[slot.0 as usize] = None; + + // Invalidate the cached bind group. + self.bind_group = None; + + // Release the slot ID. 
+ self.free_slots.push(slot); + self.live_allocation_count -= 1; + } + + /// Recreates the bind group and bindless index table buffer if necessary. + fn prepare( + &mut self, + render_device: &RenderDevice, + bind_group_layout: &BindGroupLayout, + fallback_bindless_resources: &FallbackBindlessResources, + fallback_buffers: &HashMap, + fallback_image: &FallbackImage, + bindless_descriptor: &BindlessDescriptor, + slab_capacity: u32, + ) { + // Create the bindless index table buffers if needed. + for bindless_index_table in &mut self.bindless_index_tables { + bindless_index_table.buffer.prepare(render_device); + } + + // Create any data buffers we were managing if necessary. + for data_buffer in self.data_buffers.values_mut() { + data_buffer.buffer.prepare(render_device); + } + + // Create the bind group if needed. + self.prepare_bind_group( + render_device, bind_group_layout, + fallback_bindless_resources, fallback_buffers, - bindless_enabled: material_uses_bindless_resources::(render_device), - phantom: PhantomData, + fallback_image, + bindless_descriptor, + slab_capacity, + ); + } + + /// Recreates the bind group if this slab has been changed since the last + /// time we created it. + fn prepare_bind_group( + &mut self, + render_device: &RenderDevice, + bind_group_layout: &BindGroupLayout, + fallback_bindless_resources: &FallbackBindlessResources, + fallback_buffers: &HashMap, + fallback_image: &FallbackImage, + bindless_descriptor: &BindlessDescriptor, + slab_capacity: u32, + ) { + // If the bind group is clean, then do nothing. + if self.bind_group.is_some() { + return; + } + + // Determine whether we need to pad out our binding arrays with dummy + // resources. 
+ let required_binding_array_size = if render_device + .features() + .contains(WgpuFeatures::PARTIALLY_BOUND_BINDING_ARRAY) + { + None + } else { + Some(slab_capacity) + }; + + let binding_resource_arrays = self.create_binding_resource_arrays( + fallback_bindless_resources, + fallback_buffers, + fallback_image, + bindless_descriptor, + required_binding_array_size, + ); + + let mut bind_group_entries: Vec<_> = self + .bindless_index_tables + .iter() + .map(|bindless_index_table| bindless_index_table.bind_group_entry()) + .collect(); + + for &(&binding, ref binding_resource_array) in binding_resource_arrays.iter() { + bind_group_entries.push(BindGroupEntry { + binding, + resource: match *binding_resource_array { + BindingResourceArray::Buffers(ref buffer_bindings) => { + BindingResource::BufferArray(&buffer_bindings[..]) + } + BindingResourceArray::TextureViews(ref texture_views) => { + BindingResource::TextureViewArray(&texture_views[..]) + } + BindingResourceArray::Samplers(ref samplers) => { + BindingResource::SamplerArray(&samplers[..]) + } + }, + }); + } + + // Create bind group entries for any data buffers we're managing. + for data_buffer in self.data_buffers.values() { + bind_group_entries.push(BindGroupEntry { + binding: *data_buffer.binding_number, + resource: data_buffer + .buffer + .buffer() + .expect("Backing data buffer must have been uploaded by now") + .as_entire_binding(), + }); + } + + self.bind_group = Some(render_device.create_bind_group( + M::label(), + bind_group_layout, + &bind_group_entries, + )); + } + + /// Writes any buffers that we're managing to the GPU. + /// + /// Currently, this consists of the bindless index table plus any data + /// buffers we're managing. 
+ fn write_buffer(&mut self, render_device: &RenderDevice, render_queue: &RenderQueue) { + for bindless_index_table in &mut self.bindless_index_tables { + bindless_index_table + .buffer + .write(render_device, render_queue); + } + + for data_buffer in self.data_buffers.values_mut() { + data_buffer.buffer.write(render_device, render_queue); + } + } + + /// Converts our binding arrays into binding resource arrays suitable for + /// passing to `wgpu`. + fn create_binding_resource_arrays<'a>( + &'a self, + fallback_bindless_resources: &'a FallbackBindlessResources, + fallback_buffers: &'a HashMap, + fallback_image: &'a FallbackImage, + bindless_descriptor: &'a BindlessDescriptor, + required_binding_array_size: Option, + ) -> Vec<(&'a u32, BindingResourceArray<'a>)> { + let mut binding_resource_arrays = vec![]; + + // Build sampler bindings. + self.create_sampler_binding_resource_arrays( + &mut binding_resource_arrays, + fallback_bindless_resources, + required_binding_array_size, + ); + + // Build texture bindings. + self.create_texture_binding_resource_arrays( + &mut binding_resource_arrays, + fallback_image, + required_binding_array_size, + ); + + // Build buffer bindings. + self.create_buffer_binding_resource_arrays( + &mut binding_resource_arrays, + fallback_buffers, + bindless_descriptor, + required_binding_array_size, + ); + + binding_resource_arrays + } + + /// Accumulates sampler binding arrays into binding resource arrays suitable + /// for passing to `wgpu`. + fn create_sampler_binding_resource_arrays<'a, 'b>( + &'a self, + binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>, + fallback_bindless_resources: &'a FallbackBindlessResources, + required_binding_array_size: Option, + ) { + // We have one binding resource array per sampler type. 
+ for (bindless_resource_type, fallback_sampler) in [ + ( + BindlessResourceType::SamplerFiltering, + &fallback_bindless_resources.filtering_sampler, + ), + ( + BindlessResourceType::SamplerNonFiltering, + &fallback_bindless_resources.non_filtering_sampler, + ), + ( + BindlessResourceType::SamplerComparison, + &fallback_bindless_resources.comparison_sampler, + ), + ] { + let mut sampler_bindings = vec![]; + + match self.samplers.get(&bindless_resource_type) { + Some(sampler_bindless_binding_array) => { + for maybe_bindless_binding in sampler_bindless_binding_array.bindings.iter() { + match *maybe_bindless_binding { + Some(ref bindless_binding) => { + sampler_bindings.push(&*bindless_binding.resource); + } + None => sampler_bindings.push(&**fallback_sampler), + } + } + } + + None => { + // Fill with a single fallback sampler. + sampler_bindings.push(&**fallback_sampler); + } + } + + if let Some(required_binding_array_size) = required_binding_array_size { + sampler_bindings.extend(iter::repeat_n( + &**fallback_sampler, + required_binding_array_size as usize - sampler_bindings.len(), + )); + } + + let binding_number = bindless_resource_type + .binding_number() + .expect("Sampler bindless resource type must have a binding number"); + + binding_resource_arrays.push(( + &**binding_number, + BindingResourceArray::Samplers(sampler_bindings), + )); + } + } + + /// Accumulates texture binding arrays into binding resource arrays suitable + /// for passing to `wgpu`. 
+ fn create_texture_binding_resource_arrays<'a, 'b>( + &'a self, + binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>, + fallback_image: &'a FallbackImage, + required_binding_array_size: Option, + ) { + for (bindless_resource_type, fallback_image) in [ + (BindlessResourceType::Texture1d, &fallback_image.d1), + (BindlessResourceType::Texture2d, &fallback_image.d2), + ( + BindlessResourceType::Texture2dArray, + &fallback_image.d2_array, + ), + (BindlessResourceType::Texture3d, &fallback_image.d3), + (BindlessResourceType::TextureCube, &fallback_image.cube), + ( + BindlessResourceType::TextureCubeArray, + &fallback_image.cube_array, + ), + ] { + let mut texture_bindings = vec![]; + + let binding_number = bindless_resource_type + .binding_number() + .expect("Texture bindless resource type must have a binding number"); + + match self.textures.get(&bindless_resource_type) { + Some(texture_bindless_binding_array) => { + for maybe_bindless_binding in texture_bindless_binding_array.bindings.iter() { + match *maybe_bindless_binding { + Some(ref bindless_binding) => { + texture_bindings.push(&*bindless_binding.resource); + } + None => texture_bindings.push(&*fallback_image.texture_view), + } + } + } + + None => { + // Fill with a single fallback image. + texture_bindings.push(&*fallback_image.texture_view); + } + } + + if let Some(required_binding_array_size) = required_binding_array_size { + texture_bindings.extend(iter::repeat_n( + &*fallback_image.texture_view, + required_binding_array_size as usize - texture_bindings.len(), + )); + } + + binding_resource_arrays.push(( + binding_number, + BindingResourceArray::TextureViews(texture_bindings), + )); + } + } + + /// Accumulates buffer binding arrays into binding resource arrays suitable + /// for `wgpu`. 
+ fn create_buffer_binding_resource_arrays<'a, 'b>( + &'a self, + binding_resource_arrays: &'b mut Vec<(&'a u32, BindingResourceArray<'a>)>, + fallback_buffers: &'a HashMap, + bindless_descriptor: &'a BindlessDescriptor, + required_binding_array_size: Option, + ) { + for bindless_buffer_descriptor in bindless_descriptor.buffers.iter() { + let Some(buffer_bindless_binding_array) = + self.buffers.get(&bindless_buffer_descriptor.bindless_index) + else { + // This is OK, because index buffers are present in + // `BindlessDescriptor::buffers` but not in + // `BindlessDescriptor::resources`. + continue; + }; + + let fallback_buffer = fallback_buffers + .get(&bindless_buffer_descriptor.bindless_index) + .expect("Fallback buffer should exist"); + + let mut buffer_bindings: Vec<_> = buffer_bindless_binding_array + .bindings + .iter() + .map(|maybe_bindless_binding| { + let buffer = match *maybe_bindless_binding { + None => fallback_buffer, + Some(ref bindless_binding) => &bindless_binding.resource, + }; + BufferBinding { + buffer, + offset: 0, + size: None, + } + }) + .collect(); + + if let Some(required_binding_array_size) = required_binding_array_size { + buffer_bindings.extend(iter::repeat_n( + BufferBinding { + buffer: fallback_buffer, + offset: 0, + size: None, + }, + required_binding_array_size as usize - buffer_bindings.len(), + )); + } + + binding_resource_arrays.push(( + &*buffer_bindless_binding_array.binding_number, + BindingResourceArray::Buffers(buffer_bindings), + )); + } + } + + /// Returns the [`BindGroup`] corresponding to this slab, if it's been + /// prepared. + fn bind_group(&self) -> Option<&BindGroup> { + self.bind_group.as_ref() + } + + /// Returns the extra data associated with this material. 
+ fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { + self.extra_data + .get(slot.0 as usize) + .and_then(|data| data.as_ref()) + .expect("Extra data not present") + } + + /// Returns the bindless index table containing the given bindless index. + fn get_bindless_index_table( + &self, + bindless_index: BindlessIndex, + ) -> Option<&MaterialBindlessIndexTable> { + let table_index = self + .bindless_index_tables + .binary_search_by(|bindless_index_table| { + if bindless_index < bindless_index_table.index_range.start { + Ordering::Less + } else if bindless_index >= bindless_index_table.index_range.end { + Ordering::Greater + } else { + Ordering::Equal + } + }) + .ok()?; + self.bindless_index_tables.get(table_index) + } +} + +impl MaterialBindlessBindingArray +where + R: GetBindingResourceId, +{ + /// Creates a new [`MaterialBindlessBindingArray`] with the given binding + /// number, managing resources of the given type. + fn new( + binding_number: BindingNumber, + resource_type: BindlessResourceType, + ) -> MaterialBindlessBindingArray { + MaterialBindlessBindingArray { + binding_number, + bindings: vec![], + resource_type, + resource_to_slot: HashMap::default(), + free_slots: vec![], + len: 0, + } + } + + /// Returns the slot corresponding to the given resource, if that resource + /// is located in this binding array. + /// + /// If the resource isn't in this binding array, this method returns `None`. + fn find(&self, binding_resource_id: BindingResourceId) -> Option { + self.resource_to_slot.get(&binding_resource_id).copied() + } + + /// Inserts a bindless resource into a binding array and returns the index + /// of the slot it was inserted into. 
+ fn insert(&mut self, binding_resource_id: BindingResourceId, resource: R) -> u32 { + let slot = self.free_slots.pop().unwrap_or(self.len); + self.resource_to_slot.insert(binding_resource_id, slot); + + if self.bindings.len() < slot as usize + 1 { + self.bindings.resize_with(slot as usize + 1, || None); + } + self.bindings[slot as usize] = Some(MaterialBindlessBinding::new(resource)); + + self.len += 1; + slot + } + + /// Removes a reference to an object from the slot. + /// + /// If the reference count dropped to 0 and the object was freed, this + /// method returns true. If the object was still referenced after removing + /// it, returns false. + fn remove(&mut self, slot: u32) -> bool { + let maybe_binding = &mut self.bindings[slot as usize]; + let binding = maybe_binding + .as_mut() + .expect("Attempted to free an already-freed binding"); + + binding.ref_count -= 1; + if binding.ref_count != 0 { + return false; + } + + let binding_resource_id = binding.resource.binding_resource_id(self.resource_type); + self.resource_to_slot.remove(&binding_resource_id); + + *maybe_binding = None; + self.free_slots.push(slot); + self.len -= 1; + true + } +} + +impl MaterialBindlessBinding +where + R: GetBindingResourceId, +{ + /// Creates a new [`MaterialBindlessBinding`] for a freshly-added resource. + /// + /// The reference count is initialized to 1. 
+ fn new(resource: R) -> MaterialBindlessBinding { + MaterialBindlessBinding { + resource, + ref_count: 1, } } } @@ -795,57 +1688,403 @@ pub fn material_uses_bindless_resources(render_device: &RenderDevice) -> bool where M: Material, { - M::bindless_slot_count().is_some() - && render_device - .features() - .contains(WgpuFeatures::BUFFER_BINDING_ARRAY | WgpuFeatures::TEXTURE_BINDING_ARRAY) + M::bindless_slot_count().is_some_and(|bindless_slot_count| { + M::bindless_supported(render_device) && bindless_slot_count.resolve() > 1 + }) +} + +impl MaterialBindlessSlab +where + M: Material, +{ + /// Creates a new [`MaterialBindlessSlab`] for a material with the given + /// bindless descriptor. + /// + /// We use this when no existing slab could hold a material to be allocated. + fn new(bindless_descriptor: &BindlessDescriptor) -> MaterialBindlessSlab { + let mut buffers = HashMap::default(); + let mut samplers = HashMap::default(); + let mut textures = HashMap::default(); + let mut data_buffers = HashMap::default(); + + for (bindless_index, bindless_resource_type) in + bindless_descriptor.resources.iter().enumerate() + { + let bindless_index = BindlessIndex(bindless_index as u32); + match *bindless_resource_type { + BindlessResourceType::None => {} + BindlessResourceType::Buffer => { + let binding_number = bindless_descriptor + .buffers + .iter() + .find(|bindless_buffer_descriptor| { + bindless_buffer_descriptor.bindless_index == bindless_index + }) + .expect( + "Bindless buffer descriptor matching that bindless index should be \ + present", + ) + .binding_number; + buffers.insert( + bindless_index, + MaterialBindlessBindingArray::new(binding_number, *bindless_resource_type), + ); + } + BindlessResourceType::DataBuffer => { + // Copy the data in. 
+ let buffer_descriptor = bindless_descriptor + .buffers + .iter() + .find(|bindless_buffer_descriptor| { + bindless_buffer_descriptor.bindless_index == bindless_index + }) + .expect( + "Bindless buffer descriptor matching that bindless index should be \ + present", + ); + data_buffers.insert( + bindless_index, + MaterialDataBuffer::new( + buffer_descriptor.binding_number, + buffer_descriptor + .size + .expect("Data buffers should have a size") + as u32, + ), + ); + } + BindlessResourceType::SamplerFiltering + | BindlessResourceType::SamplerNonFiltering + | BindlessResourceType::SamplerComparison => { + samplers.insert( + *bindless_resource_type, + MaterialBindlessBindingArray::new( + *bindless_resource_type.binding_number().unwrap(), + *bindless_resource_type, + ), + ); + } + BindlessResourceType::Texture1d + | BindlessResourceType::Texture2d + | BindlessResourceType::Texture2dArray + | BindlessResourceType::Texture3d + | BindlessResourceType::TextureCube + | BindlessResourceType::TextureCubeArray => { + textures.insert( + *bindless_resource_type, + MaterialBindlessBindingArray::new( + *bindless_resource_type.binding_number().unwrap(), + *bindless_resource_type, + ), + ); + } + } + } + + let bindless_index_tables = bindless_descriptor + .index_tables + .iter() + .map(|bindless_index_table| MaterialBindlessIndexTable::new(bindless_index_table)) + .collect(); + + MaterialBindlessSlab { + bind_group: None, + bindless_index_tables, + samplers, + textures, + buffers, + data_buffers, + extra_data: vec![], + free_slots: vec![], + live_allocation_count: 0, + allocated_resource_count: 0, + } + } } impl FromWorld for FallbackBindlessResources { fn from_world(world: &mut World) -> Self { let render_device = world.resource::(); FallbackBindlessResources { - fallback_sampler: render_device.create_sampler(&SamplerDescriptor { - label: Some("fallback sampler"), + filtering_sampler: render_device.create_sampler(&SamplerDescriptor { + label: Some("fallback filtering sampler"), + 
..default() + }), + non_filtering_sampler: render_device.create_sampler(&SamplerDescriptor { + label: Some("fallback non-filtering sampler"), + mag_filter: FilterMode::Nearest, + min_filter: FilterMode::Nearest, + mipmap_filter: FilterMode::Nearest, + ..default() + }), + comparison_sampler: render_device.create_sampler(&SamplerDescriptor { + label: Some("fallback comparison sampler"), + compare: Some(CompareFunction::Always), ..default() }), } } } -impl MaterialFallbackBuffers { - /// Creates a new set of fallback buffers containing dummy allocations. +impl MaterialBindGroupNonBindlessAllocator +where + M: Material, +{ + /// Creates a new [`MaterialBindGroupNonBindlessAllocator`] managing the + /// bind groups for a single non-bindless material. + fn new() -> MaterialBindGroupNonBindlessAllocator { + MaterialBindGroupNonBindlessAllocator { + bind_groups: vec![], + to_prepare: HashSet::default(), + free_indices: vec![], + phantom: PhantomData, + } + } + + /// Inserts a bind group, either unprepared or prepared, into this allocator + /// and returns a [`MaterialBindingId`]. /// - /// We populate unused bind group slots with these. - fn new( - render_device: &RenderDevice, - bind_group_layout_entries: &[BindGroupLayoutEntry], - ) -> MaterialFallbackBuffers { - let mut fallback_buffers = HashMap::default(); - for bind_group_layout_entry in bind_group_layout_entries { - // Create a dummy buffer of the appropriate size. - let BindingType::Buffer { - min_binding_size, .. - } = bind_group_layout_entry.ty + /// The returned [`MaterialBindingId`] can later be used to fetch the bind + /// group. 
+ fn allocate(
+ &mut self,
+ bind_group: MaterialNonBindlessAllocatedBindGroup,
+ ) -> MaterialBindingId {
+ let group_id = self
+ .free_indices
+ .pop()
+ .unwrap_or(MaterialBindGroupIndex(self.bind_groups.len() as u32));
+ if self.bind_groups.len() < *group_id as usize + 1 {
+ self.bind_groups
+ .resize_with(*group_id as usize + 1, || None);
+ }
+
+ if matches!(
+ bind_group,
+ MaterialNonBindlessAllocatedBindGroup::Unprepared { .. }
+ ) {
+ self.to_prepare.insert(group_id);
+ }
+
+ self.bind_groups[*group_id as usize] = Some(bind_group);
+
+ MaterialBindingId {
+ group: group_id,
+ slot: default(),
+ }
+ }
+
+ /// Inserts an unprepared bind group into this allocator and returns a
+ /// [`MaterialBindingId`].
+ fn allocate_unprepared(
+ &mut self,
+ unprepared_bind_group: UnpreparedBindGroup,
+ bind_group_layout: BindGroupLayout,
+ ) -> MaterialBindingId {
+ self.allocate(MaterialNonBindlessAllocatedBindGroup::Unprepared {
+ bind_group: unprepared_bind_group,
+ layout: bind_group_layout,
+ })
+ }
+
+ /// Inserts a prepared bind group into this allocator and returns a
+ /// [`MaterialBindingId`].
+ fn allocate_prepared(
+ &mut self,
+ prepared_bind_group: PreparedBindGroup,
+ ) -> MaterialBindingId {
+ self.allocate(MaterialNonBindlessAllocatedBindGroup::Prepared {
+ bind_group: prepared_bind_group,
+ uniform_buffers: vec![],
+ })
+ }
+
+ /// Deallocates the bind group with the given binding ID.
+ fn free(&mut self, binding_id: MaterialBindingId) {
+ debug_assert_eq!(binding_id.slot, MaterialBindGroupSlot(0));
+ debug_assert!(self.bind_groups[*binding_id.group as usize].is_some());
+ self.bind_groups[*binding_id.group as usize] = None;
+ self.to_prepare.remove(&binding_id.group);
+ self.free_indices.push(binding_id.group);
+ }
+
+ /// Returns a wrapper around the bind group with the given index.
+ fn get(&self, group: MaterialBindGroupIndex) -> Option> { + self.bind_groups[group.0 as usize] + .as_ref() + .map(|bind_group| match bind_group { + MaterialNonBindlessAllocatedBindGroup::Prepared { bind_group, .. } => { + MaterialNonBindlessSlab::Prepared(bind_group) + } + MaterialNonBindlessAllocatedBindGroup::Unprepared { bind_group, .. } => { + MaterialNonBindlessSlab::Unprepared(bind_group) + } + }) + } + + /// Prepares any as-yet unprepared bind groups that this allocator is + /// managing. + /// + /// Unprepared bind groups can be added to this allocator with + /// [`Self::allocate_unprepared`]. Such bind groups will defer being + /// prepared until the next time this method is called. + fn prepare_bind_groups(&mut self, render_device: &RenderDevice) { + for bind_group_index in mem::take(&mut self.to_prepare) { + let Some(MaterialNonBindlessAllocatedBindGroup::Unprepared { + bind_group: unprepared_bind_group, + layout: bind_group_layout, + }) = mem::take(&mut self.bind_groups[*bind_group_index as usize]) else { - continue; + panic!("Allocation didn't exist or was already prepared"); }; - let mut size: u64 = match min_binding_size { - None => 0, - Some(min_binding_size) => min_binding_size.into(), - }; - size = size.max(MIN_BUFFER_SIZE); - - fallback_buffers.insert( - bind_group_layout_entry.binding, - render_device.create_buffer_with_data(&BufferInitDescriptor { - label: Some("fallback buffer"), - contents: &vec![0; size as usize], - usage: BufferUsages::UNIFORM | BufferUsages::STORAGE, - }), + + // Pack any `Data` into uniform buffers. 
+ let mut uniform_buffers = vec![]; + for (index, binding) in unprepared_bind_group.bindings.iter() { + let OwnedBindingResource::Data(ref owned_data) = *binding else { + continue; + }; + let label = format!("material uniform data {}", *index); + let uniform_buffer = render_device.create_buffer_with_data(&BufferInitDescriptor { + label: Some(&label), + contents: &owned_data.0, + usage: BufferUsages::COPY_DST | BufferUsages::UNIFORM, + }); + uniform_buffers.push(uniform_buffer); + } + + // Create bind group entries. + let mut bind_group_entries = vec![]; + let mut uniform_buffers_iter = uniform_buffers.iter(); + for (index, binding) in unprepared_bind_group.bindings.iter() { + match *binding { + OwnedBindingResource::Data(_) => { + bind_group_entries.push(BindGroupEntry { + binding: *index, + resource: uniform_buffers_iter + .next() + .expect("We should have created uniform buffers for each `Data`") + .as_entire_binding(), + }); + } + _ => bind_group_entries.push(BindGroupEntry { + binding: *index, + resource: binding.get_binding(), + }), + } + } + + // Create the bind group. + let bind_group = render_device.create_bind_group( + M::label(), + &bind_group_layout, + &bind_group_entries, ); + + self.bind_groups[*bind_group_index as usize] = + Some(MaterialNonBindlessAllocatedBindGroup::Prepared { + bind_group: PreparedBindGroup { + bindings: unprepared_bind_group.bindings, + bind_group, + data: unprepared_bind_group.data, + }, + uniform_buffers, + }); + } + } +} + +impl<'a, M> MaterialSlab<'a, M> +where + M: Material, +{ + /// Returns the extra data associated with this material. + /// + /// When deriving `AsBindGroup`, this data is given by the + /// `#[bind_group_data(DataType)]` attribute on the material structure. 
+ pub fn get_extra_data(&self, slot: MaterialBindGroupSlot) -> &M::Data { + match self.0 { + MaterialSlabImpl::Bindless(material_bindless_slab) => { + material_bindless_slab.get_extra_data(slot) + } + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Prepared( + prepared_bind_group, + )) => &prepared_bind_group.data, + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Unprepared( + unprepared_bind_group, + )) => &unprepared_bind_group.data, + } + } + + /// Returns the [`BindGroup`] corresponding to this slab, if it's been + /// prepared. + /// + /// You can prepare bind groups by calling + /// [`MaterialBindGroupAllocator::prepare_bind_groups`]. If the bind group + /// isn't ready, this method returns `None`. + pub fn bind_group(&self) -> Option<&'a BindGroup> { + match self.0 { + MaterialSlabImpl::Bindless(material_bindless_slab) => { + material_bindless_slab.bind_group() + } + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Prepared( + prepared_bind_group, + )) => Some(&prepared_bind_group.bind_group), + MaterialSlabImpl::NonBindless(MaterialNonBindlessSlab::Unprepared(_)) => None, } + } +} - MaterialFallbackBuffers(fallback_buffers) +impl MaterialDataBuffer { + /// Creates a new [`MaterialDataBuffer`] managing a buffer of elements of + /// size `aligned_element_size` that will be bound to the given binding + /// number. + fn new(binding_number: BindingNumber, aligned_element_size: u32) -> MaterialDataBuffer { + MaterialDataBuffer { + binding_number, + buffer: RetainedRawBufferVec::new(BufferUsages::STORAGE), + aligned_element_size, + free_slots: vec![], + len: 0, + } + } + + /// Allocates a slot for a new piece of data, copies the data into that + /// slot, and returns the slot ID. + /// + /// The size of the piece of data supplied to this method must equal the + /// [`Self::aligned_element_size`] provided to [`MaterialDataBuffer::new`]. + fn insert(&mut self, data: &[u8]) -> u32 { + // Make the the data is of the right length. 
+ debug_assert_eq!(data.len(), self.aligned_element_size as usize); + + // Grab a slot. + let slot = self.free_slots.pop().unwrap_or(self.len); + + // Calculate the range we're going to copy to. + let start = slot as usize * self.aligned_element_size as usize; + let end = (slot as usize + 1) * self.aligned_element_size as usize; + + // Resize the buffer if necessary. + if self.buffer.len() < end { + self.buffer.reserve_internal(end); + } + while self.buffer.values().len() < end { + self.buffer.push(0); + } + + // Copy in the data. + self.buffer.values_mut()[start..end].copy_from_slice(data); + + // Mark the buffer dirty, and finish up. + self.len += 1; + self.buffer.dirty = BufferDirtyState::NeedsReserve; + slot + } + + /// Marks the given slot as free. + fn remove(&mut self, slot: u32) { + self.free_slots.push(slot); + self.len -= 1; } } diff --git a/crates/bevy_pbr/src/mesh_material.rs b/crates/bevy_pbr/src/mesh_material.rs index 84eaf7cffa79a..027f2073ec179 100644 --- a/crates/bevy_pbr/src/mesh_material.rs +++ b/crates/bevy_pbr/src/mesh_material.rs @@ -1,5 +1,5 @@ use crate::Material; -use bevy_asset::{AssetId, Handle}; +use bevy_asset::{AsAssetId, AssetId, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{component::Component, reflect::ReflectComponent}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -36,8 +36,8 @@ use derive_more::derive::From; /// )); /// } /// ``` -#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, From)] +#[reflect(Component, Default, Clone, PartialEq)] pub struct MeshMaterial3d(pub Handle); impl Default for MeshMaterial3d { @@ -46,6 +46,14 @@ impl Default for MeshMaterial3d { } } +impl PartialEq for MeshMaterial3d { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Eq for MeshMaterial3d {} + impl From> for AssetId { fn from(material: MeshMaterial3d) -> Self { 
material.id() @@ -57,3 +65,11 @@ impl From<&MeshMaterial3d> for AssetId { material.id() } } + +impl AsAssetId for MeshMaterial3d { + type Asset = M; + + fn as_asset_id(&self) -> AssetId { + self.id() + } +} diff --git a/crates/bevy_pbr/src/meshlet/asset.rs b/crates/bevy_pbr/src/meshlet/asset.rs index 66a84ed8329de..c158650d1bd4c 100644 --- a/crates/bevy_pbr/src/meshlet/asset.rs +++ b/crates/bevy_pbr/src/meshlet/asset.rs @@ -31,7 +31,6 @@ pub const MESHLET_MESH_ASSET_VERSION: u64 = 1; /// * Do not use normal maps baked from higher-poly geometry. Use the high-poly geometry directly and skip the normal map. /// * If additional detail is needed, a smaller tiling normal map not baked from a mesh is ok. /// * Material shaders must not use builtin functions that automatically calculate derivatives . -/// * Use `pbr_functions::sample_texture` to sample textures instead. /// * Performing manual arithmetic on texture coordinates (UVs) is forbidden. Use the chain-rule version of arithmetic functions instead (TODO: not yet implemented). /// * Limited control over [`bevy_render::render_resource::RenderPipelineDescriptor`] attributes. 
/// * Materials must use the [`crate::Material::meshlet_mesh_fragment_shader`] method (and similar variants for prepass/deferred shaders) diff --git a/crates/bevy_pbr/src/meshlet/clear_visibility_buffer.wgsl b/crates/bevy_pbr/src/meshlet/clear_visibility_buffer.wgsl new file mode 100644 index 0000000000000..5956921ca1609 --- /dev/null +++ b/crates/bevy_pbr/src/meshlet/clear_visibility_buffer.wgsl @@ -0,0 +1,18 @@ +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; +#else +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; +#endif +var view_size: vec2; + +@compute +@workgroup_size(16, 16, 1) +fn clear_visibility_buffer(@builtin(global_invocation_id) global_id: vec3) { + if any(global_id.xy >= view_size) { return; } + +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT + textureStore(meshlet_visibility_buffer, global_id.xy, vec4(0lu)); +#else + textureStore(meshlet_visibility_buffer, global_id.xy, vec4(0u)); +#endif +} diff --git a/crates/bevy_pbr/src/meshlet/from_mesh.rs b/crates/bevy_pbr/src/meshlet/from_mesh.rs index 697d3d4aacd82..ed2be52f538dd 100644 --- a/crates/bevy_pbr/src/meshlet/from_mesh.rs +++ b/crates/bevy_pbr/src/meshlet/from_mesh.rs @@ -3,11 +3,11 @@ use super::asset::{ }; use alloc::borrow::Cow; use bevy_math::{ops::log2, IVec3, Vec2, Vec3, Vec3Swizzles}; +use bevy_platform::collections::HashMap; use bevy_render::{ mesh::{Indices, Mesh}, render_resource::PrimitiveTopology, }; -use bevy_utils::HashMap; use bitvec::{order::Lsb0, vec::BitVec, view::BitView}; use core::{iter, ops::Range}; use half::f16; @@ -16,7 +16,7 @@ use meshopt::{ build_meshlets, ffi::meshopt_Meshlet, generate_vertex_remap_multi, simplify_with_attributes_and_locks, Meshlets, SimplifyOptions, VertexDataAdapter, VertexStream, }; -use metis::Graph; +use metis::{option::Opt, Graph}; use smallvec::SmallVec; use thiserror::Error; @@ -67,12 +67,29 @@ impl MeshletMesh { // Validate mesh format 
let indices = validate_input_mesh(mesh)?; - // Split the mesh into an initial list of meshlets (LOD 0) + // Get meshlet vertices let vertex_buffer = mesh.create_packed_vertex_buffer_data(); let vertex_stride = mesh.get_vertex_size() as usize; let vertices = VertexDataAdapter::new(&vertex_buffer, vertex_stride, 0).unwrap(); let vertex_normals = bytemuck::cast_slice(&vertex_buffer[12..16]); - let mut meshlets = compute_meshlets(&indices, &vertices); + + // Generate a position-only vertex buffer for determining triangle/meshlet connectivity + let (position_only_vertex_count, position_only_vertex_remap) = generate_vertex_remap_multi( + vertices.vertex_count, + &[VertexStream::new_with_stride::( + vertex_buffer.as_ptr(), + vertex_stride, + )], + Some(&indices), + ); + + // Split the mesh into an initial list of meshlets (LOD 0) + let mut meshlets = compute_meshlets( + &indices, + &vertices, + &position_only_vertex_remap, + position_only_vertex_count, + ); let mut bounding_spheres = meshlets .iter() .map(|meshlet| compute_meshlet_bounds(meshlet, &vertices)) @@ -85,23 +102,15 @@ impl MeshletMesh { }, }) .collect::>(); - let mut simplification_errors = iter::repeat(MeshletSimplificationError { - group_error: f16::ZERO, - parent_group_error: f16::MAX, - }) - .take(meshlets.len()) + let mut simplification_errors = iter::repeat_n( + MeshletSimplificationError { + group_error: f16::ZERO, + parent_group_error: f16::MAX, + }, + meshlets.len(), + ) .collect::>(); - // Generate a position-only vertex buffer for determining what meshlets are connected for use in grouping - let (position_only_vertex_count, position_only_vertex_remap) = generate_vertex_remap_multi( - vertices.vertex_count, - &[VertexStream::new_with_stride::( - vertex_buffer.as_ptr(), - vertex_stride, - )], - Some(&indices), - ); - let mut vertex_locks = vec![false; vertices.vertex_count]; // Build further LODs @@ -163,6 +172,8 @@ impl MeshletMesh { let new_meshlets_count = split_simplified_group_into_new_meshlets( 
&simplified_group_indices, &vertices, + &position_only_vertex_remap, + position_only_vertex_count, &mut meshlets, ); @@ -178,13 +189,13 @@ impl MeshletMesh { }, } })); - simplification_errors.extend( - iter::repeat(MeshletSimplificationError { + simplification_errors.extend(iter::repeat_n( + MeshletSimplificationError { group_error, parent_group_error: f16::MAX, - }) - .take(new_meshlet_ids.len()), - ); + }, + new_meshlet_ids.len(), + )); } // Set simplification queue to the list of newly created meshlets @@ -243,8 +254,103 @@ fn validate_input_mesh(mesh: &Mesh) -> Result, MeshToMeshletMeshC } } -fn compute_meshlets(indices: &[u32], vertices: &VertexDataAdapter) -> Meshlets { - build_meshlets(indices, vertices, 255, 128, 0.0) // Meshoptimizer won't currently let us do 256 vertices +fn compute_meshlets( + indices: &[u32], + vertices: &VertexDataAdapter, + position_only_vertex_remap: &[u32], + position_only_vertex_count: usize, +) -> Meshlets { + // For each vertex, build a list of all triangles that use it + let mut vertices_to_triangles = vec![Vec::new(); position_only_vertex_count]; + for (i, index) in indices.iter().enumerate() { + let vertex_id = position_only_vertex_remap[*index as usize]; + let vertex_to_triangles = &mut vertices_to_triangles[vertex_id as usize]; + vertex_to_triangles.push(i / 3); + } + + // For each triangle pair, count how many vertices they share + let mut triangle_pair_to_shared_vertex_count = >::default(); + for vertex_triangle_ids in vertices_to_triangles { + for (triangle_id1, triangle_id2) in vertex_triangle_ids.into_iter().tuple_combinations() { + let count = triangle_pair_to_shared_vertex_count + .entry(( + triangle_id1.min(triangle_id2), + triangle_id1.max(triangle_id2), + )) + .or_insert(0); + *count += 1; + } + } + + // For each triangle, gather all other triangles that share at least one vertex along with their shared vertex count + let triangle_count = indices.len() / 3; + let mut connected_triangles_per_triangle = 
vec![Vec::new(); triangle_count]; + for ((triangle_id1, triangle_id2), shared_vertex_count) in triangle_pair_to_shared_vertex_count + { + // We record both id1->id2 and id2->id1 as adjacency is symmetrical + connected_triangles_per_triangle[triangle_id1].push((triangle_id2, shared_vertex_count)); + connected_triangles_per_triangle[triangle_id2].push((triangle_id1, shared_vertex_count)); + } + + // The order of triangles depends on hash traversal order; to produce deterministic results, sort them + for list in connected_triangles_per_triangle.iter_mut() { + list.sort_unstable(); + } + + let mut xadj = Vec::with_capacity(triangle_count + 1); + let mut adjncy = Vec::new(); + let mut adjwgt = Vec::new(); + for connected_triangles in connected_triangles_per_triangle { + xadj.push(adjncy.len() as i32); + for (connected_triangle_id, shared_vertex_count) in connected_triangles { + adjncy.push(connected_triangle_id as i32); + adjwgt.push(shared_vertex_count); + // TODO: Additional weight based on triangle center spatial proximity? 
+ } + } + xadj.push(adjncy.len() as i32); + + let mut options = [-1; metis::NOPTIONS]; + options[metis::option::Seed::INDEX] = 17; + options[metis::option::UFactor::INDEX] = 1; // Important that there's very little imbalance between partitions + + let mut meshlet_per_triangle = vec![0; triangle_count]; + let partition_count = triangle_count.div_ceil(126); // Need to undershoot to prevent METIS from going over 128 triangles per meshlet + Graph::new(1, partition_count as i32, &xadj, &adjncy) + .unwrap() + .set_options(&options) + .set_adjwgt(&adjwgt) + .part_recursive(&mut meshlet_per_triangle) + .unwrap(); + + let mut indices_per_meshlet = vec![Vec::new(); partition_count]; + for (triangle_id, meshlet) in meshlet_per_triangle.into_iter().enumerate() { + let meshlet_indices = &mut indices_per_meshlet[meshlet as usize]; + let base_index = triangle_id * 3; + meshlet_indices.extend_from_slice(&indices[base_index..(base_index + 3)]); + } + + // Use meshopt to build meshlets from the sets of triangles + let mut meshlets = Meshlets { + meshlets: Vec::new(), + vertices: Vec::new(), + triangles: Vec::new(), + }; + for meshlet_indices in &indices_per_meshlet { + let meshlet = build_meshlets(meshlet_indices, vertices, 255, 128, 0.0); + let vertex_offset = meshlets.vertices.len() as u32; + let triangle_offset = meshlets.triangles.len() as u32; + meshlets.vertices.extend_from_slice(&meshlet.vertices); + meshlets.triangles.extend_from_slice(&meshlet.triangles); + meshlets + .meshlets + .extend(meshlet.meshlets.into_iter().map(|mut meshlet| { + meshlet.vertex_offset += vertex_offset; + meshlet.triangle_offset += triangle_offset; + meshlet + })); + } + meshlets } fn find_connected_meshlets( @@ -315,15 +421,19 @@ fn group_meshlets( } xadj.push(adjncy.len() as i32); + let mut options = [-1; metis::NOPTIONS]; + options[metis::option::Seed::INDEX] = 17; + options[metis::option::UFactor::INDEX] = 200; + let mut group_per_meshlet = vec![0; simplification_queue.len()]; let partition_count 
= simplification_queue .len() .div_ceil(TARGET_MESHLETS_PER_GROUP); // TODO: Nanite uses groups of 8-32, probably based on some kind of heuristic Graph::new(1, partition_count as i32, &xadj, &adjncy) .unwrap() - .set_option(metis::option::Seed(17)) + .set_options(&options) .set_adjwgt(&adjwgt) - .part_kway(&mut group_per_meshlet) + .part_recursive(&mut group_per_meshlet) .unwrap(); let mut groups = vec![SmallVec::new(); partition_count]; @@ -462,9 +572,16 @@ fn compute_lod_group_data( fn split_simplified_group_into_new_meshlets( simplified_group_indices: &[u32], vertices: &VertexDataAdapter<'_>, + position_only_vertex_remap: &[u32], + position_only_vertex_count: usize, meshlets: &mut Meshlets, ) -> usize { - let simplified_meshlets = compute_meshlets(simplified_group_indices, vertices); + let simplified_meshlets = compute_meshlets( + simplified_group_indices, + vertices, + position_only_vertex_remap, + position_only_vertex_count, + ); let new_meshlets_count = simplified_meshlets.len(); let vertex_offset = meshlets.vertices.len() as u32; @@ -486,7 +603,6 @@ fn split_simplified_group_into_new_meshlets( new_meshlets_count } -#[allow(clippy::too_many_arguments)] fn build_and_compress_per_meshlet_vertex_data( meshlet: &meshopt_Meshlet, meshlet_vertex_ids: &[u32], @@ -610,7 +726,7 @@ fn pack2x16snorm(v: Vec2) -> u32 { pub enum MeshToMeshletMeshConversionError { #[error("Mesh primitive topology is not TriangleList")] WrongMeshPrimitiveTopology, - #[error("Mesh attributes are not {{POSITION, NORMAL, UV_0}}")] + #[error("Mesh vertex attributes are not {{POSITION, NORMAL, UV_0}}")] WrongMeshVertexAttributes, #[error("Mesh has no indices")] MeshMissingIndices, diff --git a/crates/bevy_pbr/src/meshlet/instance_manager.rs b/crates/bevy_pbr/src/meshlet/instance_manager.rs index c190d7ea367b0..661d4791aeac7 100644 --- a/crates/bevy_pbr/src/meshlet/instance_manager.rs +++ b/crates/bevy_pbr/src/meshlet/instance_manager.rs @@ -1,20 +1,22 @@ use 
super::{meshlet_mesh_manager::MeshletMeshManager, MeshletMesh, MeshletMesh3d}; use crate::{ - Material, MeshFlags, MeshTransforms, MeshUniform, NotShadowCaster, NotShadowReceiver, - PreviousGlobalTransform, RenderMaterialInstances, RenderMeshMaterialIds, + material::DUMMY_MESH_MATERIAL, Material, MaterialBindingId, MeshFlags, MeshTransforms, + MeshUniform, NotShadowCaster, NotShadowReceiver, PreviousGlobalTransform, + RenderMaterialBindings, RenderMaterialInstances, }; use bevy_asset::{AssetEvent, AssetServer, Assets, UntypedAssetId}; use bevy_ecs::{ entity::{Entities, Entity, EntityHashMap}, event::EventReader, query::Has, - system::{Local, Query, Res, ResMut, Resource, SystemState}, + resource::Resource, + system::{Local, Query, Res, ResMut, SystemState}, }; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ render_resource::StorageBuffer, sync_world::MainEntity, view::RenderLayers, MainWorld, }; use bevy_transform::components::GlobalTransform; -use bevy_utils::{HashMap, HashSet}; use core::ops::{DerefMut, Range}; /// Manages data for each entity with a [`MeshletMesh`]. 
@@ -81,7 +83,6 @@ impl InstanceManager { } } - #[allow(clippy::too_many_arguments)] pub fn add_instance( &mut self, instance: MainEntity, @@ -89,7 +90,8 @@ impl InstanceManager { transform: &GlobalTransform, previous_transform: Option<&PreviousGlobalTransform>, render_layers: Option<&RenderLayers>, - mesh_material_ids: &RenderMeshMaterialIds, + mesh_material_ids: &RenderMaterialInstances, + render_material_bindings: &RenderMaterialBindings, not_shadow_receiver: bool, not_shadow_caster: bool, ) { @@ -110,14 +112,15 @@ impl InstanceManager { flags: flags.bits(), }; - let Some(mesh_material_asset_id) = mesh_material_ids.mesh_to_material.get(&instance) else { - return; - }; - let Some(mesh_material_binding_id) = mesh_material_ids - .material_to_binding - .get(mesh_material_asset_id) - else { - return; + let mesh_material = mesh_material_ids.mesh_material(instance); + let mesh_material_binding_id = if mesh_material != DUMMY_MESH_MATERIAL.untyped() { + render_material_bindings + .get(&mesh_material) + .cloned() + .unwrap_or_default() + } else { + // Use a dummy binding ID if the mesh has no material + MaterialBindingId::default() }; let mesh_uniform = MeshUniform::new( @@ -189,7 +192,8 @@ pub fn extract_meshlet_mesh_entities( mut instance_manager: ResMut, // TODO: Replace main_world and system_state when Extract>> is possible mut main_world: ResMut, - mesh_material_ids: Res, + mesh_material_ids: Res, + render_material_bindings: Res, mut system_state: Local< Option< SystemState<( @@ -259,6 +263,7 @@ pub fn extract_meshlet_mesh_entities( previous_transform, render_layers, &mesh_material_ids, + &render_material_bindings, not_shadow_receiver, not_shadow_caster, ); @@ -269,20 +274,22 @@ pub fn extract_meshlet_mesh_entities( /// and note that the material is used by at least one entity in the scene. 
pub fn queue_material_meshlet_meshes( mut instance_manager: ResMut, - render_material_instances: Res>, + render_material_instances: Res, ) { let instance_manager = instance_manager.deref_mut(); for (i, (instance, _, _)) in instance_manager.instances.iter().enumerate() { - if let Some(material_asset_id) = render_material_instances.get(instance) { - if let Some(material_id) = instance_manager - .material_id_lookup - .get(&material_asset_id.untyped()) - { - instance_manager - .material_ids_present_in_scene - .insert(*material_id); - instance_manager.instance_material_ids.get_mut()[i] = *material_id; + if let Some(material_instance) = render_material_instances.instances.get(instance) { + if let Ok(material_asset_id) = material_instance.asset_id.try_typed::() { + if let Some(material_id) = instance_manager + .material_id_lookup + .get(&material_asset_id.untyped()) + { + instance_manager + .material_ids_present_in_scene + .insert(*material_id); + instance_manager.instance_material_ids.get_mut()[i] = *material_id; + } } } } diff --git a/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs b/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs index 3d41688add68e..57762bfc8a609 100644 --- a/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs +++ b/crates/bevy_pbr/src/meshlet/material_pipeline_prepare.rs @@ -13,6 +13,7 @@ use bevy_core_pipeline::{ tonemapping::{DebandDither, Tonemapping}, }; use bevy_derive::{Deref, DerefMut}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_render::{ camera::TemporalJitter, mesh::{Mesh, MeshVertexBufferLayout, MeshVertexBufferLayoutRef, MeshVertexBufferLayouts}, @@ -20,7 +21,6 @@ use bevy_render::{ render_resource::*, view::ExtractedView, }; -use bevy_utils::{HashMap, HashSet}; use core::hash::Hash; /// A list of `(Material ID, Pipeline, BindGroup)` for a view for use in [`super::MeshletMainOpaquePass3dNode`]. 
@@ -29,7 +29,6 @@ pub struct MeshletViewMaterialsMainOpaquePass(pub Vec<(u32, CachedRenderPipeline /// Prepare [`Material`] pipelines for [`super::MeshletMesh`] entities for use in [`super::MeshletMainOpaquePass3dNode`], /// and register the material with [`InstanceManager`]. -#[allow(clippy::too_many_arguments)] pub fn prepare_material_meshlet_meshes_main_opaque_pass( resource_manager: ResMut, mut instance_manager: ResMut, @@ -38,7 +37,7 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( material_pipeline: Res>, mesh_pipeline: Res, render_materials: Res>>, - render_material_instances: Res>, + render_material_instances: Res, material_bind_group_allocator: Res>, asset_server: Res, mut mesh_vertex_buffer_layouts: ResMut, @@ -49,7 +48,7 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( Option<&Tonemapping>, Option<&DebandDither>, Option<&ShadowFilteringMethod>, - Has, + (Has, Has), ( Has, Has, @@ -74,7 +73,7 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( tonemapping, dither, shadow_filter_method, - ssao, + (ssao, distance_fog), (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), temporal_jitter, projection, @@ -114,6 +113,7 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( view_key |= match projection { Projection::Perspective(_) => MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE, Projection::Orthographic(_) => MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC, + Projection::Custom(_) => MeshPipelineKey::VIEW_PROJECTION_NONSTANDARD, }; } @@ -142,11 +142,19 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( if ssao { view_key |= MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION; } + if distance_fog { + view_key |= MeshPipelineKey::DISTANCE_FOG; + } view_key |= MeshPipelineKey::from_primitive_topology(PrimitiveTopology::TriangleList); - for material_id in render_material_instances.values().collect::>() { - let Some(material) = render_materials.get(*material_id) else { + for material_id in 
render_material_instances + .instances + .values() + .flat_map(|instance| instance.asset_id.try_typed::().ok()) + .collect::>() + { + let Some(material) = render_materials.get(material_id) else { continue; }; let Some(material_bind_group) = @@ -225,7 +233,7 @@ pub fn prepare_material_meshlet_meshes_main_opaque_pass( else { continue; }; - let Some(bind_group) = material_bind_group.get_bind_group() else { + let Some(bind_group) = material_bind_group.bind_group() else { continue; }; @@ -246,7 +254,6 @@ pub struct MeshletViewMaterialsDeferredGBufferPrepass( /// Prepare [`Material`] pipelines for [`super::MeshletMesh`] entities for use in [`super::MeshletPrepassNode`], /// and [`super::MeshletDeferredGBufferPrepassNode`] and register the material with [`InstanceManager`]. -#[allow(clippy::too_many_arguments)] pub fn prepare_material_meshlet_meshes_prepass( resource_manager: ResMut, mut instance_manager: ResMut, @@ -254,7 +261,7 @@ pub fn prepare_material_meshlet_meshes_prepass( pipeline_cache: Res, prepass_pipeline: Res>, render_materials: Res>>, - render_material_instances: Res>, + render_material_instances: Res, mut mesh_vertex_buffer_layouts: ResMut, material_bind_group_allocator: Res>, asset_server: Res, @@ -291,8 +298,13 @@ pub fn prepare_material_meshlet_meshes_prepass( view_key |= MeshPipelineKey::from_primitive_topology(PrimitiveTopology::TriangleList); - for material_id in render_material_instances.values().collect::>() { - let Some(material) = render_materials.get(*material_id) else { + for material_id in render_material_instances + .instances + .values() + .flat_map(|instance| instance.asset_id.try_typed::().ok()) + .collect::>() + { + let Some(material) = render_materials.get(material_id) else { continue; }; let Some(material_bind_group) = @@ -334,9 +346,12 @@ pub fn prepare_material_meshlet_meshes_prepass( shader_defs.push("MESHLET_MESH_MATERIAL_PASS".into()); let view_layout = if view_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { - 
prepass_pipeline.view_layout_motion_vectors.clone() + prepass_pipeline.internal.view_layout_motion_vectors.clone() } else { - prepass_pipeline.view_layout_no_motion_vectors.clone() + prepass_pipeline + .internal + .view_layout_no_motion_vectors + .clone() }; let fragment_shader = if view_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { @@ -355,7 +370,7 @@ pub fn prepare_material_meshlet_meshes_prepass( layout: vec![ view_layout, resource_manager.material_shade_bind_group_layout.clone(), - prepass_pipeline.material_layout.clone(), + prepass_pipeline.internal.material_layout.clone(), ], push_constant_ranges: vec![], vertex: VertexState { @@ -397,7 +412,7 @@ pub fn prepare_material_meshlet_meshes_prepass( else { continue; }; - let Some(bind_group) = material_bind_group.get_bind_group() else { + let Some(bind_group) = material_bind_group.bind_group() else { continue; }; diff --git a/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl b/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl index 7af63d0e0fe83..e179e78b7ae5e 100644 --- a/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl +++ b/crates/bevy_pbr/src/meshlet/meshlet_bindings.wgsl @@ -100,9 +100,9 @@ fn cluster_is_second_pass_candidate(cluster_id: u32) -> bool { @group(0) @binding(6) var meshlet_raster_clusters: array; // Single object shared between all workgroups @group(0) @binding(7) var meshlet_software_raster_cluster_count: u32; #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT -@group(0) @binding(8) var meshlet_visibility_buffer: array>; // Per pixel +@group(0) @binding(8) var meshlet_visibility_buffer: texture_storage_2d; #else -@group(0) @binding(8) var meshlet_visibility_buffer: array>; // Per pixel +@group(0) @binding(8) var meshlet_visibility_buffer: texture_storage_2d; #endif @group(0) @binding(9) var view: View; @@ -149,7 +149,7 @@ fn get_meshlet_vertex_position(meshlet: ptr, vertex_id: u32) #endif #ifdef MESHLET_MESH_MATERIAL_PASS -@group(1) @binding(0) var meshlet_visibility_buffer: array; // Per 
pixel +@group(1) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; @group(1) @binding(1) var meshlet_cluster_meshlet_ids: array; // Per cluster @group(1) @binding(2) var meshlets: array; // Per meshlet @group(1) @binding(3) var meshlet_indices: array; // Many per meshlet diff --git a/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs b/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs index 4cae0d50d19f5..0f4aab7509a82 100644 --- a/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs +++ b/crates/bevy_pbr/src/meshlet/meshlet_mesh_manager.rs @@ -6,15 +6,16 @@ use super::{ use alloc::sync::Arc; use bevy_asset::{AssetId, Assets}; use bevy_ecs::{ - system::{Res, ResMut, Resource}, + resource::Resource, + system::{Res, ResMut}, world::{FromWorld, World}, }; use bevy_math::Vec2; +use bevy_platform::collections::HashMap; use bevy_render::{ render_resource::BufferAddress, renderer::{RenderDevice, RenderQueue}, }; -use bevy_utils::HashMap; use core::ops::Range; /// Manages uploading [`MeshletMesh`] asset data to the GPU. diff --git a/crates/bevy_pbr/src/meshlet/mod.rs b/crates/bevy_pbr/src/meshlet/mod.rs index 0ad880877d1cb..2e483b210ce55 100644 --- a/crates/bevy_pbr/src/meshlet/mod.rs +++ b/crates/bevy_pbr/src/meshlet/mod.rs @@ -1,4 +1,3 @@ -#![expect(deprecated)] //! Render high-poly 3d meshes using an efficient GPU-driven method. See [`MeshletPlugin`] and [`MeshletMesh`] for details. 
mod asset; @@ -40,7 +39,6 @@ pub use self::asset::{ pub use self::from_mesh::{ MeshToMeshletMeshConversionError, MESHLET_DEFAULT_VERTEX_POSITION_QUANTIZATION_FACTOR, }; - use self::{ graph::NodeMeshlet, instance_manager::extract_meshlet_mesh_entities, @@ -58,21 +56,20 @@ use self::{ }, visibility_buffer_raster_node::MeshletVisibilityBufferRasterPassNode, }; -use crate::{graph::NodePbr, Material, MeshMaterial3d, PreviousGlobalTransform}; +use crate::{graph::NodePbr, PreviousGlobalTransform}; use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, AssetApp, AssetId, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, AssetApp, AssetId, Handle}; use bevy_core_pipeline::{ core_3d::graph::{Core3d, Node3d}, prepass::{DeferredPrepass, MotionVectorPrepass, NormalPrepass}, }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - bundle::Bundle, - component::{require, Component}, + component::Component, entity::Entity, query::Has, reflect::ReflectComponent, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Commands, Query}, }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -81,19 +78,17 @@ use bevy_render::{ render_resource::Shader, renderer::RenderDevice, settings::WgpuFeatures, - view::{ - self, prepare_view_targets, InheritedVisibility, Msaa, ViewVisibility, Visibility, - VisibilityClass, - }, + view::{self, prepare_view_targets, Msaa, Visibility, VisibilityClass}, ExtractSchedule, Render, RenderApp, RenderSet, }; -use bevy_transform::components::{GlobalTransform, Transform}; -use bevy_utils::tracing::error; +use bevy_transform::components::Transform; use derive_more::From; +use tracing::error; -const MESHLET_BINDINGS_SHADER_HANDLE: Handle = Handle::weak_from_u128(1325134235233421); +const MESHLET_BINDINGS_SHADER_HANDLE: Handle = + weak_handle!("d90ac78c-500f-48aa-b488-cc98eb3f6314"); const MESHLET_MESH_MATERIAL_SHADER_HANDLE: Handle = - Handle::weak_from_u128(3325134235233421); + 
weak_handle!("db8d9001-6ca7-4d00-968a-d5f5b96b89c3"); /// Provides a plugin for rendering large amounts of high-poly 3d meshes using an efficient GPU-driven method. See also [`MeshletMesh`]. /// @@ -111,9 +106,9 @@ const MESHLET_MESH_MATERIAL_SHADER_HANDLE: Handle = /// * Requires preprocessing meshes. See [`MeshletMesh`] for details. /// * Limitations on the kinds of materials you can use. See [`MeshletMesh`] for details. /// -/// This plugin requires a fairly recent GPU that supports [`WgpuFeatures::SHADER_INT64_ATOMIC_MIN_MAX`]. +/// This plugin requires a fairly recent GPU that supports [`WgpuFeatures::TEXTURE_INT64_ATOMIC`]. /// -/// This plugin currently works only on the Vulkan backend. +/// This plugin currently works only on the Vulkan and Metal backends. /// /// This plugin is not compatible with [`Msaa`]. Any camera rendering a [`MeshletMesh`] must have /// [`Msaa`] set to [`Msaa::Off`]. @@ -138,7 +133,8 @@ pub struct MeshletPlugin { impl MeshletPlugin { /// [`WgpuFeatures`] required for this plugin to function. 
pub fn required_wgpu_features() -> WgpuFeatures { - WgpuFeatures::SHADER_INT64_ATOMIC_MIN_MAX + WgpuFeatures::TEXTURE_INT64_ATOMIC + | WgpuFeatures::TEXTURE_ATOMIC | WgpuFeatures::SHADER_INT64 | WgpuFeatures::SUBGROUP | WgpuFeatures::DEPTH_CLIP_CONTROL @@ -156,6 +152,12 @@ impl Plugin for MeshletPlugin { std::process::exit(1); } + load_internal_asset!( + app, + MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE, + "clear_visibility_buffer.wgsl", + Shader::from_wgsl + ); load_internal_asset!( app, MESHLET_BINDINGS_SHADER_HANDLE, @@ -180,12 +182,6 @@ impl Plugin for MeshletPlugin { "cull_clusters.wgsl", Shader::from_wgsl ); - load_internal_asset!( - app, - MESHLET_DOWNSAMPLE_DEPTH_SHADER_HANDLE, - "downsample_depth.wgsl", - Shader::from_wgsl - ); load_internal_asset!( app, MESHLET_VISIBILITY_BUFFER_SOFTWARE_RASTER_SHADER_HANDLE, @@ -257,14 +253,11 @@ impl Plugin for MeshletPlugin { Core3d, ( NodeMeshlet::VisibilityBufferRasterPass, - NodePbr::ShadowPass, + NodePbr::EarlyShadowPass, // NodeMeshlet::Prepass, - Node3d::Prepass, // NodeMeshlet::DeferredPrepass, - Node3d::DeferredPrepass, - Node3d::CopyDeferredLightingId, Node3d::EndPrepasses, // Node3d::StartMainPass, @@ -297,7 +290,7 @@ impl Plugin for MeshletPlugin { /// The meshlet mesh equivalent of [`bevy_render::mesh::Mesh3d`]. #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone, PartialEq)] #[require(Transform, PreviousGlobalTransform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct MeshletMesh3d(pub Handle); @@ -314,39 +307,6 @@ impl From<&MeshletMesh3d> for AssetId { } } -/// A component bundle for entities with a [`MeshletMesh`] and a [`Material`]. -#[derive(Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `MeshletMesh3d` and `MeshMaterial3d` components instead. 
Inserting them will now also insert the other components required by them automatically." -)] -pub struct MaterialMeshletMeshBundle { - pub meshlet_mesh: MeshletMesh3d, - pub material: MeshMaterial3d, - pub transform: Transform, - pub global_transform: GlobalTransform, - /// User indication of whether an entity is visible - pub visibility: Visibility, - /// Inherited visibility of an entity. - pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering - pub view_visibility: ViewVisibility, -} - -impl Default for MaterialMeshletMeshBundle { - fn default() -> Self { - Self { - meshlet_mesh: Default::default(), - material: Default::default(), - transform: Default::default(), - global_transform: Default::default(), - visibility: Default::default(), - inherited_visibility: Default::default(), - view_visibility: Default::default(), - } - } -} - fn configure_meshlet_views( mut views_3d: Query<( Entity, diff --git a/crates/bevy_pbr/src/meshlet/pipelines.rs b/crates/bevy_pbr/src/meshlet/pipelines.rs index 97f1203d2d3b2..c25d896b8a385 100644 --- a/crates/bevy_pbr/src/meshlet/pipelines.rs +++ b/crates/bevy_pbr/src/meshlet/pipelines.rs @@ -1,31 +1,35 @@ use super::resource_manager::ResourceManager; -use bevy_asset::Handle; +use bevy_asset::{weak_handle, Handle}; use bevy_core_pipeline::{ - core_3d::CORE_3D_DEPTH_FORMAT, fullscreen_vertex_shader::fullscreen_shader_vertex_state, + core_3d::CORE_3D_DEPTH_FORMAT, experimental::mip_generation::DOWNSAMPLE_DEPTH_SHADER_HANDLE, + fullscreen_vertex_shader::fullscreen_shader_vertex_state, }; use bevy_ecs::{ - system::Resource, + resource::Resource, world::{FromWorld, World}, }; use bevy_render::render_resource::*; +pub const MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE: Handle = + weak_handle!("a4bf48e4-5605-4d1c-987e-29c7b1ec95dc"); pub const MESHLET_FILL_CLUSTER_BUFFERS_SHADER_HANDLE: Handle = - 
Handle::weak_from_u128(4325134235233421); -pub const MESHLET_CULLING_SHADER_HANDLE: Handle = Handle::weak_from_u128(5325134235233421); -pub const MESHLET_DOWNSAMPLE_DEPTH_SHADER_HANDLE: Handle = - Handle::weak_from_u128(6325134235233421); + weak_handle!("80ccea4a-8234-4ee0-af74-77b3cad503cf"); +pub const MESHLET_CULLING_SHADER_HANDLE: Handle = + weak_handle!("d71c5879-97fa-49d1-943e-ed9162fe8adb"); pub const MESHLET_VISIBILITY_BUFFER_SOFTWARE_RASTER_SHADER_HANDLE: Handle = - Handle::weak_from_u128(7325134235233421); + weak_handle!("68cc6826-8321-43d1-93d5-4f61f0456c13"); pub const MESHLET_VISIBILITY_BUFFER_HARDWARE_RASTER_SHADER_HANDLE: Handle = - Handle::weak_from_u128(8325134235233421); + weak_handle!("4b4e3020-748f-4baf-b011-87d9d2a12796"); pub const MESHLET_RESOLVE_RENDER_TARGETS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(9325134235233421); + weak_handle!("c218ce17-cf59-4268-8898-13ecf384f133"); pub const MESHLET_REMAP_1D_TO_2D_DISPATCH_SHADER_HANDLE: Handle = - Handle::weak_from_u128(9425134235233421); + weak_handle!("f5b7edfc-2eac-4407-8f5c-1265d4d795c2"); #[derive(Resource)] pub struct MeshletPipelines { fill_cluster_buffers: CachedComputePipelineId, + clear_visibility_buffer: CachedComputePipelineId, + clear_visibility_buffer_shadow_view: CachedComputePipelineId, cull_first: CachedComputePipelineId, cull_second: CachedComputePipelineId, downsample_depth_first: CachedComputePipelineId, @@ -33,10 +37,10 @@ pub struct MeshletPipelines { downsample_depth_first_shadow_view: CachedComputePipelineId, downsample_depth_second_shadow_view: CachedComputePipelineId, visibility_buffer_software_raster: CachedComputePipelineId, - visibility_buffer_software_raster_depth_only: CachedComputePipelineId, + visibility_buffer_software_raster_shadow_view: CachedComputePipelineId, visibility_buffer_hardware_raster: CachedRenderPipelineId, - visibility_buffer_hardware_raster_depth_only: CachedRenderPipelineId, - visibility_buffer_hardware_raster_depth_only_unclipped: 
CachedRenderPipelineId, + visibility_buffer_hardware_raster_shadow_view: CachedRenderPipelineId, + visibility_buffer_hardware_raster_shadow_view_unclipped: CachedRenderPipelineId, resolve_depth: CachedRenderPipelineId, resolve_depth_shadow_view: CachedRenderPipelineId, resolve_material_depth: CachedRenderPipelineId, @@ -49,12 +53,27 @@ impl FromWorld for MeshletPipelines { let fill_cluster_buffers_bind_group_layout = resource_manager .fill_cluster_buffers_bind_group_layout .clone(); + let clear_visibility_buffer_bind_group_layout = resource_manager + .clear_visibility_buffer_bind_group_layout + .clone(); + let clear_visibility_buffer_shadow_view_bind_group_layout = resource_manager + .clear_visibility_buffer_shadow_view_bind_group_layout + .clone(); let cull_layout = resource_manager.culling_bind_group_layout.clone(); let downsample_depth_layout = resource_manager.downsample_depth_bind_group_layout.clone(); + let downsample_depth_shadow_view_layout = resource_manager + .downsample_depth_shadow_view_bind_group_layout + .clone(); let visibility_buffer_raster_layout = resource_manager .visibility_buffer_raster_bind_group_layout .clone(); + let visibility_buffer_raster_shadow_view_layout = resource_manager + .visibility_buffer_raster_shadow_view_bind_group_layout + .clone(); let resolve_depth_layout = resource_manager.resolve_depth_bind_group_layout.clone(); + let resolve_depth_shadow_view_layout = resource_manager + .resolve_depth_shadow_view_bind_group_layout + .clone(); let resolve_material_depth_layout = resource_manager .resolve_material_depth_bind_group_layout .clone(); @@ -67,7 +86,7 @@ impl FromWorld for MeshletPipelines { fill_cluster_buffers: pipeline_cache.queue_compute_pipeline( ComputePipelineDescriptor { label: Some("meshlet_fill_cluster_buffers_pipeline".into()), - layout: vec![fill_cluster_buffers_bind_group_layout.clone()], + layout: vec![fill_cluster_buffers_bind_group_layout], push_constant_ranges: vec![PushConstantRange { stages: 
ShaderStages::COMPUTE, range: 0..4, @@ -79,6 +98,36 @@ impl FromWorld for MeshletPipelines { }, ), + clear_visibility_buffer: pipeline_cache.queue_compute_pipeline( + ComputePipelineDescriptor { + label: Some("meshlet_clear_visibility_buffer_pipeline".into()), + layout: vec![clear_visibility_buffer_bind_group_layout], + push_constant_ranges: vec![PushConstantRange { + stages: ShaderStages::COMPUTE, + range: 0..8, + }], + shader: MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE, + shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into()], + entry_point: "clear_visibility_buffer".into(), + zero_initialize_workgroup_memory: false, + }, + ), + + clear_visibility_buffer_shadow_view: pipeline_cache.queue_compute_pipeline( + ComputePipelineDescriptor { + label: Some("meshlet_clear_visibility_buffer_shadow_view_pipeline".into()), + layout: vec![clear_visibility_buffer_shadow_view_bind_group_layout], + push_constant_ranges: vec![PushConstantRange { + stages: ShaderStages::COMPUTE, + range: 0..8, + }], + shader: MESHLET_CLEAR_VISIBILITY_BUFFER_SHADER_HANDLE, + shader_defs: vec![], + entry_point: "clear_visibility_buffer".into(), + zero_initialize_workgroup_memory: false, + }, + ), + cull_first: pipeline_cache.queue_compute_pipeline(ComputePipelineDescriptor { label: Some("meshlet_culling_first_pipeline".into()), layout: vec![cull_layout.clone()], @@ -117,10 +166,13 @@ impl FromWorld for MeshletPipelines { layout: vec![downsample_depth_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], - shader: MESHLET_DOWNSAMPLE_DEPTH_SHADER_HANDLE, - shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into()], + shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, + shader_defs: vec![ + "MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into(), + "MESHLET".into(), + ], entry_point: "downsample_depth_first".into(), zero_initialize_workgroup_memory: false, }, @@ -132,10 +184,13 @@ impl FromWorld for 
MeshletPipelines { layout: vec![downsample_depth_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], - shader: MESHLET_DOWNSAMPLE_DEPTH_SHADER_HANDLE, - shader_defs: vec!["MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into()], + shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, + shader_defs: vec![ + "MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT".into(), + "MESHLET".into(), + ], entry_point: "downsample_depth_second".into(), zero_initialize_workgroup_memory: false, }, @@ -144,13 +199,13 @@ impl FromWorld for MeshletPipelines { downsample_depth_first_shadow_view: pipeline_cache.queue_compute_pipeline( ComputePipelineDescriptor { label: Some("meshlet_downsample_depth_first_pipeline".into()), - layout: vec![downsample_depth_layout.clone()], + layout: vec![downsample_depth_shadow_view_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], - shader: MESHLET_DOWNSAMPLE_DEPTH_SHADER_HANDLE, - shader_defs: vec![], + shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, + shader_defs: vec!["MESHLET".into()], entry_point: "downsample_depth_first".into(), zero_initialize_workgroup_memory: false, }, @@ -159,13 +214,13 @@ impl FromWorld for MeshletPipelines { downsample_depth_second_shadow_view: pipeline_cache.queue_compute_pipeline( ComputePipelineDescriptor { label: Some("meshlet_downsample_depth_second_pipeline".into()), - layout: vec![downsample_depth_layout], + layout: vec![downsample_depth_shadow_view_layout], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::COMPUTE, - range: 0..8, + range: 0..4, }], - shader: MESHLET_DOWNSAMPLE_DEPTH_SHADER_HANDLE, - shader_defs: vec![], + shader: DOWNSAMPLE_DEPTH_SHADER_HANDLE, + shader_defs: vec!["MESHLET".into()], entry_point: "downsample_depth_second".into(), zero_initialize_workgroup_memory: false, }, @@ -192,12 +247,12 @@ impl FromWorld for MeshletPipelines { }, ), - 
visibility_buffer_software_raster_depth_only: pipeline_cache.queue_compute_pipeline( + visibility_buffer_software_raster_shadow_view: pipeline_cache.queue_compute_pipeline( ComputePipelineDescriptor { label: Some( - "meshlet_visibility_buffer_software_raster_depth_only_pipeline".into(), + "meshlet_visibility_buffer_software_raster_shadow_view_pipeline".into(), ), - layout: vec![visibility_buffer_raster_layout.clone()], + layout: vec![visibility_buffer_raster_shadow_view_layout.clone()], push_constant_ranges: vec![], shader: MESHLET_VISIBILITY_BUFFER_SOFTWARE_RASTER_SHADER_HANDLE, shader_defs: vec![ @@ -259,12 +314,12 @@ impl FromWorld for MeshletPipelines { }, ), - visibility_buffer_hardware_raster_depth_only: pipeline_cache.queue_render_pipeline( + visibility_buffer_hardware_raster_shadow_view: pipeline_cache.queue_render_pipeline( RenderPipelineDescriptor { label: Some( - "meshlet_visibility_buffer_hardware_raster_depth_only_pipeline".into(), + "meshlet_visibility_buffer_hardware_raster_shadow_view_pipeline".into(), ), - layout: vec![visibility_buffer_raster_layout.clone()], + layout: vec![visibility_buffer_raster_shadow_view_layout.clone()], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::VERTEX, range: 0..4, @@ -300,13 +355,13 @@ impl FromWorld for MeshletPipelines { }, ), - visibility_buffer_hardware_raster_depth_only_unclipped: pipeline_cache + visibility_buffer_hardware_raster_shadow_view_unclipped: pipeline_cache .queue_render_pipeline(RenderPipelineDescriptor { label: Some( - "meshlet_visibility_buffer_hardware_raster_depth_only_unclipped_pipeline" + "meshlet_visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline" .into(), ), - layout: vec![visibility_buffer_raster_layout], + layout: vec![visibility_buffer_raster_shadow_view_layout], push_constant_ranges: vec![PushConstantRange { stages: ShaderStages::VERTEX, range: 0..4, @@ -343,17 +398,14 @@ impl FromWorld for MeshletPipelines { resolve_depth: 
pipeline_cache.queue_render_pipeline(RenderPipelineDescriptor { label: Some("meshlet_resolve_depth_pipeline".into()), - layout: vec![resolve_depth_layout.clone()], - push_constant_ranges: vec![PushConstantRange { - stages: ShaderStages::FRAGMENT, - range: 0..4, - }], + layout: vec![resolve_depth_layout], + push_constant_ranges: vec![], vertex: fullscreen_shader_vertex_state(), primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { format: CORE_3D_DEPTH_FORMAT, depth_write_enabled: true, - depth_compare: CompareFunction::GreaterEqual, + depth_compare: CompareFunction::Always, stencil: StencilState::default(), bias: DepthBiasState::default(), }), @@ -370,17 +422,14 @@ impl FromWorld for MeshletPipelines { resolve_depth_shadow_view: pipeline_cache.queue_render_pipeline( RenderPipelineDescriptor { label: Some("meshlet_resolve_depth_pipeline".into()), - layout: vec![resolve_depth_layout], - push_constant_ranges: vec![PushConstantRange { - stages: ShaderStages::FRAGMENT, - range: 0..4, - }], + layout: vec![resolve_depth_shadow_view_layout], + push_constant_ranges: vec![], vertex: fullscreen_shader_vertex_state(), primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { format: CORE_3D_DEPTH_FORMAT, depth_write_enabled: true, - depth_compare: CompareFunction::GreaterEqual, + depth_compare: CompareFunction::Always, stencil: StencilState::default(), bias: DepthBiasState::default(), }), @@ -399,10 +448,7 @@ impl FromWorld for MeshletPipelines { RenderPipelineDescriptor { label: Some("meshlet_resolve_material_depth_pipeline".into()), layout: vec![resolve_material_depth_layout], - push_constant_ranges: vec![PushConstantRange { - stages: ShaderStages::FRAGMENT, - range: 0..4, - }], + push_constant_ranges: vec![], vertex: fullscreen_shader_vertex_state(), primitive: PrimitiveState::default(), depth_stencil: Some(DepthStencilState { @@ -454,6 +500,8 @@ impl MeshletPipelines { &ComputePipeline, &ComputePipeline, &ComputePipeline, + 
&ComputePipeline, + &ComputePipeline, &RenderPipeline, &RenderPipeline, &RenderPipeline, @@ -466,6 +514,8 @@ impl MeshletPipelines { let pipeline = world.get_resource::()?; Some(( pipeline_cache.get_compute_pipeline(pipeline.fill_cluster_buffers)?, + pipeline_cache.get_compute_pipeline(pipeline.clear_visibility_buffer)?, + pipeline_cache.get_compute_pipeline(pipeline.clear_visibility_buffer_shadow_view)?, pipeline_cache.get_compute_pipeline(pipeline.cull_first)?, pipeline_cache.get_compute_pipeline(pipeline.cull_second)?, pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_first)?, @@ -474,12 +524,12 @@ impl MeshletPipelines { pipeline_cache.get_compute_pipeline(pipeline.downsample_depth_second_shadow_view)?, pipeline_cache.get_compute_pipeline(pipeline.visibility_buffer_software_raster)?, pipeline_cache - .get_compute_pipeline(pipeline.visibility_buffer_software_raster_depth_only)?, + .get_compute_pipeline(pipeline.visibility_buffer_software_raster_shadow_view)?, pipeline_cache.get_render_pipeline(pipeline.visibility_buffer_hardware_raster)?, pipeline_cache - .get_render_pipeline(pipeline.visibility_buffer_hardware_raster_depth_only)?, + .get_render_pipeline(pipeline.visibility_buffer_hardware_raster_shadow_view)?, pipeline_cache.get_render_pipeline( - pipeline.visibility_buffer_hardware_raster_depth_only_unclipped, + pipeline.visibility_buffer_hardware_raster_shadow_view_unclipped, )?, pipeline_cache.get_render_pipeline(pipeline.resolve_depth)?, pipeline_cache.get_render_pipeline(pipeline.resolve_depth_shadow_view)?, diff --git a/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl b/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl index 3c0cfcf9436e2..eaa4eed6c4560 100644 --- a/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl +++ b/crates/bevy_pbr/src/meshlet/resolve_render_targets.wgsl @@ -1,35 +1,36 @@ #import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT 
-@group(0) @binding(0) var meshlet_visibility_buffer: array; // Per pixel +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; #else -@group(0) @binding(0) var meshlet_visibility_buffer: array; // Per pixel +@group(0) @binding(0) var meshlet_visibility_buffer: texture_storage_2d; #endif @group(0) @binding(1) var meshlet_cluster_instance_ids: array; // Per cluster @group(0) @binding(2) var meshlet_instance_material_ids: array; // Per entity instance -var view_width: u32; /// This pass writes out the depth texture. @fragment fn resolve_depth(in: FullscreenVertexOutput) -> @builtin(frag_depth) f32 { - let frag_coord_1d = u32(in.position.y) * view_width + u32(in.position.x); - let visibility = meshlet_visibility_buffer[frag_coord_1d]; + let visibility = textureLoad(meshlet_visibility_buffer, vec2(in.position.xy)).r; #ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT - return bitcast(u32(visibility >> 32u)); + let depth = u32(visibility >> 32u); #else - return bitcast(visibility); + let depth = visibility; #endif + + if depth == 0u { discard; } + + return bitcast(depth); } /// This pass writes out the material depth texture. 
#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT @fragment fn resolve_material_depth(in: FullscreenVertexOutput) -> @builtin(frag_depth) f32 { - let frag_coord_1d = u32(in.position.y) * view_width + u32(in.position.x); - let visibility = meshlet_visibility_buffer[frag_coord_1d]; + let visibility = textureLoad(meshlet_visibility_buffer, vec2(in.position.xy)).r; let depth = visibility >> 32u; - if depth == 0lu { return 0.0; } + if depth == 0lu { discard; } let cluster_id = u32(visibility) >> 7u; let instance_id = meshlet_cluster_instance_ids[cluster_id]; diff --git a/crates/bevy_pbr/src/meshlet/resource_manager.rs b/crates/bevy_pbr/src/meshlet/resource_manager.rs index 79473b2c36fe1..9b45d7676ab43 100644 --- a/crates/bevy_pbr/src/meshlet/resource_manager.rs +++ b/crates/bevy_pbr/src/meshlet/resource_manager.rs @@ -3,13 +3,15 @@ use crate::ShadowView; use alloc::sync::Arc; use bevy_core_pipeline::{ core_3d::Camera3d, + experimental::mip_generation::{self, ViewDepthPyramid}, prepass::{PreviousViewData, PreviousViewUniforms}, }; use bevy_ecs::{ component::Component, entity::{Entity, EntityHashMap}, query::AnyOf, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + system::{Commands, Query, Res, ResMut}, }; use bevy_math::{UVec2, Vec4Swizzles}; use bevy_render::{ @@ -19,7 +21,7 @@ use bevy_render::{ view::{ExtractedView, RenderLayers, ViewUniform, ViewUniforms}, }; use binding_types::*; -use core::{array, iter, sync::atomic::AtomicBool}; +use core::{iter, sync::atomic::AtomicBool}; use encase::internal::WriteInto; /// Manages per-view and per-cluster GPU resources for [`super::MeshletPlugin`]. 
@@ -48,10 +50,15 @@ pub struct ResourceManager { // Bind group layouts pub fill_cluster_buffers_bind_group_layout: BindGroupLayout, + pub clear_visibility_buffer_bind_group_layout: BindGroupLayout, + pub clear_visibility_buffer_shadow_view_bind_group_layout: BindGroupLayout, pub culling_bind_group_layout: BindGroupLayout, pub visibility_buffer_raster_bind_group_layout: BindGroupLayout, + pub visibility_buffer_raster_shadow_view_bind_group_layout: BindGroupLayout, pub downsample_depth_bind_group_layout: BindGroupLayout, + pub downsample_depth_shadow_view_bind_group_layout: BindGroupLayout, pub resolve_depth_bind_group_layout: BindGroupLayout, + pub resolve_depth_shadow_view_bind_group_layout: BindGroupLayout, pub resolve_material_depth_bind_group_layout: BindGroupLayout, pub material_shade_bind_group_layout: BindGroupLayout, pub remap_1d_to_2d_dispatch_bind_group_layout: Option, @@ -84,31 +91,11 @@ impl ResourceManager { label: Some("meshlet_depth_pyramid_sampler"), ..SamplerDescriptor::default() }), - depth_pyramid_dummy_texture: render_device - .create_texture(&TextureDescriptor { - label: Some("meshlet_depth_pyramid_dummy_texture"), - size: Extent3d { - width: 1, - height: 1, - depth_or_array_layers: 1, - }, - mip_level_count: 1, - sample_count: 1, - dimension: TextureDimension::D2, - format: TextureFormat::R32Float, - usage: TextureUsages::STORAGE_BINDING, - view_formats: &[], - }) - .create_view(&TextureViewDescriptor { - label: Some("meshlet_depth_pyramid_dummy_texture_view"), - format: Some(TextureFormat::R32Float), - dimension: Some(TextureViewDimension::D2), - aspect: TextureAspect::All, - base_mip_level: 0, - mip_level_count: Some(1), - base_array_layer: 0, - array_layer_count: Some(1), - }), + depth_pyramid_dummy_texture: mip_generation::create_depth_pyramid_dummy_texture( + render_device, + "meshlet_depth_pyramid_dummy_texture", + "meshlet_depth_pyramid_dummy_texture_view", + ), previous_depth_pyramids: EntityHashMap::default(), @@ -126,6 +113,21 @@ impl 
ResourceManager { ), ), ), + clear_visibility_buffer_bind_group_layout: render_device.create_bind_group_layout( + "meshlet_clear_visibility_buffer_bind_group_layout", + &BindGroupLayoutEntries::single( + ShaderStages::COMPUTE, + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::WriteOnly), + ), + ), + clear_visibility_buffer_shadow_view_bind_group_layout: render_device + .create_bind_group_layout( + "meshlet_clear_visibility_buffer_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::single( + ShaderStages::COMPUTE, + texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::WriteOnly), + ), + ), culling_bind_group_layout: render_device.create_bind_group_layout( "meshlet_culling_bind_group_layout", &BindGroupLayoutEntries::sequential( @@ -154,7 +156,34 @@ impl ResourceManager { texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly) }; ( - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + texture_storage_2d( + TextureFormat::R32Float, + StorageTextureAccess::ReadWrite, + ), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + write_only_r32float(), + sampler(SamplerBindingType::NonFiltering), + ) + }), + ), + downsample_depth_shadow_view_bind_group_layout: render_device.create_bind_group_layout( + "meshlet_downsample_depth_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::sequential(ShaderStages::COMPUTE, { + let write_only_r32float = || { + texture_storage_2d(TextureFormat::R32Float, StorageTextureAccess::WriteOnly) + }; + ( + texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::ReadOnly), write_only_r32float(), write_only_r32float(), write_only_r32float(), @@ -187,16 +216,45 @@ impl ResourceManager { 
storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), - storage_buffer_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::Atomic), uniform_buffer::(true), ), ), ), + visibility_buffer_raster_shadow_view_bind_group_layout: render_device + .create_bind_group_layout( + "meshlet_visibility_buffer_raster_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::sequential( + ShaderStages::all(), + ( + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + storage_buffer_read_only_sized(false, None), + texture_storage_2d( + TextureFormat::R32Uint, + StorageTextureAccess::Atomic, + ), + uniform_buffer::(true), + ), + ), + ), resolve_depth_bind_group_layout: render_device.create_bind_group_layout( "meshlet_resolve_depth_bind_group_layout", &BindGroupLayoutEntries::single( ShaderStages::FRAGMENT, - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), + ), + ), + resolve_depth_shadow_view_bind_group_layout: render_device.create_bind_group_layout( + "meshlet_resolve_depth_shadow_view_bind_group_layout", + &BindGroupLayoutEntries::single( + ShaderStages::FRAGMENT, + texture_storage_2d(TextureFormat::R32Uint, StorageTextureAccess::ReadOnly), ), ), resolve_material_depth_bind_group_layout: render_device.create_bind_group_layout( @@ -204,7 +262,7 @@ impl ResourceManager { &BindGroupLayoutEntries::sequential( ShaderStages::FRAGMENT, ( - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), storage_buffer_read_only_sized(false, None), 
storage_buffer_read_only_sized(false, None), ), @@ -215,7 +273,7 @@ impl ResourceManager { &BindGroupLayoutEntries::sequential( ShaderStages::FRAGMENT, ( - storage_buffer_read_only_sized(false, None), + texture_storage_2d(TextureFormat::R64Uint, StorageTextureAccess::ReadOnly), storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), storage_buffer_read_only_sized(false, None), @@ -252,24 +310,24 @@ pub struct MeshletViewResources { pub second_pass_candidates_buffer: Buffer, instance_visibility: Buffer, pub dummy_render_target: CachedTexture, - pub visibility_buffer: Buffer, + pub visibility_buffer: CachedTexture, pub visibility_buffer_software_raster_indirect_args_first: Buffer, pub visibility_buffer_software_raster_indirect_args_second: Buffer, pub visibility_buffer_hardware_raster_indirect_args_first: Buffer, pub visibility_buffer_hardware_raster_indirect_args_second: Buffer, - depth_pyramid_all_mips: TextureView, - depth_pyramid_mips: [TextureView; 12], - pub depth_pyramid_mip_count: u32, + pub depth_pyramid: ViewDepthPyramid, previous_depth_pyramid: TextureView, pub material_depth: Option, pub view_size: UVec2, pub raster_cluster_rightmost_slot: u32, + not_shadow_view: bool, } #[derive(Component)] pub struct MeshletViewBindGroups { pub first_node: Arc, pub fill_cluster_buffers: BindGroup, + pub clear_visibility_buffer: BindGroup, pub culling_first: BindGroup, pub culling_second: BindGroup, pub downsample_depth: BindGroup, @@ -397,7 +455,7 @@ pub fn prepare_meshlet_per_frame_resources( let index = instance_index / 32; let bit = instance_index - index * 32; if vec.len() <= index { - vec.extend(iter::repeat(0).take(index - vec.len() + 1)); + vec.extend(iter::repeat_n(0, index - vec.len() + 1)); } vec[index] |= 1 << bit; } @@ -439,18 +497,27 @@ pub fn prepare_meshlet_per_frame_resources( }, ); - let type_size = if not_shadow_view { - size_of::() - } else { - size_of::() - } as u64; - // TODO: Cache - let visibility_buffer = 
render_device.create_buffer(&BufferDescriptor { - label: Some("meshlet_visibility_buffer"), - size: type_size * (view.viewport.z * view.viewport.w) as u64, - usage: BufferUsages::STORAGE, - mapped_at_creation: false, - }); + let visibility_buffer = texture_cache.get( + &render_device, + TextureDescriptor { + label: Some("meshlet_visibility_buffer"), + size: Extent3d { + width: view.viewport.z, + height: view.viewport.w, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: TextureDimension::D2, + format: if not_shadow_view { + TextureFormat::R64Uint + } else { + TextureFormat::R32Uint + }, + usage: TextureUsages::STORAGE_ATOMIC | TextureUsages::STORAGE_BINDING, + view_formats: &[], + }, + ); let visibility_buffer_software_raster_indirect_args_first = render_device .create_buffer_with_data(&BufferInitDescriptor { @@ -490,51 +557,23 @@ pub fn prepare_meshlet_per_frame_resources( usage: BufferUsages::STORAGE | BufferUsages::INDIRECT, }); - let depth_pyramid_size = Extent3d { - width: view.viewport.z.div_ceil(2), - height: view.viewport.w.div_ceil(2), - depth_or_array_layers: 1, - }; - let depth_pyramid_mip_count = depth_pyramid_size.max_mips(TextureDimension::D2); - let depth_pyramid = texture_cache.get( + let depth_pyramid = ViewDepthPyramid::new( &render_device, - TextureDescriptor { - label: Some("meshlet_depth_pyramid"), - size: depth_pyramid_size, - mip_level_count: depth_pyramid_mip_count, - sample_count: 1, - dimension: TextureDimension::D2, - format: TextureFormat::R32Float, - usage: TextureUsages::STORAGE_BINDING | TextureUsages::TEXTURE_BINDING, - view_formats: &[], - }, + &mut texture_cache, + &resource_manager.depth_pyramid_dummy_texture, + view.viewport.zw(), + "meshlet_depth_pyramid", + "meshlet_depth_pyramid_texture_view", ); - let depth_pyramid_mips = array::from_fn(|i| { - if (i as u32) < depth_pyramid_mip_count { - depth_pyramid.texture.create_view(&TextureViewDescriptor { - label: 
Some("meshlet_depth_pyramid_texture_view"), - format: Some(TextureFormat::R32Float), - dimension: Some(TextureViewDimension::D2), - aspect: TextureAspect::All, - base_mip_level: i as u32, - mip_level_count: Some(1), - base_array_layer: 0, - array_layer_count: Some(1), - }) - } else { - resource_manager.depth_pyramid_dummy_texture.clone() - } - }); - let depth_pyramid_all_mips = depth_pyramid.default_view.clone(); let previous_depth_pyramid = match resource_manager.previous_depth_pyramids.get(&view_entity) { Some(texture_view) => texture_view.clone(), - None => depth_pyramid_all_mips.clone(), + None => depth_pyramid.all_mips.clone(), }; resource_manager .previous_depth_pyramids - .insert(view_entity, depth_pyramid_all_mips.clone()); + .insert(view_entity, depth_pyramid.all_mips.clone()); let material_depth = TextureDescriptor { label: Some("meshlet_material_depth"), @@ -562,19 +601,17 @@ pub fn prepare_meshlet_per_frame_resources( visibility_buffer_software_raster_indirect_args_second, visibility_buffer_hardware_raster_indirect_args_first, visibility_buffer_hardware_raster_indirect_args_second, - depth_pyramid_all_mips, - depth_pyramid_mips, - depth_pyramid_mip_count, + depth_pyramid, previous_depth_pyramid, material_depth: not_shadow_view .then(|| texture_cache.get(&render_device, material_depth)), view_size: view.viewport.zw(), raster_cluster_rightmost_slot: resource_manager.raster_cluster_rightmost_slot, + not_shadow_view, }); } } -#[allow(clippy::too_many_arguments)] pub fn prepare_meshlet_view_bind_groups( meshlet_mesh_manager: Res, resource_manager: Res, @@ -628,6 +665,16 @@ pub fn prepare_meshlet_view_bind_groups( &entries, ); + let clear_visibility_buffer = render_device.create_bind_group( + "meshlet_clear_visibility_buffer_bind_group", + if view_resources.not_shadow_view { + &resource_manager.clear_visibility_buffer_bind_group_layout + } else { + &resource_manager.clear_visibility_buffer_shadow_view_bind_group_layout + }, + 
&BindGroupEntries::single(&view_resources.visibility_buffer.default_view), + ); + let entries = BindGroupEntries::sequential(( cluster_meshlet_ids.as_entire_binding(), meshlet_mesh_manager.meshlet_bounding_spheres.binding(), @@ -676,7 +723,7 @@ pub fn prepare_meshlet_view_bind_groups( resource_manager .visibility_buffer_raster_clusters .as_entire_binding(), - &view_resources.depth_pyramid_all_mips, + &view_resources.depth_pyramid.all_mips, view_uniforms.clone(), previous_view_uniforms.clone(), )); @@ -686,25 +733,16 @@ pub fn prepare_meshlet_view_bind_groups( &entries, ); - let downsample_depth = render_device.create_bind_group( + let downsample_depth = view_resources.depth_pyramid.create_bind_group( + &render_device, "meshlet_downsample_depth_bind_group", - &resource_manager.downsample_depth_bind_group_layout, - &BindGroupEntries::sequential(( - view_resources.visibility_buffer.as_entire_binding(), - &view_resources.depth_pyramid_mips[0], - &view_resources.depth_pyramid_mips[1], - &view_resources.depth_pyramid_mips[2], - &view_resources.depth_pyramid_mips[3], - &view_resources.depth_pyramid_mips[4], - &view_resources.depth_pyramid_mips[5], - &view_resources.depth_pyramid_mips[6], - &view_resources.depth_pyramid_mips[7], - &view_resources.depth_pyramid_mips[8], - &view_resources.depth_pyramid_mips[9], - &view_resources.depth_pyramid_mips[10], - &view_resources.depth_pyramid_mips[11], - &resource_manager.depth_pyramid_sampler, - )), + if view_resources.not_shadow_view { + &resource_manager.downsample_depth_bind_group_layout + } else { + &resource_manager.downsample_depth_shadow_view_bind_group_layout + }, + &view_resources.visibility_buffer.default_view, + &resource_manager.depth_pyramid_sampler, ); let entries = BindGroupEntries::sequential(( @@ -720,24 +758,32 @@ pub fn prepare_meshlet_view_bind_groups( resource_manager .software_raster_cluster_count .as_entire_binding(), - view_resources.visibility_buffer.as_entire_binding(), + 
&view_resources.visibility_buffer.default_view, view_uniforms.clone(), )); let visibility_buffer_raster = render_device.create_bind_group( "meshlet_visibility_raster_buffer_bind_group", - &resource_manager.visibility_buffer_raster_bind_group_layout, + if view_resources.not_shadow_view { + &resource_manager.visibility_buffer_raster_bind_group_layout + } else { + &resource_manager.visibility_buffer_raster_shadow_view_bind_group_layout + }, &entries, ); let resolve_depth = render_device.create_bind_group( "meshlet_resolve_depth_bind_group", - &resource_manager.resolve_depth_bind_group_layout, - &BindGroupEntries::single(view_resources.visibility_buffer.as_entire_binding()), + if view_resources.not_shadow_view { + &resource_manager.resolve_depth_bind_group_layout + } else { + &resource_manager.resolve_depth_shadow_view_bind_group_layout + }, + &BindGroupEntries::single(&view_resources.visibility_buffer.default_view), ); let resolve_material_depth = view_resources.material_depth.as_ref().map(|_| { let entries = BindGroupEntries::sequential(( - view_resources.visibility_buffer.as_entire_binding(), + &view_resources.visibility_buffer.default_view, cluster_instance_ids.as_entire_binding(), instance_manager.instance_material_ids.binding().unwrap(), )); @@ -750,7 +796,7 @@ pub fn prepare_meshlet_view_bind_groups( let material_shade = view_resources.material_depth.as_ref().map(|_| { let entries = BindGroupEntries::sequential(( - view_resources.visibility_buffer.as_entire_binding(), + &view_resources.visibility_buffer.default_view, cluster_meshlet_ids.as_entire_binding(), meshlet_mesh_manager.meshlets.binding(), meshlet_mesh_manager.indices.binding(), @@ -802,6 +848,7 @@ pub fn prepare_meshlet_view_bind_groups( commands.entity(view_entity).insert(MeshletViewBindGroups { first_node: Arc::clone(&first_node), fill_cluster_buffers, + clear_visibility_buffer, culling_first, culling_second, downsample_depth, diff --git 
a/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl b/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl index fb2e09005175f..3525d38e6da95 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_hardware_raster.wgsl @@ -54,16 +54,13 @@ fn vertex(@builtin(instance_index) instance_index: u32, @builtin(vertex_index) v @fragment fn fragment(vertex_output: VertexOutput) { - let frag_coord_1d = u32(vertex_output.position.y) * u32(view.viewport.z) + u32(vertex_output.position.x); - -#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT let depth = bitcast(vertex_output.position.z); +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT let visibility = (u64(depth) << 32u) | u64(vertex_output.packed_ids); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], visibility); #else - let depth = bitcast(vertex_output.position.z); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], depth); + let visibility = depth; #endif + textureAtomicMax(meshlet_visibility_buffer, vec2(vertex_output.position.xy), visibility); } fn dummy_vertex() -> VertexOutput { diff --git a/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs b/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs index aa549ae679924..20054d2d2f53a 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_raster_node.rs @@ -9,7 +9,7 @@ use bevy_ecs::{ query::QueryState, world::{FromWorld, World}, }; -use bevy_math::ops; +use bevy_math::{ops, UVec2}; use bevy_render::{ camera::ExtractedCamera, render_graph::{Node, NodeRunError, RenderGraphContext}, @@ -77,6 +77,8 @@ impl Node for MeshletVisibilityBufferRasterPassNode { let Some(( fill_cluster_buffers_pipeline, + clear_visibility_buffer_pipeline, + clear_visibility_buffer_shadow_view_pipeline, culling_first_pipeline, culling_second_pipeline, downsample_depth_first_pipeline, @@ -84,10 
+86,10 @@ impl Node for MeshletVisibilityBufferRasterPassNode { downsample_depth_first_shadow_view_pipeline, downsample_depth_second_shadow_view_pipeline, visibility_buffer_software_raster_pipeline, - visibility_buffer_software_raster_depth_only_pipeline, + visibility_buffer_software_raster_shadow_view_pipeline, visibility_buffer_hardware_raster_pipeline, - visibility_buffer_hardware_raster_depth_only_pipeline, - visibility_buffer_hardware_raster_depth_only_unclipped_pipeline, + visibility_buffer_hardware_raster_shadow_view_pipeline, + visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline, resolve_depth_pipeline, resolve_depth_shadow_view_pipeline, resolve_material_depth_pipeline, @@ -107,11 +109,6 @@ impl Node for MeshletVisibilityBufferRasterPassNode { render_context .command_encoder() .push_debug_group("meshlet_visibility_buffer_raster"); - render_context.command_encoder().clear_buffer( - &meshlet_view_resources.second_pass_candidates_buffer, - 0, - None, - ); if first_node { fill_cluster_buffers_pass( render_context, @@ -120,6 +117,17 @@ impl Node for MeshletVisibilityBufferRasterPassNode { meshlet_view_resources.scene_instance_count, ); } + clear_visibility_buffer_pass( + render_context, + &meshlet_view_bind_groups.clear_visibility_buffer, + clear_visibility_buffer_pipeline, + meshlet_view_resources.view_size, + ); + render_context.command_encoder().clear_buffer( + &meshlet_view_resources.second_pass_candidates_buffer, + 0, + None, + ); cull_pass( "culling_first", render_context, @@ -149,10 +157,11 @@ impl Node for MeshletVisibilityBufferRasterPassNode { Some(camera), meshlet_view_resources.raster_cluster_rightmost_slot, ); - downsample_depth( + meshlet_view_resources.depth_pyramid.downsample_depth( + "downsample_depth", render_context, - meshlet_view_resources, - meshlet_view_bind_groups, + meshlet_view_resources.view_size, + &meshlet_view_bind_groups.downsample_depth, downsample_depth_first_pipeline, downsample_depth_second_pipeline, ); @@ -188,7 
+197,6 @@ impl Node for MeshletVisibilityBufferRasterPassNode { resolve_depth( render_context, view_depth.get_attachment(StoreOp::Store), - meshlet_view_resources, meshlet_view_bind_groups, resolve_depth_pipeline, camera, @@ -200,10 +208,11 @@ impl Node for MeshletVisibilityBufferRasterPassNode { resolve_material_depth_pipeline, camera, ); - downsample_depth( + meshlet_view_resources.depth_pyramid.downsample_depth( + "downsample_depth", render_context, - meshlet_view_resources, - meshlet_view_bind_groups, + meshlet_view_resources.view_size, + &meshlet_view_bind_groups.downsample_depth, downsample_depth_first_pipeline, downsample_depth_second_pipeline, ); @@ -224,15 +233,21 @@ impl Node for MeshletVisibilityBufferRasterPassNode { let shadow_visibility_buffer_hardware_raster_pipeline = if let LightEntity::Directional { .. } = light_type { - visibility_buffer_hardware_raster_depth_only_unclipped_pipeline + visibility_buffer_hardware_raster_shadow_view_unclipped_pipeline } else { - visibility_buffer_hardware_raster_depth_only_pipeline + visibility_buffer_hardware_raster_shadow_view_pipeline }; render_context.command_encoder().push_debug_group(&format!( "meshlet_visibility_buffer_raster: {}", shadow_view.pass_name )); + clear_visibility_buffer_pass( + render_context, + &meshlet_view_bind_groups.clear_visibility_buffer, + clear_visibility_buffer_shadow_view_pipeline, + meshlet_view_resources.view_size, + ); render_context.command_encoder().clear_buffer( &meshlet_view_resources.second_pass_candidates_buffer, 0, @@ -262,15 +277,16 @@ impl Node for MeshletVisibilityBufferRasterPassNode { &meshlet_view_resources.dummy_render_target.default_view, meshlet_view_bind_groups, view_offset, - visibility_buffer_software_raster_depth_only_pipeline, + visibility_buffer_software_raster_shadow_view_pipeline, shadow_visibility_buffer_hardware_raster_pipeline, None, meshlet_view_resources.raster_cluster_rightmost_slot, ); - downsample_depth( + 
meshlet_view_resources.depth_pyramid.downsample_depth( + "downsample_depth", render_context, - meshlet_view_resources, - meshlet_view_bind_groups, + meshlet_view_resources.view_size, + &meshlet_view_bind_groups.downsample_depth, downsample_depth_first_shadow_view_pipeline, downsample_depth_second_shadow_view_pipeline, ); @@ -298,7 +314,7 @@ impl Node for MeshletVisibilityBufferRasterPassNode { &meshlet_view_resources.dummy_render_target.default_view, meshlet_view_bind_groups, view_offset, - visibility_buffer_software_raster_depth_only_pipeline, + visibility_buffer_software_raster_shadow_view_pipeline, shadow_visibility_buffer_hardware_raster_pipeline, None, meshlet_view_resources.raster_cluster_rightmost_slot, @@ -306,15 +322,15 @@ impl Node for MeshletVisibilityBufferRasterPassNode { resolve_depth( render_context, shadow_view.depth_attachment.get_attachment(StoreOp::Store), - meshlet_view_resources, meshlet_view_bind_groups, resolve_depth_shadow_view_pipeline, camera, ); - downsample_depth( + meshlet_view_resources.depth_pyramid.downsample_depth( + "downsample_depth", render_context, - meshlet_view_resources, - meshlet_view_bind_groups, + meshlet_view_resources.view_size, + &meshlet_view_bind_groups.downsample_depth, downsample_depth_first_shadow_view_pipeline, downsample_depth_second_shadow_view_pipeline, ); @@ -358,7 +374,29 @@ fn fill_cluster_buffers_pass( ); } -#[allow(clippy::too_many_arguments)] +// TODO: Replace this with vkCmdClearColorImage once wgpu supports it +fn clear_visibility_buffer_pass( + render_context: &mut RenderContext, + clear_visibility_buffer_bind_group: &BindGroup, + clear_visibility_buffer_pipeline: &ComputePipeline, + view_size: UVec2, +) { + let command_encoder = render_context.command_encoder(); + let mut clear_visibility_buffer_pass = + command_encoder.begin_compute_pass(&ComputePassDescriptor { + label: Some("clear_visibility_buffer"), + timestamp_writes: None, + }); + 
clear_visibility_buffer_pass.set_pipeline(clear_visibility_buffer_pipeline); + clear_visibility_buffer_pass.set_push_constants(0, bytemuck::bytes_of(&view_size)); + clear_visibility_buffer_pass.set_bind_group(0, clear_visibility_buffer_bind_group, &[]); + clear_visibility_buffer_pass.dispatch_workgroups( + view_size.x.div_ceil(16), + view_size.y.div_ceil(16), + 1, + ); +} + fn cull_pass( label: &'static str, render_context: &mut RenderContext, @@ -405,7 +443,6 @@ fn cull_pass( } } -#[allow(clippy::too_many_arguments)] fn raster_pass( first_pass: bool, render_context: &mut RenderContext, @@ -473,43 +510,9 @@ fn raster_pass( hardware_pass.draw_indirect(visibility_buffer_hardware_raster_indirect_args, 0); } -fn downsample_depth( - render_context: &mut RenderContext, - meshlet_view_resources: &MeshletViewResources, - meshlet_view_bind_groups: &MeshletViewBindGroups, - downsample_depth_first_pipeline: &ComputePipeline, - downsample_depth_second_pipeline: &ComputePipeline, -) { - let command_encoder = render_context.command_encoder(); - let mut downsample_pass = command_encoder.begin_compute_pass(&ComputePassDescriptor { - label: Some("downsample_depth"), - timestamp_writes: None, - }); - downsample_pass.set_pipeline(downsample_depth_first_pipeline); - downsample_pass.set_push_constants( - 0, - bytemuck::cast_slice(&[ - meshlet_view_resources.depth_pyramid_mip_count, - meshlet_view_resources.view_size.x, - ]), - ); - downsample_pass.set_bind_group(0, &meshlet_view_bind_groups.downsample_depth, &[]); - downsample_pass.dispatch_workgroups( - meshlet_view_resources.view_size.x.div_ceil(64), - meshlet_view_resources.view_size.y.div_ceil(64), - 1, - ); - - if meshlet_view_resources.depth_pyramid_mip_count >= 7 { - downsample_pass.set_pipeline(downsample_depth_second_pipeline); - downsample_pass.dispatch_workgroups(1, 1, 1); - } -} - fn resolve_depth( render_context: &mut RenderContext, depth_stencil_attachment: RenderPassDepthStencilAttachment, - meshlet_view_resources: 
&MeshletViewResources, meshlet_view_bind_groups: &MeshletViewBindGroups, resolve_depth_pipeline: &RenderPipeline, camera: &ExtractedCamera, @@ -525,11 +528,6 @@ fn resolve_depth( resolve_pass.set_camera_viewport(viewport); } resolve_pass.set_render_pipeline(resolve_depth_pipeline); - resolve_pass.set_push_constants( - ShaderStages::FRAGMENT, - 0, - &meshlet_view_resources.view_size.x.to_le_bytes(), - ); resolve_pass.set_bind_group(0, &meshlet_view_bind_groups.resolve_depth, &[]); resolve_pass.draw(0..3, 0..1); } @@ -563,11 +561,6 @@ fn resolve_material_depth( resolve_pass.set_camera_viewport(viewport); } resolve_pass.set_render_pipeline(resolve_material_depth_pipeline); - resolve_pass.set_push_constants( - ShaderStages::FRAGMENT, - 0, - &meshlet_view_resources.view_size.x.to_le_bytes(), - ); resolve_pass.set_bind_group(0, resolve_material_depth_bind_group, &[]); resolve_pass.draw(0..3, 0..1); } diff --git a/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl b/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl index f28645013d1ec..4c56c5874ae2f 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_resolve.wgsl @@ -104,8 +104,7 @@ struct VertexOutput { /// Load the visibility buffer texture and resolve it into a VertexOutput. 
fn resolve_vertex_output(frag_coord: vec4) -> VertexOutput { - let frag_coord_1d = u32(frag_coord.y) * u32(view.viewport.z) + u32(frag_coord.x); - let packed_ids = u32(meshlet_visibility_buffer[frag_coord_1d]); // TODO: Might be faster to load the correct u32 directly + let packed_ids = u32(textureLoad(meshlet_visibility_buffer, vec2(frag_coord.xy)).r); let cluster_id = packed_ids >> 7u; let meshlet_id = meshlet_cluster_meshlet_ids[cluster_id]; var meshlet = meshlets[meshlet_id]; diff --git a/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl b/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl index 941c31f0939ca..60f6f1b3ea658 100644 --- a/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl +++ b/crates/bevy_pbr/src/meshlet/visibility_buffer_software_raster.wgsl @@ -167,16 +167,13 @@ fn rasterize_cluster( } fn write_visibility_buffer_pixel(x: f32, y: f32, z: f32, packed_ids: u32) { - let frag_coord_1d = u32(y * view.viewport.z + x); - -#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT let depth = bitcast(z); +#ifdef MESHLET_VISIBILITY_BUFFER_RASTER_PASS_OUTPUT let visibility = (u64(depth) << 32u) | u64(packed_ids); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], visibility); #else - let depth = bitcast(z); - atomicMax(&meshlet_visibility_buffer[frag_coord_1d], depth); + let visibility = depth; #endif + textureAtomicMax(meshlet_visibility_buffer, vec2(u32(x), u32(y)), visibility); } fn edge_function(a: vec2, b: vec2, c: vec2) -> f32 { diff --git a/crates/bevy_pbr/src/parallax.rs b/crates/bevy_pbr/src/parallax.rs index e458f88146701..0a847b7c2513e 100644 --- a/crates/bevy_pbr/src/parallax.rs +++ b/crates/bevy_pbr/src/parallax.rs @@ -1,4 +1,4 @@ -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// The [parallax mapping] method to use to compute depth based on the /// material's [`depth_map`]. 
@@ -12,6 +12,7 @@ use bevy_reflect::Reflect; /// [`depth_map`]: crate::StandardMaterial::depth_map /// [parallax mapping]: https://en.wikipedia.org/wiki/Parallax_mapping #[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Reflect)] +#[reflect(Default, Clone, PartialEq)] pub enum ParallaxMappingMethod { /// A simple linear interpolation, using a single texture sample. /// diff --git a/crates/bevy_pbr/src/pbr_material.rs b/crates/bevy_pbr/src/pbr_material.rs index 2e93b68094e17..fd1babd8ecaf8 100644 --- a/crates/bevy_pbr/src/pbr_material.rs +++ b/crates/bevy_pbr/src/pbr_material.rs @@ -4,6 +4,7 @@ use bevy_math::{Affine2, Affine3, Mat2, Mat3, Vec2, Vec3, Vec4}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ mesh::MeshVertexBufferLayoutRef, render_asset::RenderAssets, render_resource::*, + texture::GpuImage, }; use bitflags::bitflags; @@ -16,29 +17,29 @@ use crate::{deferred::DEFAULT_PBR_DEFERRED_LIGHTING_PASS_ID, *}; /// [`bevy_render::mesh::Mesh::ATTRIBUTE_UV_1`]. /// The default is [`UvChannel::Uv0`]. #[derive(Reflect, Default, Debug, Clone, PartialEq, Eq)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone, PartialEq)] pub enum UvChannel { #[default] Uv0, Uv1, } -/// A material with "standard" properties used in PBR lighting -/// Standard property values with pictures here +/// A material with "standard" properties used in PBR lighting. +/// Standard property values with pictures here: /// . /// /// May be created directly from a [`Color`] or an [`Image`]. #[derive(Asset, AsBindGroup, Reflect, Debug, Clone)] #[bind_group_data(StandardMaterialKey)] -#[uniform(0, StandardMaterialUniform)] -#[bindless(16)] -#[reflect(Default, Debug)] +#[data(0, StandardMaterialUniform, binding_array(10))] +#[bindless(index_table(range(0..31)))] +#[reflect(Default, Debug, Clone)] pub struct StandardMaterial { /// The color of the surface of the material before lighting. 
/// /// Doubles as diffuse albedo for non-metallic, specular for metallic and a mix for everything /// in between. If used together with a `base_color_texture`, this is factored into the final - /// base color as `base_color * base_color_texture_value` + /// base color as `base_color * base_color_texture_value`. /// /// Defaults to [`Color::WHITE`]. pub base_color: Color, @@ -183,7 +184,20 @@ pub struct StandardMaterial { #[doc(alias = "specular_intensity")] pub reflectance: f32, - /// The amount of light transmitted _diffusely_ through the material (i.e. “translucency”) + /// A color with which to modulate the [`StandardMaterial::reflectance`] for + /// non-metals. + /// + /// The specular highlights and reflection are tinted with this color. Note + /// that it has no effect for non-metals. + /// + /// This feature is currently unsupported in the deferred rendering path, in + /// order to reduce the size of the geometry buffers. + /// + /// Defaults to [`Color::WHITE`]. + #[doc(alias = "specular_color")] + pub specular_tint: Color, + + /// The amount of light transmitted _diffusely_ through the material (i.e. “translucency”). /// /// Implemented as a second, flipped [Lambertian diffuse](https://en.wikipedia.org/wiki/Lambertian_reflectance) lobe, /// which provides an inexpensive but plausible approximation of translucency for thin dielectric objects (e.g. paper, @@ -221,7 +235,7 @@ pub struct StandardMaterial { #[cfg(feature = "pbr_transmission_textures")] pub diffuse_transmission_texture: Option>, - /// The amount of light transmitted _specularly_ through the material (i.e. via refraction) + /// The amount of light transmitted _specularly_ through the material (i.e. via refraction). /// /// - When set to `0.0` (the default) no light is transmitted. /// - When set to `1.0` all light is transmitted through the material. @@ -236,13 +250,13 @@ pub struct StandardMaterial { /// with distortion and blur effects. 
/// /// - [`Camera3d::screen_space_specular_transmission_steps`](bevy_core_pipeline::core_3d::Camera3d::screen_space_specular_transmission_steps) can be used to enable transmissive objects - /// to be seen through other transmissive objects, at the cost of additional draw calls and texture copies; (Use with caution!) - /// - If a simplified approximation of specular transmission using only environment map lighting is sufficient, consider setting - /// [`Camera3d::screen_space_specular_transmission_steps`](bevy_core_pipeline::core_3d::Camera3d::screen_space_specular_transmission_steps) to `0`. + /// to be seen through other transmissive objects, at the cost of additional draw calls and texture copies; (Use with caution!) + /// - If a simplified approximation of specular transmission using only environment map lighting is sufficient, consider setting + /// [`Camera3d::screen_space_specular_transmission_steps`](bevy_core_pipeline::core_3d::Camera3d::screen_space_specular_transmission_steps) to `0`. /// - If purely diffuse light transmission is needed, (i.e. “translucency”) consider using [`StandardMaterial::diffuse_transmission`] instead, - /// for a much less expensive effect. + /// for a much less expensive effect. /// - Specular transmission is rendered before alpha blending, so any material with [`AlphaMode::Blend`], [`AlphaMode::Premultiplied`], [`AlphaMode::Add`] or [`AlphaMode::Multiply`] - /// won't be visible through specular transmissive materials. + /// won't be visible through specular transmissive materials. #[doc(alias = "refraction")] pub specular_transmission: f32, @@ -401,6 +415,54 @@ pub struct StandardMaterial { #[dependency] pub occlusion_texture: Option>, + /// The UV channel to use for the [`StandardMaterial::specular_texture`]. + /// + /// Defaults to [`UvChannel::Uv0`]. + #[cfg(feature = "pbr_specular_textures")] + pub specular_channel: UvChannel, + + /// A map that specifies reflectance for non-metallic materials. 
+ /// + /// Alpha values from [0.0, 1.0] in this texture are linearly mapped to + /// reflectance values of [0.0, 0.5] and multiplied by the constant + /// [`StandardMaterial::reflectance`] value. This follows the + /// `KHR_materials_specular` specification. The map will have no effect if + /// the material is fully metallic. + /// + /// When using this map, you may wish to set the + /// [`StandardMaterial::reflectance`] value to 2.0 so that this map can + /// express the full [0.0, 1.0] range of values. + /// + /// Note that, because the reflectance is stored in the alpha channel, and + /// the [`StandardMaterial::specular_tint_texture`] has no alpha value, it + /// may be desirable to pack the values together and supply the same + /// texture to both fields. + #[cfg_attr(feature = "pbr_specular_textures", texture(27))] + #[cfg_attr(feature = "pbr_specular_textures", sampler(28))] + #[cfg(feature = "pbr_specular_textures")] + pub specular_texture: Option>, + + /// The UV channel to use for the + /// [`StandardMaterial::specular_tint_texture`]. + /// + /// Defaults to [`UvChannel::Uv0`]. + #[cfg(feature = "pbr_specular_textures")] + pub specular_tint_channel: UvChannel, + + /// A map that specifies color adjustment to be applied to the specular + /// reflection for non-metallic materials. + /// + /// The RGB values of this texture modulate the + /// [`StandardMaterial::specular_tint`] value. See the documentation for + /// that field for more information. + /// + /// Like the fixed specular tint value, this texture map isn't supported in + /// the deferred renderer. + #[cfg_attr(feature = "pbr_specular_textures", texture(29))] + #[cfg_attr(feature = "pbr_specular_textures", sampler(30))] + #[cfg(feature = "pbr_specular_textures")] + pub specular_tint_texture: Option>, + /// An extra thin translucent layer on top of the main PBR layer. This is /// typically used for painted surfaces. 
/// @@ -522,7 +584,7 @@ pub struct StandardMaterial { /// [`StandardMaterial::anisotropy_rotation`] to vary across the mesh. /// /// The [`KHR_materials_anisotropy` specification] defines the format that - /// this texture must take. To summarize: The direction vector is encoded in + /// this texture must take. To summarize: the direction vector is encoded in /// the red and green channels, while the strength is encoded in the blue /// channels. For the direction vector, the red and green channels map the /// color range [0, 1] to the vector range [-1, 1]. The direction vector @@ -569,7 +631,7 @@ pub struct StandardMaterial { /// /// [`Mesh`]: bevy_render::mesh::Mesh // TODO: include this in reflection somehow (maybe via remote types like serde https://serde.rs/remote-derive.html) - #[reflect(ignore)] + #[reflect(ignore, clone)] pub cull_mode: Option, /// Whether to apply only the base color to this material. @@ -801,6 +863,15 @@ impl Default for StandardMaterial { occlusion_texture: None, normal_map_channel: UvChannel::Uv0, normal_map_texture: None, + #[cfg(feature = "pbr_specular_textures")] + specular_channel: UvChannel::Uv0, + #[cfg(feature = "pbr_specular_textures")] + specular_texture: None, + specular_tint: Color::WHITE, + #[cfg(feature = "pbr_specular_textures")] + specular_tint_channel: UvChannel::Uv0, + #[cfg(feature = "pbr_specular_textures")] + specular_tint_texture: None, clearcoat: 0.0, clearcoat_perceptual_roughness: 0.5, #[cfg(feature = "pbr_multi_layer_material_textures")] @@ -887,6 +958,8 @@ bitflags::bitflags! 
{ const CLEARCOAT_ROUGHNESS_TEXTURE = 1 << 15; const CLEARCOAT_NORMAL_TEXTURE = 1 << 16; const ANISOTROPY_TEXTURE = 1 << 17; + const SPECULAR_TEXTURE = 1 << 18; + const SPECULAR_TINT_TEXTURE = 1 << 19; const ALPHA_MODE_RESERVED_BITS = Self::ALPHA_MODE_MASK_BITS << Self::ALPHA_MODE_SHIFT_BITS; // ← Bitmask reserving bits for the `AlphaMode` const ALPHA_MODE_OPAQUE = 0 << Self::ALPHA_MODE_SHIFT_BITS; // ← Values are just sequential values bitshifted into const ALPHA_MODE_MASK = 1 << Self::ALPHA_MODE_SHIFT_BITS; // the bitmask, and can range from 0 to 7. @@ -918,14 +991,14 @@ pub struct StandardMaterialUniform { pub attenuation_color: Vec4, /// The transform applied to the UVs corresponding to `ATTRIBUTE_UV_0` on the mesh before sampling. Default is identity. pub uv_transform: Mat3, + /// Specular intensity for non-metals on a linear scale of [0.0, 1.0] + /// defaults to 0.5 which is mapped to 4% reflectance in the shader + pub reflectance: Vec3, /// Linear perceptual roughness, clamped to [0.089, 1.0] in the shader /// Defaults to minimum of 0.089 pub roughness: f32, /// From [0.0, 1.0], dielectric to pure metallic pub metallic: f32, - /// Specular intensity for non-metals on a linear scale of [0.0, 1.0] - /// defaults to 0.5 which is mapped to 4% reflectance in the shader - pub reflectance: f32, /// Amount of diffuse light transmitted through the material pub diffuse_transmission: f32, /// Amount of specular light transmitted through the material @@ -1011,6 +1084,16 @@ impl AsBindGroupShaderType for StandardMaterial { } } + #[cfg(feature = "pbr_specular_textures")] + { + if self.specular_texture.is_some() { + flags |= StandardMaterialFlags::SPECULAR_TEXTURE; + } + if self.specular_tint_texture.is_some() { + flags |= StandardMaterialFlags::SPECULAR_TINT_TEXTURE; + } + } + #[cfg(feature = "pbr_multi_layer_material_textures")] { if self.clearcoat_texture.is_some() { @@ -1075,7 +1158,7 @@ impl AsBindGroupShaderType for StandardMaterial { emissive, roughness: 
self.perceptual_roughness, metallic: self.metallic, - reflectance: self.reflectance, + reflectance: LinearRgba::from(self.specular_tint).to_vec3() * self.reflectance, clearcoat: self.clearcoat, clearcoat_perceptual_roughness: self.clearcoat_perceptual_roughness, anisotropy_strength: self.anisotropy_strength, @@ -1125,6 +1208,8 @@ bitflags! { const CLEARCOAT_UV = 0x040000; const CLEARCOAT_ROUGHNESS_UV = 0x080000; const CLEARCOAT_NORMAL_UV = 0x100000; + const SPECULAR_UV = 0x200000; + const SPECULAR_TINT_UV = 0x400000; const DEPTH_BIAS = 0xffffffff_00000000; } } @@ -1221,6 +1306,18 @@ impl From<&StandardMaterial> for StandardMaterialKey { ); } + #[cfg(feature = "pbr_specular_textures")] + { + key.set( + StandardMaterialKey::SPECULAR_UV, + material.specular_channel != UvChannel::Uv0, + ); + key.set( + StandardMaterialKey::SPECULAR_TINT_UV, + material.specular_tint_channel != UvChannel::Uv0, + ); + } + #[cfg(feature = "pbr_multi_layer_material_textures")] { key.set( @@ -1238,7 +1335,9 @@ impl From<&StandardMaterial> for StandardMaterialKey { } key.insert(StandardMaterialKey::from_bits_retain( - (material.depth_bias as u64) << STANDARD_MATERIAL_KEY_DEPTH_BIAS_SHIFT, + // Casting to i32 first to ensure the full i32 range is preserved. 
+ // (wgpu expects the depth_bias as an i32 when this is extracted in a later step) + (material.depth_bias as i32 as u64) << STANDARD_MATERIAL_KEY_DEPTH_BIAS_SHIFT, )); key } @@ -1390,7 +1489,15 @@ impl Material for StandardMaterial { ), ( StandardMaterialKey::ANISOTROPY_UV, - "STANDARD_MATERIAL_ANISOTROPY_UV", + "STANDARD_MATERIAL_ANISOTROPY_UV_B", + ), + ( + StandardMaterialKey::SPECULAR_UV, + "STANDARD_MATERIAL_SPECULAR_UV_B", + ), + ( + StandardMaterialKey::SPECULAR_TINT_UV, + "STANDARD_MATERIAL_SPECULAR_TINT_UV_B", ), ] { if key.bind_group_data.intersects(flags) { diff --git a/crates/bevy_pbr/src/prepass/mod.rs b/crates/bevy_pbr/src/prepass/mod.rs index ac5cbeca4216b..77f874c168ddd 100644 --- a/crates/bevy_pbr/src/prepass/mod.rs +++ b/crates/bevy_pbr/src/prepass/mod.rs @@ -1,15 +1,29 @@ mod prepass_bindings; -use crate::material_bind_groups::MaterialBindGroupAllocator; +use crate::{ + alpha_mode_pipeline_key, binding_arrays_are_usable, buffer_layout, + collect_meshes_for_gpu_building, material_bind_groups::MaterialBindGroupAllocator, + queue_material_meshes, set_mesh_motion_vector_flags, setup_morph_and_skinning_defs, skin, + DrawMesh, EntitySpecializationTicks, Material, MaterialPipeline, MaterialPipelineKey, + MeshLayouts, MeshPipeline, MeshPipelineKey, OpaqueRendererMethod, PreparedMaterial, + RenderLightmaps, RenderMaterialInstances, RenderMeshInstanceFlags, RenderMeshInstances, + RenderPhaseType, SetMaterialBindGroup, SetMeshBindGroup, ShadowView, StandardMaterial, +}; +use bevy_app::{App, Plugin, PreUpdate}; use bevy_render::{ - mesh::{Mesh3d, MeshVertexBufferLayoutRef, RenderMesh}, + alpha::AlphaMode, + batching::gpu_preprocessing::GpuPreprocessingSupport, + mesh::{allocator::MeshAllocator, Mesh3d, MeshVertexBufferLayoutRef, RenderMesh}, + render_asset::prepare_assets, render_resource::binding_types::uniform_buffer, + renderer::RenderAdapter, sync_world::RenderEntity, - view::{RenderVisibilityRanges, VISIBILITY_RANGES_STORAGE_BUFFER_COUNT}, + 
view::{RenderVisibilityRanges, RetainedViewEntity, VISIBILITY_RANGES_STORAGE_BUFFER_COUNT}, + ExtractSchedule, Render, RenderApp, RenderDebugFlags, RenderSet, }; pub use prepass_bindings::*; -use bevy_asset::{load_internal_asset, AssetServer}; +use bevy_asset::{load_internal_asset, weak_handle, AssetServer, Handle}; use bevy_core_pipeline::{ core_3d::CORE_3D_DEPTH_FORMAT, deferred::*, prelude::Camera3d, prepass::*, }; @@ -32,26 +46,34 @@ use bevy_render::{ Extract, }; use bevy_transform::prelude::GlobalTransform; -use bevy_utils::tracing::error; +use tracing::{error, warn}; #[cfg(feature = "meshlet")] use crate::meshlet::{ prepare_material_meshlet_meshes_prepass, queue_material_meshlet_meshes, InstanceManager, MeshletMesh3d, }; -use crate::*; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::Tick; +use bevy_ecs::system::SystemChangeTick; +use bevy_platform::collections::HashMap; +use bevy_render::sync_world::MainEntityHashMap; use bevy_render::view::RenderVisibleEntities; +use bevy_render::RenderSet::{PrepareAssets, PrepareResources}; use core::{hash::Hash, marker::PhantomData}; -pub const PREPASS_SHADER_HANDLE: Handle = Handle::weak_from_u128(921124473254008983); +pub const PREPASS_SHADER_HANDLE: Handle = + weak_handle!("ce810284-f1ae-4439-ab2e-0d6b204b6284"); pub const PREPASS_BINDINGS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(5533152893177403494); + weak_handle!("3e83537e-ae17-489c-a18a-999bc9c1d252"); -pub const PREPASS_UTILS_SHADER_HANDLE: Handle = Handle::weak_from_u128(4603948296044544); +pub const PREPASS_UTILS_SHADER_HANDLE: Handle = + weak_handle!("02e4643a-a14b-48eb-a339-0c47aeab0d7e"); -pub const PREPASS_IO_SHADER_HANDLE: Handle = Handle::weak_from_u128(81212356509530944); +pub const PREPASS_IO_SHADER_HANDLE: Handle = + weak_handle!("1c065187-c99b-4b7c-ba59-c1575482d2c9"); /// Sets up everything required to use the prepass pipeline. /// @@ -123,11 +145,19 @@ where /// Sets up the prepasses for a [`Material`]. 
/// /// This depends on the [`PrepassPipelinePlugin`]. -pub struct PrepassPlugin(PhantomData); +pub struct PrepassPlugin { + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, + pub phantom: PhantomData, +} -impl Default for PrepassPlugin { - fn default() -> Self { - Self(Default::default()) +impl PrepassPlugin { + /// Creates a new [`PrepassPlugin`] with the given debug flags. + pub fn new(debug_flags: RenderDebugFlags) -> Self { + PrepassPlugin { + debug_flags, + phantom: PhantomData, + } } } @@ -153,8 +183,10 @@ where ), ) .add_plugins(( - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new( + self.debug_flags, + ), )); } @@ -167,22 +199,34 @@ where .add_systems(ExtractSchedule, extract_camera_previous_view_data) .add_systems( Render, - prepare_previous_view_uniforms.in_set(RenderSet::PrepareResources), + prepare_previous_view_uniforms.in_set(PrepareResources), ); } render_app + .init_resource::() + .init_resource::() + .init_resource::>() .add_render_command::>() .add_render_command::>() .add_render_command::>() .add_render_command::>() .add_systems( Render, - queue_prepass_material_meshes:: - .in_set(RenderSet::QueueMeshes) - .after(prepare_assets::>) - // queue_material_meshes only writes to `material_bind_group_id`, which `queue_prepass_material_meshes` doesn't read - .ambiguous_with(queue_material_meshes::), + ( + check_prepass_views_need_specialization.in_set(PrepareAssets), + specialize_prepass_material_meshes:: + .in_set(RenderSet::PrepareMeshes) + .after(prepare_assets::>) + .after(prepare_assets::) + .after(collect_meshes_for_gpu_building) + .after(set_mesh_motion_vector_flags), + queue_prepass_material_meshes:: + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::>) + // queue_material_meshes only writes to `material_bind_group_id`, which `queue_prepass_material_meshes` 
doesn't read + .ambiguous_with(queue_material_meshes::), + ), ); #[cfg(feature = "meshlet")] @@ -200,25 +244,21 @@ where #[derive(Resource)] struct AnyPrepassPluginLoaded; -#[cfg(not(feature = "meshlet"))] -type PreviousViewFilter = (With, With); -#[cfg(feature = "meshlet")] -type PreviousViewFilter = Or<(With, With)>; - pub fn update_previous_view_data( mut commands: Commands, - query: Query<(Entity, &Camera, &GlobalTransform), PreviousViewFilter>, + query: Query<(Entity, &Camera, &GlobalTransform), Or<(With, With)>>, ) { for (entity, camera, camera_transform) in &query { let view_from_world = camera_transform.compute_matrix().inverse(); commands.entity(entity).try_insert(PreviousViewData { view_from_world, clip_from_world: camera.clip_from_view() * view_from_world, + clip_from_view: camera.clip_from_view(), }); } } -#[derive(Component, Default)] +#[derive(Component, PartialEq, Default)] pub struct PreviousGlobalTransform(pub Affine3A); #[cfg(not(feature = "meshlet"))] @@ -228,22 +268,35 @@ type PreviousMeshFilter = Or<(With, With)>; pub fn update_mesh_previous_global_transforms( mut commands: Commands, - views: Query<&Camera, PreviousViewFilter>, - meshes: Query<(Entity, &GlobalTransform), PreviousMeshFilter>, + views: Query<&Camera, Or<(With, With)>>, + new_meshes: Query< + (Entity, &GlobalTransform), + (PreviousMeshFilter, Without), + >, + mut meshes: Query<(&GlobalTransform, &mut PreviousGlobalTransform), PreviousMeshFilter>, ) { let should_run = views.iter().any(|camera| camera.is_active); if should_run { - for (entity, transform) in &meshes { - commands - .entity(entity) - .try_insert(PreviousGlobalTransform(transform.affine())); + for (entity, transform) in &new_meshes { + let new_previous_transform = PreviousGlobalTransform(transform.affine()); + commands.entity(entity).try_insert(new_previous_transform); } + meshes.par_iter_mut().for_each(|(transform, mut previous)| { + previous.set_if_neq(PreviousGlobalTransform(transform.affine())); + }); } } 
#[derive(Resource)] pub struct PrepassPipeline { + pub internal: PrepassPipelineInternal, + pub material_pipeline: MaterialPipeline, +} + +/// Internal fields of the `PrepassPipeline` that don't need the generic bound +/// This is done as an optimization to not recompile the same code multiple time +pub struct PrepassPipelineInternal { pub view_layout_motion_vectors: BindGroupLayout, pub view_layout_no_motion_vectors: BindGroupLayout, pub mesh_layouts: MeshLayouts, @@ -252,19 +305,22 @@ pub struct PrepassPipeline { pub prepass_material_fragment_shader: Option>, pub deferred_material_vertex_shader: Option>, pub deferred_material_fragment_shader: Option>, - pub material_pipeline: MaterialPipeline, /// Whether skins will use uniform buffers on account of storage buffers /// being unavailable on this platform. pub skins_use_uniform_buffers: bool, pub depth_clip_control_supported: bool, - _marker: PhantomData, + + /// Whether binding arrays (a.k.a. bindless textures) are usable on the + /// current render device. 
+ pub binding_arrays_are_usable: bool, } impl FromWorld for PrepassPipeline { fn from_world(world: &mut World) -> Self { let render_device = world.resource::(); + let render_adapter = world.resource::(); let asset_server = world.resource::(); let visibility_ranges_buffer_binding_type = render_device @@ -323,8 +379,7 @@ impl FromWorld for PrepassPipeline { let depth_clip_control_supported = render_device .features() .contains(WgpuFeatures::DEPTH_CLIP_CONTROL); - - PrepassPipeline { + let internal = PrepassPipelineInternal { view_layout_motion_vectors, view_layout_no_motion_vectors, mesh_layouts: mesh_pipeline.mesh_layouts.clone(), @@ -349,10 +404,13 @@ impl FromWorld for PrepassPipeline { ShaderRef::Path(path) => Some(asset_server.load(path)), }, material_layout: M::bind_group_layout(render_device), - material_pipeline: world.resource::>().clone(), skins_use_uniform_buffers: skin::skins_use_uniform_buffers(render_device), depth_clip_control_supported, - _marker: PhantomData, + binding_arrays_are_usable: binding_arrays_are_usable(render_device, render_adapter), + }; + PrepassPipeline { + internal, + material_pipeline: world.resource::>().clone(), } } } @@ -368,15 +426,38 @@ where key: Self::Key, layout: &MeshVertexBufferLayoutRef, ) -> Result { - let mut bind_group_layouts = vec![if key - .mesh_key + let mut shader_defs = Vec::new(); + if self.material_pipeline.bindless { + shader_defs.push("BINDLESS".into()); + } + let mut descriptor = self + .internal + .specialize(key.mesh_key, shader_defs, layout)?; + + // This is a bit risky because it's possible to change something that would + // break the prepass but be fine in the main pass. + // Since this api is pretty low-level it doesn't matter that much, but it is a potential issue. 
+ M::specialize(&self.material_pipeline, &mut descriptor, layout, key)?; + + Ok(descriptor) + } +} + +impl PrepassPipelineInternal { + fn specialize( + &self, + mesh_key: MeshPipelineKey, + shader_defs: Vec, + layout: &MeshVertexBufferLayoutRef, + ) -> Result { + let mut shader_defs = shader_defs; + let mut bind_group_layouts = vec![if mesh_key .contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { self.view_layout_motion_vectors.clone() } else { self.view_layout_no_motion_vectors.clone() }]; - let mut shader_defs = Vec::new(); let mut vertex_attributes = Vec::new(); // Let the shader code know that it's running in a prepass pipeline. @@ -387,40 +468,29 @@ where // NOTE: Eventually, it would be nice to only add this when the shaders are overloaded by the Material. // The main limitation right now is that bind group order is hardcoded in shaders. bind_group_layouts.push(self.material_layout.clone()); - #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] shader_defs.push("WEBGL2".into()); - shader_defs.push("VERTEX_OUTPUT_INSTANCE_INDEX".into()); - - if key.mesh_key.contains(MeshPipelineKey::DEPTH_PREPASS) { + if mesh_key.contains(MeshPipelineKey::DEPTH_PREPASS) { shader_defs.push("DEPTH_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::MAY_DISCARD) { + if mesh_key.contains(MeshPipelineKey::MAY_DISCARD) { shader_defs.push("MAY_DISCARD".into()); } - - let blend_key = key - .mesh_key - .intersection(MeshPipelineKey::BLEND_RESERVED_BITS); + let blend_key = mesh_key.intersection(MeshPipelineKey::BLEND_RESERVED_BITS); if blend_key == MeshPipelineKey::BLEND_PREMULTIPLIED_ALPHA { shader_defs.push("BLEND_PREMULTIPLIED_ALPHA".into()); } if blend_key == MeshPipelineKey::BLEND_ALPHA { shader_defs.push("BLEND_ALPHA".into()); } - if layout.0.contains(Mesh::ATTRIBUTE_POSITION) { shader_defs.push("VERTEX_POSITIONS".into()); vertex_attributes.push(Mesh::ATTRIBUTE_POSITION.at_shader_location(0)); } - // For directional light shadow map views, 
use unclipped depth via either the native GPU feature, // or emulated by setting depth in the fragment shader for GPUs that don't support it natively. - let emulate_unclipped_depth = key - .mesh_key - .contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) + let emulate_unclipped_depth = mesh_key.contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) && !self.depth_clip_control_supported; if emulate_unclipped_depth { shader_defs.push("UNCLIPPED_DEPTH_ORTHO_EMULATION".into()); @@ -432,113 +502,93 @@ where // https://github.com/bevyengine/bevy/pull/8877 shader_defs.push("PREPASS_FRAGMENT".into()); } - let unclipped_depth = key - .mesh_key - .contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) + let unclipped_depth = mesh_key.contains(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO) && self.depth_clip_control_supported; - if layout.0.contains(Mesh::ATTRIBUTE_UV_0) { shader_defs.push("VERTEX_UVS".into()); shader_defs.push("VERTEX_UVS_A".into()); vertex_attributes.push(Mesh::ATTRIBUTE_UV_0.at_shader_location(1)); } - if layout.0.contains(Mesh::ATTRIBUTE_UV_1) { shader_defs.push("VERTEX_UVS".into()); shader_defs.push("VERTEX_UVS_B".into()); vertex_attributes.push(Mesh::ATTRIBUTE_UV_1.at_shader_location(2)); } - - if key.mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) { + if mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) { shader_defs.push("NORMAL_PREPASS".into()); } - - if key - .mesh_key - .intersects(MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::DEFERRED_PREPASS) + if mesh_key.intersects(MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::DEFERRED_PREPASS) { - vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(3)); shader_defs.push("NORMAL_PREPASS_OR_DEFERRED_PREPASS".into()); + if layout.0.contains(Mesh::ATTRIBUTE_NORMAL) { + shader_defs.push("VERTEX_NORMALS".into()); + vertex_attributes.push(Mesh::ATTRIBUTE_NORMAL.at_shader_location(3)); + } else if mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS) { + warn!( + "The default normal prepass expects the mesh to have 
vertex normal attributes." + ); + } if layout.0.contains(Mesh::ATTRIBUTE_TANGENT) { shader_defs.push("VERTEX_TANGENTS".into()); vertex_attributes.push(Mesh::ATTRIBUTE_TANGENT.at_shader_location(4)); } } - - if key - .mesh_key + if mesh_key .intersects(MeshPipelineKey::MOTION_VECTOR_PREPASS | MeshPipelineKey::DEFERRED_PREPASS) { shader_defs.push("MOTION_VECTOR_PREPASS_OR_DEFERRED_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { + if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { shader_defs.push("DEFERRED_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::LIGHTMAPPED) { + if mesh_key.contains(MeshPipelineKey::LIGHTMAPPED) { shader_defs.push("LIGHTMAP".into()); } - + if mesh_key.contains(MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING) { + shader_defs.push("LIGHTMAP_BICUBIC_SAMPLING".into()); + } if layout.0.contains(Mesh::ATTRIBUTE_COLOR) { shader_defs.push("VERTEX_COLORS".into()); vertex_attributes.push(Mesh::ATTRIBUTE_COLOR.at_shader_location(7)); } - - if key - .mesh_key - .contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) - { + if mesh_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { shader_defs.push("MOTION_VECTOR_PREPASS".into()); } - - if key.mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) { + if mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_SKIN) { shader_defs.push("HAS_PREVIOUS_SKIN".into()); } - - if key.mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) { + if mesh_key.contains(MeshPipelineKey::HAS_PREVIOUS_MORPH) { shader_defs.push("HAS_PREVIOUS_MORPH".into()); } - - // If bindless mode is on, add a `BINDLESS` define. 
- if self.material_pipeline.bindless { - shader_defs.push("BINDLESS".into()); + if self.binding_arrays_are_usable { + shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into()); } - - if key - .mesh_key - .contains(MeshPipelineKey::VISIBILITY_RANGE_DITHER) - { + if mesh_key.contains(MeshPipelineKey::VISIBILITY_RANGE_DITHER) { shader_defs.push("VISIBILITY_RANGE_DITHER".into()); } - - if key.mesh_key.intersects( + if mesh_key.intersects( MeshPipelineKey::NORMAL_PREPASS | MeshPipelineKey::MOTION_VECTOR_PREPASS | MeshPipelineKey::DEFERRED_PREPASS, ) { shader_defs.push("PREPASS_FRAGMENT".into()); } - let bind_group = setup_morph_and_skinning_defs( &self.mesh_layouts, layout, 5, - &key.mesh_key, + &mesh_key, &mut shader_defs, &mut vertex_attributes, self.skins_use_uniform_buffers, ); bind_group_layouts.insert(1, bind_group); - let vertex_buffer_layout = layout.0.get_layout(&vertex_attributes)?; - // Setup prepass fragment targets - normals in slot 0 (or None if not needed), motion vectors in slot 1 let mut targets = prepass_target_descriptors( - key.mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS), - key.mesh_key - .contains(MeshPipelineKey::MOTION_VECTOR_PREPASS), - key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS), + mesh_key.contains(MeshPipelineKey::NORMAL_PREPASS), + mesh_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS), + mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS), ); if targets.iter().all(Option::is_none) { @@ -552,12 +602,12 @@ where // prepass shader, or we are emulating unclipped depth in the fragment shader. 
let fragment_required = !targets.is_empty() || emulate_unclipped_depth - || (key.mesh_key.contains(MeshPipelineKey::MAY_DISCARD) + || (mesh_key.contains(MeshPipelineKey::MAY_DISCARD) && self.prepass_material_fragment_shader.is_some()); let fragment = fragment_required.then(|| { // Use the fragment shader from the material - let frag_shader_handle = if key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { + let frag_shader_handle = if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { match self.deferred_material_fragment_shader.clone() { Some(frag_shader_handle) => frag_shader_handle, _ => PREPASS_SHADER_HANDLE, @@ -578,7 +628,7 @@ where }); // Use the vertex shader from the material if present - let vert_shader_handle = if key.mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { + let vert_shader_handle = if mesh_key.contains(MeshPipelineKey::DEFERRED_PREPASS) { if let Some(handle) = &self.deferred_material_vertex_shader { handle.clone() } else { @@ -589,8 +639,7 @@ where } else { PREPASS_SHADER_HANDLE }; - - let mut descriptor = RenderPipelineDescriptor { + let descriptor = RenderPipelineDescriptor { vertex: VertexState { shader: vert_shader_handle, entry_point: "vertex".into(), @@ -600,7 +649,7 @@ where fragment, layout: bind_group_layouts, primitive: PrimitiveState { - topology: key.mesh_key.primitive_topology(), + topology: mesh_key.primitive_topology(), strip_index_format: None, front_face: FrontFace::Ccw, cull_mode: None, @@ -625,7 +674,7 @@ where }, }), multisample: MultisampleState { - count: key.mesh_key.msaa_samples(), + count: mesh_key.msaa_samples(), mask: !0, alpha_to_coverage_enabled: false, }, @@ -633,12 +682,6 @@ where label: Some("prepass_pipeline".into()), zero_initialize_workgroup_memory: false, }; - - // This is a bit risky because it's possible to change something that would - // break the prepass but be fine in the main pass. - // Since this api is pretty low-level it doesn't matter that much, but it is a potential issue. 
- M::specialize(&self.material_pipeline, &mut descriptor, layout, key)?; - Ok(descriptor) } } @@ -667,7 +710,10 @@ pub fn prepare_previous_view_uniforms( render_device: Res, render_queue: Res, mut previous_view_uniforms: ResMut, - views: Query<(Entity, &ExtractedView, Option<&PreviousViewData>), PreviousViewFilter>, + views: Query< + (Entity, &ExtractedView, Option<&PreviousViewData>), + Or<(With, With)>, + >, ) { let views_iter = views.iter(); let view_count = views_iter.len(); @@ -687,6 +733,7 @@ pub fn prepare_previous_view_uniforms( PreviousViewData { view_from_world, clip_from_world: camera.clip_from_view * view_from_world, + clip_from_view: camera.clip_from_view, } } }; @@ -719,7 +766,7 @@ pub fn prepare_prepass_view_bind_group( ) { prepass_view_bind_group.no_motion_vectors = Some(render_device.create_bind_group( "prepass_view_no_motion_vectors_bind_group", - &prepass_pipeline.view_layout_no_motion_vectors, + &prepass_pipeline.internal.view_layout_no_motion_vectors, &BindGroupEntries::with_indices(( (0, view_binding.clone()), (1, globals_binding.clone()), @@ -730,7 +777,7 @@ pub fn prepare_prepass_view_bind_group( if let Some(previous_view_uniforms_binding) = previous_view_uniforms.uniforms.binding() { prepass_view_bind_group.motion_vectors = Some(render_device.create_bind_group( "prepass_view_motion_vectors_bind_group", - &prepass_pipeline.view_layout_motion_vectors, + &prepass_pipeline.internal.view_layout_motion_vectors, &BindGroupEntries::with_indices(( (0, view_binding), (1, globals_binding), @@ -742,95 +789,62 @@ pub fn prepare_prepass_view_bind_group( } } -#[allow(clippy::too_many_arguments)] -pub fn queue_prepass_material_meshes( - ( - opaque_draw_functions, - alpha_mask_draw_functions, - opaque_deferred_draw_functions, - alpha_mask_deferred_draw_functions, - ): ( - Res>, - Res>, - Res>, - Res>, - ), - prepass_pipeline: Res>, - mut pipelines: ResMut>>, - pipeline_cache: Res, - render_meshes: Res>, - render_mesh_instances: Res, - render_materials: 
Res>>, - render_material_instances: Res>, - render_lightmaps: Res, - render_visibility_ranges: Res, - material_bind_group_allocator: Res>, - mut opaque_prepass_render_phases: ResMut>, - mut alpha_mask_prepass_render_phases: ResMut>, - mut opaque_deferred_render_phases: ResMut>, - mut alpha_mask_deferred_render_phases: ResMut>, - views: Query< - ( - Entity, - &RenderVisibleEntities, - &Msaa, - Option<&DepthPrepass>, - Option<&NormalPrepass>, - Option<&MotionVectorPrepass>, - Option<&DeferredPrepass>, - ), - With, - >, -) where - M::Data: PartialEq + Eq + Hash + Clone, -{ - let opaque_draw_prepass = opaque_draw_functions - .read() - .get_id::>() - .unwrap(); - let alpha_mask_draw_prepass = alpha_mask_draw_functions - .read() - .get_id::>() - .unwrap(); - let opaque_draw_deferred = opaque_deferred_draw_functions - .read() - .get_id::>() - .unwrap(); - let alpha_mask_draw_deferred = alpha_mask_deferred_draw_functions - .read() - .get_id::>() - .unwrap(); - for ( - view, - visible_entities, - msaa, - depth_prepass, - normal_prepass, - motion_vector_prepass, - deferred_prepass, - ) in &views - { - let ( - mut opaque_phase, - mut alpha_mask_phase, - mut opaque_deferred_phase, - mut alpha_mask_deferred_phase, - ) = ( - opaque_prepass_render_phases.get_mut(&view), - alpha_mask_prepass_render_phases.get_mut(&view), - opaque_deferred_render_phases.get_mut(&view), - alpha_mask_deferred_render_phases.get_mut(&view), - ); +/// Stores the [`SpecializedPrepassMaterialViewPipelineCache`] for each view. +#[derive(Resource, Deref, DerefMut)] +pub struct SpecializedPrepassMaterialPipelineCache { + // view_entity -> view pipeline cache + #[deref] + map: HashMap>, + marker: PhantomData, +} - // Skip if there's no place to put the mesh. 
- if opaque_phase.is_none() - && alpha_mask_phase.is_none() - && opaque_deferred_phase.is_none() - && alpha_mask_deferred_phase.is_none() - { - continue; +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. +#[derive(Deref, DerefMut)] +pub struct SpecializedPrepassMaterialViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, + marker: PhantomData, +} + +impl Default for SpecializedPrepassMaterialPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, } + } +} +impl Default for SpecializedPrepassMaterialViewPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, + } + } +} + +#[derive(Resource, Deref, DerefMut, Default, Clone)] +pub struct ViewKeyPrepassCache(HashMap); + +#[derive(Resource, Deref, DerefMut, Default, Clone)] +pub struct ViewPrepassSpecializationTicks(HashMap); + +pub fn check_prepass_views_need_specialization( + mut view_key_cache: ResMut, + mut view_specialization_ticks: ResMut, + mut views: Query<( + &ExtractedView, + &Msaa, + Option<&DepthPrepass>, + Option<&NormalPrepass>, + Option<&MotionVectorPrepass>, + )>, + ticks: SystemChangeTick, +) { + for (view, msaa, depth_prepass, normal_prepass, motion_vector_prepass) in views.iter_mut() { let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples()); if depth_prepass.is_some() { view_key |= MeshPipelineKey::DEPTH_PREPASS; @@ -842,27 +856,124 @@ pub fn queue_prepass_material_meshes( view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS; } - for (render_entity, visible_entity) in visible_entities.iter::() { - let Some(material_asset_id) = render_material_instances.get(visible_entity) else { + if let Some(current_key) = view_key_cache.get_mut(&view.retained_view_entity) { + if *current_key != view_key { + view_key_cache.insert(view.retained_view_entity, view_key); + 
view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run()); + } + } else { + view_key_cache.insert(view.retained_view_entity, view_key); + view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run()); + } + } +} + +pub fn specialize_prepass_material_meshes( + render_meshes: Res>, + render_materials: Res>>, + render_mesh_instances: Res, + render_material_instances: Res, + render_lightmaps: Res, + render_visibility_ranges: Res, + material_bind_group_allocator: Res>, + view_key_cache: Res, + views: Query<( + &ExtractedView, + &RenderVisibleEntities, + &Msaa, + Option<&MotionVectorPrepass>, + Option<&DeferredPrepass>, + )>, + ( + opaque_prepass_render_phases, + alpha_mask_prepass_render_phases, + opaque_deferred_render_phases, + alpha_mask_deferred_render_phases, + ): ( + Res>, + Res>, + Res>, + Res>, + ), + ( + mut specialized_material_pipeline_cache, + ticks, + prepass_pipeline, + mut pipelines, + pipeline_cache, + view_specialization_ticks, + entity_specialization_ticks, + ): ( + ResMut>, + SystemChangeTick, + Res>, + ResMut>>, + Res, + Res, + Res>, + ), +) where + M: Material, + M::Data: PartialEq + Eq + Hash + Clone, +{ + for (extracted_view, visible_entities, msaa, motion_vector_prepass, deferred_prepass) in &views + { + if !opaque_deferred_render_phases.contains_key(&extracted_view.retained_view_entity) + && !alpha_mask_deferred_render_phases.contains_key(&extracted_view.retained_view_entity) + && !opaque_prepass_render_phases.contains_key(&extracted_view.retained_view_entity) + && !alpha_mask_prepass_render_phases.contains_key(&extracted_view.retained_view_entity) + { + continue; + } + + let Some(view_key) = view_key_cache.get(&extracted_view.retained_view_entity) else { + continue; + }; + + let view_tick = view_specialization_ticks + .get(&extracted_view.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(extracted_view.retained_view_entity) + 
.or_default(); + + for (_, visible_entity) in visible_entities.iter::() { + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { continue; }; let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) else { continue; }; - let Some(material) = render_materials.get(*material_asset_id) else { + let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } + let Some(material) = render_materials.get(material_asset_id) else { continue; }; let Some(material_bind_group) = material_bind_group_allocator.get(material.binding.group) else { + warn!("Couldn't get bind group for material"); continue; }; let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { continue; }; - let mut mesh_key = view_key | MeshPipelineKey::from_bits_retain(mesh.key_bits.bits()); + let mut mesh_key = *view_key | MeshPipelineKey::from_bits_retain(mesh.key_bits.bits()); let alpha_mode = material.properties.alpha_mode; match alpha_mode { @@ -893,16 +1004,17 @@ pub fn queue_prepass_material_meshes( mesh_key |= MeshPipelineKey::DEFERRED_PREPASS; } - // Even though we don't use the lightmap in the prepass, the - // `SetMeshBindGroup` render command will bind the data for it. So - // we need to include the appropriate flag in the mesh pipeline key - // to ensure that the necessary bind group layout entries are - // present. 
- if render_lightmaps - .render_lightmaps - .contains_key(visible_entity) - { + if let Some(lightmap) = render_lightmaps.render_lightmaps.get(visible_entity) { + // Even though we don't use the lightmap in the forward prepass, the + // `SetMeshBindGroup` render command will bind the data for it. So + // we need to include the appropriate flag in the mesh pipeline key + // to ensure that the necessary bind group layout entries are + // present. mesh_key |= MeshPipelineKey::LIGHTMAPPED; + + if lightmap.bicubic_sampling && deferred { + mesh_key |= MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING; + } } if render_visibility_ranges.entity_has_crossfading_visibility_ranges(*visible_entity) { @@ -944,67 +1056,197 @@ pub fn queue_prepass_material_meshes( } }; - match mesh_key - .intersection(MeshPipelineKey::BLEND_RESERVED_BITS | MeshPipelineKey::MAY_DISCARD) - { - MeshPipelineKey::BLEND_OPAQUE | MeshPipelineKey::BLEND_ALPHA_TO_COVERAGE => { + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); + } + } +} + +pub fn queue_prepass_material_meshes( + render_mesh_instances: Res, + render_materials: Res>>, + render_material_instances: Res, + mesh_allocator: Res, + gpu_preprocessing_support: Res, + mut opaque_prepass_render_phases: ResMut>, + mut alpha_mask_prepass_render_phases: ResMut>, + mut opaque_deferred_render_phases: ResMut>, + mut alpha_mask_deferred_render_phases: ResMut>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, + specialized_material_pipeline_cache: Res>, +) where + M::Data: PartialEq + Eq + Hash + Clone, +{ + for (extracted_view, visible_entities) in &views { + let ( + mut opaque_phase, + mut alpha_mask_phase, + mut opaque_deferred_phase, + mut alpha_mask_deferred_phase, + ) = ( + opaque_prepass_render_phases.get_mut(&extracted_view.retained_view_entity), + alpha_mask_prepass_render_phases.get_mut(&extracted_view.retained_view_entity), + 
opaque_deferred_render_phases.get_mut(&extracted_view.retained_view_entity), + alpha_mask_deferred_render_phases.get_mut(&extracted_view.retained_view_entity), + ); + + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(&extracted_view.retained_view_entity) + else { + continue; + }; + + // Skip if there's no place to put the mesh. + if opaque_phase.is_none() + && alpha_mask_phase.is_none() + && opaque_deferred_phase.is_none() + && alpha_mask_deferred_phase.is_none() + { + continue; + } + + for (render_entity, visible_entity) in visible_entities.iter::() { + let Some((current_change_tick, pipeline_id)) = + view_specialized_material_pipeline_cache.get(visible_entity) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. + if opaque_phase.as_mut().is_some_and(|phase| { + phase.validate_cached_entity(*visible_entity, *current_change_tick) + }) || alpha_mask_phase.as_mut().is_some_and(|phase| { + phase.validate_cached_entity(*visible_entity, *current_change_tick) + }) || opaque_deferred_phase.as_mut().is_some_and(|phase| { + phase.validate_cached_entity(*visible_entity, *current_change_tick) + }) || alpha_mask_deferred_phase.as_mut().is_some_and(|phase| { + phase.validate_cached_entity(*visible_entity, *current_change_tick) + }) { + continue; + } + + let Some(material_instance) = render_material_instances.instances.get(visible_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; + let Some(material) = render_materials.get(material_asset_id) else { + continue; + }; + let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + + let deferred = match material.properties.render_method { + OpaqueRendererMethod::Forward => false, + OpaqueRendererMethod::Deferred => true, + 
OpaqueRendererMethod::Auto => unreachable!(), + }; + + match material.properties.render_phase_type { + RenderPhaseType::Opaque => { if deferred { opaque_deferred_phase.as_mut().unwrap().add( + OpaqueNoLightmap3dBatchSetKey { + draw_function: material + .properties + .deferred_draw_function_id + .unwrap(), + pipeline: *pipeline_id, + material_bind_group_index: Some(material.binding.group.0), + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }, OpaqueNoLightmap3dBinKey { - batch_set_key: OpaqueNoLightmap3dBatchSetKey { - draw_function: opaque_draw_deferred, - pipeline: pipeline_id, - material_bind_group_index: Some(material.binding.group.0), - }, asset_id: mesh_instance.mesh_asset_id.into(), }, (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.should_batch()), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + *current_change_tick, ); } else if let Some(opaque_phase) = opaque_phase.as_mut() { + let (vertex_slab, index_slab) = + mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); opaque_phase.add( + OpaqueNoLightmap3dBatchSetKey { + draw_function: material + .properties + .prepass_draw_function_id + .unwrap(), + pipeline: *pipeline_id, + material_bind_group_index: Some(material.binding.group.0), + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }, OpaqueNoLightmap3dBinKey { - batch_set_key: OpaqueNoLightmap3dBatchSetKey { - draw_function: opaque_draw_prepass, - pipeline: pipeline_id, - material_bind_group_index: Some(material.binding.group.0), - }, asset_id: mesh_instance.mesh_asset_id.into(), }, (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.should_batch()), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + *current_change_tick, ); } } - // Alpha mask - MeshPipelineKey::MAY_DISCARD => { + 
RenderPhaseType::AlphaMask => { if deferred { + let (vertex_slab, index_slab) = + mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + let batch_set_key = OpaqueNoLightmap3dBatchSetKey { + draw_function: material.properties.deferred_draw_function_id.unwrap(), + pipeline: *pipeline_id, + material_bind_group_index: Some(material.binding.group.0), + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; let bin_key = OpaqueNoLightmap3dBinKey { - batch_set_key: OpaqueNoLightmap3dBatchSetKey { - draw_function: alpha_mask_draw_deferred, - pipeline: pipeline_id, - material_bind_group_index: Some(material.binding.group.0), - }, asset_id: mesh_instance.mesh_asset_id.into(), }; alpha_mask_deferred_phase.as_mut().unwrap().add( + batch_set_key, bin_key, (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.should_batch()), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + *current_change_tick, ); } else if let Some(alpha_mask_phase) = alpha_mask_phase.as_mut() { + let (vertex_slab, index_slab) = + mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + let batch_set_key = OpaqueNoLightmap3dBatchSetKey { + draw_function: material.properties.prepass_draw_function_id.unwrap(), + pipeline: *pipeline_id, + material_bind_group_index: Some(material.binding.group.0), + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; let bin_key = OpaqueNoLightmap3dBinKey { - batch_set_key: OpaqueNoLightmap3dBatchSetKey { - draw_function: alpha_mask_draw_prepass, - pipeline: pipeline_id, - material_bind_group_index: Some(material.binding.group.0), - }, asset_id: mesh_instance.mesh_asset_id.into(), }; alpha_mask_phase.add( + batch_set_key, bin_key, (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.should_batch()), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + 
&gpu_preprocessing_support, + ), + *current_change_tick, ); } } diff --git a/crates/bevy_pbr/src/prepass/prepass.wgsl b/crates/bevy_pbr/src/prepass/prepass.wgsl index 26011d609b50c..52dd9bf201568 100644 --- a/crates/bevy_pbr/src/prepass/prepass.wgsl +++ b/crates/bevy_pbr/src/prepass/prepass.wgsl @@ -96,6 +96,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { #endif // VERTEX_UVS_B #ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS +#ifdef VERTEX_NORMALS #ifdef SKINNED out.world_normal = skinning::skin_normals(world_from_local, vertex.normal); #else // SKINNED @@ -106,6 +107,7 @@ fn vertex(vertex_no_morph: Vertex) -> VertexOutput { vertex_no_morph.instance_index ); #endif // SKINNED +#endif // VERTEX_NORMALS #ifdef VERTEX_TANGENTS out.world_tangent = mesh_functions::mesh_tangent_local_to_world( diff --git a/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl b/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl index a8dae83b8e57a..3bd27b2e037c1 100644 --- a/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl +++ b/crates/bevy_pbr/src/prepass/prepass_bindings.wgsl @@ -3,10 +3,9 @@ struct PreviousViewUniforms { view_from_world: mat4x4, clip_from_world: mat4x4, + clip_from_view: mat4x4, } -#ifdef MOTION_VECTOR_PREPASS @group(0) @binding(2) var previous_view_uniforms: PreviousViewUniforms; -#endif // MOTION_VECTOR_PREPASS // Material bindings will be in @group(2) diff --git a/crates/bevy_pbr/src/prepass/prepass_io.wgsl b/crates/bevy_pbr/src/prepass/prepass_io.wgsl index 5f7d8ec071c5a..c3c0e55549906 100644 --- a/crates/bevy_pbr/src/prepass/prepass_io.wgsl +++ b/crates/bevy_pbr/src/prepass/prepass_io.wgsl @@ -15,7 +15,9 @@ struct Vertex { #endif #ifdef NORMAL_PREPASS_OR_DEFERRED_PREPASS +#ifdef VERTEX_NORMALS @location(3) normal: vec3, +#endif #ifdef VERTEX_TANGENTS @location(4) tangent: vec4, #endif diff --git a/crates/bevy_pbr/src/render/build_indirect_params.wgsl b/crates/bevy_pbr/src/render/build_indirect_params.wgsl new file mode 100644 index 0000000000000..5ca6d4c0ccfff 
--- /dev/null +++ b/crates/bevy_pbr/src/render/build_indirect_params.wgsl @@ -0,0 +1,142 @@ +// Builds GPU indirect draw parameters from metadata. +// +// This only runs when indirect drawing is enabled. It takes the output of +// `mesh_preprocess.wgsl` and creates indirect parameters for the GPU. +// +// This shader runs separately for indexed and non-indexed meshes. Unlike +// `mesh_preprocess.wgsl`, which runs one instance per mesh *instance*, one +// instance of this shader corresponds to a single *batch* which could contain +// arbitrarily many instances of a single mesh. + +#import bevy_pbr::mesh_preprocess_types::{ + IndirectBatchSet, + IndirectParametersIndexed, + IndirectParametersNonIndexed, + IndirectParametersCpuMetadata, + IndirectParametersGpuMetadata, + MeshInput +} + +// The data for each mesh that the CPU supplied to the GPU. +@group(0) @binding(0) var current_input: array; + +// Data that we use to generate the indirect parameters. +// +// The `mesh_preprocess.wgsl` shader emits these. +@group(0) @binding(1) var indirect_parameters_cpu_metadata: + array; + +@group(0) @binding(2) var indirect_parameters_gpu_metadata: + array; + +// Information about each batch set. +// +// A *batch set* is a set of meshes that might be multi-drawn together. +@group(0) @binding(3) var indirect_batch_sets: array; + +#ifdef INDEXED +// The buffer of indirect draw parameters that we generate, and that the GPU +// reads to issue the draws. +// +// This buffer is for indexed meshes. +@group(0) @binding(4) var indirect_parameters: + array; +#else // INDEXED +// The buffer of indirect draw parameters that we generate, and that the GPU +// reads to issue the draws. +// +// This buffer is for non-indexed meshes. +@group(0) @binding(4) var indirect_parameters: + array; +#endif // INDEXED + +@compute +@workgroup_size(64) +fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { + // Figure out our instance index (i.e. batch index). 
If this thread doesn't + // correspond to any index, bail. + let instance_index = global_invocation_id.x; + if (instance_index >= arrayLength(&indirect_parameters_cpu_metadata)) { + return; + } + + // Unpack the metadata for this batch. + let base_output_index = indirect_parameters_cpu_metadata[instance_index].base_output_index; + let batch_set_index = indirect_parameters_cpu_metadata[instance_index].batch_set_index; + let mesh_index = indirect_parameters_gpu_metadata[instance_index].mesh_index; + + // If we aren't using `multi_draw_indirect_count`, we have a 1:1 fixed + // assignment of batches to slots in the indirect parameters buffer, so we + // can just use the instance index as the index of our indirect parameters. + let early_instance_count = + indirect_parameters_gpu_metadata[instance_index].early_instance_count; + let late_instance_count = indirect_parameters_gpu_metadata[instance_index].late_instance_count; + + // If in the early phase, we draw only the early meshes. If in the late + // phase, we draw only the late meshes. If in the main phase, draw all the + // meshes. +#ifdef EARLY_PHASE + let instance_count = early_instance_count; +#else // EARLY_PHASE +#ifdef LATE_PHASE + let instance_count = late_instance_count; +#else // LATE_PHASE + let instance_count = early_instance_count + late_instance_count; +#endif // LATE_PHASE +#endif // EARLY_PHASE + + var indirect_parameters_index = instance_index; + + // If the current hardware and driver support `multi_draw_indirect_count`, + // dynamically reserve an index for the indirect parameters we're to + // generate. +#ifdef MULTI_DRAW_INDIRECT_COUNT_SUPPORTED + // If this batch belongs to a batch set, then allocate space for the + // indirect commands in that batch set. + if (batch_set_index != 0xffffffffu) { + // Bail out now if there are no instances. Note that we can only bail if + // we're in a batch set. That's because only batch sets are drawn using + // `multi_draw_indirect_count`. 
If we aren't using + // `multi_draw_indirect_count`, then we need to continue in order to + // zero out the instance count; otherwise, it'll have garbage data in + // it. + if (instance_count == 0u) { + return; + } + + let indirect_parameters_base = + indirect_batch_sets[batch_set_index].indirect_parameters_base; + let indirect_parameters_offset = + atomicAdd(&indirect_batch_sets[batch_set_index].indirect_parameters_count, 1u); + + indirect_parameters_index = indirect_parameters_base + indirect_parameters_offset; + } +#endif // MULTI_DRAW_INDIRECT_COUNT_SUPPORTED + + // Build up the indirect parameters. The structures for indexed and + // non-indexed meshes are slightly different. + + indirect_parameters[indirect_parameters_index].instance_count = instance_count; + +#ifdef LATE_PHASE + // The late mesh instances are stored after the early mesh instances, so we + // offset the output index by the number of early mesh instances. + indirect_parameters[indirect_parameters_index].first_instance = + base_output_index + early_instance_count; +#else // LATE_PHASE + indirect_parameters[indirect_parameters_index].first_instance = base_output_index; +#endif // LATE_PHASE + + indirect_parameters[indirect_parameters_index].base_vertex = + current_input[mesh_index].first_vertex_index; + +#ifdef INDEXED + indirect_parameters[indirect_parameters_index].index_count = + current_input[mesh_index].index_count; + indirect_parameters[indirect_parameters_index].first_index = + current_input[mesh_index].first_index_index; +#else // INDEXED + indirect_parameters[indirect_parameters_index].vertex_count = + current_input[mesh_index].index_count; +#endif // INDEXED +} diff --git a/crates/bevy_pbr/src/render/clustered_forward.wgsl b/crates/bevy_pbr/src/render/clustered_forward.wgsl index 72eef607db707..aa3fb4f199b1f 100644 --- a/crates/bevy_pbr/src/render/clustered_forward.wgsl +++ b/crates/bevy_pbr/src/render/clustered_forward.wgsl @@ -27,6 +27,7 @@ struct ClusterableObjectIndexRanges { // 
The offset of the index of the first irradiance volumes, which also // terminates the list of reflection probes. first_irradiance_volume_index_offset: u32, + first_decal_offset: u32, // One past the offset of the index of the final clusterable object for this // cluster. last_clusterable_object_index_offset: u32, @@ -81,12 +82,14 @@ fn unpack_clusterable_object_index_ranges(cluster_index: u32) -> ClusterableObje let spot_light_offset = point_light_offset + offset_and_counts_a.y; let reflection_probe_offset = spot_light_offset + offset_and_counts_a.z; let irradiance_volume_offset = reflection_probe_offset + offset_and_counts_a.w; - let last_clusterable_offset = irradiance_volume_offset + offset_and_counts_b.x; + let decal_offset = irradiance_volume_offset + offset_and_counts_b.x; + let last_clusterable_offset = decal_offset + offset_and_counts_b.y; return ClusterableObjectIndexRanges( point_light_offset, spot_light_offset, reflection_probe_offset, irradiance_volume_offset, + decal_offset, last_clusterable_offset ); @@ -110,7 +113,7 @@ fn unpack_clusterable_object_index_ranges(cluster_index: u32) -> ClusterableObje let offset_b = offset_a + offset_and_counts.y; let offset_c = offset_b + offset_and_counts.z; - return ClusterableObjectIndexRanges(offset_a, offset_b, offset_c, offset_c, offset_c); + return ClusterableObjectIndexRanges(offset_a, offset_b, offset_c, offset_c, offset_c, offset_c); #endif // AVAILABLE_STORAGE_BUFFER_BINDINGS >= 3 } diff --git a/crates/bevy_pbr/src/render/fog.rs b/crates/bevy_pbr/src/render/fog.rs index 02bd6ac7360cf..9394380f7186d 100644 --- a/crates/bevy_pbr/src/render/fog.rs +++ b/crates/bevy_pbr/src/render/fog.rs @@ -1,5 +1,5 @@ use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_color::{ColorToComponents, LinearRgba}; use bevy_ecs::prelude::*; use bevy_math::{Vec3, Vec4}; @@ -127,7 +127,7 @@ pub struct ViewFogUniformOffset { } /// Handle for 
the fog WGSL Shader internal asset -pub const FOG_SHADER_HANDLE: Handle = Handle::weak_from_u128(4913569193382610166); +pub const FOG_SHADER_HANDLE: Handle = weak_handle!("e943f446-2856-471c-af5e-68dd276eec42"); /// A plugin that consolidates fog extraction, preparation and related resources/assets pub struct FogPlugin; diff --git a/crates/bevy_pbr/src/render/gpu_preprocess.rs b/crates/bevy_pbr/src/render/gpu_preprocess.rs index 434ed64135a2e..912f6192ce9dd 100644 --- a/crates/bevy_pbr/src/render/gpu_preprocess.rs +++ b/crates/bevy_pbr/src/render/gpu_preprocess.rs @@ -6,47 +6,72 @@ //! [`MeshInputUniform`]s instead and use the GPU to calculate the remaining //! derived fields in [`MeshUniform`]. -use core::num::NonZero; +use core::num::{NonZero, NonZeroU64}; use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_core_pipeline::{ + core_3d::graph::{Core3d, Node3d}, + experimental::mip_generation::ViewDepthPyramid, + prepass::{DepthPrepass, PreviousViewData, PreviousViewUniformOffset, PreviousViewUniforms}, +}; +use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ component::Component, entity::Entity, - query::{Has, QueryState, Without}, - schedule::{common_conditions::resource_exists, IntoSystemConfigs as _}, - system::{lifetimeless::Read, Commands, Res, ResMut, Resource}, + prelude::resource_exists, + query::{Has, Or, QueryState, With, Without}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; +use bevy_render::batching::gpu_preprocessing::{ + GpuPreprocessingMode, IndirectParametersGpuMetadata, UntypedPhaseIndirectParametersBuffers, +}; use bevy_render::{ batching::gpu_preprocessing::{ - BatchedInstanceBuffers, GpuPreprocessingSupport, IndirectParameters, - IndirectParametersBuffer, PreprocessWorkItem, + BatchedInstanceBuffers, GpuOcclusionCullingWorkItemBuffers, 
GpuPreprocessingSupport, + IndirectBatchSet, IndirectParametersBuffers, IndirectParametersCpuMetadata, + IndirectParametersIndexed, IndirectParametersNonIndexed, + LatePreprocessWorkItemIndirectParameters, PreprocessWorkItem, PreprocessWorkItemBuffers, + UntypedPhaseBatchedInstanceBuffers, }, - graph::CameraDriverLabel, - render_graph::{Node, NodeRunError, RenderGraph, RenderGraphContext}, + experimental::occlusion_culling::OcclusionCulling, + render_graph::{Node, NodeRunError, RenderGraphApp, RenderGraphContext}, render_resource::{ - binding_types::{storage_buffer, storage_buffer_read_only, uniform_buffer}, - BindGroup, BindGroupEntries, BindGroupLayout, BindingResource, BufferBinding, + binding_types::{storage_buffer, storage_buffer_read_only, texture_2d, uniform_buffer}, + BindGroup, BindGroupEntries, BindGroupLayout, BindingResource, Buffer, BufferBinding, CachedComputePipelineId, ComputePassDescriptor, ComputePipelineDescriptor, - DynamicBindGroupLayoutEntries, PipelineCache, Shader, ShaderStages, ShaderType, - SpecializedComputePipeline, SpecializedComputePipelines, + DynamicBindGroupLayoutEntries, PipelineCache, PushConstantRange, RawBufferVec, Shader, + ShaderStages, ShaderType, SpecializedComputePipeline, SpecializedComputePipelines, + TextureSampleType, UninitBufferVec, }, renderer::{RenderContext, RenderDevice, RenderQueue}, - view::{NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms}, + settings::WgpuFeatures, + view::{ExtractedView, NoIndirectDrawing, ViewUniform, ViewUniformOffset, ViewUniforms}, Render, RenderApp, RenderSet, }; -use bevy_utils::tracing::warn; +use bevy_utils::TypeIdMap; use bitflags::bitflags; use smallvec::{smallvec, SmallVec}; +use tracing::warn; use crate::{ graph::NodePbr, MeshCullingData, MeshCullingDataBuffer, MeshInputUniform, MeshUniform, }; +use super::{ShadowView, ViewLightEntities}; + /// The handle to the `mesh_preprocess.wgsl` compute shader. 
pub const MESH_PREPROCESS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(16991728318640779533); + weak_handle!("c8579292-cf92-43b5-9c5a-ec5bd4e44d12"); +/// The handle to the `reset_indirect_batch_sets.wgsl` compute shader. +pub const RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE: Handle = + weak_handle!("045fb176-58e2-4e76-b241-7688d761bb23"); +/// The handle to the `build_indirect_params.wgsl` compute shader. +pub const BUILD_INDIRECT_PARAMS_SHADER_HANDLE: Handle = + weak_handle!("133b01f0-3eaf-4590-9ee9-f0cf91a00b71"); /// The GPU workgroup size. const WORKGROUP_SIZE: usize = 64; @@ -63,28 +88,167 @@ pub struct GpuMeshPreprocessPlugin { pub use_gpu_instance_buffer_builder: bool, } -/// The render node for the mesh uniform building pass. -pub struct GpuPreprocessNode { +/// The render node that clears out the GPU-side indirect metadata buffers. +/// +/// This is only used when indirect drawing is enabled. +#[derive(Default)] +pub struct ClearIndirectParametersMetadataNode; + +/// The render node for the first mesh preprocessing pass. +/// +/// This pass runs a compute shader to cull meshes outside the view frustum (if +/// that wasn't done by the CPU), cull meshes that weren't visible last frame +/// (if occlusion culling is on), transform them, and, if indirect drawing is +/// on, populate indirect draw parameter metadata for the subsequent +/// [`EarlyPrepassBuildIndirectParametersNode`]. +pub struct EarlyGpuPreprocessNode { view_query: QueryState< ( - Entity, - Read, - Read, + Read, + Option>, + Option>, Has, + Has, ), Without, >, + main_view_query: QueryState>, +} + +/// The render node for the second mesh preprocessing pass. 
+/// +/// This pass runs a compute shader to cull meshes outside the view frustum (if +/// that wasn't done by the CPU), cull meshes that were neither visible last +/// frame nor visible this frame (if occlusion culling is on), transform them, +/// and, if indirect drawing is on, populate the indirect draw parameter +/// metadata for the subsequent [`LatePrepassBuildIndirectParametersNode`]. +pub struct LateGpuPreprocessNode { + view_query: QueryState< + ( + Read, + Read, + Read, + ), + ( + Without, + Without, + With, + With, + ), + >, +} + +/// The render node for the part of the indirect parameter building pass that +/// draws the meshes visible from the previous frame. +/// +/// This node runs a compute shader on the output of the +/// [`EarlyGpuPreprocessNode`] in order to transform the +/// [`IndirectParametersGpuMetadata`] into properly-formatted +/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`]. +pub struct EarlyPrepassBuildIndirectParametersNode { + view_query: QueryState< + Read, + ( + Without, + Without, + Or<(With, With)>, + ), + >, +} + +/// The render node for the part of the indirect parameter building pass that +/// draws the meshes that are potentially visible on this frame but weren't +/// visible on the previous frame. +/// +/// This node runs a compute shader on the output of the +/// [`LateGpuPreprocessNode`] in order to transform the +/// [`IndirectParametersGpuMetadata`] into properly-formatted +/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`]. +pub struct LatePrepassBuildIndirectParametersNode { + view_query: QueryState< + Read, + ( + Without, + Without, + Or<(With, With)>, + With, + ), + >, +} + +/// The render node for the part of the indirect parameter building pass that +/// draws all meshes, both those that are newly-visible on this frame and those +/// that were visible last frame. 
+/// +/// This node runs a compute shader on the output of the +/// [`EarlyGpuPreprocessNode`] and [`LateGpuPreprocessNode`] in order to +/// transform the [`IndirectParametersGpuMetadata`] into properly-formatted +/// [`IndirectParametersIndexed`] and [`IndirectParametersNonIndexed`]. +pub struct MainBuildIndirectParametersNode { + view_query: QueryState< + Read, + (Without, Without), + >, } -/// The compute shader pipelines for the mesh uniform building pass. +/// The compute shader pipelines for the GPU mesh preprocessing and indirect +/// parameter building passes. #[derive(Resource)] pub struct PreprocessPipelines { /// The pipeline used for CPU culling. This pipeline doesn't populate - /// indirect parameters. - pub direct: PreprocessPipeline, - /// The pipeline used for GPU culling. This pipeline populates indirect + /// indirect parameter metadata. + pub direct_preprocess: PreprocessPipeline, + /// The pipeline used for mesh preprocessing when GPU frustum culling is in + /// use, but occlusion culling isn't. + /// + /// This pipeline populates indirect parameter metadata. + pub gpu_frustum_culling_preprocess: PreprocessPipeline, + /// The pipeline used for the first phase of occlusion culling. + /// + /// This pipeline culls, transforms meshes, and populates indirect parameter + /// metadata. + pub early_gpu_occlusion_culling_preprocess: PreprocessPipeline, + /// The pipeline used for the second phase of occlusion culling. + /// + /// This pipeline culls, transforms meshes, and populates indirect parameter + /// metadata. + pub late_gpu_occlusion_culling_preprocess: PreprocessPipeline, + /// The pipeline that builds indirect draw parameters for indexed meshes, + /// when frustum culling is enabled but occlusion culling *isn't* enabled. 
+ pub gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline, + /// The pipeline that builds indirect draw parameters for non-indexed + /// meshes, when frustum culling is enabled but occlusion culling *isn't* + /// enabled. + pub gpu_frustum_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline, + /// Compute shader pipelines for the early prepass phase that draws meshes + /// visible in the previous frame. + pub early_phase: PreprocessPhasePipelines, + /// Compute shader pipelines for the late prepass phase that draws meshes + /// that weren't visible in the previous frame, but became visible this + /// frame. + pub late_phase: PreprocessPhasePipelines, + /// Compute shader pipelines for the main color phase. + pub main_phase: PreprocessPhasePipelines, +} + +/// Compute shader pipelines for a specific phase: early, late, or main. +/// +/// The distinction between these phases is relevant for occlusion culling. +#[derive(Clone)] +pub struct PreprocessPhasePipelines { + /// The pipeline that resets the indirect draw counts used in + /// `multi_draw_indirect_count` to 0 in preparation for a new pass. + pub reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline, + /// The pipeline used for indexed indirect parameter building. + /// + /// This pipeline converts indirect parameter metadata into indexed indirect /// parameters. - pub gpu_culling: PreprocessPipeline, + pub gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline, + /// The pipeline used for non-indexed indirect parameter building. + /// + /// This pipeline converts indirect parameter metadata into non-indexed + /// indirect parameters. + pub gpu_occlusion_culling_build_non_indexed_indirect_params: BuildIndirectParametersPipeline, } /// The pipeline for the GPU mesh preprocessing shader. @@ -97,26 +261,172 @@ pub struct PreprocessPipeline { pub pipeline_id: Option, } +/// The pipeline for the batch set count reset shader. 
+/// +/// This shader resets the indirect batch set count to 0 for each view. It runs +/// in between every phase (early, late, and main). +#[derive(Clone)] +pub struct ResetIndirectBatchSetsPipeline { + /// The bind group layout for the compute shader. + pub bind_group_layout: BindGroupLayout, + /// The pipeline ID for the compute shader. + /// + /// This gets filled in `prepare_preprocess_pipelines`. + pub pipeline_id: Option, +} + +/// The pipeline for the indirect parameter building shader. +#[derive(Clone)] +pub struct BuildIndirectParametersPipeline { + /// The bind group layout for the compute shader. + pub bind_group_layout: BindGroupLayout, + /// The pipeline ID for the compute shader. + /// + /// This gets filled in `prepare_preprocess_pipelines`. + pub pipeline_id: Option, +} + bitflags! { /// Specifies variants of the mesh preprocessing shader. #[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct PreprocessPipelineKey: u8 { - /// Whether GPU culling is in use. + /// Whether GPU frustum culling is in use. + /// + /// This `#define`'s `FRUSTUM_CULLING` in the shader. + const FRUSTUM_CULLING = 1; + /// Whether GPU two-phase occlusion culling is in use. /// - /// This `#define`'s `GPU_CULLING` in the shader. - const GPU_CULLING = 1; + /// This `#define`'s `OCCLUSION_CULLING` in the shader. + const OCCLUSION_CULLING = 2; + /// Whether this is the early phase of GPU two-phase occlusion culling. + /// + /// This `#define`'s `EARLY_PHASE` in the shader. + const EARLY_PHASE = 4; + } + + /// Specifies variants of the indirect parameter building shader. + #[derive(Clone, Copy, PartialEq, Eq, Hash)] + pub struct BuildIndirectParametersPipelineKey: u8 { + /// Whether the indirect parameter building shader is processing indexed + /// meshes (those that have index buffers). + /// + /// This defines `INDEXED` in the shader. + const INDEXED = 1; + /// Whether the GPU and driver supports `multi_draw_indirect_count`. 
+ /// + /// This defines `MULTI_DRAW_INDIRECT_COUNT_SUPPORTED` in the shader. + const MULTI_DRAW_INDIRECT_COUNT_SUPPORTED = 2; + /// Whether GPU two-phase occlusion culling is in use. + /// + /// This `#define`'s `OCCLUSION_CULLING` in the shader. + const OCCLUSION_CULLING = 4; + /// Whether this is the early phase of GPU two-phase occlusion culling. + /// + /// This `#define`'s `EARLY_PHASE` in the shader. + const EARLY_PHASE = 8; + /// Whether this is the late phase of GPU two-phase occlusion culling. + /// + /// This `#define`'s `LATE_PHASE` in the shader. + const LATE_PHASE = 16; + /// Whether this is the phase that runs after the early and late phases, + /// and right before the main drawing logic, when GPU two-phase + /// occlusion culling is in use. + /// + /// This `#define`'s `MAIN_PHASE` in the shader. + const MAIN_PHASE = 32; } } -/// The compute shader bind group for the mesh uniform building pass. +/// The compute shader bind group for the mesh preprocessing pass for each +/// render phase. +/// +/// This goes on the view. It maps the [`core::any::TypeId`] of a render phase +/// (e.g. [`bevy_core_pipeline::core_3d::Opaque3d`]) to the +/// [`PhasePreprocessBindGroups`] for that phase. +#[derive(Component, Clone, Deref, DerefMut)] +pub struct PreprocessBindGroups(pub TypeIdMap); + +/// The compute shader bind group for the mesh preprocessing step for a single +/// render phase on a single view. +#[derive(Clone)] +pub enum PhasePreprocessBindGroups { + /// The bind group used for the single invocation of the compute shader when + /// indirect drawing is *not* being used. + /// + /// Because direct drawing doesn't require splitting the meshes into indexed + /// and non-indexed meshes, there's only one bind group in this case. + Direct(BindGroup), + + /// The bind groups used for the compute shader when indirect drawing is + /// being used, but occlusion culling isn't being used. 
+    ///
+    /// Because indirect drawing requires splitting the meshes into indexed and
+    /// non-indexed meshes, there are two bind groups here.
+    IndirectFrustumCulling {
+        /// The bind group for indexed meshes.
+        indexed: Option<BindGroup>,
+        /// The bind group for non-indexed meshes.
+        non_indexed: Option<BindGroup>,
+    },
+
+    /// The bind groups used for the compute shader when both indirect drawing
+    /// and occlusion culling are being used.
+    ///
+    /// Because indirect drawing requires splitting the meshes into indexed and
+    /// non-indexed meshes, and because occlusion culling requires splitting
+    /// this phase into early and late versions, there are four bind groups
+    /// here.
+    IndirectOcclusionCulling {
+        /// The bind group for indexed meshes during the early mesh
+        /// preprocessing phase.
+        early_indexed: Option<BindGroup>,
+        /// The bind group for non-indexed meshes during the early mesh
+        /// preprocessing phase.
+        early_non_indexed: Option<BindGroup>,
+        /// The bind group for indexed meshes during the late mesh preprocessing
+        /// phase.
+        late_indexed: Option<BindGroup>,
+        /// The bind group for non-indexed meshes during the late mesh
+        /// preprocessing phase.
+        late_non_indexed: Option<BindGroup>,
+    },
+}
+
+/// The bind groups for the compute shaders that reset indirect draw counts and
+/// build indirect parameters.
 ///
-/// This goes on the view.
-#[derive(Component, Clone)]
-pub struct PreprocessBindGroup(BindGroup);
+/// There's one set of bind group for each phase. Phases are keyed off their
+/// [`core::any::TypeId`].
+#[derive(Resource, Default, Deref, DerefMut)]
+pub struct BuildIndirectParametersBindGroups(pub TypeIdMap<PhaseBuildIndirectParametersBindGroups>);
+
+impl BuildIndirectParametersBindGroups {
+    /// Creates a new, empty [`BuildIndirectParametersBindGroups`] table.
+    pub fn new() -> BuildIndirectParametersBindGroups {
+        Self::default()
+    }
+}
+
+/// The per-phase set of bind groups for the compute shaders that reset indirect
+/// draw counts and build indirect parameters.
+pub struct PhaseBuildIndirectParametersBindGroups { + /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for + /// indexed meshes. + reset_indexed_indirect_batch_sets: Option, + /// The bind group for the `reset_indirect_batch_sets.wgsl` shader, for + /// non-indexed meshes. + reset_non_indexed_indirect_batch_sets: Option, + /// The bind group for the `build_indirect_params.wgsl` shader, for indexed + /// meshes. + build_indexed_indirect: Option, + /// The bind group for the `build_indirect_params.wgsl` shader, for + /// non-indexed meshes. + build_non_indexed_indirect: Option, +} /// Stops the `GpuPreprocessNode` attempting to generate the buffer for this view /// useful to avoid duplicating effort if the bind group is shared between views -#[derive(Component)] +#[derive(Component, Default)] pub struct SkipGpuPreprocess; impl Plugin for GpuMeshPreprocessPlugin { @@ -127,6 +437,18 @@ impl Plugin for GpuMeshPreprocessPlugin { "mesh_preprocess.wgsl", Shader::from_wgsl ); + load_internal_asset!( + app, + RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE, + "reset_indirect_batch_sets.wgsl", + Shader::from_wgsl + ); + load_internal_asset!( + app, + BUILD_INDIRECT_PARAMS_SHADER_HANDLE, + "build_indirect_params.wgsl", + Shader::from_wgsl + ); } fn finish(&self, app: &mut App) { @@ -141,54 +463,147 @@ impl Plugin for GpuMeshPreprocessPlugin { return; } - // Stitch the node in. 
- let gpu_preprocess_node = GpuPreprocessNode::from_world(render_app.world_mut()); - let mut render_graph = render_app.world_mut().resource_mut::(); - render_graph.add_node(NodePbr::GpuPreprocess, gpu_preprocess_node); - render_graph.add_node_edge(NodePbr::GpuPreprocess, CameraDriverLabel); - render_app .init_resource::() .init_resource::>() + .init_resource::>() + .init_resource::>() .add_systems( Render, ( prepare_preprocess_pipelines.in_set(RenderSet::Prepare), prepare_preprocess_bind_groups - .run_if( - resource_exists::>, - ) + .run_if(resource_exists::>) .in_set(RenderSet::PrepareBindGroups), write_mesh_culling_data_buffer.in_set(RenderSet::PrepareResourcesFlush), + ), + ) + .add_render_graph_node::( + Core3d, + NodePbr::ClearIndirectParametersMetadata + ) + .add_render_graph_node::(Core3d, NodePbr::EarlyGpuPreprocess) + .add_render_graph_node::(Core3d, NodePbr::LateGpuPreprocess) + .add_render_graph_node::( + Core3d, + NodePbr::EarlyPrepassBuildIndirectParameters, + ) + .add_render_graph_node::( + Core3d, + NodePbr::LatePrepassBuildIndirectParameters, + ) + .add_render_graph_node::( + Core3d, + NodePbr::MainBuildIndirectParameters, + ) + .add_render_graph_edges( + Core3d, + ( + NodePbr::ClearIndirectParametersMetadata, + NodePbr::EarlyGpuPreprocess, + NodePbr::EarlyPrepassBuildIndirectParameters, + Node3d::EarlyPrepass, + Node3d::EarlyDeferredPrepass, + Node3d::EarlyDownsampleDepth, + NodePbr::LateGpuPreprocess, + NodePbr::LatePrepassBuildIndirectParameters, + Node3d::LatePrepass, + Node3d::LateDeferredPrepass, + NodePbr::MainBuildIndirectParameters, + Node3d::StartMainPass, + ), + ).add_render_graph_edges( + Core3d, + ( + NodePbr::EarlyPrepassBuildIndirectParameters, + NodePbr::EarlyShadowPass, + Node3d::EarlyDownsampleDepth, + ) + ).add_render_graph_edges( + Core3d, + ( + NodePbr::LatePrepassBuildIndirectParameters, + NodePbr::LateShadowPass, + NodePbr::MainBuildIndirectParameters, ) ); } } -impl FromWorld for GpuPreprocessNode { +impl Node for 
ClearIndirectParametersMetadataNode { + fn run<'w>( + &self, + _: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let Some(indirect_parameters_buffers) = world.get_resource::() + else { + return Ok(()); + }; + + // Clear out each indexed and non-indexed GPU-side buffer. + for phase_indirect_parameters_buffers in indirect_parameters_buffers.values() { + if let Some(indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer() + { + render_context.command_encoder().clear_buffer( + indexed_gpu_metadata_buffer, + 0, + Some( + phase_indirect_parameters_buffers.indexed.batch_count() as u64 + * size_of::() as u64, + ), + ); + } + + if let Some(non_indexed_gpu_metadata_buffer) = phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer() + { + render_context.command_encoder().clear_buffer( + non_indexed_gpu_metadata_buffer, + 0, + Some( + phase_indirect_parameters_buffers.non_indexed.batch_count() as u64 + * size_of::() as u64, + ), + ); + } + } + + Ok(()) + } +} + +impl FromWorld for EarlyGpuPreprocessNode { fn from_world(world: &mut World) -> Self { Self { view_query: QueryState::new(world), + main_view_query: QueryState::new(world), } } } -impl Node for GpuPreprocessNode { +impl Node for EarlyGpuPreprocessNode { fn update(&mut self, world: &mut World) { self.view_query.update_archetypes(world); + self.main_view_query.update_archetypes(world); } fn run<'w>( &self, - _: &mut RenderGraphContext, + graph: &mut RenderGraphContext, render_context: &mut RenderContext<'w>, world: &'w World, ) -> Result<(), NodeRunError> { // Grab the [`BatchedInstanceBuffers`]. - let BatchedInstanceBuffers { - work_item_buffers: ref index_buffers, - .. 
- } = world.resource::>(); + let batched_instance_buffers = + world.resource::>(); let pipeline_cache = world.resource::(); let preprocess_pipelines = world.resource::(); @@ -197,281 +612,2079 @@ impl Node for GpuPreprocessNode { render_context .command_encoder() .begin_compute_pass(&ComputePassDescriptor { - label: Some("mesh preprocessing"), + label: Some("early mesh preprocessing"), timestamp_writes: None, }); - // Run the compute passes. - for (view, bind_group, view_uniform_offset, no_indirect_drawing) in - self.view_query.iter_manual(world) + let mut all_views: SmallVec<[_; 8]> = SmallVec::new(); + all_views.push(graph.view_entity()); + if let Ok(shadow_cascade_views) = + self.main_view_query.get_manual(world, graph.view_entity()) { - // Grab the index buffer for this view. - let Some(index_buffer) = index_buffers.get(&view) else { - warn!("The preprocessing index buffer wasn't present"); + all_views.extend(shadow_cascade_views.lights.iter().copied()); + } + + // Run the compute passes. + + for view_entity in all_views { + let Ok(( + view, + bind_groups, + view_uniform_offset, + no_indirect_drawing, + occlusion_culling, + )) = self.view_query.get_manual(world, view_entity) + else { + continue; + }; + + let Some(bind_groups) = bind_groups else { + continue; + }; + let Some(view_uniform_offset) = view_uniform_offset else { continue; }; // Select the right pipeline, depending on whether GPU culling is in // use. - let maybe_pipeline_id = if !no_indirect_drawing { - preprocess_pipelines.gpu_culling.pipeline_id + let maybe_pipeline_id = if no_indirect_drawing { + preprocess_pipelines.direct_preprocess.pipeline_id + } else if occlusion_culling { + preprocess_pipelines + .early_gpu_occlusion_culling_preprocess + .pipeline_id } else { - preprocess_pipelines.direct.pipeline_id + preprocess_pipelines + .gpu_frustum_culling_preprocess + .pipeline_id }; // Fetch the pipeline. 
let Some(preprocess_pipeline_id) = maybe_pipeline_id else { warn!("The build mesh uniforms pipeline wasn't ready"); - return Ok(()); + continue; }; let Some(preprocess_pipeline) = pipeline_cache.get_compute_pipeline(preprocess_pipeline_id) else { // This will happen while the pipeline is being compiled and is fine. - return Ok(()); + continue; }; compute_pass.set_pipeline(preprocess_pipeline); - let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![]; - if !no_indirect_drawing { - dynamic_offsets.push(view_uniform_offset.offset); - } - compute_pass.set_bind_group(0, &bind_group.0, &dynamic_offsets); + // Loop over each render phase. + for (phase_type_id, batched_phase_instance_buffers) in + &batched_instance_buffers.phase_instance_buffers + { + // Grab the work item buffers for this view. + let Some(work_item_buffers) = batched_phase_instance_buffers + .work_item_buffers + .get(&view.retained_view_entity) + else { + continue; + }; - let workgroup_count = index_buffer.buffer.len().div_ceil(WORKGROUP_SIZE); - compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); - } + // Fetch the bind group for the render phase. + let Some(phase_bind_groups) = bind_groups.get(phase_type_id) else { + continue; + }; - Ok(()) - } -} + // Make sure the mesh preprocessing shader has access to the + // view info it needs to do culling and motion vector + // computation. + let dynamic_offsets = [view_uniform_offset.offset]; -impl PreprocessPipelines { - pub(crate) fn pipelines_are_loaded(&self, pipeline_cache: &PipelineCache) -> bool { - self.direct.is_loaded(pipeline_cache) && self.gpu_culling.is_loaded(pipeline_cache) - } -} + // Are we drawing directly or indirectly? + match *phase_bind_groups { + PhasePreprocessBindGroups::Direct(ref bind_group) => { + // Invoke the mesh preprocessing shader to transform + // meshes only, but not cull. 
+ let PreprocessWorkItemBuffers::Direct(work_item_buffer) = work_item_buffers + else { + continue; + }; + compute_pass.set_bind_group(0, bind_group, &dynamic_offsets); + let workgroup_count = work_item_buffer.len().div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + } -impl PreprocessPipeline { - fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool { - self.pipeline_id - .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some()) - } -} + PhasePreprocessBindGroups::IndirectFrustumCulling { + indexed: ref maybe_indexed_bind_group, + non_indexed: ref maybe_non_indexed_bind_group, + } + | PhasePreprocessBindGroups::IndirectOcclusionCulling { + early_indexed: ref maybe_indexed_bind_group, + early_non_indexed: ref maybe_non_indexed_bind_group, + .. + } => { + // Invoke the mesh preprocessing shader to transform and + // cull the meshes. + let PreprocessWorkItemBuffers::Indirect { + indexed: indexed_buffer, + non_indexed: non_indexed_buffer, + .. + } = work_item_buffers + else { + continue; + }; -impl SpecializedComputePipeline for PreprocessPipeline { - type Key = PreprocessPipelineKey; + // Transform and cull indexed meshes if there are any. + if let Some(indexed_bind_group) = maybe_indexed_bind_group { + if let PreprocessWorkItemBuffers::Indirect { + gpu_occlusion_culling: + Some(GpuOcclusionCullingWorkItemBuffers { + late_indirect_parameters_indexed_offset, + .. + }), + .. 
+ } = *work_item_buffers + { + compute_pass.set_push_constants( + 0, + bytemuck::bytes_of(&late_indirect_parameters_indexed_offset), + ); + } - fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor { - let mut shader_defs = vec![]; - if key.contains(PreprocessPipelineKey::GPU_CULLING) { - shader_defs.push("INDIRECT".into()); - shader_defs.push("FRUSTUM_CULLING".into()); - } + compute_pass.set_bind_group(0, indexed_bind_group, &dynamic_offsets); + let workgroup_count = indexed_buffer.len().div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + } - ComputePipelineDescriptor { - label: Some( - format!( - "mesh preprocessing ({})", - if key.contains(PreprocessPipelineKey::GPU_CULLING) { - "GPU culling" - } else { - "direct" + // Transform and cull non-indexed meshes if there are any. + if let Some(non_indexed_bind_group) = maybe_non_indexed_bind_group { + if let PreprocessWorkItemBuffers::Indirect { + gpu_occlusion_culling: + Some(GpuOcclusionCullingWorkItemBuffers { + late_indirect_parameters_non_indexed_offset, + .. + }), + .. 
+ } = *work_item_buffers + { + compute_pass.set_push_constants( + 0, + bytemuck::bytes_of( + &late_indirect_parameters_non_indexed_offset, + ), + ); + } + + compute_pass.set_bind_group( + 0, + non_indexed_bind_group, + &dynamic_offsets, + ); + let workgroup_count = non_indexed_buffer.len().div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + } } - ) - .into(), - ), - layout: vec![self.bind_group_layout.clone()], - push_constant_ranges: vec![], - shader: MESH_PREPROCESS_SHADER_HANDLE, - shader_defs, - entry_point: "main".into(), - zero_initialize_workgroup_memory: false, + } + } } + + Ok(()) } } -impl FromWorld for PreprocessPipelines { +impl FromWorld for EarlyPrepassBuildIndirectParametersNode { fn from_world(world: &mut World) -> Self { - let render_device = world.resource::(); - - // GPU culling bind group parameters are a superset of those in the CPU - // culling (direct) shader. - let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries(); - let gpu_culling_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries() - .extend_sequential(( - // `indirect_parameters` - storage_buffer::(/* has_dynamic_offset= */ false), - // `mesh_culling_data` - storage_buffer_read_only::(/* has_dynamic_offset= */ false), - // `view` - uniform_buffer::(/* has_dynamic_offset= */ true), - )); - - let direct_bind_group_layout = render_device.create_bind_group_layout( - "build mesh uniforms direct bind group layout", - &direct_bind_group_layout_entries, - ); - let gpu_culling_bind_group_layout = render_device.create_bind_group_layout( - "build mesh uniforms GPU culling bind group layout", - &gpu_culling_bind_group_layout_entries, - ); - - PreprocessPipelines { - direct: PreprocessPipeline { - bind_group_layout: direct_bind_group_layout, - pipeline_id: None, - }, - gpu_culling: PreprocessPipeline { - bind_group_layout: gpu_culling_bind_group_layout, - pipeline_id: None, - 
}, + Self { + view_query: QueryState::new(world), } } } -fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries { - DynamicBindGroupLayoutEntries::sequential( - ShaderStages::COMPUTE, - ( - // `current_input` - storage_buffer_read_only::(false), - // `previous_input` - storage_buffer_read_only::(false), - // `indices` - storage_buffer_read_only::(false), - // `output` - storage_buffer::(false), - ), - ) +impl FromWorld for LatePrepassBuildIndirectParametersNode { + fn from_world(world: &mut World) -> Self { + Self { + view_query: QueryState::new(world), + } + } } -/// A system that specializes the `mesh_preprocess.wgsl` pipelines if necessary. -pub fn prepare_preprocess_pipelines( - pipeline_cache: Res, - mut pipelines: ResMut>, - mut preprocess_pipelines: ResMut, -) { - preprocess_pipelines.direct.prepare( - &pipeline_cache, - &mut pipelines, - PreprocessPipelineKey::empty(), - ); - preprocess_pipelines.gpu_culling.prepare( - &pipeline_cache, - &mut pipelines, - PreprocessPipelineKey::GPU_CULLING, - ); +impl FromWorld for MainBuildIndirectParametersNode { + fn from_world(world: &mut World) -> Self { + Self { + view_query: QueryState::new(world), + } + } } -impl PreprocessPipeline { - fn prepare( - &mut self, - pipeline_cache: &PipelineCache, - pipelines: &mut SpecializedComputePipelines, - key: PreprocessPipelineKey, - ) { - if self.pipeline_id.is_some() { - return; +impl FromWorld for LateGpuPreprocessNode { + fn from_world(world: &mut World) -> Self { + Self { + view_query: QueryState::new(world), } - - let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key); - self.pipeline_id = Some(preprocess_pipeline_id); } } -/// A system that attaches the mesh uniform buffers to the bind groups for the -/// variants of the mesh preprocessing compute shader. 
-pub fn prepare_preprocess_bind_groups( - mut commands: Commands, - render_device: Res, - batched_instance_buffers: Res>, - indirect_parameters_buffer: Res, +impl Node for LateGpuPreprocessNode { + fn update(&mut self, world: &mut World) { + self.view_query.update_archetypes(world); + } + + fn run<'w>( + &self, + _: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + // Grab the [`BatchedInstanceBuffers`]. + let batched_instance_buffers = + world.resource::>(); + + let pipeline_cache = world.resource::(); + let preprocess_pipelines = world.resource::(); + + let mut compute_pass = + render_context + .command_encoder() + .begin_compute_pass(&ComputePassDescriptor { + label: Some("late mesh preprocessing"), + timestamp_writes: None, + }); + + // Run the compute passes. + for (view, bind_groups, view_uniform_offset) in self.view_query.iter_manual(world) { + let maybe_pipeline_id = preprocess_pipelines + .late_gpu_occlusion_culling_preprocess + .pipeline_id; + + // Fetch the pipeline. + let Some(preprocess_pipeline_id) = maybe_pipeline_id else { + warn!("The build mesh uniforms pipeline wasn't ready"); + return Ok(()); + }; + + let Some(preprocess_pipeline) = + pipeline_cache.get_compute_pipeline(preprocess_pipeline_id) + else { + // This will happen while the pipeline is being compiled and is fine. + return Ok(()); + }; + + compute_pass.set_pipeline(preprocess_pipeline); + + // Loop over each phase. Because we built the phases in parallel, + // each phase has a separate set of instance buffers. + for (phase_type_id, batched_phase_instance_buffers) in + &batched_instance_buffers.phase_instance_buffers + { + let UntypedPhaseBatchedInstanceBuffers { + ref work_item_buffers, + ref late_indexed_indirect_parameters_buffer, + ref late_non_indexed_indirect_parameters_buffer, + .. + } = *batched_phase_instance_buffers; + + // Grab the work item buffers for this view. 
+ let Some(phase_work_item_buffers) = + work_item_buffers.get(&view.retained_view_entity) + else { + continue; + }; + + let ( + PreprocessWorkItemBuffers::Indirect { + gpu_occlusion_culling: + Some(GpuOcclusionCullingWorkItemBuffers { + late_indirect_parameters_indexed_offset, + late_indirect_parameters_non_indexed_offset, + .. + }), + .. + }, + Some(PhasePreprocessBindGroups::IndirectOcclusionCulling { + late_indexed: maybe_late_indexed_bind_group, + late_non_indexed: maybe_late_non_indexed_bind_group, + .. + }), + Some(late_indexed_indirect_parameters_buffer), + Some(late_non_indexed_indirect_parameters_buffer), + ) = ( + phase_work_item_buffers, + bind_groups.get(phase_type_id), + late_indexed_indirect_parameters_buffer.buffer(), + late_non_indexed_indirect_parameters_buffer.buffer(), + ) + else { + continue; + }; + + let mut dynamic_offsets: SmallVec<[u32; 1]> = smallvec![]; + dynamic_offsets.push(view_uniform_offset.offset); + + // If there's no space reserved for work items, then don't + // bother doing the dispatch, as there can't possibly be any + // meshes of the given class (indexed or non-indexed) in this + // phase. + + // Transform and cull indexed meshes if there are any. + if let Some(late_indexed_bind_group) = maybe_late_indexed_bind_group { + compute_pass.set_push_constants( + 0, + bytemuck::bytes_of(late_indirect_parameters_indexed_offset), + ); + + compute_pass.set_bind_group(0, late_indexed_bind_group, &dynamic_offsets); + compute_pass.dispatch_workgroups_indirect( + late_indexed_indirect_parameters_buffer, + (*late_indirect_parameters_indexed_offset as u64) + * (size_of::() as u64), + ); + } + + // Transform and cull non-indexed meshes if there are any. 
+ if let Some(late_non_indexed_bind_group) = maybe_late_non_indexed_bind_group { + compute_pass.set_push_constants( + 0, + bytemuck::bytes_of(late_indirect_parameters_non_indexed_offset), + ); + + compute_pass.set_bind_group(0, late_non_indexed_bind_group, &dynamic_offsets); + compute_pass.dispatch_workgroups_indirect( + late_non_indexed_indirect_parameters_buffer, + (*late_indirect_parameters_non_indexed_offset as u64) + * (size_of::() as u64), + ); + } + } + } + + Ok(()) + } +} + +impl Node for EarlyPrepassBuildIndirectParametersNode { + fn update(&mut self, world: &mut World) { + self.view_query.update_archetypes(world); + } + + fn run<'w>( + &self, + _: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let preprocess_pipelines = world.resource::(); + + // If there are no views with a depth prepass enabled, we don't need to + // run this. + if self.view_query.iter_manual(world).next().is_none() { + return Ok(()); + } + + run_build_indirect_parameters_node( + render_context, + world, + &preprocess_pipelines.early_phase, + "early prepass indirect parameters building", + ) + } +} + +impl Node for LatePrepassBuildIndirectParametersNode { + fn update(&mut self, world: &mut World) { + self.view_query.update_archetypes(world); + } + + fn run<'w>( + &self, + _: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let preprocess_pipelines = world.resource::(); + + // If there are no views with occlusion culling enabled, we don't need + // to run this. 
+ if self.view_query.iter_manual(world).next().is_none() { + return Ok(()); + } + + run_build_indirect_parameters_node( + render_context, + world, + &preprocess_pipelines.late_phase, + "late prepass indirect parameters building", + ) + } +} + +impl Node for MainBuildIndirectParametersNode { + fn update(&mut self, world: &mut World) { + self.view_query.update_archetypes(world); + } + + fn run<'w>( + &self, + _: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let preprocess_pipelines = world.resource::(); + + run_build_indirect_parameters_node( + render_context, + world, + &preprocess_pipelines.main_phase, + "main indirect parameters building", + ) + } +} + +fn run_build_indirect_parameters_node( + render_context: &mut RenderContext, + world: &World, + preprocess_phase_pipelines: &PreprocessPhasePipelines, + label: &'static str, +) -> Result<(), NodeRunError> { + let Some(build_indirect_params_bind_groups) = + world.get_resource::() + else { + return Ok(()); + }; + + let pipeline_cache = world.resource::(); + let indirect_parameters_buffers = world.resource::(); + + let mut compute_pass = + render_context + .command_encoder() + .begin_compute_pass(&ComputePassDescriptor { + label: Some(label), + timestamp_writes: None, + }); + + // Fetch the pipeline. 
+ let ( + Some(reset_indirect_batch_sets_pipeline_id), + Some(build_indexed_indirect_params_pipeline_id), + Some(build_non_indexed_indirect_params_pipeline_id), + ) = ( + preprocess_phase_pipelines + .reset_indirect_batch_sets + .pipeline_id, + preprocess_phase_pipelines + .gpu_occlusion_culling_build_indexed_indirect_params + .pipeline_id, + preprocess_phase_pipelines + .gpu_occlusion_culling_build_non_indexed_indirect_params + .pipeline_id, + ) + else { + warn!("The build indirect parameters pipelines weren't ready"); + return Ok(()); + }; + + let ( + Some(reset_indirect_batch_sets_pipeline), + Some(build_indexed_indirect_params_pipeline), + Some(build_non_indexed_indirect_params_pipeline), + ) = ( + pipeline_cache.get_compute_pipeline(reset_indirect_batch_sets_pipeline_id), + pipeline_cache.get_compute_pipeline(build_indexed_indirect_params_pipeline_id), + pipeline_cache.get_compute_pipeline(build_non_indexed_indirect_params_pipeline_id), + ) + else { + // This will happen while the pipeline is being compiled and is fine. + return Ok(()); + }; + + // Loop over each phase. As each has as separate set of buffers, we need to + // build indirect parameters individually for each phase. + for (phase_type_id, phase_build_indirect_params_bind_groups) in + build_indirect_params_bind_groups.iter() + { + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffers.get(phase_type_id) + else { + continue; + }; + + // Build indexed indirect parameters. 
+ if let ( + Some(reset_indexed_indirect_batch_sets_bind_group), + Some(build_indirect_indexed_params_bind_group), + ) = ( + &phase_build_indirect_params_bind_groups.reset_indexed_indirect_batch_sets, + &phase_build_indirect_params_bind_groups.build_indexed_indirect, + ) { + compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline); + compute_pass.set_bind_group(0, reset_indexed_indirect_batch_sets_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .batch_set_count(true) + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + + compute_pass.set_pipeline(build_indexed_indirect_params_pipeline); + compute_pass.set_bind_group(0, build_indirect_indexed_params_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .indexed + .batch_count() + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + } + + // Build non-indexed indirect parameters. 
+ if let ( + Some(reset_non_indexed_indirect_batch_sets_bind_group), + Some(build_indirect_non_indexed_params_bind_group), + ) = ( + &phase_build_indirect_params_bind_groups.reset_non_indexed_indirect_batch_sets, + &phase_build_indirect_params_bind_groups.build_non_indexed_indirect, + ) { + compute_pass.set_pipeline(reset_indirect_batch_sets_pipeline); + compute_pass.set_bind_group(0, reset_non_indexed_indirect_batch_sets_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .batch_set_count(false) + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + + compute_pass.set_pipeline(build_non_indexed_indirect_params_pipeline); + compute_pass.set_bind_group(0, build_indirect_non_indexed_params_bind_group, &[]); + let workgroup_count = phase_indirect_parameters_buffers + .non_indexed + .batch_count() + .div_ceil(WORKGROUP_SIZE); + if workgroup_count > 0 { + compute_pass.dispatch_workgroups(workgroup_count as u32, 1, 1); + } + } + } + + Ok(()) +} + +impl PreprocessPipelines { + /// Returns true if the preprocessing and indirect parameters pipelines have + /// been loaded or false otherwise. 
+ pub(crate) fn pipelines_are_loaded( + &self, + pipeline_cache: &PipelineCache, + preprocessing_support: &GpuPreprocessingSupport, + ) -> bool { + match preprocessing_support.max_supported_mode { + GpuPreprocessingMode::None => false, + GpuPreprocessingMode::PreprocessingOnly => { + self.direct_preprocess.is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_preprocess + .is_loaded(pipeline_cache) + } + GpuPreprocessingMode::Culling => { + self.direct_preprocess.is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_preprocess + .is_loaded(pipeline_cache) + && self + .early_gpu_occlusion_culling_preprocess + .is_loaded(pipeline_cache) + && self + .late_gpu_occlusion_culling_preprocess + .is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_build_indexed_indirect_params + .is_loaded(pipeline_cache) + && self + .gpu_frustum_culling_build_non_indexed_indirect_params + .is_loaded(pipeline_cache) + && self.early_phase.is_loaded(pipeline_cache) + && self.late_phase.is_loaded(pipeline_cache) + && self.main_phase.is_loaded(pipeline_cache) + } + } + } +} + +impl PreprocessPhasePipelines { + fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool { + self.reset_indirect_batch_sets.is_loaded(pipeline_cache) + && self + .gpu_occlusion_culling_build_indexed_indirect_params + .is_loaded(pipeline_cache) + && self + .gpu_occlusion_culling_build_non_indexed_indirect_params + .is_loaded(pipeline_cache) + } +} + +impl PreprocessPipeline { + fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool { + self.pipeline_id + .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some()) + } +} + +impl ResetIndirectBatchSetsPipeline { + fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool { + self.pipeline_id + .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some()) + } +} + +impl BuildIndirectParametersPipeline { + /// Returns true if this pipeline has been loaded into the pipeline cache or + /// false 
otherwise. + fn is_loaded(&self, pipeline_cache: &PipelineCache) -> bool { + self.pipeline_id + .is_some_and(|pipeline_id| pipeline_cache.get_compute_pipeline(pipeline_id).is_some()) + } +} + +impl SpecializedComputePipeline for PreprocessPipeline { + type Key = PreprocessPipelineKey; + + fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor { + let mut shader_defs = vec!["WRITE_INDIRECT_PARAMETERS_METADATA".into()]; + if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) { + shader_defs.push("INDIRECT".into()); + shader_defs.push("FRUSTUM_CULLING".into()); + } + if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) { + shader_defs.push("OCCLUSION_CULLING".into()); + if key.contains(PreprocessPipelineKey::EARLY_PHASE) { + shader_defs.push("EARLY_PHASE".into()); + } else { + shader_defs.push("LATE_PHASE".into()); + } + } + + ComputePipelineDescriptor { + label: Some( + format!( + "mesh preprocessing ({})", + if key.contains( + PreprocessPipelineKey::OCCLUSION_CULLING + | PreprocessPipelineKey::EARLY_PHASE + ) { + "early GPU occlusion culling" + } else if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) { + "late GPU occlusion culling" + } else if key.contains(PreprocessPipelineKey::FRUSTUM_CULLING) { + "GPU frustum culling" + } else { + "direct" + } + ) + .into(), + ), + layout: vec![self.bind_group_layout.clone()], + push_constant_ranges: if key.contains(PreprocessPipelineKey::OCCLUSION_CULLING) { + vec![PushConstantRange { + stages: ShaderStages::COMPUTE, + range: 0..4, + }] + } else { + vec![] + }, + shader: MESH_PREPROCESS_SHADER_HANDLE, + shader_defs, + entry_point: "main".into(), + zero_initialize_workgroup_memory: false, + } + } +} + +impl FromWorld for PreprocessPipelines { + fn from_world(world: &mut World) -> Self { + let render_device = world.resource::(); + + // GPU culling bind group parameters are a superset of those in the CPU + // culling (direct) shader. 
+ let direct_bind_group_layout_entries = preprocess_direct_bind_group_layout_entries(); + let gpu_frustum_culling_bind_group_layout_entries = gpu_culling_bind_group_layout_entries(); + let gpu_early_occlusion_culling_bind_group_layout_entries = + gpu_occlusion_culling_bind_group_layout_entries().extend_with_indices((( + 11, + storage_buffer::(/*has_dynamic_offset=*/ false), + ),)); + let gpu_late_occlusion_culling_bind_group_layout_entries = + gpu_occlusion_culling_bind_group_layout_entries(); + + let reset_indirect_batch_sets_bind_group_layout_entries = + DynamicBindGroupLayoutEntries::sequential( + ShaderStages::COMPUTE, + (storage_buffer::(false),), + ); + + // Indexed and non-indexed bind group parameters share all the bind + // group layout entries except the final one. + let build_indexed_indirect_params_bind_group_layout_entries = + build_indirect_params_bind_group_layout_entries() + .extend_sequential((storage_buffer::(false),)); + let build_non_indexed_indirect_params_bind_group_layout_entries = + build_indirect_params_bind_group_layout_entries() + .extend_sequential((storage_buffer::(false),)); + + // Create the bind group layouts. 
+ let direct_bind_group_layout = render_device.create_bind_group_layout( + "build mesh uniforms direct bind group layout", + &direct_bind_group_layout_entries, + ); + let gpu_frustum_culling_bind_group_layout = render_device.create_bind_group_layout( + "build mesh uniforms GPU frustum culling bind group layout", + &gpu_frustum_culling_bind_group_layout_entries, + ); + let gpu_early_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout( + "build mesh uniforms GPU early occlusion culling bind group layout", + &gpu_early_occlusion_culling_bind_group_layout_entries, + ); + let gpu_late_occlusion_culling_bind_group_layout = render_device.create_bind_group_layout( + "build mesh uniforms GPU late occlusion culling bind group layout", + &gpu_late_occlusion_culling_bind_group_layout_entries, + ); + let reset_indirect_batch_sets_bind_group_layout = render_device.create_bind_group_layout( + "reset indirect batch sets bind group layout", + &reset_indirect_batch_sets_bind_group_layout_entries, + ); + let build_indexed_indirect_params_bind_group_layout = render_device + .create_bind_group_layout( + "build indexed indirect parameters bind group layout", + &build_indexed_indirect_params_bind_group_layout_entries, + ); + let build_non_indexed_indirect_params_bind_group_layout = render_device + .create_bind_group_layout( + "build non-indexed indirect parameters bind group layout", + &build_non_indexed_indirect_params_bind_group_layout_entries, + ); + + let preprocess_phase_pipelines = PreprocessPhasePipelines { + reset_indirect_batch_sets: ResetIndirectBatchSetsPipeline { + bind_group_layout: reset_indirect_batch_sets_bind_group_layout.clone(), + pipeline_id: None, + }, + gpu_occlusion_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline { + bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(), + pipeline_id: None, + }, + gpu_occlusion_culling_build_non_indexed_indirect_params: + BuildIndirectParametersPipeline { + 
bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(), + pipeline_id: None, + }, + }; + + PreprocessPipelines { + direct_preprocess: PreprocessPipeline { + bind_group_layout: direct_bind_group_layout, + pipeline_id: None, + }, + gpu_frustum_culling_preprocess: PreprocessPipeline { + bind_group_layout: gpu_frustum_culling_bind_group_layout, + pipeline_id: None, + }, + early_gpu_occlusion_culling_preprocess: PreprocessPipeline { + bind_group_layout: gpu_early_occlusion_culling_bind_group_layout, + pipeline_id: None, + }, + late_gpu_occlusion_culling_preprocess: PreprocessPipeline { + bind_group_layout: gpu_late_occlusion_culling_bind_group_layout, + pipeline_id: None, + }, + gpu_frustum_culling_build_indexed_indirect_params: BuildIndirectParametersPipeline { + bind_group_layout: build_indexed_indirect_params_bind_group_layout.clone(), + pipeline_id: None, + }, + gpu_frustum_culling_build_non_indexed_indirect_params: + BuildIndirectParametersPipeline { + bind_group_layout: build_non_indexed_indirect_params_bind_group_layout.clone(), + pipeline_id: None, + }, + early_phase: preprocess_phase_pipelines.clone(), + late_phase: preprocess_phase_pipelines.clone(), + main_phase: preprocess_phase_pipelines.clone(), + } + } +} + +fn preprocess_direct_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries { + DynamicBindGroupLayoutEntries::new_with_indices( + ShaderStages::COMPUTE, + ( + // `view` + ( + 0, + uniform_buffer::(/* has_dynamic_offset= */ true), + ), + // `current_input` + (3, storage_buffer_read_only::(false)), + // `previous_input` + (4, storage_buffer_read_only::(false)), + // `indices` + (5, storage_buffer_read_only::(false)), + // `output` + (6, storage_buffer::(false)), + ), + ) +} + +// Returns the first 4 bind group layout entries shared between all invocations +// of the indirect parameters building shader. 
+fn build_indirect_params_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries { + DynamicBindGroupLayoutEntries::new_with_indices( + ShaderStages::COMPUTE, + ( + (0, storage_buffer_read_only::(false)), + ( + 1, + storage_buffer_read_only::(false), + ), + ( + 2, + storage_buffer_read_only::(false), + ), + (3, storage_buffer::(false)), + ), + ) +} + +/// A system that specializes the `mesh_preprocess.wgsl` and +/// `build_indirect_params.wgsl` pipelines if necessary. +fn gpu_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries { + // GPU culling bind group parameters are a superset of those in the CPU + // culling (direct) shader. + preprocess_direct_bind_group_layout_entries().extend_with_indices(( + // `indirect_parameters_cpu_metadata` + ( + 7, + storage_buffer_read_only::( + /* has_dynamic_offset= */ false, + ), + ), + // `indirect_parameters_gpu_metadata` + ( + 8, + storage_buffer::(/* has_dynamic_offset= */ false), + ), + // `mesh_culling_data` + ( + 9, + storage_buffer_read_only::(/* has_dynamic_offset= */ false), + ), + )) +} + +fn gpu_occlusion_culling_bind_group_layout_entries() -> DynamicBindGroupLayoutEntries { + gpu_culling_bind_group_layout_entries().extend_with_indices(( + ( + 2, + uniform_buffer::(/*has_dynamic_offset=*/ false), + ), + ( + 10, + texture_2d(TextureSampleType::Float { filterable: true }), + ), + ( + 12, + storage_buffer::( + /*has_dynamic_offset=*/ false, + ), + ), + )) +} + +/// A system that specializes the `mesh_preprocess.wgsl` pipelines if necessary. 
+pub fn prepare_preprocess_pipelines( + pipeline_cache: Res, + render_device: Res, + mut specialized_preprocess_pipelines: ResMut>, + mut specialized_reset_indirect_batch_sets_pipelines: ResMut< + SpecializedComputePipelines, + >, + mut specialized_build_indirect_parameters_pipelines: ResMut< + SpecializedComputePipelines, + >, + preprocess_pipelines: ResMut, + gpu_preprocessing_support: Res, +) { + let preprocess_pipelines = preprocess_pipelines.into_inner(); + + preprocess_pipelines.direct_preprocess.prepare( + &pipeline_cache, + &mut specialized_preprocess_pipelines, + PreprocessPipelineKey::empty(), + ); + preprocess_pipelines.gpu_frustum_culling_preprocess.prepare( + &pipeline_cache, + &mut specialized_preprocess_pipelines, + PreprocessPipelineKey::FRUSTUM_CULLING, + ); + + if gpu_preprocessing_support.is_culling_supported() { + preprocess_pipelines + .early_gpu_occlusion_culling_preprocess + .prepare( + &pipeline_cache, + &mut specialized_preprocess_pipelines, + PreprocessPipelineKey::FRUSTUM_CULLING + | PreprocessPipelineKey::OCCLUSION_CULLING + | PreprocessPipelineKey::EARLY_PHASE, + ); + preprocess_pipelines + .late_gpu_occlusion_culling_preprocess + .prepare( + &pipeline_cache, + &mut specialized_preprocess_pipelines, + PreprocessPipelineKey::FRUSTUM_CULLING | PreprocessPipelineKey::OCCLUSION_CULLING, + ); + } + + let mut build_indirect_parameters_pipeline_key = BuildIndirectParametersPipelineKey::empty(); + + // If the GPU and driver support `multi_draw_indirect_count`, tell the + // shader that. 
+ if render_device + .wgpu_device() + .features() + .contains(WgpuFeatures::MULTI_DRAW_INDIRECT_COUNT) + { + build_indirect_parameters_pipeline_key + .insert(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED); + } + + preprocess_pipelines + .gpu_frustum_culling_build_indexed_indirect_params + .prepare( + &pipeline_cache, + &mut specialized_build_indirect_parameters_pipelines, + build_indirect_parameters_pipeline_key | BuildIndirectParametersPipelineKey::INDEXED, + ); + preprocess_pipelines + .gpu_frustum_culling_build_non_indexed_indirect_params + .prepare( + &pipeline_cache, + &mut specialized_build_indirect_parameters_pipelines, + build_indirect_parameters_pipeline_key, + ); + + if !gpu_preprocessing_support.is_culling_supported() { + return; + } + + for (preprocess_phase_pipelines, build_indirect_parameters_phase_pipeline_key) in [ + ( + &mut preprocess_pipelines.early_phase, + BuildIndirectParametersPipelineKey::EARLY_PHASE, + ), + ( + &mut preprocess_pipelines.late_phase, + BuildIndirectParametersPipelineKey::LATE_PHASE, + ), + ( + &mut preprocess_pipelines.main_phase, + BuildIndirectParametersPipelineKey::MAIN_PHASE, + ), + ] { + preprocess_phase_pipelines + .reset_indirect_batch_sets + .prepare( + &pipeline_cache, + &mut specialized_reset_indirect_batch_sets_pipelines, + ); + preprocess_phase_pipelines + .gpu_occlusion_culling_build_indexed_indirect_params + .prepare( + &pipeline_cache, + &mut specialized_build_indirect_parameters_pipelines, + build_indirect_parameters_pipeline_key + | build_indirect_parameters_phase_pipeline_key + | BuildIndirectParametersPipelineKey::INDEXED + | BuildIndirectParametersPipelineKey::OCCLUSION_CULLING, + ); + preprocess_phase_pipelines + .gpu_occlusion_culling_build_non_indexed_indirect_params + .prepare( + &pipeline_cache, + &mut specialized_build_indirect_parameters_pipelines, + build_indirect_parameters_pipeline_key + | build_indirect_parameters_phase_pipeline_key + | 
BuildIndirectParametersPipelineKey::OCCLUSION_CULLING, + ); + } +} + +impl PreprocessPipeline { + fn prepare( + &mut self, + pipeline_cache: &PipelineCache, + pipelines: &mut SpecializedComputePipelines, + key: PreprocessPipelineKey, + ) { + if self.pipeline_id.is_some() { + return; + } + + let preprocess_pipeline_id = pipelines.specialize(pipeline_cache, self, key); + self.pipeline_id = Some(preprocess_pipeline_id); + } +} + +impl SpecializedComputePipeline for ResetIndirectBatchSetsPipeline { + type Key = (); + + fn specialize(&self, _: Self::Key) -> ComputePipelineDescriptor { + ComputePipelineDescriptor { + label: Some("reset indirect batch sets".into()), + layout: vec![self.bind_group_layout.clone()], + push_constant_ranges: vec![], + shader: RESET_INDIRECT_BATCH_SETS_SHADER_HANDLE, + shader_defs: vec![], + entry_point: "main".into(), + zero_initialize_workgroup_memory: false, + } + } +} + +impl SpecializedComputePipeline for BuildIndirectParametersPipeline { + type Key = BuildIndirectParametersPipelineKey; + + fn specialize(&self, key: Self::Key) -> ComputePipelineDescriptor { + let mut shader_defs = vec![]; + if key.contains(BuildIndirectParametersPipelineKey::INDEXED) { + shader_defs.push("INDEXED".into()); + } + if key.contains(BuildIndirectParametersPipelineKey::MULTI_DRAW_INDIRECT_COUNT_SUPPORTED) { + shader_defs.push("MULTI_DRAW_INDIRECT_COUNT_SUPPORTED".into()); + } + if key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) { + shader_defs.push("OCCLUSION_CULLING".into()); + } + if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) { + shader_defs.push("EARLY_PHASE".into()); + } + if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) { + shader_defs.push("LATE_PHASE".into()); + } + if key.contains(BuildIndirectParametersPipelineKey::MAIN_PHASE) { + shader_defs.push("MAIN_PHASE".into()); + } + + let label = format!( + "{} build {}indexed indirect parameters", + if 
!key.contains(BuildIndirectParametersPipelineKey::OCCLUSION_CULLING) { + "frustum culling" + } else if key.contains(BuildIndirectParametersPipelineKey::EARLY_PHASE) { + "early occlusion culling" + } else if key.contains(BuildIndirectParametersPipelineKey::LATE_PHASE) { + "late occlusion culling" + } else { + "main occlusion culling" + }, + if key.contains(BuildIndirectParametersPipelineKey::INDEXED) { + "" + } else { + "non-" + } + ); + + ComputePipelineDescriptor { + label: Some(label.into()), + layout: vec![self.bind_group_layout.clone()], + push_constant_ranges: vec![], + shader: BUILD_INDIRECT_PARAMS_SHADER_HANDLE, + shader_defs, + entry_point: "main".into(), + zero_initialize_workgroup_memory: false, + } + } +} + +impl ResetIndirectBatchSetsPipeline { + fn prepare( + &mut self, + pipeline_cache: &PipelineCache, + pipelines: &mut SpecializedComputePipelines, + ) { + if self.pipeline_id.is_some() { + return; + } + + let reset_indirect_batch_sets_pipeline_id = pipelines.specialize(pipeline_cache, self, ()); + self.pipeline_id = Some(reset_indirect_batch_sets_pipeline_id); + } +} + +impl BuildIndirectParametersPipeline { + fn prepare( + &mut self, + pipeline_cache: &PipelineCache, + pipelines: &mut SpecializedComputePipelines, + key: BuildIndirectParametersPipelineKey, + ) { + if self.pipeline_id.is_some() { + return; + } + + let build_indirect_parameters_pipeline_id = pipelines.specialize(pipeline_cache, self, key); + self.pipeline_id = Some(build_indirect_parameters_pipeline_id); + } +} + +/// A system that attaches the mesh uniform buffers to the bind groups for the +/// variants of the mesh preprocessing compute shader. 
+#[expect( + clippy::too_many_arguments, + reason = "it's a system that needs a lot of arguments" +)] +pub fn prepare_preprocess_bind_groups( + mut commands: Commands, + views: Query<(Entity, &ExtractedView)>, + view_depth_pyramids: Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>, + render_device: Res, + batched_instance_buffers: Res>, + indirect_parameters_buffers: Res, mesh_culling_data_buffer: Res, view_uniforms: Res, + previous_view_uniforms: Res, pipelines: Res, ) { // Grab the `BatchedInstanceBuffers`. let BatchedInstanceBuffers { - data_buffer: ref data_buffer_vec, - work_item_buffers: ref index_buffers, - current_input_buffer: ref current_input_buffer_vec, - previous_input_buffer: ref previous_input_buffer_vec, + current_input_buffer: current_input_buffer_vec, + previous_input_buffer: previous_input_buffer_vec, + phase_instance_buffers, } = batched_instance_buffers.into_inner(); - let (Some(current_input_buffer), Some(previous_input_buffer), Some(data_buffer)) = ( + let (Some(current_input_buffer), Some(previous_input_buffer)) = ( current_input_buffer_vec.buffer().buffer(), previous_input_buffer_vec.buffer().buffer(), - data_buffer_vec.buffer(), ) else { return; }; - for (view, index_buffer_vec) in index_buffers { - let Some(index_buffer) = index_buffer_vec.buffer.buffer() else { - continue; - }; + // Record whether we have any meshes that are to be drawn indirectly. If we + // don't, then we can skip building indirect parameters. + let mut any_indirect = false; + + // Loop over each view. + for (view_entity, view) in &views { + let mut bind_groups = TypeIdMap::default(); + + // Loop over each phase. 
+ for (phase_type_id, phase_instance_buffers) in phase_instance_buffers { + let UntypedPhaseBatchedInstanceBuffers { + data_buffer: ref data_buffer_vec, + ref work_item_buffers, + ref late_indexed_indirect_parameters_buffer, + ref late_non_indexed_indirect_parameters_buffer, + } = *phase_instance_buffers; + + let Some(data_buffer) = data_buffer_vec.buffer() else { + continue; + }; + + // Grab the indirect parameters buffers for this phase. + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffers.get(phase_type_id) + else { + continue; + }; + + let Some(work_item_buffers) = work_item_buffers.get(&view.retained_view_entity) else { + continue; + }; + + // Create the `PreprocessBindGroupBuilder`. + let preprocess_bind_group_builder = PreprocessBindGroupBuilder { + view: view_entity, + late_indexed_indirect_parameters_buffer, + late_non_indexed_indirect_parameters_buffer, + render_device: &render_device, + phase_indirect_parameters_buffers, + mesh_culling_data_buffer: &mesh_culling_data_buffer, + view_uniforms: &view_uniforms, + previous_view_uniforms: &previous_view_uniforms, + pipelines: &pipelines, + current_input_buffer, + previous_input_buffer, + data_buffer, + }; + + // Depending on the type of work items we have, construct the + // appropriate bind groups. 
+ let (was_indirect, bind_group) = match *work_item_buffers { + PreprocessWorkItemBuffers::Direct(ref work_item_buffer) => ( + false, + preprocess_bind_group_builder + .create_direct_preprocess_bind_groups(work_item_buffer), + ), + + PreprocessWorkItemBuffers::Indirect { + indexed: ref indexed_work_item_buffer, + non_indexed: ref non_indexed_work_item_buffer, + gpu_occlusion_culling: Some(ref gpu_occlusion_culling_work_item_buffers), + } => ( + true, + preprocess_bind_group_builder + .create_indirect_occlusion_culling_preprocess_bind_groups( + &view_depth_pyramids, + indexed_work_item_buffer, + non_indexed_work_item_buffer, + gpu_occlusion_culling_work_item_buffers, + ), + ), + + PreprocessWorkItemBuffers::Indirect { + indexed: ref indexed_work_item_buffer, + non_indexed: ref non_indexed_work_item_buffer, + gpu_occlusion_culling: None, + } => ( + true, + preprocess_bind_group_builder + .create_indirect_frustum_culling_preprocess_bind_groups( + indexed_work_item_buffer, + non_indexed_work_item_buffer, + ), + ), + }; + + // Write that bind group in. + if let Some(bind_group) = bind_group { + any_indirect = any_indirect || was_indirect; + bind_groups.insert(*phase_type_id, bind_group); + } + } + + // Save the bind groups. + commands + .entity(view_entity) + .insert(PreprocessBindGroups(bind_groups)); + } + + // Now, if there were any indirect draw commands, create the bind groups for + // the indirect parameters building shader. + if any_indirect { + create_build_indirect_parameters_bind_groups( + &mut commands, + &render_device, + &pipelines, + current_input_buffer, + &indirect_parameters_buffers, + ); + } +} + +/// A temporary structure that stores all the information needed to construct +/// bind groups for the mesh preprocessing shader. +struct PreprocessBindGroupBuilder<'a> { + /// The render-world entity corresponding to the current view. + view: Entity, + /// The indirect compute dispatch parameters buffer for indexed meshes in + /// the late prepass. 
+ late_indexed_indirect_parameters_buffer: + &'a RawBufferVec, + /// The indirect compute dispatch parameters buffer for non-indexed meshes + /// in the late prepass. + late_non_indexed_indirect_parameters_buffer: + &'a RawBufferVec, + /// The device. + render_device: &'a RenderDevice, + /// The buffers that store indirect draw parameters. + phase_indirect_parameters_buffers: &'a UntypedPhaseIndirectParametersBuffers, + /// The GPU buffer that stores the information needed to cull each mesh. + mesh_culling_data_buffer: &'a MeshCullingDataBuffer, + /// The GPU buffer that stores information about the view. + view_uniforms: &'a ViewUniforms, + /// The GPU buffer that stores information about the view from last frame. + previous_view_uniforms: &'a PreviousViewUniforms, + /// The pipelines for the mesh preprocessing shader. + pipelines: &'a PreprocessPipelines, + /// The GPU buffer containing the list of [`MeshInputUniform`]s for the + /// current frame. + current_input_buffer: &'a Buffer, + /// The GPU buffer containing the list of [`MeshInputUniform`]s for the + /// previous frame. + previous_input_buffer: &'a Buffer, + /// The GPU buffer containing the list of [`MeshUniform`]s for the current + /// frame. + /// + /// This is the buffer containing the mesh's final transforms that the + /// shaders will write to. + data_buffer: &'a Buffer, +} +impl<'a> PreprocessBindGroupBuilder<'a> { + /// Creates the bind groups for mesh preprocessing when GPU frustum culling + /// and GPU occlusion culling are both disabled. + fn create_direct_preprocess_bind_groups( + &self, + work_item_buffer: &RawBufferVec, + ) -> Option { // Don't use `as_entire_binding()` here; the shader reads the array // length and the underlying buffer may be longer than the actual size // of the vector. 
- let index_buffer_size = NonZero::::try_from( - index_buffer_vec.buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()), + let work_item_buffer_size = NonZero::::try_from( + work_item_buffer.len() as u64 * u64::from(PreprocessWorkItem::min_size()), ) .ok(); - let bind_group = if !index_buffer_vec.no_indirect_drawing { - let ( - Some(indirect_parameters_buffer), - Some(mesh_culling_data_buffer), - Some(view_uniforms_binding), - ) = ( - indirect_parameters_buffer.buffer(), - mesh_culling_data_buffer.buffer(), - view_uniforms.uniforms.binding(), - ) - else { - continue; - }; - - PreprocessBindGroup(render_device.create_bind_group( - "preprocess_gpu_culling_bind_group", - &pipelines.gpu_culling.bind_group_layout, - &BindGroupEntries::sequential(( - current_input_buffer.as_entire_binding(), - previous_input_buffer.as_entire_binding(), - BindingResource::Buffer(BufferBinding { - buffer: index_buffer, - offset: 0, - size: index_buffer_size, - }), - data_buffer.as_entire_binding(), - indirect_parameters_buffer.as_entire_binding(), - mesh_culling_data_buffer.as_entire_binding(), - view_uniforms_binding, - )), - )) - } else { - PreprocessBindGroup(render_device.create_bind_group( + Some(PhasePreprocessBindGroups::Direct( + self.render_device.create_bind_group( "preprocess_direct_bind_group", - &pipelines.direct.bind_group_layout, - &BindGroupEntries::sequential(( - current_input_buffer.as_entire_binding(), - previous_input_buffer.as_entire_binding(), - BindingResource::Buffer(BufferBinding { - buffer: index_buffer, - offset: 0, - size: index_buffer_size, - }), - data_buffer.as_entire_binding(), + &self.pipelines.direct_preprocess.bind_group_layout, + &BindGroupEntries::with_indices(( + (0, self.view_uniforms.uniforms.binding()?), + (3, self.current_input_buffer.as_entire_binding()), + (4, self.previous_input_buffer.as_entire_binding()), + ( + 5, + BindingResource::Buffer(BufferBinding { + buffer: work_item_buffer.buffer()?, + offset: 0, + size: 
work_item_buffer_size, + }), + ), + (6, self.data_buffer.as_entire_binding()), )), - )) - }; + ), + )) + } + + /// Creates the bind groups for mesh preprocessing when GPU occlusion + /// culling is enabled. + fn create_indirect_occlusion_culling_preprocess_bind_groups( + &self, + view_depth_pyramids: &Query<(&ViewDepthPyramid, &PreviousViewUniformOffset)>, + indexed_work_item_buffer: &RawBufferVec, + non_indexed_work_item_buffer: &RawBufferVec, + gpu_occlusion_culling_work_item_buffers: &GpuOcclusionCullingWorkItemBuffers, + ) -> Option { + let GpuOcclusionCullingWorkItemBuffers { + late_indexed: ref late_indexed_work_item_buffer, + late_non_indexed: ref late_non_indexed_work_item_buffer, + .. + } = *gpu_occlusion_culling_work_item_buffers; + + let (view_depth_pyramid, previous_view_uniform_offset) = + view_depth_pyramids.get(self.view).ok()?; + + Some(PhasePreprocessBindGroups::IndirectOcclusionCulling { + early_indexed: self.create_indirect_occlusion_culling_early_indexed_bind_group( + view_depth_pyramid, + previous_view_uniform_offset, + indexed_work_item_buffer, + late_indexed_work_item_buffer, + ), + + early_non_indexed: self.create_indirect_occlusion_culling_early_non_indexed_bind_group( + view_depth_pyramid, + previous_view_uniform_offset, + non_indexed_work_item_buffer, + late_non_indexed_work_item_buffer, + ), + + late_indexed: self.create_indirect_occlusion_culling_late_indexed_bind_group( + view_depth_pyramid, + previous_view_uniform_offset, + late_indexed_work_item_buffer, + ), + + late_non_indexed: self.create_indirect_occlusion_culling_late_non_indexed_bind_group( + view_depth_pyramid, + previous_view_uniform_offset, + late_non_indexed_work_item_buffer, + ), + }) + } + + /// Creates the bind group for the first phase of mesh preprocessing of + /// indexed meshes when GPU occlusion culling is enabled. 
+ fn create_indirect_occlusion_culling_early_indexed_bind_group( + &self, + view_depth_pyramid: &ViewDepthPyramid, + previous_view_uniform_offset: &PreviousViewUniformOffset, + indexed_work_item_buffer: &RawBufferVec, + late_indexed_work_item_buffer: &UninitBufferVec, + ) -> Option { + let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; + let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; + let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; + + match ( + self.phase_indirect_parameters_buffers + .indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer(), + indexed_work_item_buffer.buffer(), + late_indexed_work_item_buffer.buffer(), + self.late_indexed_indirect_parameters_buffer.buffer(), + ) { + ( + Some(indexed_cpu_metadata_buffer), + Some(indexed_gpu_metadata_buffer), + Some(indexed_work_item_gpu_buffer), + Some(late_indexed_work_item_gpu_buffer), + Some(late_indexed_indirect_parameters_buffer), + ) => { + // Don't use `as_entire_binding()` here; the shader reads the array + // length and the underlying buffer may be longer than the actual size + // of the vector. 
+ let indexed_work_item_buffer_size = NonZero::::try_from( + indexed_work_item_buffer.len() as u64 + * u64::from(PreprocessWorkItem::min_size()), + ) + .ok(); + + Some( + self.render_device.create_bind_group( + "preprocess_early_indexed_gpu_occlusion_culling_bind_group", + &self + .pipelines + .early_gpu_occlusion_culling_preprocess + .bind_group_layout, + &BindGroupEntries::with_indices(( + (3, self.current_input_buffer.as_entire_binding()), + (4, self.previous_input_buffer.as_entire_binding()), + ( + 5, + BindingResource::Buffer(BufferBinding { + buffer: indexed_work_item_gpu_buffer, + offset: 0, + size: indexed_work_item_buffer_size, + }), + ), + (6, self.data_buffer.as_entire_binding()), + (7, indexed_cpu_metadata_buffer.as_entire_binding()), + (8, indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), + (0, view_uniforms_binding.clone()), + (10, &view_depth_pyramid.all_mips), + ( + 2, + BufferBinding { + buffer: previous_view_buffer, + offset: previous_view_uniform_offset.offset as u64, + size: NonZeroU64::new(size_of::() as u64), + }, + ), + ( + 11, + BufferBinding { + buffer: late_indexed_work_item_gpu_buffer, + offset: 0, + size: indexed_work_item_buffer_size, + }, + ), + ( + 12, + BufferBinding { + buffer: late_indexed_indirect_parameters_buffer, + offset: 0, + size: NonZeroU64::new( + late_indexed_indirect_parameters_buffer.size(), + ), + }, + ), + )), + ), + ) + } + _ => None, + } + } + + /// Creates the bind group for the first phase of mesh preprocessing of + /// non-indexed meshes when GPU occlusion culling is enabled. 
+ fn create_indirect_occlusion_culling_early_non_indexed_bind_group( + &self, + view_depth_pyramid: &ViewDepthPyramid, + previous_view_uniform_offset: &PreviousViewUniformOffset, + non_indexed_work_item_buffer: &RawBufferVec, + late_non_indexed_work_item_buffer: &UninitBufferVec, + ) -> Option { + let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; + let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; + let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; + + match ( + self.phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer(), + non_indexed_work_item_buffer.buffer(), + late_non_indexed_work_item_buffer.buffer(), + self.late_non_indexed_indirect_parameters_buffer.buffer(), + ) { + ( + Some(non_indexed_cpu_metadata_buffer), + Some(non_indexed_gpu_metadata_buffer), + Some(non_indexed_work_item_gpu_buffer), + Some(late_non_indexed_work_item_buffer), + Some(late_non_indexed_indirect_parameters_buffer), + ) => { + // Don't use `as_entire_binding()` here; the shader reads the array + // length and the underlying buffer may be longer than the actual size + // of the vector. 
+ let non_indexed_work_item_buffer_size = NonZero::::try_from( + non_indexed_work_item_buffer.len() as u64 + * u64::from(PreprocessWorkItem::min_size()), + ) + .ok(); + + Some( + self.render_device.create_bind_group( + "preprocess_early_non_indexed_gpu_occlusion_culling_bind_group", + &self + .pipelines + .early_gpu_occlusion_culling_preprocess + .bind_group_layout, + &BindGroupEntries::with_indices(( + (3, self.current_input_buffer.as_entire_binding()), + (4, self.previous_input_buffer.as_entire_binding()), + ( + 5, + BindingResource::Buffer(BufferBinding { + buffer: non_indexed_work_item_gpu_buffer, + offset: 0, + size: non_indexed_work_item_buffer_size, + }), + ), + (6, self.data_buffer.as_entire_binding()), + (7, non_indexed_cpu_metadata_buffer.as_entire_binding()), + (8, non_indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), + (0, view_uniforms_binding.clone()), + (10, &view_depth_pyramid.all_mips), + ( + 2, + BufferBinding { + buffer: previous_view_buffer, + offset: previous_view_uniform_offset.offset as u64, + size: NonZeroU64::new(size_of::() as u64), + }, + ), + ( + 11, + BufferBinding { + buffer: late_non_indexed_work_item_buffer, + offset: 0, + size: non_indexed_work_item_buffer_size, + }, + ), + ( + 12, + BufferBinding { + buffer: late_non_indexed_indirect_parameters_buffer, + offset: 0, + size: NonZeroU64::new( + late_non_indexed_indirect_parameters_buffer.size(), + ), + }, + ), + )), + ), + ) + } + _ => None, + } + } + + /// Creates the bind group for the second phase of mesh preprocessing of + /// indexed meshes when GPU occlusion culling is enabled. 
+ fn create_indirect_occlusion_culling_late_indexed_bind_group( + &self, + view_depth_pyramid: &ViewDepthPyramid, + previous_view_uniform_offset: &PreviousViewUniformOffset, + late_indexed_work_item_buffer: &UninitBufferVec, + ) -> Option { + let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; + let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; + let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; + + match ( + self.phase_indirect_parameters_buffers + .indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer(), + late_indexed_work_item_buffer.buffer(), + self.late_indexed_indirect_parameters_buffer.buffer(), + ) { + ( + Some(indexed_cpu_metadata_buffer), + Some(indexed_gpu_metadata_buffer), + Some(late_indexed_work_item_gpu_buffer), + Some(late_indexed_indirect_parameters_buffer), + ) => { + // Don't use `as_entire_binding()` here; the shader reads the array + // length and the underlying buffer may be longer than the actual size + // of the vector. 
+ let late_indexed_work_item_buffer_size = NonZero::::try_from( + late_indexed_work_item_buffer.len() as u64 + * u64::from(PreprocessWorkItem::min_size()), + ) + .ok(); + + Some( + self.render_device.create_bind_group( + "preprocess_late_indexed_gpu_occlusion_culling_bind_group", + &self + .pipelines + .late_gpu_occlusion_culling_preprocess + .bind_group_layout, + &BindGroupEntries::with_indices(( + (3, self.current_input_buffer.as_entire_binding()), + (4, self.previous_input_buffer.as_entire_binding()), + ( + 5, + BindingResource::Buffer(BufferBinding { + buffer: late_indexed_work_item_gpu_buffer, + offset: 0, + size: late_indexed_work_item_buffer_size, + }), + ), + (6, self.data_buffer.as_entire_binding()), + (7, indexed_cpu_metadata_buffer.as_entire_binding()), + (8, indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), + (0, view_uniforms_binding.clone()), + (10, &view_depth_pyramid.all_mips), + ( + 2, + BufferBinding { + buffer: previous_view_buffer, + offset: previous_view_uniform_offset.offset as u64, + size: NonZeroU64::new(size_of::() as u64), + }, + ), + ( + 12, + BufferBinding { + buffer: late_indexed_indirect_parameters_buffer, + offset: 0, + size: NonZeroU64::new( + late_indexed_indirect_parameters_buffer.size(), + ), + }, + ), + )), + ), + ) + } + _ => None, + } + } + + /// Creates the bind group for the second phase of mesh preprocessing of + /// non-indexed meshes when GPU occlusion culling is enabled. 
+ fn create_indirect_occlusion_culling_late_non_indexed_bind_group( + &self, + view_depth_pyramid: &ViewDepthPyramid, + previous_view_uniform_offset: &PreviousViewUniformOffset, + late_non_indexed_work_item_buffer: &UninitBufferVec, + ) -> Option { + let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; + let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; + let previous_view_buffer = self.previous_view_uniforms.uniforms.buffer()?; + + match ( + self.phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer(), + late_non_indexed_work_item_buffer.buffer(), + self.late_non_indexed_indirect_parameters_buffer.buffer(), + ) { + ( + Some(non_indexed_cpu_metadata_buffer), + Some(non_indexed_gpu_metadata_buffer), + Some(non_indexed_work_item_gpu_buffer), + Some(late_non_indexed_indirect_parameters_buffer), + ) => { + // Don't use `as_entire_binding()` here; the shader reads the array + // length and the underlying buffer may be longer than the actual size + // of the vector. 
+ let non_indexed_work_item_buffer_size = NonZero::::try_from( + late_non_indexed_work_item_buffer.len() as u64 + * u64::from(PreprocessWorkItem::min_size()), + ) + .ok(); + + Some( + self.render_device.create_bind_group( + "preprocess_late_non_indexed_gpu_occlusion_culling_bind_group", + &self + .pipelines + .late_gpu_occlusion_culling_preprocess + .bind_group_layout, + &BindGroupEntries::with_indices(( + (3, self.current_input_buffer.as_entire_binding()), + (4, self.previous_input_buffer.as_entire_binding()), + ( + 5, + BindingResource::Buffer(BufferBinding { + buffer: non_indexed_work_item_gpu_buffer, + offset: 0, + size: non_indexed_work_item_buffer_size, + }), + ), + (6, self.data_buffer.as_entire_binding()), + (7, non_indexed_cpu_metadata_buffer.as_entire_binding()), + (8, non_indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), + (0, view_uniforms_binding.clone()), + (10, &view_depth_pyramid.all_mips), + ( + 2, + BufferBinding { + buffer: previous_view_buffer, + offset: previous_view_uniform_offset.offset as u64, + size: NonZeroU64::new(size_of::() as u64), + }, + ), + ( + 12, + BufferBinding { + buffer: late_non_indexed_indirect_parameters_buffer, + offset: 0, + size: NonZeroU64::new( + late_non_indexed_indirect_parameters_buffer.size(), + ), + }, + ), + )), + ), + ) + } + _ => None, + } + } + + /// Creates the bind groups for mesh preprocessing when GPU frustum culling + /// is enabled, but GPU occlusion culling is disabled. 
+ fn create_indirect_frustum_culling_preprocess_bind_groups( + &self, + indexed_work_item_buffer: &RawBufferVec, + non_indexed_work_item_buffer: &RawBufferVec, + ) -> Option { + Some(PhasePreprocessBindGroups::IndirectFrustumCulling { + indexed: self + .create_indirect_frustum_culling_indexed_bind_group(indexed_work_item_buffer), + non_indexed: self.create_indirect_frustum_culling_non_indexed_bind_group( + non_indexed_work_item_buffer, + ), + }) + } + + /// Creates the bind group for mesh preprocessing of indexed meshes when GPU + /// frustum culling is enabled, but GPU occlusion culling is disabled. + fn create_indirect_frustum_culling_indexed_bind_group( + &self, + indexed_work_item_buffer: &RawBufferVec, + ) -> Option { + let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; + let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; + + match ( + self.phase_indirect_parameters_buffers + .indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .indexed + .gpu_metadata_buffer(), + indexed_work_item_buffer.buffer(), + ) { + ( + Some(indexed_cpu_metadata_buffer), + Some(indexed_gpu_metadata_buffer), + Some(indexed_work_item_gpu_buffer), + ) => { + // Don't use `as_entire_binding()` here; the shader reads the array + // length and the underlying buffer may be longer than the actual size + // of the vector. 
+ let indexed_work_item_buffer_size = NonZero::::try_from( + indexed_work_item_buffer.len() as u64 + * u64::from(PreprocessWorkItem::min_size()), + ) + .ok(); + + Some( + self.render_device.create_bind_group( + "preprocess_gpu_indexed_frustum_culling_bind_group", + &self + .pipelines + .gpu_frustum_culling_preprocess + .bind_group_layout, + &BindGroupEntries::with_indices(( + (3, self.current_input_buffer.as_entire_binding()), + (4, self.previous_input_buffer.as_entire_binding()), + ( + 5, + BindingResource::Buffer(BufferBinding { + buffer: indexed_work_item_gpu_buffer, + offset: 0, + size: indexed_work_item_buffer_size, + }), + ), + (6, self.data_buffer.as_entire_binding()), + (7, indexed_cpu_metadata_buffer.as_entire_binding()), + (8, indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), + (0, view_uniforms_binding.clone()), + )), + ), + ) + } + _ => None, + } + } + + /// Creates the bind group for mesh preprocessing of non-indexed meshes when + /// GPU frustum culling is enabled, but GPU occlusion culling is disabled. + fn create_indirect_frustum_culling_non_indexed_bind_group( + &self, + non_indexed_work_item_buffer: &RawBufferVec, + ) -> Option { + let mesh_culling_data_buffer = self.mesh_culling_data_buffer.buffer()?; + let view_uniforms_binding = self.view_uniforms.uniforms.binding()?; - commands.entity(*view).insert(bind_group); + match ( + self.phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata_buffer(), + self.phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata_buffer(), + non_indexed_work_item_buffer.buffer(), + ) { + ( + Some(non_indexed_cpu_metadata_buffer), + Some(non_indexed_gpu_metadata_buffer), + Some(non_indexed_work_item_gpu_buffer), + ) => { + // Don't use `as_entire_binding()` here; the shader reads the array + // length and the underlying buffer may be longer than the actual size + // of the vector. 
+ let non_indexed_work_item_buffer_size = NonZero::::try_from( + non_indexed_work_item_buffer.len() as u64 + * u64::from(PreprocessWorkItem::min_size()), + ) + .ok(); + + Some( + self.render_device.create_bind_group( + "preprocess_gpu_non_indexed_frustum_culling_bind_group", + &self + .pipelines + .gpu_frustum_culling_preprocess + .bind_group_layout, + &BindGroupEntries::with_indices(( + (3, self.current_input_buffer.as_entire_binding()), + (4, self.previous_input_buffer.as_entire_binding()), + ( + 5, + BindingResource::Buffer(BufferBinding { + buffer: non_indexed_work_item_gpu_buffer, + offset: 0, + size: non_indexed_work_item_buffer_size, + }), + ), + (6, self.data_buffer.as_entire_binding()), + (7, non_indexed_cpu_metadata_buffer.as_entire_binding()), + (8, non_indexed_gpu_metadata_buffer.as_entire_binding()), + (9, mesh_culling_data_buffer.as_entire_binding()), + (0, view_uniforms_binding.clone()), + )), + ), + ) + } + _ => None, + } + } +} + +/// A system that creates bind groups from the indirect parameters metadata and +/// data buffers for the indirect batch set reset shader and the indirect +/// parameter building shader. 
+fn create_build_indirect_parameters_bind_groups( + commands: &mut Commands, + render_device: &RenderDevice, + pipelines: &PreprocessPipelines, + current_input_buffer: &Buffer, + indirect_parameters_buffers: &IndirectParametersBuffers, +) { + let mut build_indirect_parameters_bind_groups = BuildIndirectParametersBindGroups::new(); + + for (phase_type_id, phase_indirect_parameters_buffer) in indirect_parameters_buffers.iter() { + build_indirect_parameters_bind_groups.insert( + *phase_type_id, + PhaseBuildIndirectParametersBindGroups { + reset_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer + .indexed + .batch_sets_buffer(),) + { + (Some(indexed_batch_sets_buffer),) => Some( + render_device.create_bind_group( + "reset_indexed_indirect_batch_sets_bind_group", + // The early bind group is good for the main phase and late + // phase too. They bind the same buffers. + &pipelines + .early_phase + .reset_indirect_batch_sets + .bind_group_layout, + &BindGroupEntries::sequential(( + indexed_batch_sets_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, + + reset_non_indexed_indirect_batch_sets: match (phase_indirect_parameters_buffer + .non_indexed + .batch_sets_buffer(),) + { + (Some(non_indexed_batch_sets_buffer),) => Some( + render_device.create_bind_group( + "reset_non_indexed_indirect_batch_sets_bind_group", + // The early bind group is good for the main phase and late + // phase too. They bind the same buffers. 
+ &pipelines + .early_phase + .reset_indirect_batch_sets + .bind_group_layout, + &BindGroupEntries::sequential(( + non_indexed_batch_sets_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, + + build_indexed_indirect: match ( + phase_indirect_parameters_buffer + .indexed + .cpu_metadata_buffer(), + phase_indirect_parameters_buffer + .indexed + .gpu_metadata_buffer(), + phase_indirect_parameters_buffer.indexed.data_buffer(), + phase_indirect_parameters_buffer.indexed.batch_sets_buffer(), + ) { + ( + Some(indexed_indirect_parameters_cpu_metadata_buffer), + Some(indexed_indirect_parameters_gpu_metadata_buffer), + Some(indexed_indirect_parameters_data_buffer), + Some(indexed_batch_sets_buffer), + ) => Some( + render_device.create_bind_group( + "build_indexed_indirect_parameters_bind_group", + // The frustum culling bind group is good for occlusion culling + // too. They bind the same buffers. + &pipelines + .gpu_frustum_culling_build_indexed_indirect_params + .bind_group_layout, + &BindGroupEntries::sequential(( + current_input_buffer.as_entire_binding(), + // Don't use `as_entire_binding` here; the shader reads + // the length and `RawBufferVec` overallocates. 
+ BufferBinding { + buffer: indexed_indirect_parameters_cpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + BufferBinding { + buffer: indexed_indirect_parameters_gpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + indexed_batch_sets_buffer.as_entire_binding(), + indexed_indirect_parameters_data_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, + + build_non_indexed_indirect: match ( + phase_indirect_parameters_buffer + .non_indexed + .cpu_metadata_buffer(), + phase_indirect_parameters_buffer + .non_indexed + .gpu_metadata_buffer(), + phase_indirect_parameters_buffer.non_indexed.data_buffer(), + phase_indirect_parameters_buffer + .non_indexed + .batch_sets_buffer(), + ) { + ( + Some(non_indexed_indirect_parameters_cpu_metadata_buffer), + Some(non_indexed_indirect_parameters_gpu_metadata_buffer), + Some(non_indexed_indirect_parameters_data_buffer), + Some(non_indexed_batch_sets_buffer), + ) => Some( + render_device.create_bind_group( + "build_non_indexed_indirect_parameters_bind_group", + // The frustum culling bind group is good for occlusion culling + // too. They bind the same buffers. + &pipelines + .gpu_frustum_culling_build_non_indexed_indirect_params + .bind_group_layout, + &BindGroupEntries::sequential(( + current_input_buffer.as_entire_binding(), + // Don't use `as_entire_binding` here; the shader reads + // the length and `RawBufferVec` overallocates. 
+ BufferBinding { + buffer: non_indexed_indirect_parameters_cpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.non_indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + BufferBinding { + buffer: non_indexed_indirect_parameters_gpu_metadata_buffer, + offset: 0, + size: NonZeroU64::new( + phase_indirect_parameters_buffer.non_indexed.batch_count() + as u64 + * size_of::() as u64, + ), + }, + non_indexed_batch_sets_buffer.as_entire_binding(), + non_indexed_indirect_parameters_data_buffer.as_entire_binding(), + )), + ), + ), + _ => None, + }, + }, + ); } + + commands.insert_resource(build_indirect_parameters_bind_groups); } /// Writes the information needed to do GPU mesh culling to the GPU. diff --git a/crates/bevy_pbr/src/render/light.rs b/crates/bevy_pbr/src/render/light.rs index 85fa1fca6dee3..d71dccc71ac39 100644 --- a/crates/bevy_pbr/src/render/light.rs +++ b/crates/bevy_pbr/src/render/light.rs @@ -5,17 +5,25 @@ use bevy_asset::UntypedAssetId; use bevy_color::ColorToComponents; use bevy_core_pipeline::core_3d::{Camera3d, CORE_3D_DEPTH_FORMAT}; use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::Tick; +use bevy_ecs::system::SystemChangeTick; use bevy_ecs::{ entity::{EntityHashMap, EntityHashSet}, prelude::*, system::lifetimeless::Read, }; use bevy_math::{ops, Mat4, UVec4, Vec2, Vec3, Vec3Swizzles, Vec4, Vec4Swizzles}; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_platform::hash::FixedHasher; +use bevy_render::experimental::occlusion_culling::{ + OcclusionCulling, OcclusionCullingSubview, OcclusionCullingSubviewEntities, +}; +use bevy_render::sync_world::MainEntityHashMap; use bevy_render::{ batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport}, camera::SortedCameras, mesh::allocator::MeshAllocator, - view::NoIndirectDrawing, + view::{NoIndirectDrawing, RetainedViewEntity}, }; use bevy_render::{ diagnostic::RecordDiagnostics, @@ -35,14 +43,11 @@ use 
bevy_render::{ sync_world::{MainEntity, RenderEntity}, }; use bevy_transform::{components::GlobalTransform, prelude::Transform}; +use bevy_utils::default; +use core::{hash::Hash, marker::PhantomData, ops::Range}; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; -use bevy_utils::{ - default, - tracing::{error, warn}, - HashMap, -}; -use core::{hash::Hash, ops::Range}; +use tracing::info_span; +use tracing::{error, warn}; #[derive(Component)] pub struct ExtractedPointLight { @@ -80,6 +85,8 @@ pub struct ExtractedDirectionalLight { pub frusta: EntityHashMap>, pub render_layers: RenderLayers, pub soft_shadow_size: Option, + /// True if this light is using two-phase occlusion culling. + pub occlusion_culling: bool, } // NOTE: These must match the bit flags in bevy_pbr/src/render/mesh_view_types.wgsl! @@ -209,7 +216,6 @@ impl FromWorld for ShadowSamplers { } } -#[allow(clippy::too_many_arguments)] pub fn extract_lights( mut commands: Commands, point_light_shadow_map: Extract>, @@ -217,6 +223,7 @@ pub fn extract_lights( global_point_lights: Extract>, point_lights: Extract< Query<( + Entity, RenderEntity, &PointLight, &CubemapVisibleEntities, @@ -228,6 +235,7 @@ pub fn extract_lights( >, spot_lights: Extract< Query<( + Entity, RenderEntity, &SpotLight, &VisibleMeshEntities, @@ -240,6 +248,7 @@ pub fn extract_lights( directional_lights: Extract< Query< ( + Entity, RenderEntity, &DirectionalLight, &CascadesVisibleEntities, @@ -250,6 +259,7 @@ pub fn extract_lights( &ViewVisibility, Option<&RenderLayers>, Option<&VolumetricLight>, + Has, ), Without, >, @@ -278,6 +288,7 @@ pub fn extract_lights( let mut point_lights_values = Vec::with_capacity(*previous_point_lights_len); for entity in global_point_lights.iter().copied() { let Ok(( + main_entity, render_entity, point_light, cubemap_visible_entities, @@ -331,15 +342,17 @@ pub fn extract_lights( extracted_point_light, render_cubemap_visible_entities, (*frusta).clone(), + MainEntity::from(main_entity), ), )); } 
*previous_point_lights_len = point_lights_values.len(); - commands.insert_or_spawn_batch(point_lights_values); + commands.try_insert_batch(point_lights_values); let mut spot_lights_values = Vec::with_capacity(*previous_spot_lights_len); for entity in global_point_lights.iter().copied() { if let Ok(( + main_entity, render_entity, spot_light, visible_entities, @@ -391,14 +404,16 @@ pub fn extract_lights( }, render_visible_entities, *frustum, + MainEntity::from(main_entity), ), )); } } *previous_spot_lights_len = spot_lights_values.len(); - commands.insert_or_spawn_batch(spot_lights_values); + commands.try_insert_batch(spot_lights_values); for ( + main_entity, entity, directional_light, visible_entities, @@ -409,6 +424,7 @@ pub fn extract_lights( view_visibility, maybe_layers, volumetric_light, + occlusion_culling, ) in &directional_lights { if !view_visibility.get() { @@ -474,10 +490,12 @@ pub fn extract_lights( cascades: extracted_cascades, frusta: extracted_frusta, render_layers: maybe_layers.unwrap_or_default().clone(), + occlusion_culling, }, RenderCascadesVisibleEntities { entities: cascade_visible_entities, }, + MainEntity::from(main_entity), )); } } @@ -507,7 +525,7 @@ pub(crate) fn add_light_view_entities( trigger: Trigger, mut commands: Commands, ) { - if let Some(mut v) = commands.get_entity(trigger.target()) { + if let Ok(mut v) = commands.get_entity(trigger.target()) { v.insert(LightViewEntities::default()); } } @@ -517,8 +535,8 @@ pub(crate) fn extracted_light_removed( trigger: Trigger, mut commands: Commands, ) { - if let Some(mut v) = commands.get_entity(trigger.target()) { - v.remove::(); + if let Ok(mut v) = commands.get_entity(trigger.target()) { + v.try_remove::(); } } @@ -530,7 +548,7 @@ pub(crate) fn remove_light_view_entities( if let Ok(entities) = query.get(trigger.target()) { for v in entities.0.values() { for e in v.iter().copied() { - if let Some(mut v) = commands.get_entity(e) { + if let Ok(mut v) = commands.get_entity(e) { v.despawn(); } } 
@@ -609,8 +627,18 @@ pub struct ViewShadowBindings { pub directional_light_depth_texture_view: TextureView, } +/// A component that holds the shadow cascade views for all shadow cascades +/// associated with a camera. +/// +/// Note: Despite the name, this component actually holds the shadow cascade +/// views, not the lights themselves. #[derive(Component)] pub struct ViewLightEntities { + /// The shadow cascade views for all shadow cascades associated with a + /// camera. + /// + /// Note: Despite the name, this component actually holds the shadow cascade + /// views, not the lights themselves. pub lights: Vec, } @@ -687,7 +715,6 @@ pub(crate) fn spot_light_clip_from_view(angle: f32, near_z: f32) -> Mat4 { Mat4::perspective_infinite_reverse_rh(angle * 2.0, 1.0, near_z) } -#[allow(clippy::too_many_arguments)] pub fn prepare_lights( mut commands: Commands, mut texture_cache: ResMut, @@ -697,10 +724,12 @@ pub fn prepare_lights( views: Query< ( Entity, + MainEntity, &ExtractedView, &ExtractedClusterConfig, Option<&RenderLayers>, Has, + Option<&AmbientLight>, ), With, >, @@ -712,13 +741,14 @@ pub fn prepare_lights( mut max_directional_lights_warning_emitted, mut max_cascades_per_light_warning_emitted, mut live_shadow_mapping_lights, - ): (Local, Local, Local), + ): (Local, Local, Local>), point_lights: Query<( Entity, + &MainEntity, &ExtractedPointLight, AnyOf<(&CubemapFrusta, &Frustum)>, )>, - directional_lights: Query<(Entity, &ExtractedDirectionalLight)>, + directional_lights: Query<(Entity, &MainEntity, &ExtractedDirectionalLight)>, mut light_view_entities: Query<&mut LightViewEntities>, sorted_cameras: Res, gpu_preprocessing_support: Res, @@ -774,7 +804,7 @@ pub fn prepare_lights( if !*max_cascades_per_light_warning_emitted && directional_lights .iter() - .any(|(_, light)| light.cascade_shadow_config.bounds.len() > MAX_CASCADES_PER_LIGHT) + .any(|(_, _, light)| light.cascade_shadow_config.bounds.len() > MAX_CASCADES_PER_LIGHT) { warn!( "The number of cascades 
configured for a directional light exceeds the supported limit of {}.", @@ -785,50 +815,50 @@ pub fn prepare_lights( let point_light_count = point_lights .iter() - .filter(|light| light.1.spot_light_angles.is_none()) + .filter(|light| light.2.spot_light_angles.is_none()) .count(); let point_light_volumetric_enabled_count = point_lights .iter() - .filter(|(_, light, _)| light.volumetric && light.spot_light_angles.is_none()) + .filter(|(_, _, light, _)| light.volumetric && light.spot_light_angles.is_none()) .count() .min(max_texture_cubes); let point_light_shadow_maps_count = point_lights .iter() - .filter(|light| light.1.shadows_enabled && light.1.spot_light_angles.is_none()) + .filter(|light| light.2.shadows_enabled && light.2.spot_light_angles.is_none()) .count() .min(max_texture_cubes); let directional_volumetric_enabled_count = directional_lights .iter() .take(MAX_DIRECTIONAL_LIGHTS) - .filter(|(_, light)| light.volumetric) + .filter(|(_, _, light)| light.volumetric) .count() .min(max_texture_array_layers / MAX_CASCADES_PER_LIGHT); let directional_shadow_enabled_count = directional_lights .iter() .take(MAX_DIRECTIONAL_LIGHTS) - .filter(|(_, light)| light.shadows_enabled) + .filter(|(_, _, light)| light.shadows_enabled) .count() .min(max_texture_array_layers / MAX_CASCADES_PER_LIGHT); let spot_light_count = point_lights .iter() - .filter(|(_, light, _)| light.spot_light_angles.is_some()) + .filter(|(_, _, light, _)| light.spot_light_angles.is_some()) .count() .min(max_texture_array_layers - directional_shadow_enabled_count * MAX_CASCADES_PER_LIGHT); let spot_light_volumetric_enabled_count = point_lights .iter() - .filter(|(_, light, _)| light.volumetric && light.spot_light_angles.is_some()) + .filter(|(_, _, light, _)| light.volumetric && light.spot_light_angles.is_some()) .count() .min(max_texture_array_layers - directional_shadow_enabled_count * MAX_CASCADES_PER_LIGHT); let spot_light_shadow_maps_count = point_lights .iter() - .filter(|(_, light, _)| 
light.shadows_enabled && light.spot_light_angles.is_some()) + .filter(|(_, _, light, _)| light.shadows_enabled && light.spot_light_angles.is_some()) .count() .min(max_texture_array_layers - directional_shadow_enabled_count * MAX_CASCADES_PER_LIGHT); @@ -837,16 +867,10 @@ pub fn prepare_lights( // - then those with shadows enabled first, so that the index can be used to render at most `point_light_shadow_maps_count` // point light shadows and `spot_light_shadow_maps_count` spot light shadow maps, // - then by entity as a stable key to ensure that a consistent set of lights are chosen if the light count limit is exceeded. - point_lights.sort_by(|(entity_1, light_1, _), (entity_2, light_2, _)| { - clusterable_object_order( - ClusterableObjectOrderData { - entity: entity_1, - object_type: &ClusterableObjectType::from_point_or_spot_light(light_1), - }, - ClusterableObjectOrderData { - entity: entity_2, - object_type: &ClusterableObjectType::from_point_or_spot_light(light_2), - }, + point_lights.sort_by_cached_key(|(entity, _, light, _)| { + ( + ClusterableObjectType::from_point_or_spot_light(light).ordering(), + *entity, ) }); @@ -858,11 +882,10 @@ pub fn prepare_lights( // shadows // - then by entity as a stable key to ensure that a consistent set of // lights are chosen if the light count limit is exceeded. - directional_lights.sort_by(|(entity_1, light_1), (entity_2, light_2)| { - directional_light_order( - (entity_1, &light_1.volumetric, &light_1.shadows_enabled), - (entity_2, &light_2.volumetric, &light_2.shadows_enabled), - ) + // - because entities are unique, we can use `sort_unstable_by_key` + // and still end up with a stable order. 
+ directional_lights.sort_unstable_by_key(|(entity, _, light)| { + (light.volumetric, light.shadows_enabled, *entity) }); if global_light_meta.entity_to_index.capacity() < point_lights.len() { @@ -872,7 +895,7 @@ pub fn prepare_lights( } let mut gpu_point_lights = Vec::new(); - for (index, &(entity, light, _)) in point_lights.iter().enumerate() { + for (index, &(entity, _, light, _)) in point_lights.iter().enumerate() { let mut flags = PointLightFlags::NONE; // Lights are sorted, shadow enabled lights are first @@ -961,7 +984,7 @@ pub fn prepare_lights( let mut gpu_directional_lights = [GpuDirectionalLight::default(); MAX_DIRECTIONAL_LIGHTS]; let mut num_directional_cascades_enabled = 0usize; - for (index, (_light_entity, light)) in directional_lights + for (index, (_light_entity, _, light)) in directional_lights .iter() .enumerate() .take(MAX_DIRECTIONAL_LIGHTS) @@ -1051,7 +1074,7 @@ pub fn prepare_lights( // NOTE: iOS Simulator is missing CubeArray support so we use Cube instead. // See https://github.com/bevyengine/bevy/pull/12052 - remove if support is added. 
#[cfg(all( - not(feature = "ios_simulator"), + not(target_abi = "sim"), any( not(feature = "webgl"), not(target_arch = "wasm32"), @@ -1060,10 +1083,11 @@ pub fn prepare_lights( ))] dimension: Some(TextureViewDimension::CubeArray), #[cfg(any( - feature = "ios_simulator", + target_abi = "sim", all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")) ))] dimension: Some(TextureViewDimension::Cube), + usage: None, aspect: TextureAspect::DepthOnly, base_mip_level: 0, mip_level_count: None, @@ -1107,6 +1131,7 @@ pub fn prepare_lights( dimension: Some(TextureViewDimension::D2Array), #[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] dimension: Some(TextureViewDimension::D2), + usage: None, aspect: TextureAspect::DepthOnly, base_mip_level: 0, mip_level_count: None, @@ -1117,13 +1142,23 @@ pub fn prepare_lights( let mut live_views = EntityHashSet::with_capacity(views_count); // set up light data for each view - for (entity, extracted_view, clusters, maybe_layers, no_indirect_drawing) in sorted_cameras + for ( + entity, + camera_main_entity, + extracted_view, + clusters, + maybe_layers, + no_indirect_drawing, + maybe_ambient_override, + ) in sorted_cameras .0 .iter() .filter_map(|sorted_camera| views.get(sorted_camera.entity).ok()) { live_views.insert(entity); + let mut view_lights = Vec::new(); + let mut view_occlusion_culling_lights = Vec::new(); let gpu_preprocessing_mode = gpu_preprocessing_support.min(if !no_indirect_drawing { GpuPreprocessingMode::Culling @@ -1140,6 +1175,7 @@ pub fn prepare_lights( ); let n_clusters = clusters.dimensions.x * clusters.dimensions.y * clusters.dimensions.z; + let ambient_light = maybe_ambient_override.unwrap_or(&ambient_light); let mut gpu_lights = GpuLights { directional_lights: gpu_directional_lights, ambient_color: Vec4::from_slice(&LinearRgba::from(ambient_light.color).to_f32_array()) @@ -1163,7 +1199,7 @@ pub fn prepare_lights( }; // TODO: this should select lights based on relevance to 
the view instead of the first ones that show up in a query - for &(light_entity, light, (point_light_frusta, _)) in point_lights + for &(light_entity, light_main_entity, light, (point_light_frusta, _)) in point_lights .iter() // Lights are sorted, shadow enabled lights are first .take(point_light_count.min(max_texture_cubes)) @@ -1220,6 +1256,7 @@ pub fn prepare_lights( label: Some("point_light_shadow_map_texture_view"), format: None, dimension: Some(TextureViewDimension::D2), + usage: None, aspect: TextureAspect::All, base_mip_level: 0, mip_level_count: None, @@ -1231,6 +1268,12 @@ pub fn prepare_lights( }) .clone(); + let retained_view_entity = RetainedViewEntity::new( + *light_main_entity, + Some(camera_main_entity.into()), + face_index as u32, + ); + commands.entity(view_light_entity).insert(( ShadowView { depth_attachment, @@ -1241,6 +1284,7 @@ pub fn prepare_lights( ), }, ExtractedView { + retained_view_entity, viewport: UVec4::new( 0, 0, @@ -1268,18 +1312,20 @@ pub fn prepare_lights( if first { // Subsequent views with the same light entity will reuse the same shadow map - shadow_render_phases.insert_or_clear(view_light_entity, gpu_preprocessing_mode); - live_shadow_mapping_lights.insert(view_light_entity); + shadow_render_phases + .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + live_shadow_mapping_lights.insert(retained_view_entity); } } } // spot lights - for (light_index, &(light_entity, light, (_, spot_light_frustum))) in point_lights - .iter() - .skip(point_light_count) - .take(spot_light_count) - .enumerate() + for (light_index, &(light_entity, light_main_entity, light, (_, spot_light_frustum))) in + point_lights + .iter() + .skip(point_light_count) + .take(spot_light_count) + .enumerate() { let Ok(mut light_view_entities) = light_view_entities.get_mut(light_entity) else { continue; @@ -1312,6 +1358,7 @@ pub fn prepare_lights( label: Some("spot_light_shadow_map_texture_view"), format: None, dimension: 
Some(TextureViewDimension::D2), + usage: None, aspect: TextureAspect::All, base_mip_level: 0, mip_level_count: None, @@ -1330,12 +1377,16 @@ pub fn prepare_lights( let view_light_entity = light_view_entities[0]; + let retained_view_entity = + RetainedViewEntity::new(*light_main_entity, Some(camera_main_entity.into()), 0); + commands.entity(view_light_entity).insert(( ShadowView { depth_attachment, pass_name: format!("shadow pass spot light {light_index}"), }, ExtractedView { + retained_view_entity, viewport: UVec4::new( 0, 0, @@ -1360,15 +1411,16 @@ pub fn prepare_lights( if first { // Subsequent views with the same light entity will reuse the same shadow map - shadow_render_phases.insert_or_clear(view_light_entity, gpu_preprocessing_mode); - live_shadow_mapping_lights.insert(view_light_entity); + shadow_render_phases + .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + live_shadow_mapping_lights.insert(retained_view_entity); } } // directional lights let mut directional_depth_texture_array_index = 0u32; let view_layers = maybe_layers.unwrap_or_default(); - for (light_index, &(light_entity, light)) in directional_lights + for (light_index, &(light_entity, light_main_entity, light)) in directional_lights .iter() .enumerate() .take(MAX_DIRECTIONAL_LIGHTS) @@ -1441,6 +1493,7 @@ pub fn prepare_lights( label: Some("directional_light_shadow_map_array_texture_view"), format: None, dimension: Some(TextureViewDimension::D2), + usage: None, aspect: TextureAspect::All, base_mip_level: 0, mip_level_count: None, @@ -1451,7 +1504,7 @@ pub fn prepare_lights( // NOTE: For point and spotlights, we reuse the same depth attachment for all views. // However, for directional lights, we want a new depth attachment for each view, // so that the view is cleared for each view. 
- let depth_attachment = DepthAttachment::new(depth_texture_view, Some(0.0)); + let depth_attachment = DepthAttachment::new(depth_texture_view.clone(), Some(0.0)); directional_depth_texture_array_index += 1; @@ -1460,6 +1513,12 @@ pub fn prepare_lights( frustum.half_spaces[4] = HalfSpace::new(frustum.half_spaces[4].normal().extend(f32::INFINITY)); + let retained_view_entity = RetainedViewEntity::new( + *light_main_entity, + Some(camera_main_entity.into()), + cascade_index as u32, + ); + commands.entity(view_light_entity).insert(( ShadowView { depth_attachment, @@ -1468,6 +1527,7 @@ pub fn prepare_lights( ), }, ExtractedView { + retained_view_entity, viewport: UVec4::new( 0, 0, @@ -1493,11 +1553,24 @@ pub fn prepare_lights( view_lights.push(view_light_entity); + // If this light is using occlusion culling, add the appropriate components. + if light.occlusion_culling { + commands.entity(view_light_entity).insert(( + OcclusionCulling, + OcclusionCullingSubview { + depth_texture_view, + depth_texture_size: directional_light_shadow_map.size as u32, + }, + )); + view_occlusion_culling_lights.push(view_light_entity); + } + // Subsequent views with the same light entity will **NOT** reuse the same shadow map // (Because the cascades are unique to each view) // TODO: Implement GPU culling for shadow passes. - shadow_render_phases.insert_or_clear(view_light_entity, gpu_preprocessing_mode); - live_shadow_mapping_lights.insert(view_light_entity); + shadow_render_phases + .prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + live_shadow_mapping_lights.insert(retained_view_entity); } } @@ -1515,6 +1588,16 @@ pub fn prepare_lights( offset: view_gpu_lights_writer.write(&gpu_lights), }, )); + + // Make a link from the camera to all shadow cascades with occlusion + // culling enabled. 
+ if !view_occlusion_culling_lights.is_empty() { + commands + .entity(entity) + .insert(OcclusionCullingSubviewEntities( + view_occlusion_culling_lights, + )); + } } // Despawn light-view entities for views that no longer exist @@ -1540,49 +1623,157 @@ fn despawn_entities(commands: &mut Commands, entities: Vec) { }); } -/// For each shadow cascade, iterates over all the meshes "visible" from it and -/// adds them to [`BinnedRenderPhase`]s or [`SortedRenderPhase`]s as -/// appropriate. -#[allow(clippy::too_many_arguments)] -pub fn queue_shadows( - shadow_draw_functions: Res>, +// These will be extracted in the material extraction, which will also clear the needs_specialization +// collection. +pub fn check_light_entities_needing_specialization( + needs_specialization: Query>, Changed)>, + mut entities_needing_specialization: ResMut>, + mut removed_components: RemovedComponents, +) { + for entity in &needs_specialization { + entities_needing_specialization.push(entity); + } + + for removed in removed_components.read() { + entities_needing_specialization.entities.push(removed); + } +} + +#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] +pub struct LightKeyCache(HashMap); + +#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] +pub struct LightSpecializationTicks(HashMap); + +#[derive(Resource, Deref, DerefMut)] +pub struct SpecializedShadowMaterialPipelineCache { + // view light entity -> view pipeline cache + #[deref] + map: HashMap>, + marker: PhantomData, +} + +#[derive(Deref, DerefMut)] +pub struct SpecializedShadowMaterialViewPipelineCache { + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, + marker: PhantomData, +} + +impl Default for SpecializedShadowMaterialPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, + } + } +} + +impl Default for SpecializedShadowMaterialViewPipelineCache { + fn default() -> Self { + Self { + map: MainEntityHashMap::default(), + marker: PhantomData, + 
} + } +} + +pub fn check_views_lights_need_specialization( + view_lights: Query<&ViewLightEntities, With>, + view_light_entities: Query<(&LightEntity, &ExtractedView)>, + shadow_render_phases: Res>, + mut light_key_cache: ResMut, + mut light_specialization_ticks: ResMut, + ticks: SystemChangeTick, +) { + for view_lights in &view_lights { + for view_light_entity in view_lights.lights.iter().copied() { + let Ok((light_entity, extracted_view_light)) = + view_light_entities.get(view_light_entity) + else { + continue; + }; + if !shadow_render_phases.contains_key(&extracted_view_light.retained_view_entity) { + continue; + } + + let is_directional_light = matches!(light_entity, LightEntity::Directional { .. }); + let mut light_key = MeshPipelineKey::DEPTH_PREPASS; + light_key.set(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO, is_directional_light); + if let Some(current_key) = + light_key_cache.get_mut(&extracted_view_light.retained_view_entity) + { + if *current_key != light_key { + light_key_cache.insert(extracted_view_light.retained_view_entity, light_key); + light_specialization_ticks + .insert(extracted_view_light.retained_view_entity, ticks.this_run()); + } + } else { + light_key_cache.insert(extracted_view_light.retained_view_entity, light_key); + light_specialization_ticks + .insert(extracted_view_light.retained_view_entity, ticks.this_run()); + } + } + } +} + +pub fn specialize_shadows( prepass_pipeline: Res>, - (render_meshes, render_mesh_instances): ( + ( + render_meshes, + render_mesh_instances, + render_materials, + render_material_instances, + material_bind_group_allocator, + ): ( Res>, Res, - ), - (render_materials, render_material_instances): ( Res>>, - Res>, + Res, + Res>, ), - material_bind_group_allocator: Res>, - mut shadow_render_phases: ResMut>, + shadow_render_phases: Res>, mut pipelines: ResMut>>, pipeline_cache: Res, render_lightmaps: Res, - mesh_allocator: Res, - view_lights: Query<(Entity, &ViewLightEntities)>, - view_light_entities: 
Query<&LightEntity>, + view_lights: Query<(Entity, &ViewLightEntities), With>, + view_light_entities: Query<(&LightEntity, &ExtractedView)>, point_light_entities: Query<&RenderCubemapVisibleEntities, With>, directional_light_entities: Query< &RenderCascadesVisibleEntities, With, >, spot_light_entities: Query<&RenderVisibleMeshEntities, With>, + light_key_cache: Res, + mut specialized_material_pipeline_cache: ResMut>, + light_specialization_ticks: Res, + entity_specialization_ticks: Res>, + ticks: SystemChangeTick, ) where M::Data: PartialEq + Eq + Hash + Clone, { + // Record the retained IDs of all shadow views so that we can expire old + // pipeline IDs. + let mut all_shadow_views: HashSet = HashSet::default(); + for (entity, view_lights) in &view_lights { - let draw_shadow_mesh = shadow_draw_functions.read().id::>(); for view_light_entity in view_lights.lights.iter().copied() { - let Ok(light_entity) = view_light_entities.get(view_light_entity) else { + let Ok((light_entity, extracted_view_light)) = + view_light_entities.get(view_light_entity) + else { continue; }; - let Some(shadow_phase) = shadow_render_phases.get_mut(&view_light_entity) else { + + all_shadow_views.insert(extracted_view_light.retained_view_entity); + + if !shadow_render_phases.contains_key(&extracted_view_light.retained_view_entity) { + continue; + } + let Some(light_key) = light_key_cache.get(&extracted_view_light.retained_view_entity) + else { continue; }; - let is_directional_light = matches!(light_entity, LightEntity::Directional { .. 
}); let visible_entities = match light_entity { LightEntity::Directional { light_entity, @@ -1606,29 +1797,51 @@ pub fn queue_shadows( .get(*light_entity) .expect("Failed to get spot light visible entities"), }; - let mut light_key = MeshPipelineKey::DEPTH_PREPASS; - light_key.set(MeshPipelineKey::UNCLIPPED_DEPTH_ORTHO, is_directional_light); // NOTE: Lights with shadow mapping disabled will have no visible entities // so no meshes will be queued - for (entity, main_entity) in visible_entities.iter().copied() { - let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(main_entity) + let view_tick = light_specialization_ticks + .get(&extracted_view_light.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(extracted_view_light.retained_view_entity) + .or_default(); + + for (_, visible_entity) in visible_entities.iter().copied() { + let Some(material_instances) = + render_material_instances.instances.get(&visible_entity) else { continue; }; + let Ok(material_asset_id) = material_instances.asset_id.try_typed::() else { + continue; + }; + let Some(mesh_instance) = + render_mesh_instances.render_mesh_queue_data(visible_entity) + else { + continue; + }; + let entity_tick = entity_specialization_ticks.get(&visible_entity).unwrap(); + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(&visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } + let Some(material) = render_materials.get(material_asset_id) else { + continue; + }; if !mesh_instance .flags .contains(RenderMeshInstanceFlags::SHADOW_CASTER) { continue; } - let Some(material_asset_id) = render_material_instances.get(&main_entity) else { - continue; - }; - let Some(material) = 
render_materials.get(*material_asset_id) else { - continue; - }; let Some(material_bind_group) = material_bind_group_allocator.get(material.binding.group) else { @@ -1639,14 +1852,17 @@ pub fn queue_shadows( }; let mut mesh_key = - light_key | MeshPipelineKey::from_bits_retain(mesh.key_bits.bits()); + *light_key | MeshPipelineKey::from_bits_retain(mesh.key_bits.bits()); // Even though we don't use the lightmap in the shadow map, the // `SetMeshBindGroup` render command will bind the data for it. So // we need to include the appropriate flag in the mesh pipeline key // to ensure that the necessary bind group layout entries are // present. - if render_lightmaps.render_lightmaps.contains_key(&main_entity) { + if render_lightmaps + .render_lightmaps + .contains_key(&visible_entity) + { mesh_key |= MeshPipelineKey::LIGHTMAPPED; } @@ -1678,21 +1894,140 @@ pub fn queue_shadows( } }; + view_specialized_material_pipeline_cache + .insert(visible_entity, (ticks.this_run(), pipeline_id)); + } + } + } + + // Delete specialized pipelines belonging to views that have expired. + specialized_material_pipeline_cache.retain(|view, _| all_shadow_views.contains(view)); +} + +/// For each shadow cascade, iterates over all the meshes "visible" from it and +/// adds them to [`BinnedRenderPhase`]s or [`SortedRenderPhase`]s as +/// appropriate. 
+pub fn queue_shadows( + shadow_draw_functions: Res>, + render_mesh_instances: Res, + render_materials: Res>>, + render_material_instances: Res, + mut shadow_render_phases: ResMut>, + gpu_preprocessing_support: Res, + mesh_allocator: Res, + view_lights: Query<(Entity, &ViewLightEntities), With>, + view_light_entities: Query<(&LightEntity, &ExtractedView)>, + point_light_entities: Query<&RenderCubemapVisibleEntities, With>, + directional_light_entities: Query< + &RenderCascadesVisibleEntities, + With, + >, + spot_light_entities: Query<&RenderVisibleMeshEntities, With>, + specialized_material_pipeline_cache: Res>, +) where + M::Data: PartialEq + Eq + Hash + Clone, +{ + let draw_shadow_mesh = shadow_draw_functions.read().id::>(); + for (entity, view_lights) in &view_lights { + for view_light_entity in view_lights.lights.iter().copied() { + let Ok((light_entity, extracted_view_light)) = + view_light_entities.get(view_light_entity) + else { + continue; + }; + let Some(shadow_phase) = + shadow_render_phases.get_mut(&extracted_view_light.retained_view_entity) + else { + continue; + }; + + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(&extracted_view_light.retained_view_entity) + else { + continue; + }; + + let visible_entities = match light_entity { + LightEntity::Directional { + light_entity, + cascade_index, + } => directional_light_entities + .get(*light_entity) + .expect("Failed to get directional light visible entities") + .entities + .get(&entity) + .expect("Failed to get directional light visible entities for view") + .get(*cascade_index) + .expect("Failed to get directional light visible entities for cascade"), + LightEntity::Point { + light_entity, + face_index, + } => point_light_entities + .get(*light_entity) + .expect("Failed to get point light visible entities") + .get(*face_index), + LightEntity::Spot { light_entity } => spot_light_entities + .get(*light_entity) + .expect("Failed to get spot light visible 
entities"), + }; + + for (entity, main_entity) in visible_entities.iter().copied() { + let Some((current_change_tick, pipeline_id)) = + view_specialized_material_pipeline_cache.get(&main_entity) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. + if shadow_phase.validate_cached_entity(main_entity, *current_change_tick) { + continue; + } + + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(main_entity) + else { + continue; + }; + if !mesh_instance + .flags + .contains(RenderMeshInstanceFlags::SHADOW_CASTER) + { + continue; + } + + let Some(material_instance) = render_material_instances.instances.get(&main_entity) + else { + continue; + }; + let Ok(material_asset_id) = material_instance.asset_id.try_typed::() else { + continue; + }; + let Some(material) = render_materials.get(material_asset_id) else { + continue; + }; + let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + let batch_set_key = ShadowBatchSetKey { + pipeline: *pipeline_id, + draw_function: draw_shadow_mesh, + material_bind_group_index: Some(material.binding.group.0), + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; + shadow_phase.add( + batch_set_key, ShadowBinKey { - batch_set_key: ShadowBatchSetKey { - pipeline: pipeline_id, - draw_function: draw_shadow_mesh, - vertex_slab: vertex_slab.unwrap_or_default(), - index_slab, - }, asset_id: mesh_instance.mesh_asset_id.into(), }, (entity, main_entity), - BinnedRenderPhaseType::mesh(mesh_instance.should_batch()), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + *current_change_tick, ); } } @@ -1700,7 +2035,13 @@ pub fn queue_shadows( } pub struct Shadow { - pub key: ShadowBinKey, + /// Determines which objects can be placed into a *batch set*. 
+ /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. + pub batch_set_key: ShadowBatchSetKey, + /// Information that separates items into bins. + pub bin_key: ShadowBinKey, pub representative_entity: (Entity, MainEntity), pub batch_range: Range, pub extra_index: PhaseItemExtraIndex, @@ -1719,6 +2060,11 @@ pub struct ShadowBatchSetKey { /// The function used to draw. pub draw_function: DrawFunctionId, + /// The ID of a bind group specific to the material. + /// + /// In the case of PBR, this is the `MaterialBindGroupIndex`. + pub material_bind_group_index: Option, + /// The ID of the slab of GPU memory that contains vertex data. /// /// For non-mesh items, you can fill this with 0 if your items can be @@ -1731,27 +2077,19 @@ pub struct ShadowBatchSetKey { pub index_slab: Option, } +impl PhaseItemBatchSetKey for ShadowBatchSetKey { + fn indexed(&self) -> bool { + self.index_slab.is_some() + } +} + /// Data used to bin each object in the shadow map phase. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ShadowBinKey { - /// The key of the *batch set*. - /// - /// As batches belong to a batch set, meshes in a batch must obviously be - /// able to be placed in a single batch set. - pub batch_set_key: ShadowBatchSetKey, - /// The object. 
pub asset_id: UntypedAssetId, } -impl PhaseItemBinKey for ShadowBinKey { - type BatchSetKey = ShadowBatchSetKey; - - fn get_batch_set_key(&self) -> Option { - Some(self.batch_set_key.clone()) - } -} - impl PhaseItem for Shadow { #[inline] fn entity(&self) -> Entity { @@ -1764,7 +2102,7 @@ impl PhaseItem for Shadow { #[inline] fn draw_function(&self) -> DrawFunctionId { - self.key.batch_set_key.draw_function + self.batch_set_key.draw_function } #[inline] @@ -1789,17 +2127,20 @@ impl PhaseItem for Shadow { } impl BinnedPhaseItem for Shadow { + type BatchSetKey = ShadowBatchSetKey; type BinKey = ShadowBinKey; #[inline] fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self { Shadow { - key, + batch_set_key, + bin_key, representative_entity, batch_range, extra_index, @@ -1810,17 +2151,48 @@ impl BinnedPhaseItem for Shadow { impl CachedRenderPipelinePhaseItem for Shadow { #[inline] fn cached_pipeline(&self) -> CachedRenderPipelineId { - self.key.batch_set_key.pipeline + self.batch_set_key.pipeline } } +/// The rendering node that renders meshes that were "visible" (so to speak) +/// from a light last frame. +/// +/// If occlusion culling for a light is disabled, then this node simply renders +/// all meshes in range of the light. +#[derive(Deref, DerefMut)] +pub struct EarlyShadowPassNode(ShadowPassNode); + +/// The rendering node that renders meshes that became newly "visible" (so to +/// speak) from a light this frame. +/// +/// If occlusion culling for a light is disabled, then this node does nothing. +#[derive(Deref, DerefMut)] +pub struct LateShadowPassNode(ShadowPassNode); + +/// Encapsulates rendering logic shared between the early and late shadow pass +/// nodes. pub struct ShadowPassNode { + /// The query that finds cameras in which shadows are visible. 
main_view_query: QueryState>, - view_light_query: QueryState>, + /// The query that finds shadow cascades. + view_light_query: QueryState<(Read, Read, Has)>, } -impl ShadowPassNode { - pub fn new(world: &mut World) -> Self { +impl FromWorld for EarlyShadowPassNode { + fn from_world(world: &mut World) -> Self { + Self(ShadowPassNode::from_world(world)) + } +} + +impl FromWorld for LateShadowPassNode { + fn from_world(world: &mut World) -> Self { + Self(ShadowPassNode::from_world(world)) + } +} + +impl FromWorld for ShadowPassNode { + fn from_world(world: &mut World) -> Self { Self { main_view_query: QueryState::new(world), view_light_query: QueryState::new(world), @@ -1828,10 +2200,9 @@ impl ShadowPassNode { } } -impl Node for ShadowPassNode { +impl Node for EarlyShadowPassNode { fn update(&mut self, world: &mut World) { - self.main_view_query.update_archetypes(world); - self.view_light_query.update_archetypes(world); + self.0.update(world); } fn run<'w>( @@ -1840,27 +2211,66 @@ impl Node for ShadowPassNode { render_context: &mut RenderContext<'w>, world: &'w World, ) -> Result<(), NodeRunError> { - let diagnostics = render_context.diagnostic_recorder(); + self.0.run(graph, render_context, world, false) + } +} - let view_entity = graph.view_entity(); +impl Node for LateShadowPassNode { + fn update(&mut self, world: &mut World) { + self.0.update(world); + } + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + ) -> Result<(), NodeRunError> { + self.0.run(graph, render_context, world, true) + } +} + +impl ShadowPassNode { + fn update(&mut self, world: &mut World) { + self.main_view_query.update_archetypes(world); + self.view_light_query.update_archetypes(world); + } + /// Runs the node logic. + /// + /// `is_late` is true if this is the late shadow pass or false if this is + /// the early shadow pass. 
+ fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + world: &'w World, + is_late: bool, + ) -> Result<(), NodeRunError> { let Some(shadow_render_phases) = world.get_resource::>() else { return Ok(()); }; - let time_span = diagnostics.time_span(render_context.command_encoder(), "shadows"); - - if let Ok(view_lights) = self.main_view_query.get_manual(world, view_entity) { + if let Ok(view_lights) = self.main_view_query.get_manual(world, graph.view_entity()) { for view_light_entity in view_lights.lights.iter().copied() { - let Some(shadow_phase) = shadow_render_phases.get(&view_light_entity) else { + let Ok((view_light, extracted_light_view, occlusion_culling)) = + self.view_light_query.get_manual(world, view_light_entity) + else { continue; }; - let view_light = self - .view_light_query - .get_manual(world, view_light_entity) - .unwrap(); + // There's no need for a late shadow pass if the light isn't + // using occlusion culling. + if is_late && !occlusion_culling { + continue; + } + + let Some(shadow_phase) = + shadow_render_phases.get(&extracted_light_view.retained_view_entity) + else { + continue; + }; let depth_stencil_attachment = Some(view_light.depth_attachment.get_attachment(StoreOp::Store)); @@ -1899,8 +2309,6 @@ impl Node for ShadowPassNode { } } - time_span.end(render_context.command_encoder()); - Ok(()) } } diff --git a/crates/bevy_pbr/src/render/mesh.rs b/crates/bevy_pbr/src/render/mesh.rs index 0ec344410cdcf..4bae79b807417 100644 --- a/crates/bevy_pbr/src/render/mesh.rs +++ b/crates/bevy_pbr/src/render/mesh.rs @@ -1,8 +1,6 @@ -use core::mem::size_of; - use crate::material_bind_groups::{MaterialBindGroupIndex, MaterialBindGroupSlot}; use allocator::MeshAllocator; -use bevy_asset::{load_internal_asset, AssetId, UntypedAssetId}; +use bevy_asset::{load_internal_asset, AssetId}; use bevy_core_pipeline::{ core_3d::{AlphaMask3d, Opaque3d, Transmissive3d, Transparent3d, CORE_3D_DEPTH_FORMAT}, 
deferred::{AlphaMask3dDeferred, Opaque3dDeferred}, @@ -10,48 +8,52 @@ use bevy_core_pipeline::{ prepass::MotionVectorPrepass, }; use bevy_derive::{Deref, DerefMut}; +use bevy_diagnostic::FrameCount; use bevy_ecs::{ prelude::*, - query::ROQueryItem, + query::{QueryData, ROQueryItem}, system::{lifetimeless::*, SystemParamItem, SystemState}, }; use bevy_image::{BevyDefault, ImageSampler, TextureFormatPixelInfo}; use bevy_math::{Affine3, Rect, UVec2, Vec3, Vec4}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_render::{ batching::{ gpu_preprocessing::{ - self, GpuPreprocessingSupport, IndirectParameters, IndirectParametersBuffer, - InstanceInputUniformBuffer, + self, GpuPreprocessingSupport, IndirectBatchSet, IndirectParametersBuffers, + IndirectParametersCpuMetadata, IndirectParametersIndexed, IndirectParametersNonIndexed, + InstanceInputUniformBuffer, UntypedPhaseIndirectParametersBuffers, }, no_gpu_preprocessing, GetBatchData, GetFullBatchData, NoAutomaticBatching, }, camera::Camera, - mesh::*, + mesh::{skinning::SkinnedMesh, *}, primitives::Aabb, render_asset::RenderAssets, render_phase::{ - BinnedRenderPhasePlugin, PhaseItem, PhaseItemExtraIndex, RenderCommand, + BinnedRenderPhasePlugin, InputUniformIndex, PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, SortedRenderPhasePlugin, TrackedRenderPass, }, render_resource::*, renderer::{RenderAdapter, RenderDevice, RenderQueue}, - texture::DefaultImageSampler, + sync_world::MainEntityHashSet, + texture::{DefaultImageSampler, GpuImage}, view::{ - prepare_view_targets, NoFrustumCulling, NoIndirectDrawing, RenderVisibilityRanges, + self, NoFrustumCulling, NoIndirectDrawing, RenderVisibilityRanges, RetainedViewEntity, ViewTarget, ViewUniformOffset, ViewVisibility, VisibilityRange, }, Extract, }; use bevy_transform::components::GlobalTransform; -use bevy_utils::{ - default, - hashbrown::hash_map::Entry, - tracing::{error, warn}, - HashMap, Parallel, -}; +use bevy_utils::{default, 
Parallel, TypeIdMap}; +use core::any::TypeId; +use core::mem::size_of; use material_bind_groups::MaterialBindingId; -use render::skin::{self, SkinIndex}; +use tracing::{error, warn}; +use self::irradiance_volume::IRRADIANCE_VOLUMES_ARE_USABLE; +use crate::environment_map::EnvironmentMapLight; +use crate::irradiance_volume::IrradianceVolume; use crate::{ render::{ morph::{ @@ -62,33 +64,58 @@ use crate::{ }, *, }; +use bevy_core_pipeline::core_3d::Camera3d; +use bevy_core_pipeline::oit::OrderIndependentTransparencySettings; +use bevy_core_pipeline::prepass::{DeferredPrepass, DepthPrepass, NormalPrepass}; +use bevy_core_pipeline::tonemapping::{DebandDither, Tonemapping}; +use bevy_ecs::component::Tick; +use bevy_ecs::system::SystemChangeTick; +use bevy_render::camera::TemporalJitter; +use bevy_render::prelude::Msaa; use bevy_render::sync_world::{MainEntity, MainEntityHashMap}; +use bevy_render::view::ExtractedView; +use bevy_render::RenderSet::PrepareAssets; use bytemuck::{Pod, Zeroable}; use nonmax::{NonMaxU16, NonMaxU32}; use smallvec::{smallvec, SmallVec}; use static_assertions::const_assert_eq; -use self::irradiance_volume::IRRADIANCE_VOLUMES_ARE_USABLE; - /// Provides support for rendering 3D meshes. -#[derive(Default)] pub struct MeshRenderPlugin { /// Whether we're building [`MeshUniform`]s on GPU. /// /// This requires compute shader support and so will be forcibly disabled if /// the platform doesn't support those. pub use_gpu_instance_buffer_builder: bool, + /// Debugging flags that can optionally be set when constructing the renderer. 
+ pub debug_flags: RenderDebugFlags, } -pub const FORWARD_IO_HANDLE: Handle = Handle::weak_from_u128(2645551199423808407); -pub const MESH_VIEW_TYPES_HANDLE: Handle = Handle::weak_from_u128(8140454348013264787); -pub const MESH_VIEW_BINDINGS_HANDLE: Handle = Handle::weak_from_u128(9076678235888822571); -pub const MESH_TYPES_HANDLE: Handle = Handle::weak_from_u128(2506024101911992377); -pub const MESH_BINDINGS_HANDLE: Handle = Handle::weak_from_u128(16831548636314682308); -pub const MESH_FUNCTIONS_HANDLE: Handle = Handle::weak_from_u128(6300874327833745635); -pub const MESH_SHADER_HANDLE: Handle = Handle::weak_from_u128(3252377289100772450); -pub const SKINNING_HANDLE: Handle = Handle::weak_from_u128(13215291596265391738); -pub const MORPH_HANDLE: Handle = Handle::weak_from_u128(970982813587607345); +impl MeshRenderPlugin { + /// Creates a new [`MeshRenderPlugin`] with the given debug flags. + pub fn new(debug_flags: RenderDebugFlags) -> MeshRenderPlugin { + MeshRenderPlugin { + use_gpu_instance_buffer_builder: false, + debug_flags, + } + } +} + +pub const FORWARD_IO_HANDLE: Handle = weak_handle!("38111de1-6e35-4dbb-877b-7b6f9334baf6"); +pub const MESH_VIEW_TYPES_HANDLE: Handle = + weak_handle!("979493db-4ae1-4003-b5c6-fcbb88b152a2"); +pub const MESH_VIEW_BINDINGS_HANDLE: Handle = + weak_handle!("c6fe674b-4c21-4d4b-867a-352848da5337"); +pub const MESH_TYPES_HANDLE: Handle = weak_handle!("a4a3fc2e-a57e-4083-a8ab-2840176927f2"); +pub const MESH_BINDINGS_HANDLE: Handle = + weak_handle!("84e7f9e6-e566-4a61-914e-c568f5dabf49"); +pub const MESH_FUNCTIONS_HANDLE: Handle = + weak_handle!("c46aa0f0-6c0c-4b3a-80bf-d8213c771f12"); +pub const MESH_SHADER_HANDLE: Handle = weak_handle!("1a7bbae8-4b4f-48a7-b53b-e6822e56f321"); +pub const SKINNING_HANDLE: Handle = weak_handle!("7474e812-2506-4cbf-9de3-fe07e5c6ff24"); +pub const MORPH_HANDLE: Handle = weak_handle!("da30aac7-34cc-431d-a07f-15b1a783008c"); +pub const OCCLUSION_CULLING_HANDLE: Handle = + 
weak_handle!("eaea07d9-7516-482c-aa42-6f8e9927e1f0"); /// How many textures are allowed in the view bind group layout (`@group(0)`) before /// broader compatibility with WebGL and WebGPU is at risk, due to the minimum guaranteed @@ -136,6 +163,12 @@ impl Plugin for MeshRenderPlugin { load_internal_asset!(app, MESH_SHADER_HANDLE, "mesh.wgsl", Shader::from_wgsl); load_internal_asset!(app, SKINNING_HANDLE, "skinning.wgsl", Shader::from_wgsl); load_internal_asset!(app, MORPH_HANDLE, "morph.wgsl", Shader::from_wgsl); + load_internal_asset!( + app, + OCCLUSION_CULLING_HANDLE, + "occlusion_culling.wgsl", + Shader::from_wgsl + ); if app.get_sub_app(RenderApp).is_none() { return; @@ -146,23 +179,27 @@ impl Plugin for MeshRenderPlugin { (no_automatic_skin_batching, no_automatic_morph_batching), ) .add_plugins(( - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - BinnedRenderPhasePlugin::::default(), - SortedRenderPhasePlugin::::default(), - SortedRenderPhasePlugin::::default(), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + BinnedRenderPhasePlugin::::new(self.debug_flags), + SortedRenderPhasePlugin::::new(self.debug_flags), + SortedRenderPhasePlugin::::new(self.debug_flags), )); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app - .init_resource::() - .init_resource::() .init_resource::() .init_resource::() .init_resource::() - .init_resource::() + .init_resource::() + .configure_sets( + ExtractSchedule, + ExtractMeshesSet + .after(view::extract_visibility_ranges) + .after(late_sweep_material_instances), + ) .add_systems( ExtractSchedule, ( @@ -175,10 +212,10 @@ impl Plugin for MeshRenderPlugin { .add_systems( Render, ( - set_mesh_motion_vector_flags.in_set(RenderSet::PrepareAssets), + 
set_mesh_motion_vector_flags.in_set(RenderSet::PrepareMeshes), prepare_skins.in_set(RenderSet::PrepareResources), prepare_morphs.in_set(RenderSet::PrepareResources), - prepare_mesh_bind_group.in_set(RenderSet::PrepareBindGroups), + prepare_mesh_bind_groups.in_set(RenderSet::PrepareBindGroups), prepare_mesh_view_bind_groups .in_set(RenderSet::PrepareBindGroups) .after(prepare_oit_buffers), @@ -195,8 +232,14 @@ impl Plugin for MeshRenderPlugin { if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app + .init_resource::() + .init_resource::() .init_resource::() - .init_resource::(); + .init_resource::() + .add_systems( + Render, + check_views_need_specialization.in_set(PrepareAssets), + ); let gpu_preprocessing_support = render_app.world().resource::(); @@ -208,12 +251,15 @@ impl Plugin for MeshRenderPlugin { if use_gpu_instance_buffer_builder { render_app - .init_resource::>() + .init_resource::>() .init_resource::() + .init_resource::() .add_systems( ExtractSchedule, - extract_meshes_for_gpu_building - .in_set(ExtractMeshesSet), + extract_meshes_for_gpu_building.in_set(ExtractMeshesSet), ) .add_systems( Render, @@ -221,12 +267,9 @@ impl Plugin for MeshRenderPlugin { gpu_preprocessing::write_batched_instance_buffers:: .in_set(RenderSet::PrepareResourcesFlush), gpu_preprocessing::delete_old_work_item_buffers:: - .in_set(RenderSet::ManageViews) - .after(prepare_view_targets), + .in_set(RenderSet::PrepareResources), collect_meshes_for_gpu_building - .in_set(RenderSet::PrepareAssets) - .after(allocator::allocate_and_free_meshes) - .after(extract_skins) + .in_set(RenderSet::PrepareMeshes) // This must be before // `set_mesh_motion_vector_flags` so it doesn't // overwrite those flags. 
@@ -277,6 +320,141 @@ impl Plugin for MeshRenderPlugin { } } +#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] +pub struct ViewKeyCache(HashMap); + +#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] +pub struct ViewSpecializationTicks(HashMap); + +pub fn check_views_need_specialization( + mut view_key_cache: ResMut, + mut view_specialization_ticks: ResMut, + mut views: Query<( + &ExtractedView, + &Msaa, + Option<&Tonemapping>, + Option<&DebandDither>, + Option<&ShadowFilteringMethod>, + Has, + ( + Has, + Has, + Has, + Has, + ), + Option<&Camera3d>, + Has, + Option<&Projection>, + Has, + ( + Has>, + Has>, + ), + Has, + )>, + ticks: SystemChangeTick, +) { + for ( + view, + msaa, + tonemapping, + dither, + shadow_filter_method, + ssao, + (normal_prepass, depth_prepass, motion_vector_prepass, deferred_prepass), + camera_3d, + temporal_jitter, + projection, + distance_fog, + (has_environment_maps, has_irradiance_volumes), + has_oit, + ) in views.iter_mut() + { + let mut view_key = MeshPipelineKey::from_msaa_samples(msaa.samples()) + | MeshPipelineKey::from_hdr(view.hdr); + + if normal_prepass { + view_key |= MeshPipelineKey::NORMAL_PREPASS; + } + + if depth_prepass { + view_key |= MeshPipelineKey::DEPTH_PREPASS; + } + + if motion_vector_prepass { + view_key |= MeshPipelineKey::MOTION_VECTOR_PREPASS; + } + + if deferred_prepass { + view_key |= MeshPipelineKey::DEFERRED_PREPASS; + } + + if temporal_jitter { + view_key |= MeshPipelineKey::TEMPORAL_JITTER; + } + + if has_environment_maps { + view_key |= MeshPipelineKey::ENVIRONMENT_MAP; + } + + if has_irradiance_volumes { + view_key |= MeshPipelineKey::IRRADIANCE_VOLUME; + } + + if has_oit { + view_key |= MeshPipelineKey::OIT_ENABLED; + } + + if let Some(projection) = projection { + view_key |= match projection { + Projection::Perspective(_) => MeshPipelineKey::VIEW_PROJECTION_PERSPECTIVE, + Projection::Orthographic(_) => MeshPipelineKey::VIEW_PROJECTION_ORTHOGRAPHIC, + Projection::Custom(_) => 
MeshPipelineKey::VIEW_PROJECTION_NONSTANDARD, + }; + } + + match shadow_filter_method.unwrap_or(&ShadowFilteringMethod::default()) { + ShadowFilteringMethod::Hardware2x2 => { + view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_HARDWARE_2X2; + } + ShadowFilteringMethod::Gaussian => { + view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_GAUSSIAN; + } + ShadowFilteringMethod::Temporal => { + view_key |= MeshPipelineKey::SHADOW_FILTER_METHOD_TEMPORAL; + } + } + + if !view.hdr { + if let Some(tonemapping) = tonemapping { + view_key |= MeshPipelineKey::TONEMAP_IN_SHADER; + view_key |= tonemapping_pipeline_key(*tonemapping); + } + if let Some(DebandDither::Enabled) = dither { + view_key |= MeshPipelineKey::DEBAND_DITHER; + } + } + if ssao { + view_key |= MeshPipelineKey::SCREEN_SPACE_AMBIENT_OCCLUSION; + } + if distance_fog { + view_key |= MeshPipelineKey::DISTANCE_FOG; + } + if let Some(camera_3d) = camera_3d { + view_key |= screen_space_specular_transmission_pipeline_key( + camera_3d.screen_space_specular_transmission_quality, + ); + } + if !view_key_cache + .get_mut(&view.retained_view_entity) + .is_some_and(|current_key| *current_key == view_key) + { + view_key_cache.insert(view.retained_view_entity, view_key); + view_specialization_ticks.insert(view.retained_view_entity, ticks.this_run()); + } + } +} + #[derive(Component)] pub struct MeshTransforms { pub world_from_local: Affine3, @@ -314,13 +492,15 @@ pub struct MeshUniform { pub first_vertex_index: u32, /// The current skin index, or `u32::MAX` if there's no skin. pub current_skin_index: u32, - /// The previous skin index, or `u32::MAX` if there's no previous skin. - pub previous_skin_index: u32, /// The material and lightmap indices, packed into 32 bits. /// /// Low 16 bits: index of the material inside the bind group data. /// High 16 bits: index of the lightmap in the binding array. pub material_and_lightmap_bind_group_slot: u32, + /// User supplied tag to identify this mesh instance. 
+ pub tag: u32, + /// Padding. + pub pad: u32, } /// Information that has to be transferred from CPU to GPU in order to produce @@ -357,15 +537,36 @@ pub struct MeshInputUniform { /// [`MeshAllocator`]). This value stores the offset of the first vertex in /// this mesh in that buffer. pub first_vertex_index: u32, + /// The index of this mesh's first index in the index buffer, if any. + /// + /// Multiple meshes can be packed into a single index buffer (see + /// [`MeshAllocator`]). This value stores the offset of the first index in + /// this mesh in that buffer. + /// + /// If this mesh isn't indexed, this value is ignored. + pub first_index_index: u32, + /// For an indexed mesh, the number of indices that make it up; for a + /// non-indexed mesh, the number of vertices in it. + pub index_count: u32, /// The current skin index, or `u32::MAX` if there's no skin. pub current_skin_index: u32, - /// The previous skin index, or `u32::MAX` if there's no previous skin. - pub previous_skin_index: u32, /// The material and lightmap indices, packed into 32 bits. /// /// Low 16 bits: index of the material inside the bind group data. /// High 16 bits: index of the lightmap in the binding array. pub material_and_lightmap_bind_group_slot: u32, + /// The number of the frame on which this [`MeshInputUniform`] was built. + /// + /// This is used to validate the previous transform and skin. If this + /// [`MeshInputUniform`] wasn't updated on this frame, then we know that + /// neither this mesh's transform nor that of its joints have been updated + /// on this frame, and therefore the transforms of both this mesh and its + /// joints must be identical to those for the previous frame. + pub timestamp: u32, + /// User supplied tag to identify this mesh instance. + pub tag: u32, + /// Padding. + pub pad: u32, } /// Information about each mesh instance needed to cull it on GPU. 
@@ -398,7 +599,7 @@ impl MeshUniform { material_bind_group_slot: MaterialBindGroupSlot, maybe_lightmap: Option<(LightmapSlotIndex, Rect)>, current_skin_index: Option, - previous_skin_index: Option, + tag: Option, ) -> Self { let (local_from_world_transpose_a, local_from_world_transpose_b) = mesh_transforms.world_from_local.inverse_transpose_3x3(); @@ -416,9 +617,10 @@ impl MeshUniform { flags: mesh_transforms.flags, first_vertex_index, current_skin_index: current_skin_index.unwrap_or(u32::MAX), - previous_skin_index: previous_skin_index.unwrap_or(u32::MAX), material_and_lightmap_bind_group_slot: u32::from(material_bind_group_slot) | ((lightmap_bind_group_slot as u32) << 16), + tag: tag.unwrap_or(0), + pad: 0, } } } @@ -547,6 +749,11 @@ pub struct RenderMeshInstanceShared { pub material_bindings_index: MaterialBindingId, /// Various flags. pub flags: RenderMeshInstanceFlags, + /// Index of the slab that the lightmap resides in, if a lightmap is + /// present. + pub lightmap_slab_index: Option, + /// User supplied tag to identify this mesh instance. + pub tag: u32, } /// Information that is gathered during the parallel portion of mesh extraction @@ -622,10 +829,18 @@ pub enum RenderMeshInstanceGpuQueue { #[derive(Resource, Default, Deref, DerefMut)] pub struct RenderMeshInstanceGpuQueues(Parallel); +/// Holds a list of meshes that couldn't be extracted this frame because their +/// materials weren't prepared yet. +/// +/// On subsequent frames, we try to reextract those meshes. +#[derive(Resource, Default, Deref, DerefMut)] +pub struct MeshesToReextractNextFrame(MainEntityHashSet); + impl RenderMeshInstanceShared { fn from_components( previous_transform: Option<&PreviousGlobalTransform>, mesh: &Mesh3d, + tag: Option<&MeshTag>, not_shadow_caster: bool, no_automatic_batching: bool, ) -> Self { @@ -645,6 +860,8 @@ impl RenderMeshInstanceShared { flags: mesh_instance_flags, // This gets filled in later, during `RenderMeshGpuBuilder::update`. 
material_bindings_index: default(), + lightmap_slab_index: None, + tag: tag.map_or(0, |i| **i), } } @@ -680,36 +897,6 @@ pub struct RenderMeshInstancesCpu(MainEntityHashMap); #[derive(Default, Deref, DerefMut)] pub struct RenderMeshInstancesGpu(MainEntityHashMap); -/// Maps each mesh instance to the material ID, and allocated binding ID, -/// associated with that mesh instance. -#[derive(Resource, Default)] -pub struct RenderMeshMaterialIds { - /// Maps the mesh instance to the material ID. - pub(crate) mesh_to_material: MainEntityHashMap, - /// Maps the material ID to the binding ID, which describes the location of - /// that material bind group data in memory. - pub(crate) material_to_binding: HashMap, -} - -impl RenderMeshMaterialIds { - /// Returns the mesh material ID for the entity with the given mesh, or a - /// dummy mesh material ID if the mesh has no material ID. - /// - /// Meshes almost always have materials, but in very specific circumstances - /// involving custom pipelines they won't. (See the - /// `specialized_mesh_pipelines` example.) - fn mesh_material_binding_id(&self, entity: MainEntity) -> MaterialBindingId { - self.mesh_to_material - .get(&entity) - .and_then(|mesh_material_asset_id| { - self.material_to_binding - .get(mesh_material_asset_id) - .cloned() - }) - .unwrap_or_default() - } -} - impl RenderMeshInstances { /// Creates a new [`RenderMeshInstances`] instance. fn new(use_gpu_instance_buffer_builder: bool) -> RenderMeshInstances { @@ -721,7 +908,7 @@ impl RenderMeshInstances { } /// Returns the ID of the mesh asset attached to the given entity, if any. 
- pub(crate) fn mesh_asset_id(&self, entity: MainEntity) -> Option> { + pub fn mesh_asset_id(&self, entity: MainEntity) -> Option> { match *self { RenderMeshInstances::CpuBuilding(ref instances) => instances.mesh_asset_id(entity), RenderMeshInstances::GpuBuilding(ref instances) => instances.mesh_asset_id(entity), @@ -766,6 +953,7 @@ impl RenderMeshInstancesCpu { .map(|render_mesh_instance| RenderMeshQueueData { shared: &render_mesh_instance.shared, translation: render_mesh_instance.transforms.world_from_local.translation, + current_uniform_index: InputUniformIndex::default(), }) } @@ -789,6 +977,9 @@ impl RenderMeshInstancesGpu { .map(|render_mesh_instance| RenderMeshQueueData { shared: &render_mesh_instance.shared, translation: render_mesh_instance.translation, + current_uniform_index: InputUniformIndex( + render_mesh_instance.current_uniform_index.into(), + ), }) } @@ -901,7 +1092,6 @@ impl RenderMeshInstanceGpuQueue { impl RenderMeshInstanceGpuBuilder { /// Flushes this mesh instance to the [`RenderMeshInstanceGpu`] and /// [`MeshInputUniform`] tables, replacing the existing entry if applicable. 
- #[allow(clippy::too_many_arguments)] fn update( mut self, entity: MainEntity, @@ -909,33 +1099,63 @@ impl RenderMeshInstanceGpuBuilder { current_input_buffer: &mut InstanceInputUniformBuffer, previous_input_buffer: &mut InstanceInputUniformBuffer, mesh_allocator: &MeshAllocator, - mesh_material_ids: &RenderMeshMaterialIds, + mesh_material_ids: &RenderMaterialInstances, + render_material_bindings: &RenderMaterialBindings, render_lightmaps: &RenderLightmaps, - skin_indices: &SkinIndices, - ) -> u32 { - let first_vertex_index = match mesh_allocator.mesh_vertex_slice(&self.shared.mesh_asset_id) - { - Some(mesh_vertex_slice) => mesh_vertex_slice.range.start, - None => 0, - }; - - let current_skin_index = match skin_indices.current.get(&entity) { - Some(skin_indices) => skin_indices.index(), - None => u32::MAX, - }; - let previous_skin_index = match skin_indices.prev.get(&entity) { - Some(skin_indices) => skin_indices.index(), + skin_uniforms: &SkinUniforms, + timestamp: FrameCount, + meshes_to_reextract_next_frame: &mut MeshesToReextractNextFrame, + ) -> Option { + let (first_vertex_index, vertex_count) = + match mesh_allocator.mesh_vertex_slice(&self.shared.mesh_asset_id) { + Some(mesh_vertex_slice) => ( + mesh_vertex_slice.range.start, + mesh_vertex_slice.range.end - mesh_vertex_slice.range.start, + ), + None => (0, 0), + }; + let (mesh_is_indexed, first_index_index, index_count) = + match mesh_allocator.mesh_index_slice(&self.shared.mesh_asset_id) { + Some(mesh_index_slice) => ( + true, + mesh_index_slice.range.start, + mesh_index_slice.range.end - mesh_index_slice.range.start, + ), + None => (false, 0, 0), + }; + let current_skin_index = match skin_uniforms.skin_byte_offset(entity) { + Some(skin_index) => skin_index.index(), None => u32::MAX, }; - // Look up the material index. - let mesh_material_binding_id = mesh_material_ids.mesh_material_binding_id(entity); + // Look up the material index. 
If we couldn't fetch the material index, + // then the material hasn't been prepared yet, perhaps because it hasn't + // yet loaded. In that case, add the mesh to + // `meshes_to_reextract_next_frame` and bail. + let mesh_material = mesh_material_ids.mesh_material(entity); + let mesh_material_binding_id = if mesh_material != DUMMY_MESH_MATERIAL.untyped() { + match render_material_bindings.get(&mesh_material) { + Some(binding_id) => *binding_id, + None => { + meshes_to_reextract_next_frame.insert(entity); + return None; + } + } + } else { + // Use a dummy material binding ID. + MaterialBindingId::default() + }; self.shared.material_bindings_index = mesh_material_binding_id; let lightmap_slot = match render_lightmaps.render_lightmaps.get(&entity) { Some(render_lightmap) => u16::from(*render_lightmap.slot_index), None => u16::MAX, }; + let lightmap_slab_index = render_lightmaps + .render_lightmaps + .get(&entity) + .map(|lightmap| lightmap.slab_index); + self.shared.lightmap_slab_index = lightmap_slab_index; // Create the mesh input uniform. let mut mesh_input_uniform = MeshInputUniform { @@ -943,12 +1163,20 @@ impl RenderMeshInstanceGpuBuilder { lightmap_uv_rect: self.lightmap_uv_rect, flags: self.mesh_flags.bits(), previous_input_index: u32::MAX, + timestamp: timestamp.0, first_vertex_index, + first_index_index, + index_count: if mesh_is_indexed { + index_count + } else { + vertex_count + }, current_skin_index, - previous_skin_index, material_and_lightmap_bind_group_slot: u32::from( self.shared.material_bindings_index.slot, ) | ((lightmap_slot as u32) << 16), + tag: self.shared.tag, + pad: 0, }; // Did the last frame contain this entity as well? @@ -962,7 +1190,8 @@ impl RenderMeshInstanceGpuBuilder { // Save the old mesh input uniform. The mesh preprocessing // shader will need it to compute motion vectors. 
- let previous_mesh_input_uniform = current_input_buffer.get(current_uniform_index); + let previous_mesh_input_uniform = + current_input_buffer.get_unchecked(current_uniform_index); let previous_input_index = previous_input_buffer.add(previous_mesh_input_uniform); mesh_input_uniform.previous_input_index = previous_input_index; @@ -992,7 +1221,7 @@ impl RenderMeshInstanceGpuBuilder { } } - current_uniform_index + Some(current_uniform_index) } } @@ -1059,6 +1288,9 @@ pub struct RenderMeshQueueData<'a> { pub shared: &'a RenderMeshInstanceShared, /// The translation of the mesh instance. pub translation: Vec3, + /// The index of the [`MeshInputUniform`] in the GPU buffer for this mesh + /// instance. + pub current_uniform_index: InputUniformIndex, } /// A [`SystemSet`] that encompasses both [`extract_meshes_for_cpu_building`] @@ -1082,6 +1314,7 @@ pub fn extract_meshes_for_cpu_building( &GlobalTransform, Option<&PreviousGlobalTransform>, &Mesh3d, + Option<&MeshTag>, Has, Has, Has, @@ -1100,6 +1333,7 @@ pub fn extract_meshes_for_cpu_building( transform, previous_transform, mesh, + tag, no_frustum_culling, not_shadow_receiver, transmitted_receiver, @@ -1127,6 +1361,7 @@ pub fn extract_meshes_for_cpu_building( let shared = RenderMeshInstanceShared::from_components( previous_transform, mesh, + tag, not_shadow_caster, no_automatic_batching, ); @@ -1166,6 +1401,24 @@ pub fn extract_meshes_for_cpu_building( } } +/// All the data that we need from a mesh in the main world. +type GpuMeshExtractionQuery = ( + Entity, + Read, + Read, + Option>, + Option>, + Option>, + Read, + Option>, + Has, + Has, + Has, + Has, + Has, + Has, +); + /// Extracts meshes from the main world into the render world and queues /// [`MeshInputUniform`]s to be uploaded to the GPU. /// @@ -1174,28 +1427,13 @@ pub fn extract_meshes_for_cpu_building( /// /// This is the variant of the system that runs when we're using GPU /// [`MeshUniform`] building. 
-#[allow(clippy::too_many_arguments)] pub fn extract_meshes_for_gpu_building( mut render_mesh_instances: ResMut, render_visibility_ranges: Res, mut render_mesh_instance_queues: ResMut, changed_meshes_query: Extract< Query< - ( - Entity, - &ViewVisibility, - &GlobalTransform, - Option<&PreviousGlobalTransform>, - Option<&Lightmap>, - Option<&Aabb>, - &Mesh3d, - Has, - Has, - Has, - Has, - Has, - Has, - ), + GpuMeshExtractionQuery, Or<( Changed, Changed, @@ -1209,15 +1447,19 @@ pub fn extract_meshes_for_gpu_building( Changed, Changed, Changed, + Changed, )>, >, >, + all_meshes_query: Extract>, mut removed_visibilities_query: Extract>, mut removed_global_transforms_query: Extract>, mut removed_meshes_query: Extract>, - cameras_query: Extract, Without)>>, + gpu_culling_query: Extract, Without)>>, + meshes_to_reextract_next_frame: ResMut, ) { - let any_gpu_culling = !cameras_query.is_empty(); + let any_gpu_culling = !gpu_culling_query.is_empty(); + for render_mesh_instance_queue in render_mesh_instance_queues.iter_mut() { render_mesh_instance_queue.init(any_gpu_culling); } @@ -1236,80 +1478,37 @@ pub fn extract_meshes_for_gpu_building( // construct the `MeshInputUniform` for them. 
changed_meshes_query.par_iter().for_each_init( || render_mesh_instance_queues.borrow_local_mut(), - |queue, - ( - entity, - view_visibility, - transform, - previous_transform, - lightmap, - aabb, - mesh, - no_frustum_culling, - not_shadow_receiver, - transmitted_receiver, - not_shadow_caster, - no_automatic_batching, - visibility_range, - )| { - if !view_visibility.get() { - queue.remove(entity.into(), any_gpu_culling); - return; - } - - let mut lod_index = None; - if visibility_range { - lod_index = render_visibility_ranges.lod_index_for_entity(entity.into()); - } - - let mesh_flags = MeshFlags::from_components( - transform, - lod_index, - no_frustum_culling, - not_shadow_receiver, - transmitted_receiver, - ); - - let shared = RenderMeshInstanceShared::from_components( - previous_transform, - mesh, - not_shadow_caster, - no_automatic_batching, - ); - - let lightmap_uv_rect = pack_lightmap_uv_rect(lightmap.map(|lightmap| lightmap.uv_rect)); - - let gpu_mesh_culling_data = any_gpu_culling.then(|| MeshCullingData::new(aabb)); - - let previous_input_index = if shared - .flags - .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM) - { - render_mesh_instances - .get(&MainEntity::from(entity)) - .map(|render_mesh_instance| render_mesh_instance.current_uniform_index) - } else { - None - }; - - let gpu_mesh_instance_builder = RenderMeshInstanceGpuBuilder { - shared, - world_from_local: (&transform.affine()).into(), - lightmap_uv_rect, - mesh_flags, - previous_input_index, - }; - - queue.push( - entity.into(), - gpu_mesh_instance_builder, - gpu_mesh_culling_data, + |queue, query_row| { + extract_mesh_for_gpu_building( + query_row, + &render_visibility_ranges, + render_mesh_instances, + queue, + any_gpu_culling, ); }, ); - // Also record info about each mesh that became invisible. + // Process materials that `collect_meshes_for_gpu_building` marked as + // needing to be reextracted. 
This will happen when we extracted a mesh on + // some previous frame, but its material hadn't been prepared yet, perhaps + // because the material hadn't yet been loaded. We reextract such materials + // on subsequent frames so that `collect_meshes_for_gpu_building` will check + // to see if their materials have been prepared. let mut queue = render_mesh_instance_queues.borrow_local_mut(); + for &mesh_entity in &**meshes_to_reextract_next_frame { + if let Ok(query_row) = all_meshes_query.get(*mesh_entity) { + extract_mesh_for_gpu_building( + query_row, + &render_visibility_ranges, + render_mesh_instances, + &mut queue, + any_gpu_culling, + ); + } + } + + // Also record info about each mesh that became invisible. for entity in removed_visibilities_query .read() .chain(removed_global_transforms_query.read()) @@ -1318,12 +1517,93 @@ pub fn extract_meshes_for_gpu_building( // Only queue a mesh for removal if we didn't pick it up above. // It's possible that a necessary component was removed and re-added in // the same frame. 
- if !changed_meshes_query.contains(entity) { - queue.remove(entity.into(), any_gpu_culling); + let entity = MainEntity::from(entity); + if !changed_meshes_query.contains(*entity) + && !meshes_to_reextract_next_frame.contains(&entity) + { + queue.remove(entity, any_gpu_culling); } } } +fn extract_mesh_for_gpu_building( + ( + entity, + view_visibility, + transform, + previous_transform, + lightmap, + aabb, + mesh, + tag, + no_frustum_culling, + not_shadow_receiver, + transmitted_receiver, + not_shadow_caster, + no_automatic_batching, + visibility_range, + ): ::Item<'_>, + render_visibility_ranges: &RenderVisibilityRanges, + render_mesh_instances: &RenderMeshInstancesGpu, + queue: &mut RenderMeshInstanceGpuQueue, + any_gpu_culling: bool, +) { + if !view_visibility.get() { + queue.remove(entity.into(), any_gpu_culling); + return; + } + + let mut lod_index = None; + if visibility_range { + lod_index = render_visibility_ranges.lod_index_for_entity(entity.into()); + } + + let mesh_flags = MeshFlags::from_components( + transform, + lod_index, + no_frustum_culling, + not_shadow_receiver, + transmitted_receiver, + ); + + let shared = RenderMeshInstanceShared::from_components( + previous_transform, + mesh, + tag, + not_shadow_caster, + no_automatic_batching, + ); + + let lightmap_uv_rect = pack_lightmap_uv_rect(lightmap.map(|lightmap| lightmap.uv_rect)); + + let gpu_mesh_culling_data = any_gpu_culling.then(|| MeshCullingData::new(aabb)); + + let previous_input_index = if shared + .flags + .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_TRANSFORM) + { + render_mesh_instances + .get(&MainEntity::from(entity)) + .map(|render_mesh_instance| render_mesh_instance.current_uniform_index) + } else { + None + }; + + let gpu_mesh_instance_builder = RenderMeshInstanceGpuBuilder { + shared, + world_from_local: (&transform.affine()).into(), + lightmap_uv_rect, + mesh_flags, + previous_input_index, + }; + + queue.push( + entity.into(), + gpu_mesh_instance_builder, + gpu_mesh_culling_data, 
+ ); +} + /// A system that sets the [`RenderMeshInstanceFlags`] for each mesh based on /// whether the previous frame had skins and/or morph targets. /// @@ -1338,12 +1618,12 @@ pub fn extract_meshes_for_gpu_building( /// [`crate::material::queue_material_meshes`] check the skin and morph target /// tables for each mesh, but that would be too slow in the hot mesh queuing /// loop. -fn set_mesh_motion_vector_flags( +pub(crate) fn set_mesh_motion_vector_flags( mut render_mesh_instances: ResMut, - skin_indices: Res, + skin_uniforms: Res, morph_indices: Res, ) { - for &entity in skin_indices.prev.keys() { + for &entity in skin_uniforms.all_skins() { render_mesh_instances .insert_mesh_instance_flags(entity, RenderMeshInstanceFlags::HAS_PREVIOUS_SKIN); } @@ -1355,7 +1635,6 @@ fn set_mesh_motion_vector_flags( /// Creates the [`RenderMeshInstanceGpu`]s and [`MeshInputUniform`]s when GPU /// mesh uniforms are built. -#[allow(clippy::too_many_arguments)] pub fn collect_meshes_for_gpu_building( render_mesh_instances: ResMut, batched_instance_buffers: ResMut< @@ -1364,20 +1643,26 @@ pub fn collect_meshes_for_gpu_building( mut mesh_culling_data_buffer: ResMut, mut render_mesh_instance_queues: ResMut, mesh_allocator: Res, - mesh_material_ids: Res, + mesh_material_ids: Res, + render_material_bindings: Res, render_lightmaps: Res, - skin_indices: Res, + skin_uniforms: Res, + frame_count: Res, + mut meshes_to_reextract_next_frame: ResMut, ) { - let RenderMeshInstances::GpuBuilding(ref mut render_mesh_instances) = + let RenderMeshInstances::GpuBuilding(render_mesh_instances) = render_mesh_instances.into_inner() else { return; }; + // We're going to rebuild `meshes_to_reextract_next_frame`. + meshes_to_reextract_next_frame.clear(); + // Collect render mesh instances. Build up the uniform buffer. let gpu_preprocessing::BatchedInstanceBuffers { - ref mut current_input_buffer, - ref mut previous_input_buffer, + current_input_buffer, + previous_input_buffer, .. 
} = batched_instance_buffers.into_inner(); @@ -1403,8 +1688,11 @@ pub fn collect_meshes_for_gpu_building( previous_input_buffer, &mesh_allocator, &mesh_material_ids, + &render_material_bindings, &render_lightmaps, - &skin_indices, + &skin_uniforms, + *frame_count, + &mut meshes_to_reextract_next_frame, ); } @@ -1422,16 +1710,21 @@ pub fn collect_meshes_for_gpu_building( ref mut removed, } => { for (entity, mesh_instance_builder, mesh_culling_builder) in changed.drain(..) { - let instance_data_index = mesh_instance_builder.update( + let Some(instance_data_index) = mesh_instance_builder.update( entity, &mut *render_mesh_instances, current_input_buffer, previous_input_buffer, &mesh_allocator, &mesh_material_ids, + &render_material_bindings, &render_lightmaps, - &skin_indices, - ); + &skin_uniforms, + *frame_count, + &mut meshes_to_reextract_next_frame, + ) else { + continue; + }; mesh_culling_builder .update(&mut mesh_culling_data_buffer, instance_data_index as usize); } @@ -1479,6 +1772,9 @@ pub struct MeshPipeline { /// This affects whether reflection probes can be used. pub binding_arrays_are_usable: bool, + /// Whether clustered decals are usable on the current render device. + pub clustered_decals_are_usable: bool, + /// Whether skins will use uniform buffers on account of storage buffers /// being unavailable on this platform. 
pub skins_use_uniform_buffers: bool, @@ -1513,8 +1809,8 @@ impl FromWorld for MeshPipeline { let format_size = image.texture_descriptor.format.pixel_size(); render_queue.write_texture( texture.as_image_copy(), - &image.data, - ImageDataLayout { + image.data.as_ref().expect("Image was created without data"), + TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: None, @@ -1540,7 +1836,11 @@ impl FromWorld for MeshPipeline { mesh_layouts: MeshLayouts::new(&render_device, &render_adapter), per_object_buffer_batch_size: GpuArrayBuffer::::batch_size(&render_device), binding_arrays_are_usable: binding_arrays_are_usable(&render_device, &render_adapter), - skins_use_uniform_buffers: skin::skins_use_uniform_buffers(&render_device), + clustered_decals_are_usable: decal::clustered::clustered_decals_are_usable( + &render_device, + &render_adapter, + ), + skins_use_uniform_buffers: skins_use_uniform_buffers(&render_device), } } } @@ -1573,20 +1873,22 @@ impl GetBatchData for MeshPipeline { SRes, SRes>, SRes, - SRes, + SRes, ); // The material bind group ID, the mesh ID, and the lightmap ID, // respectively. 
type CompareData = ( MaterialBindGroupIndex, AssetId, - Option>, + Option, ); type BufferData = MeshUniform; fn get_batch_data( - (mesh_instances, lightmaps, _, mesh_allocator, skin_indices): &SystemParamItem, + (mesh_instances, lightmaps, _, mesh_allocator, skin_uniforms): &SystemParamItem< + Self::Param, + >, (_entity, main_entity): (Entity, MainEntity), ) -> Option<(Self::BufferData, Option)> { let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else { @@ -1604,9 +1906,7 @@ impl GetBatchData for MeshPipeline { }; let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity); - let current_skin_index = skin_indices.current.get(&main_entity).map(SkinIndex::index); - let previous_skin_index = skin_indices.prev.get(&main_entity).map(SkinIndex::index); - + let current_skin_index = skin_uniforms.skin_index(main_entity); let material_bind_group_index = mesh_instance.material_bindings_index; Some(( @@ -1616,12 +1916,12 @@ impl GetBatchData for MeshPipeline { material_bind_group_index.slot, maybe_lightmap.map(|lightmap| (lightmap.slot_index, lightmap.uv_rect)), current_skin_index, - previous_skin_index, + Some(mesh_instance.tag), ), mesh_instance.should_batch().then_some(( material_bind_group_index.group, mesh_instance.mesh_asset_id, - maybe_lightmap.map(|lightmap| lightmap.image), + maybe_lightmap.map(|lightmap| lightmap.slab_index), )), )) } @@ -1632,7 +1932,7 @@ impl GetFullBatchData for MeshPipeline { fn get_index_and_compare_data( (mesh_instances, lightmaps, _, _, _): &SystemParamItem, - (_entity, main_entity): (Entity, MainEntity), + main_entity: MainEntity, ) -> Option<(NonMaxU32, Option)> { // This should only be called during GPU building. 
let RenderMeshInstances::GpuBuilding(ref mesh_instances) = **mesh_instances else { @@ -1651,14 +1951,16 @@ impl GetFullBatchData for MeshPipeline { mesh_instance.should_batch().then_some(( mesh_instance.material_bindings_index.group, mesh_instance.mesh_asset_id, - maybe_lightmap.map(|lightmap| lightmap.image), + maybe_lightmap.map(|lightmap| lightmap.slab_index), )), )) } fn get_binned_batch_data( - (mesh_instances, lightmaps, _, mesh_allocator, skin_indices): &SystemParamItem, - (_entity, main_entity): (Entity, MainEntity), + (mesh_instances, lightmaps, _, mesh_allocator, skin_uniforms): &SystemParamItem< + Self::Param, + >, + main_entity: MainEntity, ) -> Option { let RenderMeshInstances::CpuBuilding(ref mesh_instances) = **mesh_instances else { error!( @@ -1674,8 +1976,7 @@ impl GetFullBatchData for MeshPipeline { }; let maybe_lightmap = lightmaps.render_lightmaps.get(&main_entity); - let current_skin_index = skin_indices.current.get(&main_entity).map(SkinIndex::index); - let previous_skin_index = skin_indices.prev.get(&main_entity).map(SkinIndex::index); + let current_skin_index = skin_uniforms.skin_index(main_entity); Some(MeshUniform::new( &mesh_instance.transforms, @@ -1683,13 +1984,13 @@ impl GetFullBatchData for MeshPipeline { mesh_instance.material_bindings_index.slot, maybe_lightmap.map(|lightmap| (lightmap.slot_index, lightmap.uv_rect)), current_skin_index, - previous_skin_index, + Some(mesh_instance.tag), )) } fn get_binned_index( (mesh_instances, _, _, _, _): &SystemParamItem, - (_entity, main_entity): (Entity, MainEntity), + main_entity: MainEntity, ) -> Option { // This should only be called during GPU building. 
let RenderMeshInstances::GpuBuilding(ref mesh_instances) = **mesh_instances else { @@ -1705,76 +2006,31 @@ impl GetFullBatchData for MeshPipeline { .map(|entity| entity.current_uniform_index) } - fn get_batch_indirect_parameters_index( - (mesh_instances, _, meshes, mesh_allocator, _): &SystemParamItem, - indirect_parameters_buffer: &mut IndirectParametersBuffer, - entity: (Entity, MainEntity), - instance_index: u32, - ) -> Option { - get_batch_indirect_parameters_index( - mesh_instances, - meshes, - mesh_allocator, - indirect_parameters_buffer, - entity, - instance_index, - ) - } -} - -/// Pushes a set of [`IndirectParameters`] onto the [`IndirectParametersBuffer`] -/// for the given mesh instance, and returns the index of those indirect -/// parameters. -fn get_batch_indirect_parameters_index( - mesh_instances: &RenderMeshInstances, - meshes: &RenderAssets, - mesh_allocator: &MeshAllocator, - indirect_parameters_buffer: &mut IndirectParametersBuffer, - (_entity, main_entity): (Entity, MainEntity), - instance_index: u32, -) -> Option { - // This should only be called during GPU building. 
- let RenderMeshInstances::GpuBuilding(ref mesh_instances) = *mesh_instances else { - error!( - "`get_batch_indirect_parameters_index` should never be called in CPU mesh uniform \ - building mode" - ); - return None; - }; + fn write_batch_indirect_parameters_metadata( + indexed: bool, + base_output_index: u32, + batch_set_index: Option, + phase_indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers, + indirect_parameters_offset: u32, + ) { + let indirect_parameters = IndirectParametersCpuMetadata { + base_output_index, + batch_set_index: match batch_set_index { + Some(batch_set_index) => u32::from(batch_set_index), + None => !0, + }, + }; - let mesh_instance = mesh_instances.get(&main_entity)?; - let mesh = meshes.get(mesh_instance.mesh_asset_id)?; - let vertex_buffer_slice = mesh_allocator.mesh_vertex_slice(&mesh_instance.mesh_asset_id)?; - - // Note that `IndirectParameters` covers both of these structures, even - // though they actually have distinct layouts. See the comment above that - // type for more information. - let indirect_parameters = match mesh.buffer_info { - RenderMeshBufferInfo::Indexed { - count: index_count, .. 
- } => { - let index_buffer_slice = - mesh_allocator.mesh_index_slice(&mesh_instance.mesh_asset_id)?; - IndirectParameters { - vertex_or_index_count: index_count, - instance_count: 0, - first_vertex_or_first_index: index_buffer_slice.range.start, - base_vertex_or_first_instance: vertex_buffer_slice.range.start, - first_instance: instance_index, - } + if indexed { + phase_indirect_parameters_buffers + .indexed + .set(indirect_parameters_offset, indirect_parameters); + } else { + phase_indirect_parameters_buffers + .non_indexed + .set(indirect_parameters_offset, indirect_parameters); } - RenderMeshBufferInfo::NonIndexed => IndirectParameters { - vertex_or_index_count: mesh.vertex_count, - instance_count: 0, - first_vertex_or_first_index: vertex_buffer_slice.range.start, - base_vertex_or_first_instance: instance_index, - first_instance: instance_index, - }, - }; - - (indirect_parameters_buffer.push(indirect_parameters) as u32) - .try_into() - .ok() + } } bitflags::bitflags! { @@ -1807,13 +2063,15 @@ bitflags::bitflags! 
{ const TEMPORAL_JITTER = 1 << 11; const READS_VIEW_TRANSMISSION_TEXTURE = 1 << 12; const LIGHTMAPPED = 1 << 13; - const IRRADIANCE_VOLUME = 1 << 14; - const VISIBILITY_RANGE_DITHER = 1 << 15; - const SCREEN_SPACE_REFLECTIONS = 1 << 16; - const HAS_PREVIOUS_SKIN = 1 << 17; - const HAS_PREVIOUS_MORPH = 1 << 18; - const OIT_ENABLED = 1 << 19; - const LAST_FLAG = Self::OIT_ENABLED.bits(); + const LIGHTMAP_BICUBIC_SAMPLING = 1 << 14; + const IRRADIANCE_VOLUME = 1 << 15; + const VISIBILITY_RANGE_DITHER = 1 << 16; + const SCREEN_SPACE_REFLECTIONS = 1 << 17; + const HAS_PREVIOUS_SKIN = 1 << 18; + const HAS_PREVIOUS_MORPH = 1 << 19; + const OIT_ENABLED = 1 << 20; + const DISTANCE_FOG = 1 << 21; + const LAST_FLAG = Self::DISTANCE_FOG.bits(); // Bitfields const MSAA_RESERVED_BITS = Self::MSAA_MASK_BITS << Self::MSAA_SHIFT_BITS; @@ -2056,6 +2314,9 @@ impl SpecializedMeshPipeline for MeshPipeline { if cfg!(feature = "pbr_anisotropy_texture") { shader_defs.push("PBR_ANISOTROPY_TEXTURE_SUPPORTED".into()); } + if cfg!(feature = "pbr_specular_textures") { + shader_defs.push("PBR_SPECULAR_TEXTURES_SUPPORTED".into()); + } let mut bind_group_layout = vec![self.get_view_layout(key.into()).clone()]; @@ -2236,6 +2497,9 @@ impl SpecializedMeshPipeline for MeshPipeline { if key.contains(MeshPipelineKey::LIGHTMAPPED) { shader_defs.push("LIGHTMAP".into()); } + if key.contains(MeshPipelineKey::LIGHTMAP_BICUBIC_SAMPLING) { + shader_defs.push("LIGHTMAP_BICUBIC_SAMPLING".into()); + } if key.contains(MeshPipelineKey::TEMPORAL_JITTER) { shader_defs.push("TEMPORAL_JITTER".into()); @@ -2269,6 +2533,10 @@ impl SpecializedMeshPipeline for MeshPipeline { shader_defs.push("VISIBILITY_RANGE_DITHER".into()); } + if key.contains(MeshPipelineKey::DISTANCE_FOG) { + shader_defs.push("DISTANCE_FOG".into()); + } + if self.binding_arrays_are_usable { shader_defs.push("MULTIPLE_LIGHT_PROBES_IN_ARRAY".into()); shader_defs.push("MULTIPLE_LIGHTMAPS_IN_ARRAY".into()); @@ -2278,6 +2546,10 @@ impl 
SpecializedMeshPipeline for MeshPipeline { shader_defs.push("IRRADIANCE_VOLUMES_ARE_USABLE".into()); } + if self.clustered_decals_are_usable { + shader_defs.push("CLUSTERED_DECALS_ARE_USABLE".into()); + } + let format = if key.contains(MeshPipelineKey::HDR) { ViewTarget::TEXTURE_FORMAT_HDR } else { @@ -2349,9 +2621,12 @@ impl SpecializedMeshPipeline for MeshPipeline { } } -/// Bind groups for meshes currently loaded. -#[derive(Resource, Default)] -pub struct MeshBindGroups { +/// The bind groups for meshes currently loaded. +/// +/// If GPU mesh preprocessing isn't in use, these are global to the scene. If +/// GPU mesh preprocessing is in use, these are specific to a single phase. +#[derive(Default)] +pub struct MeshPhaseBindGroups { model_only: Option, skinned: Option, morph_targets: HashMap, MeshBindGroupPair>, @@ -2363,7 +2638,18 @@ pub struct MeshBindGroupPair { no_motion_vectors: BindGroup, } -impl MeshBindGroups { +/// All bind groups for meshes currently loaded. +#[derive(Resource)] +pub enum MeshBindGroups { + /// The bind groups for the meshes for the entire scene, if GPU mesh + /// preprocessing isn't in use. + CpuPreprocessing(MeshPhaseBindGroups), + /// A mapping from the type ID of a phase (e.g. [`Opaque3d`]) to the mesh + /// bind groups for that phase. + GpuPreprocessing(TypeIdMap), +} + +impl MeshPhaseBindGroups { pub fn reset(&mut self) { self.model_only = None; self.skinned = None; @@ -2405,10 +2691,10 @@ impl MeshBindGroupPair { } } -#[allow(clippy::too_many_arguments)] -pub fn prepare_mesh_bind_group( +/// Creates the per-mesh bind groups for each type of mesh and each phase. +pub fn prepare_mesh_bind_groups( + mut commands: Commands, meshes: Res>, - mut groups: ResMut, mesh_pipeline: Res, render_device: Res, cpu_batched_instance_buffer: Option< @@ -2421,36 +2707,88 @@ pub fn prepare_mesh_bind_group( weights_uniform: Res, mut render_lightmaps: ResMut, ) { - groups.reset(); + // CPU mesh preprocessing path. 
+ if let Some(cpu_batched_instance_buffer) = cpu_batched_instance_buffer { + if let Some(instance_data_binding) = cpu_batched_instance_buffer + .into_inner() + .instance_data_binding() + { + // In this path, we only have a single set of bind groups for all phases. + let cpu_preprocessing_mesh_bind_groups = prepare_mesh_bind_groups_for_phase( + instance_data_binding, + &meshes, + &mesh_pipeline, + &render_device, + &skins_uniform, + &weights_uniform, + &mut render_lightmaps, + ); + + commands.insert_resource(MeshBindGroups::CpuPreprocessing( + cpu_preprocessing_mesh_bind_groups, + )); + return; + } + } + // GPU mesh preprocessing path. + if let Some(gpu_batched_instance_buffers) = gpu_batched_instance_buffers { + let mut gpu_preprocessing_mesh_bind_groups = TypeIdMap::default(); + + // Loop over each phase. + for (phase_type_id, batched_phase_instance_buffers) in + &gpu_batched_instance_buffers.phase_instance_buffers + { + let Some(instance_data_binding) = + batched_phase_instance_buffers.instance_data_binding() + else { + continue; + }; + + let mesh_phase_bind_groups = prepare_mesh_bind_groups_for_phase( + instance_data_binding, + &meshes, + &mesh_pipeline, + &render_device, + &skins_uniform, + &weights_uniform, + &mut render_lightmaps, + ); + + gpu_preprocessing_mesh_bind_groups.insert(*phase_type_id, mesh_phase_bind_groups); + } + + commands.insert_resource(MeshBindGroups::GpuPreprocessing( + gpu_preprocessing_mesh_bind_groups, + )); + } +} + +/// Creates the per-mesh bind groups for each type of mesh, for a single phase. 
+fn prepare_mesh_bind_groups_for_phase( + model: BindingResource, + meshes: &RenderAssets, + mesh_pipeline: &MeshPipeline, + render_device: &RenderDevice, + skins_uniform: &SkinUniforms, + weights_uniform: &MorphUniforms, + render_lightmaps: &mut RenderLightmaps, +) -> MeshPhaseBindGroups { let layouts = &mesh_pipeline.mesh_layouts; - let model = if let Some(cpu_batched_instance_buffer) = cpu_batched_instance_buffer { - cpu_batched_instance_buffer - .into_inner() - .instance_data_binding() - } else if let Some(gpu_batched_instance_buffers) = gpu_batched_instance_buffers { - gpu_batched_instance_buffers - .into_inner() - .instance_data_binding() - } else { - return; + // TODO: Reuse allocations. + let mut groups = MeshPhaseBindGroups { + model_only: Some(layouts.model_only(render_device, &model)), + ..default() }; - let Some(model) = model else { return }; - - groups.model_only = Some(layouts.model_only(&render_device, &model)); // Create the skinned mesh bind group with the current and previous buffers - // (the latter being for motion vector computation). If there's no previous - // buffer, just use the current one as the shader will ignore it. - let skin = skins_uniform.current_buffer.buffer(); - if let Some(skin) = skin { - let prev_skin = skins_uniform.prev_buffer.buffer().unwrap_or(skin); - groups.skinned = Some(MeshBindGroupPair { - motion_vectors: layouts.skinned_motion(&render_device, &model, skin, prev_skin), - no_motion_vectors: layouts.skinned(&render_device, &model, skin), - }); - } + // (the latter being for motion vector computation). + let (skin, prev_skin) = (&skins_uniform.current_buffer, &skins_uniform.prev_buffer); + groups.skinned = Some(MeshBindGroupPair { + motion_vectors: layouts.skinned_motion(render_device, &model, skin, prev_skin), + no_motion_vectors: layouts.skinned(render_device, &model, skin), + }); // Create the morphed bind groups just like we did for the skinned bind // group. 
@@ -2458,43 +2796,37 @@ pub fn prepare_mesh_bind_group( let prev_weights = weights_uniform.prev_buffer.buffer().unwrap_or(weights); for (id, gpu_mesh) in meshes.iter() { if let Some(targets) = gpu_mesh.morph_targets.as_ref() { - let bind_group_pair = match skin.filter(|_| is_skinned(&gpu_mesh.layout)) { - Some(skin) => { - let prev_skin = skins_uniform.prev_buffer.buffer().unwrap_or(skin); - MeshBindGroupPair { - motion_vectors: layouts.morphed_skinned_motion( - &render_device, - &model, - skin, - weights, - targets, - prev_skin, - prev_weights, - ), - no_motion_vectors: layouts.morphed_skinned( - &render_device, - &model, - skin, - weights, - targets, - ), - } - } - None => MeshBindGroupPair { - motion_vectors: layouts.morphed_motion( - &render_device, + let bind_group_pair = if is_skinned(&gpu_mesh.layout) { + let prev_skin = &skins_uniform.prev_buffer; + MeshBindGroupPair { + motion_vectors: layouts.morphed_skinned_motion( + render_device, &model, + skin, weights, targets, + prev_skin, prev_weights, ), - no_motion_vectors: layouts.morphed( - &render_device, + no_motion_vectors: layouts.morphed_skinned( + render_device, &model, + skin, weights, targets, ), - }, + } + } else { + MeshBindGroupPair { + motion_vectors: layouts.morphed_motion( + render_device, + &model, + weights, + targets, + prev_weights, + ), + no_motion_vectors: layouts.morphed(render_device, &model, weights, targets), + } }; groups.morph_targets.insert(id, bind_group_pair); } @@ -2506,9 +2838,11 @@ pub fn prepare_mesh_bind_group( for (lightmap_slab_id, lightmap_slab) in render_lightmaps.slabs.iter_mut().enumerate() { groups.lightmaps.insert( LightmapSlabIndex(NonMaxU32::new(lightmap_slab_id as u32).unwrap()), - layouts.lightmapped(&render_device, &model, lightmap_slab, bindless_supported), + layouts.lightmapped(render_device, &model, lightmap_slab, bindless_supported), ); } + + groups } pub struct SetMeshViewBindGroup; @@ -2566,7 +2900,7 @@ impl RenderCommand

for SetMeshBindGroup { SRes, SRes, SRes, - SRes, + SRes, SRes, SRes, ); @@ -2582,7 +2916,7 @@ impl RenderCommand

for SetMeshBindGroup { render_device, bind_groups, mesh_instances, - skin_indices, + skin_uniforms, morph_indices, lightmaps, ): SystemParamItem<'w, '_, Self::Param>, @@ -2590,7 +2924,7 @@ impl RenderCommand

for SetMeshBindGroup { ) -> RenderCommandResult { let bind_groups = bind_groups.into_inner(); let mesh_instances = mesh_instances.into_inner(); - let skin_indices = skin_indices.into_inner(); + let skin_uniforms = skin_uniforms.into_inner(); let morph_indices = morph_indices.into_inner(); let entity = &item.main_entity(); @@ -2599,12 +2933,11 @@ impl RenderCommand

for SetMeshBindGroup { return RenderCommandResult::Success; }; - let current_skin_index = skin_indices.current.get(entity); - let prev_skin_index = skin_indices.prev.get(entity); + let current_skin_byte_offset = skin_uniforms.skin_byte_offset(*entity); let current_morph_index = morph_indices.current.get(entity); let prev_morph_index = morph_indices.prev.get(entity); - let is_skinned = current_skin_index.is_some(); + let is_skinned = current_skin_byte_offset.is_some(); let is_morphed = current_morph_index.is_some(); let lightmap_slab_index = lightmaps @@ -2612,7 +2945,20 @@ impl RenderCommand

for SetMeshBindGroup { .get(entity) .map(|render_lightmap| render_lightmap.slab_index); - let Some(bind_group) = bind_groups.get( + let Some(mesh_phase_bind_groups) = (match *bind_groups { + MeshBindGroups::CpuPreprocessing(ref mesh_phase_bind_groups) => { + Some(mesh_phase_bind_groups) + } + MeshBindGroups::GpuPreprocessing(ref mesh_phase_bind_groups) => { + mesh_phase_bind_groups.get(&TypeId::of::

()) + } + }) else { + // This is harmless if e.g. we're rendering the `Shadow` phase and + // there weren't any shadows. + return RenderCommandResult::Success; + }; + + let Some(bind_group) = mesh_phase_bind_groups.get( mesh_asset_id, lightmap_slab_index, is_skinned, @@ -2632,8 +2978,8 @@ impl RenderCommand

for SetMeshBindGroup { dynamic_offsets[offset_count] = dynamic_offset; offset_count += 1; } - if let Some(current_skin_index) = current_skin_index { - if skin::skins_use_uniform_buffers(&render_device) { + if let Some(current_skin_index) = current_skin_byte_offset { + if skins_use_uniform_buffers(&render_device) { dynamic_offsets[offset_count] = current_skin_index.byte_offset; offset_count += 1; } @@ -2645,16 +2991,12 @@ impl RenderCommand

for SetMeshBindGroup { // Attach motion vectors if needed. if has_motion_vector_prepass { - // Attach the previous skin index for motion vector computation. If - // there isn't one, just use zero as the shader will ignore it. - if current_skin_index.is_some() && skin::skins_use_uniform_buffers(&render_device) { - match prev_skin_index { - Some(prev_skin_index) => { - dynamic_offsets[offset_count] = prev_skin_index.byte_offset; - } - None => dynamic_offsets[offset_count] = 0, + // Attach the previous skin index for motion vector computation. + if skins_use_uniform_buffers(&render_device) { + if let Some(current_skin_byte_offset) = current_skin_byte_offset { + dynamic_offsets[offset_count] = current_skin_byte_offset.byte_offset; + offset_count += 1; } - offset_count += 1; } // Attach the previous morph index for motion vector computation. If @@ -2681,12 +3023,13 @@ impl RenderCommand

for DrawMesh { type Param = ( SRes>, SRes, - SRes, + SRes, SRes, SRes, Option>, + SRes, ); - type ViewQuery = Has; + type ViewQuery = Has; type ItemQuery = (); #[inline] fn render<'w>( @@ -2700,6 +3043,7 @@ impl RenderCommand

for DrawMesh { pipeline_cache, mesh_allocator, preprocess_pipelines, + preprocessing_support, ): SystemParamItem<'w, '_, Self::Param>, pass: &mut TrackedRenderPass<'w>, ) -> RenderCommandResult { @@ -2708,7 +3052,8 @@ impl RenderCommand

for DrawMesh { // it's compiled. Otherwise, our mesh instance data won't be present. if let Some(preprocess_pipelines) = preprocess_pipelines { if !has_preprocess_bind_group - || !preprocess_pipelines.pipelines_are_loaded(&pipeline_cache) + || !preprocess_pipelines + .pipelines_are_loaded(&pipeline_cache, &preprocessing_support) { return RenderCommandResult::Skip; } @@ -2729,26 +3074,6 @@ impl RenderCommand

for DrawMesh { return RenderCommandResult::Skip; }; - // Calculate the indirect offset, and look up the buffer. - let indirect_parameters = match item.extra_index() { - PhaseItemExtraIndex::None | PhaseItemExtraIndex::DynamicOffset(_) => None, - PhaseItemExtraIndex::IndirectParametersIndex(indices) => { - match indirect_parameters_buffer.buffer() { - None => { - warn!( - "Not rendering mesh because indirect parameters buffer wasn't present" - ); - return RenderCommandResult::Skip; - } - Some(buffer) => Some(( - indices.start as u64 * size_of::() as u64, - indices.end - indices.start, - buffer, - )), - } - } - }; - pass.set_vertex_buffer(0, vertex_buffer_slice.buffer.slice(..)); let batch_range = item.batch_range(); @@ -2768,8 +3093,8 @@ impl RenderCommand

for DrawMesh { pass.set_index_buffer(index_buffer_slice.buffer.slice(..), 0, *index_format); - match indirect_parameters { - None => { + match item.extra_index() { + PhaseItemExtraIndex::None | PhaseItemExtraIndex::DynamicOffset(_) => { pass.draw_indexed( index_buffer_slice.range.start ..(index_buffer_slice.range.start + *count), @@ -2777,33 +3102,134 @@ impl RenderCommand

for DrawMesh { batch_range.clone(), ); } - Some(( - indirect_parameters_offset, - indirect_parameters_count, - indirect_parameters_buffer, - )) => { - pass.multi_draw_indexed_indirect( - indirect_parameters_buffer, - indirect_parameters_offset, - indirect_parameters_count, - ); + PhaseItemExtraIndex::IndirectParametersIndex { + range: indirect_parameters_range, + batch_set_index, + } => { + // Look up the indirect parameters buffer, as well as + // the buffer we're going to use for + // `multi_draw_indexed_indirect_count` (if available). + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffer.get(&TypeId::of::

()) + else { + warn!( + "Not rendering mesh because indexed indirect parameters buffer \ + wasn't present for this phase", + ); + return RenderCommandResult::Skip; + }; + let (Some(indirect_parameters_buffer), Some(batch_sets_buffer)) = ( + phase_indirect_parameters_buffers.indexed.data_buffer(), + phase_indirect_parameters_buffers + .indexed + .batch_sets_buffer(), + ) else { + warn!( + "Not rendering mesh because indexed indirect parameters buffer \ + wasn't present", + ); + return RenderCommandResult::Skip; + }; + + // Calculate the location of the indirect parameters + // within the buffer. + let indirect_parameters_offset = indirect_parameters_range.start as u64 + * size_of::() as u64; + let indirect_parameters_count = + indirect_parameters_range.end - indirect_parameters_range.start; + + // If we're using `multi_draw_indirect_count`, take the + // number of batches from the appropriate position in + // the batch sets buffer. Otherwise, supply the size of + // the batch set. + match batch_set_index { + Some(batch_set_index) => { + let count_offset = u32::from(batch_set_index) + * (size_of::() as u32); + pass.multi_draw_indexed_indirect_count( + indirect_parameters_buffer, + indirect_parameters_offset, + batch_sets_buffer, + count_offset as u64, + indirect_parameters_count, + ); + } + None => { + pass.multi_draw_indexed_indirect( + indirect_parameters_buffer, + indirect_parameters_offset, + indirect_parameters_count, + ); + } + } } } } - RenderMeshBufferInfo::NonIndexed => match indirect_parameters { - None => { + + RenderMeshBufferInfo::NonIndexed => match item.extra_index() { + PhaseItemExtraIndex::None | PhaseItemExtraIndex::DynamicOffset(_) => { pass.draw(vertex_buffer_slice.range, batch_range.clone()); } - Some(( - indirect_parameters_offset, - indirect_parameters_count, - indirect_parameters_buffer, - )) => { - pass.multi_draw_indirect( - indirect_parameters_buffer, - indirect_parameters_offset, - indirect_parameters_count, - ); + 
PhaseItemExtraIndex::IndirectParametersIndex { + range: indirect_parameters_range, + batch_set_index, + } => { + // Look up the indirect parameters buffer, as well as the + // buffer we're going to use for + // `multi_draw_indirect_count` (if available). + let Some(phase_indirect_parameters_buffers) = + indirect_parameters_buffer.get(&TypeId::of::

()) + else { + warn!( + "Not rendering mesh because non-indexed indirect parameters buffer \ + wasn't present for this phase", + ); + return RenderCommandResult::Skip; + }; + let (Some(indirect_parameters_buffer), Some(batch_sets_buffer)) = ( + phase_indirect_parameters_buffers.non_indexed.data_buffer(), + phase_indirect_parameters_buffers + .non_indexed + .batch_sets_buffer(), + ) else { + warn!( + "Not rendering mesh because non-indexed indirect parameters buffer \ + wasn't present" + ); + return RenderCommandResult::Skip; + }; + + // Calculate the location of the indirect parameters within + // the buffer. + let indirect_parameters_offset = indirect_parameters_range.start as u64 + * size_of::() as u64; + let indirect_parameters_count = + indirect_parameters_range.end - indirect_parameters_range.start; + + // If we're using `multi_draw_indirect_count`, take the + // number of batches from the appropriate position in the + // batch sets buffer. Otherwise, supply the size of the + // batch set. + match batch_set_index { + Some(batch_set_index) => { + let count_offset = + u32::from(batch_set_index) * (size_of::() as u32); + pass.multi_draw_indirect_count( + indirect_parameters_buffer, + indirect_parameters_offset, + batch_sets_buffer, + count_offset as u64, + indirect_parameters_count, + ); + } + None => { + pass.multi_draw_indirect( + indirect_parameters_buffer, + indirect_parameters_offset, + indirect_parameters_count, + ); + } + } } }, } diff --git a/crates/bevy_pbr/src/render/mesh_bindings.rs b/crates/bevy_pbr/src/render/mesh_bindings.rs index e6d07cb4e2c6e..51b28389dcd0c 100644 --- a/crates/bevy_pbr/src/render/mesh_bindings.rs +++ b/crates/bevy_pbr/src/render/mesh_bindings.rs @@ -495,7 +495,6 @@ impl MeshLayouts { } /// Creates the bind group for meshes with skins and morph targets. 
- #[allow(clippy::too_many_arguments)] pub fn morphed_skinned( &self, render_device: &RenderDevice, @@ -523,7 +522,6 @@ impl MeshLayouts { /// [`MeshLayouts::morphed_motion`] above for more information about the /// `current_skin`, `prev_skin`, `current_weights`, and `prev_weights` /// buffers. - #[allow(clippy::too_many_arguments)] pub fn morphed_skinned_motion( &self, render_device: &RenderDevice, diff --git a/crates/bevy_pbr/src/render/mesh_functions.wgsl b/crates/bevy_pbr/src/render/mesh_functions.wgsl index 23857bc6aa12d..6d4c53a19fa96 100644 --- a/crates/bevy_pbr/src/render/mesh_functions.wgsl +++ b/crates/bevy_pbr/src/render/mesh_functions.wgsl @@ -22,6 +22,33 @@ fn get_previous_world_from_local(instance_index: u32) -> mat4x4 { return affine3_to_square(mesh[instance_index].previous_world_from_local); } +fn get_local_from_world(instance_index: u32) -> mat4x4 { + // the model matrix is translation * rotation * scale + // the inverse is then scale^-1 * rotation ^-1 * translation^-1 + // the 3x3 matrix only contains the information for the rotation and scale + let inverse_model_3x3 = transpose(mat2x4_f32_to_mat3x3_unpack( + mesh[instance_index].local_from_world_transpose_a, + mesh[instance_index].local_from_world_transpose_b, + )); + // construct scale^-1 * rotation^-1 from the 3x3 + let inverse_model_4x4_no_trans = mat4x4( + vec4(inverse_model_3x3[0], 0.0), + vec4(inverse_model_3x3[1], 0.0), + vec4(inverse_model_3x3[2], 0.0), + vec4(0.0,0.0,0.0,1.0) + ); + // we can get translation^-1 by negating the translation of the model + let model = get_world_from_local(instance_index); + let inverse_model_4x4_only_trans = mat4x4( + vec4(1.0,0.0,0.0,0.0), + vec4(0.0,1.0,0.0,0.0), + vec4(0.0,0.0,1.0,0.0), + vec4(-model[3].xyz, 1.0) + ); + + return inverse_model_4x4_no_trans * inverse_model_4x4_only_trans; +} + #endif // MESHLET_MESH_MATERIAL_PASS fn mesh_position_local_to_world(world_from_local: mat4x4, vertex_position: vec4) -> vec4 { @@ -132,3 +159,10 @@ fn 
get_visibility_range_dither_level(instance_index: u32, world_position: vec4 u32 { + return mesh[instance_index].tag; +} +#endif diff --git a/crates/bevy_pbr/src/render/mesh_preprocess.wgsl b/crates/bevy_pbr/src/render/mesh_preprocess.wgsl index 6e19f6b8004b0..543b328aaafd5 100644 --- a/crates/bevy_pbr/src/render/mesh_preprocess.wgsl +++ b/crates/bevy_pbr/src/render/mesh_preprocess.wgsl @@ -1,35 +1,33 @@ -// GPU mesh uniform building. +// GPU mesh transforming and culling. // // This is a compute shader that expands each `MeshInputUniform` out to a full -// `MeshUniform` for each view before rendering. (Thus `MeshInputUniform` -// and `MeshUniform` are in a 1:N relationship.) It runs in parallel for all -// meshes for all views. As part of this process, the shader gathers each -// mesh's transform on the previous frame and writes it into the `MeshUniform` -// so that TAA works. +// `MeshUniform` for each view before rendering. (Thus `MeshInputUniform` and +// `MeshUniform` are in a 1:N relationship.) It runs in parallel for all meshes +// for all views. As part of this process, the shader gathers each mesh's +// transform on the previous frame and writes it into the `MeshUniform` so that +// TAA works. It also performs frustum culling and occlusion culling, if +// requested. +// +// If occlusion culling is on, this shader runs twice: once to prepare the +// meshes that were visible last frame, and once to prepare the meshes that +// weren't visible last frame but became visible this frame. The two invocations +// are known as *early mesh preprocessing* and *late mesh preprocessing* +// respectively. 
+#import bevy_pbr::mesh_preprocess_types::{ + IndirectParametersCpuMetadata, IndirectParametersGpuMetadata, MeshInput +} #import bevy_pbr::mesh_types::{Mesh, MESH_FLAGS_NO_FRUSTUM_CULLING_BIT} +#import bevy_pbr::mesh_view_bindings::view +#import bevy_pbr::occlusion_culling +#import bevy_pbr::prepass_bindings::previous_view_uniforms +#import bevy_pbr::view_transformations::{ + position_world_to_ndc, position_world_to_view, ndc_to_uv, view_z_to_depth_ndc, + position_world_to_prev_ndc, position_world_to_prev_view, prev_view_z_to_depth_ndc +} #import bevy_render::maths #import bevy_render::view::View -// Per-frame data that the CPU supplies to the GPU. -struct MeshInput { - // The model transform. - world_from_local: mat3x4, - // The lightmap UV rect, packed into 64 bits. - lightmap_uv_rect: vec2, - // Various flags. - flags: u32, - // The index of this mesh's `MeshInput` in the `previous_input` array, if - // applicable. If not present, this is `u32::MAX`. - previous_input_index: u32, - first_vertex_index: u32, - current_skin_index: u32, - previous_skin_index: u32, - // Low 16 bits: index of the material inside the bind group data. - // High 16 bits: index of the lightmap in the binding array. - material_and_lightmap_bind_group_slot: u32, -} - // Information about each mesh instance needed to cull it on GPU. // // At the moment, this just consists of its axis-aligned bounding box (AABB). @@ -50,50 +48,79 @@ struct PreprocessWorkItem { // In direct mode, the index of the `Mesh` in `output` that we write to. In // indirect mode, the index of the `IndirectParameters` in // `indirect_parameters` that we write to. - output_index: u32, + output_or_indirect_parameters_index: u32, +} + +// The parameters for the indirect compute dispatch for the late mesh +// preprocessing phase. +struct LatePreprocessWorkItemIndirectParameters { + // The number of workgroups we're going to dispatch. + // + // This value should always be equal to `ceil(work_item_count / 64)`. 
+ dispatch_x: atomic, + // The number of workgroups in the Y direction; always 1. + dispatch_y: u32, + // The number of workgroups in the Z direction; always 1. + dispatch_z: u32, + // The precise number of work items. + work_item_count: atomic, + // Padding. + // + // This isn't the usual structure padding; it's needed because some hardware + // requires indirect compute dispatch parameters to be aligned on 64-byte + // boundaries. + pad: vec4, } -// The `wgpu` indirect parameters structure. This is a union of two structures. -// For more information, see the corresponding comment in -// `gpu_preprocessing.rs`. -struct IndirectParameters { - // `vertex_count` or `index_count`. - data0: u32, - // `instance_count` in both structures. - instance_count: atomic, - // `first_vertex` in both structures. - first_vertex: u32, - // `first_instance` or `base_vertex`. - data1: u32, - // A read-only copy of `instance_index`. - instance_index: u32, +// These have to be in a structure because of Naga limitations on DX12. +struct PushConstants { + // The offset into the `late_preprocess_work_item_indirect_parameters` + // buffer. + late_preprocess_work_item_indirect_offset: u32, } // The current frame's `MeshInput`. -@group(0) @binding(0) var current_input: array; +@group(0) @binding(3) var current_input: array; // The `MeshInput` values from the previous frame. -@group(0) @binding(1) var previous_input: array; +@group(0) @binding(4) var previous_input: array; // Indices into the `MeshInput` buffer. // // There may be many indices that map to the same `MeshInput`. -@group(0) @binding(2) var work_items: array; +@group(0) @binding(5) var work_items: array; // The output array of `Mesh`es. -@group(0) @binding(3) var output: array; +@group(0) @binding(6) var output: array; #ifdef INDIRECT // The array of indirect parameters for drawcalls. 
-@group(0) @binding(4) var indirect_parameters: array; +@group(0) @binding(7) var indirect_parameters_cpu_metadata: + array; + +@group(0) @binding(8) var indirect_parameters_gpu_metadata: + array; #endif #ifdef FRUSTUM_CULLING // Data needed to cull the meshes. // // At the moment, this consists only of AABBs. -@group(0) @binding(5) var mesh_culling_data: array; +@group(0) @binding(9) var mesh_culling_data: array; +#endif // FRUSTUM_CULLING + +#ifdef OCCLUSION_CULLING +@group(0) @binding(10) var depth_pyramid: texture_2d; + +#ifdef EARLY_PHASE +@group(0) @binding(11) var late_preprocess_work_items: + array; +#endif // EARLY_PHASE -// The view data, including the view matrix. -@group(0) @binding(6) var view: View; +@group(0) @binding(12) var late_preprocess_work_item_indirect_parameters: + array; +var push_constants: PushConstants; +#endif // OCCLUSION_CULLING + +#ifdef FRUSTUM_CULLING // Returns true if the view frustum intersects an oriented bounding box (OBB). // // `aabb_center.w` should be 1.0. @@ -109,9 +136,9 @@ fn view_frustum_intersects_obb( let relative_radius = dot( abs( vec3( - dot(plane_normal, world_from_local[0]), - dot(plane_normal, world_from_local[1]), - dot(plane_normal, world_from_local[2]), + dot(plane_normal.xyz, world_from_local[0].xyz), + dot(plane_normal.xyz, world_from_local[1].xyz), + dot(plane_normal.xyz, world_from_local[2].xyz), ) ), aabb_half_extents @@ -134,17 +161,41 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { // Figure out our instance index. If this thread doesn't correspond to any // index, bail. let instance_index = global_invocation_id.x; + +#ifdef LATE_PHASE + if (instance_index >= atomicLoad(&late_preprocess_work_item_indirect_parameters[ + push_constants.late_preprocess_work_item_indirect_offset].work_item_count)) { + return; + } +#else // LATE_PHASE if (instance_index >= arrayLength(&work_items)) { return; } +#endif - // Unpack. + // Unpack the work item. 
let input_index = work_items[instance_index].input_index; - let output_index = work_items[instance_index].output_index; +#ifdef INDIRECT + let indirect_parameters_index = work_items[instance_index].output_or_indirect_parameters_index; + + // If we're the first mesh instance in this batch, write the index of our + // `MeshInput` into the appropriate slot so that the indirect parameters + // building shader can access it. +#ifndef LATE_PHASE + if (instance_index == 0u || work_items[instance_index - 1].output_or_indirect_parameters_index != indirect_parameters_index) { + indirect_parameters_gpu_metadata[indirect_parameters_index].mesh_index = input_index; + } +#endif // LATE_PHASE + +#else // INDIRECT + let mesh_output_index = work_items[instance_index].output_or_indirect_parameters_index; +#endif // INDIRECT + + // Unpack the input matrix. let world_from_local_affine_transpose = current_input[input_index].world_from_local; let world_from_local = maths::affine3_to_square(world_from_local_affine_transpose); - // Cull if necessary. + // Frustum cull if necessary. #ifdef FRUSTUM_CULLING if ((current_input[input_index].flags & MESH_FLAGS_NO_FRUSTUM_CULLING_BIT) == 0u) { let aabb_center = mesh_culling_data[input_index].aabb_center.xyz; @@ -158,6 +209,119 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { } #endif + // See whether the `MeshInputUniform` was updated on this frame. If it + // wasn't, then we know the transforms of this mesh must be identical to + // those on the previous frame, and therefore we don't need to access the + // `previous_input_index` (in fact, we can't; that index are only valid for + // one frame and will be invalid). + let timestamp = current_input[input_index].timestamp; + let mesh_changed_this_frame = timestamp == view.frame_count; + + // Look up the previous model matrix, if it could have been. 
+ let previous_input_index = current_input[input_index].previous_input_index; + var previous_world_from_local_affine_transpose: mat3x4; + if (mesh_changed_this_frame && previous_input_index != 0xffffffffu) { + previous_world_from_local_affine_transpose = + previous_input[previous_input_index].world_from_local; + } else { + previous_world_from_local_affine_transpose = world_from_local_affine_transpose; + } + let previous_world_from_local = + maths::affine3_to_square(previous_world_from_local_affine_transpose); + + // Occlusion cull if necessary. This is done by calculating the screen-space + // axis-aligned bounding box (AABB) of the mesh and testing it against the + // appropriate level of the depth pyramid (a.k.a. hierarchical Z-buffer). If + // no part of the AABB is in front of the corresponding pixel quad in the + // hierarchical Z-buffer, then this mesh must be occluded, and we can skip + // rendering it. +#ifdef OCCLUSION_CULLING + let aabb_center = mesh_culling_data[input_index].aabb_center.xyz; + let aabb_half_extents = mesh_culling_data[input_index].aabb_half_extents.xyz; + + // Initialize the AABB and the maximum depth. + let infinity = bitcast(0x7f800000u); + let neg_infinity = bitcast(0xff800000u); + var aabb = vec4(infinity, infinity, neg_infinity, neg_infinity); + var max_depth_view = neg_infinity; + + // Build up the AABB by taking each corner of this mesh's OBB, transforming + // it, and updating the AABB and depth accordingly. + for (var i = 0u; i < 8u; i += 1u) { + let local_pos = aabb_center + select( + vec3(-1.0), + vec3(1.0), + vec3((i & 1) != 0, (i & 2) != 0, (i & 4) != 0) + ) * aabb_half_extents; + +#ifdef EARLY_PHASE + // If we're in the early phase, we're testing against the last frame's + // depth buffer, so we need to use the previous frame's transform. 
+ let prev_world_pos = (previous_world_from_local * vec4(local_pos, 1.0)).xyz; + let view_pos = position_world_to_prev_view(prev_world_pos); + let ndc_pos = position_world_to_prev_ndc(prev_world_pos); +#else // EARLY_PHASE + // Otherwise, if this is the late phase, we use the current frame's + // transform. + let world_pos = (world_from_local * vec4(local_pos, 1.0)).xyz; + let view_pos = position_world_to_view(world_pos); + let ndc_pos = position_world_to_ndc(world_pos); +#endif // EARLY_PHASE + + let uv_pos = ndc_to_uv(ndc_pos.xy); + + // Update the AABB and maximum view-space depth. + aabb = vec4(min(aabb.xy, uv_pos), max(aabb.zw, uv_pos)); + max_depth_view = max(max_depth_view, view_pos.z); + } + + // Clip to the near plane to avoid the NDC depth becoming negative. +#ifdef EARLY_PHASE + max_depth_view = min(-previous_view_uniforms.clip_from_view[3][2], max_depth_view); +#else // EARLY_PHASE + max_depth_view = min(-view.clip_from_view[3][2], max_depth_view); +#endif // EARLY_PHASE + + // Figure out the depth of the occluder, and compare it to our own depth. + + let aabb_pixel_size = occlusion_culling::get_aabb_size_in_pixels(aabb, depth_pyramid); + let occluder_depth_ndc = + occlusion_culling::get_occluder_depth(aabb, aabb_pixel_size, depth_pyramid); + +#ifdef EARLY_PHASE + let max_depth_ndc = prev_view_z_to_depth_ndc(max_depth_view); +#else // EARLY_PHASE + let max_depth_ndc = view_z_to_depth_ndc(max_depth_view); +#endif + + // Are we culled out? + if (max_depth_ndc < occluder_depth_ndc) { +#ifdef EARLY_PHASE + // If this is the early phase, we need to make a note of this mesh so + // that we examine it again in the late phase, so that we handle the + // case in which a mesh that was invisible last frame became visible in + // this frame. 
+ let output_work_item_index = atomicAdd(&late_preprocess_work_item_indirect_parameters[ + push_constants.late_preprocess_work_item_indirect_offset].work_item_count, 1u); + if (output_work_item_index % 64u == 0u) { + // Our workgroup size is 64, and the indirect parameters for the + // late mesh preprocessing phase are counted in workgroups, so if + // we're the first thread in this workgroup, bump the workgroup + // count. + atomicAdd(&late_preprocess_work_item_indirect_parameters[ + push_constants.late_preprocess_work_item_indirect_offset].dispatch_x, 1u); + } + + // Enqueue a work item for the late prepass phase. + late_preprocess_work_items[output_work_item_index].input_index = input_index; + late_preprocess_work_items[output_work_item_index].output_or_indirect_parameters_index = + indirect_parameters_index; +#endif // EARLY_PHASE + // This mesh is culled. Skip it. + return; + } +#endif // OCCLUSION_CULLING + // Calculate inverse transpose. let local_from_world_transpose = transpose(maths::inverse_affine3(transpose( world_from_local_affine_transpose))); @@ -168,35 +332,42 @@ fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { vec4(local_from_world_transpose[1].yz, local_from_world_transpose[2].xy)); let local_from_world_transpose_b = local_from_world_transpose[2].z; - // Look up the previous model matrix. - let previous_input_index = current_input[input_index].previous_input_index; - var previous_world_from_local: mat3x4; - if (previous_input_index == 0xffffffff) { - previous_world_from_local = world_from_local_affine_transpose; - } else { - previous_world_from_local = previous_input[previous_input_index].world_from_local; - } - // Figure out the output index. In indirect mode, this involves bumping the - // instance index in the indirect parameters structure. Otherwise, this - // index was directly supplied to us. 
+ // instance index in the indirect parameters metadata, which + // `build_indirect_params.wgsl` will use to generate the actual indirect + // parameters. Otherwise, this index was directly supplied to us. #ifdef INDIRECT - let mesh_output_index = indirect_parameters[output_index].instance_index + - atomicAdd(&indirect_parameters[output_index].instance_count, 1u); -#else - let mesh_output_index = output_index; -#endif +#ifdef LATE_PHASE + let batch_output_index = atomicLoad( + &indirect_parameters_gpu_metadata[indirect_parameters_index].early_instance_count + ) + atomicAdd( + &indirect_parameters_gpu_metadata[indirect_parameters_index].late_instance_count, + 1u + ); +#else // LATE_PHASE + let batch_output_index = atomicAdd( + &indirect_parameters_gpu_metadata[indirect_parameters_index].early_instance_count, + 1u + ); +#endif // LATE_PHASE + + let mesh_output_index = + indirect_parameters_cpu_metadata[indirect_parameters_index].base_output_index + + batch_output_index; + +#endif // INDIRECT // Write the output. 
output[mesh_output_index].world_from_local = world_from_local_affine_transpose; - output[mesh_output_index].previous_world_from_local = previous_world_from_local; + output[mesh_output_index].previous_world_from_local = + previous_world_from_local_affine_transpose; output[mesh_output_index].local_from_world_transpose_a = local_from_world_transpose_a; output[mesh_output_index].local_from_world_transpose_b = local_from_world_transpose_b; output[mesh_output_index].flags = current_input[input_index].flags; output[mesh_output_index].lightmap_uv_rect = current_input[input_index].lightmap_uv_rect; output[mesh_output_index].first_vertex_index = current_input[input_index].first_vertex_index; output[mesh_output_index].current_skin_index = current_input[input_index].current_skin_index; - output[mesh_output_index].previous_skin_index = current_input[input_index].previous_skin_index; output[mesh_output_index].material_and_lightmap_bind_group_slot = current_input[input_index].material_and_lightmap_bind_group_slot; + output[mesh_output_index].tag = current_input[input_index].tag; } diff --git a/crates/bevy_pbr/src/render/mesh_types.wgsl b/crates/bevy_pbr/src/render/mesh_types.wgsl index f0258770c6da0..502b91b427d7f 100644 --- a/crates/bevy_pbr/src/render/mesh_types.wgsl +++ b/crates/bevy_pbr/src/render/mesh_types.wgsl @@ -18,10 +18,12 @@ struct Mesh { // The index of the mesh's first vertex in the vertex buffer. first_vertex_index: u32, current_skin_index: u32, - previous_skin_index: u32, // Low 16 bits: index of the material inside the bind group data. // High 16 bits: index of the lightmap in the binding array. 
material_and_lightmap_bind_group_slot: u32, + // User supplied index to identify the mesh instance + tag: u32, + pad: u32, }; #ifdef SKINNED diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.rs b/crates/bevy_pbr/src/render/mesh_view_bindings.rs index e27b3d7b7f5c9..8e231886bae1e 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bindings.rs +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.rs @@ -1,7 +1,7 @@ use alloc::sync::Arc; use bevy_core_pipeline::{ core_3d::ViewTransmissionTexture, - oit::{OitBuffers, OrderIndependentTransparencySettings}, + oit::{resolve::is_oit_supported, OitBuffers, OrderIndependentTransparencySettings}, prepass::ViewPrepassTextures, tonemapping::{ get_lut_bind_group_layout_entries, get_lut_bindings, Tonemapping, TonemappingLuts, @@ -12,7 +12,8 @@ use bevy_ecs::{ component::Component, entity::Entity, query::Has, - system::{Commands, Query, Res, Resource}, + resource::Resource, + system::{Commands, Query, Res}, world::{FromWorld, World}, }; use bevy_image::BevyDefault as _; @@ -21,7 +22,7 @@ use bevy_render::{ globals::{GlobalsBuffer, GlobalsUniform}, render_asset::RenderAssets, render_resource::{binding_types::*, *}, - renderer::RenderDevice, + renderer::{RenderAdapter, RenderDevice}, texture::{FallbackImage, FallbackImageMsaa, FallbackImageZero, GpuImage}, view::{ Msaa, RenderVisibilityRanges, ViewUniform, ViewUniforms, @@ -29,17 +30,15 @@ use bevy_render::{ }, }; use core::{array, num::NonZero}; - -#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] -use bevy_render::render_resource::binding_types::texture_cube; -use bevy_render::renderer::RenderAdapter; -#[cfg(debug_assertions)] -use bevy_utils::warn_once; use environment_map::EnvironmentMapLight; -#[cfg(debug_assertions)] -use crate::MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES; use crate::{ + decal::{ + self, + clustered::{ + DecalsBuffer, RenderClusteredDecals, RenderViewClusteredDecalBindGroupEntries, + }, + }, environment_map::{self, 
RenderViewEnvironmentMapBindGroupEntries}, irradiance_volume::{ self, IrradianceVolume, RenderViewIrradianceVolumeBindGroupEntries, @@ -52,6 +51,12 @@ use crate::{ ViewClusterBindings, ViewShadowBindings, CLUSTERED_FORWARD_STORAGE_BUFFER_COUNT, }; +#[cfg(all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")))] +use bevy_render::render_resource::binding_types::texture_cube; + +#[cfg(debug_assertions)] +use {crate::MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES, bevy_utils::once, tracing::warn}; + #[derive(Clone)] pub struct MeshPipelineViewLayout { pub bind_group_layout: BindGroupLayout, @@ -211,7 +216,7 @@ fn layout_entries( ( 2, #[cfg(all( - not(feature = "ios_simulator"), + not(target_abi = "sim"), any( not(feature = "webgl"), not(target_arch = "wasm32"), @@ -220,7 +225,7 @@ fn layout_entries( ))] texture_cube_array(TextureSampleType::Depth), #[cfg(any( - feature = "ios_simulator", + target_abi = "sim", all(feature = "webgl", target_arch = "wasm32", not(feature = "webgpu")) ))] texture_cube(TextureSampleType::Depth), @@ -331,11 +336,22 @@ fn layout_entries( )); } + // Clustered decals + if let Some(clustered_decal_entries) = + decal::clustered::get_bind_group_layout_entries(render_device, render_adapter) + { + entries = entries.extend_with_indices(( + (23, clustered_decal_entries[0]), + (24, clustered_decal_entries[1]), + (25, clustered_decal_entries[2]), + )); + } + // Tonemapping let tonemapping_lut_entries = get_lut_bind_group_layout_entries(); entries = entries.extend_with_indices(( - (23, tonemapping_lut_entries[0]), - (24, tonemapping_lut_entries[1]), + (26, tonemapping_lut_entries[0]), + (27, tonemapping_lut_entries[1]), )); // Prepass @@ -345,7 +361,7 @@ fn layout_entries( { for (entry, binding) in prepass::get_bind_group_layout_entries(layout_key) .iter() - .zip([25, 26, 27, 28]) + .zip([28, 29, 30, 31]) { if let Some(entry) = entry { entries = entries.extend_with_indices(((binding as u32, *entry),)); @@ -356,31 +372,26 @@ fn 
layout_entries( // View Transmission Texture entries = entries.extend_with_indices(( ( - 29, + 32, texture_2d(TextureSampleType::Float { filterable: true }), ), - (30, sampler(SamplerBindingType::Filtering)), + (33, sampler(SamplerBindingType::Filtering)), )); // OIT if layout_key.contains(MeshPipelineViewLayoutKey::OIT_ENABLED) { - // Check if the GPU supports writable storage buffers in the fragment shader - // If not, we can't use OIT, so we skip the OIT bindings. - // This is a hack to avoid errors on webgl -- the OIT plugin will warn the user that OIT - // is not supported on their platform, so we don't need to do it here. - if render_adapter - .get_downlevel_capabilities() - .flags - .contains(DownlevelFlags::FRAGMENT_WRITABLE_STORAGE) - { + // Check if we can use OIT. This is a hack to avoid errors on webgl -- + // the OIT plugin will warn the user that OIT is not supported on their + // platform, so we don't need to do it here. + if is_oit_supported(render_adapter, render_device, false) { entries = entries.extend_with_indices(( // oit_layers - (31, storage_buffer_sized(false, None)), + (34, storage_buffer_sized(false, None)), // oit_layer_ids, - (32, storage_buffer_sized(false, None)), + (35, storage_buffer_sized(false, None)), // oit_layer_count ( - 33, + 36, uniform_buffer::(true), ), )); @@ -445,7 +456,7 @@ impl MeshPipelineViewLayouts { #[cfg(debug_assertions)] if layout.texture_count > MESH_PIPELINE_VIEW_LAYOUT_SAFE_MAX_TEXTURES { // Issue our own warning here because Naga's error message is a bit cryptic in this situation - warn_once!("Too many textures in mesh pipeline view layout, this might cause us to hit `wgpu::Limits::max_sampled_textures_per_shader_stage` in some environments."); + once!(warn!("Too many textures in mesh pipeline view layout, this might cause us to hit `wgpu::Limits::max_sampled_textures_per_shader_stage` in some environments.")); } &layout.bind_group_layout @@ -490,11 +501,9 @@ pub struct MeshViewBindGroup { pub value: 
BindGroup, } -#[allow(clippy::too_many_arguments)] pub fn prepare_mesh_view_bind_groups( mut commands: Commands, - render_device: Res, - render_adapter: Res, + (render_device, render_adapter): (Res, Res), mesh_pipeline: Res, shadow_samplers: Res, (light_meta, global_light_meta): (Res, Res), @@ -525,6 +534,7 @@ pub fn prepare_mesh_view_bind_groups( visibility_ranges: Res, ssr_buffer: Res, oit_buffers: Res, + (decals_buffer, render_decals): (Res, Res), ) { if let ( Some(view_binding), @@ -668,9 +678,40 @@ pub fn prepare_mesh_view_bind_groups( None => {} } + let decal_bind_group_entries = RenderViewClusteredDecalBindGroupEntries::get( + &render_decals, + &decals_buffer, + &images, + &fallback_image, + &render_device, + &render_adapter, + ); + + // Add the decal bind group entries. + if let Some(ref render_view_decal_bind_group_entries) = decal_bind_group_entries { + entries = entries.extend_with_indices(( + // `clustered_decals` + ( + 23, + render_view_decal_bind_group_entries + .decals + .as_entire_binding(), + ), + // `clustered_decal_textures` + ( + 24, + render_view_decal_bind_group_entries + .texture_views + .as_slice(), + ), + // `clustered_decal_sampler` + (25, render_view_decal_bind_group_entries.sampler), + )); + } + let lut_bindings = get_lut_bindings(&images, &tonemapping_luts, tonemapping, &fallback_image); - entries = entries.extend_with_indices(((23, lut_bindings.0), (24, lut_bindings.1))); + entries = entries.extend_with_indices(((26, lut_bindings.0), (27, lut_bindings.1))); // When using WebGL, we can't have a depth texture with multisampling let prepass_bindings; @@ -680,7 +721,7 @@ pub fn prepare_mesh_view_bind_groups( for (binding, index) in prepass_bindings .iter() .map(Option::as_ref) - .zip([25, 26, 27, 28]) + .zip([28, 29, 30, 31]) .flat_map(|(b, i)| b.map(|b| (b, i))) { entries = entries.extend_with_indices(((index, binding),)); @@ -696,7 +737,7 @@ pub fn prepare_mesh_view_bind_groups( .unwrap_or(&fallback_image_zero.sampler); entries = - 
entries.extend_with_indices(((29, transmission_view), (30, transmission_sampler))); + entries.extend_with_indices(((32, transmission_view), (33, transmission_sampler))); if has_oit { if let ( @@ -709,9 +750,9 @@ pub fn prepare_mesh_view_bind_groups( oit_buffers.settings.binding(), ) { entries = entries.extend_with_indices(( - (31, oit_layers_binding.clone()), - (32, oit_layer_ids_binding.clone()), - (33, oit_settings_binding.clone()), + (34, oit_layers_binding.clone()), + (35, oit_layer_ids_binding.clone()), + (36, oit_settings_binding.clone()), )); } } diff --git a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl index a5de8bd873838..2fb34d84669c9 100644 --- a/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_bindings.wgsl @@ -70,44 +70,50 @@ const VISIBILITY_RANGE_UNIFORM_BUFFER_SIZE: u32 = 64u; @group(0) @binding(22) var irradiance_volume_sampler: sampler; #endif +#ifdef CLUSTERED_DECALS_ARE_USABLE +@group(0) @binding(23) var clustered_decals: types::ClusteredDecals; +@group(0) @binding(24) var clustered_decal_textures: binding_array, 8u>; +@group(0) @binding(25) var clustered_decal_sampler: sampler; +#endif // CLUSTERED_DECALS_ARE_USABLE + // NB: If you change these, make sure to update `tonemapping_shared.wgsl` too. 
-@group(0) @binding(23) var dt_lut_texture: texture_3d; -@group(0) @binding(24) var dt_lut_sampler: sampler; +@group(0) @binding(26) var dt_lut_texture: texture_3d; +@group(0) @binding(27) var dt_lut_sampler: sampler; #ifdef MULTISAMPLED #ifdef DEPTH_PREPASS -@group(0) @binding(25) var depth_prepass_texture: texture_depth_multisampled_2d; +@group(0) @binding(28) var depth_prepass_texture: texture_depth_multisampled_2d; #endif // DEPTH_PREPASS #ifdef NORMAL_PREPASS -@group(0) @binding(26) var normal_prepass_texture: texture_multisampled_2d; +@group(0) @binding(29) var normal_prepass_texture: texture_multisampled_2d; #endif // NORMAL_PREPASS #ifdef MOTION_VECTOR_PREPASS -@group(0) @binding(27) var motion_vector_prepass_texture: texture_multisampled_2d; +@group(0) @binding(30) var motion_vector_prepass_texture: texture_multisampled_2d; #endif // MOTION_VECTOR_PREPASS #else // MULTISAMPLED #ifdef DEPTH_PREPASS -@group(0) @binding(25) var depth_prepass_texture: texture_depth_2d; +@group(0) @binding(28) var depth_prepass_texture: texture_depth_2d; #endif // DEPTH_PREPASS #ifdef NORMAL_PREPASS -@group(0) @binding(26) var normal_prepass_texture: texture_2d; +@group(0) @binding(29) var normal_prepass_texture: texture_2d; #endif // NORMAL_PREPASS #ifdef MOTION_VECTOR_PREPASS -@group(0) @binding(27) var motion_vector_prepass_texture: texture_2d; +@group(0) @binding(30) var motion_vector_prepass_texture: texture_2d; #endif // MOTION_VECTOR_PREPASS #endif // MULTISAMPLED #ifdef DEFERRED_PREPASS -@group(0) @binding(28) var deferred_prepass_texture: texture_2d; +@group(0) @binding(31) var deferred_prepass_texture: texture_2d; #endif // DEFERRED_PREPASS -@group(0) @binding(29) var view_transmission_texture: texture_2d; -@group(0) @binding(30) var view_transmission_sampler: sampler; +@group(0) @binding(32) var view_transmission_texture: texture_2d; +@group(0) @binding(33) var view_transmission_sampler: sampler; #ifdef OIT_ENABLED -@group(0) @binding(31) var oit_layers: array>; 
-@group(0) @binding(32) var oit_layer_ids: array>; -@group(0) @binding(33) var oit_settings: types::OrderIndependentTransparencySettings; +@group(0) @binding(34) var oit_layers: array>; +@group(0) @binding(35) var oit_layer_ids: array>; +@group(0) @binding(36) var oit_settings: types::OrderIndependentTransparencySettings; #endif // OIT_ENABLED diff --git a/crates/bevy_pbr/src/render/mesh_view_types.wgsl b/crates/bevy_pbr/src/render/mesh_view_types.wgsl index ee3b2475e35e9..6db72759df307 100644 --- a/crates/bevy_pbr/src/render/mesh_view_types.wgsl +++ b/crates/bevy_pbr/src/render/mesh_view_types.wgsl @@ -13,8 +13,8 @@ struct ClusterableObject { spot_light_tan_angle: f32, soft_shadow_size: f32, shadow_map_near_z: f32, - pad_a: f32, - pad_b: f32, + texture_index: u32, + pad: f32, }; const POINT_LIGHT_FLAGS_SHADOWS_ENABLED_BIT: u32 = 1u; @@ -172,3 +172,15 @@ struct OrderIndependentTransparencySettings { layers_count: i32, alpha_threshold: f32, }; + +struct ClusteredDecal { + local_from_world: mat4x4, + image_index: i32, + tag: u32, + pad_a: u32, + pad_b: u32, +} + +struct ClusteredDecals { + decals: array, +} diff --git a/crates/bevy_pbr/src/render/mod.rs b/crates/bevy_pbr/src/render/mod.rs index 8e26e869a1c96..6a29823022b58 100644 --- a/crates/bevy_pbr/src/render/mod.rs +++ b/crates/bevy_pbr/src/render/mod.rs @@ -13,4 +13,5 @@ pub use light::*; pub use mesh::*; pub use mesh_bindings::MeshLayouts; pub use mesh_view_bindings::*; -pub use skin::{extract_skins, prepare_skins, SkinIndices, SkinUniforms, MAX_JOINTS}; +pub use morph::*; +pub use skin::{extract_skins, prepare_skins, skins_use_uniform_buffers, SkinUniforms, MAX_JOINTS}; diff --git a/crates/bevy_pbr/src/render/morph.rs b/crates/bevy_pbr/src/render/morph.rs index 4b1ed68ce87a3..29070724dd51c 100644 --- a/crates/bevy_pbr/src/render/morph.rs +++ b/crates/bevy_pbr/src/render/morph.rs @@ -14,7 +14,7 @@ use bytemuck::NoUninit; #[derive(Component)] pub struct MorphIndex { - pub(super) index: u32, + pub index: u32, } 
/// Maps each mesh affected by morph targets to the applicable offset within the diff --git a/crates/bevy_pbr/src/render/occlusion_culling.wgsl b/crates/bevy_pbr/src/render/occlusion_culling.wgsl new file mode 100644 index 0000000000000..1be999cc6a588 --- /dev/null +++ b/crates/bevy_pbr/src/render/occlusion_culling.wgsl @@ -0,0 +1,30 @@ +// Occlusion culling utility functions. + +#define_import_path bevy_pbr::occlusion_culling + +fn get_aabb_size_in_pixels(aabb: vec4, depth_pyramid: texture_2d) -> vec2 { + let depth_pyramid_size_mip_0 = vec2(textureDimensions(depth_pyramid, 0)); + let aabb_width_pixels = (aabb.z - aabb.x) * depth_pyramid_size_mip_0.x; + let aabb_height_pixels = (aabb.w - aabb.y) * depth_pyramid_size_mip_0.y; + return vec2(aabb_width_pixels, aabb_height_pixels); +} + +fn get_occluder_depth( + aabb: vec4, + aabb_pixel_size: vec2, + depth_pyramid: texture_2d +) -> f32 { + let aabb_width_pixels = aabb_pixel_size.x; + let aabb_height_pixels = aabb_pixel_size.y; + + let depth_pyramid_size_mip_0 = vec2(textureDimensions(depth_pyramid, 0)); + let depth_level = max(0, i32(ceil(log2(max(aabb_width_pixels, aabb_height_pixels))))); // TODO: Naga doesn't like this being a u32 + let depth_pyramid_size = vec2(textureDimensions(depth_pyramid, depth_level)); + let aabb_top_left = vec2(aabb.xy * depth_pyramid_size); + + let depth_quad_a = textureLoad(depth_pyramid, aabb_top_left, depth_level).x; + let depth_quad_b = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 0u), depth_level).x; + let depth_quad_c = textureLoad(depth_pyramid, aabb_top_left + vec2(0u, 1u), depth_level).x; + let depth_quad_d = textureLoad(depth_pyramid, aabb_top_left + vec2(1u, 1u), depth_level).x; + return min(min(depth_quad_a, depth_quad_b), min(depth_quad_c, depth_quad_d)); +} diff --git a/crates/bevy_pbr/src/render/parallax_mapping.wgsl b/crates/bevy_pbr/src/render/parallax_mapping.wgsl index 780b5c290a416..9005734da5cc1 100644 --- a/crates/bevy_pbr/src/render/parallax_mapping.wgsl +++ 
b/crates/bevy_pbr/src/render/parallax_mapping.wgsl @@ -1,10 +1,16 @@ #define_import_path bevy_pbr::parallax_mapping +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} + #import bevy_pbr::{ pbr_bindings::{depth_map_texture, depth_map_sampler}, mesh_bindings::mesh } +#ifdef BINDLESS +#import bevy_pbr::pbr_bindings::material_indices +#endif // BINDLESS + fn sample_depth_map(uv: vec2, material_bind_group_slot: u32) -> f32 { // We use `textureSampleLevel` over `textureSample` because the wgpu DX12 // backend (Fxc) panics when using "gradient instructions" inside a loop. @@ -18,8 +24,8 @@ fn sample_depth_map(uv: vec2, material_bind_group_slot: u32) -> f32 { // See https://stackoverflow.com/questions/56581141/direct3d11-gradient-instruction-used-in-a-loop-with-varying-iteration-forcing return textureSampleLevel( #ifdef BINDLESS - depth_map_texture[material_bind_group_slot], - depth_map_sampler[material_bind_group_slot], + bindless_textures_2d[material_indices[material_bind_group_slot].depth_map_texture], + bindless_samplers_filtering[material_indices[material_bind_group_slot].depth_map_sampler], #else // BINDLESS depth_map_texture, depth_map_sampler, diff --git a/crates/bevy_pbr/src/render/pbr.wgsl b/crates/bevy_pbr/src/render/pbr.wgsl index 652fa5ac4e41e..1722ab9d91940 100644 --- a/crates/bevy_pbr/src/render/pbr.wgsl +++ b/crates/bevy_pbr/src/render/pbr.wgsl @@ -2,6 +2,7 @@ pbr_types, pbr_functions::alpha_discard, pbr_fragment::pbr_input_from_standard_material, + decal::clustered::apply_decal_base_color, } #ifdef PREPASS_PIPELINE @@ -26,32 +27,51 @@ #import bevy_core_pipeline::oit::oit_draw #endif // OIT_ENABLED +#ifdef FORWARD_DECAL +#import bevy_pbr::decal::forward::get_forward_decal_info +#endif + @fragment fn fragment( #ifdef MESHLET_MESH_MATERIAL_PASS @builtin(position) frag_coord: vec4, #else - in: VertexOutput, + vertex_output: VertexOutput, @builtin(front_facing) is_front: bool, #endif ) -> FragmentOutput { #ifdef 
MESHLET_MESH_MATERIAL_PASS - let in = resolve_vertex_output(frag_coord); + let vertex_output = resolve_vertex_output(frag_coord); let is_front = true; #endif + var in = vertex_output; + // If we're in the crossfade section of a visibility range, conditionally // discard the fragment according to the visibility pattern. #ifdef VISIBILITY_RANGE_DITHER pbr_functions::visibility_range_dither(in.position, in.visibility_range_dither); #endif +#ifdef FORWARD_DECAL + let forward_decal_info = get_forward_decal_info(in); + in.world_position = forward_decal_info.world_position; + in.uv = forward_decal_info.uv; +#endif + // generate a PbrInput struct from the StandardMaterial bindings var pbr_input = pbr_input_from_standard_material(in, is_front); // alpha discard pbr_input.material.base_color = alpha_discard(pbr_input.material, pbr_input.material.base_color); + // clustered decals + pbr_input.material.base_color = apply_decal_base_color( + in.world_position.xyz, + in.position.xy, + pbr_input.material.base_color + ); + #ifdef PREPASS_PIPELINE // write the gbuffer, lighting pass id, and optionally normal and motion_vector textures let out = deferred_output(in, pbr_input); @@ -79,5 +99,9 @@ fn fragment( } #endif // OIT_ENABLED - return out; +#ifdef FORWARD_DECAL + out.color.a = min(forward_decal_info.alpha, out.color.a); +#endif + + return out; } diff --git a/crates/bevy_pbr/src/render/pbr_bindings.wgsl b/crates/bevy_pbr/src/render/pbr_bindings.wgsl index 9b9d9dcc92825..fac7b97265fdb 100644 --- a/crates/bevy_pbr/src/render/pbr_bindings.wgsl +++ b/crates/bevy_pbr/src/render/pbr_bindings.wgsl @@ -3,20 +3,45 @@ #import bevy_pbr::pbr_types::StandardMaterial #ifdef BINDLESS -@group(2) @binding(0) var material: binding_array; -@group(2) @binding(1) var base_color_texture: binding_array, 16>; -@group(2) @binding(2) var base_color_sampler: binding_array; -@group(2) @binding(3) var emissive_texture: binding_array, 16>; -@group(2) @binding(4) var emissive_sampler: binding_array; 
-@group(2) @binding(5) var metallic_roughness_texture: binding_array, 16>; -@group(2) @binding(6) var metallic_roughness_sampler: binding_array; -@group(2) @binding(7) var occlusion_texture: binding_array, 16>; -@group(2) @binding(8) var occlusion_sampler: binding_array; -@group(2) @binding(9) var normal_map_texture: binding_array, 16>; -@group(2) @binding(10) var normal_map_sampler: binding_array; -@group(2) @binding(11) var depth_map_texture: binding_array, 16>; -@group(2) @binding(12) var depth_map_sampler: binding_array; +struct StandardMaterialBindings { + material: u32, // 0 + base_color_texture: u32, // 1 + base_color_sampler: u32, // 2 + emissive_texture: u32, // 3 + emissive_sampler: u32, // 4 + metallic_roughness_texture: u32, // 5 + metallic_roughness_sampler: u32, // 6 + occlusion_texture: u32, // 7 + occlusion_sampler: u32, // 8 + normal_map_texture: u32, // 9 + normal_map_sampler: u32, // 10 + depth_map_texture: u32, // 11 + depth_map_sampler: u32, // 12 + anisotropy_texture: u32, // 13 + anisotropy_sampler: u32, // 14 + specular_transmission_texture: u32, // 15 + specular_transmission_sampler: u32, // 16 + thickness_texture: u32, // 17 + thickness_sampler: u32, // 18 + diffuse_transmission_texture: u32, // 19 + diffuse_transmission_sampler: u32, // 20 + clearcoat_texture: u32, // 21 + clearcoat_sampler: u32, // 22 + clearcoat_roughness_texture: u32, // 23 + clearcoat_roughness_sampler: u32, // 24 + clearcoat_normal_texture: u32, // 25 + clearcoat_normal_sampler: u32, // 26 + specular_texture: u32, // 27 + specular_sampler: u32, // 28 + specular_tint_texture: u32, // 29 + specular_tint_sampler: u32, // 30 +} + +@group(2) @binding(0) var material_indices: array; +@group(2) @binding(10) var material_array: array; + #else // BINDLESS + @group(2) @binding(0) var material: StandardMaterial; @group(2) @binding(1) var base_color_texture: texture_2d; @group(2) @binding(2) var base_color_sampler: sampler; @@ -30,50 +55,35 @@ @group(2) @binding(10) var 
normal_map_sampler: sampler; @group(2) @binding(11) var depth_map_texture: texture_2d; @group(2) @binding(12) var depth_map_sampler: sampler; -#endif // BINDLESS #ifdef PBR_ANISOTROPY_TEXTURE_SUPPORTED -#ifdef BINDLESS -@group(2) @binding(13) var anisotropy_texture: binding_array, 16>; -@group(2) @binding(14) var anisotropy_sampler: binding_array; -#else // BINDLESS @group(2) @binding(13) var anisotropy_texture: texture_2d; @group(2) @binding(14) var anisotropy_sampler: sampler; -#endif // BINDLESS #endif // PBR_ANISOTROPY_TEXTURE_SUPPORTED #ifdef PBR_TRANSMISSION_TEXTURES_SUPPORTED -#ifdef BINDLESS -@group(2) @binding(15) var specular_transmission_texture: binding_array, 16>; -@group(2) @binding(16) var specular_transmission_sampler: binding_array; -@group(2) @binding(17) var thickness_texture: binding_array, 16>; -@group(2) @binding(18) var thickness_sampler: binding_array; -@group(2) @binding(19) var diffuse_transmission_texture: binding_array, 16>; -@group(2) @binding(20) var diffuse_transmission_sampler: binding_array; -#else // BINDLESS @group(2) @binding(15) var specular_transmission_texture: texture_2d; @group(2) @binding(16) var specular_transmission_sampler: sampler; @group(2) @binding(17) var thickness_texture: texture_2d; @group(2) @binding(18) var thickness_sampler: sampler; @group(2) @binding(19) var diffuse_transmission_texture: texture_2d; @group(2) @binding(20) var diffuse_transmission_sampler: sampler; -#endif // BINDLESS #endif // PBR_TRANSMISSION_TEXTURES_SUPPORTED #ifdef PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED -#ifdef BINDLESS -@group(2) @binding(21) var clearcoat_texture: binding_array, 16>; -@group(2) @binding(22) var clearcoat_sampler: binding_array; -@group(2) @binding(23) var clearcoat_roughness_texture: binding_array, 16>; -@group(2) @binding(24) var clearcoat_roughness_sampler: binding_array; -@group(2) @binding(25) var clearcoat_normal_texture: binding_array, 16>; -@group(2) @binding(26) var clearcoat_normal_sampler: binding_array; 
-#else // BINDLESS @group(2) @binding(21) var clearcoat_texture: texture_2d; @group(2) @binding(22) var clearcoat_sampler: sampler; @group(2) @binding(23) var clearcoat_roughness_texture: texture_2d; @group(2) @binding(24) var clearcoat_roughness_sampler: sampler; @group(2) @binding(25) var clearcoat_normal_texture: texture_2d; @group(2) @binding(26) var clearcoat_normal_sampler: sampler; -#endif // BINDLESS #endif // PBR_MULTI_LAYER_MATERIAL_TEXTURES_SUPPORTED + +#ifdef PBR_SPECULAR_TEXTURES_SUPPORTED +@group(2) @binding(27) var specular_texture: texture_2d; +@group(2) @binding(28) var specular_sampler: sampler; +@group(2) @binding(29) var specular_tint_texture: texture_2d; +@group(2) @binding(30) var specular_tint_sampler: sampler; +#endif // PBR_SPECULAR_TEXTURES_SUPPORTED + +#endif // BINDLESS diff --git a/crates/bevy_pbr/src/render/pbr_fragment.wgsl b/crates/bevy_pbr/src/render/pbr_fragment.wgsl index 1df7ef404f7b8..779546f8bd67d 100644 --- a/crates/bevy_pbr/src/render/pbr_fragment.wgsl +++ b/crates/bevy_pbr/src/render/pbr_fragment.wgsl @@ -1,5 +1,7 @@ #define_import_path bevy_pbr::pbr_fragment +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} + #import bevy_pbr::{ pbr_functions, pbr_functions::SampleBias, @@ -26,6 +28,10 @@ #import bevy_pbr::forward_io::VertexOutput #endif +#ifdef BINDLESS +#import bevy_pbr::pbr_bindings::material_indices +#endif // BINDLESS + // prepare a basic PbrInput from the vertex stage output, mesh binding and view binding fn pbr_input_from_vertex_output( in: VertexOutput, @@ -70,15 +76,16 @@ fn pbr_input_from_standard_material( in: VertexOutput, is_front: bool, ) -> pbr_types::PbrInput { -#ifdef BINDLESS #ifdef MESHLET_MESH_MATERIAL_PASS let slot = in.material_bind_group_slot; #else // MESHLET_MESH_MATERIAL_PASS let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu; #endif // MESHLET_MESH_MATERIAL_PASS - let flags = pbr_bindings::material[slot].flags; - let base_color = 
pbr_bindings::material[slot].base_color; - let deferred_lighting_pass_id = pbr_bindings::material[slot].deferred_lighting_pass_id; +#ifdef BINDLESS + let flags = pbr_bindings::material_array[material_indices[slot].material].flags; + let base_color = pbr_bindings::material_array[material_indices[slot].material].base_color; + let deferred_lighting_pass_id = + pbr_bindings::material_array[material_indices[slot].material].deferred_lighting_pass_id; #else // BINDLESS let flags = pbr_bindings::material.flags; let base_color = pbr_bindings::material.base_color; @@ -108,7 +115,7 @@ fn pbr_input_from_standard_material( #ifdef VERTEX_UVS #ifdef BINDLESS - let uv_transform = pbr_bindings::material[slot].uv_transform; + let uv_transform = pbr_bindings::material_array[material_indices[slot].material].uv_transform; #else // BINDLESS let uv_transform = pbr_bindings::material.uv_transform; #endif // BINDLESS @@ -137,9 +144,9 @@ fn pbr_input_from_standard_material( // TODO: Transforming UVs mean we need to apply derivative chain rule for meshlet mesh material pass uv = parallaxed_uv( #ifdef BINDLESS - pbr_bindings::material[slot].parallax_depth_scale, - pbr_bindings::material[slot].max_parallax_layer_count, - pbr_bindings::material[slot].max_relief_mapping_search_steps, + pbr_bindings::material_array[material_indices[slot].material].parallax_depth_scale, + pbr_bindings::material_array[material_indices[slot].material].max_parallax_layer_count, + pbr_bindings::material_array[material_indices[slot].material].max_relief_mapping_search_steps, #else // BINDLESS pbr_bindings::material.parallax_depth_scale, pbr_bindings::material.max_parallax_layer_count, @@ -158,9 +165,9 @@ fn pbr_input_from_standard_material( // TODO: Transforming UVs mean we need to apply derivative chain rule for meshlet mesh material pass uv_b = parallaxed_uv( #ifdef BINDLESS - pbr_bindings::material[slot].parallax_depth_scale, - pbr_bindings::material[slot].max_parallax_layer_count, - 
pbr_bindings::material[slot].max_relief_mapping_search_steps, + pbr_bindings::material_array[material_indices[slot].material].parallax_depth_scale, + pbr_bindings::material_array[material_indices[slot].material].max_parallax_layer_count, + pbr_bindings::material_array[material_indices[slot].material].max_relief_mapping_search_steps, #else // BINDLESS pbr_bindings::material.parallax_depth_scale, pbr_bindings::material.max_parallax_layer_count, @@ -171,7 +178,7 @@ fn pbr_input_from_standard_material( // parallax mapping algorithm easier to understand and reason // about. -Vt, - in.instance_index, + slot, ); #else uv_b = uv; @@ -187,8 +194,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::base_color_texture[slot], - pbr_bindings::base_color_sampler[slot], + bindless_textures_2d[material_indices[slot].base_color_texture], + bindless_samplers_filtering[material_indices[slot].base_color_sampler], #else // BINDLESS pbr_bindings::base_color_texture, pbr_bindings::base_color_sampler, @@ -214,7 +221,7 @@ fn pbr_input_from_standard_material( if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_ALPHA_TO_COVERAGE { #ifdef BINDLESS - let alpha_cutoff = pbr_bindings::material[slot].alpha_cutoff; + let alpha_cutoff = pbr_bindings::material_array[material_indices[slot].material].alpha_cutoff; #else // BINDLESS let alpha_cutoff = pbr_bindings::material.alpha_cutoff; #endif // BINDLESS @@ -232,22 +239,99 @@ fn pbr_input_from_standard_material( // NOTE: Unlit bit not set means == 0 is true, so the true case is if lit if ((flags & pbr_types::STANDARD_MATERIAL_FLAGS_UNLIT_BIT) == 0u) { #ifdef BINDLESS - pbr_input.material.reflectance = pbr_bindings::material[slot].reflectance; - pbr_input.material.ior = pbr_bindings::material[slot].ior; - pbr_input.material.attenuation_color = pbr_bindings::material[slot].attenuation_color; - pbr_input.material.attenuation_distance = 
pbr_bindings::material[slot].attenuation_distance; - pbr_input.material.alpha_cutoff = pbr_bindings::material[slot].alpha_cutoff; + pbr_input.material.ior = pbr_bindings::material_array[material_indices[slot].material].ior; + pbr_input.material.attenuation_color = + pbr_bindings::material_array[material_indices[slot].material].attenuation_color; + pbr_input.material.attenuation_distance = + pbr_bindings::material_array[material_indices[slot].material].attenuation_distance; + pbr_input.material.alpha_cutoff = + pbr_bindings::material_array[material_indices[slot].material].alpha_cutoff; #else // BINDLESS - pbr_input.material.reflectance = pbr_bindings::material.reflectance; pbr_input.material.ior = pbr_bindings::material.ior; pbr_input.material.attenuation_color = pbr_bindings::material.attenuation_color; pbr_input.material.attenuation_distance = pbr_bindings::material.attenuation_distance; pbr_input.material.alpha_cutoff = pbr_bindings::material.alpha_cutoff; #endif // BINDLESS + // reflectance +#ifdef BINDLESS + pbr_input.material.reflectance = + pbr_bindings::material_array[material_indices[slot].material].reflectance; +#else // BINDLESS + pbr_input.material.reflectance = pbr_bindings::material.reflectance; +#endif // BINDLESS + +#ifdef PBR_SPECULAR_TEXTURES_SUPPORTED +#ifdef VERTEX_UVS + + // Specular texture + if ((flags & pbr_types::STANDARD_MATERIAL_FLAGS_SPECULAR_TEXTURE_BIT) != 0u) { + let specular = +#ifdef MESHLET_MESH_MATERIAL_PASS + textureSampleGrad( +#else // MESHLET_MESH_MATERIAL_PASS + textureSampleBias( +#endif // MESHLET_MESH_MATERIAL_PASS +#ifdef BINDLESS + bindless_textures_2d[material_indices[slot].specular_texture], + bindless_samplers_filtering[material_indices[slot].specular_sampler], +#else // BINDLESS + pbr_bindings::specular_texture, + pbr_bindings::specular_sampler, +#endif // BINDLESS +#ifdef STANDARD_MATERIAL_SPECULAR_UV_B + uv_b, +#else // STANDARD_MATERIAL_SPECULAR_UV_B + uv, +#endif // STANDARD_MATERIAL_SPECULAR_UV_B +#ifdef 
MESHLET_MESH_MATERIAL_PASS + bias.ddx_uv, + bias.ddy_uv, +#else // MESHLET_MESH_MATERIAL_PASS + bias.mip_bias, +#endif // MESHLET_MESH_MATERIAL_PASS + ).a; + // This 0.5 factor is from the `KHR_materials_specular` specification: + // + pbr_input.material.reflectance *= specular * 0.5; + } + + // Specular tint texture + if ((flags & pbr_types::STANDARD_MATERIAL_FLAGS_SPECULAR_TINT_TEXTURE_BIT) != 0u) { + let specular_tint = +#ifdef MESHLET_MESH_MATERIAL_PASS + textureSampleGrad( +#else // MESHLET_MESH_MATERIAL_PASS + textureSampleBias( +#endif // MESHLET_MESH_MATERIAL_PASS +#ifdef BINDLESS + bindless_textures_2d[material_indices[slot].specular_tint_texture], + bindless_samplers_filtering[material_indices[slot].specular_tint_sampler], +#else // BINDLESS + pbr_bindings::specular_tint_texture, + pbr_bindings::specular_tint_sampler, +#endif // BINDLESS +#ifdef STANDARD_MATERIAL_SPECULAR_TINT_UV_B + uv_b, +#else // STANDARD_MATERIAL_SPECULAR_TINT_UV_B + uv, +#endif // STANDARD_MATERIAL_SPECULAR_TINT_UV_B +#ifdef MESHLET_MESH_MATERIAL_PASS + bias.ddx_uv, + bias.ddy_uv, +#else // MESHLET_MESH_MATERIAL_PASS + bias.mip_bias, +#endif // MESHLET_MESH_MATERIAL_PASS + ).rgb; + pbr_input.material.reflectance *= specular_tint; + } + +#endif // VERTEX_UVS +#endif // PBR_SPECULAR_TEXTURES_SUPPORTED + // emissive #ifdef BINDLESS - var emissive: vec4 = pbr_bindings::material[slot].emissive; + var emissive: vec4 = pbr_bindings::material_array[material_indices[slot].material].emissive; #else // BINDLESS var emissive: vec4 = pbr_bindings::material.emissive; #endif // BINDLESS @@ -261,8 +345,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::emissive_texture[slot], - pbr_bindings::emissive_sampler[slot], + bindless_textures_2d[material_indices[slot].emissive_texture], + bindless_samplers_filtering[material_indices[slot].emissive_sampler], #else // BINDLESS pbr_bindings::emissive_texture, 
pbr_bindings::emissive_sampler, @@ -286,8 +370,8 @@ fn pbr_input_from_standard_material( // metallic and perceptual roughness #ifdef BINDLESS - var metallic: f32 = pbr_bindings::material[slot].metallic; - var perceptual_roughness: f32 = pbr_bindings::material[slot].perceptual_roughness; + var metallic: f32 = pbr_bindings::material_array[material_indices[slot].material].metallic; + var perceptual_roughness: f32 = pbr_bindings::material_array[material_indices[slot].material].perceptual_roughness; #else // BINDLESS var metallic: f32 = pbr_bindings::material.metallic; var perceptual_roughness: f32 = pbr_bindings::material.perceptual_roughness; @@ -303,8 +387,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::metallic_roughness_texture[slot], - pbr_bindings::metallic_roughness_sampler[slot], + bindless_textures_2d[material_indices[slot].metallic_roughness_texture], + bindless_samplers_filtering[material_indices[slot].metallic_roughness_sampler], #else // BINDLESS pbr_bindings::metallic_roughness_texture, pbr_bindings::metallic_roughness_sampler, @@ -331,7 +415,8 @@ fn pbr_input_from_standard_material( // Clearcoat factor #ifdef BINDLESS - pbr_input.material.clearcoat = pbr_bindings::material[slot].clearcoat; + pbr_input.material.clearcoat = + pbr_bindings::material_array[material_indices[slot].material].clearcoat; #else // BINDLESS pbr_input.material.clearcoat = pbr_bindings::material.clearcoat; #endif // BINDLESS @@ -346,8 +431,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::clearcoat_texture[slot], - pbr_bindings::clearcoat_sampler[slot], + bindless_textures_2d[material_indices[slot].clearcoat_texture], + bindless_samplers_filtering[material_indices[slot].clearcoat_sampler], #else // BINDLESS pbr_bindings::clearcoat_texture, pbr_bindings::clearcoat_sampler, @@ -371,7 +456,7 @@ fn 
pbr_input_from_standard_material( // Clearcoat roughness #ifdef BINDLESS pbr_input.material.clearcoat_perceptual_roughness = - pbr_bindings::material[slot].clearcoat_perceptual_roughness; + pbr_bindings::material_array[material_indices[slot].material].clearcoat_perceptual_roughness; #else // BINDLESS pbr_input.material.clearcoat_perceptual_roughness = pbr_bindings::material.clearcoat_perceptual_roughness; @@ -387,8 +472,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::clearcoat_roughness_texture[slot], - pbr_bindings::clearcoat_roughness_sampler[slot], + bindless_textures_2d[material_indices[slot].clearcoat_roughness_texture], + bindless_samplers_filtering[material_indices[slot].clearcoat_roughness_sampler], #else // BINDLESS pbr_bindings::clearcoat_roughness_texture, pbr_bindings::clearcoat_roughness_sampler, @@ -410,7 +495,7 @@ fn pbr_input_from_standard_material( #endif // VERTEX_UVS #ifdef BINDLESS - var specular_transmission: f32 = pbr_bindings::material[slot].specular_transmission; + var specular_transmission: f32 = pbr_bindings::material_array[slot].specular_transmission; #else // BINDLESS var specular_transmission: f32 = pbr_bindings::material.specular_transmission; #endif // BINDLESS @@ -425,8 +510,12 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::specular_transmission_texture[slot], - pbr_bindings::specular_transmission_sampler[slot], + bindless_textures_2d[ + material_indices[slot].specular_transmission_texture + ], + bindless_samplers_filtering[ + material_indices[slot].specular_transmission_sampler + ], #else // BINDLESS pbr_bindings::specular_transmission_texture, pbr_bindings::specular_transmission_sampler, @@ -449,7 +538,7 @@ fn pbr_input_from_standard_material( pbr_input.material.specular_transmission = specular_transmission; #ifdef BINDLESS - var thickness: f32 = 
pbr_bindings::material[slot].thickness; + var thickness: f32 = pbr_bindings::material_array[material_indices[slot].material].thickness; #else // BINDLESS var thickness: f32 = pbr_bindings::material.thickness; #endif // BINDLESS @@ -464,8 +553,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::thickness_texture[slot], - pbr_bindings::thickness_sampler[slot], + bindless_textures_2d[material_indices[slot].thickness_texture], + bindless_samplers_filtering[material_indices[slot].thickness_sampler], #else // BINDLESS pbr_bindings::thickness_texture, pbr_bindings::thickness_sampler, @@ -495,7 +584,8 @@ fn pbr_input_from_standard_material( pbr_input.material.thickness = thickness; #ifdef BINDLESS - var diffuse_transmission = pbr_bindings::material[slot].diffuse_transmission; + var diffuse_transmission = + pbr_bindings::material_array[material_indices[slot].material].diffuse_transmission; #else // BINDLESS var diffuse_transmission = pbr_bindings::material.diffuse_transmission; #endif // BINDLESS @@ -510,8 +600,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::diffuse_transmission_texture[slot], - pbr_bindings::diffuse_transmission_sampler[slot], + bindless_textures_2d[material_indices[slot].diffuse_transmission_texture], + bindless_samplers_filtering[material_indices[slot].diffuse_transmission_sampler], #else // BINDLESS pbr_bindings::diffuse_transmission_texture, pbr_bindings::diffuse_transmission_sampler, @@ -544,8 +634,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::occlusion_texture[slot], - pbr_bindings::occlusion_sampler[slot], + bindless_textures_2d[material_indices[slot].occlusion_texture], + bindless_samplers_filtering[material_indices[slot].occlusion_sampler], #else // BINDLESS pbr_bindings::occlusion_texture, 
pbr_bindings::occlusion_sampler, @@ -595,8 +685,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::normal_map_texture[slot], - pbr_bindings::normal_map_sampler[slot], + bindless_textures_2d[material_indices[slot].normal_map_texture], + bindless_samplers_filtering[material_indices[slot].normal_map_sampler], #else // BINDLESS pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, @@ -633,8 +723,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::clearcoat_normal_texture[slot], - pbr_bindings::clearcoat_normal_sampler[slot], + bindless_textures_2d[material_indices[slot].clearcoat_normal_texture], + bindless_samplers_filtering[material_indices[slot].clearcoat_normal_sampler], #else // BINDLESS pbr_bindings::clearcoat_normal_texture, pbr_bindings::clearcoat_normal_sampler, @@ -676,8 +766,10 @@ fn pbr_input_from_standard_material( #ifdef STANDARD_MATERIAL_ANISOTROPY #ifdef BINDLESS - var anisotropy_strength = pbr_bindings::material[slot].anisotropy_strength; - var anisotropy_direction = pbr_bindings::material[slot].anisotropy_rotation; + var anisotropy_strength = + pbr_bindings::material_array[material_indices[slot].material].anisotropy_strength; + var anisotropy_direction = + pbr_bindings::material_array[material_indices[slot].material].anisotropy_rotation; #else // BINDLESS var anisotropy_strength = pbr_bindings::material.anisotropy_strength; var anisotropy_direction = pbr_bindings::material.anisotropy_rotation; @@ -692,8 +784,8 @@ fn pbr_input_from_standard_material( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::anisotropy_texture[slot], - pbr_bindings::anisotropy_sampler[slot], + bindless_textures_2d[material_indices[slot].anisotropy_texture], + bindless_samplers_filtering[material_indices[slot].anisotropy_sampler], #else // BINDLESS 
pbr_bindings::anisotropy_texture, pbr_bindings::anisotropy_sampler, @@ -736,7 +828,8 @@ fn pbr_input_from_standard_material( #ifdef LIGHTMAP #ifdef BINDLESS - let lightmap_exposure = pbr_bindings::material[slot].lightmap_exposure; + let lightmap_exposure = + pbr_bindings::material_array[material_indices[slot].material].lightmap_exposure; #else // BINDLESS let lightmap_exposure = pbr_bindings::material.lightmap_exposure; #endif // BINDLESS diff --git a/crates/bevy_pbr/src/render/pbr_functions.wgsl b/crates/bevy_pbr/src/render/pbr_functions.wgsl index 60f80239455c2..dcda30ee79d59 100644 --- a/crates/bevy_pbr/src/render/pbr_functions.wgsl +++ b/crates/bevy_pbr/src/render/pbr_functions.wgsl @@ -33,9 +33,8 @@ #endif -// Biasing info needed to sample from a texture when calling `sample_texture`. -// How this is done depends on whether we're rendering meshlets or regular -// meshes. +// Biasing info needed to sample from a texture. How this is done depends on +// whether we're rendering meshlets or regular meshes. 
struct SampleBias { #ifdef MESHLET_MESH_MATERIAL_PASS ddx_uv: vec2, @@ -242,7 +241,7 @@ fn bend_normal_for_anisotropy(lighting_input: ptr, metallic: f32, reflectance: f32) -> vec3 { +fn calculate_F0(base_color: vec3, metallic: f32, reflectance: vec3) -> vec3 { return 0.16 * reflectance * reflectance * (1.0 - metallic) + base_color * metallic; } @@ -385,9 +384,9 @@ fn apply_pbr_lighting( transmissive_lighting_input.clearcoat_strength = 0.0; #endif // STANDARD_MATERIAL_CLEARCOAT #ifdef STANDARD_MATERIAL_ANISOTROPY - lighting_input.anisotropy = in.anisotropy_strength; - lighting_input.Ta = in.anisotropy_T; - lighting_input.Ba = in.anisotropy_B; + transmissive_lighting_input.anisotropy = in.anisotropy_strength; + transmissive_lighting_input.Ta = in.anisotropy_T; + transmissive_lighting_input.Ba = in.anisotropy_B; #endif // STANDARD_MATERIAL_ANISOTROPY #endif // STANDARD_MATERIAL_DIFFUSE_TRANSMISSION @@ -760,6 +759,7 @@ fn apply_pbr_lighting( } #endif // PREPASS_FRAGMENT +#ifdef DISTANCE_FOG fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_world_position: vec3, view_world_position: vec3) -> vec4 { let view_to_world = fragment_world_position.xyz - view_world_position.xyz; @@ -797,6 +797,7 @@ fn apply_fog(fog_params: mesh_view_types::Fog, input_color: vec4, fragment_ return input_color; } } +#endif // DISTANCE_FOG #ifdef PREMULTIPLY_ALPHA fn premultiply_alpha(standard_material_flags: u32, color: vec4) -> vec4 { @@ -858,10 +859,12 @@ fn main_pass_post_lighting_processing( ) -> vec4 { var output_color = input_color; +#ifdef DISTANCE_FOG // fog - if (view_bindings::fog.mode != mesh_view_types::FOG_MODE_OFF && (pbr_input.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { + if ((pbr_input.material.flags & pbr_types::STANDARD_MATERIAL_FLAGS_FOG_ENABLED_BIT) != 0u) { output_color = apply_fog(view_bindings::fog, output_color, pbr_input.world_position.xyz, view_bindings::view.world_position.xyz); } +#endif // DISTANCE_FOG #ifdef 
TONEMAP_IN_SHADER output_color = tone_mapping(output_color, view_bindings::view.color_grading); diff --git a/crates/bevy_pbr/src/render/pbr_prepass.wgsl b/crates/bevy_pbr/src/render/pbr_prepass.wgsl index 0fd1c63b898e0..68c360248cbd6 100644 --- a/crates/bevy_pbr/src/render/pbr_prepass.wgsl +++ b/crates/bevy_pbr/src/render/pbr_prepass.wgsl @@ -10,10 +10,16 @@ mesh_view_bindings::view, } +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} + #ifdef MESHLET_MESH_MATERIAL_PASS #import bevy_pbr::meshlet_visibility_buffer_resolve::resolve_vertex_output #endif +#ifdef BINDLESS +#import bevy_pbr::pbr_bindings::material_indices +#endif // BINDLESS + #ifdef PREPASS_FRAGMENT @fragment fn fragment( @@ -31,8 +37,8 @@ fn fragment( #ifdef BINDLESS let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu; - let flags = pbr_bindings::material[slot].flags; - let uv_transform = pbr_bindings::material[slot].uv_transform; + let flags = pbr_bindings::material_array[material_indices[slot].material].flags; + let uv_transform = pbr_bindings::material_array[material_indices[slot].material].uv_transform; #else // BINDLESS let flags = pbr_bindings::material.flags; let uv_transform = pbr_bindings::material.uv_transform; @@ -93,8 +99,8 @@ fn fragment( textureSampleBias( #endif // MESHLET_MESH_MATERIAL_PASS #ifdef BINDLESS - pbr_bindings::normal_map_texture[slot], - pbr_bindings::normal_map_sampler[slot], + bindless_textures_2d[material_indices[slot].normal_map_texture], + bindless_samplers_filtering[material_indices[slot].normal_map_sampler], #else // BINDLESS pbr_bindings::normal_map_texture, pbr_bindings::normal_map_sampler, diff --git a/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl b/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl index e3391126aa76d..d2d2c71e644a7 100644 --- a/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl +++ b/crates/bevy_pbr/src/render/pbr_prepass_functions.wgsl @@ -1,5 +1,7 @@ 
#define_import_path bevy_pbr::pbr_prepass_functions +#import bevy_render::bindless::{bindless_samplers_filtering, bindless_textures_2d} + #import bevy_pbr::{ prepass_io::VertexOutput, prepass_bindings::previous_view_uniforms, @@ -9,6 +11,10 @@ pbr_types, } +#ifdef BINDLESS +#import bevy_pbr::pbr_bindings::material_indices +#endif // BINDLESS + // Cutoff used for the premultiplied alpha modes BLEND, ADD, and ALPHA_TO_COVERAGE. const PREMULTIPLIED_ALPHA_CUTOFF = 0.05; @@ -18,9 +24,11 @@ fn prepass_alpha_discard(in: VertexOutput) { #ifdef MAY_DISCARD #ifdef BINDLESS let slot = mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu; - var output_color: vec4 = pbr_bindings::material[slot].base_color; + var output_color: vec4 = pbr_bindings::material_array[material_indices[slot].material].base_color; + let flags = pbr_bindings::material_array[material_indices[slot].material].flags; #else // BINDLESS var output_color: vec4 = pbr_bindings::material.base_color; + let flags = pbr_bindings::material.flags; #endif // BINDLESS #ifdef VERTEX_UVS @@ -31,19 +39,17 @@ fn prepass_alpha_discard(in: VertexOutput) { #endif // STANDARD_MATERIAL_BASE_COLOR_UV_B #ifdef BINDLESS - let uv_transform = pbr_bindings::material[slot].uv_transform; - let flags = pbr_bindings::material[slot].flags; + let uv_transform = pbr_bindings::material_array[material_indices[slot].material].uv_transform; #else // BINDLESS let uv_transform = pbr_bindings::material.uv_transform; - let flags = pbr_bindings::material.flags; #endif // BINDLESS uv = (uv_transform * vec3(uv, 1.0)).xy; if (flags & pbr_types::STANDARD_MATERIAL_FLAGS_BASE_COLOR_TEXTURE_BIT) != 0u { output_color = output_color * textureSampleBias( #ifdef BINDLESS - pbr_bindings::base_color_texture[slot], - pbr_bindings::base_color_sampler[slot], + bindless_textures_2d[material_indices[slot].base_color_texture], + bindless_samplers_filtering[material_indices[slot].base_color_sampler], #else // BINDLESS pbr_bindings::base_color_texture, 
pbr_bindings::base_color_sampler, @@ -57,7 +63,7 @@ fn prepass_alpha_discard(in: VertexOutput) { let alpha_mode = flags & pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS; if alpha_mode == pbr_types::STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK { #ifdef BINDLESS - let alpha_cutoff = pbr_bindings::material[slot].alpha_cutoff; + let alpha_cutoff = pbr_bindings::material_array[material_indices[slot].material].alpha_cutoff; #else // BINDLESS let alpha_cutoff = pbr_bindings::material.alpha_cutoff; #endif // BINDLESS diff --git a/crates/bevy_pbr/src/render/pbr_transmission.wgsl b/crates/bevy_pbr/src/render/pbr_transmission.wgsl index 83a71096ebdfe..720a42bca9631 100644 --- a/crates/bevy_pbr/src/render/pbr_transmission.wgsl +++ b/crates/bevy_pbr/src/render/pbr_transmission.wgsl @@ -15,7 +15,7 @@ #endif fn specular_transmissive_light(world_position: vec4, frag_coord: vec3, view_z: f32, N: vec3, V: vec3, F0: vec3, ior: f32, thickness: f32, perceptual_roughness: f32, specular_transmissive_color: vec3, transmitted_environment_light_specular: vec3) -> vec3 { - // Calculate the ratio between refaction indexes. Assume air/vacuum for the space outside the mesh + // Calculate the ratio between refraction indexes. 
Assume air/vacuum for the space outside the mesh let eta = 1.0 / ior; // Calculate incidence vector (opposite to view vector) and its dot product with the mesh normal @@ -26,7 +26,7 @@ fn specular_transmissive_light(world_position: vec4, frag_coord: vec3, let k = 1.0 - eta * eta * (1.0 - NdotI * NdotI); let T = eta * I - (eta * NdotI + sqrt(k)) * N; - // Calculate the exit position of the refracted ray, by propagating refacted direction through thickness + // Calculate the exit position of the refracted ray, by propagating refracted direction through thickness let exit_position = world_position.xyz + T * thickness; // Transform exit_position into clip space diff --git a/crates/bevy_pbr/src/render/pbr_types.wgsl b/crates/bevy_pbr/src/render/pbr_types.wgsl index d9b600c40bf90..29d479c4e315b 100644 --- a/crates/bevy_pbr/src/render/pbr_types.wgsl +++ b/crates/bevy_pbr/src/render/pbr_types.wgsl @@ -7,9 +7,9 @@ struct StandardMaterial { emissive: vec4, attenuation_color: vec4, uv_transform: mat3x3, + reflectance: vec3, perceptual_roughness: f32, metallic: f32, - reflectance: f32, diffuse_transmission: f32, specular_transmission: f32, thickness: f32, @@ -52,6 +52,8 @@ const STANDARD_MATERIAL_FLAGS_CLEARCOAT_TEXTURE_BIT: u32 = 16384u; const STANDARD_MATERIAL_FLAGS_CLEARCOAT_ROUGHNESS_TEXTURE_BIT: u32 = 32768u; const STANDARD_MATERIAL_FLAGS_CLEARCOAT_NORMAL_TEXTURE_BIT: u32 = 65536u; const STANDARD_MATERIAL_FLAGS_ANISOTROPY_TEXTURE_BIT: u32 = 131072u; +const STANDARD_MATERIAL_FLAGS_SPECULAR_TEXTURE_BIT: u32 = 262144u; +const STANDARD_MATERIAL_FLAGS_SPECULAR_TINT_TEXTURE_BIT: u32 = 524288u; const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_RESERVED_BITS: u32 = 3758096384u; // (0b111u32 << 29) const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_OPAQUE: u32 = 0u; // (0u32 << 29) const STANDARD_MATERIAL_FLAGS_ALPHA_MODE_MASK: u32 = 536870912u; // (1u32 << 29) @@ -73,7 +75,7 @@ fn standard_material_new() -> StandardMaterial { material.emissive = vec4(0.0, 0.0, 0.0, 1.0); 
material.perceptual_roughness = 0.5; material.metallic = 0.00; - material.reflectance = 0.5; + material.reflectance = vec3(0.5); material.diffuse_transmission = 0.0; material.specular_transmission = 0.0; material.thickness = 0.0; diff --git a/crates/bevy_pbr/src/render/reset_indirect_batch_sets.wgsl b/crates/bevy_pbr/src/render/reset_indirect_batch_sets.wgsl new file mode 100644 index 0000000000000..930959472555e --- /dev/null +++ b/crates/bevy_pbr/src/render/reset_indirect_batch_sets.wgsl @@ -0,0 +1,25 @@ +// Resets the indirect draw counts to zero. +// +// This shader is needed because we reuse the same indirect batch set count +// buffer (i.e. the buffer that gets passed to `multi_draw_indirect_count` to +// determine how many objects to draw) between phases (early, late, and main). +// Before launching `build_indirect_params.wgsl`, we need to reinitialize the +// value to 0. + +#import bevy_pbr::mesh_preprocess_types::IndirectBatchSet + +@group(0) @binding(0) var indirect_batch_sets: array; + +@compute +@workgroup_size(64) +fn main(@builtin(global_invocation_id) global_invocation_id: vec3) { + // Figure out our instance index. If this thread doesn't correspond to any + // index, bail. + let instance_index = global_invocation_id.x; + if (instance_index >= arrayLength(&indirect_batch_sets)) { + return; + } + + // Reset the number of batch sets to 0. 
+ atomicStore(&indirect_batch_sets[instance_index].indirect_parameters_count, 0u); +} diff --git a/crates/bevy_pbr/src/render/shadow_sampling.wgsl b/crates/bevy_pbr/src/render/shadow_sampling.wgsl index 22f2e28310bd0..c7f7253a6357d 100644 --- a/crates/bevy_pbr/src/render/shadow_sampling.wgsl +++ b/crates/bevy_pbr/src/render/shadow_sampling.wgsl @@ -47,7 +47,7 @@ fn search_for_blockers_in_shadow_map_hardware( view_bindings::directional_shadow_textures, view_bindings::directional_shadow_textures_linear_sampler, light_local, - 0.0, + 0u, ); #else // NO_ARRAY_TEXTURES_SUPPORT let sampled_depth = textureSampleLevel( @@ -55,7 +55,7 @@ fn search_for_blockers_in_shadow_map_hardware( view_bindings::directional_shadow_textures_linear_sampler, light_local, array_index, - 0.0, + 0u, ); #endif // NO_ARRAY_TEXTURES_SUPPORT return select(vec2(0.0), vec2(sampled_depth, 1.0), sampled_depth >= depth); diff --git a/crates/bevy_pbr/src/render/skin.rs b/crates/bevy_pbr/src/render/skin.rs index c248821ccafd3..476e06c1e78be 100644 --- a/crates/bevy_pbr/src/render/skin.rs +++ b/crates/bevy_pbr/src/render/skin.rs @@ -1,19 +1,24 @@ use core::mem::{self, size_of}; use std::sync::OnceLock; -use bevy_asset::Assets; +use bevy_asset::{prelude::AssetChanged, Assets}; use bevy_ecs::prelude::*; use bevy_math::Mat4; -use bevy_render::sync_world::MainEntityHashMap; +use bevy_platform::collections::hash_map::Entry; +use bevy_render::render_resource::{Buffer, BufferDescriptor}; +use bevy_render::sync_world::{MainEntity, MainEntityHashMap, MainEntityHashSet}; use bevy_render::{ batching::NoAutomaticBatching, mesh::skinning::{SkinnedMesh, SkinnedMeshInverseBindposes}, - render_resource::{BufferUsages, RawBufferVec}, + render_resource::BufferUsages, renderer::{RenderDevice, RenderQueue}, view::ViewVisibility, Extract, }; use bevy_transform::prelude::GlobalTransform; +use offset_allocator::{Allocation, Allocator}; +use smallvec::SmallVec; +use tracing::error; /// Maximum number of joints supported for 
skinned meshes. /// @@ -24,18 +29,40 @@ use bevy_transform::prelude::GlobalTransform; /// of the GPU at runtime, which would mean not using consts anymore. pub const MAX_JOINTS: usize = 256; +/// The total number of joints we support. +/// +/// This is 256 GiB worth of joint matrices, which we will never hit under any +/// reasonable circumstances. +const MAX_TOTAL_JOINTS: u32 = 1024 * 1024 * 1024; + +/// The number of joints that we allocate at a time. +/// +/// Some hardware requires that uniforms be allocated on 256-byte boundaries, so +/// we need to allocate 4 64-byte matrices at a time to satisfy alignment +/// requirements. +const JOINTS_PER_ALLOCATION_UNIT: u32 = (256 / size_of::()) as u32; + +/// The maximum ratio of the number of entities whose transforms changed to the +/// total number of joints before we re-extract all joints. +/// +/// We use this as a heuristic to decide whether it's worth switching over to +/// fine-grained detection to determine which skins need extraction. If the +/// number of changed entities is over this threshold, we skip change detection +/// and simply re-extract the transforms of all joints. +const JOINT_EXTRACTION_THRESHOLD_FACTOR: f64 = 0.25; + /// The location of the first joint matrix in the skin uniform buffer. -#[derive(Component)] -pub struct SkinIndex { +#[derive(Clone, Copy)] +pub struct SkinByteOffset { /// The byte offset of the first joint matrix. pub byte_offset: u32, } -impl SkinIndex { +impl SkinByteOffset { /// Index to be in address space based on the size of a skin uniform. - const fn new(start: usize) -> Self { - SkinIndex { - byte_offset: (start * size_of::()) as u32, + const fn from_index(index: usize) -> Self { + SkinByteOffset { + byte_offset: (index * size_of::()) as u32, } } @@ -47,22 +74,6 @@ impl SkinIndex { } } -/// Maps each skinned mesh to the applicable offset within the [`SkinUniforms`] -/// buffer. 
-/// -/// We store both the current frame's joint matrices and the previous frame's -/// joint matrices for the purposes of motion vector calculation. -#[derive(Default, Resource)] -pub struct SkinIndices { - /// Maps each skinned mesh to the applicable offset within - /// [`SkinUniforms::current_buffer`]. - pub current: MainEntityHashMap, - - /// Maps each skinned mesh to the applicable offset within - /// [`SkinUniforms::prev_buffer`]. - pub prev: MainEntityHashMap, -} - /// The GPU buffers containing joint matrices for all skinned meshes. /// /// This is double-buffered: we store the joint matrices of each mesh for the @@ -74,28 +85,109 @@ pub struct SkinIndices { /// Notes on implementation: see comment on top of the `extract_skins` system. #[derive(Resource)] pub struct SkinUniforms { - /// Stores all the joint matrices for skinned meshes in the current frame. - pub current_buffer: RawBufferVec, - /// Stores all the joint matrices for skinned meshes in the previous frame. - pub prev_buffer: RawBufferVec, + /// The CPU-side buffer that stores the joint matrices for skinned meshes in + /// the current frame. + pub current_staging_buffer: Vec, + /// The GPU-side buffer that stores the joint matrices for skinned meshes in + /// the current frame. + pub current_buffer: Buffer, + /// The GPU-side buffer that stores the joint matrices for skinned meshes in + /// the previous frame. + pub prev_buffer: Buffer, + /// The offset allocator that manages the placement of the joints within the + /// [`Self::current_buffer`]. + allocator: Allocator, + /// Allocation information that we keep about each skin. + skin_uniform_info: MainEntityHashMap, + /// Maps each joint entity to the skins it's associated with. + /// + /// We use this in conjunction with change detection to only update the + /// skins that need updating each frame. 
+ /// + /// Note that conceptually this is a hash map of sets, but we use a + /// [`SmallVec`] to avoid allocations for the vast majority of the cases in + /// which each bone belongs to exactly one skin. + joint_to_skins: MainEntityHashMap>, + /// The total number of joints in the scene. + /// + /// We use this as part of our heuristic to decide whether to use + /// fine-grained change detection. + total_joints: usize, } impl FromWorld for SkinUniforms { fn from_world(world: &mut World) -> Self { let device = world.resource::(); - let buffer_usages = if skins_use_uniform_buffers(device) { + let buffer_usages = (if skins_use_uniform_buffers(device) { BufferUsages::UNIFORM } else { BufferUsages::STORAGE - }; + }) | BufferUsages::COPY_DST; + + // Create the current and previous buffer with the minimum sizes. + // + // These will be swapped every frame. + let current_buffer = device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + size: MAX_JOINTS as u64 * size_of::() as u64, + usage: buffer_usages, + mapped_at_creation: false, + }); + let prev_buffer = device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + size: MAX_JOINTS as u64 * size_of::() as u64, + usage: buffer_usages, + mapped_at_creation: false, + }); Self { - current_buffer: RawBufferVec::new(buffer_usages), - prev_buffer: RawBufferVec::new(buffer_usages), + current_staging_buffer: vec![], + current_buffer, + prev_buffer, + allocator: Allocator::new(MAX_TOTAL_JOINTS), + skin_uniform_info: MainEntityHashMap::default(), + joint_to_skins: MainEntityHashMap::default(), + total_joints: 0, } } } +impl SkinUniforms { + /// Returns the current offset in joints of the skin in the buffer. + pub fn skin_index(&self, skin: MainEntity) -> Option { + self.skin_uniform_info + .get(&skin) + .map(SkinUniformInfo::offset) + } + + /// Returns the current offset in bytes of the skin in the buffer. 
+ pub fn skin_byte_offset(&self, skin: MainEntity) -> Option { + self.skin_uniform_info.get(&skin).map(|skin_uniform_info| { + SkinByteOffset::from_index(skin_uniform_info.offset() as usize) + }) + } + + /// Returns an iterator over all skins in the scene. + pub fn all_skins(&self) -> impl Iterator { + self.skin_uniform_info.keys() + } +} + +/// Allocation information about each skin. +struct SkinUniformInfo { + /// The allocation of the joints within the [`SkinUniforms::current_buffer`]. + allocation: Allocation, + /// The entities that comprise the joints. + joints: Vec, +} + +impl SkinUniformInfo { + /// The offset in joints within the [`SkinUniforms::current_staging_buffer`]. + fn offset(&self) -> u32 { + self.allocation.offset * JOINTS_PER_ALLOCATION_UNIT + } +} + /// Returns true if skinning must use uniforms (and dynamic offsets) because /// storage buffers aren't supported on the current platform. pub fn skins_use_uniform_buffers(render_device: &RenderDevice) -> bool { @@ -104,20 +196,72 @@ pub fn skins_use_uniform_buffers(render_device: &RenderDevice) -> bool { .get_or_init(|| render_device.limits().max_storage_buffers_per_shader_stage == 0) } +/// Uploads the buffers containing the joints to the GPU. pub fn prepare_skins( render_device: Res, render_queue: Res, - mut uniform: ResMut, + uniform: ResMut, ) { - if uniform.current_buffer.is_empty() { + let uniform = uniform.into_inner(); + + if uniform.current_staging_buffer.is_empty() { return; } - let len = uniform.current_buffer.len(); - uniform.current_buffer.reserve(len, &render_device); - uniform - .current_buffer - .write_buffer(&render_device, &render_queue); + // Swap current and previous buffers. + mem::swap(&mut uniform.current_buffer, &mut uniform.prev_buffer); + + // Resize the buffers if necessary. Include extra space equal to `MAX_JOINTS` + // because we need to be able to bind a full uniform buffer's worth of data + // if skins use uniform buffers on this platform. 
+ let needed_size = (uniform.current_staging_buffer.len() as u64 + MAX_JOINTS as u64) + * size_of::() as u64; + if uniform.current_buffer.size() < needed_size { + let mut new_size = uniform.current_buffer.size(); + while new_size < needed_size { + // 1.5× growth factor. + new_size += new_size / 2; + } + + // Create the new buffers. + let buffer_usages = if skins_use_uniform_buffers(&render_device) { + BufferUsages::UNIFORM + } else { + BufferUsages::STORAGE + } | BufferUsages::COPY_DST; + uniform.current_buffer = render_device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + usage: buffer_usages, + size: new_size, + mapped_at_creation: false, + }); + uniform.prev_buffer = render_device.create_buffer(&BufferDescriptor { + label: Some("skin uniform buffer"), + usage: buffer_usages, + size: new_size, + mapped_at_creation: false, + }); + + // We've created a new `prev_buffer` but we don't have the previous joint + // data needed to fill it out correctly. Use the current joint data + // instead. + // + // TODO: This is a bug - will cause motion blur to ignore joint movement + // for one frame. + render_queue.write_buffer( + &uniform.prev_buffer, + 0, + bytemuck::must_cast_slice(&uniform.current_staging_buffer[..]), + ); + } + + // Write the data from `uniform.current_staging_buffer` into + // `uniform.current_buffer`. + render_queue.write_buffer( + &uniform.current_buffer, + 0, + bytemuck::must_cast_slice(&uniform.current_staging_buffer[..]), + ); // We don't need to write `uniform.prev_buffer` because we already wrote it // last frame, and the data should still be on the GPU. @@ -150,71 +294,320 @@ pub fn prepare_skins( // which normally only support fixed size arrays. You just have to make sure // in the shader that you only read the values that are valid for that binding. 
pub fn extract_skins( - skin_indices: ResMut, - uniform: ResMut, - query: Extract>, - inverse_bindposes: Extract>>, + skin_uniforms: ResMut, + skinned_meshes: Extract>, + changed_skinned_meshes: Extract< + Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + >, + skinned_mesh_inverse_bindposes: Extract>>, + changed_transforms: Extract>>, joints: Extract>, - render_device: Res, + mut removed_visibilities_query: Extract>, + mut removed_skinned_meshes_query: Extract>, ) { - let skins_use_uniform_buffers = skins_use_uniform_buffers(&render_device); + let skin_uniforms = skin_uniforms.into_inner(); - // Borrow check workaround. - let (skin_indices, uniform) = (skin_indices.into_inner(), uniform.into_inner()); + // Find skins that have become visible or invisible on this frame. Allocate, + // reallocate, or free space for them as necessary. + add_or_delete_skins( + skin_uniforms, + &changed_skinned_meshes, + &skinned_mesh_inverse_bindposes, + &joints, + ); - // Swap buffers. We need to keep the previous frame's buffer around for the - // purposes of motion vector computation. - mem::swap(&mut skin_indices.current, &mut skin_indices.prev); - mem::swap(&mut uniform.current_buffer, &mut uniform.prev_buffer); - skin_indices.current.clear(); - uniform.current_buffer.clear(); + // Extract the transforms for all joints from the scene, and write them into + // the staging buffer at the appropriate spot. + extract_joints( + skin_uniforms, + &skinned_meshes, + &changed_skinned_meshes, + &skinned_mesh_inverse_bindposes, + &changed_transforms, + &joints, + ); + + // Delete skins that became invisible. + for skinned_mesh_entity in removed_visibilities_query + .read() + .chain(removed_skinned_meshes_query.read()) + { + // Only remove a skin if we didn't pick it up in `add_or_delete_skins`. + // It's possible that a necessary component was removed and re-added in + // the same frame. 
+ if !changed_skinned_meshes.contains(skinned_mesh_entity) { + remove_skin(skin_uniforms, skinned_mesh_entity.into()); + } + } +} - let mut last_start = 0; +/// Searches for all skins that have become visible or invisible this frame and +/// allocations for them as necessary. +fn add_or_delete_skins( + skin_uniforms: &mut SkinUniforms, + changed_skinned_meshes: &Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + skinned_mesh_inverse_bindposes: &Assets, + joints: &Query<&GlobalTransform>, +) { + // Find every skinned mesh that changed one of (1) visibility; (2) joint + // entities (part of `SkinnedMesh`); (3) the associated + // `SkinnedMeshInverseBindposes` asset. + for (skinned_mesh_entity, skinned_mesh_view_visibility, skinned_mesh) in changed_skinned_meshes + { + // Remove the skin if it existed last frame. + let skinned_mesh_entity = MainEntity::from(skinned_mesh_entity); + remove_skin(skin_uniforms, skinned_mesh_entity); - // PERF: This can be expensive, can we move this to prepare? - for (entity, view_visibility, skin) in &query { - if !view_visibility.get() { + // If the skin is invisible, we're done. + if !(*skinned_mesh_view_visibility).get() { continue; } - let buffer = &mut uniform.current_buffer; - let Some(inverse_bindposes) = inverse_bindposes.get(&skin.inverse_bindposes) else { + + // Initialize the skin. + add_skin( + skinned_mesh_entity, + skinned_mesh, + skin_uniforms, + skinned_mesh_inverse_bindposes, + joints, + ); + } +} + +/// Extracts the global transforms of all joints and updates the staging buffer +/// as necessary. 
+fn extract_joints( + skin_uniforms: &mut SkinUniforms, + skinned_meshes: &Query<(Entity, &SkinnedMesh)>, + changed_skinned_meshes: &Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + skinned_mesh_inverse_bindposes: &Assets, + changed_transforms: &Query<(Entity, &GlobalTransform), Changed>, + joints: &Query<&GlobalTransform>, +) { + // If the number of entities that changed transforms exceeds a certain + // fraction (currently 25%) of the total joints in the scene, then skip + // fine-grained change detection. + // + // Note that this is a crude heuristic, for performance reasons. It doesn't + // consider the ratio of modified *joints* to total joints, only the ratio + // of modified *entities* to total joints. Thus in the worst case we might + // end up re-extracting all skins even though none of the joints changed. + // But making the heuristic finer-grained would make it slower to evaluate, + // and we don't want to lose performance. + let threshold = + (skin_uniforms.total_joints as f64 * JOINT_EXTRACTION_THRESHOLD_FACTOR).floor() as usize; + + if changed_transforms.iter().nth(threshold).is_some() { + // Go ahead and re-extract all skins in the scene. + for (skin_entity, skin) in skinned_meshes { + extract_joints_for_skin( + skin_entity.into(), + skin, + skin_uniforms, + changed_skinned_meshes, + skinned_mesh_inverse_bindposes, + joints, + ); + } + return; + } + + // Use fine-grained change detection to figure out only the skins that need + // to have their joints re-extracted. + let dirty_skins: MainEntityHashSet = changed_transforms + .iter() + .flat_map(|(joint, _)| skin_uniforms.joint_to_skins.get(&MainEntity::from(joint))) + .flat_map(|skin_joint_mappings| skin_joint_mappings.iter()) + .copied() + .collect(); + + // Re-extract the joints for only those skins. 
+ for skin_entity in dirty_skins { + let Ok((_, skin)) = skinned_meshes.get(*skin_entity) else { continue; }; - let start = buffer.len(); - - let target = start + skin.joints.len().min(MAX_JOINTS); - buffer.extend( - joints - .iter_many(&skin.joints) - .zip(inverse_bindposes.iter()) - .take(MAX_JOINTS) - .map(|(joint, bindpose)| joint.affine() * *bindpose), + extract_joints_for_skin( + skin_entity, + skin, + skin_uniforms, + changed_skinned_meshes, + skinned_mesh_inverse_bindposes, + joints, ); - // iter_many will skip any failed fetches. This will cause it to assign the wrong bones, - // so just bail by truncating to the start. - if buffer.len() != target { - buffer.truncate(start); + } +} + +/// Extracts all joints for a single skin and writes their transforms into the +/// CPU staging buffer. +fn extract_joints_for_skin( + skin_entity: MainEntity, + skin: &SkinnedMesh, + skin_uniforms: &mut SkinUniforms, + changed_skinned_meshes: &Query< + (Entity, &ViewVisibility, &SkinnedMesh), + Or<( + Changed, + Changed, + AssetChanged, + )>, + >, + skinned_mesh_inverse_bindposes: &Assets, + joints: &Query<&GlobalTransform>, +) { + // If we initialized the skin this frame, we already populated all + // the joints, so there's no need to populate them again. + if changed_skinned_meshes.contains(*skin_entity) { + return; + } + + // Fetch information about the skin. + let Some(skin_uniform_info) = skin_uniforms.skin_uniform_info.get(&skin_entity) else { + return; + }; + let Some(skinned_mesh_inverse_bindposes) = + skinned_mesh_inverse_bindposes.get(&skin.inverse_bindposes) + else { + return; + }; + + // Calculate and write in the new joint matrices. 
+ for (joint_index, (&joint, skinned_mesh_inverse_bindpose)) in skin + .joints + .iter() + .zip(skinned_mesh_inverse_bindposes.iter()) + .enumerate() + { + let Ok(joint_transform) = joints.get(joint) else { continue; - } - last_start = last_start.max(start); + }; + + let joint_matrix = joint_transform.affine() * *skinned_mesh_inverse_bindpose; + skin_uniforms.current_staging_buffer[skin_uniform_info.offset() as usize + joint_index] = + joint_matrix; + } +} + +/// Allocates space for a new skin in the buffers, and populates its joints. +fn add_skin( + skinned_mesh_entity: MainEntity, + skinned_mesh: &SkinnedMesh, + skin_uniforms: &mut SkinUniforms, + skinned_mesh_inverse_bindposes: &Assets, + joints: &Query<&GlobalTransform>, +) { + // Allocate space for the joints. + let Some(allocation) = skin_uniforms.allocator.allocate( + skinned_mesh + .joints + .len() + .div_ceil(JOINTS_PER_ALLOCATION_UNIT as usize) as u32, + ) else { + error!( + "Out of space for skin: {:?}. Tried to allocate space for {:?} joints.", + skinned_mesh_entity, + skinned_mesh.joints.len() + ); + return; + }; + + // Store that allocation. + let skin_uniform_info = SkinUniformInfo { + allocation, + joints: skinned_mesh + .joints + .iter() + .map(|entity| MainEntity::from(*entity)) + .collect(), + }; + + let skinned_mesh_inverse_bindposes = + skinned_mesh_inverse_bindposes.get(&skinned_mesh.inverse_bindposes); - // Pad to 256 byte alignment if we're using a uniform buffer. - // There's no need to do this if we're using storage buffers, though. - if skins_use_uniform_buffers { - while buffer.len() % 4 != 0 { - buffer.push(Mat4::ZERO); + for (joint_index, &joint) in skinned_mesh.joints.iter().enumerate() { + // Calculate the initial joint matrix. 
+ let skinned_mesh_inverse_bindpose = + skinned_mesh_inverse_bindposes.and_then(|skinned_mesh_inverse_bindposes| { + skinned_mesh_inverse_bindposes.get(joint_index) + }); + let joint_matrix = match (skinned_mesh_inverse_bindpose, joints.get(joint)) { + (Some(skinned_mesh_inverse_bindpose), Ok(transform)) => { + transform.affine() * *skinned_mesh_inverse_bindpose } + _ => Mat4::IDENTITY, + }; + + // Write in the new joint matrix, growing the staging buffer if + // necessary. + let buffer_index = skin_uniform_info.offset() as usize + joint_index; + if skin_uniforms.current_staging_buffer.len() < buffer_index + 1 { + skin_uniforms + .current_staging_buffer + .resize(buffer_index + 1, Mat4::IDENTITY); } + skin_uniforms.current_staging_buffer[buffer_index] = joint_matrix; - skin_indices - .current - .insert(entity.into(), SkinIndex::new(start)); + // Record the inverse mapping from the joint back to the skin. We use + // this in order to perform fine-grained joint extraction. + skin_uniforms + .joint_to_skins + .entry(MainEntity::from(joint)) + .or_default() + .push(skinned_mesh_entity); } - // Pad out the buffer to ensure that there's enough space for bindings - while uniform.current_buffer.len() - last_start < MAX_JOINTS { - uniform.current_buffer.push(Mat4::ZERO); + // Record the number of joints. + skin_uniforms.total_joints += skinned_mesh.joints.len(); + + skin_uniforms + .skin_uniform_info + .insert(skinned_mesh_entity, skin_uniform_info); +} + +/// Deallocates a skin and removes it from the [`SkinUniforms`]. +fn remove_skin(skin_uniforms: &mut SkinUniforms, skinned_mesh_entity: MainEntity) { + let Some(old_skin_uniform_info) = skin_uniforms.skin_uniform_info.remove(&skinned_mesh_entity) + else { + return; + }; + + // Free the allocation. + skin_uniforms + .allocator + .free(old_skin_uniform_info.allocation); + + // Remove the inverse mapping from each joint back to the skin. 
+ for &joint in &old_skin_uniform_info.joints { + if let Entry::Occupied(mut entry) = skin_uniforms.joint_to_skins.entry(joint) { + entry.get_mut().retain(|skin| *skin != skinned_mesh_entity); + if entry.get_mut().is_empty() { + entry.remove(); + } + } } + + // Update the total number of joints. + skin_uniforms.total_joints -= old_skin_uniform_info.joints.len(); } // NOTE: The skinned joints uniform buffer has to be bound at a dynamic offset per diff --git a/crates/bevy_pbr/src/render/skinning.wgsl b/crates/bevy_pbr/src/render/skinning.wgsl index 92e977aeb1b92..1762a73887c94 100644 --- a/crates/bevy_pbr/src/render/skinning.wgsl +++ b/crates/bevy_pbr/src/render/skinning.wgsl @@ -34,7 +34,7 @@ fn skin_model( + weights.z * joint_matrices.data[indexes.z] + weights.w * joint_matrices.data[indexes.w]; #else // SKINS_USE_UNIFORM_BUFFERS - let skin_index = mesh[instance_index].current_skin_index; + var skin_index = mesh[instance_index].current_skin_index; return weights.x * joint_matrices[skin_index + indexes.x] + weights.y * joint_matrices[skin_index + indexes.y] + weights.z * joint_matrices[skin_index + indexes.z] @@ -57,7 +57,7 @@ fn skin_prev_model( + weights.z * prev_joint_matrices.data[indexes.z] + weights.w * prev_joint_matrices.data[indexes.w]; #else // SKINS_USE_UNIFORM_BUFFERS - let skin_index = mesh[instance_index].previous_skin_index; + let skin_index = mesh[instance_index].current_skin_index; return weights.x * prev_joint_matrices[skin_index + indexes.x] + weights.y * prev_joint_matrices[skin_index + indexes.y] + weights.z * prev_joint_matrices[skin_index + indexes.z] diff --git a/crates/bevy_pbr/src/render/view_transformations.wgsl b/crates/bevy_pbr/src/render/view_transformations.wgsl index 63ee78a0c0641..80c26d7b69b2e 100644 --- a/crates/bevy_pbr/src/render/view_transformations.wgsl +++ b/crates/bevy_pbr/src/render/view_transformations.wgsl @@ -1,6 +1,7 @@ #define_import_path bevy_pbr::view_transformations #import bevy_pbr::mesh_view_bindings as 
view_bindings +#import bevy_pbr::prepass_bindings /// World space: /// +y is up @@ -93,6 +94,22 @@ fn direction_clip_to_view(clip_dir: vec4) -> vec3 { return view_dir.xyz; } +// ----------------- +// TO PREV. VIEW --- +// ----------------- + +fn position_world_to_prev_view(world_pos: vec3) -> vec3 { + let view_pos = prepass_bindings::previous_view_uniforms.view_from_world * + vec4(world_pos, 1.0); + return view_pos.xyz; +} + +fn position_world_to_prev_ndc(world_pos: vec3) -> vec3 { + let ndc_pos = prepass_bindings::previous_view_uniforms.clip_from_world * + vec4(world_pos, 1.0); + return ndc_pos.xyz / ndc_pos.w; +} + // ----------------- // TO CLIP --------- // ----------------- @@ -172,6 +189,19 @@ fn view_z_to_depth_ndc(view_z: f32) -> f32 { #endif } +fn prev_view_z_to_depth_ndc(view_z: f32) -> f32 { +#ifdef VIEW_PROJECTION_PERSPECTIVE + return -perspective_camera_near() / view_z; +#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC + return prepass_bindings::previous_view_uniforms.clip_from_view[3][2] + + view_z * prepass_bindings::previous_view_uniforms.clip_from_view[2][2]; +#else + let ndc_pos = prepass_bindings::previous_view_uniforms.clip_from_view * + vec4(0.0, 0.0, view_z, 1.0); + return ndc_pos.z / ndc_pos.w; +#endif +} + // ----------------- // UV -------------- // ----------------- diff --git a/crates/bevy_pbr/src/render/wireframe.wgsl b/crates/bevy_pbr/src/render/wireframe.wgsl index 981e5e1b1db3e..3873ffa3dd909 100644 --- a/crates/bevy_pbr/src/render/wireframe.wgsl +++ b/crates/bevy_pbr/src/render/wireframe.wgsl @@ -1,12 +1,12 @@ #import bevy_pbr::forward_io::VertexOutput -struct WireframeMaterial { - color: vec4, -}; +struct PushConstants { + color: vec4 +} + +var push_constants: PushConstants; -@group(2) @binding(0) -var material: WireframeMaterial; @fragment fn fragment(in: VertexOutput) -> @location(0) vec4 { - return material.color; + return push_constants.color; } diff --git a/crates/bevy_pbr/src/ssao/mod.rs b/crates/bevy_pbr/src/ssao/mod.rs index 
fd47511da5d16..9098f82773c58 100644 --- a/crates/bevy_pbr/src/ssao/mod.rs +++ b/crates/bevy_pbr/src/ssao/mod.rs @@ -1,19 +1,18 @@ -#![expect(deprecated)] - use crate::NodePbr; use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_core_pipeline::{ core_3d::graph::{Core3d, Node3d}, prelude::Camera3d, prepass::{DepthPrepass, NormalPrepass, ViewPrepassTextures}, }; use bevy_ecs::{ - prelude::{require, Bundle, Component, Entity}, + prelude::{Component, Entity}, query::{Has, QueryItem, With}, reflect::ReflectComponent, - schedule::IntoSystemConfigs, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs, + system::{Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; @@ -36,16 +35,17 @@ use bevy_render::{ view::{Msaa, ViewUniform, ViewUniformOffset, ViewUniforms}, Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; -use bevy_utils::{ - prelude::default, - tracing::{error, warn}, -}; +use bevy_utils::prelude::default; use core::mem; +use tracing::{error, warn}; -const PREPROCESS_DEPTH_SHADER_HANDLE: Handle = Handle::weak_from_u128(102258915420479); -const SSAO_SHADER_HANDLE: Handle = Handle::weak_from_u128(253938746510568); -const SPATIAL_DENOISE_SHADER_HANDLE: Handle = Handle::weak_from_u128(466162052558226); -const SSAO_UTILS_SHADER_HANDLE: Handle = Handle::weak_from_u128(366465052568786); +const PREPROCESS_DEPTH_SHADER_HANDLE: Handle = + weak_handle!("b7f2cc3d-c935-4f5c-9ae2-43d6b0d5659a"); +const SSAO_SHADER_HANDLE: Handle = weak_handle!("9ea355d7-37a2-4cc4-b4d1-5d8ab47b07f5"); +const SPATIAL_DENOISE_SHADER_HANDLE: Handle = + weak_handle!("0f2764a0-b343-471b-b7ce-ef5d636f4fc3"); +const SSAO_UTILS_SHADER_HANDLE: Handle = + weak_handle!("da53c78d-f318-473e-bdff-b388bc50ada2"); /// Plugin for screen space ambient occlusion. 
pub struct ScreenSpaceAmbientOcclusionPlugin; @@ -132,18 +132,6 @@ impl Plugin for ScreenSpaceAmbientOcclusionPlugin { } } -/// Bundle to apply screen space ambient occlusion. -#[derive(Bundle, Default, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `ScreenSpaceAmbientOcclusion` component instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct ScreenSpaceAmbientOcclusionBundle { - pub settings: ScreenSpaceAmbientOcclusion, - pub depth_prepass: DepthPrepass, - pub normal_prepass: NormalPrepass, -} - /// Component to apply screen space ambient occlusion to a 3d camera. /// /// Screen space ambient occlusion (SSAO) approximates small-scale, @@ -158,12 +146,12 @@ pub struct ScreenSpaceAmbientOcclusionBundle { /// Requires that you add [`ScreenSpaceAmbientOcclusionPlugin`] to your app. /// /// It strongly recommended that you use SSAO in conjunction with -/// TAA ([`bevy_core_pipeline::experimental::taa::TemporalAntiAliasing`]). +/// TAA (`TemporalAntiAliasing`). /// Doing so greatly reduces SSAO noise. /// /// SSAO is not supported on `WebGL2`, and is not currently supported on `WebGPU`. 
#[derive(Component, ExtractComponent, Reflect, PartialEq, Clone, Debug)] -#[reflect(Component, Debug, Default, PartialEq)] +#[reflect(Component, Debug, Default, PartialEq, Clone)] #[require(DepthPrepass, NormalPrepass)] #[doc(alias = "Ssao")] pub struct ScreenSpaceAmbientOcclusion { @@ -185,10 +173,8 @@ impl Default for ScreenSpaceAmbientOcclusion { } } -#[deprecated(since = "0.15.0", note = "Renamed to `ScreenSpaceAmbientOcclusion`")] -pub type ScreenSpaceAmbientOcclusionSettings = ScreenSpaceAmbientOcclusion; - #[derive(Reflect, PartialEq, Eq, Hash, Clone, Copy, Default, Debug)] +#[reflect(PartialEq, Hash, Clone, Default)] pub enum ScreenSpaceAmbientOcclusionQualityLevel { Low, Medium, @@ -771,17 +757,9 @@ fn prepare_ssao_bind_groups( } } -#[allow(clippy::needless_range_loop)] fn generate_hilbert_index_lut() -> [[u16; 64]; 64] { - let mut t = [[0; 64]; 64]; - - for x in 0..64 { - for y in 0..64 { - t[x][y] = hilbert_index(x as u16, y as u16); - } - } - - t + use core::array::from_fn; + from_fn(|x| from_fn(|y| hilbert_index(x as u16, y as u16))) } // https://www.shadertoy.com/view/3tB3z3 diff --git a/crates/bevy_pbr/src/ssao/ssao.wgsl b/crates/bevy_pbr/src/ssao/ssao.wgsl index 1fbd73e8d98ac..ac64d5653f7a4 100644 --- a/crates/bevy_pbr/src/ssao/ssao.wgsl +++ b/crates/bevy_pbr/src/ssao/ssao.wgsl @@ -10,7 +10,7 @@ // Source code base on SSRT3 implementation // https://github.com/cdrinmatane/SSRT3 -#import bevy_pbr::ssao_utils::fast_acos +#import bevy_render::maths::fast_acos #import bevy_render::{ view::View, diff --git a/crates/bevy_pbr/src/ssao/ssao_utils.wgsl b/crates/bevy_pbr/src/ssao/ssao_utils.wgsl index ecc5a4a54de2a..be19fa6639e99 100644 --- a/crates/bevy_pbr/src/ssao/ssao_utils.wgsl +++ b/crates/bevy_pbr/src/ssao/ssao_utils.wgsl @@ -11,14 +11,3 @@ fn ssao_multibounce(visibility: f32, base_color: vec3) -> vec3 { let x = vec3(visibility); return max(x, ((x * a + b) * x + c) * x); } - -fn fast_sqrt(x: f32) -> f32 { - return bitcast(0x1fbd1df5 + (bitcast(x) >> 
1u)); -} - -fn fast_acos(in_x: f32) -> f32 { - let x = abs(in_x); - var res = -0.156583 * x + HALF_PI; - res *= fast_sqrt(1.0 - x); - return select(PI - res, res, in_x >= 0.0); -} diff --git a/crates/bevy_pbr/src/ssr/mod.rs b/crates/bevy_pbr/src/ssr/mod.rs index abf3e32220d8c..1ee73da8f0732 100644 --- a/crates/bevy_pbr/src/ssr/mod.rs +++ b/crates/bevy_pbr/src/ssr/mod.rs @@ -1,9 +1,7 @@ //! Screen space reflections implemented via raymarching. -#![expect(deprecated)] - use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_core_pipeline::{ core_3d::{ graph::{Core3d, Node3d}, @@ -14,13 +12,13 @@ use bevy_core_pipeline::{ }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - bundle::Bundle, - component::{require, Component}, + component::Component, entity::Entity, query::{Has, QueryItem, With}, reflect::ReflectComponent, - schedule::IntoSystemConfigs as _, - system::{lifetimeless::Read, Commands, Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{lifetimeless::Read, Commands, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::BevyDefault as _; @@ -41,7 +39,8 @@ use bevy_render::{ view::{ExtractedView, Msaa, ViewTarget, ViewUniformOffset}, Render, RenderApp, RenderSet, }; -use bevy_utils::{info_once, prelude::default}; +use bevy_utils::{once, prelude::default}; +use tracing::info; use crate::{ binding_arrays_are_usable, graph::NodePbr, prelude::EnvironmentMapLight, @@ -50,30 +49,14 @@ use crate::{ ViewLightsUniformOffset, }; -const SSR_SHADER_HANDLE: Handle = Handle::weak_from_u128(10438925299917978850); -const RAYMARCH_SHADER_HANDLE: Handle = Handle::weak_from_u128(8517409683450840946); +const SSR_SHADER_HANDLE: Handle = weak_handle!("0b559df2-0d61-4f53-bf62-aea16cf32787"); +const RAYMARCH_SHADER_HANDLE: Handle = weak_handle!("798cc6fc-6072-4b6c-ab4f-83905fa4a19e"); /// Enables screen-space reflections for a 
camera. /// /// Screen-space reflections are currently only supported with deferred rendering. pub struct ScreenSpaceReflectionsPlugin; -/// A convenient bundle to add screen space reflections to a camera, along with -/// the depth and deferred prepasses required to enable them. -#[derive(Bundle, Default)] -#[deprecated( - since = "0.15.0", - note = "Use the `ScreenSpaceReflections` components instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct ScreenSpaceReflectionsBundle { - /// The component that enables SSR. - pub settings: ScreenSpaceReflections, - /// The depth prepass, needed for SSR. - pub depth_prepass: DepthPrepass, - /// The deferred prepass, needed for SSR. - pub deferred_prepass: DeferredPrepass, -} - /// Add this component to a camera to enable *screen-space reflections* (SSR). /// /// Screen-space reflections currently require deferred rendering in order to @@ -98,7 +81,7 @@ pub struct ScreenSpaceReflectionsBundle { /// bug whereby Naga doesn't generate correct GLSL when sampling depth buffers, /// which is required for screen-space raymarching. #[derive(Clone, Copy, Component, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] #[require(DepthPrepass, DeferredPrepass)] #[doc(alias = "Ssr")] pub struct ScreenSpaceReflections { @@ -142,9 +125,6 @@ pub struct ScreenSpaceReflections { pub use_secant: bool, } -#[deprecated(since = "0.15.0", note = "Renamed to `ScreenSpaceReflections`")] -pub type ScreenSpaceReflectionsSettings = ScreenSpaceReflections; - /// A version of [`ScreenSpaceReflections`] for upload to the GPU. 
/// /// For more information on these fields, see the corresponding documentation in @@ -524,10 +504,10 @@ impl ExtractComponent for ScreenSpaceReflections { fn extract_component(settings: QueryItem<'_, Self::QueryData>) -> Option { if !DEPTH_TEXTURE_SAMPLING_SUPPORTED { - info_once!( + once!(info!( "Disabling screen-space reflections on this platform because depth textures \ aren't supported correctly" - ); + )); return None; } diff --git a/crates/bevy_pbr/src/ssr/raymarch.wgsl b/crates/bevy_pbr/src/ssr/raymarch.wgsl index 0731057287b8c..e149edfbbc239 100644 --- a/crates/bevy_pbr/src/ssr/raymarch.wgsl +++ b/crates/bevy_pbr/src/ssr/raymarch.wgsl @@ -242,9 +242,9 @@ fn depth_raymarch_distance_fn_evaluate( // * The shrink-wrap surface is no longer continuous, so it's possible for rays to miss it. let linear_depth = - 1.0 / textureSampleLevel(depth_prepass_texture, depth_linear_sampler, interp_uv, 0.0); + 1.0 / textureSampleLevel(depth_prepass_texture, depth_linear_sampler, interp_uv, 0u); let unfiltered_depth = - 1.0 / textureSampleLevel(depth_prepass_texture, depth_nearest_sampler, interp_uv, 0.0); + 1.0 / textureSampleLevel(depth_prepass_texture, depth_nearest_sampler, interp_uv, 0u); var max_depth: f32; var min_depth: f32; @@ -286,7 +286,7 @@ struct DepthRayMarchResult { /// Range: `0..=1` as a lerp factor over `ray_start_cs..=ray_end_cs`. hit_t: f32, - /// UV correspindong to `hit_t`. + /// UV corresponding to `hit_t`. hit_uv: vec2, /// The distance that the hit point penetrates into the hit surface. diff --git a/crates/bevy_pbr/src/volumetric_fog/mod.rs b/crates/bevy_pbr/src/volumetric_fog/mod.rs index 0d998b2a06daa..b9f1d60945fdb 100644 --- a/crates/bevy_pbr/src/volumetric_fog/mod.rs +++ b/crates/bevy_pbr/src/volumetric_fog/mod.rs @@ -29,8 +29,6 @@ //! //! 
[Henyey-Greenstein phase function]: https://www.pbr-book.org/4ed/Volume_Scattering/Phase_Functions#TheHenyeyndashGreensteinPhaseFunction -#![expect(deprecated)] - use bevy_app::{App, Plugin}; use bevy_asset::{load_internal_asset, Assets, Handle}; use bevy_color::Color; @@ -39,10 +37,7 @@ use bevy_core_pipeline::core_3d::{ prepare_core_3d_depth_textures, }; use bevy_ecs::{ - bundle::Bundle, - component::{require, Component}, - reflect::ReflectComponent, - schedule::IntoSystemConfigs as _, + component::Component, reflect::ReflectComponent, schedule::IntoScheduleConfigs as _, }; use bevy_image::Image; use bevy_math::{ @@ -55,10 +50,10 @@ use bevy_render::{ render_graph::{RenderGraphApp, ViewNodeRunner}, render_resource::{Shader, SpecializedRenderPipelines}, sync_component::SyncComponentPlugin, - view::{InheritedVisibility, ViewVisibility, Visibility}, + view::Visibility, ExtractSchedule, Render, RenderApp, RenderSet, }; -use bevy_transform::components::{GlobalTransform, Transform}; +use bevy_transform::components::Transform; use render::{ VolumetricFogNode, VolumetricFogPipeline, VolumetricFogUniformBuffer, CUBE_MESH, PLANE_MESH, VOLUMETRIC_FOG_HANDLE, @@ -76,14 +71,14 @@ pub struct VolumetricFogPlugin; /// /// This allows the light to generate light shafts/god rays. #[derive(Clone, Copy, Component, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct VolumetricLight; /// When placed on a [`bevy_core_pipeline::core_3d::Camera3d`], enables /// volumetric fog and volumetric lighting, also known as light shafts or god /// rays. #[derive(Clone, Copy, Component, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct VolumetricFog { /// Color of the ambient light. 
/// @@ -120,34 +115,8 @@ pub struct VolumetricFog { pub step_count: u32, } -#[deprecated(since = "0.15.0", note = "Renamed to `VolumetricFog`")] -pub type VolumetricFogSettings = VolumetricFog; - -/// A convenient [`Bundle`] that contains all components necessary to generate a -/// fog volume. -#[derive(Bundle, Clone, Debug, Default)] -#[deprecated( - since = "0.15.0", - note = "Use the `FogVolume` component instead. Inserting it will now also insert the other components required by it automatically." -)] -pub struct FogVolumeBundle { - /// The actual fog volume. - pub fog_volume: FogVolume, - /// Visibility. - pub visibility: Visibility, - /// Inherited visibility. - pub inherited_visibility: InheritedVisibility, - /// View visibility. - pub view_visibility: ViewVisibility, - /// The local transform. Set this to change the position, and scale of the - /// fog's axis-aligned bounding box (AABB). - pub transform: Transform, - /// The global transform. - pub global_transform: GlobalTransform, -} - #[derive(Clone, Component, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(Transform, Visibility)] pub struct FogVolume { /// The color of the fog. 
diff --git a/crates/bevy_pbr/src/volumetric_fog/render.rs b/crates/bevy_pbr/src/volumetric_fog/render.rs index 76039bbbe448b..07012a72e2f59 100644 --- a/crates/bevy_pbr/src/volumetric_fog/render.rs +++ b/crates/bevy_pbr/src/volumetric_fog/render.rs @@ -2,7 +2,7 @@ use core::array; -use bevy_asset::{AssetId, Handle}; +use bevy_asset::{weak_handle, AssetId, Handle}; use bevy_color::ColorToComponents as _; use bevy_core_pipeline::{ core_3d::Camera3d, @@ -13,7 +13,8 @@ use bevy_ecs::{ component::Component, entity::Entity, query::{Has, QueryItem, With}, - system::{lifetimeless::Read, Commands, Local, Query, Res, ResMut, Resource}, + resource::Resource, + system::{lifetimeless::Read, Commands, Local, Query, Res, ResMut}, world::{FromWorld, World}, }; use bevy_image::{BevyDefault, Image}; @@ -77,21 +78,22 @@ bitflags! { } /// The volumetric fog shader. -pub const VOLUMETRIC_FOG_HANDLE: Handle = Handle::weak_from_u128(17400058287583986650); +pub const VOLUMETRIC_FOG_HANDLE: Handle = + weak_handle!("481f474c-2024-44bb-8f79-f7c05ced95ea"); /// The plane mesh, which is used to render a fog volume that the camera is /// inside. /// /// This mesh is simply stretched to the size of the framebuffer, as when the /// camera is inside a fog volume it's essentially a full-screen effect. -pub const PLANE_MESH: Handle = Handle::weak_from_u128(435245126479971076); +pub const PLANE_MESH: Handle = weak_handle!("92523617-c708-4fd0-b42f-ceb4300c930b"); /// The cube mesh, which is used to render a fog volume that the camera is /// outside. /// /// Note that only the front faces of this cuboid will be rasterized in /// hardware. The back faces will be calculated in the shader via raytracing. -pub const CUBE_MESH: Handle = Handle::weak_from_u128(5023959819001661507); +pub const CUBE_MESH: Handle = weak_handle!("4a1dd661-2d91-4377-a17a-a914e21e277e"); /// The total number of bind group layouts. 
/// @@ -607,7 +609,6 @@ impl SpecializedRenderPipeline for VolumetricFogPipeline { } /// Specializes volumetric fog pipelines for all views with that effect enabled. -#[allow(clippy::too_many_arguments)] pub fn prepare_volumetric_fog_pipelines( mut commands: Commands, pipeline_cache: Res, @@ -627,7 +628,10 @@ pub fn prepare_volumetric_fog_pipelines( >, meshes: Res>, ) { - let plane_mesh = meshes.get(&PLANE_MESH).expect("Plane mesh not found!"); + let Some(plane_mesh) = meshes.get(&PLANE_MESH) else { + // There's an off chance that the mesh won't be prepared yet if `RenderAssetBytesPerFrame` limiting is in use. + return; + }; for ( entity, @@ -693,20 +697,20 @@ pub fn prepare_volumetric_fog_uniforms( render_queue: Res, mut local_from_world_matrices: Local>, ) { - let Some(mut writer) = volumetric_lighting_uniform_buffer.get_writer( - view_targets.iter().len(), - &render_device, - &render_queue, - ) else { - return; - }; - // Do this up front to avoid O(n^2) matrix inversion. local_from_world_matrices.clear(); for (_, _, fog_transform) in fog_volumes.iter() { local_from_world_matrices.push(fog_transform.compute_matrix().inverse()); } + let uniform_count = view_targets.iter().len() * local_from_world_matrices.len(); + + let Some(mut writer) = + volumetric_lighting_uniform_buffer.get_writer(uniform_count, &render_device, &render_queue) + else { + return; + }; + for (view_entity, extracted_view, volumetric_fog) in view_targets.iter() { let world_from_view = extracted_view.world_from_view.compute_matrix(); diff --git a/crates/bevy_pbr/src/wireframe.rs b/crates/bevy_pbr/src/wireframe.rs index 413933135c85a..407062064a72b 100644 --- a/crates/bevy_pbr/src/wireframe.rs +++ b/crates/bevy_pbr/src/wireframe.rs @@ -1,17 +1,63 @@ -use crate::{Material, MaterialPipeline, MaterialPipelineKey, MaterialPlugin, MeshMaterial3d}; -use bevy_app::{Plugin, Startup, Update}; -use bevy_asset::{load_internal_asset, Asset, Assets, Handle}; -use bevy_color::{Color, LinearRgba}; -use 
bevy_ecs::prelude::*; -use bevy_reflect::{std_traits::ReflectDefault, Reflect, TypePath}; +use crate::{ + DrawMesh, MeshPipeline, MeshPipelineKey, RenderMeshInstanceFlags, RenderMeshInstances, + SetMeshBindGroup, SetMeshViewBindGroup, ViewKeyCache, ViewSpecializationTicks, +}; +use bevy_app::{App, Plugin, PostUpdate, Startup, Update}; +use bevy_asset::{ + load_internal_asset, prelude::AssetChanged, weak_handle, AsAssetId, Asset, AssetApp, + AssetEvents, AssetId, Assets, Handle, UntypedAssetId, +}; +use bevy_color::{Color, ColorToComponents}; +use bevy_core_pipeline::core_3d::{ + graph::{Core3d, Node3d}, + Camera3d, +}; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{ + component::Tick, + prelude::*, + query::QueryItem, + system::{lifetimeless::SRes, SystemChangeTick, SystemParamItem}, +}; +use bevy_platform::{ + collections::{HashMap, HashSet}, + hash::FixedHasher, +}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; +use bevy_render::camera::extract_cameras; use bevy_render::{ + batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport}, + camera::ExtractedCamera, extract_resource::ExtractResource, - mesh::{Mesh3d, MeshVertexBufferLayoutRef}, + mesh::{ + allocator::{MeshAllocator, SlabId}, + Mesh3d, MeshVertexBufferLayoutRef, RenderMesh, + }, prelude::*, + render_asset::{ + prepare_assets, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets, + }, + render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, + render_phase::{ + AddRenderCommand, BinnedPhaseItem, BinnedRenderPhasePlugin, BinnedRenderPhaseType, + CachedRenderPipelinePhaseItem, DrawFunctionId, DrawFunctions, PhaseItem, + PhaseItemBatchSetKey, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, + SetItemPipeline, TrackedRenderPass, ViewBinnedRenderPhases, + }, render_resource::*, + renderer::RenderContext, + sync_world::{MainEntity, MainEntityHashMap}, + view::{ + ExtractedView, NoIndirectDrawing, RenderVisibilityRanges, 
RenderVisibleEntities, + RetainedViewEntity, ViewDepthTexture, ViewTarget, + }, + Extract, Render, RenderApp, RenderDebugFlags, RenderSet, }; +use core::{hash::Hash, ops::Range}; +use tracing::error; -pub const WIREFRAME_SHADER_HANDLE: Handle = Handle::weak_from_u128(192598014480025766); +pub const WIREFRAME_SHADER_HANDLE: Handle = + weak_handle!("2646a633-f8e3-4380-87ae-b44d881abbce"); /// A [`Plugin`] that draws wireframes. /// @@ -23,9 +69,20 @@ pub const WIREFRAME_SHADER_HANDLE: Handle = Handle::weak_from_u128(19259 /// /// This is a native only feature. #[derive(Debug, Default)] -pub struct WireframePlugin; +pub struct WireframePlugin { + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} + +impl WireframePlugin { + /// Creates a new [`WireframePlugin`] with the given debug flags. + pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { debug_flags } + } +} + impl Plugin for WireframePlugin { - fn build(&self, app: &mut bevy_app::App) { + fn build(&self, app: &mut App) { load_internal_asset!( app, WIREFRAME_SHADER_HANDLE, @@ -33,24 +90,83 @@ impl Plugin for WireframePlugin { Shader::from_wgsl ); - app.register_type::() - .register_type::() - .register_type::() - .register_type::() - .init_resource::() - .add_plugins(MaterialPlugin::::default()) - .add_systems(Startup, setup_global_wireframe_material) + app.add_plugins(( + BinnedRenderPhasePlugin::::new(self.debug_flags), + RenderAssetPlugin::::default(), + )) + .init_asset::() + .init_resource::>() + .register_type::() + .register_type::() + .register_type::() + .init_resource::() + .init_resource::() + .add_systems(Startup, setup_global_wireframe_material) + .add_systems( + Update, + ( + global_color_changed.run_if(resource_changed::), + wireframe_color_changed, + // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global + // wireframe setting is applied to a mesh on the same frame its wireframe marker 
component is removed. + (apply_wireframe_material, apply_global_wireframe_material).chain(), + ), + ) + .add_systems( + PostUpdate, + check_wireframe_entities_needing_specialization + .after(AssetEvents) + .run_if(resource_exists::), + ); + + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + render_app + .init_resource::() + .init_resource::() + .init_resource::>() + .add_render_command::() + .init_resource::() + .init_resource::>() + .add_render_graph_node::>(Core3d, Node3d::Wireframe) + .add_render_graph_edges( + Core3d, + ( + Node3d::EndMainPass, + Node3d::Wireframe, + Node3d::PostProcessing, + ), + ) .add_systems( - Update, + ExtractSchedule, ( - global_color_changed.run_if(resource_changed::), - wireframe_color_changed, - // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global - // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed. - (apply_wireframe_material, apply_global_wireframe_material).chain(), + extract_wireframe_3d_camera, + extract_wireframe_entities_needing_specialization.after(extract_cameras), + extract_wireframe_materials, + ), + ) + .add_systems( + Render, + ( + specialize_wireframes + .in_set(RenderSet::PrepareMeshes) + .after(prepare_assets::) + .after(prepare_assets::), + queue_wireframes + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::), ), ); } + + fn finish(&self, app: &mut App) { + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + render_app.init_resource::(); + } } /// Enables wireframe rendering for any entity it is attached to. @@ -61,21 +177,261 @@ impl Plugin for WireframePlugin { #[reflect(Component, Default, Debug, PartialEq)] pub struct Wireframe; +pub struct Wireframe3d { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. 
+ pub batch_set_key: Wireframe3dBatchSetKey, + /// The key, which determines which can be batched. + pub bin_key: Wireframe3dBinKey, + /// An entity from which data will be fetched, including the mesh if + /// applicable. + pub representative_entity: (Entity, MainEntity), + /// The ranges of instances. + pub batch_range: Range, + /// An extra index, which is either a dynamic offset or an index in the + /// indirect parameters list. + pub extra_index: PhaseItemExtraIndex, +} + +impl PhaseItem for Wireframe3d { + fn entity(&self) -> Entity { + self.representative_entity.0 + } + + fn main_entity(&self) -> MainEntity { + self.representative_entity.1 + } + + fn draw_function(&self) -> DrawFunctionId { + self.batch_set_key.draw_function + } + + fn batch_range(&self) -> &Range { + &self.batch_range + } + + fn batch_range_mut(&mut self) -> &mut Range { + &mut self.batch_range + } + + fn extra_index(&self) -> PhaseItemExtraIndex { + self.extra_index.clone() + } + + fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range, &mut PhaseItemExtraIndex) { + (&mut self.batch_range, &mut self.extra_index) + } +} + +impl CachedRenderPipelinePhaseItem for Wireframe3d { + fn cached_pipeline(&self) -> CachedRenderPipelineId { + self.batch_set_key.pipeline + } +} + +impl BinnedPhaseItem for Wireframe3d { + type BinKey = Wireframe3dBinKey; + type BatchSetKey = Wireframe3dBatchSetKey; + + fn new( + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, + representative_entity: (Entity, MainEntity), + batch_range: Range, + extra_index: PhaseItemExtraIndex, + ) -> Self { + Self { + batch_set_key, + bin_key, + representative_entity, + batch_range, + extra_index, + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe3dBatchSetKey { + /// The identifier of the render pipeline. + pub pipeline: CachedRenderPipelineId, + + /// The wireframe material asset ID. + pub asset_id: UntypedAssetId, + + /// The function used to draw. 
+ pub draw_function: DrawFunctionId, + /// The ID of the slab of GPU memory that contains vertex data. + /// + /// For non-mesh items, you can fill this with 0 if your items can be + /// multi-drawn, or with a unique value if they can't. + pub vertex_slab: SlabId, + + /// The ID of the slab of GPU memory that contains index data, if present. + /// + /// For non-mesh items, you can safely fill this with `None`. + pub index_slab: Option, +} + +impl PhaseItemBatchSetKey for Wireframe3dBatchSetKey { + fn indexed(&self) -> bool { + self.index_slab.is_some() + } +} + +/// Data that must be identical in order to *batch* phase items together. +/// +/// Note that a *batch set* (if multi-draw is in use) contains multiple batches. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe3dBinKey { + /// The wireframe mesh asset ID. + pub asset_id: UntypedAssetId, +} + +pub struct SetWireframe3dPushConstants; + +impl RenderCommand

for SetWireframe3dPushConstants { + type Param = ( + SRes, + SRes>, + ); + type ViewQuery = (); + type ItemQuery = (); + + #[inline] + fn render<'w>( + item: &P, + _view: (), + _item_query: Option<()>, + (wireframe_instances, wireframe_assets): SystemParamItem<'w, '_, Self::Param>, + pass: &mut TrackedRenderPass<'w>, + ) -> RenderCommandResult { + let Some(wireframe_material) = wireframe_instances.get(&item.main_entity()) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + let Some(wireframe_material) = wireframe_assets.get(*wireframe_material) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + + pass.set_push_constants( + ShaderStages::FRAGMENT, + 0, + bytemuck::bytes_of(&wireframe_material.color), + ); + RenderCommandResult::Success + } +} + +pub type DrawWireframe3d = ( + SetItemPipeline, + SetMeshViewBindGroup<0>, + SetMeshBindGroup<1>, + SetWireframe3dPushConstants, + DrawMesh, +); + +#[derive(Resource, Clone)] +pub struct Wireframe3dPipeline { + mesh_pipeline: MeshPipeline, + shader: Handle, +} + +impl FromWorld for Wireframe3dPipeline { + fn from_world(render_world: &mut World) -> Self { + Wireframe3dPipeline { + mesh_pipeline: render_world.resource::().clone(), + shader: WIREFRAME_SHADER_HANDLE, + } + } +} + +impl SpecializedMeshPipeline for Wireframe3dPipeline { + type Key = MeshPipelineKey; + + fn specialize( + &self, + key: Self::Key, + layout: &MeshVertexBufferLayoutRef, + ) -> Result { + let mut descriptor = self.mesh_pipeline.specialize(key, layout)?; + descriptor.label = Some("wireframe_3d_pipeline".into()); + descriptor.push_constant_ranges.push(PushConstantRange { + stages: ShaderStages::FRAGMENT, + range: 0..16, + }); + let fragment = descriptor.fragment.as_mut().unwrap(); + fragment.shader = self.shader.clone(); + descriptor.primitive.polygon_mode = PolygonMode::Line; + descriptor.depth_stencil.as_mut().unwrap().bias.slope_scale = 1.0; + Ok(descriptor) + } +} + 
+#[derive(Default)] +struct Wireframe3dNode; +impl ViewNode for Wireframe3dNode { + type ViewQuery = ( + &'static ExtractedCamera, + &'static ExtractedView, + &'static ViewTarget, + &'static ViewDepthTexture, + ); + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + (camera, view, target, depth): QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let Some(wireframe_phase) = world.get_resource::>() + else { + return Ok(()); + }; + + let Some(wireframe_phase) = wireframe_phase.get(&view.retained_view_entity) else { + return Ok(()); + }; + + let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { + label: Some("wireframe_3d_pass"), + color_attachments: &[Some(target.get_color_attachment())], + depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)), + timestamp_writes: None, + occlusion_query_set: None, + }); + + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); + } + + if let Err(err) = wireframe_phase.render(&mut render_pass, world, graph.view_entity()) { + error!("Error encountered while rendering the stencil phase {err:?}"); + return Err(NodeRunError::DrawError(err)); + } + + Ok(()) + } +} + /// Sets the color of the [`Wireframe`] of the entity it is attached to. /// /// If this component is present but there's no [`Wireframe`] component, /// it will still affect the color of the wireframe when [`WireframeConfig::global`] is set to true. /// /// This overrides the [`WireframeConfig::default_color`]. -// TODO: consider caching materials based on this color. -// This could blow up in size if people use random colored wireframes for each mesh. -// It will also be important to remove unused materials from the cache. 
#[derive(Component, Debug, Clone, Default, Reflect)] #[reflect(Component, Default, Debug)] pub struct WireframeColor { pub color: Color, } +#[derive(Component, Debug, Clone, Default)] +pub struct ExtractedWireframeColor { + pub color: [f32; 4], +} + /// Disables wireframe rendering for any entity it is attached to. /// It will ignore the [`WireframeConfig`] global setting. /// @@ -96,12 +452,112 @@ pub struct WireframeConfig { pub default_color: Color, } +#[derive(Asset, Reflect, Clone, Debug, Default)] +#[reflect(Clone, Default)] +pub struct WireframeMaterial { + pub color: Color, +} + +pub struct RenderWireframeMaterial { + pub color: [f32; 4], +} + +#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)] +#[reflect(Component, Default, Clone, PartialEq)] +pub struct Mesh3dWireframe(pub Handle); + +impl AsAssetId for Mesh3dWireframe { + type Asset = WireframeMaterial; + + fn as_asset_id(&self) -> AssetId { + self.0.id() + } +} + +impl RenderAsset for RenderWireframeMaterial { + type SourceAsset = WireframeMaterial; + type Param = (); + + fn prepare_asset( + source_asset: Self::SourceAsset, + _asset_id: AssetId, + _param: &mut SystemParamItem, + ) -> Result> { + Ok(RenderWireframeMaterial { + color: source_asset.color.to_linear().to_f32_array(), + }) + } +} + +#[derive(Resource, Deref, DerefMut, Default)] +pub struct RenderWireframeInstances(MainEntityHashMap>); + +#[derive(Clone, Resource, Deref, DerefMut, Debug, Default)] +pub struct WireframeEntitiesNeedingSpecialization { + #[deref] + pub entities: Vec, +} + +#[derive(Resource, Deref, DerefMut, Clone, Debug, Default)] +pub struct WireframeEntitySpecializationTicks { + pub entities: MainEntityHashMap, +} + +/// Stores the [`SpecializedWireframeViewPipelineCache`] for each view. 
+#[derive(Resource, Deref, DerefMut, Default)] +pub struct SpecializedWireframePipelineCache { + // view entity -> view pipeline cache + #[deref] + map: HashMap, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. +#[derive(Deref, DerefMut, Default)] +pub struct SpecializedWireframeViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, +} + #[derive(Resource)] struct GlobalWireframeMaterial { // This handle will be reused when the global config is enabled handle: Handle, } +pub fn extract_wireframe_materials( + mut material_instances: ResMut, + changed_meshes_query: Extract< + Query< + (Entity, &ViewVisibility, &Mesh3dWireframe), + Or<(Changed, Changed)>, + >, + >, + mut removed_visibilities_query: Extract>, + mut removed_materials_query: Extract>, +) { + for (entity, view_visibility, material) in &changed_meshes_query { + if view_visibility.get() { + material_instances.insert(entity.into(), material.id()); + } else { + material_instances.remove(&MainEntity::from(entity)); + } + } + + for entity in removed_visibilities_query + .read() + .chain(removed_materials_query.read()) + { + // Only queue a mesh for removal if we didn't pick it up above. + // It's possible that a necessary component was removed and re-added in + // the same frame. 
+ if !changed_meshes_query.contains(entity) { + material_instances.remove(&MainEntity::from(entity)); + } + } +} + fn setup_global_wireframe_material( mut commands: Commands, mut materials: ResMut>, @@ -110,7 +566,7 @@ fn setup_global_wireframe_material( // Create the handle used for the global material commands.insert_resource(GlobalWireframeMaterial { handle: materials.add(WireframeMaterial { - color: config.default_color.into(), + color: config.default_color, }), }); } @@ -122,22 +578,21 @@ fn global_color_changed( global_material: Res, ) { if let Some(global_material) = materials.get_mut(&global_material.handle) { - global_material.color = config.default_color.into(); + global_material.color = config.default_color; } } /// Updates the wireframe material when the color in [`WireframeColor`] changes -#[allow(clippy::type_complexity)] fn wireframe_color_changed( mut materials: ResMut>, mut colors_changed: Query< - (&mut MeshMaterial3d, &WireframeColor), + (&mut Mesh3dWireframe, &WireframeColor), (With, Changed), >, ) { for (mut handle, wireframe_color) in &mut colors_changed { handle.0 = materials.add(WireframeMaterial { - color: wireframe_color.color.into(), + color: wireframe_color.color, }); } } @@ -149,24 +604,24 @@ fn apply_wireframe_material( mut materials: ResMut>, wireframes: Query< (Entity, Option<&WireframeColor>), - (With, Without>), + (With, Without), >, - no_wireframes: Query, With>)>, + no_wireframes: Query, With)>, mut removed_wireframes: RemovedComponents, global_material: Res, ) { for e in removed_wireframes.read().chain(no_wireframes.iter()) { - if let Some(mut commands) = commands.get_entity(e) { - commands.remove::>(); + if let Ok(mut commands) = commands.get_entity(e) { + commands.remove::(); } } let mut material_to_spawn = vec![]; for (e, maybe_color) in &wireframes { let material = get_wireframe_material(maybe_color, &mut materials, &global_material); - material_to_spawn.push((e, MeshMaterial3d(material))); + material_to_spawn.push((e, 
Mesh3dWireframe(material))); } - commands.insert_or_spawn_batch(material_to_spawn); + commands.try_insert_batch(material_to_spawn); } type WireframeFilter = (With, Without, Without); @@ -177,12 +632,9 @@ fn apply_global_wireframe_material( config: Res, meshes_without_material: Query< (Entity, Option<&WireframeColor>), - (WireframeFilter, Without>), - >, - meshes_with_global_material: Query< - Entity, - (WireframeFilter, With>), + (WireframeFilter, Without), >, + meshes_with_global_material: Query)>, global_material: Res, mut materials: ResMut>, ) { @@ -192,19 +644,17 @@ fn apply_global_wireframe_material( let material = get_wireframe_material(maybe_color, &mut materials, &global_material); // We only add the material handle but not the Wireframe component // This makes it easy to detect which mesh is using the global material and which ones are user specified - material_to_spawn.push((e, MeshMaterial3d(material))); + material_to_spawn.push((e, Mesh3dWireframe(material))); } - commands.insert_or_spawn_batch(material_to_spawn); + commands.try_insert_batch(material_to_spawn); } else { for e in &meshes_with_global_material { - commands - .entity(e) - .remove::>(); + commands.entity(e).remove::(); } } } -/// Gets an handle to a wireframe material with a fallback on the default material +/// Gets a handle to a wireframe material with a fallback on the default material fn get_wireframe_material( maybe_color: Option<&WireframeColor>, wireframe_materials: &mut Assets, @@ -212,7 +662,7 @@ fn get_wireframe_material( ) -> Handle { if let Some(wireframe_color) = maybe_color { wireframe_materials.add(WireframeMaterial { - color: wireframe_color.color.into(), + color: wireframe_color.color, }) } else { // If there's no color specified we can use the global material since it's already set to use the default_color @@ -220,27 +670,241 @@ fn get_wireframe_material( } } -#[derive(Default, AsBindGroup, TypePath, Debug, Clone, Asset)] -pub struct WireframeMaterial { - #[uniform(0)] - 
pub color: LinearRgba, +fn extract_wireframe_3d_camera( + mut wireframe_3d_phases: ResMut>, + cameras: Extract), With>>, + mut live_entities: Local>, + gpu_preprocessing_support: Res, +) { + live_entities.clear(); + for (main_entity, camera, no_indirect_drawing) in &cameras { + if !camera.is_active { + continue; + } + let gpu_preprocessing_mode = gpu_preprocessing_support.min(if !no_indirect_drawing { + GpuPreprocessingMode::Culling + } else { + GpuPreprocessingMode::PreprocessingOnly + }); + + let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0); + wireframe_3d_phases.prepare_for_new_frame(retained_view_entity, gpu_preprocessing_mode); + live_entities.insert(retained_view_entity); + } + + // Clear out all dead views. + wireframe_3d_phases.retain(|camera_entity, _| live_entities.contains(camera_entity)); } -impl Material for WireframeMaterial { - fn fragment_shader() -> ShaderRef { - WIREFRAME_SHADER_HANDLE.into() +pub fn extract_wireframe_entities_needing_specialization( + entities_needing_specialization: Extract>, + mut entity_specialization_ticks: ResMut, + views: Query<&ExtractedView>, + mut specialized_wireframe_pipeline_cache: ResMut, + mut removed_meshes_query: Extract>, + ticks: SystemChangeTick, +) { + for entity in entities_needing_specialization.iter() { + // Update the entity's specialization tick with this run's tick + entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); } - fn specialize( - _pipeline: &MaterialPipeline, - descriptor: &mut RenderPipelineDescriptor, - _layout: &MeshVertexBufferLayoutRef, - _key: MaterialPipelineKey, - ) -> Result<(), SpecializedMeshPipelineError> { - descriptor.primitive.polygon_mode = PolygonMode::Line; - if let Some(depth_stencil) = descriptor.depth_stencil.as_mut() { - depth_stencil.bias.slope_scale = 1.0; + for entity in removed_meshes_query.read() { + for view in &views { + if let Some(specialized_wireframe_pipeline_cache) = + 
specialized_wireframe_pipeline_cache.get_mut(&view.retained_view_entity) + { + specialized_wireframe_pipeline_cache.remove(&MainEntity::from(entity)); + } + } + } +} + +pub fn check_wireframe_entities_needing_specialization( + needs_specialization: Query< + Entity, + Or<( + Changed, + AssetChanged, + Changed, + AssetChanged, + )>, + >, + mut entities_needing_specialization: ResMut, +) { + entities_needing_specialization.clear(); + for entity in &needs_specialization { + entities_needing_specialization.push(entity); + } +} + +pub fn specialize_wireframes( + render_meshes: Res>, + render_mesh_instances: Res, + render_wireframe_instances: Res, + render_visibility_ranges: Res, + wireframe_phases: Res>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, + view_key_cache: Res, + entity_specialization_ticks: Res, + view_specialization_ticks: Res, + mut specialized_material_pipeline_cache: ResMut, + mut pipelines: ResMut>, + pipeline: Res, + pipeline_cache: Res, + ticks: SystemChangeTick, +) { + // Record the retained IDs of all views so that we can expire old + // pipeline IDs. 
+ let mut all_views: HashSet = HashSet::default(); + + for (view, visible_entities) in &views { + all_views.insert(view.retained_view_entity); + + if !wireframe_phases.contains_key(&view.retained_view_entity) { + continue; + } + + let Some(view_key) = view_key_cache.get(&view.retained_view_entity) else { + continue; + }; + + let view_tick = view_specialization_ticks + .get(&view.retained_view_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(view.retained_view_entity) + .or_default(); + + for (_, visible_entity) in visible_entities.iter::() { + if !render_wireframe_instances.contains_key(visible_entity) { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; + let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } + let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { + continue; + }; + + let mut mesh_key = *view_key; + mesh_key |= MeshPipelineKey::from_primitive_topology(mesh.primitive_topology()); + + if render_visibility_ranges.entity_has_crossfading_visibility_ranges(*visible_entity) { + mesh_key |= MeshPipelineKey::VISIBILITY_RANGE_DITHER; + } + + if view_key.contains(MeshPipelineKey::MOTION_VECTOR_PREPASS) { + // If the previous frame have skins or morph targets, note that. 
+ if mesh_instance + .flags + .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_SKIN) + { + mesh_key |= MeshPipelineKey::HAS_PREVIOUS_SKIN; + } + if mesh_instance + .flags + .contains(RenderMeshInstanceFlags::HAS_PREVIOUS_MORPH) + { + mesh_key |= MeshPipelineKey::HAS_PREVIOUS_MORPH; + } + } + + let pipeline_id = + pipelines.specialize(&pipeline_cache, &pipeline, mesh_key, &mesh.layout); + let pipeline_id = match pipeline_id { + Ok(id) => id, + Err(err) => { + error!("{}", err); + continue; + } + }; + + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); + } + } + + // Delete specialized pipelines belonging to views that have expired. + specialized_material_pipeline_cache + .retain(|retained_view_entity, _| all_views.contains(retained_view_entity)); +} + +fn queue_wireframes( + custom_draw_functions: Res>, + render_mesh_instances: Res, + gpu_preprocessing_support: Res, + mesh_allocator: Res, + specialized_wireframe_pipeline_cache: Res, + render_wireframe_instances: Res, + mut wireframe_3d_phases: ResMut>, + mut views: Query<(&ExtractedView, &RenderVisibleEntities)>, +) { + for (view, visible_entities) in &mut views { + let Some(wireframe_phase) = wireframe_3d_phases.get_mut(&view.retained_view_entity) else { + continue; + }; + let draw_wireframe = custom_draw_functions.read().id::(); + + let Some(view_specialized_material_pipeline_cache) = + specialized_wireframe_pipeline_cache.get(&view.retained_view_entity) + else { + continue; + }; + + for (render_entity, visible_entity) in visible_entities.iter::() { + let Some(wireframe_instance) = render_wireframe_instances.get(visible_entity) else { + continue; + }; + let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. 
+ if wireframe_phase.validate_cached_entity(*visible_entity, current_change_tick) { + continue; + } + let Some(mesh_instance) = render_mesh_instances.render_mesh_queue_data(*visible_entity) + else { + continue; + }; + let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + let bin_key = Wireframe3dBinKey { + asset_id: mesh_instance.mesh_asset_id.untyped(), + }; + let batch_set_key = Wireframe3dBatchSetKey { + pipeline: pipeline_id, + asset_id: wireframe_instance.untyped(), + draw_function: draw_wireframe, + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; + wireframe_phase.add( + batch_set_key, + bin_key, + (*render_entity, *visible_entity), + mesh_instance.current_uniform_index, + BinnedRenderPhaseType::mesh( + mesh_instance.should_batch(), + &gpu_preprocessing_support, + ), + current_change_tick, + ); } - Ok(()) } } diff --git a/crates/bevy_picking/Cargo.toml b/crates/bevy_picking/Cargo.toml index 3deba7d21bcba..f02e5237aaa64 100644 --- a/crates/bevy_picking/Cargo.toml +++ b/crates/bevy_picking/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_picking" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides screen picking functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -12,23 +12,32 @@ license = "MIT OR Apache-2.0" bevy_mesh_picking_backend = ["dep:bevy_mesh", "dep:crossbeam-channel"] [dependencies] -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_input = { path = "../bevy_input", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_mesh = { path = "../bevy_mesh", version 
= "0.15.0-dev", optional = true } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev" } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_time = { path = "../bevy_time", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev" } +# bevy +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_input = { path = "../bevy_input", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_mesh = { path = "../bevy_mesh", version = "0.16.0-dev", optional = true } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", +] } +# other crossbeam-channel = { version = "0.5", optional = true } -uuid = { version = "1.1", features = ["v4"] } +uuid = { version = "1.13.1", features = ["v4"] } +tracing = { version = "0.1", default-features = false, features = ["std"] } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. 
+uuid = { version = "1.13.1", default-features = false, features = ["js"] } [lints] workspace = true diff --git a/crates/bevy_picking/LICENSE-APACHE b/crates/bevy_picking/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_picking/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_picking/LICENSE-MIT b/crates/bevy_picking/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_picking/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_picking/src/backend.rs b/crates/bevy_picking/src/backend.rs index 2e950fa804db8..8c781d54e32f4 100644 --- a/crates/bevy_picking/src/backend.rs +++ b/crates/bevy_picking/src/backend.rs @@ -20,7 +20,7 @@ //! - The [`PointerHits`] events produced by a backend do **not** need to be sorted or filtered, all //! that is needed is an unordered list of entities and their [`HitData`]. //! -//! - Backends do not need to consider the [`PickingBehavior`](crate::PickingBehavior) component, though they may +//! - Backends do not need to consider the [`Pickable`](crate::Pickable) component, though they may //! use it for optimization purposes. 
For example, a backend that traverses a spatial hierarchy //! may want to exit early if it intersects an entity that blocks lower entities from being //! picked. @@ -42,7 +42,7 @@ pub mod prelude { pub use super::{ray::RayMap, HitData, PointerHits}; pub use crate::{ pointer::{PointerId, PointerLocation}, - PickSet, PickingBehavior, + PickSet, Pickable, }; } @@ -56,7 +56,7 @@ pub mod prelude { /// ambiguities with picking backends. Take care to ensure such systems are explicitly ordered /// against [`PickSet::Backend`](crate::PickSet::Backend), or better, avoid reading `PointerHits` in `PreUpdate`. #[derive(Event, Debug, Clone, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Clone)] pub struct PointerHits { /// The pointer associated with this hit test. pub pointer: prelude::PointerId, @@ -96,6 +96,7 @@ impl PointerHits { /// Holds data from a successful pointer hit test. See [`HitData::depth`] for important details. #[derive(Clone, Debug, PartialEq, Reflect)] +#[reflect(Clone, PartialEq)] pub struct HitData { /// The camera entity used to detect this hit. Useful when you need to find the ray that was /// casted for this hit when using a raycasting backend. @@ -105,7 +106,8 @@ pub struct HitData { /// distance from the pointer to the hit, measured from the near plane of the camera, to the /// point, in world space. pub depth: f32, - /// The position of the intersection in the world, if the data is available from the backend. + /// The position reported by the backend, if the data is available. Position data may be in any + /// space (e.g. World space, Screen space, Local space), specified by the backend providing it. pub position: Option, /// The normal vector of the hit test, if the data is available from the backend. 
pub normal: Option, @@ -129,15 +131,16 @@ pub mod ray { use crate::backend::prelude::{PointerId, PointerLocation}; use bevy_ecs::prelude::*; use bevy_math::Ray3d; + use bevy_platform::collections::{hash_map::Iter, HashMap}; use bevy_reflect::Reflect; use bevy_render::camera::Camera; use bevy_transform::prelude::GlobalTransform; - use bevy_utils::{hashbrown::hash_map::Iter, HashMap}; use bevy_window::PrimaryWindow; /// Identifies a ray constructed from some (pointer, camera) combination. A pointer can be over /// multiple cameras, which is why a single pointer may have multiple rays. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Reflect)] + #[reflect(Clone, PartialEq, Hash)] pub struct RayId { /// The camera whose projection was used to calculate the ray. pub camera: Entity, @@ -175,7 +178,10 @@ pub mod ray { /// ``` #[derive(Clone, Debug, Default, Resource)] pub struct RayMap { - map: HashMap, + /// Cartesian product of all pointers and all cameras + /// Add your rays here to support picking through indirections, + /// e.g. rendered-to-texture cameras + pub map: HashMap, } impl RayMap { @@ -184,11 +190,6 @@ pub mod ray { self.map.iter() } - /// The hash map of all rays cast in the current frame. - pub fn map(&self) -> &HashMap { - &self.map - } - /// Clears the [`RayMap`] and re-populates it with one ray for each /// combination of pointer entity and camera entity where the pointer /// intersects the camera's viewport. diff --git a/crates/bevy_picking/src/events.rs b/crates/bevy_picking/src/events.rs index 3ccabd0a9bc5b..88b3b9bccc96b 100644 --- a/crates/bevy_picking/src/events.rs +++ b/crates/bevy_picking/src/events.rs @@ -3,7 +3,7 @@ //! //! # Usage //! -//! To receive events from this module, you must use an [`Observer`] +//! To receive events from this module, you must use an [`Observer`] or [`EventReader`] with [`Pointer`] events. //! The simplest example, registering a callback when an entity is hovered over by a pointer, looks like this: //! //! 
```rust @@ -35,24 +35,24 @@ //! + Dragging and dropping: [`DragStart`], [`Drag`], [`DragEnd`], [`DragEnter`], [`DragOver`], [`DragDrop`], [`DragLeave`]. //! //! When received by an observer, these events will always be wrapped by the [`Pointer`] type, which contains -//! general metadata about the pointer and it's location. +//! general metadata about the pointer event. -use core::fmt::Debug; +use core::{fmt::Debug, time::Duration}; use bevy_ecs::{prelude::*, query::QueryData, system::SystemParam, traversal::Traversal}; -use bevy_hierarchy::Parent; +use bevy_input::mouse::MouseScrollUnit; use bevy_math::Vec2; +use bevy_platform::collections::HashMap; +use bevy_platform::time::Instant; use bevy_reflect::prelude::*; use bevy_render::camera::NormalizedRenderTarget; -use bevy_utils::{tracing::debug, Duration, HashMap, Instant}; use bevy_window::Window; +use tracing::debug; use crate::{ backend::{prelude::PointerLocation, HitData}, hover::{HoverMap, PreviousHoverMap}, - pointer::{ - Location, PointerAction, PointerButton, PointerId, PointerInput, PointerMap, PressDirection, - }, + pointer::{Location, PointerAction, PointerButton, PointerId, PointerInput, PointerMap}, }; /// Stores the common data needed for all pointer events. @@ -60,7 +60,7 @@ use crate::{ /// The documentation for the [`pointer_events`] explains the events this module exposes and /// the order in which they fire. #[derive(Clone, PartialEq, Debug, Reflect, Component)] -#[reflect(Component, Debug)] +#[reflect(Component, Debug, Clone)] pub struct Pointer { /// The original target of this picking event, before bubbling pub target: Entity, @@ -73,13 +73,13 @@ pub struct Pointer { pub event: E, } -/// A traversal query (eg it implements [`Traversal`]) intended for use with [`Pointer`] events. +/// A traversal query (i.e. it implements [`Traversal`]) intended for use with [`Pointer`] events. /// /// This will always traverse to the parent, if the entity being visited has one. 
Otherwise, it /// propagates to the pointer's window and stops there. #[derive(QueryData)] pub struct PointerTraversal { - parent: Option<&'static Parent>, + child_of: Option<&'static ChildOf>, window: Option<&'static Window>, } @@ -88,11 +88,11 @@ where E: Debug + Clone + Reflect, { fn traverse(item: Self::Item<'_>, pointer: &Pointer) -> Option { - let PointerTraversalItem { parent, window } = item; + let PointerTraversalItem { child_of, window } = item; // Send event to parent, if it has one. - if let Some(parent) = parent { - return Some(parent.get()); + if let Some(child_of) = child_of { + return Some(child_of.parent()); }; // Otherwise, send it to the window entity (unless this is a window entity). @@ -144,22 +144,25 @@ impl Pointer { } } -/// Fires when a pointer is canceled, and it's current interaction state is dropped. +/// Fires when a pointer is canceled, and its current interaction state is dropped. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Cancel { /// Information about the picking intersection. pub hit: HitData, } -/// Fires when a the pointer crosses into the bounds of the `target` entity. +/// Fires when a pointer crosses into the bounds of the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Over { /// Information about the picking intersection. pub hit: HitData, } -/// Fires when a the pointer crosses out of the bounds of the `target` entity. +/// Fires when a pointer crosses out of the bounds of the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Out { /// Information about the latest prior picking intersection. pub hit: HitData, @@ -167,6 +170,7 @@ pub struct Out { /// Fires when a pointer button is pressed over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Pressed { /// Pointer button pressed to trigger this event. 
pub button: PointerButton, @@ -176,6 +180,7 @@ pub struct Pressed { /// Fires when a pointer button is released over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Released { /// Pointer button lifted to trigger this event. pub button: PointerButton, @@ -186,6 +191,7 @@ pub struct Released { /// Fires when a pointer sends a pointer pressed event followed by a pointer released event, with the same /// `target` entity for both events. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Click { /// Pointer button pressed and lifted to trigger this event. pub button: PointerButton, @@ -197,6 +203,7 @@ pub struct Click { /// Fires while a pointer is moving over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Move { /// Information about the picking intersection. pub hit: HitData, @@ -206,6 +213,7 @@ pub struct Move { /// Fires when the `target` entity receives a pointer pressed event followed by a pointer move event. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragStart { /// Pointer button pressed and moved to trigger this event. pub button: PointerButton, @@ -215,6 +223,7 @@ pub struct DragStart { /// Fires while the `target` entity is being dragged. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct Drag { /// Pointer button pressed and moved to trigger this event. pub button: PointerButton, @@ -226,6 +235,7 @@ pub struct Drag { /// Fires when a pointer is dragging the `target` entity and a pointer released event is received. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragEnd { /// Pointer button pressed, moved, and released to trigger this event. pub button: PointerButton, @@ -235,6 +245,7 @@ pub struct DragEnd { /// Fires when a pointer dragging the `dragged` entity enters the `target` entity. 
#[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragEnter { /// Pointer button pressed to enter drag. pub button: PointerButton, @@ -246,6 +257,7 @@ pub struct DragEnter { /// Fires while the `dragged` entity is being dragged over the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragOver { /// Pointer button pressed while dragging over. pub button: PointerButton, @@ -257,6 +269,7 @@ pub struct DragOver { /// Fires when a pointer dragging the `dragged` entity leaves the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragLeave { /// Pointer button pressed while leaving drag. pub button: PointerButton, @@ -268,6 +281,7 @@ pub struct DragLeave { /// Fires when a pointer drops the `dropped` entity onto the `target` entity. #[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragDrop { /// Pointer button released to drop. pub button: PointerButton, @@ -278,7 +292,8 @@ pub struct DragDrop { } /// Dragging state. -#[derive(Debug, Clone)] +#[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] pub struct DragEntry { /// The position of the pointer at drag start. pub start_pos: Vec2, @@ -286,6 +301,20 @@ pub struct DragEntry { pub latest_pos: Vec2, } +/// Fires while a pointer is scrolling over the `target` entity. +#[derive(Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq)] +pub struct Scroll { + /// The mouse scroll unit. + pub unit: MouseScrollUnit, + /// The horizontal scroll value. + pub x: f32, + /// The vertical scroll value. + pub y: f32, + /// Information about the picking intersection. + pub hit: HitData, +} + /// An entry in the cache that drives the `pointer_events` system, storing additional data /// about pointer button presses. 
#[derive(Debug, Clone, Default)] @@ -347,6 +376,7 @@ pub struct PickingEventWriters<'w> { drag_leave_events: EventWriter<'w, Pointer>, drag_over_events: EventWriter<'w, Pointer>, drag_start_events: EventWriter<'w, Pointer>, + scroll_events: EventWriter<'w, Pointer>, move_events: EventWriter<'w, Pointer>, out_events: EventWriter<'w, Pointer>, over_events: EventWriter<'w, Pointer>, @@ -393,7 +423,7 @@ pub struct PickingEventWriters<'w> { /// Both [`Click`] and [`Released`] target the entity hovered in the *previous frame*, /// rather than the current frame. This is because touch pointers hover nothing /// on the frame they are released. The end effect is that these two events can -/// be received sequentally after an [`Out`] event (but always on the same frame +/// be received sequentially after an [`Out`] event (but always on the same frame /// as the [`Out`] event). /// /// Note: Though it is common for the [`PointerInput`] stream may contain @@ -401,7 +431,6 @@ pub struct PickingEventWriters<'w> { /// determined only by the pointer's *final position*. Since the hover state /// ultimately determines which entities receive events, this may mean that an /// entity can receive events from before or after it was actually hovered. 
-#[allow(clippy::too_many_arguments)] pub fn pointer_events( // Input mut input_events: EventReader, @@ -451,7 +480,7 @@ pub fn pointer_events( Out { hit: hit.clone() }, ); commands.trigger_targets(out_event.clone(), hovered_entity); - event_writers.out_events.send(out_event); + event_writers.out_events.write(out_event); // Possibly send DragLeave events for button in PointerButton::iter() { @@ -469,7 +498,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_leave_event.clone(), hovered_entity); - event_writers.drag_leave_events.send(drag_leave_event); + event_writers.drag_leave_events.write(drag_leave_event); } } } @@ -515,7 +544,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_enter_event.clone(), hovered_entity); - event_writers.drag_enter_events.send(drag_enter_event); + event_writers.drag_enter_events.write(drag_enter_event); } } @@ -527,7 +556,7 @@ pub fn pointer_events( Over { hit: hit.clone() }, ); commands.trigger_targets(over_event.clone(), hovered_entity); - event_writers.over_events.send(over_event); + event_writers.over_events.write(over_event); } } @@ -539,128 +568,126 @@ pub fn pointer_events( } in input_events.read().cloned() { match action { - // Pressed Button - PointerAction::Pressed { direction, button } => { + PointerAction::Press(button) => { let state = pointer_state.get_mut(pointer_id, button); - // The sequence of events emitted depends on if this is a press or a release - match direction { - PressDirection::Pressed => { - // If it's a press, emit a Pressed event and mark the hovered entities as pressed - for (hovered_entity, hit) in hover_map - .get(&pointer_id) - .iter() - .flat_map(|h| h.iter().map(|(entity, data)| (*entity, data.clone()))) - { - let pressed_event = Pointer::new( - pointer_id, - location.clone(), - hovered_entity, - Pressed { - button, - hit: hit.clone(), - }, - ); - commands.trigger_targets(pressed_event.clone(), hovered_entity); - event_writers.pressed_events.send(pressed_event); - // Also 
insert the press into the state - state - .pressing - .insert(hovered_entity, (location.clone(), now, hit)); - } - } - PressDirection::Released => { - // Emit Click and Up events on all the previously hovered entities. - for (hovered_entity, hit) in previous_hover_map - .get(&pointer_id) - .iter() - .flat_map(|h| h.iter().map(|(entity, data)| (*entity, data.clone()))) - { - // If this pointer previously pressed the hovered entity, emit a Click event - if let Some((_, press_instant, _)) = state.pressing.get(&hovered_entity) - { - let click_event = Pointer::new( - pointer_id, - location.clone(), - hovered_entity, - Click { - button, - hit: hit.clone(), - duration: now - *press_instant, - }, - ); - commands.trigger_targets(click_event.clone(), hovered_entity); - event_writers.click_events.send(click_event); - } - // Always send the Released event - let released_event = Pointer::new( - pointer_id, - location.clone(), - hovered_entity, - Released { - button, - hit: hit.clone(), - }, - ); - commands.trigger_targets(released_event.clone(), hovered_entity); - event_writers.released_events.send(released_event); - } + // If it's a press, emit a Pressed event and mark the hovered entities as pressed + for (hovered_entity, hit) in hover_map + .get(&pointer_id) + .iter() + .flat_map(|h| h.iter().map(|(entity, data)| (*entity, data.clone()))) + { + let pressed_event = Pointer::new( + pointer_id, + location.clone(), + hovered_entity, + Pressed { + button, + hit: hit.clone(), + }, + ); + commands.trigger_targets(pressed_event.clone(), hovered_entity); + event_writers.pressed_events.write(pressed_event); + // Also insert the press into the state + state + .pressing + .insert(hovered_entity, (location.clone(), now, hit)); + } + } + PointerAction::Release(button) => { + let state = pointer_state.get_mut(pointer_id, button); - // Then emit the drop events. 
- for (drag_target, drag) in state.dragging.drain() { - // Emit DragDrop - for (dragged_over, hit) in state.dragging_over.iter() { - let drag_drop_event = Pointer::new( - pointer_id, - location.clone(), - *dragged_over, - DragDrop { - button, - dropped: drag_target, - hit: hit.clone(), - }, - ); - commands.trigger_targets(drag_drop_event.clone(), *dragged_over); - event_writers.drag_drop_events.send(drag_drop_event); - } - // Emit DragEnd - let drag_end_event = Pointer::new( - pointer_id, - location.clone(), - drag_target, - DragEnd { - button, - distance: drag.latest_pos - drag.start_pos, - }, - ); - commands.trigger_targets(drag_end_event.clone(), drag_target); - event_writers.drag_end_events.send(drag_end_event); - // Emit DragLeave - for (dragged_over, hit) in state.dragging_over.iter() { - let drag_leave_event = Pointer::new( - pointer_id, - location.clone(), - *dragged_over, - DragLeave { - button, - dragged: drag_target, - hit: hit.clone(), - }, - ); - commands.trigger_targets(drag_leave_event.clone(), *dragged_over); - event_writers.drag_leave_events.send(drag_leave_event); - } - } + // Emit Click and Up events on all the previously hovered entities. 
+ for (hovered_entity, hit) in previous_hover_map + .get(&pointer_id) + .iter() + .flat_map(|h| h.iter().map(|(entity, data)| (*entity, data.clone()))) + { + // If this pointer previously pressed the hovered entity, emit a Click event + if let Some((_, press_instant, _)) = state.pressing.get(&hovered_entity) { + let click_event = Pointer::new( + pointer_id, + location.clone(), + hovered_entity, + Click { + button, + hit: hit.clone(), + duration: now - *press_instant, + }, + ); + commands.trigger_targets(click_event.clone(), hovered_entity); + event_writers.click_events.write(click_event); + } + // Always send the Released event + let released_event = Pointer::new( + pointer_id, + location.clone(), + hovered_entity, + Released { + button, + hit: hit.clone(), + }, + ); + commands.trigger_targets(released_event.clone(), hovered_entity); + event_writers.released_events.write(released_event); + } - // Finally, we can clear the state of everything relating to presses or drags. - state.pressing.clear(); - state.dragging.clear(); - state.dragging_over.clear(); + // Then emit the drop events. 
+ for (drag_target, drag) in state.dragging.drain() { + // Emit DragDrop + for (dragged_over, hit) in state.dragging_over.iter() { + let drag_drop_event = Pointer::new( + pointer_id, + location.clone(), + *dragged_over, + DragDrop { + button, + dropped: drag_target, + hit: hit.clone(), + }, + ); + commands.trigger_targets(drag_drop_event.clone(), *dragged_over); + event_writers.drag_drop_events.write(drag_drop_event); + } + // Emit DragEnd + let drag_end_event = Pointer::new( + pointer_id, + location.clone(), + drag_target, + DragEnd { + button, + distance: drag.latest_pos - drag.start_pos, + }, + ); + commands.trigger_targets(drag_end_event.clone(), drag_target); + event_writers.drag_end_events.write(drag_end_event); + // Emit DragLeave + for (dragged_over, hit) in state.dragging_over.iter() { + let drag_leave_event = Pointer::new( + pointer_id, + location.clone(), + *dragged_over, + DragLeave { + button, + dragged: drag_target, + hit: hit.clone(), + }, + ); + commands.trigger_targets(drag_leave_event.clone(), *dragged_over); + event_writers.drag_leave_events.write(drag_leave_event); } } + + // Finally, we can clear the state of everything relating to presses or drags. + state.pressing.clear(); + state.dragging.clear(); + state.dragging_over.clear(); } // Moved - PointerAction::Moved { delta } => { + PointerAction::Move { delta } => { + if delta == Vec2::ZERO { + continue; // If delta is zero, the following events will not be triggered. 
+ } // Triggers during movement even if not over an entity for button in PointerButton::iter() { let state = pointer_state.get_mut(pointer_id, button); @@ -687,7 +714,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_start_event.clone(), *press_target); - event_writers.drag_start_events.send(drag_start_event); + event_writers.drag_start_events.write(drag_start_event); } // Emit Drag events to the entities we are dragging @@ -707,7 +734,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_event.clone(), *drag_target); - event_writers.drag_events.send(drag_event); + event_writers.drag_events.write(drag_event); // Update drag position drag.latest_pos = location.position; @@ -730,7 +757,7 @@ pub fn pointer_events( }, ); commands.trigger_targets(drag_over_event.clone(), hovered_entity); - event_writers.drag_over_events.send(drag_over_event); + event_writers.drag_over_events.write(drag_over_event); } } } @@ -751,11 +778,33 @@ pub fn pointer_events( }, ); commands.trigger_targets(move_event.clone(), hovered_entity); - event_writers.move_events.send(move_event); + event_writers.move_events.write(move_event); + } + } + PointerAction::Scroll { x, y, unit } => { + for (hovered_entity, hit) in hover_map + .get(&pointer_id) + .iter() + .flat_map(|h| h.iter().map(|(entity, data)| (*entity, data.clone()))) + { + // Emit Scroll events to the entities we are hovering + let scroll_event = Pointer::new( + pointer_id, + location.clone(), + hovered_entity, + Scroll { + unit, + x, + y, + hit: hit.clone(), + }, + ); + commands.trigger_targets(scroll_event.clone(), hovered_entity); + event_writers.scroll_events.write(scroll_event); } } // Canceled - PointerAction::Canceled => { + PointerAction::Cancel => { // Emit a Cancel to the hovered entity. 
for (hovered_entity, hit) in hover_map .get(&pointer_id) @@ -765,7 +814,7 @@ pub fn pointer_events( let cancel_event = Pointer::new(pointer_id, location.clone(), hovered_entity, Cancel { hit }); commands.trigger_targets(cancel_event.clone(), hovered_entity); - event_writers.cancel_events.send(cancel_event); + event_writers.cancel_events.write(cancel_event); } // Clear the state for the canceled pointer pointer_state.clear(pointer_id); diff --git a/crates/bevy_picking/src/hover.rs b/crates/bevy_picking/src/hover.rs index bc91ee79f7a35..6347568c02500 100644 --- a/crates/bevy_picking/src/hover.rs +++ b/crates/bevy_picking/src/hover.rs @@ -10,14 +10,14 @@ use std::collections::HashSet; use crate::{ backend::{self, HitData}, pointer::{PointerAction, PointerId, PointerInput, PointerInteraction, PointerPress}, - PickingBehavior, + Pickable, }; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::prelude::*; use bevy_math::FloatOrd; +use bevy_platform::collections::HashMap; use bevy_reflect::prelude::*; -use bevy_utils::HashMap; type DepthSortedHits = Vec<(Entity, HitData)>; @@ -43,8 +43,8 @@ type OverMap = HashMap; /// between it and the pointer block interactions. /// /// For example, if a pointer is hitting a UI button and a 3d mesh, but the button is in front of -/// the mesh, the UI button will be hovered, but the mesh will not. Unless, the [`PickingBehavior`] -/// component is present with [`should_block_lower`](PickingBehavior::should_block_lower) set to `false`. +/// the mesh, the UI button will be hovered, but the mesh will not. Unless, the [`Pickable`] +/// component is present with [`should_block_lower`](Pickable::should_block_lower) set to `false`. /// /// # Advanced Users /// @@ -64,7 +64,7 @@ pub struct PreviousHoverMap(pub HashMap>); /// This is the final focusing step to determine which entity the pointer is hovering over. 
pub fn generate_hovermap( // Inputs - picking_behavior: Query<&PickingBehavior>, + pickable: Query<&Pickable>, pointers: Query<&PointerId>, mut under_pointer: EventReader, mut pointer_input: EventReader, @@ -81,7 +81,7 @@ pub fn generate_hovermap( &pointers, ); build_over_map(&mut under_pointer, &mut over_map, &mut pointer_input); - build_hover_map(&pointers, picking_behavior, &over_map, &mut hover_map); + build_hover_map(&pointers, pickable, &over_map, &mut hover_map); } /// Clear non-empty local maps, reusing allocated memory. @@ -118,7 +118,7 @@ fn build_over_map( let cancelled_pointers: HashSet = pointer_input .read() .filter_map(|p| { - if let PointerAction::Canceled = p.action { + if let PointerAction::Cancel = p.action { Some(p.pointer_id) } else { None @@ -131,9 +131,7 @@ fn build_over_map( .filter(|e| !cancelled_pointers.contains(&e.pointer)) { let pointer = entities_under_pointer.pointer; - let layer_map = pointer_over_map - .entry(pointer) - .or_insert_with(BTreeMap::new); + let layer_map = pointer_over_map.entry(pointer).or_default(); for (entity, pick_data) in entities_under_pointer.picks.iter() { let layer = entities_under_pointer.order; let hits = layer_map.entry(FloatOrd(layer)).or_default(); @@ -148,12 +146,12 @@ fn build_over_map( } } -/// Build an unsorted set of hovered entities, accounting for depth, layer, and [`PickingBehavior`]. Note -/// that unlike the pointer map, this uses [`PickingBehavior`] to determine if lower entities receive hover +/// Build an unsorted set of hovered entities, accounting for depth, layer, and [`Pickable`]. Note +/// that unlike the pointer map, this uses [`Pickable`] to determine if lower entities receive hover /// focus. Often, only a single entity per pointer will be hovered. 
fn build_hover_map( pointers: &Query<&PointerId>, - picking_behavior: Query<&PickingBehavior>, + pickable: Query<&Pickable>, over_map: &Local, // Output hover_map: &mut HoverMap, @@ -163,11 +161,11 @@ fn build_hover_map( if let Some(layer_map) = over_map.get(pointer_id) { // Note we reverse here to start from the highest layer first. for (entity, pick_data) in layer_map.values().rev().flatten() { - if let Ok(picking_behavior) = picking_behavior.get(*entity) { - if picking_behavior.is_hoverable { + if let Ok(pickable) = pickable.get(*entity) { + if pickable.is_hoverable { pointer_entity_set.insert(*entity, pick_data.clone()); } - if picking_behavior.should_block_lower { + if pickable.should_block_lower { break; } } else { @@ -189,7 +187,7 @@ fn build_hover_map( /// the entity will be considered pressed. If that entity is instead being hovered by both pointers, /// it will be considered hovered. #[derive(Component, Copy, Clone, Default, Eq, PartialEq, Debug, Reflect)] -#[reflect(Component, Default, PartialEq, Debug)] +#[reflect(Component, Default, PartialEq, Debug, Clone)] pub enum PickingInteraction { /// The entity is being pressed down by a pointer. Pressed = 2, @@ -231,7 +229,7 @@ pub fn update_interactions( if let Some(pointers_hovered_entities) = hover_map.get(pointer) { // Insert a sorted list of hit entities into the pointer's interaction component. 
let mut sorted_entities: Vec<_> = pointers_hovered_entities.clone().drain().collect(); - sorted_entities.sort_by_key(|(_entity, hit)| FloatOrd(hit.depth)); + sorted_entities.sort_by_key(|(_, hit)| FloatOrd(hit.depth)); pointer_interaction.sorted_entities = sorted_entities; for hovered_entity in pointers_hovered_entities.iter().map(|(entity, _)| entity) { @@ -244,7 +242,7 @@ pub fn update_interactions( for (hovered_entity, new_interaction) in new_interaction_state.drain() { if let Ok(mut interaction) = interact.get_mut(hovered_entity) { *interaction = new_interaction; - } else if let Some(mut entity_commands) = commands.get_entity(hovered_entity) { + } else if let Ok(mut entity_commands) = commands.get_entity(hovered_entity) { entity_commands.try_insert(new_interaction); } } diff --git a/crates/bevy_picking/src/input.rs b/crates/bevy_picking/src/input.rs index 321ed6b5e6da2..712e612224c7b 100644 --- a/crates/bevy_picking/src/input.rs +++ b/crates/bevy_picking/src/input.rs @@ -13,21 +13,21 @@ use bevy_app::prelude::*; use bevy_ecs::prelude::*; -use bevy_hierarchy::DespawnRecursiveExt; use bevy_input::{ + mouse::MouseWheel, prelude::*, touch::{TouchInput, TouchPhase}, ButtonState, }; use bevy_math::Vec2; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_reflect::prelude::*; use bevy_render::camera::RenderTarget; -use bevy_utils::{tracing::debug, HashMap, HashSet}; use bevy_window::{PrimaryWindow, WindowEvent, WindowRef}; +use tracing::debug; use crate::pointer::{ Location, PointerAction, PointerButton, PointerId, PointerInput, PointerLocation, - PressDirection, }; use crate::PickSet; @@ -48,7 +48,7 @@ pub mod prelude { /// This plugin contains several settings, and is added to the world as a resource after initialization. /// You can configure pointer input settings at runtime by accessing the resource. 
#[derive(Copy, Clone, Resource, Debug, Reflect)] -#[reflect(Resource, Default)] +#[reflect(Resource, Default, Clone)] pub struct PointerInputPlugin { /// Should touch inputs be updated? pub is_touch_enabled: bool, @@ -118,17 +118,17 @@ pub fn mouse_pick_events( WindowEvent::CursorMoved(event) => { let location = Location { target: match RenderTarget::Window(WindowRef::Entity(event.window)) - .normalize(primary_window.get_single().ok()) + .normalize(primary_window.single().ok()) { Some(target) => target, None => continue, }, position: event.position, }; - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( PointerId::Mouse, location, - PointerAction::Moved { + PointerAction::Move { delta: event.position - *cursor_last, }, )); @@ -138,7 +138,7 @@ pub fn mouse_pick_events( WindowEvent::MouseButtonInput(input) => { let location = Location { target: match RenderTarget::Window(WindowRef::Entity(input.window)) - .normalize(primary_window.get_single().ok()) + .normalize(primary_window.single().ok()) { Some(target) => target, None => continue, @@ -151,15 +151,28 @@ pub fn mouse_pick_events( MouseButton::Middle => PointerButton::Middle, MouseButton::Other(_) | MouseButton::Back | MouseButton::Forward => continue, }; - let direction = match input.state { - ButtonState::Pressed => PressDirection::Pressed, - ButtonState::Released => PressDirection::Released, + let action = match input.state { + ButtonState::Pressed => PointerAction::Press(button), + ButtonState::Released => PointerAction::Release(button), }; - pointer_events.send(PointerInput::new( - PointerId::Mouse, - location, - PointerAction::Pressed { direction, button }, - )); + pointer_events.write(PointerInput::new(PointerId::Mouse, location, action)); + } + WindowEvent::MouseWheel(event) => { + let MouseWheel { unit, x, y, window } = *event; + + let location = Location { + target: match RenderTarget::Window(WindowRef::Entity(window)) + .normalize(primary_window.single().ok()) + { + 
Some(target) => target, + None => continue, + }, + position: *cursor_last, + }; + + let action = PointerAction::Scroll { x, y, unit }; + + pointer_events.write(PointerInput::new(PointerId::Mouse, location, action)); } _ => {} } @@ -182,7 +195,7 @@ pub fn touch_pick_events( let pointer = PointerId::Touch(touch.id); let location = Location { target: match RenderTarget::Window(WindowRef::Entity(touch.window)) - .normalize(primary_window.get_single().ok()) + .normalize(primary_window.single().ok()) { Some(target) => target, None => continue, @@ -194,13 +207,10 @@ pub fn touch_pick_events( debug!("Spawning pointer {:?}", pointer); commands.spawn((pointer, PointerLocation::new(location.clone()))); - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, - PointerAction::Pressed { - direction: PressDirection::Pressed, - button: PointerButton::Primary, - }, + PointerAction::Press(PointerButton::Primary), )); touch_cache.insert(touch.id, *touch); @@ -211,10 +221,10 @@ pub fn touch_pick_events( if last_touch == touch { continue; } - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, - PointerAction::Moved { + PointerAction::Move { delta: touch.position - last_touch.position, }, )); @@ -222,21 +232,18 @@ pub fn touch_pick_events( touch_cache.insert(touch.id, *touch); } TouchPhase::Ended => { - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, - PointerAction::Pressed { - direction: PressDirection::Released, - button: PointerButton::Primary, - }, + PointerAction::Release(PointerButton::Primary), )); touch_cache.remove(&touch.id); } TouchPhase::Canceled => { - pointer_events.send(PointerInput::new( + pointer_events.write(PointerInput::new( pointer, location, - PointerAction::Canceled, + PointerAction::Cancel, )); touch_cache.remove(&touch.id); } @@ -267,6 +274,6 @@ pub fn deactivate_touch_pointers( // A hash set is used to prevent 
despawning the same entity twice. for (entity, pointer) in despawn_list.drain() { debug!("Despawning pointer {:?}", pointer); - commands.entity(entity).despawn_recursive(); + commands.entity(entity).despawn(); } } diff --git a/crates/bevy_picking/src/lib.rs b/crates/bevy_picking/src/lib.rs index 03e0269fb50db..6afe86b0d61aa 100644 --- a/crates/bevy_picking/src/lib.rs +++ b/crates/bevy_picking/src/lib.rs @@ -1,12 +1,14 @@ -//! This crate provides 'picking' capabilities for the Bevy game engine. That means, in simple terms, figuring out -//! how to connect up a user's clicks or taps to the entities they are trying to interact with. +//! This crate provides 'picking' capabilities for the Bevy game engine, allowing pointers to +//! interact with entities using hover, click, and drag events. //! //! ## Overview //! //! In the simplest case, this plugin allows you to click on things in the scene. However, it also //! allows you to express more complex interactions, like detecting when a touch input drags a UI -//! element and drops it on a 3d mesh rendered to a different camera. The crate also provides a set of -//! interaction callbacks, allowing you to receive input directly on entities like here: +//! element and drops it on a 3d mesh rendered to a different camera. +//! +//! Pointer events bubble up the entity hierarchy and can be used with observers, allowing you to +//! succinctly express rich interaction behaviors by attaching pointer callbacks to entities: //! //! ```rust //! # use bevy_ecs::prelude::*; @@ -16,7 +18,8 @@ //! # let mut world = World::new(); //! world.spawn(MyComponent) //! .observe(|mut trigger: Trigger>| { -//! // Get the underlying event type +//! println!("I was just clicked!"); +//! // Get the underlying pointer event data //! let click_event: &Pointer = trigger.event(); //! // Stop the event from bubbling up the entity hierarchy //! trigger.propagate(false); @@ -24,16 +27,19 @@ //! ``` //! //! 
At its core, this crate provides a robust abstraction for computing picking state regardless of -//! pointing devices, or what you are hit testing against. It is designed to work with any input, including -//! mouse, touch, pens, or virtual pointers controlled by gamepads. +//! pointing devices, or what you are hit testing against. It is designed to work with any input, +//! including mouse, touch, pens, or virtual pointers controlled by gamepads. //! //! ## Expressive Events //! -//! The events in this module (see [`events`]) cannot be listened to with normal `EventReader`s. -//! Instead, they are dispatched to *observers* attached to specific entities. When events are generated, they -//! bubble up the entity hierarchy starting from their target, until they reach the root or bubbling is halted -//! with a call to [`Trigger::propagate`](bevy_ecs::observer::Trigger::propagate). -//! See [`Observer`] for details. +//! Although the events in this module (see [`events`]) can be listened to with normal +//! `EventReader`s, using observers is often more expressive, with less boilerplate. This is because +//! observers allow you to attach event handling logic to specific entities, as well as make use of +//! event bubbling. +//! +//! When events are generated, they bubble up the entity hierarchy starting from their target, until +//! they reach the root or bubbling is halted with a call to +//! [`Trigger::propagate`](bevy_ecs::observer::Trigger::propagate). See [`Observer`] for details. //! //! This allows you to run callbacks when any children of an entity are interacted with, and leads //! to succinct, expressive code: @@ -54,11 +60,11 @@ //! transform.rotate_local_y(drag.delta.x / 50.0); //! }) //! .observe(|trigger: Trigger>, mut commands: Commands| { -//! println!("Entity {:?} goes BOOM!", trigger.target()); +//! println!("Entity {} goes BOOM!", trigger.target()); //! commands.entity(trigger.target()).despawn(); //! }) //! 
.observe(|trigger: Trigger>, mut events: EventWriter| { -//! events.send(Greeting); +//! events.write(Greeting); //! }); //! } //! ``` @@ -74,8 +80,9 @@ //! #### Input Agnostic //! //! Picking provides a generic Pointer abstraction, which is useful for reacting to many different -//! types of input devices. Pointers can be controlled with anything, whether it's the included mouse -//! or touch inputs, or a custom gamepad input system you write yourself to control a virtual pointer. +//! types of input devices. Pointers can be controlled with anything, whether it's the included +//! mouse or touch inputs, or a custom gamepad input system you write yourself to control a virtual +//! pointer. //! //! ## Robustness //! @@ -90,8 +97,8 @@ //! #### Next Steps //! //! To learn more, take a look at the examples in the -//! [examples](https://github.com/bevyengine/bevy/tree/main/examples/picking). You -//! can read the next section to understand how the plugin works. +//! [examples](https://github.com/bevyengine/bevy/tree/main/examples/picking). You can read the next +//! section to understand how the plugin works. //! //! # The Picking Pipeline //! @@ -101,11 +108,11 @@ //! #### Pointers ([`pointer`](mod@pointer)) //! //! The first stage of the pipeline is to gather inputs and update pointers. This stage is -//! ultimately responsible for generating [`PointerInput`](pointer::PointerInput) events. The provided -//! crate does this automatically for mouse, touch, and pen inputs. If you wanted to implement your own -//! pointer, controlled by some other input, you can do that here. The ordering of events within the -//! [`PointerInput`](pointer::PointerInput) stream is meaningful for events with the same -//! [`PointerId`](pointer::PointerId), but not between different pointers. +//! ultimately responsible for generating [`PointerInput`](pointer::PointerInput) events. The +//! provided crate does this automatically for mouse, touch, and pen inputs. If you wanted to +//! 
implement your own pointer, controlled by some other input, you can do that here. The ordering +//! of events within the [`PointerInput`](pointer::PointerInput) stream is meaningful for events +//! with the same [`PointerId`](pointer::PointerId), but not between different pointers. //! //! Because pointer positions and presses are driven by these events, you can use them to mock //! inputs for testing. @@ -115,18 +122,18 @@ //! //! #### Backend ([`backend`]) //! -//! A picking backend only has one job: reading [`PointerLocation`](pointer::PointerLocation) components, -//! and producing [`PointerHits`](backend::PointerHits). You can find all documentation and types needed to -//! implement a backend at [`backend`]. +//! A picking backend only has one job: reading [`PointerLocation`](pointer::PointerLocation) +//! components, and producing [`PointerHits`](backend::PointerHits). You can find all documentation +//! and types needed to implement a backend at [`backend`]. //! //! You will eventually need to choose which picking backend(s) you want to use. This crate does not -//! supply any backends, and expects you to select some from the other bevy crates or the third-party -//! ecosystem. +//! supply any backends, and expects you to select some from the other bevy crates or the +//! third-party ecosystem. //! //! It's important to understand that you can mix and match backends! For example, you might have a //! backend for your UI, and one for the 3d scene, with each being specialized for their purpose. -//! Bevy provides some backends out of the box, but you can even write your own. It's been -//! made as easy as possible intentionally; the `bevy_mod_raycast` backend is 50 lines of code. +//! Bevy provides some backends out of the box, but you can even write your own. It's been made as +//! easy as possible intentionally; the `bevy_mod_raycast` backend is 50 lines of code. //! //! #### Hover ([`hover`]) //! @@ -135,8 +142,8 @@ //! 
just because a pointer is over an entity, it is not necessarily *hovering* that entity. Although //! multiple backends may be reporting that a pointer is hitting an entity, the hover system needs //! to determine which entities are actually being hovered by this pointer based on the pick depth, -//! order of the backend, and the optional [`PickingBehavior`] component of the entity. In other words, -//! if one entity is in front of another, usually only the topmost one will be hovered. +//! order of the backend, and the optional [`Pickable`] component of the entity. In other +//! words, if one entity is in front of another, usually only the topmost one will be hovered. //! //! #### Events ([`events`]) //! @@ -144,9 +151,8 @@ //! a pointer hovers or clicks an entity. These simple events are then used to generate more complex //! events for dragging and dropping. //! -//! Because it is completely agnostic to the earlier stages of the pipeline, you can easily -//! extend the plugin with arbitrary backends and input methods, yet still use all the high level -//! features. +//! Because it is completely agnostic to the earlier stages of the pipeline, you can easily extend +//! the plugin with arbitrary backends and input methods, yet still use all the high level features. #![deny(missing_docs)] @@ -173,21 +179,24 @@ pub mod prelude { #[doc(hidden)] pub use crate::mesh_picking::{ ray_cast::{MeshRayCast, MeshRayCastSettings, RayCastBackfaces, RayCastVisibility}, - MeshPickingPlugin, MeshPickingSettings, RayCastPickable, + MeshPickingCamera, MeshPickingPlugin, MeshPickingSettings, }; #[doc(hidden)] pub use crate::{ events::*, input::PointerInputPlugin, pointer::PointerButton, DefaultPickingPlugins, - InteractionPlugin, PickingBehavior, PickingPlugin, + InteractionPlugin, Pickable, PickingPlugin, }; } -/// An optional component that overrides default picking behavior for an entity, allowing you to -/// make an entity non-hoverable, or allow items below it to be hovered. 
See the documentation on -/// the fields for more details. +/// An optional component that marks an entity as usable by a backend, and overrides default +/// picking behavior for an entity. +/// +/// This allows you to make an entity non-hoverable, or allow items below it to be hovered. +/// +/// See the documentation on the fields for more details. #[derive(Component, Debug, Clone, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] -pub struct PickingBehavior { +#[reflect(Component, Default, Debug, PartialEq, Clone)] +pub struct Pickable { /// Should this entity block entities below it from being picked? /// /// This is useful if you want picking to continue hitting entities below this one. Normally, @@ -207,7 +216,7 @@ pub struct PickingBehavior { /// element will be marked as hovered. However, if this field is set to `false`, both the UI /// element *and* the mesh will be marked as hovered. /// - /// Entities without the [`PickingBehavior`] component will block by default. + /// Entities without the [`Pickable`] component will block by default. pub should_block_lower: bool, /// If this is set to `false` and `should_block_lower` is set to true, this entity will block @@ -222,11 +231,11 @@ pub struct PickingBehavior { /// components mark it as hovered. This can be combined with the other field /// [`Self::should_block_lower`], which is orthogonal to this one. /// - /// Entities without the [`PickingBehavior`] component are hoverable by default. + /// Entities without the [`Pickable`] component are hoverable by default. pub is_hoverable: bool, } -impl PickingBehavior { +impl Pickable { /// This entity will not block entities beneath it, nor will it emit events. /// /// If a backend reports this entity as being hit, the picking plugin will completely ignore it. 
@@ -236,7 +245,7 @@ impl PickingBehavior { }; } -impl Default for PickingBehavior { +impl Default for Pickable { fn default() -> Self { Self { should_block_lower: true, @@ -291,7 +300,7 @@ impl PluginGroup for DefaultPickingPlugins { /// This plugin contains several settings, and is added to the world as a resource after initialization. You /// can configure picking settings at runtime through the resource. #[derive(Copy, Clone, Debug, Resource, Reflect)] -#[reflect(Resource, Default, Debug)] +#[reflect(Resource, Default, Debug, Clone)] pub struct PickingPlugin { /// Enables and disables all picking features. pub is_enabled: bool, @@ -377,7 +386,8 @@ impl Plugin for PickingPlugin { .chain(), ) .register_type::() - .register_type::() + .register_type::() + .register_type::() .register_type::() .register_type::() .register_type::() @@ -412,6 +422,7 @@ impl Plugin for InteractionPlugin { .add_event::>() .add_event::>() .add_event::>() + .add_event::>() .add_systems( PreUpdate, (generate_hovermap, update_interactions, pointer_events) diff --git a/crates/bevy_picking/src/mesh_picking/mod.rs b/crates/bevy_picking/src/mesh_picking/mod.rs index a848097a6854f..1e7e45bc2d7d6 100644 --- a/crates/bevy_picking/src/mesh_picking/mod.rs +++ b/crates/bevy_picking/src/mesh_picking/mod.rs @@ -1,12 +1,18 @@ //! A [mesh ray casting](ray_cast) backend for [`bevy_picking`](crate). //! //! By default, all meshes are pickable. Picking can be disabled for individual entities -//! by adding [`PickingBehavior::IGNORE`]. +//! by adding [`Pickable::IGNORE`]. //! //! To make mesh picking entirely opt-in, set [`MeshPickingSettings::require_markers`] -//! to `true` and add a [`RayCastPickable`] component to the desired camera and target entities. +//! to `true` and add [`MeshPickingCamera`] and [`Pickable`] components to the desired camera and +//! target entities. //! //! To manually perform mesh ray casts independent of picking, use the [`MeshRayCast`] system parameter. +//! +//! 
## Implementation Notes +//! +//! - The `position` reported in `HitData` is in world space. The `normal` is a vector pointing +//! away from the face, it is not guaranteed to be normalized for scaled meshes. pub mod ray_cast; @@ -21,12 +27,19 @@ use bevy_reflect::prelude::*; use bevy_render::{prelude::*, view::RenderLayers}; use ray_cast::{MeshRayCast, MeshRayCastSettings, RayCastVisibility, SimplifiedMesh}; +/// An optional component that marks cameras that should be used in the [`MeshPickingPlugin`]. +/// +/// Only needed if [`MeshPickingSettings::require_markers`] is set to `true`, and ignored otherwise. +#[derive(Debug, Clone, Default, Component, Reflect)] +#[reflect(Debug, Default, Component)] +pub struct MeshPickingCamera; + /// Runtime settings for the [`MeshPickingPlugin`]. #[derive(Resource, Reflect)] #[reflect(Resource, Default)] pub struct MeshPickingSettings { - /// When set to `true` ray casting will only happen between cameras and entities marked with - /// [`RayCastPickable`]. `false` by default. + /// When set to `true` ray casting will only consider cameras marked with + /// [`MeshPickingCamera`] and entities marked with [`Pickable`]. `false` by default. /// /// This setting is provided to give you fine-grained control over which cameras and entities /// should be used by the mesh picking backend at runtime. @@ -49,12 +62,6 @@ impl Default for MeshPickingSettings { } } -/// An optional component that marks cameras and target entities that should be used in the [`MeshPickingPlugin`]. -/// Only needed if [`MeshPickingSettings::require_markers`] is set to `true`, and ignored otherwise. -#[derive(Debug, Clone, Default, Component, Reflect)] -#[reflect(Component, Default)] -pub struct RayCastPickable; - /// Adds the mesh picking backend to your app. 
#[derive(Clone, Default)] pub struct MeshPickingPlugin; @@ -62,28 +69,28 @@ pub struct MeshPickingPlugin; impl Plugin for MeshPickingPlugin { fn build(&self, app: &mut App) { app.init_resource::() - .register_type::<(RayCastPickable, MeshPickingSettings, SimplifiedMesh)>() + .register_type::() + .register_type::() .add_systems(PreUpdate, update_hits.in_set(PickSet::Backend)); } } /// Casts rays into the scene using [`MeshPickingSettings`] and sends [`PointerHits`] events. -#[allow(clippy::too_many_arguments)] pub fn update_hits( backend_settings: Res, ray_map: Res, - picking_cameras: Query<(&Camera, Option<&RayCastPickable>, Option<&RenderLayers>)>, - pickables: Query<&PickingBehavior>, - marked_targets: Query<&RayCastPickable>, + picking_cameras: Query<(&Camera, Has, Option<&RenderLayers>)>, + pickables: Query<&Pickable>, + marked_targets: Query<&Pickable>, layers: Query<&RenderLayers>, mut ray_cast: MeshRayCast, mut output: EventWriter, ) { - for (&ray_id, &ray) in ray_map.map().iter() { - let Ok((camera, cam_pickable, cam_layers)) = picking_cameras.get(ray_id.camera) else { + for (&ray_id, &ray) in ray_map.iter() { + let Ok((camera, cam_can_pick, cam_layers)) = picking_cameras.get(ray_id.camera) else { continue; }; - if backend_settings.require_markers && cam_pickable.is_none() { + if backend_settings.require_markers && !cam_can_pick { continue; } @@ -99,10 +106,7 @@ pub fn update_hits( let entity_layers = layers.get(entity).cloned().unwrap_or_default(); let render_layers_match = cam_layers.intersects(&entity_layers); - let is_pickable = pickables - .get(entity) - .map(|p| p.is_hoverable) - .unwrap_or(true); + let is_pickable = pickables.get(entity).ok().is_none_or(|p| p.is_hoverable); marker_requirement && render_layers_match && is_pickable }, @@ -127,7 +131,7 @@ pub fn update_hits( .collect::>(); let order = camera.order as f32; if !picks.is_empty() { - output.send(PointerHits::new(ray_id.pointer, picks, order)); + output.write(PointerHits::new(ray_id.pointer, 
picks, order)); } } } diff --git a/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs b/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs index d4ec97e1f3549..9988a96e19bd7 100644 --- a/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs +++ b/crates/bevy_picking/src/mesh_picking/ray_cast/intersections.rs @@ -1,11 +1,12 @@ use bevy_math::{bounding::Aabb3d, Dir3, Mat4, Ray3d, Vec3, Vec3A}; +use bevy_mesh::{Indices, Mesh, PrimitiveTopology}; use bevy_reflect::Reflect; -use bevy_render::mesh::{Indices, Mesh, PrimitiveTopology}; use super::Backfaces; /// Hit data for an intersection between a ray and a mesh. #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct RayMeshHit { /// The point of intersection in world space. pub point: Vec3, @@ -66,160 +67,135 @@ pub fn ray_mesh_intersection + Clone + Copy>( indices: Option<&[I]>, backface_culling: Backfaces, ) -> Option { - // The ray cast can hit the same mesh many times, so we need to track which hit is - // closest to the camera, and record that. - let mut closest_hit_distance = f32::MAX; - let mut closest_hit = None; - let world_to_mesh = mesh_transform.inverse(); - let mesh_space_ray = Ray3d::new( + let ray = Ray3d::new( world_to_mesh.transform_point3(ray.origin), Dir3::new(world_to_mesh.transform_vector3(*ray.direction)).ok()?, ); - if let Some(indices) = indices { + let closest_hit = if let Some(indices) = indices { // The index list must be a multiple of three. If not, the mesh is malformed and the raycast // result might be nonsensical. 
if indices.len() % 3 != 0 { return None; } - for triangle in indices.chunks_exact(3) { - let [a, b, c] = [ - triangle[0].try_into().ok()?, - triangle[1].try_into().ok()?, - triangle[2].try_into().ok()?, - ]; - - let triangle_index = Some(a); - let tri_vertex_positions = &[ - Vec3::from(positions[a]), - Vec3::from(positions[b]), - Vec3::from(positions[c]), - ]; - let tri_normals = vertex_normals.map(|normals| { - [ - Vec3::from(normals[a]), - Vec3::from(normals[b]), - Vec3::from(normals[c]), - ] - }); - - let Some(hit) = triangle_intersection( - tri_vertex_positions, - tri_normals.as_ref(), - closest_hit_distance, - &mesh_space_ray, - backface_culling, - ) else { - continue; - }; - - closest_hit = Some(RayMeshHit { - point: mesh_transform.transform_point3(hit.point), - normal: mesh_transform.transform_vector3(hit.normal), - barycentric_coords: hit.barycentric_coords, - distance: mesh_transform - .transform_vector3(mesh_space_ray.direction * hit.distance) - .length(), - triangle: hit.triangle.map(|tri| { - [ - mesh_transform.transform_point3(tri[0]), - mesh_transform.transform_point3(tri[1]), - mesh_transform.transform_point3(tri[2]), - ] - }), - triangle_index, - }); - closest_hit_distance = hit.distance; - } + indices + .chunks_exact(3) + .enumerate() + .fold( + (f32::MAX, None), + |(closest_distance, closest_hit), (tri_idx, triangle)| { + let [Ok(a), Ok(b), Ok(c)] = [ + triangle[0].try_into(), + triangle[1].try_into(), + triangle[2].try_into(), + ] else { + return (closest_distance, closest_hit); + }; + + let tri_vertices = match [positions.get(a), positions.get(b), positions.get(c)] + { + [Some(a), Some(b), Some(c)] => { + [Vec3::from(*a), Vec3::from(*b), Vec3::from(*c)] + } + _ => return (closest_distance, closest_hit), + }; + + match ray_triangle_intersection(&ray, &tri_vertices, backface_culling) { + Some(hit) if hit.distance >= 0. 
&& hit.distance < closest_distance => { + (hit.distance, Some((tri_idx, hit))) + } + _ => (closest_distance, closest_hit), + } + }, + ) + .1 } else { - for (i, triangle) in positions.chunks_exact(3).enumerate() { - let &[a, b, c] = triangle else { - continue; - }; - let triangle_index = Some(i); - let tri_vertex_positions = &[Vec3::from(a), Vec3::from(b), Vec3::from(c)]; - let tri_normals = vertex_normals.map(|normals| { - [ - Vec3::from(normals[i]), - Vec3::from(normals[i + 1]), - Vec3::from(normals[i + 2]), - ] - }); - - let Some(hit) = triangle_intersection( - tri_vertex_positions, - tri_normals.as_ref(), - closest_hit_distance, - &mesh_space_ray, - backface_culling, - ) else { - continue; - }; - - closest_hit = Some(RayMeshHit { - point: mesh_transform.transform_point3(hit.point), - normal: mesh_transform.transform_vector3(hit.normal), - barycentric_coords: hit.barycentric_coords, - distance: mesh_transform - .transform_vector3(mesh_space_ray.direction * hit.distance) - .length(), - triangle: hit.triangle.map(|tri| { - [ - mesh_transform.transform_point3(tri[0]), - mesh_transform.transform_point3(tri[1]), - mesh_transform.transform_point3(tri[2]), - ] - }), - triangle_index, - }); - closest_hit_distance = hit.distance; - } - } + positions + .chunks_exact(3) + .enumerate() + .fold( + (f32::MAX, None), + |(closest_distance, closest_hit), (tri_idx, triangle)| { + let tri_vertices = [ + Vec3::from(triangle[0]), + Vec3::from(triangle[1]), + Vec3::from(triangle[2]), + ]; + + match ray_triangle_intersection(&ray, &tri_vertices, backface_culling) { + Some(hit) if hit.distance >= 0. 
&& hit.distance < closest_distance => { + (hit.distance, Some((tri_idx, hit))) + } + _ => (closest_distance, closest_hit), + } + }, + ) + .1 + }; - closest_hit -} + closest_hit.and_then(|(tri_idx, hit)| { + let [a, b, c] = match indices { + Some(indices) => { + let triangle = indices.get((tri_idx * 3)..(tri_idx * 3 + 3))?; -fn triangle_intersection( - tri_vertices: &[Vec3; 3], - tri_normals: Option<&[Vec3; 3]>, - max_distance: f32, - ray: &Ray3d, - backface_culling: Backfaces, -) -> Option { - let hit = ray_triangle_intersection(ray, tri_vertices, backface_culling)?; + let [Ok(a), Ok(b), Ok(c)] = [ + triangle[0].try_into(), + triangle[1].try_into(), + triangle[2].try_into(), + ] else { + return None; + }; - if hit.distance < 0.0 || hit.distance > max_distance { - return None; - }; + [a, b, c] + } + None => [tri_idx * 3, tri_idx * 3 + 1, tri_idx * 3 + 2], + }; - let point = ray.get_point(hit.distance); - let u = hit.barycentric_coords.0; - let v = hit.barycentric_coords.1; - let w = 1.0 - u - v; - let barycentric = Vec3::new(u, v, w); + let tri_vertices = match [positions.get(a), positions.get(b), positions.get(c)] { + [Some(a), Some(b), Some(c)] => [Vec3::from(*a), Vec3::from(*b), Vec3::from(*c)], + _ => return None, + }; - let normal = if let Some(normals) = tri_normals { - normals[1] * u + normals[2] * v + normals[0] * w - } else { - (tri_vertices[1] - tri_vertices[0]) - .cross(tri_vertices[2] - tri_vertices[0]) - .normalize() - }; - - Some(RayMeshHit { - point, - normal, - barycentric_coords: barycentric, - distance: hit.distance, - triangle: Some(*tri_vertices), - triangle_index: None, + let tri_normals = vertex_normals.and_then(|normals| { + let [Some(a), Some(b), Some(c)] = [normals.get(a), normals.get(b), normals.get(c)] + else { + return None; + }; + Some([Vec3::from(*a), Vec3::from(*b), Vec3::from(*c)]) + }); + + let point = ray.get_point(hit.distance); + let u = hit.barycentric_coords.0; + let v = hit.barycentric_coords.1; + let w = 1.0 - u - v; + let 
barycentric = Vec3::new(u, v, w); + + let normal = if let Some(normals) = tri_normals { + normals[1] * u + normals[2] * v + normals[0] * w + } else { + (tri_vertices[1] - tri_vertices[0]) + .cross(tri_vertices[2] - tri_vertices[0]) + .normalize() + }; + + Some(RayMeshHit { + point: mesh_transform.transform_point3(point), + normal: mesh_transform.transform_vector3(normal), + barycentric_coords: barycentric, + distance: mesh_transform + .transform_vector3(ray.direction * hit.distance) + .length(), + triangle: Some(tri_vertices.map(|v| mesh_transform.transform_point3(v))), + triangle_index: Some(tri_idx), + }) }) } /// Takes a ray and triangle and computes the intersection. +#[inline] fn ray_triangle_intersection( ray: &Ray3d, triangle: &[Vec3; 3], @@ -313,6 +289,7 @@ pub fn ray_aabb_intersection_3d(ray: Ray3d, aabb: &Aabb3d, model_to_world: &Mat4 #[cfg(test)] mod tests { use bevy_math::Vec3; + use bevy_transform::components::GlobalTransform; use super::*; @@ -336,4 +313,174 @@ mod tests { let result = ray_triangle_intersection(&ray, &triangle, Backfaces::Cull); assert!(result.is_none()); } + + #[test] + fn ray_mesh_intersection_simple() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = None; + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_indices() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = Some(&[0, 1, 2]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + 
indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_indices_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = + Some(&[[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]]); + let indices: Option<&[u16]> = Some(&[0, 1, 2]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = + Some(&[[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]]); + let indices: Option<&[u16]> = None; + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_missing_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = Some(&[]); + let indices: Option<&[u16]> = None; + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_indices_missing_vertex_normals() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals: Option<&[[f32; 3]]> = Some(&[]); + let indices: 
Option<&[u16]> = Some(&[0, 1, 2]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_some()); + } + + #[test] + fn ray_mesh_intersection_not_enough_indices() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = Some(&[0]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_none()); + } + + #[test] + fn ray_mesh_intersection_bad_indices() { + let ray = Ray3d::new(Vec3::ZERO, Dir3::X); + let mesh_transform = GlobalTransform::IDENTITY.compute_matrix(); + let positions = &[V0, V1, V2]; + let vertex_normals = None; + let indices: Option<&[u16]> = Some(&[0, 1, 3]); + let backface_culling = Backfaces::Cull; + + let result = ray_mesh_intersection( + ray, + &mesh_transform, + positions, + vertex_normals, + indices, + backface_culling, + ); + + assert!(result.is_none()); + } } diff --git a/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs b/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs index 2ba76f79606f6..c1f465b96a80a 100644 --- a/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs +++ b/crates/bevy_picking/src/mesh_picking/ray_cast/mod.rs @@ -7,8 +7,8 @@ mod intersections; use bevy_derive::{Deref, DerefMut}; use bevy_math::{bounding::Aabb3d, Ray3d}; +use bevy_mesh::Mesh; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -use bevy_render::mesh::Mesh; use intersections::*; pub use intersections::{ray_aabb_intersection_3d, ray_mesh_intersection, RayMeshHit}; @@ -18,10 +18,11 @@ use bevy_ecs::{prelude::*, system::lifetimeless::Read, system::SystemParam}; use bevy_math::FloatOrd; use bevy_render::{prelude::*, 
primitives::Aabb}; use bevy_transform::components::GlobalTransform; -use bevy_utils::tracing::*; +use tracing::*; /// How a ray cast should handle [`Visibility`]. #[derive(Clone, Copy, Reflect)] +#[reflect(Clone)] pub enum RayCastVisibility { /// Completely ignore visibility checks. Hidden items can still be ray casted against. Any, @@ -89,7 +90,7 @@ impl<'a> Default for MeshRayCastSettings<'a> { /// /// By default, backfaces are culled. #[derive(Copy, Clone, Default, Reflect)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub enum Backfaces { /// Cull backfaces. #[default] @@ -100,14 +101,14 @@ pub enum Backfaces { /// Disables backface culling for [ray casts](MeshRayCast) on this entity. #[derive(Component, Copy, Clone, Default, Reflect)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct RayCastBackfaces; /// A simplified mesh component that can be used for [ray casting](super::MeshRayCast). /// /// Consider using this component for complex meshes that don't need perfectly accurate ray casting. #[derive(Component, Clone, Debug, Deref, DerefMut, Reflect)] -#[reflect(Component, Debug)] +#[reflect(Component, Debug, Clone)] pub struct SimplifiedMesh(pub Handle); type MeshFilter = Or<(With, With, With)>; diff --git a/crates/bevy_picking/src/pointer.rs b/crates/bevy_picking/src/pointer.rs index d8a65d9588a02..e180a9c1bed78 100644 --- a/crates/bevy_picking/src/pointer.rs +++ b/crates/bevy_picking/src/pointer.rs @@ -9,10 +9,11 @@ //! driven by lower-level input devices and consumed by higher-level interaction systems. use bevy_ecs::prelude::*; +use bevy_input::mouse::MouseScrollUnit; use bevy_math::Vec2; +use bevy_platform::collections::HashMap; use bevy_reflect::prelude::*; use bevy_render::camera::{Camera, NormalizedRenderTarget}; -use bevy_utils::HashMap; use bevy_window::PrimaryWindow; use uuid::Uuid; @@ -27,7 +28,7 @@ use crate::backend::HitData; /// stable ID that persists regardless of the Entity they are associated with. 
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq, Hash, Component, Reflect)] #[require(PointerLocation, PointerPress, PointerInteraction)] -#[reflect(Component, Default, Debug, Hash, PartialEq)] +#[reflect(Component, Default, Debug, Hash, PartialEq, Clone)] pub enum PointerId { /// The mouse pointer. #[default] @@ -36,7 +37,7 @@ pub enum PointerId { Touch(u64), /// A custom, uniquely identified pointer. Useful for mocking inputs or implementing a software /// controlled cursor. - #[reflect(ignore)] + #[reflect(ignore, clone)] Custom(Uuid), } @@ -66,7 +67,7 @@ impl PointerId { /// Holds a list of entities this pointer is currently interacting with, sorted from nearest to /// farthest. #[derive(Debug, Default, Clone, Component, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct PointerInteraction { pub(crate) sorted_entities: Vec<(Entity, HitData)>, } @@ -109,7 +110,7 @@ pub fn update_pointer_map(pointers: Query<(Entity, &PointerId)>, mut map: ResMut /// Tracks the state of the pointer's buttons in response to [`PointerInput`] events. #[derive(Debug, Default, Clone, Component, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct PointerPress { primary: bool, secondary: bool, @@ -144,6 +145,7 @@ impl PointerPress { /// The stage of the pointer button press event #[derive(Debug, Clone, Copy, PartialEq, Eq, Reflect)] +#[reflect(Clone, PartialEq)] pub enum PressDirection { /// The pointer button was just pressed Pressed, @@ -153,6 +155,7 @@ pub enum PressDirection { /// The button that was just pressed or released #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Reflect)] +#[reflect(Clone, PartialEq)] pub enum PointerButton { /// The primary pointer button Primary, @@ -171,11 +174,11 @@ impl PointerButton { /// Component that tracks a pointer's current [`Location`]. 
#[derive(Debug, Default, Clone, Component, Reflect, PartialEq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct PointerLocation { /// The [`Location`] of the pointer. Note that a location is both the target, and the position /// on the target. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub location: Option, } @@ -203,7 +206,7 @@ impl PointerLocation { /// render target. It is up to picking backends to associate a Pointer's `Location` with a /// specific `Camera`, if any. #[derive(Debug, Clone, Component, Reflect, PartialEq)] -#[reflect(Component, Debug, PartialEq)] +#[reflect(Component, Debug, PartialEq, Clone)] pub struct Location { /// The [`NormalizedRenderTarget`] associated with the pointer, usually a window. pub target: NormalizedRenderTarget, @@ -223,7 +226,7 @@ impl Location { ) -> bool { if camera .target - .normalize(Some(match primary_window.get_single() { + .normalize(Some(match primary_window.single() { Ok(w) => w, Err(_) => return false, })) @@ -235,36 +238,43 @@ impl Location { camera .logical_viewport_rect() - .map(|rect| rect.contains(self.position)) - .unwrap_or(false) + .is_some_and(|rect| rect.contains(self.position)) } } -/// Types of actions that can be taken by pointers. +/// Event sent to drive a pointer. #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub enum PointerAction { - /// A button has been pressed on the pointer. - Pressed { - /// The press state, either pressed or released. - direction: PressDirection, - /// The button that was pressed. - button: PointerButton, - }, - /// The pointer has moved. - Moved { + /// Causes the pointer to press a button. + Press(PointerButton), + /// Causes the pointer to release a button. + Release(PointerButton), + /// Move the pointer. + Move { /// How much the pointer moved from the previous position. delta: Vec2, }, - /// The pointer has been canceled. The OS can cause this to happen to touch events. 
- Canceled, + /// Scroll the pointer + Scroll { + /// The mouse scroll unit. + unit: MouseScrollUnit, + /// The horizontal scroll value. + x: f32, + /// The vertical scroll value. + y: f32, + }, + /// Cancel the pointer. Often used for touch events. + Cancel, } /// An input event effecting a pointer. #[derive(Event, Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct PointerInput { /// The id of the pointer. pub pointer_id: PointerId, - /// The location of the pointer. For [[`PointerAction::Moved`]], this is the location after the movement. + /// The location of the pointer. For [`PointerAction::Move`], this is the location after the movement. pub location: Location, /// The action that the event describes. pub action: PointerAction, @@ -285,8 +295,8 @@ impl PointerInput { /// Returns true if the `target_button` of this pointer was just pressed. #[inline] pub fn button_just_pressed(&self, target_button: PointerButton) -> bool { - if let PointerAction::Pressed { direction, button } = self.action { - direction == PressDirection::Pressed && button == target_button + if let PointerAction::Press(button) = self.action { + button == target_button } else { false } @@ -295,8 +305,8 @@ impl PointerInput { /// Returns true if the `target_button` of this pointer was just released. 
#[inline] pub fn button_just_released(&self, target_button: PointerButton) -> bool { - if let PointerAction::Pressed { direction, button } = self.action { - direction == PressDirection::Released && button == target_button + if let PointerAction::Release(button) = self.action { + button == target_button } else { false } @@ -309,21 +319,33 @@ impl PointerInput { ) { for event in events.read() { match event.action { - PointerAction::Pressed { direction, button } => { + PointerAction::Press(button) => { + pointers + .iter_mut() + .for_each(|(pointer_id, _, mut pointer)| { + if *pointer_id == event.pointer_id { + match button { + PointerButton::Primary => pointer.primary = true, + PointerButton::Secondary => pointer.secondary = true, + PointerButton::Middle => pointer.middle = true, + } + } + }); + } + PointerAction::Release(button) => { pointers .iter_mut() .for_each(|(pointer_id, _, mut pointer)| { if *pointer_id == event.pointer_id { - let is_pressed = direction == PressDirection::Pressed; match button { - PointerButton::Primary => pointer.primary = is_pressed, - PointerButton::Secondary => pointer.secondary = is_pressed, - PointerButton::Middle => pointer.middle = is_pressed, + PointerButton::Primary => pointer.primary = false, + PointerButton::Secondary => pointer.secondary = false, + PointerButton::Middle => pointer.middle = false, } } }); } - PointerAction::Moved { .. } => { + PointerAction::Move { .. } => { pointers.iter_mut().for_each(|(id, mut pointer, _)| { if *id == event.pointer_id { pointer.location = Some(event.location.to_owned()); diff --git a/crates/bevy_picking/src/window.rs b/crates/bevy_picking/src/window.rs index f55edca2dd1f7..30093da79750b 100644 --- a/crates/bevy_picking/src/window.rs +++ b/crates/bevy_picking/src/window.rs @@ -6,6 +6,10 @@ //! window will be inserted as a pointer hit, listed behind all other pointer //! hits. This means that when the pointer isn't hovering any other entities, //! the picking events will be routed to the window. 
+//! +//! ## Implementation Notes +//! +//! - This backend does not provide `position` or `normal` in `HitData`. use core::f32; @@ -35,7 +39,7 @@ pub fn update_window_hits( { let entity = window_ref.entity(); let hit_data = HitData::new(entity, 0.0, None, None); - output_events.send(PointerHits::new( + output_events.write(PointerHits::new( *pointer_id, vec![(entity, hit_data)], f32::NEG_INFINITY, diff --git a/crates/bevy_platform/Cargo.toml b/crates/bevy_platform/Cargo.toml new file mode 100644 index 0000000000000..bd6402b36a96b --- /dev/null +++ b/crates/bevy_platform/Cargo.toml @@ -0,0 +1,89 @@ +[package] +name = "bevy_platform" +version = "0.16.0-dev" +edition = "2024" +description = "Provides common platform agnostic APIs, as well as platform-specific features for Bevy Engine" +homepage = "https://bevyengine.org" +repository = "https://github.com/bevyengine/bevy" +license = "MIT OR Apache-2.0" +keywords = ["bevy"] + +[features] +default = ["std"] + +# Functionality + +## Adds serialization support through `serde`. +serialize = ["dep:serde", "hashbrown/serde"] + +## Adds integration with Rayon. +rayon = ["dep:rayon", "hashbrown/rayon"] + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. +std = [ + "alloc", + "critical-section?/std", + "portable-atomic/std", + "portable-atomic-util/std", + "spin/std", + "foldhash/std", + "serde?/std", +] + +## Allows access to the `alloc` crate. +alloc = ["portable-atomic-util/alloc", "dep:hashbrown", "serde?/alloc"] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = ["dep:critical-section", "portable-atomic/critical-section"] + +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. 
+web = ["dep:web-time", "dep:getrandom"] + +[dependencies] +cfg-if = "1.0.0" +critical-section = { version = "1.2.0", default-features = false, optional = true } +spin = { version = "0.9.8", default-features = false, features = [ + "mutex", + "spin_mutex", + "rwlock", + "once", + "lazy", + "barrier", +] } +foldhash = { version = "0.1.3", default-features = false } +hashbrown = { version = "0.15.1", features = [ + "equivalent", + "raw-entry", +], optional = true, default-features = false } +serde = { version = "1", default-features = false, optional = true } +rayon = { version = "1", default-features = false, optional = true } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +web-time = { version = "1.1", default-features = false, optional = true } +getrandom = { version = "0.2.0", default-features = false, optional = true, features = [ + "js", +] } + +[target.'cfg(not(all(target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", target_has_atomic = "ptr")))'.dependencies] +portable-atomic = { version = "1", default-features = false, features = [ + "fallback", +] } +spin = { version = "0.9.8", default-features = false, features = [ + "portable_atomic", +] } + +[target.'cfg(not(target_has_atomic = "ptr"))'.dependencies] +portable-atomic-util = { version = "0.2.4", default-features = false } + +[lints] +workspace = true + +[package.metadata.docs.rs] +rustdoc-args = ["-Zunstable-options", "--generate-link-to-definition"] +all-features = true diff --git a/crates/bevy_platform/LICENSE-APACHE b/crates/bevy_platform/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_platform/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_platform/LICENSE-MIT b/crates/bevy_platform/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_platform/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_platform/README.md b/crates/bevy_platform/README.md new file mode 100644 index 0000000000000..4d853751aa09d --- /dev/null +++ b/crates/bevy_platform/README.md @@ -0,0 +1,51 @@ +# Bevy Platform Support + +[![License](https://img.shields.io/badge/license-MIT%2FApache-blue.svg)](https://github.com/bevyengine/bevy#license) +[![Crates.io](https://img.shields.io/crates/v/bevy_platform.svg)](https://crates.io/crates/bevy_platform) +[![Downloads](https://img.shields.io/crates/d/bevy_platform.svg)](https://crates.io/crates/bevy_platform) +[![Docs](https://docs.rs/bevy_platform/badge.svg)](https://docs.rs/bevy_platform/latest/bevy_platform/) +[![Discord](https://img.shields.io/discord/691052431525675048.svg?label=&logo=discord&logoColor=ffffff&color=7389D8&labelColor=6A7EC2)](https://discord.gg/bevy) + +Rust is a fantastic multi-platform language with extensive support for modern targets through its [standard library](https://doc.rust-lang.org/stable/std/). +However, some items within the standard library have alternatives that are better suited for [Bevy](https://crates.io/crates/bevy) and game engines in general. +Additionally, to support embedded and other esoteric platforms, it's often necessary to shed reliance on `std`, making your crate [`no_std`](https://docs.rust-embedded.org/book/intro/no-std.html). + +These needs are handled by this crate, `bevy_platform`. +The goal of this crate is to provide alternatives and extensions to the Rust standard library which minimize friction when developing with and for Bevy across multiple platforms. + +## Getting Started + +Like any dependency from [crates.io](https://crates.io/), use `cargo` to add it to your `Cargo.toml` file: + +```sh +cargo add bevy_platform +``` + +Now, instead of importing from `std` you can use `bevy_platform` for items it has alternative for. +See the documentation for what items are available, and explanations for _why_ you may want to use them. 
+ +## `no_std` Support + +By default, `bevy_platform` will activate the `std` feature, requiring access to the `std` crate for whichever platforms you're targeting. +To use this crate on `no_std` platforms, disable default features: + +```toml +bevy_platform = { version = "x.y.z", default-features = false } +``` + +## Features + +### `std` (_default_) + +Enables usage of the standard library. Note that where this crate has alternatives to the standard library that it considers _better_ than what's provided, it will provide the alternative even when `std` is enabled. +This is explicitly incompatible with `no_std` targets. + +### `alloc` (_default_) + +Enables usage of the [`alloc`](https://doc.rust-lang.org/stable/alloc/) crate. Note that this feature is automatically enabled when enabling `std`. +This is compatible with most `no_std` targets, but not all. + +### `critical-section` + +Switches to using [`critical-section`](https://docs.rs/critical-section/latest/critical_section/) as a backend for synchronization. +You may need to enable this feature on platforms with little to no support for atomic operations. diff --git a/crates/bevy_platform/src/collections/hash_map.rs b/crates/bevy_platform/src/collections/hash_map.rs new file mode 100644 index 0000000000000..ae978a7fce93c --- /dev/null +++ b/crates/bevy_platform/src/collections/hash_map.rs @@ -0,0 +1,1287 @@ +//! Provides [`HashMap`] based on [hashbrown]'s implementation. +//! Unlike [`hashbrown::HashMap`], [`HashMap`] defaults to [`FixedHasher`] +//! instead of [`RandomState`]. +//! This provides determinism by default with an acceptable compromise to denial +//! of service resistance in the context of a game engine. 
+ +use core::{ + fmt::Debug, + hash::{BuildHasher, Hash}, + ops::{Deref, DerefMut, Index}, +}; + +use hashbrown::{hash_map as hb, Equivalent}; + +use crate::hash::FixedHasher; + +#[cfg(feature = "rayon")] +use rayon::prelude::{FromParallelIterator, IntoParallelIterator, ParallelExtend}; + +// Re-exports to match `std::collections::hash_map` +pub use { + crate::hash::{DefaultHasher, RandomState}, + hb::{ + Drain, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, OccupiedEntry, VacantEntry, + Values, ValuesMut, + }, +}; + +// Additional items from `hashbrown` +pub use hb::{ + EntryRef, ExtractIf, OccupiedError, RawEntryBuilder, RawEntryBuilderMut, RawEntryMut, + RawOccupiedEntryMut, +}; + +/// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. +pub type Entry<'a, K, V, S = FixedHasher> = hb::Entry<'a, K, V, S>; + +/// New-type for [`HashMap`](hb::HashMap) with [`FixedHasher`] as the default hashing provider. +/// Can be trivially converted to and from a [hashbrown] [`HashMap`](hb::HashMap) using [`From`]. +/// +/// A new-type is used instead of a type alias due to critical methods like [`new`](hb::HashMap::new) +/// being incompatible with Bevy's choice of default hasher. 
+#[repr(transparent)] +pub struct HashMap(hb::HashMap); + +impl Clone for HashMap +where + hb::HashMap: Clone, +{ + #[inline] + fn clone(&self) -> Self { + Self(self.0.clone()) + } + + #[inline] + fn clone_from(&mut self, source: &Self) { + self.0.clone_from(&source.0); + } +} + +impl Debug for HashMap +where + hb::HashMap: Debug, +{ + #[inline] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + as Debug>::fmt(&self.0, f) + } +} + +impl Default for HashMap +where + hb::HashMap: Default, +{ + #[inline] + fn default() -> Self { + Self(Default::default()) + } +} + +impl PartialEq for HashMap +where + hb::HashMap: PartialEq, +{ + #[inline] + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for HashMap where hb::HashMap: Eq {} + +impl FromIterator for HashMap +where + hb::HashMap: FromIterator, +{ + #[inline] + fn from_iter>(iter: U) -> Self { + Self(FromIterator::from_iter(iter)) + } +} + +impl Index for HashMap +where + hb::HashMap: Index, +{ + type Output = as Index>::Output; + + #[inline] + fn index(&self, index: T) -> &Self::Output { + self.0.index(index) + } +} + +impl IntoIterator for HashMap +where + hb::HashMap: IntoIterator, +{ + type Item = as IntoIterator>::Item; + + type IntoIter = as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a HashMap +where + &'a hb::HashMap: IntoIterator, +{ + type Item = <&'a hb::HashMap as IntoIterator>::Item; + + type IntoIter = <&'a hb::HashMap as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&self.0).into_iter() + } +} + +impl<'a, K, V, S> IntoIterator for &'a mut HashMap +where + &'a mut hb::HashMap: IntoIterator, +{ + type Item = <&'a mut hb::HashMap as IntoIterator>::Item; + + type IntoIter = <&'a mut hb::HashMap as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&mut self.0).into_iter() + } +} + +impl 
Extend for HashMap +where + hb::HashMap: Extend, +{ + #[inline] + fn extend>(&mut self, iter: U) { + self.0.extend(iter); + } +} + +impl From<[(K, V); N]> for HashMap +where + K: Eq + Hash, +{ + fn from(arr: [(K, V); N]) -> Self { + arr.into_iter().collect() + } +} + +impl From> for HashMap { + #[inline] + fn from(value: hb::HashMap) -> Self { + Self(value) + } +} + +impl From> for hb::HashMap { + #[inline] + fn from(value: HashMap) -> Self { + value.0 + } +} + +impl Deref for HashMap { + type Target = hb::HashMap; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for HashMap { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "serialize")] +impl serde::Serialize for HashMap +where + hb::HashMap: serde::Serialize, +{ + #[inline] + fn serialize(&self, serializer: T) -> Result + where + T: serde::Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(feature = "serialize")] +impl<'de, K, V, S> serde::Deserialize<'de> for HashMap +where + hb::HashMap: serde::Deserialize<'de>, +{ + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + Ok(Self(serde::Deserialize::deserialize(deserializer)?)) + } +} + +#[cfg(feature = "rayon")] +impl FromParallelIterator for HashMap +where + hb::HashMap: FromParallelIterator, + T: Send, +{ + fn from_par_iter

(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + Self( as FromParallelIterator>::from_par_iter(par_iter)) + } +} + +#[cfg(feature = "rayon")] +impl IntoParallelIterator for HashMap +where + hb::HashMap: IntoParallelIterator, +{ + type Item = as IntoParallelIterator>::Item; + type Iter = as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + self.0.into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl<'a, K: Sync, V: Sync, S> IntoParallelIterator for &'a HashMap +where + &'a hb::HashMap: IntoParallelIterator, +{ + type Item = <&'a hb::HashMap as IntoParallelIterator>::Item; + type Iter = <&'a hb::HashMap as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + (&self.0).into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl<'a, K: Sync, V: Sync, S> IntoParallelIterator for &'a mut HashMap +where + &'a mut hb::HashMap: IntoParallelIterator, +{ + type Item = <&'a mut hb::HashMap as IntoParallelIterator>::Item; + type Iter = <&'a mut hb::HashMap as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + (&mut self.0).into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl ParallelExtend for HashMap +where + hb::HashMap: ParallelExtend, + T: Send, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + as ParallelExtend>::par_extend(&mut self.0, par_iter); + } +} + +impl HashMap { + /// Creates an empty [`HashMap`]. + /// + /// Refer to [`new`](hb::HashMap::new) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// // Creates a HashMap with zero capacity. + /// let map = HashMap::new(); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn new() -> Self { + Self::with_hasher(FixedHasher) + } + + /// Creates an empty [`HashMap`] with the specified capacity. 
+ /// + /// Refer to [`with_capacity`](hb::HashMap::with_capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// // Creates a HashMap with capacity for at least 5 entries. + /// let map = HashMap::with_capacity(5); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, FixedHasher) + } +} + +impl HashMap { + /// Creates an empty [`HashMap`] which will use the given hash builder to hash + /// keys. + /// + /// Refer to [`with_hasher`](hb::HashMap::with_hasher) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashMap with the provided hasher. + /// let map = HashMap::with_hasher(SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn with_hasher(hash_builder: S) -> Self { + Self(hb::HashMap::with_hasher(hash_builder)) + } + + /// Creates an empty [`HashMap`] with the specified capacity, using `hash_builder` + /// to hash the keys. + /// + /// Refer to [`with_capacity_and_hasher`](hb::HashMap::with_capacity_and_hasher) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashMap with capacity for 5 entries and the provided hasher. 
+ /// let map = HashMap::with_capacity_and_hasher(5, SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert(0usize, "foo"); + /// # assert_eq!(map.get(&0), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { + Self(hb::HashMap::with_capacity_and_hasher( + capacity, + hash_builder, + )) + } + + /// Returns a reference to the map's [`BuildHasher`], or `S` parameter. + /// + /// Refer to [`hasher`](hb::HashMap::hasher) for further details. + #[inline] + pub fn hasher(&self) -> &S { + self.0.hasher() + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// Refer to [`capacity`](hb::HashMap::capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let map = HashMap::with_capacity(5); + /// + /// # let map: HashMap<(), ()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// ``` + #[inline] + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// An iterator visiting all keys in arbitrary order. + /// The iterator element type is `&'a K`. + /// + /// Refer to [`keys`](hb::HashMap::keys) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.keys() { + /// // foo, bar, baz + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.keys().count(), 3); + /// ``` + #[inline] + pub fn keys(&self) -> Keys<'_, K, V> { + self.0.keys() + } + + /// An iterator visiting all values in arbitrary order. + /// The iterator element type is `&'a V`. + /// + /// Refer to [`values`](hb::HashMap::values) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.values() { + /// // 0, 1, 2 + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.values().count(), 3); + /// ``` + #[inline] + pub fn values(&self) -> Values<'_, K, V> { + self.0.values() + } + + /// An iterator visiting all values mutably in arbitrary order. + /// The iterator element type is `&'a mut V`. + /// + /// Refer to [`values`](hb::HashMap::values) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.values_mut() { + /// // 0, 1, 2 + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.values_mut().count(), 3); + /// ``` + #[inline] + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + self.0.values_mut() + } + + /// An iterator visiting all key-value pairs in arbitrary order. + /// The iterator element type is `(&'a K, &'a V)`. + /// + /// Refer to [`iter`](hb::HashMap::iter) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for (key, value) in map.iter() { + /// // ("foo", 0), ("bar", 1), ("baz", 2) + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.iter().count(), 3); + /// ``` + #[inline] + pub fn iter(&self) -> Iter<'_, K, V> { + self.0.iter() + } + + /// An iterator visiting all key-value pairs in arbitrary order, + /// with mutable references to the values. 
+ /// The iterator element type is `(&'a K, &'a mut V)`. + /// + /// Refer to [`iter_mut`](hb::HashMap::iter_mut) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for (key, value) in map.iter_mut() { + /// // ("foo", 0), ("bar", 1), ("baz", 2) + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.iter_mut().count(), 3); + /// ``` + #[inline] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + self.0.iter_mut() + } + + /// Returns the number of elements in the map. + /// + /// Refer to [`len`](hb::HashMap::len) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// assert_eq!(map.len(), 0); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the map contains no elements. + /// + /// Refer to [`is_empty`](hb::HashMap::is_empty) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// assert!(map.is_empty()); + /// + /// map.insert("foo", 0); + /// + /// assert!(!map.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Clears the map, returning all key-value pairs as an iterator. Keeps the + /// allocated memory for reuse. + /// + /// Refer to [`drain`](hb::HashMap::drain) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for (key, value) in map.drain() { + /// // ("foo", 0), ("bar", 1), ("baz", 2) + /// // Note that the above order is not guaranteed + /// } + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn drain(&mut self) -> Drain<'_, K, V> { + self.0.drain() + } + + /// Retains only the elements specified by the predicate. Keeps the + /// allocated memory for reuse. + /// + /// Refer to [`retain`](hb::HashMap::retain) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// map.retain(|key, value| *value == 2); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn retain(&mut self, f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + self.0.retain(f); + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// Refer to [`extract_if`](hb::HashMap::extract_if) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// let extracted = map + /// .extract_if(|key, value| *value == 2) + /// .collect::>(); + /// + /// assert_eq!(map.len(), 2); + /// assert_eq!(extracted.len(), 1); + /// ``` + #[inline] + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, K, V, F> + where + F: FnMut(&K, &mut V) -> bool, + { + self.0.extract_if(f) + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory + /// for reuse. 
+ /// + /// Refer to [`clear`](hb::HashMap::clear) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// map.clear(); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn clear(&mut self) { + self.0.clear(); + } + + /// Creates a consuming iterator visiting all the keys in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `K`. + /// + /// Refer to [`into_keys`](hb::HashMap::into_keys) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.into_keys() { + /// // "foo", "bar", "baz" + /// // Note that the above order is not guaranteed + /// } + /// ``` + #[inline] + pub fn into_keys(self) -> IntoKeys { + self.0.into_keys() + } + + /// Creates a consuming iterator visiting all the values in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `V`. + /// + /// Refer to [`into_values`](hb::HashMap::into_values) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// # + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// for key in map.into_values() { + /// // 0, 1, 2 + /// // Note that the above order is not guaranteed + /// } + /// ``` + #[inline] + pub fn into_values(self) -> IntoValues { + self.0.into_values() + } + + /// Takes the inner [`HashMap`](hb::HashMap) out of this wrapper. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let map: HashMap<&'static str, usize> = HashMap::new(); + /// let map: hashbrown::HashMap<&'static str, usize, _> = map.into_inner(); + /// ``` + #[inline] + pub fn into_inner(self) -> hb::HashMap { + self.0 + } +} + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the [`HashMap`]. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`reserve`](hb::HashMap::reserve) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::with_capacity(5); + /// + /// # let mut map: HashMap<(), ()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.reserve(10); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn reserve(&mut self, additional: usize) { + self.0.reserve(additional); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashMap`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`try_reserve`](hb::HashMap::try_reserve) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::with_capacity(5); + /// + /// # let mut map: HashMap<(), ()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.try_reserve(10).expect("Out of Memory!"); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), hashbrown::TryReserveError> { + self.0.try_reserve(additional) + } + + /// Shrinks the capacity of the map as much as possible. 
It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to_fit`](hb::HashMap::shrink_to_fit) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::with_capacity(5); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// assert!(map.capacity() >= 5); + /// + /// map.shrink_to_fit(); + /// + /// assert_eq!(map.capacity(), 3); + /// ``` + #[inline] + pub fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit(); + } + + /// Shrinks the capacity of the map with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to`](hb::HashMap::shrink_to) for further details. + #[inline] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.0.shrink_to(min_capacity); + } + + /// Gets the given key's corresponding entry in the map for in-place manipulation. + /// + /// Refer to [`entry`](hb::HashMap::entry) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// let value = map.entry("foo").or_insert(0); + /// # + /// # assert_eq!(*value, 0); + /// ``` + #[inline] + pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S> { + self.0.entry(key) + } + + /// Gets the given key's corresponding entry by reference in the map for in-place manipulation. + /// + /// Refer to [`entry_ref`](hb::HashMap::entry_ref) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// # let mut map: HashMap<&'static str, usize> = map; + /// + /// let value = map.entry_ref("foo").or_insert(0); + /// # + /// # assert_eq!(*value, 0); + /// ``` + #[inline] + pub fn entry_ref<'a, 'b, Q>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.entry_ref(key) + } + + /// Returns a reference to the value corresponding to the key. + /// + /// Refer to [`get`](hb::HashMap::get) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get("foo"), Some(&0)); + /// ``` + #[inline] + pub fn get(&self, k: &Q) -> Option<&V> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get(k) + } + + /// Returns the key-value pair corresponding to the supplied key. + /// + /// Refer to [`get_key_value`](hb::HashMap::get_key_value) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get_key_value("foo"), Some((&"foo", &0))); + /// ``` + #[inline] + pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_key_value(k) + } + + /// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value. + /// + /// Refer to [`get_key_value_mut`](hb::HashMap::get_key_value_mut) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get_key_value_mut("foo"), Some((&"foo", &mut 0))); + /// ``` + #[inline] + pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_key_value_mut(k) + } + + /// Returns `true` if the map contains a value for the specified key. + /// + /// Refer to [`contains_key`](hb::HashMap::contains_key) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert!(map.contains_key("foo")); + /// ``` + #[inline] + pub fn contains_key(&self, k: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.0.contains_key(k) + } + + /// Returns a mutable reference to the value corresponding to the key. + /// + /// Refer to [`get_mut`](hb::HashMap::get_mut) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get_mut("foo"), Some(&mut 0)); + /// ``` + #[inline] + pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_mut(k) + } + + /// Attempts to get mutable references to `N` values in the map at once. + /// + /// Refer to [`get_many_mut`](hb::HashMap::get_many_mut) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// let result = map.get_many_mut(["foo", "bar"]); + /// + /// assert_eq!(result, [Some(&mut 0), Some(&mut 1)]); + /// ``` + #[inline] + pub fn get_many_mut(&mut self, ks: [&Q; N]) -> [Option<&'_ mut V>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_many_mut(ks) + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys. + /// + /// Refer to [`get_many_key_value_mut`](hb::HashMap::get_many_key_value_mut) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// map.insert("bar", 1); + /// map.insert("baz", 2); + /// + /// let result = map.get_many_key_value_mut(["foo", "bar"]); + /// + /// assert_eq!(result, [Some((&"foo", &mut 0)), Some((&"bar", &mut 1))]); + /// ``` + #[inline] + pub fn get_many_key_value_mut( + &mut self, + ks: [&Q; N], + ) -> [Option<(&'_ K, &'_ mut V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get_many_key_value_mut(ks) + } + + /// Inserts a key-value pair into the map. + /// + /// Refer to [`insert`](hb::HashMap::insert) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.get("foo"), Some(&0)); + /// ``` + #[inline] + pub fn insert(&mut self, k: K, v: V) -> Option { + self.0.insert(k, v) + } + + /// Tries to insert a key-value pair into the map, and returns + /// a mutable reference to the value in the entry. + /// + /// Refer to [`try_insert`](hb::HashMap::try_insert) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.try_insert("foo", 0).unwrap(); + /// + /// assert!(map.try_insert("foo", 1).is_err()); + /// ``` + #[inline] + pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, OccupiedError<'_, K, V, S>> { + self.0.try_insert(key, value) + } + + /// Removes a key from the map, returning the value at the key if the key + /// was previously in the map. Keeps the allocated memory for reuse. + /// + /// Refer to [`remove`](hb::HashMap::remove) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.remove("foo"), Some(0)); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn remove(&mut self, k: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + { + self.0.remove(k) + } + + /// Removes a key from the map, returning the stored key and value if the + /// key was previously in the map. Keeps the allocated memory for reuse. + /// + /// Refer to [`remove_entry`](hb::HashMap::remove_entry) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// map.insert("foo", 0); + /// + /// assert_eq!(map.remove_entry("foo"), Some(("foo", 0))); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.remove_entry(k) + } + + /// Returns the total amount of memory allocated internally by the hash + /// set, in bytes. + /// + /// Refer to [`allocation_size`](hb::HashMap::allocation_size) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashMap; + /// let mut map = HashMap::new(); + /// + /// assert_eq!(map.allocation_size(), 0); + /// + /// map.insert("foo", 0u32); + /// + /// assert!(map.allocation_size() >= size_of::<&'static str>() + size_of::()); + /// ``` + #[inline] + pub fn allocation_size(&self) -> usize { + self.0.allocation_size() + } + + /// Insert a key-value pair into the map without checking + /// if the key already exists in the map. + /// + /// Refer to [`insert_unique_unchecked`](hb::HashMap::insert_unique_unchecked) for further details. + /// + /// # Safety + /// + /// This operation is safe if a key does not exist in the map. + /// + /// However, if a key exists in the map already, the behavior is unspecified: + /// this operation may panic, loop forever, or any following operation with the map + /// may panic, loop forever or return arbitrary result. + /// + /// That said, this operation (and following operations) are guaranteed to + /// not violate memory safety. + /// + /// However this operation is still unsafe because the resulting `HashMap` + /// may be passed to unsafe code which does expect the map to behave + /// correctly, and would cause unsoundness as a result. + #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn insert_unique_unchecked(&mut self, key: K, value: V) -> (&K, &mut V) { + // SAFETY: safety contract is ensured by the caller. + unsafe { self.0.insert_unique_unchecked(key, value) } + } + + /// Attempts to get mutable references to `N` values in the map at once, without validating that + /// the values are unique. + /// + /// Refer to [`get_many_unchecked_mut`](hb::HashMap::get_many_unchecked_mut) for further details. + /// + /// Returns an array of length `N` with the results of each query. `None` will be used if + /// the key is missing. 
+ /// + /// For a safe alternative see [`get_many_mut`](`HashMap::get_many_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. + /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn get_many_unchecked_mut( + &mut self, + keys: [&Q; N], + ) -> [Option<&'_ mut V>; N] + where + Q: Hash + Equivalent + ?Sized, + { + // SAFETY: safety contract is ensured by the caller. + unsafe { self.0.get_many_unchecked_mut(keys) } + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys, without validating that the values are unique. + /// + /// Refer to [`get_many_key_value_unchecked_mut`](hb::HashMap::get_many_key_value_unchecked_mut) for further details. + /// + /// Returns an array of length `N` with the results of each query. `None` will be returned if + /// any of the keys are missing. + /// + /// For a safe alternative see [`get_many_key_value_mut`](`HashMap::get_many_key_value_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. + /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn get_many_key_value_unchecked_mut( + &mut self, + keys: [&Q; N], + ) -> [Option<(&'_ K, &'_ mut V)>; N] + where + Q: Hash + Equivalent + ?Sized, + { + // SAFETY: safety contract is ensured by the caller. 
+ unsafe { self.0.get_many_key_value_unchecked_mut(keys) } + } +} diff --git a/crates/bevy_platform/src/collections/hash_set.rs b/crates/bevy_platform/src/collections/hash_set.rs new file mode 100644 index 0000000000000..7950e946db164 --- /dev/null +++ b/crates/bevy_platform/src/collections/hash_set.rs @@ -0,0 +1,1078 @@ +//! Provides [`HashSet`] based on [hashbrown]'s implementation. +//! Unlike [`hashbrown::HashSet`], [`HashSet`] defaults to [`FixedHasher`] +//! instead of [`RandomState`](crate::hash::RandomState). +//! This provides determinism by default with an acceptable compromise to denial +//! of service resistance in the context of a game engine. + +use core::{ + fmt::Debug, + hash::{BuildHasher, Hash}, + ops::{ + BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Deref, DerefMut, Sub, + SubAssign, + }, +}; + +use hashbrown::{hash_set as hb, Equivalent}; + +use crate::hash::FixedHasher; + +#[cfg(feature = "rayon")] +use rayon::prelude::{FromParallelIterator, IntoParallelIterator, ParallelExtend}; + +// Re-exports to match `std::collections::hash_set` +pub use hb::{Difference, Drain, Intersection, IntoIter, Iter, SymmetricDifference, Union}; + +// Additional items from `hashbrown` +pub use hb::{ExtractIf, OccupiedEntry, VacantEntry}; + +/// Shortcut for [`Entry`](hb::Entry) with [`FixedHasher`] as the default hashing provider. +pub type Entry<'a, T, S = FixedHasher> = hb::Entry<'a, T, S>; + +/// New-type for [`HashSet`](hb::HashSet) with [`FixedHasher`] as the default hashing provider. +/// Can be trivially converted to and from a [hashbrown] [`HashSet`](hb::HashSet) using [`From`]. +/// +/// A new-type is used instead of a type alias due to critical methods like [`new`](hb::HashSet::new) +/// being incompatible with Bevy's choice of default hasher. 
+#[repr(transparent)] +pub struct HashSet(hb::HashSet); + +impl Clone for HashSet +where + hb::HashSet: Clone, +{ + #[inline] + fn clone(&self) -> Self { + Self(self.0.clone()) + } + + #[inline] + fn clone_from(&mut self, source: &Self) { + self.0.clone_from(&source.0); + } +} + +impl Debug for HashSet +where + hb::HashSet: Debug, +{ + #[inline] + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + as Debug>::fmt(&self.0, f) + } +} + +impl Default for HashSet +where + hb::HashSet: Default, +{ + #[inline] + fn default() -> Self { + Self(Default::default()) + } +} + +impl PartialEq for HashSet +where + hb::HashSet: PartialEq, +{ + #[inline] + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for HashSet where hb::HashSet: Eq {} + +impl FromIterator for HashSet +where + hb::HashSet: FromIterator, +{ + #[inline] + fn from_iter>(iter: U) -> Self { + Self(FromIterator::from_iter(iter)) + } +} + +impl IntoIterator for HashSet +where + hb::HashSet: IntoIterator, +{ + type Item = as IntoIterator>::Item; + + type IntoIter = as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl<'a, T, S> IntoIterator for &'a HashSet +where + &'a hb::HashSet: IntoIterator, +{ + type Item = <&'a hb::HashSet as IntoIterator>::Item; + + type IntoIter = <&'a hb::HashSet as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&self.0).into_iter() + } +} + +impl<'a, T, S> IntoIterator for &'a mut HashSet +where + &'a mut hb::HashSet: IntoIterator, +{ + type Item = <&'a mut hb::HashSet as IntoIterator>::Item; + + type IntoIter = <&'a mut hb::HashSet as IntoIterator>::IntoIter; + + #[inline] + fn into_iter(self) -> Self::IntoIter { + (&mut self.0).into_iter() + } +} + +impl Extend for HashSet +where + hb::HashSet: Extend, +{ + #[inline] + fn extend>(&mut self, iter: U) { + self.0.extend(iter); + } +} + +impl From<[T; N]> for HashSet +where + T: Eq + Hash, +{ + 
fn from(value: [T; N]) -> Self { + value.into_iter().collect() + } +} + +impl From> for HashSet { + #[inline] + fn from(value: crate::collections::HashMap) -> Self { + Self(hb::HashSet::from(hashbrown::HashMap::from(value))) + } +} + +impl From> for HashSet { + #[inline] + fn from(value: hb::HashSet) -> Self { + Self(value) + } +} + +impl From> for hb::HashSet { + #[inline] + fn from(value: HashSet) -> Self { + value.0 + } +} + +impl Deref for HashSet { + type Target = hb::HashSet; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for HashSet { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "serialize")] +impl serde::Serialize for HashSet +where + hb::HashSet: serde::Serialize, +{ + #[inline] + fn serialize(&self, serializer: U) -> Result + where + U: serde::Serializer, + { + self.0.serialize(serializer) + } +} + +#[cfg(feature = "serialize")] +impl<'de, T, S> serde::Deserialize<'de> for HashSet +where + hb::HashSet: serde::Deserialize<'de>, +{ + #[inline] + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + Ok(Self(serde::Deserialize::deserialize(deserializer)?)) + } +} + +#[cfg(feature = "rayon")] +impl FromParallelIterator for HashSet +where + hb::HashSet: FromParallelIterator, + U: Send, +{ + fn from_par_iter

(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + Self( as FromParallelIterator>::from_par_iter(par_iter)) + } +} + +#[cfg(feature = "rayon")] +impl IntoParallelIterator for HashSet +where + hb::HashSet: IntoParallelIterator, +{ + type Item = as IntoParallelIterator>::Item; + type Iter = as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + self.0.into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl<'a, T: Sync, S> IntoParallelIterator for &'a HashSet +where + &'a hb::HashSet: IntoParallelIterator, +{ + type Item = <&'a hb::HashSet as IntoParallelIterator>::Item; + type Iter = <&'a hb::HashSet as IntoParallelIterator>::Iter; + + fn into_par_iter(self) -> Self::Iter { + (&self.0).into_par_iter() + } +} + +#[cfg(feature = "rayon")] +impl ParallelExtend for HashSet +where + hb::HashSet: ParallelExtend, + U: Send, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + as ParallelExtend>::par_extend(&mut self.0, par_iter); + } +} + +impl HashSet { + /// Creates an empty [`HashSet`]. + /// + /// Refer to [`new`](hb::HashSet::new) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// // Creates a HashSet with zero capacity. + /// let map = HashSet::new(); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn new() -> Self { + Self::with_hasher(FixedHasher) + } + + /// Creates an empty [`HashSet`] with the specified capacity. + /// + /// Refer to [`with_capacity`](hb::HashSet::with_capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// // Creates a HashSet with capacity for at least 5 entries. 
+ /// let map = HashSet::with_capacity(5); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, FixedHasher) + } +} + +impl HashSet { + /// Returns the number of elements the set can hold without reallocating. + /// + /// Refer to [`capacity`](hb::HashSet::capacity) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let map = HashSet::with_capacity(5); + /// + /// # let map: HashSet<()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// ``` + #[inline] + pub fn capacity(&self) -> usize { + self.0.capacity() + } + + /// An iterator visiting all elements in arbitrary order. + /// The iterator element type is `&'a T`. + /// + /// Refer to [`iter`](hb::HashSet::iter) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// for value in map.iter() { + /// // "foo", "bar", "baz" + /// // Note that the above order is not guaranteed + /// } + /// # + /// # assert_eq!(map.iter().count(), 3); + /// ``` + #[inline] + pub fn iter(&self) -> Iter<'_, T> { + self.0.iter() + } + + /// Returns the number of elements in the set. + /// + /// Refer to [`len`](hb::HashSet::len) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.len(), 0); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn len(&self) -> usize { + self.0.len() + } + + /// Returns `true` if the set contains no elements. + /// + /// Refer to [`is_empty`](hb::HashSet::is_empty) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert!(map.is_empty()); + /// + /// map.insert("foo"); + /// + /// assert!(!map.is_empty()); + /// ``` + #[inline] + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Clears the set, returning all elements in an iterator. + /// + /// Refer to [`drain`](hb::HashSet::drain) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// for value in map.drain() { + /// // "foo", "bar", "baz" + /// // Note that the above order is not guaranteed + /// } + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn drain(&mut self) -> Drain<'_, T> { + self.0.drain() + } + + /// Retains only the elements specified by the predicate. + /// + /// Refer to [`retain`](hb::HashSet::retain) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// map.retain(|value| *value == "baz"); + /// + /// assert_eq!(map.len(), 1); + /// ``` + #[inline] + pub fn retain(&mut self, f: F) + where + F: FnMut(&T) -> bool, + { + self.0.retain(f); + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// Refer to [`extract_if`](hb::HashSet::extract_if) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// let extracted = map + /// .extract_if(|value| *value == "baz") + /// .collect::>(); + /// + /// assert_eq!(map.len(), 2); + /// assert_eq!(extracted.len(), 1); + /// ``` + #[inline] + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, T, F> + where + F: FnMut(&T) -> bool, + { + self.0.extract_if(f) + } + + /// Clears the set, removing all values. + /// + /// Refer to [`clear`](hb::HashSet::clear) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// map.clear(); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn clear(&mut self) { + self.0.clear(); + } + + /// Creates a new empty hash set which will use the given hasher to hash + /// keys. + /// + /// Refer to [`with_hasher`](hb::HashSet::with_hasher) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashSet with the provided hasher. + /// let map = HashSet::with_hasher(SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub const fn with_hasher(hasher: S) -> Self { + Self(hb::HashSet::with_hasher(hasher)) + } + + /// Creates an empty [`HashSet`] with the specified capacity, using + /// `hasher` to hash the keys. + /// + /// Refer to [`with_capacity_and_hasher`](hb::HashSet::with_capacity_and_hasher) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// # use bevy_platform::hash::FixedHasher as SomeHasher; + /// // Creates a HashSet with capacity for 5 entries and the provided hasher. + /// let map = HashSet::with_capacity_and_hasher(5, SomeHasher); + /// # + /// # let mut map = map; + /// # map.insert("foo"); + /// # assert_eq!(map.get("foo"), Some("foo").as_ref()); + /// ``` + #[inline] + pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { + Self(hb::HashSet::with_capacity_and_hasher(capacity, hasher)) + } + + /// Returns a reference to the set's [`BuildHasher`]. + /// + /// Refer to [`hasher`](hb::HashSet::hasher) for further details. + #[inline] + pub fn hasher(&self) -> &S { + self.0.hasher() + } + + /// Takes the inner [`HashSet`](hb::HashSet) out of this wrapper. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let map: HashSet<&'static str> = HashSet::new(); + /// let map: hashbrown::HashSet<&'static str, _> = map.into_inner(); + /// ``` + #[inline] + pub fn into_inner(self) -> hb::HashSet { + self.0 + } +} + +impl HashSet +where + T: Eq + Hash, + S: BuildHasher, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the [`HashSet`]. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`reserve`](hb::HashSet::reserve) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::with_capacity(5); + /// + /// # let mut map: HashSet<()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.reserve(10); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn reserve(&mut self, additional: usize) { + self.0.reserve(additional); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashSet`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// Refer to [`try_reserve`](hb::HashSet::try_reserve) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::with_capacity(5); + /// + /// # let mut map: HashSet<()> = map; + /// # + /// assert!(map.capacity() >= 5); + /// + /// map.try_reserve(10).expect("Out of Memory!"); + /// + /// assert!(map.capacity() - map.len() >= 10); + /// ``` + #[inline] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), hashbrown::TryReserveError> { + self.0.try_reserve(additional) + } + + /// Shrinks the capacity of the set as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to_fit`](hb::HashSet::shrink_to_fit) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::with_capacity(5); + /// + /// map.insert("foo"); + /// map.insert("bar"); + /// map.insert("baz"); + /// + /// assert!(map.capacity() >= 5); + /// + /// map.shrink_to_fit(); + /// + /// assert_eq!(map.capacity(), 3); + /// ``` + #[inline] + pub fn shrink_to_fit(&mut self) { + self.0.shrink_to_fit(); + } + + /// Shrinks the capacity of the set with a lower limit. 
It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// Refer to [`shrink_to`](hb::HashSet::shrink_to) for further details. + #[inline] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.0.shrink_to(min_capacity); + } + + /// Visits the values representing the difference, + /// i.e., the values that are in `self` but not in `other`. + /// + /// Refer to [`difference`](hb::HashSet::difference) for further details. + #[inline] + pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T, S> { + self.0.difference(other) + } + + /// Visits the values representing the symmetric difference, + /// i.e., the values that are in `self` or in `other` but not in both. + /// + /// Refer to [`symmetric_difference`](hb::HashSet::symmetric_difference) for further details. + #[inline] + pub fn symmetric_difference<'a>(&'a self, other: &'a Self) -> SymmetricDifference<'a, T, S> { + self.0.symmetric_difference(other) + } + + /// Visits the values representing the intersection, + /// i.e., the values that are both in `self` and `other`. + /// + /// Refer to [`intersection`](hb::HashSet::intersection) for further details. + #[inline] + pub fn intersection<'a>(&'a self, other: &'a Self) -> Intersection<'a, T, S> { + self.0.intersection(other) + } + + /// Visits the values representing the union, + /// i.e., all the values in `self` or `other`, without duplicates. + /// + /// Refer to [`union`](hb::HashSet::union) for further details. + #[inline] + pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T, S> { + self.0.union(other) + } + + /// Returns `true` if the set contains a value. + /// + /// Refer to [`contains`](hb::HashSet::contains) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert!(map.contains("foo")); + /// ``` + #[inline] + pub fn contains(&self, value: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.0.contains(value) + } + + /// Returns a reference to the value in the set, if any, that is equal to the given value. + /// + /// Refer to [`get`](hb::HashSet::get) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.get("foo"), Some(&"foo")); + /// ``` + #[inline] + pub fn get(&self, value: &Q) -> Option<&T> + where + Q: Hash + Equivalent + ?Sized, + { + self.0.get(value) + } + + /// Inserts the given `value` into the set if it is not present, then + /// returns a reference to the value in the set. + /// + /// Refer to [`get_or_insert`](hb::HashSet::get_or_insert) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.get_or_insert("foo"), &"foo"); + /// ``` + #[inline] + pub fn get_or_insert(&mut self, value: T) -> &T { + self.0.get_or_insert(value) + } + + /// Inserts a value computed from `f` into the set if the given `value` is + /// not present, then returns a reference to the value in the set. + /// + /// Refer to [`get_or_insert_with`](hb::HashSet::get_or_insert_with) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.get_or_insert_with(&"foo", |_| "foo"), &"foo"); + /// ``` + #[inline] + pub fn get_or_insert_with(&mut self, value: &Q, f: F) -> &T + where + Q: Hash + Equivalent + ?Sized, + F: FnOnce(&Q) -> T, + { + self.0.get_or_insert_with(value, f) + } + + /// Gets the given value's corresponding entry in the set for in-place manipulation. + /// + /// Refer to [`entry`](hb::HashSet::entry) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// let value = map.entry("foo").or_insert(); + /// # + /// # assert_eq!(value, ()); + /// ``` + #[inline] + pub fn entry(&mut self, value: T) -> Entry<'_, T, S> { + self.0.entry(value) + } + + /// Returns `true` if `self` has no elements in common with `other`. + /// This is equivalent to checking for an empty intersection. + /// + /// Refer to [`is_disjoint`](hb::HashSet::is_disjoint) for further details. + #[inline] + pub fn is_disjoint(&self, other: &Self) -> bool { + self.0.is_disjoint(other) + } + + /// Returns `true` if the set is a subset of another, + /// i.e., `other` contains at least all the values in `self`. + /// + /// Refer to [`is_subset`](hb::HashSet::is_subset) for further details. + #[inline] + pub fn is_subset(&self, other: &Self) -> bool { + self.0.is_subset(other) + } + + /// Returns `true` if the set is a superset of another, + /// i.e., `self` contains at least all the values in `other`. + /// + /// Refer to [`is_superset`](hb::HashSet::is_superset) for further details. + #[inline] + pub fn is_superset(&self, other: &Self) -> bool { + self.0.is_superset(other) + } + + /// Adds a value to the set. + /// + /// Refer to [`insert`](hb::HashSet::insert) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert!(map.contains("foo")); + /// ``` + #[inline] + pub fn insert(&mut self, value: T) -> bool { + self.0.insert(value) + } + + /// Adds a value to the set, replacing the existing value, if any, that is equal to the given + /// one. Returns the replaced value. + /// + /// Refer to [`replace`](hb::HashSet::replace) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.replace("foo"), Some("foo")); + /// ``` + #[inline] + pub fn replace(&mut self, value: T) -> Option { + self.0.replace(value) + } + + /// Removes a value from the set. Returns whether the value was + /// present in the set. + /// + /// Refer to [`remove`](hb::HashSet::remove) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert!(map.remove("foo")); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn remove(&mut self, value: &Q) -> bool + where + Q: Hash + Equivalent + ?Sized, + { + self.0.remove(value) + } + + /// Removes and returns the value in the set, if any, that is equal to the given one. + /// + /// Refer to [`take`](hb::HashSet::take) for further details. 
+ /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// map.insert("foo"); + /// + /// assert_eq!(map.take("foo"), Some("foo")); + /// + /// assert!(map.is_empty()); + /// ``` + #[inline] + pub fn take(&mut self, value: &Q) -> Option + where + Q: Hash + Equivalent + ?Sized, + { + self.0.take(value) + } + + /// Returns the total amount of memory allocated internally by the hash + /// set, in bytes. + /// + /// Refer to [`allocation_size`](hb::HashSet::allocation_size) for further details. + /// + /// # Examples + /// + /// ```rust + /// # use bevy_platform::collections::HashSet; + /// let mut map = HashSet::new(); + /// + /// assert_eq!(map.allocation_size(), 0); + /// + /// map.insert("foo"); + /// + /// assert!(map.allocation_size() >= size_of::<&'static str>()); + /// ``` + #[inline] + pub fn allocation_size(&self) -> usize { + self.0.allocation_size() + } + + /// Insert a value the set without checking if the value already exists in the set. + /// + /// Refer to [`insert_unique_unchecked`](hb::HashSet::insert_unique_unchecked) for further details. + /// + /// # Safety + /// + /// This operation is safe if a value does not exist in the set. + /// + /// However, if a value exists in the set already, the behavior is unspecified: + /// this operation may panic, loop forever, or any following operation with the set + /// may panic, loop forever or return arbitrary result. + /// + /// That said, this operation (and following operations) are guaranteed to + /// not violate memory safety. + /// + /// However this operation is still unsafe because the resulting `HashSet` + /// may be passed to unsafe code which does expect the set to behave + /// correctly, and would cause unsoundness as a result. 
+ #[expect( + unsafe_code, + reason = "re-exporting unsafe method from Hashbrown requires unsafe code" + )] + #[inline] + pub unsafe fn insert_unique_unchecked(&mut self, value: T) -> &T { + // SAFETY: safety contract is ensured by the caller. + unsafe { self.0.insert_unique_unchecked(value) } + } +} + +impl BitOr<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: BitOr<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the union of `self` and `rhs` as a new `HashSet`. + #[inline] + fn bitor(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.bitor(&rhs.0)) + } +} + +impl BitAnd<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: BitAnd<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the intersection of `self` and `rhs` as a new `HashSet`. + #[inline] + fn bitand(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.bitand(&rhs.0)) + } +} + +impl BitXor<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: BitXor<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet`. + #[inline] + fn bitxor(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.bitxor(&rhs.0)) + } +} + +impl Sub<&HashSet> for &HashSet +where + for<'a> &'a hb::HashSet: Sub<&'a hb::HashSet, Output = hb::HashSet>, +{ + type Output = HashSet; + + /// Returns the difference of `self` and `rhs` as a new `HashSet`. + #[inline] + fn sub(self, rhs: &HashSet) -> HashSet { + HashSet(self.0.sub(&rhs.0)) + } +} + +impl BitOrAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> BitOrAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the union of `self` and `rhs`. 
+ #[inline] + fn bitor_assign(&mut self, rhs: &HashSet) { + self.0.bitor_assign(&rhs.0); + } +} + +impl BitAndAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> BitAndAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the intersection of `self` and `rhs`. + #[inline] + fn bitand_assign(&mut self, rhs: &HashSet) { + self.0.bitand_assign(&rhs.0); + } +} + +impl BitXorAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> BitXorAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the symmetric difference of `self` and `rhs`. + #[inline] + fn bitxor_assign(&mut self, rhs: &HashSet) { + self.0.bitxor_assign(&rhs.0); + } +} + +impl SubAssign<&HashSet> for HashSet +where + hb::HashSet: for<'a> SubAssign<&'a hb::HashSet>, +{ + /// Modifies this set to contain the difference of `self` and `rhs`. + #[inline] + fn sub_assign(&mut self, rhs: &HashSet) { + self.0.sub_assign(&rhs.0); + } +} diff --git a/crates/bevy_platform/src/collections/hash_table.rs b/crates/bevy_platform/src/collections/hash_table.rs new file mode 100644 index 0000000000000..5d6a2656796db --- /dev/null +++ b/crates/bevy_platform/src/collections/hash_table.rs @@ -0,0 +1,6 @@ +//! Provides [`HashTable`] + +pub use hashbrown::hash_table::{ + AbsentEntry, Drain, Entry, ExtractIf, HashTable, IntoIter, Iter, IterHash, IterHashMut, + IterMut, OccupiedEntry, VacantEntry, +}; diff --git a/crates/bevy_platform/src/collections/mod.rs b/crates/bevy_platform/src/collections/mod.rs new file mode 100644 index 0000000000000..3622165b656cf --- /dev/null +++ b/crates/bevy_platform/src/collections/mod.rs @@ -0,0 +1,12 @@ +//! Provides [`HashMap`] and [`HashSet`] from [`hashbrown`] with some customized defaults. +//! +//! Also provides the [`HashTable`] type, which is specific to [`hashbrown`]. 
+ +pub use hash_map::HashMap; +pub use hash_set::HashSet; +pub use hash_table::HashTable; +pub use hashbrown::Equivalent; + +pub mod hash_map; +pub mod hash_set; +pub mod hash_table; diff --git a/crates/bevy_platform/src/hash.rs b/crates/bevy_platform/src/hash.rs new file mode 100644 index 0000000000000..3b1a836ecf83d --- /dev/null +++ b/crates/bevy_platform/src/hash.rs @@ -0,0 +1,180 @@ +//! Provides replacements for `std::hash` items using [`foldhash`]. +//! +//! Also provides some additional items beyond the standard library. + +use core::{ + fmt::Debug, + hash::{BuildHasher, Hash, Hasher}, + marker::PhantomData, + ops::Deref, +}; + +pub use foldhash::fast::{FixedState, FoldHasher as DefaultHasher, RandomState}; + +/// For when you want a deterministic hasher. +/// +/// Seed was randomly generated with a fair dice roll. Guaranteed to be random: +/// +const FIXED_HASHER: FixedState = + FixedState::with_seed(0b1001010111101110000001001100010000000011001001101011001001111000); + +/// Deterministic hasher based upon a random but fixed state. +#[derive(Copy, Clone, Default, Debug)] +pub struct FixedHasher; +impl BuildHasher for FixedHasher { + type Hasher = DefaultHasher; + + #[inline] + fn build_hasher(&self) -> Self::Hasher { + FIXED_HASHER.build_hasher() + } +} + +/// A pre-hashed value of a specific type. Pre-hashing enables memoization of hashes that are expensive to compute. +/// +/// It also enables faster [`PartialEq`] comparisons by short circuiting on hash equality. +/// See [`PassHash`] and [`PassHasher`] for a "pass through" [`BuildHasher`] and [`Hasher`] implementation +/// designed to work with [`Hashed`] +/// See `PreHashMap` for a hashmap pre-configured to use [`Hashed`] keys. +pub struct Hashed { + hash: u64, + value: V, + marker: PhantomData, +} + +impl Hashed { + /// Pre-hashes the given value using the [`BuildHasher`] configured in the [`Hashed`] type. 
+ pub fn new(value: V) -> Self { + Self { + hash: H::default().hash_one(&value), + value, + marker: PhantomData, + } + } + + /// The pre-computed hash. + #[inline] + pub fn hash(&self) -> u64 { + self.hash + } +} + +impl Hash for Hashed { + #[inline] + fn hash(&self, state: &mut R) { + state.write_u64(self.hash); + } +} + +impl Deref for Hashed { + type Target = V; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.value + } +} + +impl PartialEq for Hashed { + /// A fast impl of [`PartialEq`] that first checks that `other`'s pre-computed hash + /// matches this value's pre-computed hash. + #[inline] + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash && self.value.eq(&other.value) + } +} + +impl Debug for Hashed { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("Hashed") + .field("hash", &self.hash) + .field("value", &self.value) + .finish() + } +} + +impl Clone for Hashed { + #[inline] + fn clone(&self) -> Self { + Self { + hash: self.hash, + value: self.value.clone(), + marker: PhantomData, + } + } +} + +impl Copy for Hashed {} + +impl Eq for Hashed {} + +/// A [`BuildHasher`] that results in a [`PassHasher`]. +#[derive(Default, Clone)] +pub struct PassHash; + +impl BuildHasher for PassHash { + type Hasher = PassHasher; + + fn build_hasher(&self) -> Self::Hasher { + PassHasher::default() + } +} + +/// A no-op hash that only works on `u64`s. Will panic if attempting to +/// hash a type containing non-u64 fields. +#[derive(Debug, Default)] +pub struct PassHasher { + hash: u64, +} + +impl Hasher for PassHasher { + #[inline] + fn finish(&self) -> u64 { + self.hash + } + + fn write(&mut self, _bytes: &[u8]) { + panic!("can only hash u64 using PassHasher"); + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.hash = i; + } +} + +/// [`BuildHasher`] for types that already contain a high-quality hash. 
+#[derive(Clone, Default)] +pub struct NoOpHash; + +impl BuildHasher for NoOpHash { + type Hasher = NoOpHasher; + + fn build_hasher(&self) -> Self::Hasher { + NoOpHasher(0) + } +} + +#[doc(hidden)] +pub struct NoOpHasher(u64); + +// This is for types that already contain a high-quality hash and want to skip +// re-hashing that hash. +impl Hasher for NoOpHasher { + fn finish(&self) -> u64 { + self.0 + } + + fn write(&mut self, bytes: &[u8]) { + // This should never be called by consumers. Prefer to call `write_u64` instead. + // Don't break applications (slower fallback, just check in test): + self.0 = bytes.iter().fold(self.0, |hash, b| { + hash.rotate_left(8).wrapping_add(*b as u64) + }); + } + + #[inline] + fn write_u64(&mut self, i: u64) { + self.0 = i; + } +} diff --git a/crates/bevy_platform/src/lib.rs b/crates/bevy_platform/src/lib.rs new file mode 100644 index 0000000000000..96f2f9a21cf9c --- /dev/null +++ b/crates/bevy_platform/src/lib.rs @@ -0,0 +1,49 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc( + html_logo_url = "https://bevyengine.org/assets/icon.png", + html_favicon_url = "https://bevyengine.org/assets/icon.png" +)] +#![no_std] + +//! Platform compatibility support for first-party [Bevy] engine crates. +//! +//! [Bevy]: https://bevyengine.org/ + +#[cfg(feature = "std")] +extern crate std; + +#[cfg(feature = "alloc")] +extern crate alloc; + +pub mod hash; +pub mod sync; +pub mod thread; +pub mod time; + +#[cfg(feature = "alloc")] +pub mod collections; + +/// Frequently used items which would typically be included in most contexts. +/// +/// When adding `no_std` support to a crate for the first time, often there's a substantial refactor +/// required due to the change in implicit prelude from `std::prelude` to `core::prelude`. +/// This unfortunately leaves out many items from `alloc`, even if the crate unconditionally +/// includes that crate. 
+/// +/// This prelude aims to ease the transition by re-exporting items from `alloc` which would +/// otherwise be included in the `std` implicit prelude. +pub mod prelude { + #[cfg(feature = "alloc")] + pub use alloc::{ + borrow::ToOwned, boxed::Box, format, string::String, string::ToString, vec, vec::Vec, + }; + + // Items from `std::prelude` that are missing in this module: + // * dbg + // * eprint + // * eprintln + // * is_x86_feature_detected + // * print + // * println + // * thread_local +} diff --git a/crates/bevy_platform/src/sync/atomic.rs b/crates/bevy_platform/src/sync/atomic.rs new file mode 100644 index 0000000000000..65211482a6cfc --- /dev/null +++ b/crates/bevy_platform/src/sync/atomic.rs @@ -0,0 +1,43 @@ +//! Provides various atomic alternatives to language primitives. +//! +//! Certain platforms lack complete atomic support, requiring the use of a fallback +//! such as `portable-atomic`. +//! Using these types will ensure the correct atomic provider is used without the need for +//! feature gates in your own code. 
+ +pub use atomic_16::{AtomicI16, AtomicU16}; +pub use atomic_32::{AtomicI32, AtomicU32}; +pub use atomic_64::{AtomicI64, AtomicU64}; +pub use atomic_8::{AtomicBool, AtomicI8, AtomicU8}; +pub use atomic_ptr::{AtomicIsize, AtomicPtr, AtomicUsize}; +pub use core::sync::atomic::Ordering; + +#[cfg(target_has_atomic = "8")] +use core::sync::atomic as atomic_8; + +#[cfg(not(target_has_atomic = "8"))] +use portable_atomic as atomic_8; + +#[cfg(target_has_atomic = "16")] +use core::sync::atomic as atomic_16; + +#[cfg(not(target_has_atomic = "16"))] +use portable_atomic as atomic_16; + +#[cfg(target_has_atomic = "32")] +use core::sync::atomic as atomic_32; + +#[cfg(not(target_has_atomic = "32"))] +use portable_atomic as atomic_32; + +#[cfg(target_has_atomic = "64")] +use core::sync::atomic as atomic_64; + +#[cfg(not(target_has_atomic = "64"))] +use portable_atomic as atomic_64; + +#[cfg(target_has_atomic = "ptr")] +use core::sync::atomic as atomic_ptr; + +#[cfg(not(target_has_atomic = "ptr"))] +use portable_atomic as atomic_ptr; diff --git a/crates/bevy_platform/src/sync/barrier.rs b/crates/bevy_platform/src/sync/barrier.rs new file mode 100644 index 0000000000000..2968a78b018f9 --- /dev/null +++ b/crates/bevy_platform/src/sync/barrier.rs @@ -0,0 +1,66 @@ +//! Provides `Barrier` and `BarrierWaitResult` + +pub use implementation::{Barrier, BarrierWaitResult}; + +#[cfg(feature = "std")] +use std::sync as implementation; + +#[cfg(not(feature = "std"))] +mod implementation { + use core::fmt; + + /// Fallback implementation of `Barrier` from the standard library. + pub struct Barrier { + inner: spin::Barrier, + } + + impl Barrier { + /// Creates a new barrier that can block a given number of threads. + /// + /// See the standard library for further details. + #[must_use] + pub const fn new(n: usize) -> Self { + Self { + inner: spin::Barrier::new(n), + } + } + + /// Blocks the current thread until all threads have rendezvoused here. 
+ /// + /// See the standard library for further details. + pub fn wait(&self) -> BarrierWaitResult { + BarrierWaitResult { + inner: self.inner.wait(), + } + } + } + + impl fmt::Debug for Barrier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Barrier").finish_non_exhaustive() + } + } + + /// Fallback implementation of `BarrierWaitResult` from the standard library. + pub struct BarrierWaitResult { + inner: spin::barrier::BarrierWaitResult, + } + + impl BarrierWaitResult { + /// Returns `true` if this thread is the "leader thread" for the call to [`Barrier::wait()`]. + /// + /// See the standard library for further details. + #[must_use] + pub fn is_leader(&self) -> bool { + self.inner.is_leader() + } + } + + impl fmt::Debug for BarrierWaitResult { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BarrierWaitResult") + .field("is_leader", &self.is_leader()) + .finish() + } + } +} diff --git a/crates/bevy_platform/src/sync/lazy_lock.rs b/crates/bevy_platform/src/sync/lazy_lock.rs new file mode 100644 index 0000000000000..c756daeb94bcf --- /dev/null +++ b/crates/bevy_platform/src/sync/lazy_lock.rs @@ -0,0 +1,11 @@ +//! Provides `LazyLock` + +pub use implementation::LazyLock; + +#[cfg(feature = "std")] +use std::sync as implementation; + +#[cfg(not(feature = "std"))] +mod implementation { + pub use spin::Lazy as LazyLock; +} diff --git a/crates/bevy_platform/src/sync/mod.rs b/crates/bevy_platform/src/sync/mod.rs new file mode 100644 index 0000000000000..8fb7a2fbffaae --- /dev/null +++ b/crates/bevy_platform/src/sync/mod.rs @@ -0,0 +1,33 @@ +//! Provides various synchronization alternatives to language primitives. +//! +//! Currently missing from this module are the following items: +//! * `Condvar` +//! * `WaitTimeoutResult` +//! * `mpsc` +//! +//! Otherwise, this is a drop-in replacement for `std::sync`. 
+ +pub use barrier::{Barrier, BarrierWaitResult}; +pub use lazy_lock::LazyLock; +pub use mutex::{Mutex, MutexGuard}; +pub use once::{Once, OnceLock, OnceState}; +pub use poison::{LockResult, PoisonError, TryLockError, TryLockResult}; +pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + +#[cfg(feature = "alloc")] +pub use arc::{Arc, Weak}; + +pub mod atomic; + +mod barrier; +mod lazy_lock; +mod mutex; +mod once; +mod poison; +mod rwlock; + +#[cfg(all(feature = "alloc", not(target_has_atomic = "ptr")))] +use portable_atomic_util as arc; + +#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] +use alloc::sync as arc; diff --git a/crates/bevy_platform/src/sync/mutex.rs b/crates/bevy_platform/src/sync/mutex.rs new file mode 100644 index 0000000000000..7ff363f5741d7 --- /dev/null +++ b/crates/bevy_platform/src/sync/mutex.rs @@ -0,0 +1,108 @@ +//! Provides `Mutex` and `MutexGuard` + +pub use implementation::{Mutex, MutexGuard}; + +#[cfg(feature = "std")] +use std::sync as implementation; + +#[cfg(not(feature = "std"))] +mod implementation { + use crate::sync::{LockResult, TryLockError, TryLockResult}; + use core::fmt; + + pub use spin::MutexGuard; + + /// Fallback implementation of `Mutex` from the standard library. + pub struct Mutex { + inner: spin::Mutex, + } + + impl Mutex { + /// Creates a new mutex in an unlocked state ready for use. + /// + /// See the standard library for further details. + pub const fn new(t: T) -> Self { + Self { + inner: spin::Mutex::new(t), + } + } + } + + impl Mutex { + /// Acquires a mutex, blocking the current thread until it is able to do so. + /// + /// See the standard library for further details. + pub fn lock(&self) -> LockResult> { + Ok(self.inner.lock()) + } + + /// Attempts to acquire this lock. + /// + /// See the standard library for further details. + pub fn try_lock(&self) -> TryLockResult> { + self.inner.try_lock().ok_or(TryLockError::WouldBlock) + } + + /// Determines whether the mutex is poisoned. 
+ /// + /// See the standard library for further details. + pub fn is_poisoned(&self) -> bool { + false + } + + /// Clear the poisoned state from a mutex. + /// + /// See the standard library for further details. + pub fn clear_poison(&self) { + // no-op + } + + /// Consumes this mutex, returning the underlying data. + /// + /// See the standard library for further details. + pub fn into_inner(self) -> LockResult + where + T: Sized, + { + Ok(self.inner.into_inner()) + } + + /// Returns a mutable reference to the underlying data. + /// + /// See the standard library for further details. + pub fn get_mut(&mut self) -> LockResult<&mut T> { + Ok(self.inner.get_mut()) + } + } + + impl From for Mutex { + fn from(t: T) -> Self { + Mutex::new(t) + } + } + + impl Default for Mutex { + fn default() -> Mutex { + Mutex::new(Default::default()) + } + } + + impl fmt::Debug for Mutex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut d = f.debug_struct("Mutex"); + match self.try_lock() { + Ok(guard) => { + d.field("data", &&*guard); + } + Err(TryLockError::Poisoned(err)) => { + d.field("data", &&**err.get_ref()); + } + Err(TryLockError::WouldBlock) => { + d.field("data", &format_args!("")); + } + } + d.field("poisoned", &false); + d.finish_non_exhaustive() + } + } +} diff --git a/crates/bevy_platform/src/sync/once.rs b/crates/bevy_platform/src/sync/once.rs new file mode 100644 index 0000000000000..f4ac34b905c0a --- /dev/null +++ b/crates/bevy_platform/src/sync/once.rs @@ -0,0 +1,217 @@ +//! Provides `Once`, `OnceState`, `OnceLock` + +pub use implementation::{Once, OnceLock, OnceState}; + +#[cfg(feature = "std")] +use std::sync as implementation; + +#[cfg(not(feature = "std"))] +mod implementation { + use core::{ + fmt, + panic::{RefUnwindSafe, UnwindSafe}, + }; + + /// Fallback implementation of `OnceLock` from the standard library. + pub struct OnceLock { + inner: spin::Once, + } + + impl OnceLock { + /// Creates a new empty cell. 
+ /// + /// See the standard library for further details. + #[must_use] + pub const fn new() -> Self { + Self { + inner: spin::Once::new(), + } + } + + /// Gets the reference to the underlying value. + /// + /// See the standard library for further details. + pub fn get(&self) -> Option<&T> { + self.inner.get() + } + + /// Gets the mutable reference to the underlying value. + /// + /// See the standard library for further details. + pub fn get_mut(&mut self) -> Option<&mut T> { + self.inner.get_mut() + } + + /// Sets the contents of this cell to `value`. + /// + /// See the standard library for further details. + pub fn set(&self, value: T) -> Result<(), T> { + let mut value = Some(value); + + self.inner.call_once(|| value.take().unwrap()); + + match value { + Some(value) => Err(value), + None => Ok(()), + } + } + + /// Gets the contents of the cell, initializing it with `f` if the cell + /// was empty. + /// + /// See the standard library for further details. + pub fn get_or_init(&self, f: F) -> &T + where + F: FnOnce() -> T, + { + self.inner.call_once(f) + } + + /// Consumes the `OnceLock`, returning the wrapped value. Returns + /// `None` if the cell was empty. + /// + /// See the standard library for further details. + pub fn into_inner(mut self) -> Option { + self.take() + } + + /// Takes the value out of this `OnceLock`, moving it back to an uninitialized state. + /// + /// See the standard library for further details. 
+ pub fn take(&mut self) -> Option { + if self.inner.is_completed() { + let mut inner = spin::Once::new(); + + core::mem::swap(&mut self.inner, &mut inner); + + inner.try_into_inner() + } else { + None + } + } + } + + impl RefUnwindSafe for OnceLock {} + impl UnwindSafe for OnceLock {} + + impl Default for OnceLock { + fn default() -> OnceLock { + OnceLock::new() + } + } + + impl fmt::Debug for OnceLock { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut d = f.debug_tuple("OnceLock"); + match self.get() { + Some(v) => d.field(v), + None => d.field(&format_args!("")), + }; + d.finish() + } + } + + impl Clone for OnceLock { + fn clone(&self) -> OnceLock { + let cell = Self::new(); + if let Some(value) = self.get() { + cell.set(value.clone()).ok().unwrap(); + } + cell + } + } + + impl From for OnceLock { + fn from(value: T) -> Self { + let cell = Self::new(); + cell.set(value).map(move |_| cell).ok().unwrap() + } + } + + impl PartialEq for OnceLock { + fn eq(&self, other: &OnceLock) -> bool { + self.get() == other.get() + } + } + + impl Eq for OnceLock {} + + /// Fallback implementation of `Once` from the standard library. + pub struct Once { + inner: OnceLock<()>, + } + + impl Once { + /// Creates a new `Once` value. + /// + /// See the standard library for further details. + #[expect(clippy::new_without_default, reason = "matching std::sync::Once")] + pub const fn new() -> Self { + Self { + inner: OnceLock::new(), + } + } + + /// Performs an initialization routine once and only once. The given closure + /// will be executed if this is the first time `call_once` has been called, + /// and otherwise the routine will *not* be invoked. + /// + /// See the standard library for further details. + pub fn call_once(&self, f: F) { + self.inner.get_or_init(f); + } + + /// Performs the same function as [`call_once()`] except ignores poisoning. + /// + /// See the standard library for further details. 
+        pub fn call_once_force<F: FnOnce(&OnceState)>(&self, f: F) {
+            const STATE: OnceState = OnceState { _private: () };
+
+            self.call_once(move || f(&STATE));
+        }
+
+        /// Returns `true` if some [`call_once()`] call has completed
+        /// successfully. Specifically, `is_completed` will return false in
+        /// the following situations:
+        /// * [`call_once()`] was not called at all,
+        /// * [`call_once()`] was called, but has not yet completed,
+        /// * the [`Once`] instance is poisoned
+        ///
+        /// See the standard library for further details.
+        pub fn is_completed(&self) -> bool {
+            self.inner.get().is_some()
+        }
+    }
+
+    impl RefUnwindSafe for Once {}
+    impl UnwindSafe for Once {}
+
+    impl fmt::Debug for Once {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("Once").finish_non_exhaustive()
+        }
+    }
+
+    /// Fallback implementation of `OnceState` from the standard library.
+    pub struct OnceState {
+        _private: (),
+    }
+
+    impl OnceState {
+        /// Returns `true` if the associated [`Once`] was poisoned prior to the
+        /// invocation of the closure passed to [`Once::call_once_force()`].
+        ///
+        /// See the standard library for further details.
+        pub fn is_poisoned(&self) -> bool {
+            false
+        }
+    }
+
+    impl fmt::Debug for OnceState {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("OnceState")
+                .field("poisoned", &self.is_poisoned())
+                .finish()
+        }
+    }
+}
diff --git a/crates/bevy_platform/src/sync/poison.rs b/crates/bevy_platform/src/sync/poison.rs
new file mode 100644
index 0000000000000..79eafc42505dd
--- /dev/null
+++ b/crates/bevy_platform/src/sync/poison.rs
@@ -0,0 +1,107 @@
+//!
Provides `LockResult`, `PoisonError`, `TryLockError`, `TryLockResult`
+
+pub use implementation::{LockResult, PoisonError, TryLockError, TryLockResult};
+
+#[cfg(feature = "std")]
+use std::sync as implementation;
+
+#[cfg(not(feature = "std"))]
+mod implementation {
+    use core::{error::Error, fmt};
+
+    /// Fallback implementation of `PoisonError` from the standard library.
+    pub struct PoisonError<T> {
+        guard: T,
+    }
+
+    impl<T> fmt::Debug for PoisonError<T> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("PoisonError").finish_non_exhaustive()
+        }
+    }
+
+    impl<T> fmt::Display for PoisonError<T> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            "poisoned lock: another task failed inside".fmt(f)
+        }
+    }
+
+    impl<T> Error for PoisonError<T> {}
+
+    impl<T> PoisonError<T> {
+        /// Creates a `PoisonError`.
+        ///
+        /// See the standard library for further details.
+        #[cfg(panic = "unwind")]
+        pub fn new(guard: T) -> PoisonError<T> {
+            PoisonError { guard }
+        }
+
+        /// Consumes this error indicating that a lock is poisoned, returning the
+        /// underlying guard to allow access regardless.
+        ///
+        /// See the standard library for further details.
+        pub fn into_inner(self) -> T {
+            self.guard
+        }
+
+        /// Reaches into this error indicating that a lock is poisoned, returning a
+        /// reference to the underlying guard to allow access regardless.
+        ///
+        /// See the standard library for further details.
+        pub fn get_ref(&self) -> &T {
+            &self.guard
+        }
+
+        /// Reaches into this error indicating that a lock is poisoned, returning a
+        /// mutable reference to the underlying guard to allow access regardless.
+        ///
+        /// See the standard library for further details.
+        pub fn get_mut(&mut self) -> &mut T {
+            &mut self.guard
+        }
+    }
+
+    /// Fallback implementation of `TryLockError` from the standard library.
+    pub enum TryLockError<T> {
+        /// The lock could not be acquired because another thread failed while holding
+        /// the lock.
+        Poisoned(PoisonError<T>),
+        /// The lock could not be acquired at this time because the operation would
+        /// otherwise block.
+        WouldBlock,
+    }
+
+    impl<T> From<PoisonError<T>> for TryLockError<T> {
+        fn from(err: PoisonError<T>) -> TryLockError<T> {
+            TryLockError::Poisoned(err)
+        }
+    }
+
+    impl<T> fmt::Debug for TryLockError<T> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            match *self {
+                TryLockError::Poisoned(..) => "Poisoned(..)".fmt(f),
+                TryLockError::WouldBlock => "WouldBlock".fmt(f),
+            }
+        }
+    }
+
+    impl<T> fmt::Display for TryLockError<T> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            match *self {
+                TryLockError::Poisoned(..) => "poisoned lock: another task failed inside",
+                TryLockError::WouldBlock => "try_lock failed because the operation would block",
+            }
+            .fmt(f)
+        }
+    }
+
+    impl<T> Error for TryLockError<T> {}
+
+    /// Fallback implementation of `LockResult` from the standard library.
+    pub type LockResult<Guard> = Result<Guard, PoisonError<Guard>>;
+
+    /// Fallback implementation of `TryLockResult` from the standard library.
+    pub type TryLockResult<Guard> = Result<Guard, TryLockError<Guard>>;
+}
diff --git a/crates/bevy_platform/src/sync/rwlock.rs b/crates/bevy_platform/src/sync/rwlock.rs
new file mode 100644
index 0000000000000..f1f529baafe09
--- /dev/null
+++ b/crates/bevy_platform/src/sync/rwlock.rs
@@ -0,0 +1,124 @@
+//! Provides `RwLock`, `RwLockReadGuard`, `RwLockWriteGuard`
+
+pub use implementation::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+
+#[cfg(feature = "std")]
+use std::sync as implementation;
+
+#[cfg(not(feature = "std"))]
+mod implementation {
+    use crate::sync::{LockResult, TryLockError, TryLockResult};
+    use core::fmt;
+
+    pub use spin::rwlock::{RwLockReadGuard, RwLockWriteGuard};
+
+    /// Fallback implementation of `RwLock` from the standard library.
+    pub struct RwLock<T: ?Sized> {
+        inner: spin::RwLock<T>,
+    }
+
+    impl<T> RwLock<T> {
+        /// Creates a new instance of an `RwLock<T>` which is unlocked.
+        ///
+        /// See the standard library for further details.
+        pub const fn new(t: T) -> RwLock<T> {
+            Self {
+                inner: spin::RwLock::new(t),
+            }
+        }
+    }
+
+    impl<T: ?Sized> RwLock<T> {
+        /// Locks this `RwLock` with shared read access, blocking the current thread
+        /// until it can be acquired.
+        ///
+        /// See the standard library for further details.
+        pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
+            Ok(self.inner.read())
+        }
+
+        /// Attempts to acquire this `RwLock` with shared read access.
+        ///
+        /// See the standard library for further details.
+        pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
+            self.inner.try_read().ok_or(TryLockError::WouldBlock)
+        }
+
+        /// Locks this `RwLock` with exclusive write access, blocking the current
+        /// thread until it can be acquired.
+        ///
+        /// See the standard library for further details.
+        pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
+            Ok(self.inner.write())
+        }
+
+        /// Attempts to lock this `RwLock` with exclusive write access.
+        ///
+        /// See the standard library for further details.
+        pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
+            self.inner.try_write().ok_or(TryLockError::WouldBlock)
+        }
+
+        /// Determines whether the lock is poisoned.
+        ///
+        /// See the standard library for further details.
+        pub fn is_poisoned(&self) -> bool {
+            false
+        }
+
+        /// Clear the poisoned state from a lock.
+        ///
+        /// See the standard library for further details.
+        pub fn clear_poison(&self) {
+            // no-op
+        }
+
+        /// Consumes this `RwLock`, returning the underlying data.
+        ///
+        /// See the standard library for further details.
+        pub fn into_inner(self) -> LockResult<T>
+        where
+            T: Sized,
+        {
+            Ok(self.inner.into_inner())
+        }
+
+        /// Returns a mutable reference to the underlying data.
+        ///
+        /// See the standard library for further details.
+        pub fn get_mut(&mut self) -> LockResult<&mut T> {
+            Ok(self.inner.get_mut())
+        }
+    }
+
+    impl<T: fmt::Debug> fmt::Debug for RwLock<T> {
+        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            let mut d = f.debug_struct("RwLock");
+            match self.try_read() {
+                Ok(guard) => {
+                    d.field("data", &&*guard);
+                }
+                Err(TryLockError::Poisoned(err)) => {
+                    d.field("data", &&**err.get_ref());
+                }
+                Err(TryLockError::WouldBlock) => {
+                    d.field("data", &format_args!("<locked>"));
+                }
+            }
+            d.field("poisoned", &false);
+            d.finish_non_exhaustive()
+        }
+    }
+
+    impl<T: Default> Default for RwLock<T> {
+        fn default() -> RwLock<T> {
+            RwLock::new(Default::default())
+        }
+    }
+
+    impl<T> From<T> for RwLock<T> {
+        fn from(t: T) -> Self {
+            RwLock::new(t)
+        }
+    }
+}
diff --git a/crates/bevy_platform/src/thread.rs b/crates/bevy_platform/src/thread.rs
new file mode 100644
index 0000000000000..e1d593c90b4d3
--- /dev/null
+++ b/crates/bevy_platform/src/thread.rs
@@ -0,0 +1,29 @@
+//! Provides `sleep` for all platforms.
+
+pub use thread::sleep;
+
+cfg_if::cfg_if! {
+    // TODO: use browser timeouts based on ScheduleRunnerPlugin::build
+    if #[cfg(feature = "std")] {
+        use std::thread;
+    } else {
+        mod fallback {
+            use core::{hint::spin_loop, time::Duration};
+
+            use crate::time::Instant;
+
+            /// Puts the current thread to sleep for at least the specified amount of time.
+            ///
+            /// As this is a `no_std` fallback implementation, this will spin the current thread.
+            pub fn sleep(dur: Duration) {
+                let start = Instant::now();
+
+                while start.elapsed() < dur {
+                    spin_loop()
+                }
+            }
+        }
+
+        use fallback as thread;
+    }
+}
diff --git a/crates/bevy_platform/src/time/fallback.rs b/crates/bevy_platform/src/time/fallback.rs
new file mode 100644
index 0000000000000..c438e6e3795a4
--- /dev/null
+++ b/crates/bevy_platform/src/time/fallback.rs
@@ -0,0 +1,177 @@
+//! Provides a fallback implementation of `Instant` from the standard library.
+ +#![expect( + unsafe_code, + reason = "Instant fallback requires unsafe to allow users to update the internal value" +)] + +use crate::sync::atomic::{AtomicPtr, Ordering}; + +use core::{ + fmt, + ops::{Add, AddAssign, Sub, SubAssign}, + time::Duration, +}; + +static ELAPSED_GETTER: AtomicPtr<()> = AtomicPtr::new(unset_getter as *mut _); + +/// Fallback implementation of `Instant` suitable for a `no_std` environment. +/// +/// If you are on any of the following target architectures, this is a drop-in replacement: +/// +/// - `x86` +/// - `x86_64` +/// - `aarch64` +/// +/// On any other architecture, you must call [`Instant::set_elapsed`], providing a method +/// which when called supplies a monotonically increasing count of elapsed nanoseconds relative +/// to some arbitrary point in time. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Instant(Duration); + +impl Instant { + /// Returns an instant corresponding to "now". + #[must_use] + pub fn now() -> Instant { + let getter = ELAPSED_GETTER.load(Ordering::Acquire); + + // SAFETY: Function pointer is always valid + let getter = unsafe { core::mem::transmute::<*mut (), fn() -> Duration>(getter) }; + + Self((getter)()) + } + + /// Provides a function returning the amount of time that has elapsed since execution began. + /// The getter provided to this method will be used by [`now`](Instant::now). + /// + /// # Safety + /// + /// - The function provided must accurately represent the elapsed time. + /// - The function must preserve all invariants of the [`Instant`] type. + /// - The pointer to the function must be valid whenever [`Instant::now`] is called. + pub unsafe fn set_elapsed(getter: fn() -> Duration) { + ELAPSED_GETTER.store(getter as *mut _, Ordering::Release); + } + + /// Returns the amount of time elapsed from another instant to this one, + /// or zero duration if that instant is later than this one. 
+    #[must_use]
+    pub fn duration_since(&self, earlier: Instant) -> Duration {
+        self.saturating_duration_since(earlier)
+    }
+
+    /// Returns the amount of time elapsed from another instant to this one,
+    /// or None if that instant is later than this one.
+    ///
+    /// Due to monotonicity bugs, even under correct logical ordering of the passed `Instant`s,
+    /// this method can return `None`.
+    #[must_use]
+    pub fn checked_duration_since(&self, earlier: Instant) -> Option<Duration> {
+        self.0.checked_sub(earlier.0)
+    }
+
+    /// Returns the amount of time elapsed from another instant to this one,
+    /// or zero duration if that instant is later than this one.
+    #[must_use]
+    pub fn saturating_duration_since(&self, earlier: Instant) -> Duration {
+        self.0.saturating_sub(earlier.0)
+    }
+
+    /// Returns the amount of time elapsed since this instant.
+    #[must_use]
+    pub fn elapsed(&self) -> Duration {
+        Instant::now().saturating_duration_since(*self)
+    }
+
+    /// Returns `Some(t)` where `t` is the time `self + duration` if `t` can be represented as
+    /// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
+    /// otherwise.
+    pub fn checked_add(&self, duration: Duration) -> Option<Instant> {
+        self.0.checked_add(duration).map(Instant)
+    }
+
+    /// Returns `Some(t)` where `t` is the time `self - duration` if `t` can be represented as
+    /// `Instant` (which means it's inside the bounds of the underlying data structure), `None`
+    /// otherwise.
+    pub fn checked_sub(&self, duration: Duration) -> Option<Instant> {
+        self.0.checked_sub(duration).map(Instant)
+    }
+}
+
+impl Add<Duration> for Instant {
+    type Output = Instant;
+
+    /// # Panics
+    ///
+    /// This function may panic if the resulting point in time cannot be represented by the
+    /// underlying data structure. See [`Instant::checked_add`] for a version without panic.
+    fn add(self, other: Duration) -> Instant {
+        self.checked_add(other)
+            .expect("overflow when adding duration to instant")
+    }
+}
+
+impl AddAssign<Duration> for Instant {
+    fn add_assign(&mut self, other: Duration) {
+        *self = *self + other;
+    }
+}
+
+impl Sub<Duration> for Instant {
+    type Output = Instant;
+
+    fn sub(self, other: Duration) -> Instant {
+        self.checked_sub(other)
+            .expect("overflow when subtracting duration from instant")
+    }
+}
+
+impl SubAssign<Duration> for Instant {
+    fn sub_assign(&mut self, other: Duration) {
+        *self = *self - other;
+    }
+}
+
+impl Sub<Instant> for Instant {
+    type Output = Duration;
+
+    /// Returns the amount of time elapsed from another instant to this one,
+    /// or zero duration if that instant is later than this one.
+    fn sub(self, other: Instant) -> Duration {
+        self.duration_since(other)
+    }
+}
+
+impl fmt::Debug for Instant {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.0.fmt(f)
+    }
+}
+
+fn unset_getter() -> Duration {
+    cfg_if::cfg_if! {
+        if #[cfg(target_arch = "x86")] {
+            // SAFETY: standard technique for getting a nanosecond counter on x86
+            let nanos = unsafe {
+                core::arch::x86::_rdtsc()
+            };
+            Duration::from_nanos(nanos)
+        } else if #[cfg(target_arch = "x86_64")] {
+            // SAFETY: standard technique for getting a nanosecond counter on x86_64
+            let nanos = unsafe {
+                core::arch::x86_64::_rdtsc()
+            };
+            Duration::from_nanos(nanos)
+        } else if #[cfg(target_arch = "aarch64")] {
+            // SAFETY: standard technique for getting a nanosecond counter of aarch64
+            let nanos = unsafe {
+                let mut ticks: u64;
+                core::arch::asm!("mrs {}, cntvct_el0", out(reg) ticks);
+                ticks
+            };
+            Duration::from_nanos(nanos)
+        } else {
+            panic!("An elapsed time getter has not been provided to `Instant`.
Please use `Instant::set_elapsed(...)` before calling `Instant::now()`") + } + } +} diff --git a/crates/bevy_platform/src/time/mod.rs b/crates/bevy_platform/src/time/mod.rs new file mode 100644 index 0000000000000..260d8e4aea124 --- /dev/null +++ b/crates/bevy_platform/src/time/mod.rs @@ -0,0 +1,15 @@ +//! Provides `Instant` for all platforms. + +pub use time::Instant; + +cfg_if::cfg_if! { + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + use web_time as time; + } else if #[cfg(feature = "std")] { + use std::time; + } else { + mod fallback; + + use fallback as time; + } +} diff --git a/crates/bevy_ptr/Cargo.toml b/crates/bevy_ptr/Cargo.toml index 0aea96ebd023b..0f56880bd41da 100644 --- a/crates/bevy_ptr/Cargo.toml +++ b/crates/bevy_ptr/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "bevy_ptr" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Utilities for working with untyped pointers in a more safe way" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy", "no_std"] -rust-version = "1.81.0" +rust-version = "1.85.0" [dependencies] diff --git a/crates/bevy_ptr/LICENSE-APACHE b/crates/bevy_ptr/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_ptr/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_ptr/LICENSE-MIT b/crates/bevy_ptr/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_ptr/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_ptr/src/lib.rs b/crates/bevy_ptr/src/lib.rs index d767a25d4c63d..1580f3f926eba 100644 --- a/crates/bevy_ptr/src/lib.rs +++ b/crates/bevy_ptr/src/lib.rs @@ -9,7 +9,7 @@ use core::{ cell::UnsafeCell, - fmt::{self, Formatter, Pointer}, + fmt::{self, Debug, Formatter, Pointer}, marker::PhantomData, mem::ManuallyDrop, num::NonZeroUsize, @@ -17,11 +17,11 @@ use core::{ }; /// Used as a type argument to [`Ptr`], [`PtrMut`] and [`OwningPtr`] to specify that the pointer is aligned. -#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub struct Aligned; /// Used as a type argument to [`Ptr`], [`PtrMut`] and [`OwningPtr`] to specify that the pointer is not aligned. 
-#[derive(Copy, Clone)] +#[derive(Debug, Copy, Clone)] pub struct Unaligned; /// Trait that is only implemented for [`Aligned`] and [`Unaligned`] to work around the lack of ability @@ -159,7 +159,7 @@ impl<'a, T: ?Sized> From<&'a mut T> for ConstNonNull { /// /// It may be helpful to think of this type as similar to `&'a dyn Any` but without /// the metadata and able to point to data that does not correspond to a Rust type. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone)] #[repr(transparent)] pub struct Ptr<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a u8, A)>); @@ -174,7 +174,6 @@ pub struct Ptr<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a u8, A)> /// /// It may be helpful to think of this type as similar to `&'a mut dyn Any` but without /// the metadata and able to point to data that does not correspond to a Rust type. -#[derive(Debug)] #[repr(transparent)] pub struct PtrMut<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a mut u8, A)>); @@ -194,7 +193,6 @@ pub struct PtrMut<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a mut /// /// It may be helpful to think of this type as similar to `&'a mut ManuallyDrop` but /// without the metadata and able to point to data that does not correspond to a Rust type. -#[derive(Debug)] #[repr(transparent)] pub struct OwningPtr<'a, A: IsAligned = Aligned>(NonNull, PhantomData<(&'a mut u8, A)>); @@ -265,6 +263,19 @@ macro_rules! 
impl_ptr { Pointer::fmt(&self.0, f) } } + + impl Debug for $ptr<'_, Aligned> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}({:?})", stringify!($ptr), self.0) + } + } + impl Debug for $ptr<'_, Unaligned> { + #[inline] + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}({:?})", stringify!($ptr), self.0) + } + } }; } @@ -289,7 +300,9 @@ impl<'a, A: IsAligned> Ptr<'a, A> { /// Transforms this [`Ptr`] into an [`PtrMut`] /// /// # Safety - /// Another [`PtrMut`] for the same [`Ptr`] must not be created until the first is dropped. + /// * The data pointed to by this `Ptr` must be valid for writes. + /// * There must be no active references (mutable or otherwise) to the data underlying this `Ptr`. + /// * Another [`PtrMut`] for the same [`Ptr`] must not be created until the first is dropped. #[inline] pub unsafe fn assert_unique(self) -> PtrMut<'a, A> { PtrMut(self.0, PhantomData) @@ -411,9 +424,10 @@ impl<'a> OwningPtr<'a> { /// Consumes a value and creates an [`OwningPtr`] to it while ensuring a double drop does not happen. #[inline] pub fn make) -> R, R>(val: T, f: F) -> R { + let mut val = ManuallyDrop::new(val); // SAFETY: The value behind the pointer will not get dropped or observed later, // so it's safe to promote it to an owning pointer. 
- f(unsafe { Self::make_internal(&mut ManuallyDrop::new(val)) }) + f(unsafe { Self::make_internal(&mut val) }) } } diff --git a/crates/bevy_reflect/Cargo.toml b/crates/bevy_reflect/Cargo.toml index 193a0b7c86b62..bf85258700f5d 100644 --- a/crates/bevy_reflect/Cargo.toml +++ b/crates/bevy_reflect/Cargo.toml @@ -1,49 +1,89 @@ [package] name = "bevy_reflect" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Dynamically interact with rust types" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy"] -rust-version = "1.81.0" +rust-version = "1.85.0" [features] default = ["std", "smallvec", "debug"] + +# Features + +## When enabled, allows documentation comments to be accessed via reflection +documentation = ["bevy_reflect_derive/documentation"] + +## Enables function reflection +functions = ["bevy_reflect_derive/functions"] + +# Debugging Features + +## Enables features useful for debugging reflection +debug = ["debug_stack"] + +## When enabled, keeps track of the current serialization/deserialization context for better error messages +debug_stack = ["std"] + +# Integrations + +## Adds reflection support to `glam` types. +glam = ["dep:glam"] + +## Adds reflection support to `petgraph` types. +petgraph = ["dep:petgraph", "std"] + +## Adds reflection support to `smallvec` types. +smallvec = ["dep:smallvec"] + +## Adds reflection support to `uuid` types. +uuid = ["dep:uuid"] + +## Adds reflection support to `wgpu-types` types. +wgpu-types = ["dep:wgpu-types"] + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. 
std = [ "bevy_utils/std", "erased-serde/std", "downcast-rs/std", "serde/std", - "spin/std", "glam?/std", "smol_str?/std", "uuid?/std", + "bevy_platform/std", + "wgpu-types?/std", +] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = [ + "bevy_platform/critical-section", + "bevy_utils/critical-section", ] -# When enabled, provides Bevy-related reflection implementations -bevy = ["smallvec", "smol_str"] -glam = ["dep:glam"] -petgraph = ["dep:petgraph", "std"] -smallvec = ["dep:smallvec"] -uuid = ["dep:uuid"] -wgpu-types = ["dep:wgpu-types", "std"] -# Enables features useful for debugging reflection -debug = ["debug_stack"] -# When enabled, keeps track of the current serialization/deserialization context for better error messages -debug_stack = [] -# When enabled, allows documentation comments to be accessed via reflection -documentation = ["bevy_reflect_derive/documentation"] -# Enables function reflection -functions = ["bevy_reflect_derive/functions"] + +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. 
+web = ["bevy_platform/web", "uuid?/js"] [dependencies] # bevy -bevy_reflect_derive = { path = "derive", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev", default-features = false, features = [ +bevy_reflect_derive = { path = "derive", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false, features = [ "alloc", ] } -bevy_ptr = { path = "../bevy_ptr", version = "0.15.0-dev" } +bevy_ptr = { path = "../bevy_ptr", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "alloc", + "serialize", +] } # used by bevy-utils, but it also needs reflect impls foldhash = { version = "0.1.3", default-features = false } @@ -53,35 +93,32 @@ erased-serde = { version = "0.4", default-features = false, features = [ "alloc", ] } disqualified = { version = "1.0", default-features = false } -downcast-rs = { version = "1.2", default-features = false } +downcast-rs = { version = "2", default-features = false } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } serde = { version = "1", default-features = false, features = ["alloc"] } -spin = { version = "0.9.8", default-features = false, features = [ - "once", - "rwlock", -] } assert_type_match = "0.1.1" - smallvec = { version = "1.11", default-features = false, optional = true } -glam = { version = "0.29", default-features = false, features = [ +glam = { version = "0.29.3", default-features = false, features = [ "serde", ], optional = true } -petgraph = { version = "0.6", features = ["serde-1"], optional = true } +petgraph = { version = "0.7", features = ["serde-1"], optional = true } smol_str = { version = "0.2.0", default-features = false, features = [ "serde", ], optional = true } -uuid = { version = "1.0", default-features = false, optional = true, features = [ +uuid = { 
version = "1.13.1", default-features = false, optional = true, features = [ "v4", "serde", ] } variadics_please = "1.1" -wgpu-types = { version = "23", features = ["serde"], optional = true } +wgpu-types = { version = "24", features = [ + "serde", +], optional = true, default-features = false } [dev-dependencies] ron = "0.8.0" rmp-serde = "1.1" -bincode = "1.3" +bincode = { version = "2.0", features = ["serde"] } serde_json = "1.0" serde = { version = "1", features = ["derive"] } static_assertions = "1.1.0" diff --git a/crates/bevy_reflect/LICENSE-APACHE b/crates/bevy_reflect/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_reflect/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_reflect/LICENSE-MIT b/crates/bevy_reflect/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_reflect/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_reflect/compile_fail/Cargo.toml b/crates/bevy_reflect/compile_fail/Cargo.toml index 14e5eb2264f82..178711c5d03b0 100644 --- a/crates/bevy_reflect/compile_fail/Cargo.toml +++ b/crates/bevy_reflect/compile_fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "bevy_reflect_compile_fail" -edition = "2021" +edition = "2024" description = "Compile fail tests for Bevy Engine's reflection system" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" diff --git a/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs b/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs index 0f8ade8e234ca..457f1f75e5f52 100644 --- a/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs +++ b/crates/bevy_reflect/compile_fail/tests/reflect_remote/nested_fail.rs @@ -26,6 +26,7 @@ mod incorrect_inner_type { //~| ERROR: `TheirInner` does not implement `PartialReflect` so cannot be introspected //~| ERROR: `TheirInner` does not implement `PartialReflect` so cannot be introspected //~| ERROR: `TheirInner` does not implement `TypePath` so cannot provide dynamic type path information + //~| ERROR: `TheirInner` does not implement `TypePath` so cannot provide dynamic type path information //~| ERROR: `?` operator has incompatible types struct MyOuter { // Reason: Should not use `MyInner` directly diff --git a/crates/bevy_reflect/derive/Cargo.toml b/crates/bevy_reflect/derive/Cargo.toml index 468e89e8fbff5..ad6ec8cd2f396 100644 --- a/crates/bevy_reflect/derive/Cargo.toml +++ b/crates/bevy_reflect/derive/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_reflect_derive" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Derive implementations for bevy_reflect" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -19,11 +19,15 @@ documentation = [] functions = [] [dependencies] -bevy_macro_utils = { path = 
"../../bevy_macro_utils", version = "0.15.0-dev" } +bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.16.0-dev" } proc-macro2 = "1.0" quote = "1.0" syn = { version = "2.0", features = ["full"] } -uuid = { version = "1.1", features = ["v4"] } +uuid = { version = "1.13.1", features = ["v4"] } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. +uuid = { version = "1.13.1", default-features = false, features = ["js"] } [lints] workspace = true diff --git a/crates/bevy_reflect/derive/LICENSE-APACHE b/crates/bevy_reflect/derive/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_reflect/derive/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_reflect/derive/LICENSE-MIT b/crates/bevy_reflect/derive/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_reflect/derive/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_reflect/derive/src/container_attributes.rs b/crates/bevy_reflect/derive/src/container_attributes.rs index b134c571c0501..8b2ae0351f53a 100644 --- a/crates/bevy_reflect/derive/src/container_attributes.rs +++ b/crates/bevy_reflect/derive/src/container_attributes.rs @@ -9,7 +9,7 @@ use crate::{ attribute_parser::terminated_parser, custom_attributes::CustomAttributes, derive_data::ReflectTraitToImpl, }; -use bevy_macro_utils::fq_std::{FQAny, FQOption}; +use bevy_macro_utils::fq_std::{FQAny, FQClone, FQOption, FQResult}; use proc_macro2::{Ident, Span}; use quote::quote_spanned; use syn::{ @@ -23,6 +23,7 @@ mod kw { syn::custom_keyword!(Debug); syn::custom_keyword!(PartialEq); syn::custom_keyword!(Hash); + syn::custom_keyword!(Clone); syn::custom_keyword!(no_field_bounds); syn::custom_keyword!(opaque); } @@ -89,10 +90,7 @@ pub(crate) struct FromReflectAttrs { impl FromReflectAttrs { /// Returns true if `FromReflect` should be automatically derived as part of the `Reflect` derive. pub fn should_auto_derive(&self) -> bool { - self.auto_derive - .as_ref() - .map(LitBool::value) - .unwrap_or(true) + self.auto_derive.as_ref().is_none_or(LitBool::value) } } @@ -112,10 +110,7 @@ pub(crate) struct TypePathAttrs { impl TypePathAttrs { /// Returns true if `TypePath` should be automatically derived as part of the `Reflect` derive. pub fn should_auto_derive(&self) -> bool { - self.auto_derive - .as_ref() - .map(LitBool::value) - .unwrap_or(true) + self.auto_derive.as_ref().is_none_or(LitBool::value) } } @@ -181,6 +176,7 @@ impl TypePathAttrs { /// > __Note:__ Registering a custom function only works for special traits. 
#[derive(Default, Clone)] pub(crate) struct ContainerAttributes { + clone: TraitImpl, debug: TraitImpl, hash: TraitImpl, partial_eq: TraitImpl, @@ -242,12 +238,14 @@ impl ContainerAttributes { self.parse_opaque(input) } else if lookahead.peek(kw::no_field_bounds) { self.parse_no_field_bounds(input) + } else if lookahead.peek(kw::Clone) { + self.parse_clone(input) } else if lookahead.peek(kw::Debug) { self.parse_debug(input) - } else if lookahead.peek(kw::PartialEq) { - self.parse_partial_eq(input) } else if lookahead.peek(kw::Hash) { self.parse_hash(input) + } else if lookahead.peek(kw::PartialEq) { + self.parse_partial_eq(input) } else if lookahead.peek(Ident::peek_any) { self.parse_ident(input) } else { @@ -280,6 +278,26 @@ impl ContainerAttributes { Ok(()) } + /// Parse `clone` attribute. + /// + /// Examples: + /// - `#[reflect(Clone)]` + /// - `#[reflect(Clone(custom_clone_fn))]` + fn parse_clone(&mut self, input: ParseStream) -> syn::Result<()> { + let ident = input.parse::()?; + + if input.peek(token::Paren) { + let content; + parenthesized!(content in input); + let path = content.parse::()?; + self.clone.merge(TraitImpl::Custom(path, ident.span))?; + } else { + self.clone = TraitImpl::Implemented(ident.span); + } + + Ok(()) + } + /// Parse special `Debug` registration. /// /// Examples: @@ -383,9 +401,11 @@ impl ContainerAttributes { // Override `lit` if this is a `FromReflect` derive. // This typically means a user is opting out of the default implementation // from the `Reflect` derive and using the `FromReflect` derive directly instead. - (trait_ == ReflectTraitToImpl::FromReflect) - .then(|| LitBool::new(true, Span::call_site())) - .unwrap_or_else(|| lit.clone()) + if trait_ == ReflectTraitToImpl::FromReflect { + LitBool::new(true, Span::call_site()) + } else { + lit.clone() + } })?; if let Some(existing) = &self.from_reflect_attrs.auto_derive { @@ -416,9 +436,11 @@ impl ContainerAttributes { // Override `lit` if this is a `FromReflect` derive. 
// This typically means a user is opting out of the default implementation // from the `Reflect` derive and using the `FromReflect` derive directly instead. - (trait_ == ReflectTraitToImpl::TypePath) - .then(|| LitBool::new(true, Span::call_site())) - .unwrap_or_else(|| lit.clone()) + if trait_ == ReflectTraitToImpl::TypePath { + LitBool::new(true, Span::call_site()) + } else { + lit.clone() + } })?; if let Some(existing) = &self.type_path_attrs.auto_derive { @@ -529,6 +551,24 @@ impl ContainerAttributes { } } + pub fn get_clone_impl(&self, bevy_reflect_path: &Path) -> Option { + match &self.clone { + &TraitImpl::Implemented(span) => Some(quote_spanned! {span=> + #[inline] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#FQClone::clone(self))) + } + }), + &TraitImpl::Custom(ref impl_fn, span) => Some(quote_spanned! {span=> + #[inline] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#impl_fn(self))) + } + }), + TraitImpl::NotImplemented => None, + } + } + pub fn custom_attributes(&self) -> &CustomAttributes { &self.custom_attributes } diff --git a/crates/bevy_reflect/derive/src/derive_data.rs b/crates/bevy_reflect/derive/src/derive_data.rs index e739c91ebb912..f825cb29050e1 100644 --- a/crates/bevy_reflect/derive/src/derive_data.rs +++ b/crates/bevy_reflect/derive/src/derive_data.rs @@ -12,13 +12,17 @@ use crate::{ where_clause_options::WhereClauseOptions, REFLECT_ATTRIBUTE_NAME, TYPE_NAME_ATTRIBUTE_NAME, TYPE_PATH_ATTRIBUTE_NAME, }; -use quote::{quote, ToTokens}; +use quote::{format_ident, quote, ToTokens}; use syn::token::Comma; +use crate::enum_utility::{EnumVariantOutputData, ReflectCloneVariantBuilder, VariantBuilder}; +use 
crate::field_attributes::CloneBehavior; use crate::generics::generate_generics; +use bevy_macro_utils::fq_std::{FQClone, FQOption, FQResult}; use syn::{ parse_str, punctuated::Punctuated, spanned::Spanned, Data, DeriveInput, Field, Fields, - GenericParam, Generics, Ident, LitStr, Meta, Path, PathSegment, Type, TypeParam, Variant, + GenericParam, Generics, Ident, LitStr, Member, Meta, Path, PathSegment, Type, TypeParam, + Variant, }; pub(crate) enum ReflectDerive<'a> { @@ -266,7 +270,7 @@ impl<'a> ReflectDerive<'a> { { return Err(syn::Error::new( meta.type_path().span(), - format!("a #[{TYPE_PATH_ATTRIBUTE_NAME} = \"...\"] attribute must be specified when using {provenance}") + format!("a #[{TYPE_PATH_ATTRIBUTE_NAME} = \"...\"] attribute must be specified when using {provenance}"), )); } @@ -546,6 +550,31 @@ impl<'a> StructField<'a> { pub fn attrs(&self) -> &FieldAttributes { &self.attrs } + + /// Generates a [`Member`] based on this field. + /// + /// If the field is unnamed, the declaration index is used. + /// This allows this member to be used for both active and ignored fields. + pub fn to_member(&self) -> Member { + match &self.data.ident { + Some(ident) => Member::Named(ident.clone()), + None => Member::Unnamed(self.declaration_index.into()), + } + } + + /// Returns a token stream for generating a `FieldId` for this field. + pub fn field_id(&self, bevy_reflect_path: &Path) -> proc_macro2::TokenStream { + match &self.data.ident { + Some(ident) => { + let name = ident.to_string(); + quote!(#bevy_reflect_path::FieldId::Named(#bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed(#name))) + } + None => { + let index = self.declaration_index; + quote!(#bevy_reflect_path::FieldId::Unnamed(#index)) + } + } + } } impl<'a> ReflectStruct<'a> { @@ -655,6 +684,135 @@ impl<'a> ReflectStruct<'a> { #bevy_reflect_path::TypeInfo::#info_variant(#info) } } + /// Returns the `Reflect::reflect_clone` impl, if any, as a `TokenStream`. 
+ pub fn get_clone_impl(&self) -> Option { + let bevy_reflect_path = self.meta().bevy_reflect_path(); + + if let container_clone @ Some(_) = self.meta().attrs().get_clone_impl(bevy_reflect_path) { + return container_clone; + } + + let mut tokens = proc_macro2::TokenStream::new(); + + for field in self.fields().iter() { + let field_ty = field.reflected_type(); + let member = field.to_member(); + let accessor = self.access_for_field(field, false); + + match &field.attrs.clone { + CloneBehavior::Default => { + let value = if field.attrs.ignore.is_ignored() { + let field_id = field.field_id(bevy_reflect_path); + + quote! { + return #FQResult::Err(#bevy_reflect_path::ReflectCloneError::FieldNotCloneable { + field: #field_id, + variant: #FQOption::None, + container_type_path: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed( + ::type_path() + ) + }) + } + } else { + quote! { + #bevy_reflect_path::PartialReflect::reflect_clone(#accessor)? + .take() + .map_err(|value| #bevy_reflect_path::ReflectCloneError::FailedDowncast { + expected: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed( + <#field_ty as #bevy_reflect_path::TypePath>::type_path() + ), + received: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Owned( + #bevy_reflect_path::__macro_exports::alloc_utils::ToString::to_string( + #bevy_reflect_path::DynamicTypePath::reflect_type_path(&*value) + ) + ), + })? + } + }; + + tokens.extend(quote! { + #member: #value, + }); + } + CloneBehavior::Trait => { + tokens.extend(quote! { + #member: #FQClone::clone(#accessor), + }); + } + CloneBehavior::Func(clone_fn) => { + tokens.extend(quote! { + #member: #clone_fn(#accessor), + }); + } + } + } + + let ctor = match self.meta.remote_ty() { + Some(ty) => { + let ty = ty.as_expr_path().ok()?.to_token_stream(); + quote! { + Self(#ty { + #tokens + }) + } + } + None => { + quote! { + Self { + #tokens + } + } + } + }; + + Some(quote! 
{ + #[inline] + #[allow(unreachable_code, reason = "Ignored fields without a `clone` attribute will early-return with an error")] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#ctor)) + } + }) + } + + /// Generates an accessor for the given field. + /// + /// The mutability of the access can be controlled by the `is_mut` parameter. + /// + /// Generally, this just returns something like `&self.field`. + /// However, if the struct is a remote wrapper, this then becomes `&self.0.field` in order to access the field on the inner type. + /// + /// If the field itself is a remote type, the above accessor is further wrapped in a call to `ReflectRemote::as_wrapper[_mut]`. + pub fn access_for_field( + &self, + field: &StructField<'a>, + is_mutable: bool, + ) -> proc_macro2::TokenStream { + let bevy_reflect_path = self.meta().bevy_reflect_path(); + let member = field.to_member(); + + let prefix_tokens = if is_mutable { quote!(&mut) } else { quote!(&) }; + + let accessor = if self.meta.is_remote_wrapper() { + quote!(self.0.#member) + } else { + quote!(self.#member) + }; + + match &field.attrs.remote { + Some(wrapper_ty) => { + let method = if is_mutable { + format_ident!("as_wrapper_mut") + } else { + format_ident!("as_wrapper") + }; + + quote! { + <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::#method(#prefix_tokens #accessor) + } + } + None => quote!(#prefix_tokens #accessor), + } + } } impl<'a> ReflectEnum<'a> { @@ -757,6 +915,51 @@ impl<'a> ReflectEnum<'a> { #bevy_reflect_path::TypeInfo::Enum(#info) } } + + /// Returns the `Reflect::reflect_clone` impl, if any, as a `TokenStream`. 
+ pub fn get_clone_impl(&self) -> Option { + let bevy_reflect_path = self.meta().bevy_reflect_path(); + + if let container_clone @ Some(_) = self.meta().attrs().get_clone_impl(bevy_reflect_path) { + return container_clone; + } + + let this = Ident::new("this", Span::call_site()); + let EnumVariantOutputData { + variant_patterns, + variant_constructors, + .. + } = ReflectCloneVariantBuilder::new(self).build(&this); + + let inner = quote! { + match #this { + #(#variant_patterns => #variant_constructors),* + } + }; + + let body = if variant_patterns.is_empty() { + // enum variant is empty, so &self will never exist + quote!(unreachable!()) + } else if self.meta.is_remote_wrapper() { + quote! { + let #this = ::as_remote(self); + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(::into_wrapper(#inner))) + } + } else { + quote! { + let #this = self; + #FQResult::Ok(#bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#inner)) + } + }; + + Some(quote! { + #[inline] + #[allow(unreachable_code, reason = "Ignored fields without a `clone` attribute will early-return with an error")] + fn reflect_clone(&self) -> #FQResult<#bevy_reflect_path::__macro_exports::alloc_utils::Box, #bevy_reflect_path::ReflectCloneError> { + #body + } + }) + } } impl<'a> EnumVariant<'a> { @@ -897,7 +1100,7 @@ pub(crate) enum ReflectTypePath<'a> { reason = "Not currently used but may be useful in the future due to its generality." 
)] Anonymous { - qualified_type: Type, + qualified_type: Box, long_type_path: StringExpr, short_type_path: StringExpr, }, diff --git a/crates/bevy_reflect/derive/src/enum_utility.rs b/crates/bevy_reflect/derive/src/enum_utility.rs index f4b1e5ede8b77..5571b861a6a81 100644 --- a/crates/bevy_reflect/derive/src/enum_utility.rs +++ b/crates/bevy_reflect/derive/src/enum_utility.rs @@ -1,16 +1,21 @@ +use crate::field_attributes::CloneBehavior; use crate::{ derive_data::ReflectEnum, derive_data::StructField, field_attributes::DefaultBehavior, ident::ident_or_index, }; -use bevy_macro_utils::fq_std::{FQDefault, FQOption}; +use bevy_macro_utils::fq_std::{FQClone, FQDefault, FQOption, FQResult}; use proc_macro2::{Ident, TokenStream}; -use quote::{format_ident, quote}; +use quote::{format_ident, quote, ToTokens}; pub(crate) struct EnumVariantOutputData { /// The names of each variant as a string. /// /// For example, `Some` and `None` for the `Option` enum. pub variant_names: Vec, + /// The pattern matching portion of each variant. + /// + /// For example, `Option::Some { 0: _0 }` and `Option::None {}` for the `Option` enum. + pub variant_patterns: Vec, /// The constructor portion of each variant. /// /// For example, `Option::Some { 0: value }` and `Option::None {}` for the `Option` enum. 
@@ -139,6 +144,7 @@ pub(crate) trait VariantBuilder: Sized { let variants = self.reflect_enum().variants(); let mut variant_names = Vec::with_capacity(variants.len()); + let mut variant_patterns = Vec::with_capacity(variants.len()); let mut variant_constructors = Vec::with_capacity(variants.len()); for variant in variants { @@ -148,7 +154,10 @@ pub(crate) trait VariantBuilder: Sized { let fields = variant.fields(); - let field_constructors = fields.iter().map(|field| { + let mut field_patterns = Vec::with_capacity(fields.len()); + let mut field_constructors = Vec::with_capacity(fields.len()); + + for field in fields { let member = ident_or_index(field.data.ident.as_ref(), field.declaration_index); let alias = format_ident!("_{}", member); @@ -164,12 +173,18 @@ pub(crate) trait VariantBuilder: Sized { self.on_active_field(this, variant_field) }; - let constructor = quote! { + field_patterns.push(quote! { + #member: #alias + }); + + field_constructors.push(quote! { #member: #value - }; + }); + } - constructor - }); + let pattern = quote! { + #variant_path { #( #field_patterns ),* } + }; let constructor = quote! { #variant_path { @@ -178,11 +193,13 @@ pub(crate) trait VariantBuilder: Sized { }; variant_names.push(variant_name); + variant_patterns.push(pattern); variant_constructors.push(constructor); } EnumVariantOutputData { variant_names, + variant_patterns, variant_constructors, } } @@ -275,3 +292,103 @@ impl<'a> VariantBuilder for TryApplyVariantBuilder<'a> { } } } + +/// Generates the enum variant output data needed to build the `Reflect::reflect_clone` implementation. 
+pub(crate) struct ReflectCloneVariantBuilder<'a> { + reflect_enum: &'a ReflectEnum<'a>, +} + +impl<'a> ReflectCloneVariantBuilder<'a> { + pub fn new(reflect_enum: &'a ReflectEnum) -> Self { + Self { reflect_enum } + } +} + +impl<'a> VariantBuilder for ReflectCloneVariantBuilder<'a> { + fn reflect_enum(&self) -> &ReflectEnum { + self.reflect_enum + } + + fn access_field(&self, _ident: &Ident, field: VariantField) -> TokenStream { + let alias = field.alias; + quote!(#FQOption::Some(#alias)) + } + + fn unwrap_field(&self, field: VariantField) -> TokenStream { + let alias = field.alias; + quote!(#alias.unwrap()) + } + + fn construct_field(&self, field: VariantField) -> TokenStream { + let bevy_reflect_path = self.reflect_enum.meta().bevy_reflect_path(); + + let field_ty = field.field.reflected_type(); + + let alias = field.alias; + let alias = match &field.field.attrs.remote { + Some(wrapper_ty) => { + quote! { + <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::as_wrapper(#alias) + } + } + None => alias.to_token_stream(), + }; + + match &field.field.attrs.clone { + CloneBehavior::Default => { + quote! { + #bevy_reflect_path::PartialReflect::reflect_clone(#alias)? + .take() + .map_err(|value| #bevy_reflect_path::ReflectCloneError::FailedDowncast { + expected: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed( + <#field_ty as #bevy_reflect_path::TypePath>::type_path() + ), + received: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Owned( + #bevy_reflect_path::__macro_exports::alloc_utils::ToString::to_string( + #bevy_reflect_path::DynamicTypePath::reflect_type_path(&*value) + ) + ), + })? + } + } + CloneBehavior::Trait => { + quote! { + #FQClone::clone(#alias) + } + } + CloneBehavior::Func(clone_fn) => { + quote! 
{ + #clone_fn(#alias) + } + } + } + } + + fn on_active_field(&self, _this: &Ident, field: VariantField) -> TokenStream { + self.construct_field(field) + } + + fn on_ignored_field(&self, field: VariantField) -> TokenStream { + let bevy_reflect_path = self.reflect_enum.meta().bevy_reflect_path(); + let variant_name = field.variant_name; + let alias = field.alias; + + match &field.field.attrs.clone { + CloneBehavior::Default => { + let field_id = field.field.field_id(bevy_reflect_path); + + quote! { + return #FQResult::Err( + #bevy_reflect_path::ReflectCloneError::FieldNotCloneable { + field: #field_id, + variant: #FQOption::Some(#bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed(#variant_name)), + container_type_path: #bevy_reflect_path::__macro_exports::alloc_utils::Cow::Borrowed(::type_path()) + } + ) + } + } + CloneBehavior::Trait => quote! { #FQClone::clone(#alias) }, + CloneBehavior::Func(clone_fn) => quote! { #clone_fn() }, + } + } +} diff --git a/crates/bevy_reflect/derive/src/field_attributes.rs b/crates/bevy_reflect/derive/src/field_attributes.rs index 6cddb50e61bda..06d64791c414e 100644 --- a/crates/bevy_reflect/derive/src/field_attributes.rs +++ b/crates/bevy_reflect/derive/src/field_attributes.rs @@ -14,6 +14,7 @@ use syn::{parse::ParseStream, Attribute, LitStr, Meta, Token, Type}; mod kw { syn::custom_keyword!(ignore); syn::custom_keyword!(skip_serializing); + syn::custom_keyword!(clone); syn::custom_keyword!(default); syn::custom_keyword!(remote); } @@ -22,6 +23,7 @@ pub(crate) const IGNORE_SERIALIZATION_ATTR: &str = "skip_serializing"; pub(crate) const IGNORE_ALL_ATTR: &str = "ignore"; pub(crate) const DEFAULT_ATTR: &str = "default"; +pub(crate) const CLONE_ATTR: &str = "clone"; /// Stores data about if the field should be visible via the Reflect and serialization interfaces /// @@ -54,6 +56,14 @@ impl ReflectIgnoreBehavior { } } +#[derive(Default, Clone)] +pub(crate) enum CloneBehavior { + #[default] + Default, + Trait, + 
Func(syn::ExprPath), +} + /// Controls how the default value is determined for a field. #[derive(Default, Clone)] pub(crate) enum DefaultBehavior { @@ -74,6 +84,8 @@ pub(crate) enum DefaultBehavior { pub(crate) struct FieldAttributes { /// Determines how this field should be ignored if at all. pub ignore: ReflectIgnoreBehavior, + /// Sets the clone behavior of this field. + pub clone: CloneBehavior, /// Sets the default behavior of this field. pub default: DefaultBehavior, /// Custom attributes created via `#[reflect(@...)]`. @@ -121,6 +133,8 @@ impl FieldAttributes { self.parse_ignore(input) } else if lookahead.peek(kw::skip_serializing) { self.parse_skip_serializing(input) + } else if lookahead.peek(kw::clone) { + self.parse_clone(input) } else if lookahead.peek(kw::default) { self.parse_default(input) } else if lookahead.peek(kw::remote) { @@ -164,6 +178,30 @@ impl FieldAttributes { Ok(()) } + /// Parse `clone` attribute. + /// + /// Examples: + /// - `#[reflect(clone)]` + /// - `#[reflect(clone = "path::to::func")]` + fn parse_clone(&mut self, input: ParseStream) -> syn::Result<()> { + if !matches!(self.clone, CloneBehavior::Default) { + return Err(input.error(format!("only one of {:?} is allowed", [CLONE_ATTR]))); + } + + input.parse::()?; + + if input.peek(Token![=]) { + input.parse::()?; + + let lit = input.parse::()?; + self.clone = CloneBehavior::Func(lit.parse()?); + } else { + self.clone = CloneBehavior::Trait; + } + + Ok(()) + } + /// Parse `default` attribute. 
/// /// Examples: diff --git a/crates/bevy_reflect/derive/src/impls/enums.rs b/crates/bevy_reflect/derive/src/impls/enums.rs index 235a7cff1c0e0..3cbd8cce9510e 100644 --- a/crates/bevy_reflect/derive/src/impls/enums.rs +++ b/crates/bevy_reflect/derive/src/impls/enums.rs @@ -70,6 +70,7 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream || Some(quote!(#bevy_reflect_path::enum_partial_eq)), || Some(quote!(#bevy_reflect_path::enum_hash)), ); + let clone_fn = reflect_enum.get_clone_impl(); #[cfg(not(feature = "functions"))] let function_impls = None::; @@ -174,7 +175,7 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream } } - fn clone_dynamic(&self) -> #bevy_reflect_path::DynamicEnum { + fn to_dynamic_enum(&self) -> #bevy_reflect_path::DynamicEnum { #bevy_reflect_path::DynamicEnum::from_ref::(self) } } @@ -185,11 +186,6 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream #FQOption::Some(::type_info()) } - #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { - #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#bevy_reflect_path::Enum::clone_dynamic(self)) - } - #[inline] fn try_apply( &mut self, @@ -261,6 +257,8 @@ pub(crate) fn impl_enum(reflect_enum: &ReflectEnum) -> proc_macro2::TokenStream } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/impls/func/into_return.rs b/crates/bevy_reflect/derive/src/impls/func/into_return.rs index c47c328f9ae13..f7d1e0b8893e4 100644 --- a/crates/bevy_reflect/derive/src/impls/func/into_return.rs +++ b/crates/bevy_reflect/derive/src/impls/func/into_return.rs @@ -14,7 +14,7 @@ pub(crate) fn impl_into_return( quote! 
{ impl #impl_generics #bevy_reflect::func::IntoReturn for #type_path #ty_generics #where_reflect_clause { fn into_return<'into_return>(self) -> #bevy_reflect::func::Return<'into_return> where Self: 'into_return { - #bevy_reflect::func::Return::Owned(Box::new(self)) + #bevy_reflect::func::Return::Owned(#bevy_reflect::__macro_exports::alloc_utils::Box::new(self)) } } diff --git a/crates/bevy_reflect/derive/src/impls/opaque.rs b/crates/bevy_reflect/derive/src/impls/opaque.rs index bdee656a964cc..2a08cadc285c4 100644 --- a/crates/bevy_reflect/derive/src/impls/opaque.rs +++ b/crates/bevy_reflect/derive/src/impls/opaque.rs @@ -32,6 +32,7 @@ pub(crate) fn impl_opaque(meta: &ReflectMeta) -> proc_macro2::TokenStream { let type_path_impl = impl_type_path(meta); let full_reflect_impl = impl_full_reflect(meta, &where_clause_options); let common_methods = common_partial_reflect_methods(meta, || None, || None); + let clone_fn = meta.attrs().get_clone_impl(bevy_reflect_path); let apply_impl = if let Some(remote_ty) = meta.remote_ty() { let ty = remote_ty.type_path(); @@ -77,7 +78,7 @@ pub(crate) fn impl_opaque(meta: &ReflectMeta) -> proc_macro2::TokenStream { } #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { + fn to_dynamic(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#FQClone::clone(self)) } @@ -117,6 +118,8 @@ pub(crate) fn impl_opaque(meta: &ReflectMeta) -> proc_macro2::TokenStream { } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/impls/structs.rs b/crates/bevy_reflect/derive/src/impls/structs.rs index c1db19ca9c80b..7e10de3f2bcbc 100644 --- a/crates/bevy_reflect/derive/src/impls/structs.rs +++ b/crates/bevy_reflect/derive/src/impls/structs.rs @@ -47,6 +47,7 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS || Some(quote!(#bevy_reflect_path::struct_partial_eq)), || None, ); + let 
clone_fn = reflect_struct.get_clone_impl(); #[cfg(not(feature = "functions"))] let function_impls = None::; @@ -119,10 +120,10 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS #bevy_reflect_path::FieldIter::new(self) } - fn clone_dynamic(&self) -> #bevy_reflect_path::DynamicStruct { + fn to_dynamic_struct(&self) -> #bevy_reflect_path::DynamicStruct { let mut dynamic: #bevy_reflect_path::DynamicStruct = #FQDefault::default(); dynamic.set_represented_type(#bevy_reflect_path::PartialReflect::get_represented_type_info(self)); - #(dynamic.insert_boxed(#field_names, #bevy_reflect_path::PartialReflect::clone_value(#fields_ref));)* + #(dynamic.insert_boxed(#field_names, #bevy_reflect_path::PartialReflect::to_dynamic(#fields_ref));)* dynamic } } @@ -133,11 +134,6 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS #FQOption::Some(::type_info()) } - #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { - #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#bevy_reflect_path::Struct::clone_dynamic(self)) - } - #[inline] fn try_apply( &mut self, @@ -179,6 +175,8 @@ pub(crate) fn impl_struct(reflect_struct: &ReflectStruct) -> proc_macro2::TokenS } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/impls/tuple_structs.rs b/crates/bevy_reflect/derive/src/impls/tuple_structs.rs index a0037c64ca57e..90c3555230cde 100644 --- a/crates/bevy_reflect/derive/src/impls/tuple_structs.rs +++ b/crates/bevy_reflect/derive/src/impls/tuple_structs.rs @@ -37,6 +37,7 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: || Some(quote!(#bevy_reflect_path::tuple_struct_partial_eq)), || None, ); + let clone_fn = reflect_struct.get_clone_impl(); #[cfg(not(feature = "functions"))] let function_impls = None::; @@ -86,10 +87,10 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: 
#bevy_reflect_path::TupleStructFieldIter::new(self) } - fn clone_dynamic(&self) -> #bevy_reflect_path::DynamicTupleStruct { + fn to_dynamic_tuple_struct(&self) -> #bevy_reflect_path::DynamicTupleStruct { let mut dynamic: #bevy_reflect_path::DynamicTupleStruct = #FQDefault::default(); dynamic.set_represented_type(#bevy_reflect_path::PartialReflect::get_represented_type_info(self)); - #(dynamic.insert_boxed(#bevy_reflect_path::PartialReflect::clone_value(#fields_ref));)* + #(dynamic.insert_boxed(#bevy_reflect_path::PartialReflect::to_dynamic(#fields_ref));)* dynamic } } @@ -99,10 +100,6 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: fn get_represented_type_info(&self) -> #FQOption<&'static #bevy_reflect_path::TypeInfo> { #FQOption::Some(::type_info()) } - #[inline] - fn clone_value(&self) -> #bevy_reflect_path::__macro_exports::alloc_utils::Box { - #bevy_reflect_path::__macro_exports::alloc_utils::Box::new(#bevy_reflect_path::TupleStruct::clone_dynamic(self)) - } #[inline] fn try_apply( @@ -144,6 +141,8 @@ pub(crate) fn impl_tuple_struct(reflect_struct: &ReflectStruct) -> proc_macro2:: } #common_methods + + #clone_fn } } } diff --git a/crates/bevy_reflect/derive/src/lib.rs b/crates/bevy_reflect/derive/src/lib.rs index 276371427b620..2d9dfca681568 100644 --- a/crates/bevy_reflect/derive/src/lib.rs +++ b/crates/bevy_reflect/derive/src/lib.rs @@ -156,20 +156,25 @@ fn match_reflect_impls(ast: DeriveInput, source: ReflectImplSource) -> TokenStre /// /// There are a few "special" identifiers that work a bit differently: /// +/// * `#[reflect(Clone)]` will force the implementation of `Reflect::reflect_clone` to rely on +/// the type's [`Clone`] implementation. +/// A custom implementation may be provided using `#[reflect(Clone(my_clone_func))]` where +/// `my_clone_func` is the path to a function matching the signature: +/// `(&Self) -> Self`. 
/// * `#[reflect(Debug)]` will force the implementation of `Reflect::reflect_debug` to rely on /// the type's [`Debug`] implementation. /// A custom implementation may be provided using `#[reflect(Debug(my_debug_func))]` where /// `my_debug_func` is the path to a function matching the signature: -/// `(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result`. +/// `(&Self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result`. /// * `#[reflect(PartialEq)]` will force the implementation of `Reflect::reflect_partial_eq` to rely on /// the type's [`PartialEq`] implementation. /// A custom implementation may be provided using `#[reflect(PartialEq(my_partial_eq_func))]` where /// `my_partial_eq_func` is the path to a function matching the signature: -/// `(&self, value: &dyn #bevy_reflect_path::Reflect) -> bool`. +/// `(&Self, value: &dyn #bevy_reflect_path::Reflect) -> bool`. /// * `#[reflect(Hash)]` will force the implementation of `Reflect::reflect_hash` to rely on /// the type's [`Hash`] implementation. /// A custom implementation may be provided using `#[reflect(Hash(my_hash_func))]` where -/// `my_hash_func` is the path to a function matching the signature: `(&self) -> u64`. +/// `my_hash_func` is the path to a function matching the signature: `(&Self) -> u64`. /// * `#[reflect(Default)]` will register the `ReflectDefault` type data as normal. /// However, it will also affect how certain other operations are performed in order /// to improve performance and/or robustness. @@ -339,6 +344,18 @@ fn match_reflect_impls(ast: DeriveInput, source: ReflectImplSource) -> TokenStre /// What this does is register the `SerializationData` type within the `GetTypeRegistration` implementation, /// which will be used by the reflection serializers to determine whether or not the field is serializable. /// +/// ## `#[reflect(clone)]` +/// +/// This attribute affects the `Reflect::reflect_clone` implementation. 
+/// +/// Without this attribute, the implementation will rely on the field's own `Reflect::reflect_clone` implementation. +/// When this attribute is present, the implementation will instead use the field's `Clone` implementation directly. +/// +/// The attribute may also take the path to a custom function like `#[reflect(clone = "path::to::my_clone_func")]`, +/// where `my_clone_func` matches the signature `(&Self) -> Self`. +/// +/// This attribute does nothing if the containing struct/enum has the `#[reflect(Clone)]` attribute. +/// /// ## `#[reflect(@...)]` /// /// This attribute can be used to register custom attributes to the field's `TypeInfo`. diff --git a/crates/bevy_reflect/derive/src/string_expr.rs b/crates/bevy_reflect/derive/src/string_expr.rs index cc48a90b91735..dc878f39a9fd4 100644 --- a/crates/bevy_reflect/derive/src/string_expr.rs +++ b/crates/bevy_reflect/derive/src/string_expr.rs @@ -80,7 +80,7 @@ impl StringExpr { let owned = self.into_owned(); let borrowed = other.into_borrowed(); Self::Owned(quote! { - #owned + #borrowed + ::core::ops::Add::<&str>::add(#owned, #borrowed) }) } } diff --git a/crates/bevy_reflect/derive/src/struct_utility.rs b/crates/bevy_reflect/derive/src/struct_utility.rs index 09604419b6043..9bfd72de60596 100644 --- a/crates/bevy_reflect/derive/src/struct_utility.rs +++ b/crates/bevy_reflect/derive/src/struct_utility.rs @@ -1,5 +1,4 @@ -use crate::{derive_data::StructField, ReflectStruct}; -use quote::quote; +use crate::ReflectStruct; /// A helper struct for creating remote-aware field accessors. /// @@ -20,27 +19,15 @@ pub(crate) struct FieldAccessors { impl FieldAccessors { pub fn new(reflect_struct: &ReflectStruct) -> Self { - let bevy_reflect_path = reflect_struct.meta().bevy_reflect_path(); - let fields_ref = Self::get_fields(reflect_struct, |field, accessor| { - match &field.attrs.remote { - Some(wrapper_ty) => { - quote! 
{ - <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::as_wrapper(&#accessor) - } - } - None => quote!(& #accessor), - } - }); - let fields_mut = Self::get_fields(reflect_struct, |field, accessor| { - match &field.attrs.remote { - Some(wrapper_ty) => { - quote! { - <#wrapper_ty as #bevy_reflect_path::ReflectRemote>::as_wrapper_mut(&mut #accessor) - } - } - None => quote!(&mut #accessor), - } - }); + let (fields_ref, fields_mut): (Vec<_>, Vec<_>) = reflect_struct + .active_fields() + .map(|field| { + ( + reflect_struct.access_for_field(field, false), + reflect_struct.access_for_field(field, true), + ) + }) + .unzip(); let field_count = fields_ref.len(); let field_indices = (0..field_count).collect(); @@ -52,30 +39,4 @@ impl FieldAccessors { field_count, } } - - fn get_fields( - reflect_struct: &ReflectStruct, - mut wrapper_fn: F, - ) -> Vec - where - F: FnMut(&StructField, proc_macro2::TokenStream) -> proc_macro2::TokenStream, - { - let is_remote = reflect_struct.meta().is_remote_wrapper(); - reflect_struct - .active_fields() - .map(|field| { - let member = crate::ident::ident_or_index( - field.data.ident.as_ref(), - field.declaration_index, - ); - let accessor = if is_remote { - quote!(self.0.#member) - } else { - quote!(self.#member) - }; - - wrapper_fn(field, accessor) - }) - .collect::>() - } } diff --git a/crates/bevy_reflect/derive/src/trait_reflection.rs b/crates/bevy_reflect/derive/src/trait_reflection.rs index 24c452f2f1249..8df40e47a5fd0 100644 --- a/crates/bevy_reflect/derive/src/trait_reflection.rs +++ b/crates/bevy_reflect/derive/src/trait_reflection.rs @@ -1,7 +1,4 @@ -use bevy_macro_utils::{ - fq_std::{FQClone, FQOption, FQResult}, - BevyManifest, -}; +use bevy_macro_utils::fq_std::{FQClone, FQOption, FQResult}; use proc_macro::TokenStream; use quote::quote; use syn::{parse::Parse, parse_macro_input, Attribute, ItemTrait, Token}; @@ -34,7 +31,7 @@ pub(crate) fn reflect_trait(_args: &TokenStream, input: TokenStream) -> TokenStr let trait_ident = 
&item_trait.ident; let trait_vis = &item_trait.vis; let reflect_trait_ident = crate::ident::get_reflect_ident(&item_trait.ident.to_string()); - let bevy_reflect_path = BevyManifest::shared().get_path("bevy_reflect"); + let bevy_reflect_path = crate::meta::get_bevy_reflect_path(); let struct_doc = format!( " A type generated by the #[reflect_trait] macro for the `{trait_ident}` trait.\n\n This allows casting from `dyn Reflect` to `dyn {trait_ident}`.", diff --git a/crates/bevy_reflect/examples/reflect_docs.rs b/crates/bevy_reflect/examples/reflect_docs.rs index 10852185c00d8..d52589cc1dc4f 100644 --- a/crates/bevy_reflect/examples/reflect_docs.rs +++ b/crates/bevy_reflect/examples/reflect_docs.rs @@ -6,6 +6,8 @@ //! //! These scenarios can readily be achieved by using `bevy_reflect` with the `documentation` feature. +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] + use bevy_reflect::{Reflect, TypeInfo, Typed}; fn main() { diff --git a/crates/bevy_reflect/src/array.rs b/crates/bevy_reflect/src/array.rs index 0753c0e345c1a..9ad906cfce1c5 100644 --- a/crates/bevy_reflect/src/array.rs +++ b/crates/bevy_reflect/src/array.rs @@ -1,8 +1,8 @@ use crate::generics::impl_generic_info_methods; use crate::{ - self as bevy_reflect, type_info::impl_type_methods, utility::reflect_hasher, ApplyError, - Generics, MaybeTyped, PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, - ReflectRef, Type, TypeInfo, TypePath, + type_info::impl_type_methods, utility::reflect_hasher, ApplyError, Generics, MaybeTyped, + PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Type, TypeInfo, + TypePath, }; use alloc::{boxed::Box, vec::Vec}; use bevy_reflect_derive::impl_type_path; @@ -69,10 +69,16 @@ pub trait Array: PartialReflect { fn drain(self: Box) -> Vec>; /// Clones the list, producing a [`DynamicArray`]. 
+ #[deprecated(since = "0.16.0", note = "use `to_dynamic_array` instead")] fn clone_dynamic(&self) -> DynamicArray { + self.to_dynamic_array() + } + + /// Creates a new [`DynamicArray`] from this array. + fn to_dynamic_array(&self) -> DynamicArray { DynamicArray { represented_type: self.get_represented_type_info(), - values: self.iter().map(PartialReflect::clone_value).collect(), + values: self.iter().map(PartialReflect::to_dynamic).collect(), } } @@ -175,11 +181,6 @@ impl DynamicArray { } } - #[deprecated(since = "0.15.0", note = "use from_iter")] - pub fn from_vec(values: Vec) -> Self { - Self::from_iter(values) - } - /// Sets the [type] to be represented by this `DynamicArray`. /// /// # Panics @@ -261,11 +262,6 @@ impl PartialReflect for DynamicArray { ReflectOwned::Array(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { array_hash(self) @@ -312,18 +308,6 @@ impl Array for DynamicArray { fn drain(self: Box) -> Vec> { self.values.into_vec() } - - #[inline] - fn clone_dynamic(&self) -> DynamicArray { - DynamicArray { - represented_type: self.represented_type, - values: self - .values - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl FromIterator> for DynamicArray { @@ -513,6 +497,8 @@ pub fn array_debug(dyn_array: &dyn Array, f: &mut Formatter<'_>) -> core::fmt::R #[cfg(test)] mod tests { use crate::Reflect; + use alloc::boxed::Box; + #[test] fn next_index_increment() { const SIZE: usize = if cfg!(debug_assertions) { diff --git a/crates/bevy_reflect/src/attributes.rs b/crates/bevy_reflect/src/attributes.rs index 0e751fa57a258..a6edefab25c65 100644 --- a/crates/bevy_reflect/src/attributes.rs +++ b/crates/bevy_reflect/src/attributes.rs @@ -152,7 +152,6 @@ macro_rules! impl_custom_attribute_methods { $self.custom_attributes().get::() } - #[allow(rustdoc::redundant_explicit_links)] /// Gets a custom attribute by its [`TypeId`](core::any::TypeId). 
/// /// This is the dynamic equivalent of [`get_attribute`](Self::get_attribute). @@ -179,8 +178,8 @@ pub(crate) use impl_custom_attribute_methods; #[cfg(test)] mod tests { use super::*; - use crate as bevy_reflect; use crate::{type_info::Typed, TypeInfo, VariantInfo}; + use alloc::{format, string::String}; use core::ops::RangeInclusive; #[derive(Reflect, PartialEq, Debug)] diff --git a/crates/bevy_reflect/src/enums/dynamic_enum.rs b/crates/bevy_reflect/src/enums/dynamic_enum.rs index 9162ab3610eaf..3380921fbe152 100644 --- a/crates/bevy_reflect/src/enums/dynamic_enum.rs +++ b/crates/bevy_reflect/src/enums/dynamic_enum.rs @@ -1,9 +1,9 @@ use bevy_reflect_derive::impl_type_path; use crate::{ - self as bevy_reflect, enum_debug, enum_hash, enum_partial_eq, ApplyError, DynamicStruct, - DynamicTuple, Enum, PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, - Struct, Tuple, TypeInfo, VariantFieldIter, VariantType, + enum_debug, enum_hash, enum_partial_eq, ApplyError, DynamicStruct, DynamicTuple, Enum, + PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Struct, Tuple, + TypeInfo, VariantFieldIter, VariantType, }; use alloc::{boxed::Box, string::String}; @@ -23,8 +23,8 @@ impl Clone for DynamicVariant { fn clone(&self) -> Self { match self { DynamicVariant::Unit => DynamicVariant::Unit, - DynamicVariant::Tuple(data) => DynamicVariant::Tuple(data.clone_dynamic()), - DynamicVariant::Struct(data) => DynamicVariant::Struct(data.clone_dynamic()), + DynamicVariant::Tuple(data) => DynamicVariant::Tuple(data.to_dynamic_tuple()), + DynamicVariant::Struct(data) => DynamicVariant::Struct(data.to_dynamic_struct()), } } } @@ -140,6 +140,22 @@ impl DynamicEnum { self.variant = variant.into(); } + /// Get a reference to the [`DynamicVariant`] contained in `self`. + pub fn variant(&self) -> &DynamicVariant { + &self.variant + } + + /// Get a mutable reference to the [`DynamicVariant`] contained in `self`. 
+ /// + /// Using the mut reference to switch to a different variant will ___not___ update the + /// internal tracking of the variant name and index. + /// + /// If you want to switch variants, prefer one of the setters: + /// [`DynamicEnum::set_variant`] or [`DynamicEnum::set_variant_with_index`]. + pub fn variant_mut(&mut self) -> &mut DynamicVariant { + &mut self.variant + } + /// Create a [`DynamicEnum`] from an existing one. /// /// This is functionally the same as [`DynamicEnum::from_ref`] except it takes an owned value. @@ -150,7 +166,7 @@ impl DynamicEnum { /// Create a [`DynamicEnum`] from an existing one. /// /// This is functionally the same as [`DynamicEnum::from`] except it takes a reference. - pub fn from_ref(value: &TEnum) -> Self { + pub fn from_ref(value: &TEnum) -> Self { let type_info = value.get_represented_type_info(); let mut dyn_enum = match value.variant_type() { VariantType::Unit => DynamicEnum::new_with_index( @@ -161,7 +177,7 @@ impl DynamicEnum { VariantType::Tuple => { let mut data = DynamicTuple::default(); for field in value.iter_fields() { - data.insert_boxed(field.value().clone_value()); + data.insert_boxed(field.value().to_dynamic()); } DynamicEnum::new_with_index( value.variant_index(), @@ -173,7 +189,7 @@ impl DynamicEnum { let mut data = DynamicStruct::default(); for field in value.iter_fields() { let name = field.name().unwrap(); - data.insert_boxed(name, field.value().clone_value()); + data.insert_boxed(name, field.value().to_dynamic()); } DynamicEnum::new_with_index( value.variant_index(), @@ -339,14 +355,14 @@ impl PartialReflect for DynamicEnum { VariantType::Tuple => { let mut dyn_tuple = DynamicTuple::default(); for field in value.iter_fields() { - dyn_tuple.insert_boxed(field.value().clone_value()); + dyn_tuple.insert_boxed(field.value().to_dynamic()); } DynamicVariant::Tuple(dyn_tuple) } VariantType::Struct => { let mut dyn_struct = DynamicStruct::default(); for field in value.iter_fields() { - 
dyn_struct.insert_boxed(field.name().unwrap(), field.value().clone_value()); + dyn_struct.insert_boxed(field.name().unwrap(), field.value().to_dynamic()); } DynamicVariant::Struct(dyn_struct) } @@ -377,11 +393,6 @@ impl PartialReflect for DynamicEnum { ReflectOwned::Enum(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { enum_hash(self) diff --git a/crates/bevy_reflect/src/enums/enum_trait.rs b/crates/bevy_reflect/src/enums/enum_trait.rs index 696f389f6e5d1..bcbcb300d50c0 100644 --- a/crates/bevy_reflect/src/enums/enum_trait.rs +++ b/crates/bevy_reflect/src/enums/enum_trait.rs @@ -4,8 +4,9 @@ use crate::{ type_info::impl_type_methods, DynamicEnum, Generics, PartialReflect, Type, TypePath, VariantInfo, VariantType, }; -use alloc::{boxed::Box, format, string::String, sync::Arc}; -use bevy_utils::HashMap; +use alloc::{boxed::Box, format, string::String}; +use bevy_platform::collections::HashMap; +use bevy_platform::sync::Arc; use core::slice::Iter; /// A trait used to power [enum-like] operations via [reflection]. @@ -124,7 +125,14 @@ pub trait Enum: PartialReflect { /// The type of the current variant. fn variant_type(&self) -> VariantType; // Clones the enum into a [`DynamicEnum`]. - fn clone_dynamic(&self) -> DynamicEnum; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_enum` instead")] + fn clone_dynamic(&self) -> DynamicEnum { + self.to_dynamic_enum() + } + /// Creates a new [`DynamicEnum`] from this enum. + fn to_dynamic_enum(&self) -> DynamicEnum { + DynamicEnum::from_ref(self) + } /// Returns true if the current variant's type matches the given one. 
fn is_variant(&self, variant_type: VariantType) -> bool { self.variant_type() == variant_type @@ -316,7 +324,6 @@ impl<'a> VariantField<'a> { // Tests that need access to internal fields have to go here rather than in mod.rs #[cfg(test)] mod tests { - use crate as bevy_reflect; use crate::*; #[derive(Reflect, Debug, PartialEq)] diff --git a/crates/bevy_reflect/src/enums/mod.rs b/crates/bevy_reflect/src/enums/mod.rs index 95a94e68e97e1..fd657ffa7f25a 100644 --- a/crates/bevy_reflect/src/enums/mod.rs +++ b/crates/bevy_reflect/src/enums/mod.rs @@ -10,8 +10,8 @@ pub use variants::*; #[cfg(test)] mod tests { - use crate as bevy_reflect; use crate::*; + use alloc::boxed::Box; #[derive(Reflect, Debug, PartialEq)] enum MyEnum { diff --git a/crates/bevy_reflect/src/enums/variants.rs b/crates/bevy_reflect/src/enums/variants.rs index 3397df50ddc92..55ccb8efb1b66 100644 --- a/crates/bevy_reflect/src/enums/variants.rs +++ b/crates/bevy_reflect/src/enums/variants.rs @@ -3,8 +3,8 @@ use crate::{ NamedField, UnnamedField, }; use alloc::boxed::Box; -use alloc::sync::Arc; -use bevy_utils::HashMap; +use bevy_platform::collections::HashMap; +use bevy_platform::sync::Arc; use core::slice::Iter; use thiserror::Error; @@ -359,7 +359,6 @@ impl UnitVariantInfo { #[cfg(test)] mod tests { use super::*; - use crate as bevy_reflect; use crate::{Reflect, Typed}; #[test] diff --git a/crates/bevy_reflect/src/error.rs b/crates/bevy_reflect/src/error.rs new file mode 100644 index 0000000000000..e783a33775a06 --- /dev/null +++ b/crates/bevy_reflect/src/error.rs @@ -0,0 +1,61 @@ +use crate::FieldId; +use alloc::{borrow::Cow, format}; +use thiserror::Error; + +/// An error that occurs when cloning a type via [`PartialReflect::reflect_clone`]. +/// +/// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone +#[derive(Clone, Debug, Error, PartialEq, Eq)] +pub enum ReflectCloneError { + /// The type does not have a custom implementation for [`PartialReflect::reflect_clone`]. 
+ /// + /// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone + #[error("`PartialReflect::reflect_clone` not implemented for `{type_path}`")] + NotImplemented { type_path: Cow<'static, str> }, + /// The type cannot be cloned via [`PartialReflect::reflect_clone`]. + /// + /// This type should be returned when a type is intentionally opting out of reflection cloning. + /// + /// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone + #[error("`{type_path}` cannot be made cloneable for `PartialReflect::reflect_clone`")] + NotCloneable { type_path: Cow<'static, str> }, + /// The field cannot be cloned via [`PartialReflect::reflect_clone`]. + /// + /// When [deriving `Reflect`], this usually means that a field marked with `#[reflect(ignore)]` + /// is missing a `#[reflect(clone)]` attribute. + /// + /// This may be intentional if the field is not meant/able to be cloned. + /// + /// [`PartialReflect::reflect_clone`]: crate::PartialReflect::reflect_clone + /// [deriving `Reflect`]: derive@crate::Reflect + #[error( + "field `{}` cannot be made cloneable for `PartialReflect::reflect_clone` (are you missing a `#[reflect(clone)]` attribute?)", + full_path(.field, .variant.as_deref(), .container_type_path) + )] + FieldNotCloneable { + field: FieldId, + variant: Option>, + container_type_path: Cow<'static, str>, + }, + /// Could not downcast to the expected type. + /// + /// Realistically this should only occur when a type has incorrectly implemented [`Reflect`]. 
+ /// + /// [`Reflect`]: crate::Reflect + #[error("expected downcast to `{expected}`, but received `{received}`")] + FailedDowncast { + expected: Cow<'static, str>, + received: Cow<'static, str>, + }, +} + +fn full_path( + field: &FieldId, + variant: Option<&str>, + container_type_path: &str, +) -> alloc::string::String { + match variant { + Some(variant) => format!("{}::{}::{}", container_type_path, variant, field), + None => format!("{}::{}", container_type_path, field), + } +} diff --git a/crates/bevy_reflect/src/fields.rs b/crates/bevy_reflect/src/fields.rs index ab7c3cd34db84..21d4ccd98a1db 100644 --- a/crates/bevy_reflect/src/fields.rs +++ b/crates/bevy_reflect/src/fields.rs @@ -3,7 +3,9 @@ use crate::{ type_info::impl_type_methods, MaybeTyped, PartialReflect, Type, TypeInfo, TypePath, }; -use alloc::sync::Arc; +use alloc::borrow::Cow; +use bevy_platform::sync::Arc; +use core::fmt::{Display, Formatter}; /// The named field of a reflected struct. #[derive(Clone, Debug)] @@ -129,3 +131,19 @@ impl UnnamedField { impl_custom_attribute_methods!(self.custom_attributes, "field"); } + +/// A representation of a field's accessor. +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum FieldId { + Named(Cow<'static, str>), + Unnamed(usize), +} + +impl Display for FieldId { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + Self::Named(name) => Display::fmt(name, f), + Self::Unnamed(index) => Display::fmt(index, f), + } + } +} diff --git a/crates/bevy_reflect/src/from_reflect.rs b/crates/bevy_reflect/src/from_reflect.rs index 71a0dd571591c..dc113d869f3c7 100644 --- a/crates/bevy_reflect/src/from_reflect.rs +++ b/crates/bevy_reflect/src/from_reflect.rs @@ -112,7 +112,6 @@ impl ReflectFromReflect { /// /// This will convert the object to a concrete type if it wasn't already, and return /// the value as `Box`. 
- #[allow(clippy::wrong_self_convention)] pub fn from_reflect(&self, reflect_value: &dyn PartialReflect) -> Option> { (self.from_reflect)(reflect_value) } diff --git a/crates/bevy_reflect/src/func/args/arg.rs b/crates/bevy_reflect/src/func/args/arg.rs index 60698a3d7e0a7..8ca03aafd36ff 100644 --- a/crates/bevy_reflect/src/func/args/arg.rs +++ b/crates/bevy_reflect/src/func/args/arg.rs @@ -2,12 +2,9 @@ use crate::{ func::args::{ArgError, FromArg, Ownership}, PartialReflect, Reflect, TypePath, }; -use alloc::boxed::Box; +use alloc::{boxed::Box, string::ToString}; use core::ops::Deref; -#[cfg(not(feature = "std"))] -use alloc::{format, vec}; - /// Represents an argument that can be passed to a [`DynamicFunction`] or [`DynamicFunctionMut`]. /// /// [`DynamicFunction`]: crate::func::DynamicFunction @@ -55,7 +52,7 @@ impl<'a> Arg<'a> { /// let a = 1u32; /// let b = 2u32; /// let mut c = 3u32; - /// let mut args = ArgList::new().push_owned(a).push_ref(&b).push_mut(&mut c); + /// let mut args = ArgList::new().with_owned(a).with_ref(&b).with_mut(&mut c); /// /// let a = args.take::().unwrap(); /// assert_eq!(a, 1); @@ -81,7 +78,7 @@ impl<'a> Arg<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let value = 123u32; - /// let mut args = ArgList::new().push_owned(value); + /// let mut args = ArgList::new().with_owned(value); /// let value = args.take_owned::().unwrap(); /// assert_eq!(value, 123); /// ``` @@ -116,7 +113,7 @@ impl<'a> Arg<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let value = 123u32; - /// let mut args = ArgList::new().push_ref(&value); + /// let mut args = ArgList::new().with_ref(&value); /// let value = args.take_ref::().unwrap(); /// assert_eq!(*value, 123); /// ``` @@ -155,7 +152,7 @@ impl<'a> Arg<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let mut value = 123u32; - /// let mut args = ArgList::new().push_mut(&mut value); + /// let mut args = ArgList::new().with_mut(&mut value); /// let value = args.take_mut::().unwrap(); 
/// assert_eq!(*value, 123); /// ``` diff --git a/crates/bevy_reflect/src/func/args/count.rs b/crates/bevy_reflect/src/func/args/count.rs index d5f410f88dfaf..159950ca61110 100644 --- a/crates/bevy_reflect/src/func/args/count.rs +++ b/crates/bevy_reflect/src/func/args/count.rs @@ -264,7 +264,7 @@ mod tests { } #[test] - fn should_allow_removeting_nonexistent_count() { + fn should_allow_removing_nonexistent_count() { let mut count = ArgCount::default(); assert_eq!(count.len(), 0); diff --git a/crates/bevy_reflect/src/func/args/error.rs b/crates/bevy_reflect/src/func/args/error.rs index 65c4caa6e8449..bd32bd5e5aadd 100644 --- a/crates/bevy_reflect/src/func/args/error.rs +++ b/crates/bevy_reflect/src/func/args/error.rs @@ -4,9 +4,6 @@ use thiserror::Error; use crate::func::args::Ownership; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// An error that occurs when converting an [argument]. /// /// [argument]: crate::func::args::Arg diff --git a/crates/bevy_reflect/src/func/args/from_arg.rs b/crates/bevy_reflect/src/func/args/from_arg.rs index ddb1e014eb8b7..88d04aefe7525 100644 --- a/crates/bevy_reflect/src/func/args/from_arg.rs +++ b/crates/bevy_reflect/src/func/args/from_arg.rs @@ -1,8 +1,5 @@ use crate::func::args::{Arg, ArgError}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// A trait for types that can be created from an [`Arg`]. /// /// This trait exists so that types can be automatically converted into an [`Arg`] diff --git a/crates/bevy_reflect/src/func/args/info.rs b/crates/bevy_reflect/src/func/args/info.rs index 3919771f349d4..b1a81f3059a2e 100644 --- a/crates/bevy_reflect/src/func/args/info.rs +++ b/crates/bevy_reflect/src/func/args/info.rs @@ -6,9 +6,6 @@ use crate::{ Type, TypePath, }; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// Type information for an [`Arg`] used in a [`DynamicFunction`] or [`DynamicFunctionMut`]. 
/// /// [`Arg`]: crate::func::args::Arg diff --git a/crates/bevy_reflect/src/func/args/list.rs b/crates/bevy_reflect/src/func/args/list.rs index 145414424f4b1..ee0964e71a644 100644 --- a/crates/bevy_reflect/src/func/args/list.rs +++ b/crates/bevy_reflect/src/func/args/list.rs @@ -10,9 +10,6 @@ use alloc::{ collections::vec_deque::{Iter, VecDeque}, }; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// A list of arguments that can be passed to a [`DynamicFunction`] or [`DynamicFunctionMut`]. /// /// # Example @@ -24,15 +21,15 @@ use alloc::{boxed::Box, format, vec}; /// let mut baz = 789; /// let args = ArgList::new() /// // Push an owned argument -/// .push_owned(foo) +/// .with_owned(foo) /// // Push an owned and boxed argument -/// .push_boxed(Box::new(foo)) +/// .with_boxed(Box::new(foo)) /// // Push a reference argument -/// .push_ref(&bar) +/// .with_ref(&bar) /// // Push a mutable reference argument -/// .push_mut(&mut baz) +/// .with_mut(&mut baz) /// // Push a manually constructed argument -/// .push_arg(ArgValue::Ref(&3.14)); +/// .with_arg(ArgValue::Ref(&3.14)); /// ``` /// /// [arguments]: Arg @@ -61,7 +58,7 @@ impl<'a> ArgList<'a> { /// /// If an argument was previously removed from the beginning of the list, /// this method will also re-index the list. - pub fn push_arg(mut self, arg: ArgValue<'a>) -> Self { + pub fn push_arg(&mut self, arg: ArgValue<'a>) { if self.needs_reindex { for (index, arg) in self.list.iter_mut().enumerate() { arg.set_index(index); @@ -71,6 +68,46 @@ impl<'a> ArgList<'a> { let index = self.list.len(); self.list.push_back(Arg::new(index, arg)); + } + + /// Push an [`ArgValue::Ref`] onto the list with the given reference. + /// + /// If an argument was previously removed from the beginning of the list, + /// this method will also re-index the list. 
+ pub fn push_ref(&mut self, arg: &'a dyn PartialReflect) { + self.push_arg(ArgValue::Ref(arg)); + } + + /// Push an [`ArgValue::Mut`] onto the list with the given mutable reference. + /// + /// If an argument was previously removed from the beginning of the list, + /// this method will also re-index the list. + pub fn push_mut(&mut self, arg: &'a mut dyn PartialReflect) { + self.push_arg(ArgValue::Mut(arg)); + } + + /// Push an [`ArgValue::Owned`] onto the list with the given owned value. + /// + /// If an argument was previously removed from the beginning of the list, + /// this method will also re-index the list. + pub fn push_owned(&mut self, arg: impl PartialReflect) { + self.push_arg(ArgValue::Owned(Box::new(arg))); + } + + /// Push an [`ArgValue::Owned`] onto the list with the given boxed value. + /// + /// If an argument was previously removed from the beginning of the list, + /// this method will also re-index the list. + pub fn push_boxed(&mut self, arg: Box) { + self.push_arg(ArgValue::Owned(arg)); + } + + /// Push an [`ArgValue`] onto the list. + /// + /// If an argument was previously removed from the beginning of the list, + /// this method will also re-index the list. + pub fn with_arg(mut self, arg: ArgValue<'a>) -> Self { + self.push_arg(arg); self } @@ -78,32 +115,32 @@ impl<'a> ArgList<'a> { /// /// If an argument was previously removed from the beginning of the list, /// this method will also re-index the list. - pub fn push_ref(self, arg: &'a dyn PartialReflect) -> Self { - self.push_arg(ArgValue::Ref(arg)) + pub fn with_ref(self, arg: &'a dyn PartialReflect) -> Self { + self.with_arg(ArgValue::Ref(arg)) } /// Push an [`ArgValue::Mut`] onto the list with the given mutable reference. /// /// If an argument was previously removed from the beginning of the list, /// this method will also re-index the list. 
- pub fn push_mut(self, arg: &'a mut dyn PartialReflect) -> Self { - self.push_arg(ArgValue::Mut(arg)) + pub fn with_mut(self, arg: &'a mut dyn PartialReflect) -> Self { + self.with_arg(ArgValue::Mut(arg)) } /// Push an [`ArgValue::Owned`] onto the list with the given owned value. /// /// If an argument was previously removed from the beginning of the list, /// this method will also re-index the list. - pub fn push_owned(self, arg: impl PartialReflect) -> Self { - self.push_arg(ArgValue::Owned(Box::new(arg))) + pub fn with_owned(self, arg: impl PartialReflect) -> Self { + self.with_arg(ArgValue::Owned(Box::new(arg))) } /// Push an [`ArgValue::Owned`] onto the list with the given boxed value. /// /// If an argument was previously removed from the beginning of the list, /// this method will also re-index the list. - pub fn push_boxed(self, arg: Box) -> Self { - self.push_arg(ArgValue::Owned(arg)) + pub fn with_boxed(self, arg: Box) -> Self { + self.with_arg(ArgValue::Owned(arg)) } /// Remove the first argument in the list and return it. 
@@ -126,7 +163,7 @@ impl<'a> ArgList<'a> { /// let a = 1u32; /// let b = 2u32; /// let mut c = 3u32; - /// let mut args = ArgList::new().push_owned(a).push_ref(&b).push_mut(&mut c); + /// let mut args = ArgList::new().with_owned(a).with_ref(&b).with_mut(&mut c); /// /// let a = args.take::().unwrap(); /// assert_eq!(a, 1); @@ -152,7 +189,7 @@ impl<'a> ArgList<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let value = 123u32; - /// let mut args = ArgList::new().push_owned(value); + /// let mut args = ArgList::new().with_owned(value); /// let value = args.take_owned::().unwrap(); /// assert_eq!(value, 123); /// ``` @@ -171,7 +208,7 @@ impl<'a> ArgList<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let value = 123u32; - /// let mut args = ArgList::new().push_ref(&value); + /// let mut args = ArgList::new().with_ref(&value); /// let value = args.take_ref::().unwrap(); /// assert_eq!(*value, 123); /// ``` @@ -190,7 +227,7 @@ impl<'a> ArgList<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let mut value = 123u32; - /// let mut args = ArgList::new().push_mut(&mut value); + /// let mut args = ArgList::new().with_mut(&mut value); /// let value = args.take_mut::().unwrap(); /// assert_eq!(*value, 123); /// ``` @@ -217,7 +254,7 @@ impl<'a> ArgList<'a> { /// let a = 1u32; /// let b = 2u32; /// let mut c = 3u32; - /// let mut args = ArgList::new().push_owned(a).push_ref(&b).push_mut(&mut c); + /// let mut args = ArgList::new().with_owned(a).with_ref(&b).with_mut(&mut c); /// /// let c = args.pop::<&mut u32>().unwrap(); /// assert_eq!(*c, 3); @@ -243,7 +280,7 @@ impl<'a> ArgList<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let value = 123u32; - /// let mut args = ArgList::new().push_owned(value); + /// let mut args = ArgList::new().with_owned(value); /// let value = args.pop_owned::().unwrap(); /// assert_eq!(value, 123); /// ``` @@ -262,7 +299,7 @@ impl<'a> ArgList<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let value = 
123u32; - /// let mut args = ArgList::new().push_ref(&value); + /// let mut args = ArgList::new().with_ref(&value); /// let value = args.pop_ref::().unwrap(); /// assert_eq!(*value, 123); /// ``` @@ -281,7 +318,7 @@ impl<'a> ArgList<'a> { /// ``` /// # use bevy_reflect::func::ArgList; /// let mut value = 123u32; - /// let mut args = ArgList::new().push_mut(&mut value); + /// let mut args = ArgList::new().with_mut(&mut value); /// let value = args.pop_mut::().unwrap(); /// assert_eq!(*value, 123); /// ``` @@ -308,13 +345,14 @@ impl<'a> ArgList<'a> { #[cfg(test)] mod tests { use super::*; + use alloc::string::String; #[test] fn should_push_arguments_in_order() { let args = ArgList::new() - .push_owned(123) - .push_owned(456) - .push_owned(789); + .with_owned(123) + .with_owned(456) + .with_owned(789); assert_eq!(args.len(), 3); assert_eq!(args.list[0].index(), 0); @@ -333,13 +371,13 @@ mod tests { let mut g = String::from("g"); let args = ArgList::new() - .push_arg(ArgValue::Owned(Box::new(a))) - .push_arg(ArgValue::Ref(&b)) - .push_arg(ArgValue::Mut(&mut c)) - .push_owned(d) - .push_boxed(Box::new(e)) - .push_ref(&f) - .push_mut(&mut g); + .with_arg(ArgValue::Owned(Box::new(a))) + .with_arg(ArgValue::Ref(&b)) + .with_arg(ArgValue::Mut(&mut c)) + .with_owned(d) + .with_boxed(Box::new(e)) + .with_ref(&f) + .with_mut(&mut g); assert!(matches!(args.list[0].value(), &ArgValue::Owned(_))); assert!(matches!(args.list[1].value(), &ArgValue::Ref(_))); @@ -358,10 +396,10 @@ mod tests { let mut d = 5.78_f32; let mut args = ArgList::new() - .push_owned(a) - .push_ref(&b) - .push_ref(&c) - .push_mut(&mut d); + .with_owned(a) + .with_ref(&b) + .with_ref(&c) + .with_mut(&mut d); assert_eq!(args.len(), 4); assert_eq!(args.take_owned::().unwrap(), String::from("a")); @@ -379,10 +417,10 @@ mod tests { let mut d = 5.78_f32; let mut args = ArgList::new() - .push_owned(a) - .push_ref(&b) - .push_ref(&c) - .push_mut(&mut d); + .with_owned(a) + .with_ref(&b) + .with_ref(&c) + 
.with_mut(&mut d); assert_eq!(args.len(), 4); assert_eq!(args.pop_mut::().unwrap(), &mut 5.78); @@ -395,9 +433,9 @@ mod tests { #[test] fn should_reindex_on_push_after_take() { let mut args = ArgList::new() - .push_owned(123) - .push_owned(456) - .push_owned(789); + .with_owned(123) + .with_owned(456) + .with_owned(789); assert!(!args.needs_reindex); @@ -408,7 +446,7 @@ mod tests { assert!(args.list[1].value().reflect_partial_eq(&789).unwrap()); assert_eq!(args.list[1].index(), 2); - let args = args.push_owned(123); + let args = args.with_owned(123); assert!(!args.needs_reindex); assert!(args.list[0].value().reflect_partial_eq(&456).unwrap()); assert_eq!(args.list[0].index(), 0); diff --git a/crates/bevy_reflect/src/func/args/ownership.rs b/crates/bevy_reflect/src/func/args/ownership.rs index 448efed9f6b1f..b9395c742f404 100644 --- a/crates/bevy_reflect/src/func/args/ownership.rs +++ b/crates/bevy_reflect/src/func/args/ownership.rs @@ -1,8 +1,5 @@ use core::fmt::{Display, Formatter}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// A trait for getting the ownership of a type. 
/// /// This trait exists so that [`TypedFunction`] can automatically generate diff --git a/crates/bevy_reflect/src/func/dynamic_function.rs b/crates/bevy_reflect/src/func/dynamic_function.rs index d513cb68084b5..7a5da57525487 100644 --- a/crates/bevy_reflect/src/func/dynamic_function.rs +++ b/crates/bevy_reflect/src/func/dynamic_function.rs @@ -1,5 +1,4 @@ use crate::{ - self as bevy_reflect, __macro_exports::RegisterForReflection, func::{ args::{ArgCount, ArgList}, @@ -11,13 +10,11 @@ use crate::{ ApplyError, MaybeTyped, PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypePath, }; -use alloc::{borrow::Cow, boxed::Box, sync::Arc}; +use alloc::{borrow::Cow, boxed::Box}; +use bevy_platform::sync::Arc; use bevy_reflect_derive::impl_type_path; use core::fmt::{Debug, Formatter}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// An [`Arc`] containing a callback to a reflected function. /// /// The `Arc` is used to both ensure that it is `Send + Sync` @@ -57,7 +54,7 @@ type ArcFn<'env> = Arc Fn(ArgList<'a>) -> FunctionResult<'a> + Send /// let mut func: DynamicFunction = add.into_function(); /// /// // Dynamically call it: -/// let args = ArgList::default().push_owned(25_i32).push_owned(75_i32); +/// let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); /// let value = func.call(args).unwrap().unwrap_owned(); /// /// // Check the result: @@ -95,8 +92,21 @@ impl<'env> DynamicFunction<'env> { func: F, info: impl TryInto, ) -> Self { + let arc = Arc::new(func); + + #[cfg(not(target_has_atomic = "ptr"))] + #[expect( + unsafe_code, + reason = "unsized coercion is an unstable feature for non-std types" + )] + // SAFETY: + // - Coercion from `T` to `dyn for<'a> Fn(ArgList<'a>) -> FunctionResult<'a> + Send + Sync + 'env` + // is valid as `T: for<'a> Fn(ArgList<'a>) -> FunctionResult<'a> + Send + Sync + 'env` + // - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw` + let 
arc = unsafe { ArcFn::<'env>::from_raw(Arc::into_raw(arc) as *const _) }; + Self { - internal: DynamicFunctionInternal::new(Arc::new(func), info.try_into().unwrap()), + internal: DynamicFunctionInternal::new(arc, info.try_into().unwrap()), } } @@ -150,12 +160,12 @@ impl<'env> DynamicFunction<'env> { /// func = func.with_overload(add::); /// /// // Test `i32`: - /// let args = ArgList::default().push_owned(25_i32).push_owned(75_i32); + /// let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); /// let result = func.call(args).unwrap().unwrap_owned(); /// assert_eq!(result.try_take::().unwrap(), 100); /// /// // Test `f32`: - /// let args = ArgList::default().push_owned(25.0_f32).push_owned(75.0_f32); + /// let args = ArgList::default().with_owned(25.0_f32).with_owned(75.0_f32); /// let result = func.call(args).unwrap().unwrap_owned(); /// assert_eq!(result.try_take::().unwrap(), 100.0); ///``` @@ -178,15 +188,15 @@ impl<'env> DynamicFunction<'env> { /// func = func.with_overload(add_3); /// /// // Test two arguments: - /// let args = ArgList::default().push_owned(25_i32).push_owned(75_i32); + /// let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); /// let result = func.call(args).unwrap().unwrap_owned(); /// assert_eq!(result.try_take::().unwrap(), 100); /// /// // Test three arguments: /// let args = ArgList::default() - /// .push_owned(25_i32) - /// .push_owned(75_i32) - /// .push_owned(100_i32); + /// .with_owned(25_i32) + /// .with_owned(75_i32) + /// .with_owned(100_i32); /// let result = func.call(args).unwrap().unwrap_owned(); /// assert_eq!(result.try_take::().unwrap(), 200); /// ``` @@ -255,7 +265,7 @@ impl<'env> DynamicFunction<'env> { /// }; /// /// let mut func = add.into_function().with_name("add"); - /// let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + /// let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); /// let result = func.call(args).unwrap().unwrap_owned(); /// 
assert_eq!(result.try_take::().unwrap(), 123); /// ``` @@ -348,7 +358,7 @@ impl Function for DynamicFunction<'static> { self.call(args) } - fn clone_dynamic(&self) -> DynamicFunction<'static> { + fn to_dynamic_function(&self) -> DynamicFunction<'static> { self.clone() } } @@ -385,7 +395,7 @@ impl PartialReflect for DynamicFunction<'static> { fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { match value.reflect_ref() { ReflectRef::Function(func) => { - *self = func.clone_dynamic(); + *self = func.to_dynamic_function(); Ok(()) } _ => Err(ApplyError::MismatchedTypes { @@ -411,10 +421,6 @@ impl PartialReflect for DynamicFunction<'static> { ReflectOwned::Function(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone()) - } - fn reflect_hash(&self) -> Option { None } @@ -473,7 +479,8 @@ mod tests { use crate::func::signature::ArgumentSignature; use crate::func::{FunctionError, IntoReturn, SignatureInfo}; use crate::Type; - use bevy_utils::HashSet; + use alloc::{format, string::String, vec, vec::Vec}; + use bevy_platform::collections::HashSet; use core::ops::Add; #[test] @@ -501,7 +508,7 @@ mod tests { fn should_return_error_on_arg_count_mismatch() { let func = (|a: i32, b: i32| a + b).into_function(); - let args = ArgList::default().push_owned(25_i32); + let args = ArgList::default().with_owned(25_i32); let error = func.call(args).unwrap_err(); assert_eq!( @@ -520,10 +527,10 @@ mod tests { .with_overload(|a: i32, b: i32, c: i32| a + b + c); let args = ArgList::default() - .push_owned(1_i32) - .push_owned(2_i32) - .push_owned(3_i32) - .push_owned(4_i32); + .with_owned(1_i32) + .with_owned(2_i32) + .with_owned(3_i32) + .with_owned(4_i32); let error = func.call(args).unwrap_err(); @@ -551,14 +558,14 @@ mod tests { assert_eq!(greet.name().unwrap(), "greet"); assert_eq!(clone.name().unwrap(), "greet"); - let clone_value = clone - .call(ArgList::default().push_ref(&String::from("world"))) + let cloned_value = clone + 
.call(ArgList::default().with_ref(&String::from("world"))) .unwrap() .unwrap_owned() .try_take::() .unwrap(); - assert_eq!(clone_value, "Hello, world!"); + assert_eq!(cloned_value, "Hello, world!"); } #[test] @@ -566,7 +573,7 @@ mod tests { let mut func: Box = Box::new((|a: i32, b: i32| a + b).into_function()); func.apply(&((|a: i32, b: i32| a * b).into_function())); - let args = ArgList::new().push_owned(5_i32).push_owned(5_i32); + let args = ArgList::new().with_owned(5_i32).with_owned(5_i32); let result = func.reflect_call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 25); } @@ -587,8 +594,8 @@ mod tests { ReflectRef::Function(func) => { let result = func.reflect_call( ArgList::new() - .push_ref(this.as_partial_reflect()) - .push_owned(curr - 1), + .with_ref(this.as_partial_reflect()) + .with_owned(curr - 1), ); let value = result.unwrap().unwrap_owned().try_take::().unwrap(); Ok((curr * value).into_return()) @@ -603,7 +610,7 @@ mod tests { .with_arg::<()>("this"), ); - let args = ArgList::new().push_ref(&factorial).push_owned(5_i32); + let args = ArgList::new().with_ref(&factorial).with_owned(5_i32); let value = factorial.call(args).unwrap().unwrap_owned(); assert_eq!(value.try_take::().unwrap(), 120); } @@ -641,11 +648,11 @@ mod tests { let func = func.with_name("add"); assert_eq!(func.name().unwrap(), "add"); - let args = ArgList::default().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100); - let args = ArgList::default().push_owned(25.0_f32).push_owned(75.0_f32); + let args = ArgList::default().with_owned(25.0_f32).with_owned(75.0_f32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100.0); } @@ -664,11 +671,11 @@ mod tests { let func = add::.into_function().with_overload(add::); - let args = 
ArgList::default().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100); - let args = ArgList::default().push_owned(25.0_f32).push_owned(75.0_f32); + let args = ArgList::default().with_owned(25.0_f32).with_owned(75.0_f32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100.0); } @@ -685,14 +692,14 @@ mod tests { let func = add_2.into_function().with_overload(add_3); - let args = ArgList::default().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100); let args = ArgList::default() - .push_owned(25_i32) - .push_owned(75_i32) - .push_owned(100_i32); + .with_owned(25_i32) + .with_owned(75_i32) + .with_owned(100_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 200); } @@ -728,11 +735,11 @@ mod tests { let func = manual.with_overload(|a: u32, b: u32| a + b); - let args = ArgList::default().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100); - let args = ArgList::default().push_owned(25_u32).push_owned(75_u32); + let args = ArgList::default().with_owned(25_u32).with_owned(75_u32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100); } @@ -745,7 +752,7 @@ mod tests { let func = add::.into_function().with_overload(add::); - let args = ArgList::default().push_owned(25_u32).push_owned(75_u32); + let args = ArgList::default().with_owned(25_u32).with_owned(75_u32); let result = func.call(args); assert_eq!( result.unwrap_err(), diff --git 
a/crates/bevy_reflect/src/func/dynamic_function_internal.rs b/crates/bevy_reflect/src/func/dynamic_function_internal.rs index 427de1263d14a..7e36ec119d869 100644 --- a/crates/bevy_reflect/src/func/dynamic_function_internal.rs +++ b/crates/bevy_reflect/src/func/dynamic_function_internal.rs @@ -1,8 +1,8 @@ use crate::func::args::ArgCount; use crate::func::signature::{ArgListSignature, ArgumentSignature}; use crate::func::{ArgList, FunctionError, FunctionInfo, FunctionOverloadError}; -use alloc::borrow::Cow; -use bevy_utils::HashMap; +use alloc::{borrow::Cow, vec, vec::Vec}; +use bevy_platform::collections::HashMap; use core::fmt::{Debug, Formatter}; /// An internal structure for storing a function and its corresponding [function information]. @@ -246,7 +246,7 @@ mod tests { } #[test] - fn should_merge_overloaed_into_single() { + fn should_merge_overload_into_single() { let mut func_a = DynamicFunctionInternal { functions: vec!['a', 'b'], info: FunctionInfo::new(SignatureInfo::anonymous().with_arg::("arg0")) diff --git a/crates/bevy_reflect/src/func/dynamic_function_mut.rs b/crates/bevy_reflect/src/func/dynamic_function_mut.rs index bcbf6a96337ab..6d8be5ac47b89 100644 --- a/crates/bevy_reflect/src/func/dynamic_function_mut.rs +++ b/crates/bevy_reflect/src/func/dynamic_function_mut.rs @@ -1,9 +1,7 @@ -use alloc::{borrow::Cow, boxed::Box, sync::Arc}; +use alloc::{borrow::Cow, boxed::Box}; +use bevy_platform::sync::Arc; use core::fmt::{Debug, Formatter}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - use crate::func::{ args::{ArgCount, ArgList}, dynamic_function_internal::DynamicFunctionInternal, @@ -53,7 +51,7 @@ type BoxFnMut<'env> = Box FnMut(ArgList<'a>) -> FunctionResult<'a> + /// let mut func: DynamicFunctionMut = replace.into_function_mut(); /// /// // Dynamically call it: -/// let args = ArgList::default().push_owned(1_usize).push_owned(-2_i32); +/// let args = ArgList::default().with_owned(1_usize).with_owned(-2_i32); /// let value = 
func.call(args).unwrap().unwrap_owned(); /// /// // Check the result: @@ -157,11 +155,11 @@ impl<'env> DynamicFunctionMut<'env> { /// func = func.with_overload(add_f32); /// /// // Test `i32`: - /// let args = bevy_reflect::func::ArgList::new().push_owned(123_i32); + /// let args = bevy_reflect::func::ArgList::new().with_owned(123_i32); /// func.call(args).unwrap(); /// /// // Test `f32`: - /// let args = bevy_reflect::func::ArgList::new().push_owned(1.23_f32); + /// let args = bevy_reflect::func::ArgList::new().with_owned(1.23_f32); /// func.call(args).unwrap(); /// /// drop(func); @@ -222,7 +220,7 @@ impl<'env> DynamicFunctionMut<'env> { /// }; /// /// let mut func = add.into_function_mut().with_name("add"); - /// let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + /// let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); /// let result = func.call(args).unwrap().unwrap_owned(); /// assert_eq!(result.try_take::().unwrap(), 100); /// ``` @@ -254,7 +252,7 @@ impl<'env> DynamicFunctionMut<'env> { /// let increment = |amount: i32| count += amount; /// /// let increment_function = increment.into_function_mut(); - /// let args = ArgList::new().push_owned(5_i32); + /// let args = ArgList::new().with_owned(5_i32); /// /// // We need to drop `increment_function` here so that we /// // can regain access to `count`. 
@@ -381,6 +379,7 @@ impl<'env> IntoFunctionMut<'env, ()> for DynamicFunctionMut<'env> { mod tests { use super::*; use crate::func::{FunctionError, IntoReturn, SignatureInfo}; + use alloc::vec; use core::ops::Add; #[test] @@ -409,7 +408,7 @@ mod tests { let mut total = 0; let mut func = (|a: i32, b: i32| total = a + b).into_function_mut(); - let args = ArgList::default().push_owned(25_i32); + let args = ArgList::default().with_owned(25_i32); let error = func.call(args).unwrap_err(); assert_eq!( error, @@ -419,7 +418,7 @@ mod tests { } ); - let args = ArgList::default().push_owned(25_i32); + let args = ArgList::default().with_owned(25_i32); let error = func.call_once(args).unwrap_err(); assert_eq!( error, @@ -457,9 +456,9 @@ mod tests { let mut func = func.with_name("add"); assert_eq!(func.name().unwrap(), "add"); - let args = ArgList::default().push_owned(25_i32); + let args = ArgList::default().with_owned(25_i32); func.call(args).unwrap(); - let args = ArgList::default().push_owned(75_i16); + let args = ArgList::default().with_owned(75_i16); func.call(args).unwrap(); drop(func); @@ -478,11 +477,11 @@ mod tests { let mut func = add::.into_function_mut().with_overload(add::); - let args = ArgList::default().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::default().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100); - let args = ArgList::default().push_owned(25.0_f32).push_owned(75.0_f32); + let args = ArgList::default().with_owned(25.0_f32).with_owned(75.0_f32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_take::().unwrap(), 100.0); } diff --git a/crates/bevy_reflect/src/func/error.rs b/crates/bevy_reflect/src/func/error.rs index ad6796be19f1e..d9d105db1b9d1 100644 --- a/crates/bevy_reflect/src/func/error.rs +++ b/crates/bevy_reflect/src/func/error.rs @@ -4,12 +4,9 @@ use crate::func::{ Return, }; use alloc::borrow::Cow; -use 
bevy_utils::HashSet; +use bevy_platform::collections::HashSet; use thiserror::Error; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// An error that occurs when calling a [`DynamicFunction`] or [`DynamicFunctionMut`]. /// /// [`DynamicFunction`]: crate::func::DynamicFunction diff --git a/crates/bevy_reflect/src/func/function.rs b/crates/bevy_reflect/src/func/function.rs index 0d8e94ca95aff..eb770e9e5002f 100644 --- a/crates/bevy_reflect/src/func/function.rs +++ b/crates/bevy_reflect/src/func/function.rs @@ -8,9 +8,6 @@ use crate::{ use alloc::borrow::Cow; use core::fmt::Debug; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// A trait used to power [function-like] operations via [reflection]. /// /// This trait allows types to be called like regular functions @@ -28,7 +25,7 @@ use alloc::{boxed::Box, format, vec}; /// } /// /// let func: Box = Box::new(add.into_function()); -/// let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); +/// let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); /// let value = func.reflect_call(args).unwrap().unwrap_owned(); /// assert_eq!(value.try_take::().unwrap(), 100); /// ``` @@ -67,13 +64,20 @@ pub trait Function: PartialReflect + Debug { fn reflect_call<'a>(&self, args: ArgList<'a>) -> FunctionResult<'a>; /// Clone this function into a [`DynamicFunction`]. - fn clone_dynamic(&self) -> DynamicFunction<'static>; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_function` instead")] + fn clone_dynamic(&self) -> DynamicFunction<'static> { + self.to_dynamic_function() + } + + /// Creates a new [`DynamicFunction`] from this function. 
+ fn to_dynamic_function(&self) -> DynamicFunction<'static>; } #[cfg(test)] mod tests { use super::*; use crate::func::IntoFunction; + use alloc::boxed::Box; #[test] fn should_call_dyn_function() { @@ -82,7 +86,7 @@ mod tests { } let func: Box = Box::new(add.into_function()); - let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); let value = func.reflect_call(args).unwrap().unwrap_owned(); assert_eq!(value.try_take::().unwrap(), 100); } diff --git a/crates/bevy_reflect/src/func/info.rs b/crates/bevy_reflect/src/func/info.rs index 797b97e7e1bac..53737fd891dbd 100644 --- a/crates/bevy_reflect/src/func/info.rs +++ b/crates/bevy_reflect/src/func/info.rs @@ -1,9 +1,6 @@ -use alloc::{borrow::Cow, vec}; +use alloc::{borrow::Cow, boxed::Box, vec, vec::Vec}; use core::fmt::{Debug, Formatter}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - use crate::{ func::args::{ArgCount, ArgCountOutOfBoundsError, ArgInfo, GetOwnership, Ownership}, func::signature::ArgumentSignature, @@ -615,7 +612,6 @@ macro_rules! impl_typed_function { FunctionInfo::new( create_info::() .with_args({ - #[allow(unused_mut)] let mut _index = 0; vec![ $(ArgInfo::new::<$Arg>({ @@ -641,7 +637,6 @@ macro_rules! impl_typed_function { FunctionInfo::new( create_info::() .with_args({ - #[allow(unused_mut)] let mut _index = 1; vec![ ArgInfo::new::<&Receiver>(0), @@ -668,7 +663,6 @@ macro_rules! impl_typed_function { FunctionInfo::new( create_info::() .with_args({ - #[allow(unused_mut)] let mut _index = 1; vec![ ArgInfo::new::<&mut Receiver>(0), @@ -695,7 +689,6 @@ macro_rules! 
impl_typed_function { FunctionInfo::new( create_info::() .with_args({ - #[allow(unused_mut)] let mut _index = 1; vec![ ArgInfo::new::<&mut Receiver>(0), diff --git a/crates/bevy_reflect/src/func/into_function.rs b/crates/bevy_reflect/src/func/into_function.rs index e913045f8cc2a..be4e2ef9eee6c 100644 --- a/crates/bevy_reflect/src/func/into_function.rs +++ b/crates/bevy_reflect/src/func/into_function.rs @@ -1,8 +1,5 @@ use crate::func::{DynamicFunction, ReflectFn, TypedFunction}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// A trait for types that can be converted into a [`DynamicFunction`]. /// /// This trait is automatically implemented for any type that implements @@ -45,7 +42,7 @@ mod tests { fn should_create_dynamic_function_from_closure() { let c = 23; let func = (|a: i32, b: i32| a + b + c).into_function(); - let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_downcast_ref::(), Some(&123)); } @@ -57,7 +54,7 @@ mod tests { } let func = add.into_function(); - let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_downcast_ref::(), Some(&100)); } diff --git a/crates/bevy_reflect/src/func/into_function_mut.rs b/crates/bevy_reflect/src/func/into_function_mut.rs index 8f7f1b0a6dd1a..a371b366b5338 100644 --- a/crates/bevy_reflect/src/func/into_function_mut.rs +++ b/crates/bevy_reflect/src/func/into_function_mut.rs @@ -1,8 +1,5 @@ use crate::func::{DynamicFunctionMut, ReflectFnMut, TypedFunction}; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// A trait for types that can be converted into a [`DynamicFunctionMut`]. 
/// /// This trait is automatically implemented for any type that implements @@ -51,7 +48,7 @@ mod tests { fn should_create_dynamic_function_mut_from_closure() { let c = 23; let func = (|a: i32, b: i32| a + b + c).into_function(); - let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_downcast_ref::(), Some(&123)); } @@ -60,7 +57,7 @@ mod tests { fn should_create_dynamic_function_mut_from_closure_with_mutable_capture() { let mut total = 0; let func = (|a: i32, b: i32| total = a + b).into_function_mut(); - let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); func.call_once(args).unwrap(); assert_eq!(total, 100); } @@ -72,7 +69,7 @@ mod tests { } let mut func = add.into_function_mut(); - let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); let result = func.call(args).unwrap().unwrap_owned(); assert_eq!(result.try_downcast_ref::(), Some(&100)); } diff --git a/crates/bevy_reflect/src/func/macros.rs b/crates/bevy_reflect/src/func/macros.rs index 410aaba456acd..3fb93a2230610 100644 --- a/crates/bevy_reflect/src/func/macros.rs +++ b/crates/bevy_reflect/src/func/macros.rs @@ -1,6 +1,3 @@ -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// Helper macro to implement the necessary traits for function reflection. /// /// This macro calls the following macros: diff --git a/crates/bevy_reflect/src/func/mod.rs b/crates/bevy_reflect/src/func/mod.rs index f990e135f017d..74a89282c6d2d 100644 --- a/crates/bevy_reflect/src/func/mod.rs +++ b/crates/bevy_reflect/src/func/mod.rs @@ -25,9 +25,9 @@ //! let mut func: DynamicFunction = add.into_function(); //! let args: ArgList = ArgList::default() //! // Pushing a known type with owned ownership -//! 
.push_owned(25_i32) +//! .with_owned(25_i32) //! // Pushing a reflected type with owned ownership -//! .push_boxed(Box::new(75_i32) as Box); +//! .with_boxed(Box::new(75_i32) as Box); //! let result: FunctionResult = func.call(args); //! let value: Return = result.unwrap(); //! assert_eq!(value.unwrap_owned().try_downcast_ref::(), Some(&100)); @@ -96,7 +96,7 @@ //! //! # Generic Functions //! -//! In Rust, generic functions are [monomophized] by the compiler, +//! In Rust, generic functions are [monomorphized] by the compiler, //! which means that a separate copy of the function is generated for each concrete set of type parameters. //! //! When converting a generic function to a [`DynamicFunction`] or [`DynamicFunctionMut`], @@ -141,11 +141,11 @@ //! //! // You can then retrieve and call these functions by name: //! let reflect_add = registry.get(core::any::type_name_of_val(&add)).unwrap(); -//! let value = reflect_add.call(ArgList::default().push_owned(10_i32).push_owned(5_i32)).unwrap(); +//! let value = reflect_add.call(ArgList::default().with_owned(10_i32).with_owned(5_i32)).unwrap(); //! assert_eq!(value.unwrap_owned().try_downcast_ref::(), Some(&15)); //! //! let reflect_mul = registry.get("mul").unwrap(); -//! let value = reflect_mul.call(ArgList::default().push_owned(10_i32).push_owned(5_i32)).unwrap(); +//! let value = reflect_mul.call(ArgList::default().with_owned(10_i32).with_owned(5_i32)).unwrap(); //! assert_eq!(value.unwrap_owned().try_downcast_ref::(), Some(&50)); //! ``` //! @@ -153,7 +153,7 @@ //! [`Reflect`]: crate::Reflect //! [lack of variadic generics]: https://poignardazur.github.io/2024/05/25/report-on-rustnl-variadics/ //! [coherence issues]: https://doc.rust-lang.org/rustc/lints/listing/warn-by-default.html#coherence-leak-check -//! [monomophized]: https://en.wikipedia.org/wiki/Monomorphization +//! [monomorphized]: https://en.wikipedia.org/wiki/Monomorphization //! [overloading]: #overloading-functions //! 
[function overloading]: https://en.wikipedia.org/wiki/Function_overloading //! [variadic functions]: https://en.wikipedia.org/wiki/Variadic_function @@ -219,7 +219,7 @@ mod tests { fn foo() {} let func = foo.into_function(); - let args = ArgList::new().push_owned(123_i32); + let args = ArgList::new().with_owned(123_i32); let result = func.call(args); assert_eq!( result.unwrap_err(), @@ -235,7 +235,7 @@ mod tests { fn foo(_: i32) {} let func = foo.into_function(); - let args = ArgList::new().push_owned(123_u32); + let args = ArgList::new().with_owned(123_u32); let result = func.call(args); assert_eq!( result.unwrap_err(), @@ -252,7 +252,7 @@ mod tests { fn foo(_: &i32) {} let func = foo.into_function(); - let args = ArgList::new().push_owned(123_i32); + let args = ArgList::new().with_owned(123_i32); let result = func.call(args); assert_eq!( result.unwrap_err(), diff --git a/crates/bevy_reflect/src/func/reflect_fn.rs b/crates/bevy_reflect/src/func/reflect_fn.rs index 38a18141fcf43..6ef7c22eb177a 100644 --- a/crates/bevy_reflect/src/func/reflect_fn.rs +++ b/crates/bevy_reflect/src/func/reflect_fn.rs @@ -1,8 +1,5 @@ use variadics_please::all_tuples; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - use crate::{ func::{ args::{ArgCount, FromArg}, @@ -47,7 +44,7 @@ use crate::{ /// a + b /// } /// -/// let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); +/// let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); /// /// let value = add.reflect_call(args).unwrap().unwrap_owned(); /// assert_eq!(value.try_take::().unwrap(), 100); @@ -91,7 +88,14 @@ macro_rules! impl_reflect_fn { // This clause essentially asserts that `Arg::This` is the same type as `Arg` Function: for<'a> Fn($($Arg::This<'a>),*) -> ReturnType + 'env, { - #[allow(unused_mut)] + #[expect( + clippy::allow_attributes, + reason = "This lint is part of a macro, which may not always trigger the `unused_mut` lint." 
+ )] + #[allow( + unused_mut, + reason = "Some invocations of this macro may trigger the `unused_mut` lint, where others won't." + )] fn reflect_call<'a>(&self, mut args: ArgList<'a>) -> FunctionResult<'a> { const COUNT: usize = count_tokens!($($Arg)*); diff --git a/crates/bevy_reflect/src/func/reflect_fn_mut.rs b/crates/bevy_reflect/src/func/reflect_fn_mut.rs index 760e657037c5b..98db2b45907ef 100644 --- a/crates/bevy_reflect/src/func/reflect_fn_mut.rs +++ b/crates/bevy_reflect/src/func/reflect_fn_mut.rs @@ -1,8 +1,5 @@ use variadics_please::all_tuples; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - use crate::{ func::{ args::{ArgCount, FromArg}, @@ -53,7 +50,7 @@ use crate::{ /// list.insert(index, value); /// }; /// -/// let args = ArgList::new().push_owned(1_usize).push_owned(2_i32); +/// let args = ArgList::new().with_owned(1_usize).with_owned(2_i32); /// /// insert.reflect_call_mut(args).unwrap(); /// assert_eq!(list, vec![1, 2, 3]); @@ -98,7 +95,14 @@ macro_rules! impl_reflect_fn_mut { // This clause essentially asserts that `Arg::This` is the same type as `Arg` Function: for<'a> FnMut($($Arg::This<'a>),*) -> ReturnType + 'env, { - #[allow(unused_mut)] + #[expect( + clippy::allow_attributes, + reason = "This lint is part of a macro, which may not always trigger the `unused_mut` lint." + )] + #[allow( + unused_mut, + reason = "Some invocations of this macro may trigger the `unused_mut` lint, where others won't." 
+ )] fn reflect_call_mut<'a>(&mut self, mut args: ArgList<'a>) -> FunctionResult<'a> { const COUNT: usize = count_tokens!($($Arg)*); diff --git a/crates/bevy_reflect/src/func/registry.rs b/crates/bevy_reflect/src/func/registry.rs index 82d1f1542a2b5..58a8344ecf3c8 100644 --- a/crates/bevy_reflect/src/func/registry.rs +++ b/crates/bevy_reflect/src/func/registry.rs @@ -1,11 +1,9 @@ -use alloc::{borrow::Cow, sync::Arc}; +use alloc::borrow::Cow; +use bevy_platform::{ + collections::HashMap, + sync::{Arc, PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; use core::fmt::Debug; -use std::sync::{PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard}; - -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - -use bevy_utils::HashMap; use crate::func::{ ArgList, DynamicFunction, FunctionRegistrationError, FunctionResult, IntoFunction, @@ -173,7 +171,7 @@ impl FunctionRegistry { /// .register_with_name(core::any::type_name_of_val(&mul), mul)? /// // Registering an existing function with a custom name /// .register_with_name("my_crate::mul", mul)?; - /// + /// /// // Be careful not to register anonymous functions with their type name. 
/// // This code works but registers the function with a non-unique name like `foo::bar::{{closure}}` /// registry.register_with_name(core::any::type_name_of_val(&div), div)?; @@ -359,6 +357,7 @@ impl FunctionRegistryArc { mod tests { use super::*; use crate::func::{ArgList, IntoFunction}; + use alloc::format; #[test] fn should_register_function() { @@ -489,7 +488,7 @@ mod tests { let mut registry = FunctionRegistry::default(); registry.register(add).unwrap(); - let args = ArgList::new().push_owned(25_i32).push_owned(75_i32); + let args = ArgList::new().with_owned(25_i32).with_owned(75_i32); let result = registry .call(core::any::type_name_of_val(&add), args) .unwrap(); diff --git a/crates/bevy_reflect/src/func/return_type.rs b/crates/bevy_reflect/src/func/return_type.rs index 3d1153912cc75..bab3c04b25962 100644 --- a/crates/bevy_reflect/src/func/return_type.rs +++ b/crates/bevy_reflect/src/func/return_type.rs @@ -1,9 +1,6 @@ use crate::PartialReflect; use alloc::boxed::Box; -#[cfg(not(feature = "std"))] -use alloc::{format, vec}; - /// The return type of a [`DynamicFunction`] or [`DynamicFunctionMut`]. 
/// /// [`DynamicFunction`]: crate::func::DynamicFunction diff --git a/crates/bevy_reflect/src/func/signature.rs b/crates/bevy_reflect/src/func/signature.rs index 965ff401e00b8..7813d7d4f92b6 100644 --- a/crates/bevy_reflect/src/func/signature.rs +++ b/crates/bevy_reflect/src/func/signature.rs @@ -14,7 +14,8 @@ use crate::func::args::ArgInfo; use crate::func::{ArgList, SignatureInfo}; use crate::Type; -use bevy_utils::hashbrown::Equivalent; +use alloc::boxed::Box; +use bevy_platform::collections::Equivalent; use core::borrow::Borrow; use core::fmt::{Debug, Formatter}; use core::hash::{Hash, Hasher}; @@ -203,6 +204,7 @@ impl From<&ArgList<'_>> for ArgumentSignature { mod tests { use super::*; use crate::func::TypedFunction; + use alloc::{format, string::String, vec}; #[test] fn should_generate_signature_from_function_info() { diff --git a/crates/bevy_reflect/src/generics.rs b/crates/bevy_reflect/src/generics.rs index fa91fc35c5126..8c9c4816baf5e 100644 --- a/crates/bevy_reflect/src/generics.rs +++ b/crates/bevy_reflect/src/generics.rs @@ -1,6 +1,7 @@ use crate::type_info::impl_type_methods; use crate::{Reflect, Type, TypePath}; -use alloc::{borrow::Cow, boxed::Box, sync::Arc}; +use alloc::{borrow::Cow, boxed::Box}; +use bevy_platform::sync::Arc; use core::ops::Deref; use derive_more::derive::From; @@ -180,7 +181,19 @@ impl ConstParamInfo { /// Sets the default value for the parameter. 
pub fn with_default(mut self, default: T) -> Self { - self.default = Some(Arc::new(default)); + let arc = Arc::new(default); + + #[cfg(not(target_has_atomic = "ptr"))] + #[expect( + unsafe_code, + reason = "unsized coercion is an unstable feature for non-std types" + )] + // SAFETY: + // - Coercion from `T` to `dyn Reflect` is valid as `T: Reflect + 'static` + // - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw` + let arc = unsafe { Arc::from_raw(Arc::into_raw(arc) as *const dyn Reflect) }; + + self.default = Some(arc); self } @@ -239,8 +252,8 @@ pub(crate) use impl_generic_info_methods; #[cfg(test)] mod tests { use super::*; - use crate as bevy_reflect; use crate::{Reflect, Typed}; + use alloc::string::String; use core::fmt::Debug; #[test] diff --git a/crates/bevy_reflect/src/impls/foldhash.rs b/crates/bevy_reflect/src/impls/foldhash.rs index a4f1df44efefc..1b0452d433603 100644 --- a/crates/bevy_reflect/src/impls/foldhash.rs +++ b/crates/bevy_reflect/src/impls/foldhash.rs @@ -1,4 +1,4 @@ -use crate::{self as bevy_reflect, impl_type_path}; +use crate::impl_type_path; impl_type_path!(::foldhash::fast::FoldHasher); impl_type_path!(::foldhash::fast::FixedState); diff --git a/crates/bevy_reflect/src/impls/glam.rs b/crates/bevy_reflect/src/impls/glam.rs index ba1fa00549329..139557ddb61a2 100644 --- a/crates/bevy_reflect/src/impls/glam.rs +++ b/crates/bevy_reflect/src/impls/glam.rs @@ -1,25 +1,24 @@ -use crate as bevy_reflect; use crate::{std_traits::ReflectDefault, ReflectDeserialize, ReflectSerialize}; use assert_type_match::assert_type_match; use bevy_reflect_derive::{impl_reflect, impl_reflect_opaque}; use glam::*; -#[cfg(not(feature = "std"))] -use alloc::format; - /// Reflects the given foreign type as an enum and asserts that the variants/fields match up. macro_rules! 
reflect_enum { ($(#[$meta:meta])* enum $ident:ident { $($ty:tt)* } ) => { impl_reflect!($(#[$meta])* enum $ident { $($ty)* }); #[assert_type_match($ident, test_only)] - #[allow(clippy::upper_case_acronyms)] + #[expect( + clippy::upper_case_acronyms, + reason = "The variants used are not acronyms." + )] enum $ident { $($ty)* } }; } impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct IVec2 { x: i32, @@ -27,7 +26,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct IVec3 { x: i32, @@ -36,7 +35,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct IVec4 { x: i32, @@ -47,7 +46,67 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct I8Vec2 { + x: i8, + y: i8, + } +); + +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct I8Vec3 { + x: i8, + y: i8, + z: i8, + } +); + +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct I8Vec4 { + x: i8, + y: i8, + z: i8, + w: i8, + } +); + +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct I16Vec2 { + x: i16, + y: i16, + } +); + +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct I16Vec3 { + x: i16, + y: i16, + z: i16, + } +); + 
+impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct I16Vec4 { + x: i16, + y: i16, + z: i16, + w: i16, + } +); + +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I64Vec2 { x: i64, @@ -56,7 +115,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I64Vec3 { x: i64, @@ -66,7 +125,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct I64Vec4 { x: i64, @@ -77,7 +136,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct UVec2 { x: u32, @@ -85,7 +144,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct UVec3 { x: u32, @@ -94,7 +153,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct UVec4 { x: u32, @@ -105,7 +164,63 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct U8Vec2 { + x: u8, + y: u8, + } +); +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct U8Vec3 { + x: u8, + y: u8, + z: u8, + } +); 
+impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct U8Vec4 { + x: u8, + y: u8, + z: u8, + w: u8, + } +); + +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct U16Vec2 { + x: u16, + y: u16, + } +); +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct U16Vec3 { + x: u16, + y: u16, + z: u16, + } +); +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[type_path = "glam"] + struct U16Vec4 { + x: u16, + y: u16, + z: u16, + w: u16, + } +); + +impl_reflect!( + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U64Vec2 { x: u64, @@ -113,7 +228,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U64Vec3 { x: u64, @@ -122,7 +237,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, Hash, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, Hash, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct U64Vec4 { x: u64, @@ -133,7 +248,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Vec2 { x: f32, @@ -141,7 +256,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Vec3 { x: f32, @@ -150,7 +265,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] 
#[type_path = "glam"] struct Vec3A { x: f32, @@ -159,7 +274,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Vec4 { x: f32, @@ -170,7 +285,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct BVec2 { x: bool, @@ -178,7 +293,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct BVec3 { x: bool, @@ -187,7 +302,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct BVec4 { x: bool, @@ -198,7 +313,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DVec2 { x: f64, @@ -206,7 +321,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DVec3 { x: f64, @@ -215,7 +330,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DVec4 { x: f64, @@ -226,7 +341,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat2 { x_axis: Vec2, @@ -234,7 +349,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, 
PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat3 { x_axis: Vec3, @@ -243,7 +358,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat3A { x_axis: Vec3A, @@ -252,7 +367,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Mat4 { x_axis: Vec4, @@ -263,7 +378,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DMat2 { x_axis: DVec2, @@ -271,7 +386,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DMat3 { x_axis: DVec3, @@ -280,7 +395,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DMat4 { x_axis: DVec4, @@ -291,7 +406,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Affine2 { matrix2: Mat2, @@ -299,7 +414,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Affine3A { matrix3: Mat3A, @@ -308,7 +423,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, 
Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DAffine2 { matrix2: DMat2, @@ -316,7 +431,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DAffine3 { matrix3: DMat3, @@ -325,7 +440,7 @@ impl_reflect!( ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct Quat { x: f32, @@ -335,7 +450,7 @@ impl_reflect!( } ); impl_reflect!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] struct DQuat { x: f64, @@ -346,7 +461,7 @@ impl_reflect!( ); reflect_enum!( - #[reflect(Debug, PartialEq, Default, Deserialize, Serialize)] + #[reflect(Clone, Debug, PartialEq, Default, Deserialize, Serialize)] #[type_path = "glam"] enum EulerRot { ZYX, @@ -376,11 +491,24 @@ reflect_enum!( } ); -impl_reflect_opaque!(::glam::BVec3A(Debug, Default, Deserialize, Serialize)); -impl_reflect_opaque!(::glam::BVec4A(Debug, Default, Deserialize, Serialize)); +impl_reflect_opaque!(::glam::BVec3A( + Clone, + Debug, + Default, + Deserialize, + Serialize +)); +impl_reflect_opaque!(::glam::BVec4A( + Clone, + Debug, + Default, + Deserialize, + Serialize +)); #[cfg(test)] mod tests { + use alloc::{format, string::String}; use ron::{ ser::{to_string_pretty, PrettyConfig}, Deserializer, diff --git a/crates/bevy_reflect/src/impls/petgraph.rs b/crates/bevy_reflect/src/impls/petgraph.rs index f9ab09e1b9f26..ce2bf77e37810 100644 --- a/crates/bevy_reflect/src/impls/petgraph.rs +++ b/crates/bevy_reflect/src/impls/petgraph.rs @@ -1,10 +1,10 @@ -use crate::{ - self as bevy_reflect, impl_reflect_opaque, prelude::ReflectDefault, ReflectDeserialize, - ReflectSerialize, -}; +use crate::{impl_reflect_opaque, 
prelude::ReflectDefault, ReflectDeserialize, ReflectSerialize}; impl_reflect_opaque!(::petgraph::graph::NodeIndex( + Clone, Default, + PartialEq, + Hash, Serialize, Deserialize )); @@ -12,4 +12,4 @@ impl_reflect_opaque!(::petgraph::graph::DiGraph< N: ::core::clone::Clone, E: ::core::clone::Clone, Ix: ::petgraph::graph::IndexType ->()); +>(Clone)); diff --git a/crates/bevy_reflect/src/impls/smallvec.rs b/crates/bevy_reflect/src/impls/smallvec.rs index 793ca2001ccc1..942bcbe83f17c 100644 --- a/crates/bevy_reflect/src/impls/smallvec.rs +++ b/crates/bevy_reflect/src/impls/smallvec.rs @@ -1,18 +1,15 @@ -use alloc::boxed::Box; +use crate::{ + utility::GenericTypeInfoCell, ApplyError, FromReflect, FromType, Generics, GetTypeRegistration, + List, ListInfo, ListIter, MaybeTyped, PartialReflect, Reflect, ReflectFromPtr, ReflectKind, + ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypeParamInfo, TypePath, TypeRegistration, + Typed, +}; +use alloc::{borrow::Cow, boxed::Box, string::ToString, vec::Vec}; +use bevy_reflect::ReflectCloneError; use bevy_reflect_derive::impl_type_path; use core::any::Any; use smallvec::{Array as SmallArray, SmallVec}; -#[cfg(not(feature = "std"))] -use alloc::{format, vec}; - -use crate::{ - self as bevy_reflect, utility::GenericTypeInfoCell, ApplyError, FromReflect, FromType, - Generics, GetTypeRegistration, List, ListInfo, ListIter, MaybeTyped, PartialReflect, Reflect, - ReflectFromPtr, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypeParamInfo, - TypePath, TypeRegistration, Typed, -}; - impl List for SmallVec where T::Item: FromReflect + MaybeTyped + TypePath, @@ -137,8 +134,20 @@ where ReflectOwned::List(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new( + self.iter() + .map(|value| { + value + .reflect_clone()? 
+ .take() + .map_err(|_| ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + }) + }) + .collect::>()?, + )) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { diff --git a/crates/bevy_reflect/src/impls/smol_str.rs b/crates/bevy_reflect/src/impls/smol_str.rs index 7a0f99e2ed7fe..d07a00cd6f656 100644 --- a/crates/bevy_reflect/src/impls/smol_str.rs +++ b/crates/bevy_reflect/src/impls/smol_str.rs @@ -1,9 +1,8 @@ -use crate::{ - self as bevy_reflect, std_traits::ReflectDefault, ReflectDeserialize, ReflectSerialize, -}; +use crate::{std_traits::ReflectDefault, ReflectDeserialize, ReflectSerialize}; use bevy_reflect_derive::impl_reflect_opaque; impl_reflect_opaque!(::smol_str::SmolStr( + Clone, Debug, Hash, PartialEq, diff --git a/crates/bevy_reflect/src/impls/std.rs b/crates/bevy_reflect/src/impls/std.rs index 6679fa63151fe..350527f91097d 100644 --- a/crates/bevy_reflect/src/impls/std.rs +++ b/crates/bevy_reflect/src/impls/std.rs @@ -1,15 +1,17 @@ -// Temporary workaround for impl_reflect!(Option/Result false-positive -#![allow(unused_qualifications)] +#![expect( + unused_qualifications, + reason = "Temporary workaround for impl_reflect!(Option/Result false-positive" +)] use crate::{ - self as bevy_reflect, impl_type_path, map_apply, map_partial_eq, map_try_apply, + impl_type_path, map_apply, map_partial_eq, map_try_apply, prelude::ReflectDefault, reflect::impl_full_reflect, set_apply, set_partial_eq, set_try_apply, utility::{reflect_hasher, GenericTypeInfoCell, GenericTypePathCell, NonGenericTypeInfoCell}, - ApplyError, Array, ArrayInfo, ArrayIter, DynamicMap, DynamicSet, DynamicTypePath, FromReflect, - FromType, Generics, GetTypeRegistration, List, ListInfo, ListIter, Map, MapInfo, MapIter, - MaybeTyped, OpaqueInfo, PartialReflect, Reflect, ReflectDeserialize, ReflectFromPtr, + ApplyError, Array, ArrayInfo, ArrayIter, DynamicMap, DynamicTypePath, FromReflect, 
FromType, + Generics, GetTypeRegistration, List, ListInfo, ListIter, Map, MapInfo, MapIter, MaybeTyped, + OpaqueInfo, PartialReflect, Reflect, ReflectCloneError, ReflectDeserialize, ReflectFromPtr, ReflectFromReflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, ReflectSerialize, Set, SetInfo, TypeInfo, TypeParamInfo, TypePath, TypeRegistration, TypeRegistry, Typed, }; @@ -18,6 +20,7 @@ use alloc::{ boxed::Box, collections::VecDeque, format, + string::ToString, vec::Vec, }; use bevy_reflect_derive::{impl_reflect, impl_reflect_opaque}; @@ -32,6 +35,7 @@ use core::{ use std::path::Path; impl_reflect_opaque!(bool( + Clone, Debug, Hash, PartialEq, @@ -40,6 +44,43 @@ impl_reflect_opaque!(bool( Default )); impl_reflect_opaque!(char( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u8( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u16( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u32( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(u64( + Clone, Debug, Hash, PartialEq, @@ -47,11 +88,8 @@ impl_reflect_opaque!(char( Deserialize, Default )); -impl_reflect_opaque!(u8(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(u16(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(u32(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(u64(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); impl_reflect_opaque!(u128( + Clone, Debug, Hash, PartialEq, @@ -60,6 +98,43 @@ impl_reflect_opaque!(u128( Default )); impl_reflect_opaque!(usize( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(i8( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(i16( + Clone, + 
Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(i32( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(i64( + Clone, Debug, Hash, PartialEq, @@ -67,11 +142,8 @@ impl_reflect_opaque!(usize( Deserialize, Default )); -impl_reflect_opaque!(i8(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(i16(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(i32(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(i64(Debug, Hash, PartialEq, Serialize, Deserialize, Default)); impl_reflect_opaque!(i128( + Clone, Debug, Hash, PartialEq, @@ -80,6 +152,7 @@ impl_reflect_opaque!(i128( Default )); impl_reflect_opaque!(isize( + Clone, Debug, Hash, PartialEq, @@ -87,10 +160,25 @@ impl_reflect_opaque!(isize( Deserialize, Default )); -impl_reflect_opaque!(f32(Debug, PartialEq, Serialize, Deserialize, Default)); -impl_reflect_opaque!(f64(Debug, PartialEq, Serialize, Deserialize, Default)); +impl_reflect_opaque!(f32( + Clone, + Debug, + PartialEq, + Serialize, + Deserialize, + Default +)); +impl_reflect_opaque!(f64( + Clone, + Debug, + PartialEq, + Serialize, + Deserialize, + Default +)); impl_type_path!(str); impl_reflect_opaque!(::alloc::string::String( + Clone, Debug, Hash, PartialEq, @@ -100,6 +188,7 @@ impl_reflect_opaque!(::alloc::string::String( )); #[cfg(feature = "std")] impl_reflect_opaque!(::std::path::PathBuf( + Clone, Debug, Hash, PartialEq, @@ -107,16 +196,17 @@ impl_reflect_opaque!(::std::path::PathBuf( Deserialize, Default )); -impl_reflect_opaque!(::core::any::TypeId(Debug, Hash, PartialEq,)); -impl_reflect_opaque!(::alloc::collections::BTreeSet()); -impl_reflect_opaque!(::core::ops::Range()); -impl_reflect_opaque!(::core::ops::RangeInclusive()); -impl_reflect_opaque!(::core::ops::RangeFrom()); -impl_reflect_opaque!(::core::ops::RangeTo()); 
-impl_reflect_opaque!(::core::ops::RangeToInclusive()); -impl_reflect_opaque!(::core::ops::RangeFull()); -impl_reflect_opaque!(::core::ops::Bound()); -impl_reflect_opaque!(::bevy_utils::Duration( +impl_reflect_opaque!(::core::any::TypeId(Clone, Debug, Hash, PartialEq,)); +impl_reflect_opaque!(::alloc::collections::BTreeSet(Clone)); +impl_reflect_opaque!(::core::ops::Range(Clone)); +impl_reflect_opaque!(::core::ops::RangeInclusive(Clone)); +impl_reflect_opaque!(::core::ops::RangeFrom(Clone)); +impl_reflect_opaque!(::core::ops::RangeTo(Clone)); +impl_reflect_opaque!(::core::ops::RangeToInclusive(Clone)); +impl_reflect_opaque!(::core::ops::RangeFull(Clone)); +impl_reflect_opaque!(::core::ops::Bound(Clone)); +impl_reflect_opaque!(::core::time::Duration( + Clone, Debug, Hash, PartialEq, @@ -124,9 +214,11 @@ impl_reflect_opaque!(::bevy_utils::Duration( Deserialize, Default )); -#[cfg(any(target_arch = "wasm32", feature = "std"))] -impl_reflect_opaque!(::bevy_utils::Instant(Debug, Hash, PartialEq)); +impl_reflect_opaque!(::bevy_platform::time::Instant( + Clone, Debug, Hash, PartialEq +)); impl_reflect_opaque!(::core::num::NonZeroI128( + Clone, Debug, Hash, PartialEq, @@ -134,6 +226,7 @@ impl_reflect_opaque!(::core::num::NonZeroI128( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU128( + Clone, Debug, Hash, PartialEq, @@ -141,6 +234,7 @@ impl_reflect_opaque!(::core::num::NonZeroU128( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroIsize( + Clone, Debug, Hash, PartialEq, @@ -148,6 +242,7 @@ impl_reflect_opaque!(::core::num::NonZeroIsize( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroUsize( + Clone, Debug, Hash, PartialEq, @@ -155,6 +250,7 @@ impl_reflect_opaque!(::core::num::NonZeroUsize( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroI64( + Clone, Debug, Hash, PartialEq, @@ -162,6 +258,7 @@ impl_reflect_opaque!(::core::num::NonZeroI64( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU64( + Clone, Debug, Hash, PartialEq, @@ 
-169,6 +266,7 @@ impl_reflect_opaque!(::core::num::NonZeroU64( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU32( + Clone, Debug, Hash, PartialEq, @@ -176,6 +274,7 @@ impl_reflect_opaque!(::core::num::NonZeroU32( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroI32( + Clone, Debug, Hash, PartialEq, @@ -183,6 +282,7 @@ impl_reflect_opaque!(::core::num::NonZeroI32( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroI16( + Clone, Debug, Hash, PartialEq, @@ -190,6 +290,7 @@ impl_reflect_opaque!(::core::num::NonZeroI16( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU16( + Clone, Debug, Hash, PartialEq, @@ -197,6 +298,7 @@ impl_reflect_opaque!(::core::num::NonZeroU16( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroU8( + Clone, Debug, Hash, PartialEq, @@ -204,20 +306,22 @@ impl_reflect_opaque!(::core::num::NonZeroU8( Deserialize )); impl_reflect_opaque!(::core::num::NonZeroI8( + Clone, Debug, Hash, PartialEq, Serialize, Deserialize )); -impl_reflect_opaque!(::core::num::Wrapping()); -impl_reflect_opaque!(::core::num::Saturating()); -impl_reflect_opaque!(::alloc::sync::Arc); +impl_reflect_opaque!(::core::num::Wrapping(Clone)); +impl_reflect_opaque!(::core::num::Saturating(Clone)); +impl_reflect_opaque!(::bevy_platform::sync::Arc(Clone)); // `Serialize` and `Deserialize` only for platforms supported by serde: // https://github.com/serde-rs/serde/blob/3ffb86fc70efd3d329519e2dddfa306cc04f167c/serde/src/de/impls.rs#L1732 #[cfg(all(any(unix, windows), feature = "std"))] impl_reflect_opaque!(::std::ffi::OsString( + Clone, Debug, Hash, PartialEq, @@ -225,8 +329,8 @@ impl_reflect_opaque!(::std::ffi::OsString( Deserialize )); #[cfg(all(not(any(unix, windows)), feature = "std"))] -impl_reflect_opaque!(::std::ffi::OsString(Debug, Hash, PartialEq)); -impl_reflect_opaque!(::alloc::collections::BinaryHeap); +impl_reflect_opaque!(::std::ffi::OsString(Clone, Debug, Hash, PartialEq)); 
+impl_reflect_opaque!(::alloc::collections::BinaryHeap(Clone)); macro_rules! impl_reflect_for_atomic { ($ty:ty, $ordering:expr) => { @@ -236,7 +340,6 @@ macro_rules! impl_reflect_for_atomic { #[cfg(feature = "functions")] crate::func::macros::impl_function_traits!($ty); - #[allow(unused_mut)] impl GetTypeRegistration for $ty where $ty: Any + Send + Sync, @@ -305,10 +408,12 @@ macro_rules! impl_reflect_for_atomic { fn try_as_reflect_mut(&mut self) -> Option<&mut dyn Reflect> { Some(self) } + #[inline] - fn clone_value(&self) -> Box { - Box::new(<$ty>::new(self.load($ordering))) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(<$ty>::new(self.load($ordering)))) } + #[inline] fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { if let Some(value) = value.try_downcast_ref::() { @@ -366,10 +471,12 @@ impl_reflect_for_atomic!( ::core::sync::atomic::AtomicUsize, ::core::sync::atomic::Ordering::SeqCst ); +#[cfg(target_has_atomic = "64")] impl_reflect_for_atomic!( ::core::sync::atomic::AtomicI64, ::core::sync::atomic::Ordering::SeqCst ); +#[cfg(target_has_atomic = "64")] impl_reflect_for_atomic!( ::core::sync::atomic::AtomicU64, ::core::sync::atomic::Ordering::SeqCst @@ -514,8 +621,19 @@ macro_rules! impl_reflect_for_veclike { ReflectOwned::List(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new( + self.iter() + .map(|value| { + value.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + } + }) + }) + .collect::>()?, + )) } fn reflect_hash(&self) -> Option { @@ -556,6 +674,7 @@ macro_rules! 
impl_reflect_for_veclike { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::<$ty>(); registration.insert::(FromType::<$ty>::from_type()); + registration.insert::(FromType::<$ty>::from_type()); registration } @@ -603,7 +722,7 @@ macro_rules! impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get(&self, key: &dyn PartialReflect) -> Option<&dyn PartialReflect> { key.try_downcast_ref::() @@ -651,7 +770,7 @@ macro_rules! impl_reflect_for_hashmap { .collect() } - fn clone_dynamic(&self) -> DynamicMap { + fn to_dynamic_map(&self) -> DynamicMap { let mut dynamic_map = DynamicMap::default(); dynamic_map.set_represented_type(self.get_represented_type_info()); for (k, v) in self { @@ -661,7 +780,7 @@ macro_rules! impl_reflect_for_hashmap { k.reflect_type_path() ) }); - dynamic_map.insert_boxed(Box::new(key), v.clone_value()); + dynamic_map.insert_boxed(Box::new(key), v.to_dynamic()); } dynamic_map } @@ -703,7 +822,7 @@ macro_rules! impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get_represented_type_info(&self) -> Option<&'static TypeInfo> { Some(::type_info()) @@ -752,8 +871,25 @@ macro_rules! 
impl_reflect_for_hashmap { ReflectOwned::Map(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + let mut map = Self::with_capacity_and_hasher(self.len(), S::default()); + for (key, value) in self.iter() { + let key = key.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(key.reflect_type_path().to_string()), + } + })?; + let value = value.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + } + })?; + map.insert(key, value); + } + + Ok(Box::new(map)) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { @@ -774,14 +910,14 @@ macro_rules! impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, ); impl Typed for $ty where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn type_info() -> &'static TypeInfo { static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); @@ -800,11 +936,12 @@ macro_rules! 
impl_reflect_for_hashmap { where K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync + Default, { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::(); registration.insert::(FromType::::from_type()); + registration.insert::(FromType::::from_type()); registration } @@ -853,10 +990,10 @@ crate::func::macros::impl_function_traits!(::std::collections::HashMap; > ); -impl_reflect_for_hashmap!(bevy_utils::hashbrown::HashMap); -impl_type_path!(::bevy_utils::hashbrown::HashMap); +impl_reflect_for_hashmap!(bevy_platform::collections::HashMap); +impl_type_path!(::bevy_platform::collections::HashMap); #[cfg(feature = "functions")] -crate::func::macros::impl_function_traits!(::bevy_utils::hashbrown::HashMap; +crate::func::macros::impl_function_traits!(::bevy_platform::collections::HashMap; < K: FromReflect + MaybeTyped + TypePath + GetTypeRegistration + Eq + Hash, V: FromReflect + MaybeTyped + TypePath + GetTypeRegistration, @@ -869,7 +1006,7 @@ macro_rules! impl_reflect_for_hashset { impl Set for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get(&self, value: &dyn PartialReflect) -> Option<&dyn PartialReflect> { value @@ -893,15 +1030,6 @@ macro_rules! impl_reflect_for_hashset { .collect() } - fn clone_dynamic(&self) -> DynamicSet { - let mut dynamic_set = DynamicSet::default(); - dynamic_set.set_represented_type(self.get_represented_type_info()); - for v in self { - dynamic_set.insert_boxed(v.clone_value()); - } - dynamic_set - } - fn insert_boxed(&mut self, value: Box) -> bool { let value = V::take_from_reflect(value).unwrap_or_else(|value| { panic!( @@ -920,7 +1048,7 @@ macro_rules! 
impl_reflect_for_hashset { from_reflect = V::from_reflect(value); from_reflect.as_ref() }) - .map_or(false, |value| self.remove(value)) + .is_some_and(|value| self.remove(value)) } fn contains(&self, value: &dyn PartialReflect) -> bool { @@ -931,14 +1059,14 @@ macro_rules! impl_reflect_for_hashset { from_reflect = V::from_reflect(value); from_reflect.as_ref() }) - .map_or(false, |value| self.contains(value)) + .is_some_and(|value| self.contains(value)) } } impl PartialReflect for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn get_represented_type_info(&self) -> Option<&'static TypeInfo> { Some(::type_info()) @@ -996,8 +1124,19 @@ macro_rules! impl_reflect_for_hashset { ReflectOwned::Set(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + let mut set = Self::with_capacity_and_hasher(self.len(), S::default()); + for value in self.iter() { + let value = value.reflect_clone()?.take().map_err(|_| { + ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + } + })?; + set.insert(value); + } + + Ok(Box::new(set)) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { @@ -1008,7 +1147,7 @@ macro_rules! impl_reflect_for_hashset { impl Typed for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, { fn type_info() -> &'static TypeInfo { static CELL: GenericTypeInfoCell = GenericTypeInfoCell::new(); @@ -1025,11 +1164,12 @@ macro_rules! 
impl_reflect_for_hashset { impl GetTypeRegistration for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync + Default, { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::(); registration.insert::(FromType::::from_type()); + registration.insert::(FromType::::from_type()); registration } @@ -1042,7 +1182,7 @@ macro_rules! impl_reflect_for_hashset { for $ty where V: FromReflect + TypePath + GetTypeRegistration + Eq + Hash, - S: TypePath + BuildHasher + Send + Sync, + S: TypePath + BuildHasher + Default + Send + Sync, ); impl FromReflect for $ty @@ -1066,8 +1206,16 @@ macro_rules! impl_reflect_for_hashset { }; } -impl_type_path!(::bevy_utils::NoOpHash); -impl_type_path!(::bevy_utils::FixedHasher); +impl_type_path!(::bevy_platform::hash::NoOpHash); +impl_type_path!(::bevy_platform::hash::FixedHasher); +impl_reflect_opaque!(::core::net::SocketAddr( + Clone, + Debug, + Hash, + PartialEq, + Serialize, + Deserialize +)); #[cfg(feature = "std")] impl_reflect_for_hashset!(::std::collections::HashSet); @@ -1081,10 +1229,10 @@ crate::func::macros::impl_function_traits!(::std::collections::HashSet; > ); -impl_reflect_for_hashset!(::bevy_utils::hashbrown::HashSet); -impl_type_path!(::bevy_utils::hashbrown::HashSet); +impl_reflect_for_hashset!(::bevy_platform::collections::HashSet); +impl_type_path!(::bevy_platform::collections::HashSet); #[cfg(feature = "functions")] -crate::func::macros::impl_function_traits!(::bevy_utils::hashbrown::HashSet; +crate::func::macros::impl_function_traits!(::bevy_platform::collections::HashSet; < V: Hash + Eq + FromReflect + TypePath + GetTypeRegistration, S: TypePath + BuildHasher + Default + Send + Sync @@ -1155,7 +1303,7 @@ where k.reflect_type_path() ) }); - dynamic_map.insert_boxed(Box::new(key), v.clone_value()); + dynamic_map.insert_boxed(Box::new(key), v.to_dynamic()); } dynamic_map } 
@@ -1241,8 +1389,28 @@ where ReflectOwned::Map(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + let mut map = Self::new(); + for (key, value) in self.iter() { + let key = + key.reflect_clone()? + .take() + .map_err(|_| ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(key.reflect_type_path().to_string()), + })?; + let value = + value + .reflect_clone()? + .take() + .map_err(|_| ReflectCloneError::FailedDowncast { + expected: Cow::Borrowed(::type_path()), + received: Cow::Owned(value.reflect_type_path().to_string()), + })?; + map.insert(key, value); + } + + Ok(Box::new(map)) } fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { @@ -1291,6 +1459,7 @@ where fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::(); registration.insert::(FromType::::from_type()); + registration.insert::(FromType::::from_type()); registration } } @@ -1405,11 +1574,6 @@ impl P ReflectOwned::Array(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { crate::array_hash(self) @@ -1604,8 +1768,8 @@ impl PartialReflect for Cow<'static, str> { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(self.clone())) } fn reflect_hash(&self) -> Option { @@ -1655,6 +1819,7 @@ impl GetTypeRegistration for Cow<'static, str> { let mut registration = TypeRegistration::of::>(); registration.insert::(FromType::>::from_type()); registration.insert::(FromType::>::from_type()); + registration.insert::(FromType::>::from_type()); registration.insert::(FromType::>::from_type()); registration } @@ -1791,8 +1956,8 @@ impl Parti ReflectOwned::List(self) } - fn clone_value(&self) -> Box { - Box::new(List::clone_dynamic(self)) + fn 
reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(self.clone())) } fn reflect_hash(&self) -> Option { @@ -1900,8 +2065,8 @@ impl PartialReflect for &'static str { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(*self) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(*self)) } fn reflect_hash(&self) -> Option { @@ -2039,8 +2204,8 @@ impl PartialReflect for &'static Path { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(*self) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(*self)) } fn reflect_hash(&self) -> Option { @@ -2116,6 +2281,7 @@ impl GetTypeRegistration for &'static Path { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::(); registration.insert::(FromType::::from_type()); + registration.insert::(FromType::::from_type()); registration } } @@ -2177,8 +2343,8 @@ impl PartialReflect for Cow<'static, Path> { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone()) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(self.clone())) } fn reflect_hash(&self) -> Option { @@ -2335,8 +2501,8 @@ impl PartialReflect for &'static Location<'static> { ReflectOwned::Opaque(self) } - fn clone_value(&self) -> Box { - Box::new(*self) + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(*self)) } fn reflect_hash(&self) -> Option { @@ -2409,6 +2575,7 @@ impl GetTypeRegistration for &'static Location<'static> { fn get_type_registration() -> TypeRegistration { let mut registration = TypeRegistration::of::(); registration.insert::(FromType::::from_type()); + registration.insert::(FromType::::from_type()); registration } } @@ -2425,12 +2592,16 @@ crate::func::macros::impl_function_traits!(&'static Location<'static>); #[cfg(test)] mod tests { use crate::{ - self as bevy_reflect, Enum, FromReflect, PartialReflect, Reflect, ReflectSerialize, - TypeInfo, 
TypeRegistry, Typed, VariantInfo, VariantType, + Enum, FromReflect, PartialReflect, Reflect, ReflectSerialize, TypeInfo, TypeRegistry, + Typed, VariantInfo, VariantType, + }; + use alloc::{collections::BTreeMap, string::String, vec}; + use bevy_platform::collections::HashMap; + use bevy_platform::time::Instant; + use core::{ + f32::consts::{PI, TAU}, + time::Duration, }; - use alloc::collections::BTreeMap; - use bevy_utils::{Duration, HashMap, Instant}; - use core::f32::consts::{PI, TAU}; use static_assertions::assert_impl_all; use std::path::Path; diff --git a/crates/bevy_reflect/src/impls/uuid.rs b/crates/bevy_reflect/src/impls/uuid.rs index 71f708c4d1da7..7385304e28a26 100644 --- a/crates/bevy_reflect/src/impls/uuid.rs +++ b/crates/bevy_reflect/src/impls/uuid.rs @@ -1,5 +1,3 @@ -use crate as bevy_reflect; - use crate::{std_traits::ReflectDefault, ReflectDeserialize, ReflectSerialize}; use bevy_reflect_derive::impl_reflect_opaque; @@ -7,6 +5,7 @@ impl_reflect_opaque!(::uuid::Uuid( Serialize, Deserialize, Default, + Clone, Debug, PartialEq, Hash diff --git a/crates/bevy_reflect/src/impls/wgpu_types.rs b/crates/bevy_reflect/src/impls/wgpu_types.rs index 6f99b5d49699d..734eace938a04 100644 --- a/crates/bevy_reflect/src/impls/wgpu_types.rs +++ b/crates/bevy_reflect/src/impls/wgpu_types.rs @@ -1,6 +1,7 @@ -use crate::{self as bevy_reflect, impl_reflect_opaque, ReflectDeserialize, ReflectSerialize}; +use crate::{impl_reflect_opaque, ReflectDeserialize, ReflectSerialize}; impl_reflect_opaque!(::wgpu_types::TextureFormat( + Clone, Debug, Hash, PartialEq, diff --git a/crates/bevy_reflect/src/kind.rs b/crates/bevy_reflect/src/kind.rs index d5d16715c09c4..3eef10d0e55eb 100644 --- a/crates/bevy_reflect/src/kind.rs +++ b/crates/bevy_reflect/src/kind.rs @@ -274,6 +274,7 @@ impl ReflectOwned { #[cfg(test)] mod tests { + use alloc::vec; use std::collections::HashSet; use super::*; diff --git a/crates/bevy_reflect/src/lib.rs b/crates/bevy_reflect/src/lib.rs index 
c6e5ba0b4515d..58e9b8714f0e1 100644 --- a/crates/bevy_reflect/src/lib.rs +++ b/crates/bevy_reflect/src/lib.rs @@ -112,7 +112,7 @@ //! //! Additionally, using the derive macro on enums requires a third condition to be met: //! * All fields and sub-elements must implement [`FromReflect`]— -//! another important reflection trait discussed in a later section. +//! another important reflection trait discussed in a later section. //! //! # The Reflection Subtraits //! @@ -204,8 +204,8 @@ //! //! They are most commonly used as "proxies" for other types, //! where they contain the same data as— and therefore, represent— a concrete type. -//! The [`PartialReflect::clone_value`] method will return a dynamic type for all non-opaque types, -//! allowing all types to essentially be "cloned". +//! The [`PartialReflect::to_dynamic`] method will return a dynamic type for all non-opaque types, +//! allowing all types to essentially be "cloned" into a dynamic type. //! And since dynamic types themselves implement [`PartialReflect`], //! we may pass them around just like most other reflected types. //! @@ -219,9 +219,9 @@ //! foo: 123 //! }); //! -//! // `cloned` will be a `DynamicStruct` representing a `MyStruct` -//! let cloned: Box = original.clone_value(); -//! assert!(cloned.represents::()); +//! // `dynamic` will be a `DynamicStruct` representing a `MyStruct` +//! let dynamic: Box = original.to_dynamic(); +//! assert!(dynamic.represents::()); //! ``` //! //! ## Patching @@ -253,8 +253,8 @@ //! foo: 123 //! }); //! -//! let cloned: Box = original.clone_value(); -//! let value = cloned.try_take::().unwrap(); // PANIC! +//! let dynamic: Box = original.to_dynamic(); +//! let value = dynamic.try_take::().unwrap(); // PANIC! //! ``` //! //! To resolve this issue, we'll need to convert the dynamic type to the concrete one. @@ -278,8 +278,8 @@ //! foo: 123 //! }); //! -//! let cloned: Box = original.clone_value(); -//! let value = ::from_reflect(&*cloned).unwrap(); // OK! +//! 
let dynamic: Box = original.to_dynamic(); +//! let value = ::from_reflect(&*dynamic).unwrap(); // OK! //! ``` //! //! When deriving, all active fields and sub-elements must also implement `FromReflect`. @@ -557,11 +557,18 @@ //! [`ArgList`]: crate::func::ArgList //! [derive `Reflect`]: derive@crate::Reflect -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] + +#[cfg(feature = "std")] +extern crate std; extern crate alloc; +// Required to make proc macros work in bevy itself. +extern crate self as bevy_reflect; + mod array; +mod error; mod fields; mod from_reflect; #[cfg(feature = "functions")] @@ -627,6 +634,7 @@ pub mod prelude { pub use array::*; pub use enums::*; +pub use error::*; pub use fields::*; pub use from_reflect::*; pub use generics::*; @@ -683,7 +691,10 @@ pub mod __macro_exports { note = "consider annotating `{Self}` with `#[derive(Reflect)]`" )] pub trait RegisterForReflection { - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." + )] fn __register(registry: &mut TypeRegistry) {} } @@ -709,11 +720,21 @@ pub mod __macro_exports { } #[cfg(test)] -#[allow(clippy::disallowed_types, clippy::approx_constant)] +#[expect( + clippy::approx_constant, + reason = "We don't need the exact value of Pi here." 
+)] mod tests { use ::serde::{de::DeserializeSeed, Deserialize, Serialize}; - use alloc::borrow::Cow; - use bevy_utils::HashMap; + use alloc::{ + borrow::Cow, + boxed::Box, + format, + string::{String, ToString}, + vec, + vec::Vec, + }; + use bevy_platform::collections::HashMap; use core::{ any::TypeId, fmt::{Debug, Formatter}, @@ -728,7 +749,6 @@ mod tests { use static_assertions::{assert_impl_all, assert_not_impl_all}; use super::{prelude::*, *}; - use crate as bevy_reflect; use crate::{ serde::{ReflectDeserializer, ReflectSerializer}, utility::GenericTypePathCell, @@ -866,7 +886,6 @@ mod tests { } #[test] - #[allow(clippy::disallowed_types)] fn reflect_unit_struct() { #[derive(Reflect)] struct Foo(u32, u64); @@ -926,7 +945,7 @@ mod tests { let foo = Foo { a: 1 }; assert!(foo.reflect_hash().is_some()); - let dynamic = foo.clone_dynamic(); + let dynamic = foo.to_dynamic_struct(); let mut map = DynamicMap::default(); map.insert(dynamic, 11u32); @@ -969,6 +988,331 @@ mod tests { assert_eq!(values, vec![1]); } + /// This test ensures that we are able to reflect generic types with one or more type parameters. + /// + /// When there is an `Add` implementation for `String`, the compiler isn't able to infer the correct + /// type to deref to. + /// If we don't append the strings in the `TypePath` derive correctly (i.e. explicitly specifying the type), + /// we'll get a compilation error saying that "`&String` cannot be added to `String`". + /// + /// So this test just ensures that we do do that correctly. 
+ /// + /// This problem is a known issue and is unexpectedly expected behavior: + /// - + /// - + /// - + #[test] + fn should_reflect_generic() { + struct FakeString {} + + // This implementation confuses the compiler when trying to add a `&String` to a `String` + impl core::ops::Add for String { + type Output = Self; + fn add(self, _rhs: FakeString) -> Self::Output { + unreachable!() + } + } + + #[derive(Reflect)] + struct Foo(A); + + #[derive(Reflect)] + struct Bar(A, B); + + #[derive(Reflect)] + struct Baz(A, B, C); + } + + #[test] + fn should_reflect_clone() { + // Struct + #[derive(Reflect, Debug, PartialEq)] + struct Foo(usize); + + let value = Foo(123); + let clone = value.reflect_clone().expect("should reflect_clone struct"); + assert_eq!(value, clone.take::().unwrap()); + + // Tuple + let foo = (123, 4.56); + let clone = foo.reflect_clone().expect("should reflect_clone tuple"); + assert_eq!(foo, clone.take::<(u32, f32)>().unwrap()); + } + + #[test] + fn should_reflect_clone_generic_type() { + #[derive(Reflect, Debug, PartialEq)] + struct Foo(T, #[reflect(ignore, clone)] PhantomData); + #[derive(TypePath, Debug, PartialEq)] + struct Bar; + + // `usize` will be cloned via `Reflect::reflect_clone` + // `PhantomData` will be cloned via `Clone::clone` + let value = Foo::(123, PhantomData); + let clone = value + .reflect_clone() + .expect("should reflect_clone generic struct"); + assert_eq!(value, clone.take::>().unwrap()); + } + + #[test] + fn should_reflect_clone_with_clone() { + // A custom clone function to verify that the `#[reflect(Clone)]` container attribute + // takes precedence over the `#[reflect(clone)]` field attribute. 
+ #[expect( + dead_code, + reason = "if things are working correctly, this function should never be called" + )] + fn custom_clone(_value: &usize) -> usize { + panic!("should not be called"); + } + + // Tuple Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + #[reflect(Clone)] + struct Foo(#[reflect(clone = "custom_clone")] usize); + + let value = Foo(123); + let clone = value + .reflect_clone() + .expect("should reflect_clone tuple struct"); + assert_eq!(value, clone.take::().unwrap()); + + // Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + #[reflect(Clone)] + struct Bar { + #[reflect(clone = "custom_clone")] + value: usize, + } + + let value = Bar { value: 123 }; + let clone = value.reflect_clone().expect("should reflect_clone struct"); + assert_eq!(value, clone.take::().unwrap()); + + // Enum + #[derive(Reflect, Clone, Debug, PartialEq)] + #[reflect(Clone)] + enum Baz { + Unit, + Tuple(#[reflect(clone = "custom_clone")] usize), + Struct { + #[reflect(clone = "custom_clone")] + value: usize, + }, + } + + let value = Baz::Unit; + let clone = value + .reflect_clone() + .expect("should reflect_clone unit variant"); + assert_eq!(value, clone.take::().unwrap()); + + let value = Baz::Tuple(123); + let clone = value + .reflect_clone() + .expect("should reflect_clone tuple variant"); + assert_eq!(value, clone.take::().unwrap()); + + let value = Baz::Struct { value: 123 }; + let clone = value + .reflect_clone() + .expect("should reflect_clone struct variant"); + assert_eq!(value, clone.take::().unwrap()); + } + + #[test] + fn should_custom_reflect_clone() { + #[derive(Reflect, Debug, PartialEq)] + #[reflect(Clone(clone_foo))] + struct Foo(usize); + + fn clone_foo(foo: &Foo) -> Foo { + Foo(foo.0 + 198) + } + + let foo = Foo(123); + let clone = foo.reflect_clone().unwrap(); + assert_eq!(Foo(321), clone.take::().unwrap()); + } + + #[test] + fn should_not_clone_ignored_fields() { + // Tuple Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + struct 
Foo(#[reflect(ignore)] usize); + + let foo = Foo(123); + let clone = foo.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Unnamed(0), + variant: None, + container_type_path: Cow::Borrowed(Foo::type_path()), + } + ); + + // Struct + #[derive(Reflect, Clone, Debug, PartialEq)] + struct Bar { + #[reflect(ignore)] + value: usize, + } + + let bar = Bar { value: 123 }; + let clone = bar.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Named(Cow::Borrowed("value")), + variant: None, + container_type_path: Cow::Borrowed(Bar::type_path()), + } + ); + + // Enum + #[derive(Reflect, Clone, Debug, PartialEq)] + enum Baz { + Tuple(#[reflect(ignore)] usize), + Struct { + #[reflect(ignore)] + value: usize, + }, + } + + let baz = Baz::Tuple(123); + let clone = baz.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Unnamed(0), + variant: Some(Cow::Borrowed("Tuple")), + container_type_path: Cow::Borrowed(Baz::type_path()), + } + ); + + let baz = Baz::Struct { value: 123 }; + let clone = baz.reflect_clone(); + assert_eq!( + clone.unwrap_err(), + ReflectCloneError::FieldNotCloneable { + field: FieldId::Named(Cow::Borrowed("value")), + variant: Some(Cow::Borrowed("Struct")), + container_type_path: Cow::Borrowed(Baz::type_path()), + } + ); + } + + #[test] + fn should_clone_ignored_fields_with_clone_attributes() { + #[derive(Reflect, Clone, Debug, PartialEq)] + struct Foo(#[reflect(ignore, clone)] usize); + + let foo = Foo(123); + let clone = foo.reflect_clone().unwrap(); + assert_eq!(Foo(123), clone.take::().unwrap()); + + #[derive(Reflect, Clone, Debug, PartialEq)] + struct Bar(#[reflect(ignore, clone = "clone_usize")] usize); + + fn clone_usize(this: &usize) -> usize { + *this + 198 + } + + let bar = Bar(123); + let clone = bar.reflect_clone().unwrap(); + assert_eq!(Bar(321), clone.take::().unwrap()); 
+ } + + #[test] + fn should_composite_reflect_clone() { + #[derive(Reflect, Debug, PartialEq)] + enum MyEnum { + Unit, + Tuple( + Foo, + #[reflect(ignore, clone)] Bar, + #[reflect(clone = "clone_baz")] Baz, + ), + Struct { + foo: Foo, + #[reflect(ignore, clone)] + bar: Bar, + #[reflect(clone = "clone_baz")] + baz: Baz, + }, + } + + #[derive(Reflect, Debug, PartialEq)] + struct Foo { + #[reflect(clone = "clone_bar")] + bar: Bar, + baz: Baz, + } + + #[derive(Reflect, Default, Clone, Debug, PartialEq)] + #[reflect(Clone)] + struct Bar(String); + + #[derive(Reflect, Debug, PartialEq)] + struct Baz(String); + + fn clone_bar(bar: &Bar) -> Bar { + Bar(format!("{}!", bar.0)) + } + + fn clone_baz(baz: &Baz) -> Baz { + Baz(format!("{}!", baz.0)) + } + + let my_enum = MyEnum::Unit; + let clone = my_enum.reflect_clone().unwrap(); + assert_eq!(MyEnum::Unit, clone.take::().unwrap()); + + let my_enum = MyEnum::Tuple( + Foo { + bar: Bar("bar".to_string()), + baz: Baz("baz".to_string()), + }, + Bar("bar".to_string()), + Baz("baz".to_string()), + ); + let clone = my_enum.reflect_clone().unwrap(); + assert_eq!( + MyEnum::Tuple( + Foo { + bar: Bar("bar!".to_string()), + baz: Baz("baz".to_string()), + }, + Bar("bar".to_string()), + Baz("baz!".to_string()), + ), + clone.take::().unwrap() + ); + + let my_enum = MyEnum::Struct { + foo: Foo { + bar: Bar("bar".to_string()), + baz: Baz("baz".to_string()), + }, + bar: Bar("bar".to_string()), + baz: Baz("baz".to_string()), + }; + let clone = my_enum.reflect_clone().unwrap(); + assert_eq!( + MyEnum::Struct { + foo: Foo { + bar: Bar("bar!".to_string()), + baz: Baz("baz".to_string()), + }, + bar: Bar("bar".to_string()), + baz: Baz("baz!".to_string()), + }, + clone.take::().unwrap() + ); + } + #[test] fn should_call_from_reflect_dynamically() { #[derive(Reflect)] @@ -1189,7 +1533,7 @@ mod tests { list.push(3isize); list.push(4isize); list.push(5isize); - foo_patch.insert("c", list.clone_dynamic()); + foo_patch.insert("c", list.to_dynamic_list()); 
let mut map = DynamicMap::default(); map.insert(2usize, 3i8); @@ -1198,7 +1542,7 @@ mod tests { let mut bar_patch = DynamicStruct::default(); bar_patch.insert("x", 2u32); - foo_patch.insert("e", bar_patch.clone_dynamic()); + foo_patch.insert("e", bar_patch.to_dynamic_struct()); let mut tuple = DynamicTuple::default(); tuple.insert(2i32); @@ -1525,22 +1869,22 @@ mod tests { #[test] fn not_dynamic_names() { let list = Vec::::new(); - let dyn_list = list.clone_dynamic(); + let dyn_list = list.to_dynamic_list(); assert_ne!(dyn_list.reflect_type_path(), Vec::::type_path()); let array = [b'0'; 4]; - let dyn_array = array.clone_dynamic(); + let dyn_array = array.to_dynamic_array(); assert_ne!(dyn_array.reflect_type_path(), <[u8; 4]>::type_path()); let map = HashMap::::default(); - let dyn_map = map.clone_dynamic(); + let dyn_map = map.to_dynamic_map(); assert_ne!( dyn_map.reflect_type_path(), HashMap::::type_path() ); let tuple = (0usize, "1".to_string(), 2.0f32); - let mut dyn_tuple = tuple.clone_dynamic(); + let mut dyn_tuple = tuple.to_dynamic_tuple(); dyn_tuple.insert::(3); assert_ne!( dyn_tuple.reflect_type_path(), @@ -1552,13 +1896,13 @@ mod tests { a: usize, } let struct_ = TestStruct { a: 0 }; - let dyn_struct = struct_.clone_dynamic(); + let dyn_struct = struct_.to_dynamic_struct(); assert_ne!(dyn_struct.reflect_type_path(), TestStruct::type_path()); #[derive(Reflect)] struct TestTupleStruct(usize); let tuple_struct = TestTupleStruct(0); - let dyn_tuple_struct = tuple_struct.clone_dynamic(); + let dyn_tuple_struct = tuple_struct.to_dynamic_tuple_struct(); assert_ne!( dyn_tuple_struct.reflect_type_path(), TestTupleStruct::type_path() @@ -1943,7 +2287,7 @@ mod tests { #[test] fn should_permit_valid_represented_type_for_dynamic() { let type_info = <[i32; 2] as Typed>::type_info(); - let mut dynamic_array = [123; 2].clone_dynamic(); + let mut dynamic_array = [123; 2].to_dynamic_array(); dynamic_array.set_represented_type(Some(type_info)); } @@ -1951,7 +2295,7 @@ mod 
tests { #[should_panic(expected = "expected TypeInfo::Array but received")] fn should_prohibit_invalid_represented_type_for_dynamic() { let type_info = <(i32, i32) as Typed>::type_info(); - let mut dynamic_array = [123; 2].clone_dynamic(); + let mut dynamic_array = [123; 2].to_dynamic_array(); dynamic_array.set_represented_type(Some(type_info)); } @@ -2138,7 +2482,7 @@ mod tests { enum_struct: SomeEnum, custom: CustomDebug, #[reflect(ignore)] - #[allow(dead_code)] + #[expect(dead_code, reason = "This value is intended to not be reflected.")] ignored: isize, } @@ -2334,6 +2678,14 @@ bevy_reflect::tests::Test { assert_not_impl_all!(Foo: Reflect); } + #[test] + fn should_allow_empty_enums() { + #[derive(Reflect)] + enum Empty {} + + assert_impl_all!(Empty: Reflect); + } + #[test] fn recursive_typed_storage_does_not_hang() { #[derive(Reflect)] @@ -2491,7 +2843,7 @@ bevy_reflect::tests::Test { map, value: 12, } - .clone_dynamic(); + .to_dynamic_struct(); // test unknown DynamicStruct let mut test_unknown_struct = DynamicStruct::default(); @@ -2556,6 +2908,8 @@ bevy_reflect::tests::Test { #[test] fn should_reflect_remote_type() { mod external_crate { + use alloc::string::String; + #[derive(Debug, Default)] pub struct TheirType { pub value: String, @@ -2631,6 +2985,8 @@ bevy_reflect::tests::Test { #[test] fn should_reflect_remote_value_type() { mod external_crate { + use alloc::string::String; + #[derive(Clone, Debug, Default)] pub struct TheirType { pub value: String, @@ -2714,6 +3070,8 @@ bevy_reflect::tests::Test { // error[E0433]: failed to resolve: use of undeclared crate or module `external_crate` // ``` pub mod external_crate { + use alloc::string::String; + pub struct TheirType { pub value: String, } @@ -2735,6 +3093,8 @@ bevy_reflect::tests::Test { #[test] fn should_reflect_remote_enum() { mod external_crate { + use alloc::string::String; + #[derive(Debug, PartialEq, Eq)] pub enum TheirType { Unit, @@ -2899,6 +3259,8 @@ bevy_reflect::tests::Test { #[test] fn 
should_take_remote_type() { mod external_crate { + use alloc::string::String; + #[derive(Debug, Default, PartialEq, Eq)] pub struct TheirType { pub value: String, @@ -2931,6 +3293,8 @@ bevy_reflect::tests::Test { #[test] fn should_try_take_remote_type() { mod external_crate { + use alloc::string::String; + #[derive(Debug, Default, PartialEq, Eq)] pub struct TheirType { pub value: String, diff --git a/crates/bevy_reflect/src/list.rs b/crates/bevy_reflect/src/list.rs index 58fca368b88da..2e1c08567601b 100644 --- a/crates/bevy_reflect/src/list.rs +++ b/crates/bevy_reflect/src/list.rs @@ -9,9 +9,9 @@ use bevy_reflect_derive::impl_type_path; use crate::generics::impl_generic_info_methods; use crate::{ - self as bevy_reflect, type_info::impl_type_methods, utility::reflect_hasher, ApplyError, - FromReflect, Generics, MaybeTyped, PartialReflect, Reflect, ReflectKind, ReflectMut, - ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, + type_info::impl_type_methods, utility::reflect_hasher, ApplyError, FromReflect, Generics, + MaybeTyped, PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Type, + TypeInfo, TypePath, }; /// A trait used to power [list-like] operations via [reflection]. @@ -104,10 +104,16 @@ pub trait List: PartialReflect { fn drain(&mut self) -> Vec>; /// Clones the list, producing a [`DynamicList`]. + #[deprecated(since = "0.16.0", note = "use `to_dynamic_list` instead")] fn clone_dynamic(&self) -> DynamicList { + self.to_dynamic_list() + } + + /// Creates a new [`DynamicList`] from this list. 
+ fn to_dynamic_list(&self) -> DynamicList { DynamicList { represented_type: self.get_represented_type_info(), - values: self.iter().map(PartialReflect::clone_value).collect(), + values: self.iter().map(PartialReflect::to_dynamic).collect(), } } @@ -246,17 +252,6 @@ impl List for DynamicList { fn drain(&mut self) -> Vec> { self.values.drain(..).collect() } - - fn clone_dynamic(&self) -> DynamicList { - DynamicList { - represented_type: self.represented_type, - values: self - .values - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicList { @@ -320,11 +315,6 @@ impl PartialReflect for DynamicList { ReflectOwned::List(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_hash(&self) -> Option { list_hash(self) @@ -470,7 +460,7 @@ pub fn list_try_apply(a: &mut L, b: &dyn PartialReflect) -> Result<(), v.try_apply(value)?; } } else { - List::push(a, value.clone_value()); + List::push(a, value.to_dynamic()); } } @@ -535,6 +525,7 @@ pub fn list_debug(dyn_list: &dyn List, f: &mut Formatter<'_>) -> core::fmt::Resu mod tests { use super::DynamicList; use crate::Reflect; + use alloc::{boxed::Box, vec}; use core::assert_eq; #[test] diff --git a/crates/bevy_reflect/src/map.rs b/crates/bevy_reflect/src/map.rs index e5205e90afa38..0a1c0b689a23b 100644 --- a/crates/bevy_reflect/src/map.rs +++ b/crates/bevy_reflect/src/map.rs @@ -1,12 +1,12 @@ use core::fmt::{Debug, Formatter}; +use bevy_platform::collections::HashTable; use bevy_reflect_derive::impl_type_path; -use bevy_utils::hashbrown::HashTable; use crate::{ - self as bevy_reflect, generics::impl_generic_info_methods, type_info::impl_type_methods, - ApplyError, Generics, MaybeTyped, PartialReflect, Reflect, ReflectKind, ReflectMut, - ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, + generics::impl_generic_info_methods, type_info::impl_type_methods, ApplyError, Generics, + MaybeTyped, PartialReflect, Reflect, 
ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Type, + TypeInfo, TypePath, }; use alloc::{boxed::Box, format, vec::Vec}; @@ -82,7 +82,20 @@ pub trait Map: PartialReflect { fn drain(&mut self) -> Vec<(Box, Box)>; /// Clones the map, producing a [`DynamicMap`]. - fn clone_dynamic(&self) -> DynamicMap; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_map` instead")] + fn clone_dynamic(&self) -> DynamicMap { + self.to_dynamic_map() + } + + /// Creates a new [`DynamicMap`] from this map. + fn to_dynamic_map(&self) -> DynamicMap { + let mut map = DynamicMap::default(); + map.set_represented_type(self.get_represented_type_info()); + for (key, value) in self.iter() { + map.insert_boxed(key.to_dynamic(), value.to_dynamic()); + } + map + } /// Inserts a key-value pair into the map. /// @@ -206,7 +219,6 @@ macro_rules! hash_error { ), } } - .as_str() }} } @@ -244,7 +256,7 @@ impl DynamicMap { } fn internal_hash(value: &dyn PartialReflect) -> u64 { - value.reflect_hash().expect(hash_error!(value)) + value.reflect_hash().expect(&hash_error!(value)) } fn internal_eq<'a>( @@ -303,18 +315,6 @@ impl Map for DynamicMap { self.values.drain(..).collect() } - fn clone_dynamic(&self) -> DynamicMap { - DynamicMap { - represented_type: self.represented_type, - values: self - .values - .iter() - .map(|(key, value)| (key.clone_value(), value.clone_value())) - .collect(), - indices: self.indices.clone(), - } - } - fn insert_boxed( &mut self, key: Box, @@ -432,10 +432,6 @@ impl PartialReflect for DynamicMap { ReflectOwned::Map(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { map_partial_eq(self, value) } @@ -621,7 +617,7 @@ pub fn map_try_apply(a: &mut M, b: &dyn PartialReflect) -> Result<(), Ap if let Some(a_value) = a.get_mut(key) { a_value.try_apply(b_value)?; } else { - a.insert_boxed(key.clone_value(), b_value.clone_value()); + a.insert_boxed(key.to_dynamic(), 
b_value.to_dynamic()); } } @@ -631,6 +627,10 @@ pub fn map_try_apply(a: &mut M, b: &dyn PartialReflect) -> Result<(), Ap #[cfg(test)] mod tests { use super::{DynamicMap, Map}; + use alloc::{ + borrow::ToOwned, + string::{String, ToString}, + }; #[test] fn test_into_iter() { diff --git a/crates/bevy_reflect/src/path/mod.rs b/crates/bevy_reflect/src/path/mod.rs index 3fe0504cf7408..a52bbb6aaa688 100644 --- a/crates/bevy_reflect/src/path/mod.rs +++ b/crates/bevy_reflect/src/path/mod.rs @@ -127,10 +127,12 @@ impl<'a> ReflectPath<'a> for &'a str { /// Note that a leading dot (`.`) or hash (`#`) token is implied for the first item in a path, /// and may therefore be omitted. /// +/// Additionally, an empty path may be used to get the struct itself. +/// /// ### Example /// ``` /// # use bevy_reflect::{GetPath, Reflect}; -/// #[derive(Reflect)] +/// #[derive(Reflect, PartialEq, Debug)] /// struct MyStruct { /// value: u32 /// } @@ -140,6 +142,8 @@ impl<'a> ReflectPath<'a> for &'a str { /// assert_eq!(my_struct.path::(".value").unwrap(), &123); /// // Access via field index /// assert_eq!(my_struct.path::("#0").unwrap(), &123); +/// // Access self +/// assert_eq!(*my_struct.path::("").unwrap(), my_struct); /// ``` /// /// ## Tuples and Tuple Structs @@ -502,13 +506,16 @@ impl core::ops::IndexMut for ParsedPath { } #[cfg(test)] -#[allow(clippy::float_cmp, clippy::approx_constant)] +#[expect( + clippy::approx_constant, + reason = "We don't need the exact value of Pi here." 
+)] mod tests { use super::*; - use crate as bevy_reflect; use crate::*; + use alloc::vec; - #[derive(Reflect)] + #[derive(Reflect, PartialEq, Debug)] struct A { w: usize, x: B, @@ -521,21 +528,21 @@ mod tests { tuple: (bool, f32), } - #[derive(Reflect)] + #[derive(Reflect, PartialEq, Debug)] struct B { foo: usize, łørđ: C, } - #[derive(Reflect)] + #[derive(Reflect, PartialEq, Debug)] struct C { mосква: f32, } - #[derive(Reflect)] + #[derive(Reflect, PartialEq, Debug)] struct D(E); - #[derive(Reflect)] + #[derive(Reflect, PartialEq, Debug)] struct E(f32, usize); #[derive(Reflect, PartialEq, Debug)] @@ -735,6 +742,7 @@ mod tests { fn reflect_path() { let mut a = a_sample(); + assert_eq!(*a.path::("").unwrap(), a); assert_eq!(*a.path::("w").unwrap(), 1); assert_eq!(*a.path::("x.foo").unwrap(), 10); assert_eq!(*a.path::("x.łørđ.mосква").unwrap(), 3.14); diff --git a/crates/bevy_reflect/src/path/parse.rs b/crates/bevy_reflect/src/path/parse.rs index bc48fe9c01be0..2ab2939a30ae4 100644 --- a/crates/bevy_reflect/src/path/parse.rs +++ b/crates/bevy_reflect/src/path/parse.rs @@ -64,7 +64,10 @@ impl<'a> PathParser<'a> { // the last byte before an ASCII utf-8 character (ie: it is a char // boundary). // - The slice always starts after a symbol ie: an ASCII character's boundary. - #[allow(unsafe_code)] + #[expect( + unsafe_code, + reason = "We have fulfilled the Safety requirements for `from_utf8_unchecked`." 
+ )] let ident = unsafe { from_utf8_unchecked(ident) }; self.remaining = remaining; diff --git a/crates/bevy_reflect/src/reflect.rs b/crates/bevy_reflect/src/reflect.rs index bf7844dbde91b..4918179e127f4 100644 --- a/crates/bevy_reflect/src/reflect.rs +++ b/crates/bevy_reflect/src/reflect.rs @@ -1,9 +1,11 @@ use crate::{ array_debug, enum_debug, list_debug, map_debug, set_debug, struct_debug, tuple_debug, - tuple_struct_debug, DynamicTypePath, DynamicTyped, OpaqueInfo, ReflectKind, + tuple_struct_debug, DynamicTypePath, DynamicTyped, OpaqueInfo, ReflectCloneError, ReflectKind, ReflectKindMismatchError, ReflectMut, ReflectOwned, ReflectRef, TypeInfo, TypePath, Typed, }; +use alloc::borrow::Cow; use alloc::boxed::Box; +use alloc::string::ToString; use core::{ any::{Any, TypeId}, fmt::Debug, @@ -17,7 +19,7 @@ use crate::utility::NonGenericTypeInfoCell; #[derive(Error, Debug)] pub enum ApplyError { #[error("attempted to apply `{from_kind}` to `{to_kind}`")] - /// Attempted to apply the wrong [kind](ReflectKind) to a type, e.g. a struct to a enum. + /// Attempted to apply the wrong [kind](ReflectKind) to a type, e.g. a struct to an enum. MismatchedKinds { from_kind: ReflectKind, to_kind: ReflectKind, @@ -216,20 +218,116 @@ where /// See [`ReflectOwned`]. fn reflect_owned(self: Box) -> ReflectOwned; - /// Clones the value as a `Reflect` trait object. + /// Clones `Self` into its dynamic representation. /// - /// When deriving `Reflect` for a struct, tuple struct or enum, the value is - /// cloned via [`Struct::clone_dynamic`], [`TupleStruct::clone_dynamic`], - /// or [`Enum::clone_dynamic`], respectively. - /// Implementors of other `Reflect` subtraits (e.g. [`List`], [`Map`]) should - /// use those subtraits' respective `clone_dynamic` methods. + /// For value types or types marked with `#[reflect_value]`, + /// this will simply return a clone of `Self`. /// + /// Otherwise the associated dynamic type will be returned. 
+ /// + /// For example, a [`List`] type will invoke [`List::clone_dynamic`], returning [`DynamicList`]. + /// A [`Struct`] type will invoke [`Struct::clone_dynamic`], returning [`DynamicStruct`]. + /// And so on. + /// + /// If the dynamic behavior is not desired, a concrete clone can be obtained using [`PartialReflect::reflect_clone`]. + /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::{PartialReflect}; + /// let value = (1, true, 3.14); + /// let cloned = value.clone_value(); + /// assert!(cloned.is_dynamic()) + /// ``` + /// + /// [`List`]: crate::List + /// [`List::clone_dynamic`]: crate::List::clone_dynamic + /// [`DynamicList`]: crate::DynamicList + /// [`Struct`]: crate::Struct /// [`Struct::clone_dynamic`]: crate::Struct::clone_dynamic - /// [`TupleStruct::clone_dynamic`]: crate::TupleStruct::clone_dynamic - /// [`Enum::clone_dynamic`]: crate::Enum::clone_dynamic + /// [`DynamicStruct`]: crate::DynamicStruct + #[deprecated( + since = "0.16.0", + note = "to clone reflected values, prefer using `reflect_clone`. To convert reflected values to dynamic ones, use `to_dynamic`." + )] + fn clone_value(&self) -> Box { + self.to_dynamic() + } + + /// Converts this reflected value into its dynamic representation based on its [kind]. + /// + /// For example, a [`List`] type will internally invoke [`List::to_dynamic_list`], returning [`DynamicList`]. + /// A [`Struct`] type will invoke [`Struct::to_dynamic_struct`], returning [`DynamicStruct`]. + /// And so on. + /// + /// If the [kind] is [opaque], then the value will attempt to be cloned directly via [`reflect_clone`], + /// since opaque types do not have any standard dynamic representation. + /// + /// To attempt to clone the value directly such that it returns a concrete instance of this type, + /// use [`reflect_clone`]. + /// + /// # Panics + /// + /// This method will panic if the [kind] is [opaque] and the call to [`reflect_clone`] fails. 
+ /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::{PartialReflect}; + /// let value = (1, true, 3.14); + /// let dynamic_value = value.to_dynamic(); + /// assert!(dynamic_value.is_dynamic()) + /// ``` + /// + /// [kind]: PartialReflect::reflect_kind /// [`List`]: crate::List - /// [`Map`]: crate::Map - fn clone_value(&self) -> Box; + /// [`List::to_dynamic_list`]: crate::List::to_dynamic_list + /// [`DynamicList`]: crate::DynamicList + /// [`Struct`]: crate::Struct + /// [`Struct::to_dynamic_struct`]: crate::Struct::to_dynamic_struct + /// [`DynamicStruct`]: crate::DynamicStruct + /// [opaque]: crate::ReflectKind::Opaque + /// [`reflect_clone`]: PartialReflect::reflect_clone + fn to_dynamic(&self) -> Box { + match self.reflect_ref() { + ReflectRef::Struct(dyn_struct) => Box::new(dyn_struct.to_dynamic_struct()), + ReflectRef::TupleStruct(dyn_tuple_struct) => { + Box::new(dyn_tuple_struct.to_dynamic_tuple_struct()) + } + ReflectRef::Tuple(dyn_tuple) => Box::new(dyn_tuple.to_dynamic_tuple()), + ReflectRef::List(dyn_list) => Box::new(dyn_list.to_dynamic_list()), + ReflectRef::Array(dyn_array) => Box::new(dyn_array.to_dynamic_array()), + ReflectRef::Map(dyn_map) => Box::new(dyn_map.to_dynamic_map()), + ReflectRef::Set(dyn_set) => Box::new(dyn_set.to_dynamic_set()), + ReflectRef::Enum(dyn_enum) => Box::new(dyn_enum.to_dynamic_enum()), + #[cfg(feature = "functions")] + ReflectRef::Function(dyn_function) => Box::new(dyn_function.to_dynamic_function()), + ReflectRef::Opaque(value) => value.reflect_clone().unwrap().into_partial_reflect(), + } + } + + /// Attempts to clone `Self` using reflection. + /// + /// Unlike [`to_dynamic`], which generally returns a dynamic representation of `Self`, + /// this method attempts create a clone of `Self` directly, if possible. + /// + /// If the clone cannot be performed, an appropriate [`ReflectCloneError`] is returned. 
+ /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::PartialReflect; + /// let value = (1, true, 3.14); + /// let cloned = value.reflect_clone().unwrap(); + /// assert!(cloned.is::<(i32, bool, f64)>()) + /// ``` + /// + /// [`to_dynamic`]: PartialReflect::to_dynamic + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Err(ReflectCloneError::NotImplemented { + type_path: Cow::Owned(self.reflect_type_path().to_string()), + }) + } /// Returns a hash of the value (which includes the type). /// @@ -352,8 +450,7 @@ impl dyn PartialReflect { #[inline] pub fn represents(&self) -> bool { self.get_represented_type_info() - .map(|t| t.type_path() == T::type_path()) - .unwrap_or(false) + .is_some_and(|t| t.type_path() == T::type_path()) } /// Downcasts the value to type `T`, consuming the trait object. diff --git a/crates/bevy_reflect/src/serde/de/deserialize_with_registry.rs b/crates/bevy_reflect/src/serde/de/deserialize_with_registry.rs index d0c9703d403c8..f92a8e68e24da 100644 --- a/crates/bevy_reflect/src/serde/de/deserialize_with_registry.rs +++ b/crates/bevy_reflect/src/serde/de/deserialize_with_registry.rs @@ -41,7 +41,7 @@ use serde::Deserializer; /// [`TypedReflectDeserializer`]: crate::serde::TypedReflectDeserializer /// [`ReflectDeserializer`]: crate::serde::ReflectDeserializer /// [via the registry]: TypeRegistry::register_type_data -pub trait DeserializeWithRegistry<'de>: PartialReflect + Sized { +pub trait DeserializeWithRegistry<'de>: Sized { fn deserialize(deserializer: D, registry: &TypeRegistry) -> Result where D: Deserializer<'de>; diff --git a/crates/bevy_reflect/src/serde/de/error_utils.rs b/crates/bevy_reflect/src/serde/de/error_utils.rs index f028976805791..d570c47f0c369 100644 --- a/crates/bevy_reflect/src/serde/de/error_utils.rs +++ b/crates/bevy_reflect/src/serde/de/error_utils.rs @@ -1,6 +1,9 @@ use core::fmt::Display; use serde::de::Error; +#[cfg(feature = "debug_stack")] +use std::thread_local; + #[cfg(feature = "debug_stack")] 
thread_local! { /// The thread-local [`TypeInfoStack`] used for debugging. diff --git a/crates/bevy_reflect/src/serde/de/mod.rs b/crates/bevy_reflect/src/serde/de/mod.rs index e55897166e926..e82b60bcee5c8 100644 --- a/crates/bevy_reflect/src/serde/de/mod.rs +++ b/crates/bevy_reflect/src/serde/de/mod.rs @@ -24,16 +24,19 @@ mod tuples; #[cfg(test)] mod tests { - use bincode::Options; + use alloc::{ + boxed::Box, + string::{String, ToString}, + vec, + vec::Vec, + }; use core::{any::TypeId, f32::consts::PI, ops::RangeInclusive}; - use serde::{de::IgnoredAny, Deserializer}; - use serde::{de::DeserializeSeed, Deserialize}; + use serde::{de::IgnoredAny, Deserializer}; - use bevy_utils::{HashMap, HashSet}; + use bevy_platform::collections::{HashMap, HashSet}; use crate::{ - self as bevy_reflect, serde::{ ReflectDeserializer, ReflectDeserializerProcessor, ReflectSerializer, TypedReflectDeserializer, @@ -466,10 +469,9 @@ mod tests { let deserializer = ReflectDeserializer::new(®istry); - let dynamic_output = bincode::DefaultOptions::new() - .with_fixint_encoding() - .deserialize_seed(deserializer, &input) - .unwrap(); + let config = bincode::config::standard().with_fixed_int_encoding(); + let (dynamic_output, _read_bytes) = + bincode::serde::seed_decode_from_slice(deserializer, &input, config).unwrap(); let output = ::from_reflect(dynamic_output.as_ref()).unwrap(); assert_eq!(expected, output); diff --git a/crates/bevy_reflect/src/serde/mod.rs b/crates/bevy_reflect/src/serde/mod.rs index 3fcaa6aafc7b3..a2c3fe63edd33 100644 --- a/crates/bevy_reflect/src/serde/mod.rs +++ b/crates/bevy_reflect/src/serde/mod.rs @@ -10,8 +10,8 @@ pub use type_data::*; mod tests { use super::*; use crate::{ - self as bevy_reflect, type_registry::TypeRegistry, DynamicStruct, DynamicTupleStruct, - FromReflect, PartialReflect, Reflect, Struct, + type_registry::TypeRegistry, DynamicStruct, DynamicTupleStruct, FromReflect, + PartialReflect, Reflect, Struct, }; use serde::de::DeserializeSeed; @@ -164,7 
+164,7 @@ mod tests { let mut registry = TypeRegistry::default(); registry.register::(); - let value: DynamicStruct = TestStruct { a: 123, b: 456 }.clone_dynamic(); + let value: DynamicStruct = TestStruct { a: 123, b: 456 }.to_dynamic_struct(); let serializer = ReflectSerializer::new(&value, ®istry); @@ -175,7 +175,7 @@ mod tests { let mut deserializer = ron::de::Deserializer::from_str(&result).unwrap(); let reflect_deserializer = ReflectDeserializer::new(®istry); - let expected = value.clone_value(); + let expected = value.to_dynamic(); let result = reflect_deserializer.deserialize(&mut deserializer).unwrap(); assert!(expected @@ -189,7 +189,8 @@ mod tests { use crate::serde::{DeserializeWithRegistry, ReflectDeserializeWithRegistry}; use crate::serde::{ReflectSerializeWithRegistry, SerializeWithRegistry}; use crate::{ReflectFromReflect, TypePath}; - use alloc::sync::Arc; + use alloc::{format, string::String, vec, vec::Vec}; + use bevy_platform::sync::Arc; use bevy_reflect_derive::reflect_trait; use core::any::TypeId; use core::fmt::{Debug, Formatter}; @@ -199,7 +200,7 @@ mod tests { #[reflect_trait] trait Enemy: Reflect + Debug { - #[allow(dead_code, reason = "this method is purely for testing purposes")] + #[expect(dead_code, reason = "this method is purely for testing purposes")] fn hp(&self) -> u8; } @@ -336,6 +337,22 @@ mod tests { registry } + fn create_arc_dyn_enemy(enemy: T) -> Arc { + let arc = Arc::new(enemy); + + #[cfg(not(target_has_atomic = "ptr"))] + #[expect( + unsafe_code, + reason = "unsized coercion is an unstable feature for non-std types" + )] + // SAFETY: + // - Coercion from `T` to `dyn Enemy` is valid as `T: Enemy + 'static` + // - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw` + let arc = unsafe { Arc::from_raw(Arc::into_raw(arc) as *const dyn Enemy) }; + + arc + } + #[test] fn should_serialize_with_serialize_with_registry() { let registry = create_registry(); @@ -343,8 +360,8 @@ mod tests { let level = 
Level { name: String::from("Level 1"), enemies: EnemyList(vec![ - Arc::new(Skeleton(10)), - Arc::new(Zombie { + create_arc_dyn_enemy(Skeleton(10)), + create_arc_dyn_enemy(Zombie { health: 20, walk_speed: 0.5, }), @@ -374,8 +391,8 @@ mod tests { let expected = Level { name: String::from("Level 1"), enemies: EnemyList(vec![ - Arc::new(Skeleton(10)), - Arc::new(Zombie { + create_arc_dyn_enemy(Skeleton(10)), + create_arc_dyn_enemy(Zombie { health: 20, walk_speed: 0.5, }), @@ -388,8 +405,8 @@ mod tests { let unexpected = Level { name: String::from("Level 1"), enemies: EnemyList(vec![ - Arc::new(Skeleton(20)), - Arc::new(Zombie { + create_arc_dyn_enemy(Skeleton(20)), + create_arc_dyn_enemy(Zombie { health: 20, walk_speed: 5.0, }), diff --git a/crates/bevy_reflect/src/serde/ser/error_utils.rs b/crates/bevy_reflect/src/serde/ser/error_utils.rs index 8e6570c6691a2..d252e7f591d69 100644 --- a/crates/bevy_reflect/src/serde/ser/error_utils.rs +++ b/crates/bevy_reflect/src/serde/ser/error_utils.rs @@ -1,6 +1,9 @@ use core::fmt::Display; use serde::ser::Error; +#[cfg(feature = "debug_stack")] +use std::thread_local; + #[cfg(feature = "debug_stack")] thread_local! { /// The thread-local [`TypeInfoStack`] used for debugging. 
diff --git a/crates/bevy_reflect/src/serde/ser/mod.rs b/crates/bevy_reflect/src/serde/ser/mod.rs index 53afacde37430..25399e1d711e5 100644 --- a/crates/bevy_reflect/src/serde/ser/mod.rs +++ b/crates/bevy_reflect/src/serde/ser/mod.rs @@ -21,11 +21,17 @@ mod tuples; #[cfg(test)] mod tests { use crate::{ - self as bevy_reflect, serde::{ReflectSerializer, ReflectSerializerProcessor}, PartialReflect, Reflect, ReflectSerialize, Struct, TypeRegistry, }; - use bevy_utils::{HashMap, HashSet}; + #[cfg(feature = "functions")] + use alloc::boxed::Box; + use alloc::{ + string::{String, ToString}, + vec, + vec::Vec, + }; + use bevy_platform::collections::{HashMap, HashSet}; use core::{any::TypeId, f32::consts::PI, ops::RangeInclusive}; use ron::{extensions::Extensions, ser::PrettyConfig}; use serde::{Serialize, Serializer}; @@ -343,7 +349,8 @@ mod tests { let registry = get_registry(); let serializer = ReflectSerializer::new(&input, ®istry); - let bytes = bincode::serialize(&serializer).unwrap(); + let config = bincode::config::standard().with_fixed_int_encoding(); + let bytes = bincode::serde::encode_to_vec(&serializer, config).unwrap(); let expected: Vec = vec![ 1, 0, 0, 0, 0, 0, 0, 0, 41, 0, 0, 0, 0, 0, 0, 0, 98, 101, 118, 121, 95, 114, 101, 102, @@ -401,7 +408,7 @@ mod tests { some: Some(SomeStruct { foo: 999999999 }), none: None, }; - let dynamic = value.clone_dynamic(); + let dynamic = value.to_dynamic_struct(); let reflect = dynamic.as_partial_reflect(); let registry = get_registry(); @@ -647,6 +654,7 @@ mod tests { mod functions { use super::*; use crate::func::{DynamicFunction, IntoFunction}; + use alloc::string::ToString; #[test] fn should_not_serialize_function() { diff --git a/crates/bevy_reflect/src/serde/ser/serialize_with_registry.rs b/crates/bevy_reflect/src/serde/ser/serialize_with_registry.rs index 25922a5bd9456..9c5bfb06f1ca8 100644 --- a/crates/bevy_reflect/src/serde/ser/serialize_with_registry.rs +++ 
b/crates/bevy_reflect/src/serde/ser/serialize_with_registry.rs @@ -75,7 +75,7 @@ impl FromType for ReflectSerializeWithReg serialize: |value: &dyn Reflect, registry| { let value = value.downcast_ref::().unwrap_or_else(|| { panic!( - "Expected value to be of type {:?} but received {:?}", + "Expected value to be of type {} but received {}", core::any::type_name::(), value.reflect_type_path() ) diff --git a/crates/bevy_reflect/src/serde/ser/structs.rs b/crates/bevy_reflect/src/serde/ser/structs.rs index 4eb3e76700d57..828eb3e6cb829 100644 --- a/crates/bevy_reflect/src/serde/ser/structs.rs +++ b/crates/bevy_reflect/src/serde/ser/structs.rs @@ -48,10 +48,7 @@ impl Serialize for StructSerializer<'_, P> { )?; for (index, value) in self.struct_value.iter_fields().enumerate() { - if serialization_data - .map(|data| data.is_field_skipped(index)) - .unwrap_or(false) - { + if serialization_data.is_some_and(|data| data.is_field_skipped(index)) { continue; } let key = struct_info.field_at(index).unwrap().name(); diff --git a/crates/bevy_reflect/src/serde/ser/tuple_structs.rs b/crates/bevy_reflect/src/serde/ser/tuple_structs.rs index 5bf2ec64ae7e0..00554c0a86694 100644 --- a/crates/bevy_reflect/src/serde/ser/tuple_structs.rs +++ b/crates/bevy_reflect/src/serde/ser/tuple_structs.rs @@ -57,10 +57,7 @@ impl Serialize for TupleStructSerializer<'_, P> { )?; for (index, value) in self.tuple_struct.iter_fields().enumerate() { - if serialization_data - .map(|data| data.is_field_skipped(index)) - .unwrap_or(false) - { + if serialization_data.is_some_and(|data| data.is_field_skipped(index)) { continue; } state.serialize_field(&TypedReflectSerializer::new_internal( diff --git a/crates/bevy_reflect/src/serde/type_data.rs b/crates/bevy_reflect/src/serde/type_data.rs index de88f99831fad..9bb3e134ac6bc 100644 --- a/crates/bevy_reflect/src/serde/type_data.rs +++ b/crates/bevy_reflect/src/serde/type_data.rs @@ -1,6 +1,6 @@ use crate::Reflect; use alloc::boxed::Box; -use 
bevy_utils::{hashbrown::hash_map::Iter, HashMap}; +use bevy_platform::collections::{hash_map::Iter, HashMap}; /// Contains data relevant to the automatic reflect powered (de)serialization of a type. #[derive(Debug, Clone)] @@ -14,9 +14,9 @@ impl SerializationData { /// # Arguments /// /// * `skipped_iter`: The iterator of field indices to be skipped during (de)serialization. - /// Indices are assigned only to reflected fields. - /// Ignored fields (i.e. those marked `#[reflect(ignore)]`) are implicitly skipped - /// and do not need to be included in this iterator. + /// Indices are assigned only to reflected fields. + /// Ignored fields (i.e. those marked `#[reflect(ignore)]`) are implicitly skipped + /// and do not need to be included in this iterator. pub fn new>(skipped_iter: I) -> Self { Self { skipped_fields: skipped_iter.collect(), diff --git a/crates/bevy_reflect/src/set.rs b/crates/bevy_reflect/src/set.rs index 0d46d9f9df771..753662b603fff 100644 --- a/crates/bevy_reflect/src/set.rs +++ b/crates/bevy_reflect/src/set.rs @@ -1,13 +1,13 @@ use alloc::{boxed::Box, format, vec::Vec}; use core::fmt::{Debug, Formatter}; +use bevy_platform::collections::{hash_table::OccupiedEntry as HashTableOccupiedEntry, HashTable}; use bevy_reflect_derive::impl_type_path; -use bevy_utils::hashbrown::{hash_table::OccupiedEntry as HashTableOccupiedEntry, HashTable}; use crate::{ - self as bevy_reflect, generics::impl_generic_info_methods, hash_error, - type_info::impl_type_methods, ApplyError, Generics, PartialReflect, Reflect, ReflectKind, - ReflectMut, ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, + generics::impl_generic_info_methods, hash_error, type_info::impl_type_methods, ApplyError, + Generics, PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Type, + TypeInfo, TypePath, }; /// A trait used to power [set-like] operations via [reflection]. 
@@ -68,7 +68,20 @@ pub trait Set: PartialReflect { fn drain(&mut self) -> Vec>; /// Clones the set, producing a [`DynamicSet`]. - fn clone_dynamic(&self) -> DynamicSet; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_set` instead")] + fn clone_dynamic(&self) -> DynamicSet { + self.to_dynamic_set() + } + + /// Creates a new [`DynamicSet`] from this set. + fn to_dynamic_set(&self) -> DynamicSet { + let mut set = DynamicSet::default(); + set.set_represented_type(self.get_represented_type_info()); + for value in self.iter() { + set.insert_boxed(value.to_dynamic()); + } + set + } /// Inserts a value into the set. /// @@ -165,7 +178,7 @@ impl DynamicSet { } fn internal_hash(value: &dyn PartialReflect) -> u64 { - value.reflect_hash().expect(hash_error!(value)) + value.reflect_hash().expect(&hash_error!(value)) } fn internal_eq( @@ -199,23 +212,6 @@ impl Set for DynamicSet { self.hash_table.drain().collect::>() } - fn clone_dynamic(&self) -> DynamicSet { - let mut hash_table = HashTable::new(); - self.hash_table - .iter() - .map(|value| value.clone_value()) - .for_each(|value| { - hash_table.insert_unique(Self::internal_hash(value.as_ref()), value, |boxed| { - Self::internal_hash(boxed.as_ref()) - }); - }); - - DynamicSet { - represented_type: self.represented_type, - hash_table, - } - } - fn insert_boxed(&mut self, value: Box) -> bool { assert_eq!( value.reflect_partial_eq(&*value), @@ -315,10 +311,6 @@ impl PartialReflect for DynamicSet { ReflectOwned::Set(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { set_partial_eq(self, value) } @@ -375,7 +367,7 @@ impl FromIterator for DynamicSet { impl IntoIterator for DynamicSet { type Item = Box; - type IntoIter = bevy_utils::hashbrown::hash_table::IntoIter; + type IntoIter = bevy_platform::collections::hash_table::IntoIter; fn into_iter(self) -> Self::IntoIter { self.hash_table.into_iter() @@ -385,7 +377,7 @@ impl 
IntoIterator for DynamicSet { impl<'a> IntoIterator for &'a DynamicSet { type Item = &'a dyn PartialReflect; type IntoIter = core::iter::Map< - bevy_utils::hashbrown::hash_table::Iter<'a, Box>, + bevy_platform::collections::hash_table::Iter<'a, Box>, fn(&'a Box) -> Self::Item, >; @@ -465,7 +457,7 @@ pub fn set_apply(a: &mut M, b: &dyn PartialReflect) { if let ReflectRef::Set(set_value) = b.reflect_ref() { for b_value in set_value.iter() { if a.get(b_value).is_none() { - a.insert_boxed(b_value.clone_value()); + a.insert_boxed(b_value.to_dynamic()); } } } else { @@ -488,7 +480,7 @@ pub fn set_try_apply(a: &mut S, b: &dyn PartialReflect) -> Result<(), Ap for b_value in set_value.iter() { if a.get(b_value).is_none() { - a.insert_boxed(b_value.clone_value()); + a.insert_boxed(b_value.to_dynamic()); } } @@ -498,6 +490,7 @@ pub fn set_try_apply(a: &mut S, b: &dyn PartialReflect) -> Result<(), Ap #[cfg(test)] mod tests { use super::DynamicSet; + use alloc::string::{String, ToString}; #[test] fn test_into_iter() { diff --git a/crates/bevy_reflect/src/struct_trait.rs b/crates/bevy_reflect/src/struct_trait.rs index 9ee7c11d47a86..9146e9aecea64 100644 --- a/crates/bevy_reflect/src/struct_trait.rs +++ b/crates/bevy_reflect/src/struct_trait.rs @@ -1,14 +1,14 @@ use crate::generics::impl_generic_info_methods; use crate::{ - self as bevy_reflect, attributes::{impl_custom_attribute_methods, CustomAttributes}, type_info::impl_type_methods, ApplyError, Generics, NamedField, PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, }; -use alloc::{borrow::Cow, boxed::Box, sync::Arc, vec::Vec}; +use alloc::{borrow::Cow, boxed::Box, vec::Vec}; +use bevy_platform::collections::HashMap; +use bevy_platform::sync::Arc; use bevy_reflect_derive::impl_type_path; -use bevy_utils::HashMap; use core::{ fmt::{Debug, Formatter}, slice::Iter, @@ -72,7 +72,19 @@ pub trait Struct: PartialReflect { fn iter_fields(&self) -> FieldIter; /// Clones the struct 
into a [`DynamicStruct`]. - fn clone_dynamic(&self) -> DynamicStruct; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_struct` instead")] + fn clone_dynamic(&self) -> DynamicStruct { + self.to_dynamic_struct() + } + + fn to_dynamic_struct(&self) -> DynamicStruct { + let mut dynamic_struct = DynamicStruct::default(); + dynamic_struct.set_represented_type(self.get_represented_type_info()); + for (i, value) in self.iter_fields().enumerate() { + dynamic_struct.insert_boxed(self.name_at(i).unwrap(), value.to_dynamic()); + } + dynamic_struct + } /// Will return `None` if [`TypeInfo`] is not available. fn get_represented_struct_info(&self) -> Option<&'static StructInfo> { @@ -370,19 +382,6 @@ impl Struct for DynamicStruct { index: 0, } } - - fn clone_dynamic(&self) -> DynamicStruct { - DynamicStruct { - represented_type: self.get_represented_type_info(), - field_names: self.field_names.clone(), - field_indices: self.field_indices.clone(), - fields: self - .fields - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicStruct { @@ -449,11 +448,6 @@ impl PartialReflect for DynamicStruct { ReflectOwned::Struct(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { struct_partial_eq(self, value) } @@ -584,7 +578,6 @@ pub fn struct_debug(dyn_struct: &dyn Struct, f: &mut Formatter<'_>) -> core::fmt #[cfg(test)] mod tests { - use crate as bevy_reflect; use crate::*; #[derive(Reflect, Default)] struct MyStruct { diff --git a/crates/bevy_reflect/src/tuple.rs b/crates/bevy_reflect/src/tuple.rs index 9790990a26bb7..31ad67fdcf937 100644 --- a/crates/bevy_reflect/src/tuple.rs +++ b/crates/bevy_reflect/src/tuple.rs @@ -3,8 +3,8 @@ use variadics_please::all_tuples; use crate::generics::impl_generic_info_methods; use crate::{ - self as bevy_reflect, type_info::impl_type_methods, utility::GenericTypePathCell, ApplyError, - 
FromReflect, Generics, GetTypeRegistration, MaybeTyped, PartialReflect, Reflect, ReflectKind, + type_info::impl_type_methods, utility::GenericTypePathCell, ApplyError, FromReflect, Generics, + GetTypeRegistration, MaybeTyped, PartialReflect, Reflect, ReflectCloneError, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Type, TypeInfo, TypePath, TypeRegistration, TypeRegistry, Typed, UnnamedField, }; @@ -55,8 +55,19 @@ pub trait Tuple: PartialReflect { /// Drain the fields of this tuple to get a vector of owned values. fn drain(self: Box) -> Vec>; - /// Clones the struct into a [`DynamicTuple`]. - fn clone_dynamic(&self) -> DynamicTuple; + /// Clones the tuple into a [`DynamicTuple`]. + #[deprecated(since = "0.16.0", note = "use `to_dynamic_tuple` instead")] + fn clone_dynamic(&self) -> DynamicTuple { + self.to_dynamic_tuple() + } + + /// Creates a new [`DynamicTuple`] from this tuple. + fn to_dynamic_tuple(&self) -> DynamicTuple { + DynamicTuple { + represented_type: self.get_represented_type_info(), + fields: self.iter_fields().map(PartialReflect::to_dynamic).collect(), + } + } /// Will return `None` if [`TypeInfo`] is not available. fn get_represented_tuple_info(&self) -> Option<&'static TupleInfo> { @@ -270,18 +281,6 @@ impl Tuple for DynamicTuple { fn drain(self: Box) -> Vec> { self.fields } - - #[inline] - fn clone_dynamic(&self) -> DynamicTuple { - DynamicTuple { - represented_type: self.represented_type, - fields: self - .fields - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicTuple { @@ -339,11 +338,6 @@ impl PartialReflect for DynamicTuple { ReflectOwned::Tuple(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { tuple_try_apply(self, value) } @@ -518,18 +512,6 @@ macro_rules! 
impl_reflect_tuple { $(Box::new(self.$index),)* ] } - - #[inline] - fn clone_dynamic(&self) -> DynamicTuple { - let info = self.get_represented_type_info(); - DynamicTuple { - represented_type: info, - fields: self - .iter_fields() - .map(|value| value.clone_value()) - .collect(), - } - } } impl<$($name: Reflect + MaybeTyped + TypePath + GetTypeRegistration),*> PartialReflect for ($($name,)*) { @@ -578,10 +560,6 @@ macro_rules! impl_reflect_tuple { ReflectOwned::Tuple(self) } - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { crate::tuple_partial_eq(self, value) } @@ -593,6 +571,16 @@ macro_rules! impl_reflect_tuple { fn try_apply(&mut self, value: &dyn PartialReflect) -> Result<(), ApplyError> { crate::tuple_try_apply(self, value) } + + fn reflect_clone(&self) -> Result, ReflectCloneError> { + Ok(Box::new(( + $( + self.$index.reflect_clone()? + .take::<$name>() + .expect("`Reflect::reflect_clone` should return the same type"), + )* + ))) + } } impl<$($name: Reflect + MaybeTyped + TypePath + GetTypeRegistration),*> Reflect for ($($name,)*) { diff --git a/crates/bevy_reflect/src/tuple_struct.rs b/crates/bevy_reflect/src/tuple_struct.rs index 3445bfb6c8b3f..09d2819807506 100644 --- a/crates/bevy_reflect/src/tuple_struct.rs +++ b/crates/bevy_reflect/src/tuple_struct.rs @@ -2,13 +2,13 @@ use bevy_reflect_derive::impl_type_path; use crate::generics::impl_generic_info_methods; use crate::{ - self as bevy_reflect, attributes::{impl_custom_attribute_methods, CustomAttributes}, type_info::impl_type_methods, ApplyError, DynamicTuple, Generics, PartialReflect, Reflect, ReflectKind, ReflectMut, ReflectOwned, ReflectRef, Tuple, Type, TypeInfo, TypePath, UnnamedField, }; -use alloc::{boxed::Box, sync::Arc, vec::Vec}; +use alloc::{boxed::Box, vec::Vec}; +use bevy_platform::sync::Arc; use core::{ fmt::{Debug, Formatter}, slice::Iter, @@ -56,7 +56,18 @@ pub trait TupleStruct: PartialReflect { 
fn iter_fields(&self) -> TupleStructFieldIter; /// Clones the struct into a [`DynamicTupleStruct`]. - fn clone_dynamic(&self) -> DynamicTupleStruct; + #[deprecated(since = "0.16.0", note = "use `to_dynamic_tuple_struct` instead")] + fn clone_dynamic(&self) -> DynamicTupleStruct { + self.to_dynamic_tuple_struct() + } + + /// Creates a new [`DynamicTupleStruct`] from this tuple struct. + fn to_dynamic_tuple_struct(&self) -> DynamicTupleStruct { + DynamicTupleStruct { + represented_type: self.get_represented_type_info(), + fields: self.iter_fields().map(PartialReflect::to_dynamic).collect(), + } + } /// Will return `None` if [`TypeInfo`] is not available. fn get_represented_tuple_struct_info(&self) -> Option<&'static TupleStructInfo> { @@ -279,17 +290,6 @@ impl TupleStruct for DynamicTupleStruct { index: 0, } } - - fn clone_dynamic(&self) -> DynamicTupleStruct { - DynamicTupleStruct { - represented_type: self.represented_type, - fields: self - .fields - .iter() - .map(|value| value.clone_value()) - .collect(), - } - } } impl PartialReflect for DynamicTupleStruct { @@ -357,11 +357,6 @@ impl PartialReflect for DynamicTupleStruct { ReflectOwned::TupleStruct(self) } - #[inline] - fn clone_value(&self) -> Box { - Box::new(self.clone_dynamic()) - } - #[inline] fn reflect_partial_eq(&self, value: &dyn PartialReflect) -> Option { tuple_struct_partial_eq(self, value) @@ -494,7 +489,6 @@ pub fn tuple_struct_debug( #[cfg(test)] mod tests { - use crate as bevy_reflect; use crate::*; #[derive(Reflect)] struct Ts(u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8); diff --git a/crates/bevy_reflect/src/type_info.rs b/crates/bevy_reflect/src/type_info.rs index 4ccac40508a43..1a3be15c36126 100644 --- a/crates/bevy_reflect/src/type_info.rs +++ b/crates/bevy_reflect/src/type_info.rs @@ -72,7 +72,6 @@ use thiserror::Error; /// # fn reflect_ref(&self) -> ReflectRef { todo!() } /// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } /// # fn reflect_owned(self: Box) -> ReflectOwned { todo!() 
} -/// # fn clone_value(&self) -> Box { todo!() } /// # } /// # impl Reflect for MyStruct { /// # fn into_any(self: Box) -> Box { todo!() } @@ -547,6 +546,8 @@ pub(crate) use impl_type_methods; /// For example, [`i32`] cannot be broken down any further, so it is represented by an [`OpaqueInfo`]. /// And while [`String`] itself is a struct, its fields are private, so we don't really treat /// it _as_ a struct. It therefore makes more sense to represent it as an [`OpaqueInfo`]. +/// +/// [`String`]: alloc::string::String #[derive(Debug, Clone)] pub struct OpaqueInfo { ty: Type, @@ -585,6 +586,7 @@ impl OpaqueInfo { #[cfg(test)] mod tests { use super::*; + use alloc::vec::Vec; #[test] fn should_return_error_on_invalid_cast() { diff --git a/crates/bevy_reflect/src/type_info_stack.rs b/crates/bevy_reflect/src/type_info_stack.rs index 8f1161485f1aa..cdc19244de295 100644 --- a/crates/bevy_reflect/src/type_info_stack.rs +++ b/crates/bevy_reflect/src/type_info_stack.rs @@ -1,12 +1,10 @@ use crate::TypeInfo; +use alloc::vec::Vec; use core::{ fmt::{Debug, Formatter}, slice::Iter, }; -#[cfg(not(feature = "std"))] -use alloc::{boxed::Box, format, vec}; - /// Helper struct for managing a stack of [`TypeInfo`] instances. /// /// This is useful for tracking the type hierarchy when serializing and deserializing types. 
diff --git a/crates/bevy_reflect/src/type_registry.rs b/crates/bevy_reflect/src/type_registry.rs index 609c66d1856ac..5827ebdac5f1a 100644 --- a/crates/bevy_reflect/src/type_registry.rs +++ b/crates/bevy_reflect/src/type_registry.rs @@ -1,8 +1,11 @@ use crate::{serde::Serializable, FromReflect, Reflect, TypeInfo, TypePath, Typed}; -use alloc::sync::Arc; use alloc::{boxed::Box, string::String}; +use bevy_platform::{ + collections::{HashMap, HashSet}, + sync::{Arc, PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; use bevy_ptr::{Ptr, PtrMut}; -use bevy_utils::{HashMap, HashSet, TypeIdMap}; +use bevy_utils::TypeIdMap; use core::{ any::TypeId, fmt::Debug, @@ -11,12 +14,6 @@ use core::{ use downcast_rs::{impl_downcast, Downcast}; use serde::Deserialize; -#[cfg(feature = "std")] -use std::sync::{PoisonError, RwLock, RwLockReadGuard, RwLockWriteGuard}; - -#[cfg(not(feature = "std"))] -use spin::rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; - /// A registry of [reflected] types. /// /// This struct is used as the central store for type information. @@ -46,12 +43,12 @@ pub struct TypeRegistryArc { impl Debug for TypeRegistryArc { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let read_lock = self.internal.read(); - - #[cfg(feature = "std")] - let read_lock = read_lock.unwrap_or_else(PoisonError::into_inner); - - read_lock.type_path_to_id.keys().fmt(f) + self.internal + .read() + .unwrap_or_else(PoisonError::into_inner) + .type_path_to_id + .keys() + .fmt(f) } } @@ -79,8 +76,7 @@ pub trait GetTypeRegistration: 'static { /// /// This method is called by [`TypeRegistry::register`] to register any other required types. /// Often, this is done for fields of structs and enum variants to ensure all types are properly registered. 
- #[allow(unused_variables)] - fn register_type_dependencies(registry: &mut TypeRegistry) {} + fn register_type_dependencies(_registry: &mut TypeRegistry) {} } impl Default for TypeRegistry { @@ -169,6 +165,43 @@ impl TypeRegistry { } } + /// Attempts to register the referenced type `T` if it has not yet been registered. + /// + /// See [`register`] for more details. + /// + /// # Example + /// + /// ``` + /// # use bevy_reflect::{Reflect, TypeRegistry}; + /// # use core::any::TypeId; + /// # + /// # let mut type_registry = TypeRegistry::default(); + /// # + /// #[derive(Reflect)] + /// struct Foo { + /// bar: Bar, + /// } + /// + /// #[derive(Reflect)] + /// struct Bar; + /// + /// let foo = Foo { bar: Bar }; + /// + /// // Equivalent to `type_registry.register::()` + /// type_registry.register_by_val(&foo); + /// + /// assert!(type_registry.contains(TypeId::of::())); + /// assert!(type_registry.contains(TypeId::of::())); + /// ``` + /// + /// [`register`]: Self::register + pub fn register_by_val(&mut self, _: &T) + where + T: GetTypeRegistration, + { + self.register::(); + } + /// Attempts to register the type described by `registration`. /// /// If the registration for the type already exists, it will not be registered again. @@ -217,9 +250,11 @@ impl TypeRegistry { type_id: TypeId, get_registration: impl FnOnce() -> TypeRegistration, ) -> bool { + use bevy_platform::collections::hash_map::Entry; + match self.registrations.entry(type_id) { - bevy_utils::Entry::Occupied(_) => false, - bevy_utils::Entry::Vacant(entry) => { + Entry::Occupied(_) => false, + Entry::Vacant(entry) => { let registration = get_registration(); Self::update_registration_indices( ®istration, @@ -434,22 +469,14 @@ impl TypeRegistry { impl TypeRegistryArc { /// Takes a read lock on the underlying [`TypeRegistry`]. 
pub fn read(&self) -> RwLockReadGuard<'_, TypeRegistry> { - let read_lock = self.internal.read(); - - #[cfg(feature = "std")] - let read_lock = read_lock.unwrap_or_else(PoisonError::into_inner); - - read_lock + self.internal.read().unwrap_or_else(PoisonError::into_inner) } /// Takes a write lock on the underlying [`TypeRegistry`]. pub fn write(&self) -> RwLockWriteGuard<'_, TypeRegistry> { - let write_lock = self.internal.write(); - - #[cfg(feature = "std")] - let write_lock = write_lock.unwrap_or_else(PoisonError::into_inner); - - write_lock + self.internal + .write() + .unwrap_or_else(PoisonError::into_inner) } } @@ -785,7 +812,10 @@ pub struct ReflectFromPtr { from_ptr_mut: unsafe fn(PtrMut) -> &mut dyn Reflect, } -#[allow(unsafe_code)] +#[expect( + unsafe_code, + reason = "We must interact with pointers here, which are inherently unsafe." +)] impl ReflectFromPtr { /// Returns the [`TypeId`] that the [`ReflectFromPtr`] was constructed for. pub fn type_id(&self) -> TypeId { @@ -837,7 +867,10 @@ impl ReflectFromPtr { } } -#[allow(unsafe_code)] +#[expect( + unsafe_code, + reason = "We must interact with pointers here, which are inherently unsafe." +)] impl FromType for ReflectFromPtr { fn from_type() -> Self { ReflectFromPtr { @@ -857,10 +890,12 @@ impl FromType for ReflectFromPtr { } #[cfg(test)] -#[allow(unsafe_code)] +#[expect( + unsafe_code, + reason = "We must interact with pointers here, which are inherently unsafe." 
+)] mod test { use super::*; - use crate as bevy_reflect; #[test] fn test_reflect_from_ptr() { diff --git a/crates/bevy_reflect/src/utility.rs b/crates/bevy_reflect/src/utility.rs index f106baf622135..5735a29dbe7c7 100644 --- a/crates/bevy_reflect/src/utility.rs +++ b/crates/bevy_reflect/src/utility.rs @@ -2,18 +2,16 @@ use crate::TypeInfo; use alloc::boxed::Box; -use bevy_utils::{DefaultHasher, FixedHasher, NoOpHash, TypeIdMap}; +use bevy_platform::{ + hash::{DefaultHasher, FixedHasher, NoOpHash}, + sync::{OnceLock, PoisonError, RwLock}, +}; +use bevy_utils::TypeIdMap; use core::{ any::{Any, TypeId}, hash::BuildHasher, }; -#[cfg(feature = "std")] -use std::sync::{OnceLock, PoisonError, RwLock}; - -#[cfg(not(feature = "std"))] -use spin::{Once as OnceLock, RwLock}; - /// A type that can be stored in a ([`Non`])[`GenericTypeCell`]. /// /// [`Non`]: NonGenericTypeCell @@ -24,6 +22,7 @@ pub trait TypedProperty: sealed::Sealed { /// Used to store a [`String`] in a [`GenericTypePathCell`] as part of a [`TypePath`] implementation. 
/// /// [`TypePath`]: crate::TypePath +/// [`String`]: alloc::string::String pub struct TypePathComponent; mod sealed { @@ -89,7 +88,6 @@ mod sealed { /// # fn reflect_ref(&self) -> ReflectRef { todo!() } /// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } /// # fn reflect_owned(self: Box) -> ReflectOwned { todo!() } -/// # fn clone_value(&self) -> Box { todo!() } /// # } /// # impl Reflect for Foo { /// # fn into_any(self: Box) -> Box { todo!() } @@ -121,11 +119,7 @@ impl NonGenericTypeCell { where F: FnOnce() -> T::Stored, { - #[cfg(feature = "std")] - return self.0.get_or_init(f); - - #[cfg(not(feature = "std"))] - return self.0.call_once(f); + self.0.get_or_init(f) } } @@ -181,7 +175,6 @@ impl Default for NonGenericTypeCell { /// # fn reflect_ref(&self) -> ReflectRef { todo!() } /// # fn reflect_mut(&mut self) -> ReflectMut { todo!() } /// # fn reflect_owned(self: Box) -> ReflectOwned { todo!() } -/// # fn clone_value(&self) -> Box { todo!() } /// # } /// # impl Reflect for Foo { /// # fn into_any(self: Box) -> Box { todo!() } @@ -258,12 +251,11 @@ impl GenericTypeCell { /// /// This method will then return the correct [`TypedProperty`] reference for the given type `T`. fn get_by_type_id(&self, type_id: TypeId) -> Option<&T::Stored> { - let read_lock = self.0.read(); - - #[cfg(feature = "std")] - let read_lock = read_lock.unwrap_or_else(PoisonError::into_inner); - - read_lock.get(&type_id).copied() + self.0 + .read() + .unwrap_or_else(PoisonError::into_inner) + .get(&type_id) + .copied() } /// Returns a reference to the [`TypedProperty`] stored in the cell. 
@@ -281,12 +273,7 @@ impl GenericTypeCell { } fn insert_by_type_id(&self, type_id: TypeId, value: T::Stored) -> &T::Stored { - let write_lock = self.0.write(); - - #[cfg(feature = "std")] - let write_lock = write_lock.unwrap_or_else(PoisonError::into_inner); - - let mut write_lock = write_lock; + let mut write_lock = self.0.write().unwrap_or_else(PoisonError::into_inner); write_lock .entry(type_id) diff --git a/crates/bevy_remote/Cargo.toml b/crates/bevy_remote/Cargo.toml index 4a12f7742c997..d2e3395f77e71 100644 --- a/crates/bevy_remote/Cargo.toml +++ b/crates/bevy_remote/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_remote" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "The Bevy Remote Protocol" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -14,15 +14,18 @@ http = ["dep:async-io", "dep:smol-hyper"] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev", features = [ +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", features = [ + "serialize", +] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", "serialize", ] } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev" } -bevy_tasks = { path = "../bevy_tasks", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } # other anyhow = "1" diff --git 
a/crates/bevy_remote/LICENSE-APACHE b/crates/bevy_remote/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_remote/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_remote/LICENSE-MIT b/crates/bevy_remote/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_remote/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_remote/src/builtin_methods.rs b/crates/bevy_remote/src/builtin_methods.rs index 840cc102eabea..ce5fa259a1a33 100644 --- a/crates/bevy_remote/src/builtin_methods.rs +++ b/crates/bevy_remote/src/builtin_methods.rs @@ -7,22 +7,29 @@ use bevy_ecs::{ component::ComponentId, entity::Entity, event::EventCursor, + hierarchy::ChildOf, query::QueryBuilder, - reflect::{AppTypeRegistry, ReflectComponent}, + reflect::{AppTypeRegistry, ReflectComponent, ReflectResource}, removal_detection::RemovedComponentEntity, system::{In, Local}, world::{EntityRef, EntityWorldMut, FilteredEntityRef, World}, }; -use bevy_hierarchy::BuildChildren as _; +use bevy_platform::collections::HashMap; use bevy_reflect::{ serde::{ReflectSerializer, TypedReflectDeserializer}, - PartialReflect, TypeRegistration, TypeRegistry, + GetPath, PartialReflect, TypeRegistration, TypeRegistry, }; -use bevy_utils::HashMap; use serde::{de::DeserializeSeed as _, Deserialize, Serialize}; use serde_json::{Map, Value}; -use crate::{error_codes, BrpError, BrpResult}; +use crate::{ + error_codes, + schemas::{json_schema::JsonSchemaBevyType, open_rpc::OpenRpcDocument}, + BrpError, BrpResult, +}; + +#[cfg(all(feature = "http", not(target_family = "wasm")))] +use {crate::schemas::open_rpc::ServerObject, bevy_utils::default}; /// The method path for a `bevy/get` request. pub const BRP_GET_METHOD: &str = "bevy/get"; @@ -48,12 +55,36 @@ pub const BRP_REPARENT_METHOD: &str = "bevy/reparent"; /// The method path for a `bevy/list` request. pub const BRP_LIST_METHOD: &str = "bevy/list"; +/// The method path for a `bevy/mutate_component` request. +pub const BRP_MUTATE_COMPONENT_METHOD: &str = "bevy/mutate_component"; + /// The method path for a `bevy/get+watch` request. pub const BRP_GET_AND_WATCH_METHOD: &str = "bevy/get+watch"; /// The method path for a `bevy/list+watch` request. pub const BRP_LIST_AND_WATCH_METHOD: &str = "bevy/list+watch"; +/// The method path for a `bevy/get_resource` request. 
+pub const BRP_GET_RESOURCE_METHOD: &str = "bevy/get_resource"; + +/// The method path for a `bevy/insert_resource` request. +pub const BRP_INSERT_RESOURCE_METHOD: &str = "bevy/insert_resource"; + +/// The method path for a `bevy/remove_resource` request. +pub const BRP_REMOVE_RESOURCE_METHOD: &str = "bevy/remove_resource"; + +/// The method path for a `bevy/mutate_resource` request. +pub const BRP_MUTATE_RESOURCE_METHOD: &str = "bevy/mutate_resource"; + +/// The method path for a `bevy/list_resources` request. +pub const BRP_LIST_RESOURCES_METHOD: &str = "bevy/list_resources"; + +/// The method path for a `bevy/registry/schema` request. +pub const BRP_REGISTRY_SCHEMA_METHOD: &str = "bevy/registry/schema"; + +/// The method path for a `rpc.discover` request. +pub const RPC_DISCOVER_METHOD: &str = "rpc.discover"; + /// `bevy/get`: Retrieves one or more components from the entity with the given /// ID. /// @@ -79,6 +110,15 @@ pub struct BrpGetParams { pub strict: bool, } +/// `bevy/get_resource`: Retrieves the value of a given resource. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpGetResourceParams { + /// The [full path] of the resource type being requested. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, +} + /// `bevy/query`: Performs a query over components in the ECS, returning entities /// and component values that match. /// @@ -145,6 +185,15 @@ pub struct BrpRemoveParams { pub components: Vec, } +/// `bevy/remove_resource`: Removes the given resource from the world. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpRemoveResourceParams { + /// The [full path] of the resource type to remove. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, +} + /// `bevy/insert`: Adds one or more components to an entity. /// /// The server responds with a null. 
@@ -165,6 +214,19 @@ pub struct BrpInsertParams { pub components: HashMap, } +/// `bevy/insert_resource`: Inserts a resource into the world with a given +/// value. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpInsertResourceParams { + /// The [full path] of the resource type to insert. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, + + /// The serialized value of the resource to be inserted. + pub value: Value, +} + /// `bevy/reparent`: Assign a new parent to one or more entities. /// /// The server responds with a null. @@ -192,6 +254,47 @@ pub struct BrpListParams { pub entity: Entity, } +/// `bevy/mutate_component`: +/// +/// The server responds with a null. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpMutateComponentParams { + /// The entity of the component to mutate. + pub entity: Entity, + + /// The [full path] of the component to mutate. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub component: String, + + /// The [path] of the field within the component. + /// + /// [path]: bevy_reflect::GetPath + pub path: String, + + /// The value to insert at `path`. + pub value: Value, +} + +/// `bevy/mutate_resource`: +/// +/// The server responds with a null. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpMutateResourceParams { + /// The [full path] of the resource to mutate. + /// + /// [full path]: bevy_reflect::TypePath::type_path + pub resource: String, + + /// The [path] of the field within the resource. + /// + /// [path]: bevy_reflect::GetPath + pub path: String, + + /// The value to insert at `path`. + pub value: Value, +} + /// Describes the data that is to be fetched in a query. 
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] pub struct BrpQuery { @@ -236,6 +339,38 @@ pub struct BrpQueryFilter { pub with: Vec, } +/// Constraints that can be placed on a query to include or exclude +/// certain definitions. +#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] +pub struct BrpJsonSchemaQueryFilter { + /// The crate name of the type name of each component that must not be + /// present on the entity for it to be included in the results. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub without_crates: Vec, + + /// The crate name of the type name of each component that must be present + /// on the entity for it to be included in the results. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub with_crates: Vec, + + /// Constrain resource by type + #[serde(default)] + pub type_limit: JsonSchemaTypeLimit, +} + +/// Additional [`BrpJsonSchemaQueryFilter`] constraints that can be placed on a query to include or exclude +/// certain definitions. +#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq)] +pub struct JsonSchemaTypeLimit { + /// Schema cannot have specified reflect types + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub without: Vec, + + /// Schema needs to have specified reflect types + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub with: Vec, +} + /// A response from the world to the client that specifies a single entity. /// /// This is sent in response to `bevy/spawn`. @@ -261,6 +396,13 @@ pub enum BrpGetResponse { Strict(HashMap), } +/// The response to a `bevy/get_resource` request. +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct BrpGetResourceResponse { + /// The value of the requested resource. + pub value: Value, +} + /// A single response from a `bevy/get+watch` request. 
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] #[serde(untagged)] @@ -289,6 +431,9 @@ pub enum BrpGetWatchingResponse { /// The response to a `bevy/list` request. pub type BrpListResponse = Vec; +/// The response to a `bevy/list_resources` request. +pub type BrpListResourcesResponse = Vec; + /// A single response from a `bevy/list+watch` request. #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)] pub struct BrpListWatchingResponse { @@ -351,6 +496,45 @@ pub fn process_remote_get_request(In(params): In>, world: &World) serde_json::to_value(response).map_err(BrpError::internal) } +/// Handles a `bevy/get_resource` request coming from a client. +pub fn process_remote_get_resource_request( + In(params): In>, + world: &World, +) -> BrpResult { + let BrpGetResourceParams { + resource: resource_path, + } = parse_some(params)?; + + let app_type_registry = world.resource::(); + let type_registry = app_type_registry.read(); + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + + let Ok(reflected) = reflect_resource.reflect(world) else { + return Err(BrpError::resource_not_present(&resource_path)); + }; + + // Use the `ReflectSerializer` to serialize the value of the resource; + // this produces a map with a single item. + let reflect_serializer = ReflectSerializer::new(reflected.as_partial_reflect(), &type_registry); + let Value::Object(serialized_object) = + serde_json::to_value(&reflect_serializer).map_err(BrpError::resource_error)? + else { + return Err(BrpError { + code: error_codes::RESOURCE_ERROR, + message: format!("Resource `{}` could not be serialized", resource_path), + data: None, + }); + }; + + // Get the single value out of the map. 
+ let value = serialized_object.into_values().next().ok_or_else(|| { + BrpError::internal(anyhow!("Unexpected format of serialized resource value")) + })?; + let response = BrpGetResourceResponse { value }; + serde_json::to_value(response).map_err(BrpError::internal) +} + /// Handles a `bevy/get+watch` request coming from a client. pub fn process_remote_get_watching_request( In(params): In>, @@ -507,11 +691,7 @@ fn reflect_component( // Each component value serializes to a map with a single entry. let reflect_serializer = ReflectSerializer::new(reflected.as_partial_reflect(), type_registry); let Value::Object(serialized_object) = - serde_json::to_value(&reflect_serializer).map_err(|err| BrpError { - code: error_codes::COMPONENT_ERROR, - message: err.to_string(), - data: None, - })? + serde_json::to_value(&reflect_serializer).map_err(BrpError::component_error)? else { return Err(BrpError { code: error_codes::COMPONENT_ERROR, @@ -634,6 +814,44 @@ pub fn process_remote_spawn_request(In(params): In>, world: &mut W serde_json::to_value(response).map_err(BrpError::internal) } +/// Handles a `rpc.discover` request coming from a client. 
+pub fn process_remote_list_methods_request( + In(_params): In>, + world: &mut World, +) -> BrpResult { + let remote_methods = world.resource::(); + + #[cfg(all(feature = "http", not(target_family = "wasm")))] + let servers = match ( + world.get_resource::(), + world.get_resource::(), + ) { + (Some(url), Some(port)) => Some(vec![ServerObject { + name: "Server".to_owned(), + url: format!("{}:{}", url.0, port.0), + ..default() + }]), + (Some(url), None) => Some(vec![ServerObject { + name: "Server".to_owned(), + url: url.0.to_string(), + ..default() + }]), + _ => None, + }; + + #[cfg(any(not(feature = "http"), target_family = "wasm"))] + let servers = None; + + let doc = OpenRpcDocument { + info: Default::default(), + methods: remote_methods.into(), + openrpc: "1.3.2".to_owned(), + servers, + }; + + serde_json::to_value(doc).map_err(BrpError::internal) +} + /// Handles a `bevy/insert` request (insert components) coming from a client. pub fn process_remote_insert_request( In(params): In>, @@ -657,6 +875,144 @@ pub fn process_remote_insert_request( Ok(Value::Null) } +/// Handles a `bevy/insert_resource` request coming from a client. +pub fn process_remote_insert_resource_request( + In(params): In>, + world: &mut World, +) -> BrpResult { + let BrpInsertResourceParams { + resource: resource_path, + value, + } = parse_some(params)?; + + let app_type_registry = world.resource::().clone(); + let type_registry = app_type_registry.read(); + + let reflected_resource = deserialize_resource(&type_registry, &resource_path, value) + .map_err(BrpError::resource_error)?; + + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + reflect_resource.insert(world, &*reflected_resource, &type_registry); + + Ok(Value::Null) +} + +/// Handles a `bevy/mutate_component` request coming from a client. +/// +/// This method allows you to mutate a single field inside an Entity's +/// component. 
+pub fn process_remote_mutate_component_request( + In(params): In>, + world: &mut World, +) -> BrpResult { + let BrpMutateComponentParams { + entity, + component, + path, + value, + } = parse_some(params)?; + let app_type_registry = world.resource::().clone(); + let type_registry = app_type_registry.read(); + + // Get the fully-qualified type names of the component to be mutated. + let component_type: &TypeRegistration = type_registry + .get_with_type_path(&component) + .ok_or_else(|| { + BrpError::component_error(anyhow!("Unknown component type: `{}`", component)) + })?; + + // Get the reflected representation of the component. + let mut reflected = component_type + .data::() + .ok_or_else(|| { + BrpError::component_error(anyhow!("Component `{}` isn't registered", component)) + })? + .reflect_mut(world.entity_mut(entity)) + .ok_or_else(|| { + BrpError::component_error(anyhow!("Cannot reflect component `{}`", component)) + })?; + + // Get the type of the field in the component that is to be + // mutated. + let value_type: &TypeRegistration = type_registry + .get_with_type_path( + reflected + .reflect_path(path.as_str()) + .map_err(BrpError::component_error)? + .reflect_type_path(), + ) + .ok_or_else(|| { + BrpError::component_error(anyhow!("Unknown component field type: `{}`", component)) + })?; + + // Get the reflected representation of the value to be inserted + // into the component. + let value: Box = TypedReflectDeserializer::new(value_type, &type_registry) + .deserialize(&value) + .map_err(BrpError::component_error)?; + + // Apply the mutation. + reflected + .reflect_path_mut(path.as_str()) + .map_err(BrpError::component_error)? + .try_apply(value.as_ref()) + .map_err(BrpError::component_error)?; + + Ok(Value::Null) +} + +/// Handles a `bevy/mutate_resource` request coming from a client. 
+pub fn process_remote_mutate_resource_request( + In(params): In>, + world: &mut World, +) -> BrpResult { + let BrpMutateResourceParams { + resource: resource_path, + path: field_path, + value, + } = parse_some(params)?; + + let app_type_registry = world.resource::().clone(); + let type_registry = app_type_registry.read(); + + // Get the `ReflectResource` for the given resource path. + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + + // Get the actual resource value from the world as a `dyn Reflect`. + let mut reflected_resource = reflect_resource + .reflect_mut(world) + .map_err(|_| BrpError::resource_not_present(&resource_path))?; + + // Get the type registration for the field with the given path. + let value_registration = type_registry + .get_with_type_path( + reflected_resource + .reflect_path(field_path.as_str()) + .map_err(BrpError::resource_error)? + .reflect_type_path(), + ) + .ok_or_else(|| { + BrpError::resource_error(anyhow!("Unknown resource field type: `{}`", resource_path)) + })?; + + // Use the field's type registration to deserialize the given value. + let deserialized_value: Box = + TypedReflectDeserializer::new(value_registration, &type_registry) + .deserialize(&value) + .map_err(BrpError::resource_error)?; + + // Apply the value to the resource. + reflected_resource + .reflect_path_mut(field_path.as_str()) + .map_err(BrpError::resource_error)? + .try_apply(&*deserialized_value) + .map_err(BrpError::resource_error)?; + + Ok(Value::Null) +} + /// Handles a `bevy/remove` request (remove components) coming from a client. pub fn process_remote_remove_request( In(params): In>, @@ -679,6 +1035,25 @@ pub fn process_remote_remove_request( Ok(Value::Null) } +/// Handles a `bevy/remove_resource` request coming from a client. 
+pub fn process_remote_remove_resource_request( + In(params): In>, + world: &mut World, +) -> BrpResult { + let BrpRemoveResourceParams { + resource: resource_path, + } = parse_some(params)?; + + let app_type_registry = world.resource::().clone(); + let type_registry = app_type_registry.read(); + + let reflect_resource = + get_reflect_resource(&type_registry, &resource_path).map_err(BrpError::resource_error)?; + reflect_resource.remove(world); + + Ok(Value::Null) +} + /// Handles a `bevy/destroy` (despawn entity) request coming from a client. pub fn process_remote_destroy_request( In(params): In>, @@ -715,7 +1090,7 @@ pub fn process_remote_reparent_request( // If `None`, remove the entities' parents. else { for entity in entities { - get_entity_mut(world, entity)?.remove_parent(); + get_entity_mut(world, entity)?.remove::(); } } @@ -755,7 +1130,28 @@ pub fn process_remote_list_request(In(params): In>, world: &World) serde_json::to_value(response).map_err(BrpError::internal) } -/// Handles a `bevy/list` request (list all components) coming from a client. +/// Handles a `bevy/list_resources` request coming from a client. +pub fn process_remote_list_resources_request( + In(_params): In>, + world: &World, +) -> BrpResult { + let mut response = BrpListResourcesResponse::default(); + + let app_type_registry = world.resource::(); + let type_registry = app_type_registry.read(); + + for registered_type in type_registry.iter() { + if registered_type.data::().is_some() { + response.push(registered_type.type_info().type_path().to_owned()); + } + } + + response.sort(); + + serde_json::to_value(response).map_err(BrpError::internal) +} + +/// Handles a `bevy/list+watch` request coming from a client. pub fn process_remote_list_watching_request( In(params): In>, world: &World, @@ -801,6 +1197,57 @@ pub fn process_remote_list_watching_request( } } +/// Handles a `bevy/registry/schema` request (list all registry types in form of schema) coming from a client. 
+pub fn export_registry_types(In(params): In>, world: &World) -> BrpResult { + let filter: BrpJsonSchemaQueryFilter = match params { + None => Default::default(), + Some(params) => parse(params)?, + }; + + let types = world.resource::(); + let types = types.read(); + let schemas = types + .iter() + .map(crate::schemas::json_schema::export_type) + .filter(|(_, schema)| { + if let Some(crate_name) = &schema.crate_name { + if !filter.with_crates.is_empty() + && !filter.with_crates.iter().any(|c| crate_name.eq(c)) + { + return false; + } + if !filter.without_crates.is_empty() + && filter.without_crates.iter().any(|c| crate_name.eq(c)) + { + return false; + } + } + if !filter.type_limit.with.is_empty() + && !filter + .type_limit + .with + .iter() + .any(|c| schema.reflect_types.iter().any(|cc| c.eq(cc))) + { + return false; + } + if !filter.type_limit.without.is_empty() + && filter + .type_limit + .without + .iter() + .any(|c| schema.reflect_types.iter().any(|cc| c.eq(cc))) + { + return false; + } + + true + }) + .collect::>(); + + serde_json::to_value(schemas).map_err(BrpError::internal) +} + /// Immutably retrieves an entity from the [`World`], returning an error if the /// entity isn't present. fn get_entity(world: &World, entity: Entity) -> Result, BrpError> { @@ -942,6 +1389,23 @@ fn deserialize_components( Ok(reflect_components) } +/// Given a resource path and an associated serialized value (`value`), return the +/// deserialized value. 
+fn deserialize_resource( + type_registry: &TypeRegistry, + resource_path: &str, + value: Value, +) -> AnyhowResult> { + let Some(resource_type) = type_registry.get_with_type_path(resource_path) else { + return Err(anyhow!("Unknown resource type: `{}`", resource_path)); + }; + let reflected: Box = + TypedReflectDeserializer::new(resource_type, type_registry) + .deserialize(&value) + .map_err(|err| anyhow!("{resource_path} is invalid: {err}"))?; + Ok(reflected) +} + /// Given a collection `reflect_components` of reflected component values, insert them into /// the given entity (`entity_world_mut`). fn insert_reflected_components( @@ -982,6 +1446,30 @@ fn get_component_type_registration<'r>( .ok_or_else(|| anyhow!("Unknown component type: `{}`", component_path)) } +/// Given a resource's type path, return the associated [`ReflectResource`] from the given +/// `type_registry` if possible. +fn get_reflect_resource<'r>( + type_registry: &'r TypeRegistry, + resource_path: &str, +) -> AnyhowResult<&'r ReflectResource> { + let resource_registration = get_resource_type_registration(type_registry, resource_path)?; + + resource_registration + .data::() + .ok_or_else(|| anyhow!("Resource `{}` isn't reflectable", resource_path)) +} + +/// Given a resource's type path, return the associated [`TypeRegistration`] from the given +/// `type_registry` if possible. 
+fn get_resource_type_registration<'r>( + type_registry: &'r TypeRegistry, + resource_path: &str, +) -> AnyhowResult<&'r TypeRegistration> { + type_registry + .get_with_type_path(resource_path) + .ok_or_else(|| anyhow!("Unknown resource type: `{}`", resource_path)) +} + #[cfg(test)] mod tests { /// A generic function that tests serialization and deserialization of any type @@ -1013,6 +1501,14 @@ mod tests { }); test_serialize_deserialize(BrpListWatchingResponse::default()); test_serialize_deserialize(BrpQuery::default()); + test_serialize_deserialize(BrpJsonSchemaQueryFilter::default()); + test_serialize_deserialize(BrpJsonSchemaQueryFilter { + type_limit: JsonSchemaTypeLimit { + with: vec!["Resource".to_owned()], + ..Default::default() + }, + ..Default::default() + }); test_serialize_deserialize(BrpListParams { entity: Entity::from_raw(0), }); diff --git a/crates/bevy_remote/src/http.rs b/crates/bevy_remote/src/http.rs index 04c99ea21010f..4e36e4a0bfe94 100644 --- a/crates/bevy_remote/src/http.rs +++ b/crates/bevy_remote/src/http.rs @@ -15,7 +15,8 @@ use anyhow::Result as AnyhowResult; use async_channel::{Receiver, Sender}; use async_io::Async; use bevy_app::{App, Plugin, Startup}; -use bevy_ecs::system::{Res, Resource}; +use bevy_ecs::resource::Resource; +use bevy_ecs::system::Res; use bevy_tasks::{futures_lite::StreamExt, IoTaskPool}; use core::{ convert::Infallible, diff --git a/crates/bevy_remote/src/lib.rs b/crates/bevy_remote/src/lib.rs index 3d6781444148d..b21fb97bbb453 100644 --- a/crates/bevy_remote/src/lib.rs +++ b/crates/bevy_remote/src/lib.rs @@ -102,7 +102,7 @@ //! in the ECS. Each of these methods uses the `bevy/` prefix, which is a namespace reserved for //! BRP built-in methods. //! -//! ### bevy/get +//! ### `bevy/get` //! //! Retrieve the values of one or more components from an entity. //! @@ -123,7 +123,7 @@ //! //! `result`: A map associating each type name to its value on the requested entity. //! -//! ### bevy/query +//! ### `bevy/query` //! 
//! Perform a query over components in the ECS, returning all matching entities and their associated //! component values. @@ -133,17 +133,18 @@ //! //! `params`: //! - `data`: -//! - `components` (optional): An array of [fully-qualified type names] of components to fetch. +//! - `components` (optional): An array of [fully-qualified type names] of components to fetch, +//! see _below_ example for a query to list all the type names in **your** project. //! - `option` (optional): An array of fully-qualified type names of components to fetch optionally. //! - `has` (optional): An array of fully-qualified type names of components whose presence will be -//! reported as boolean values. +//! reported as boolean values. //! - `filter` (optional): //! - `with` (optional): An array of fully-qualified type names of components that must be present //! on entities in order for them to be included in results. //! - `without` (optional): An array of fully-qualified type names of components that must *not* be //! present on entities in order for them to be included in results. -//! - `strict` (optional): A flag to enable strict mode which will fail if any one of the -//! components is not present or can not be reflected. Defaults to false. +//! - `strict` (optional): A flag to enable strict mode which will fail if any one of the components +//! is not present or can not be reflected. Defaults to false. //! //! `result`: An array, each of which is an object containing: //! - `entity`: The ID of a query-matching entity. @@ -152,7 +153,9 @@ //! - `has`: A map associating each type name from `has` to a boolean value indicating whether or not the //! entity has that component. If `has` was empty or omitted, this key will be omitted in the response. //! -//! ### bevy/spawn +//! +//! +//! ### `bevy/spawn` //! //! Create a new entity with the provided components and return the resulting entity ID. //! @@ -162,7 +165,7 @@ //! `result`: //! - `entity`: The ID of the newly spawned entity. 
//! -//! ### bevy/destroy +//! ### `bevy/destroy` //! //! Despawn the entity with the given ID. //! @@ -171,7 +174,7 @@ //! //! `result`: null. //! -//! ### bevy/remove +//! ### `bevy/remove` //! //! Delete one or more components from an entity. //! @@ -181,7 +184,7 @@ //! //! `result`: null. //! -//! ### bevy/insert +//! ### `bevy/insert` //! //! Insert one or more components into an entity. //! @@ -191,7 +194,20 @@ //! //! `result`: null. //! -//! ### bevy/reparent +//! ### `bevy/mutate_component` +//! +//! Mutate a field in a component. +//! +//! `params`: +//! - `entity`: The ID of the entity with the component to mutate. +//! - `component`: The component's [fully-qualified type name]. +//! - `path`: The path of the field within the component. See +//! [`GetPath`](bevy_reflect::GetPath#syntax) for more information on formatting this string. +//! - `value`: The value to insert at `path`. +//! +//! `result`: null. +//! +//! ### `bevy/reparent` //! //! Assign a new parent to one or more entities. //! @@ -202,7 +218,7 @@ //! //! `result`: null. //! -//! ### bevy/list +//! ### `bevy/list` //! //! List all registered components or all components present on an entity. //! @@ -214,7 +230,7 @@ //! //! `result`: An array of fully-qualified type names of components. //! -//! ### bevy/get+watch +//! ### `bevy/get+watch` //! //! Watch the values of one or more components from an entity. //! @@ -242,7 +258,7 @@ //! - `removed`: An array of fully-qualified type names of components removed from the entity //! in the last tick. //! -//! ### bevy/list+watch +//! ### `bevy/list+watch` //! //! Watch all components present on an entity. //! @@ -258,6 +274,52 @@ //! - `removed`: An array of fully-qualified type names of components removed from the entity //! in the last tick. //! +//! ### `bevy/get_resource` +//! +//! Extract the value of a given resource from the world. +//! +//! `params`: +//! - `resource`: The [fully-qualified type name] of the resource to get. +//! +//! 
`result`: +//! - `value`: The value of the resource in the world. +//! +//! ### `bevy/insert_resource` +//! +//! Insert the given resource into the world with the given value. +//! +//! `params`: +//! - `resource`: The [fully-qualified type name] of the resource to insert. +//! - `value`: The value of the resource to be inserted. +//! +//! `result`: null. +//! +//! ### `bevy/remove_resource` +//! +//! Remove the given resource from the world. +//! +//! `params` +//! - `resource`: The [fully-qualified type name] of the resource to remove. +//! +//! `result`: null. +//! +//! ### `bevy/mutate_resource` +//! +//! Mutate a field in a resource. +//! +//! `params`: +//! - `resource`: The [fully-qualified type name] of the resource to mutate. +//! - `path`: The path of the field within the resource. See +//! [`GetPath`](bevy_reflect::GetPath#syntax) for more information on formatting this string. +//! - `value`: The value to be inserted at `path`. +//! +//! `result`: null. +//! +//! ### `bevy/list_resources` +//! +//! List all reflectable registered resource types. This method has no parameters. +//! +//! `result`: An array of [fully-qualified type names] of registered resource types. //! //! ## Custom methods //! 
@@ -307,11 +369,13 @@ use bevy_app::{prelude::*, MainScheduleOrder}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ entity::Entity, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs, ScheduleLabel, SystemSet}, - system::{Commands, In, IntoSystem, ResMut, Resource, System, SystemId}, + resource::Resource, + schedule::{IntoScheduleConfigs, ScheduleLabel, SystemSet}, + system::{Commands, In, IntoSystem, ResMut, System, SystemId}, world::World, }; -use bevy_utils::{prelude::default, HashMap}; +use bevy_platform::collections::HashMap; +use bevy_utils::prelude::default; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::sync::RwLock; @@ -319,6 +383,7 @@ use std::sync::RwLock; pub mod builtin_methods; #[cfg(feature = "http")] pub mod http; +pub mod schemas; const CHANNEL_SIZE: usize = 16; @@ -406,6 +471,14 @@ impl Default for RemotePlugin { builtin_methods::BRP_LIST_METHOD, builtin_methods::process_remote_list_request, ) + .with_method( + builtin_methods::BRP_MUTATE_COMPONENT_METHOD, + builtin_methods::process_remote_mutate_component_request, + ) + .with_method( + builtin_methods::RPC_DISCOVER_METHOD, + builtin_methods::process_remote_list_methods_request, + ) .with_watching_method( builtin_methods::BRP_GET_AND_WATCH_METHOD, builtin_methods::process_remote_get_watching_request, @@ -414,6 +487,30 @@ impl Default for RemotePlugin { builtin_methods::BRP_LIST_AND_WATCH_METHOD, builtin_methods::process_remote_list_watching_request, ) + .with_method( + builtin_methods::BRP_GET_RESOURCE_METHOD, + builtin_methods::process_remote_get_resource_request, + ) + .with_method( + builtin_methods::BRP_INSERT_RESOURCE_METHOD, + builtin_methods::process_remote_insert_resource_request, + ) + .with_method( + builtin_methods::BRP_REMOVE_RESOURCE_METHOD, + builtin_methods::process_remote_remove_resource_request, + ) + .with_method( + builtin_methods::BRP_MUTATE_RESOURCE_METHOD, + builtin_methods::process_remote_mutate_resource_request, + ) + .with_method( + 
builtin_methods::BRP_LIST_RESOURCES_METHOD, + builtin_methods::process_remote_list_resources_request, + ) + .with_method( + builtin_methods::BRP_REGISTRY_SCHEMA_METHOD, + builtin_methods::export_registry_types, + ) } } @@ -461,7 +558,7 @@ impl Plugin for RemotePlugin { } /// Schedule that contains all systems to process Bevy Remote Protocol requests -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct RemoteLast; /// The systems sets of the [`RemoteLast`] schedule. @@ -539,6 +636,11 @@ impl RemoteMethods { pub fn get(&self, method: &str) -> Option<&RemoteMethodSystemId> { self.0.get(method) } + + /// Get a [`Vec`] with method names. + pub fn methods(&self) -> Vec { + self.0.keys().cloned().collect() + } } /// Holds the [`BrpMessage`]'s of all ongoing watching requests along with their handlers. @@ -563,6 +665,26 @@ pub struct RemoteWatchingRequests(Vec<(BrpMessage, RemoteWatchingMethodSystemId) /// } /// } /// ``` +/// Or, to list all the fully-qualified type paths in **your** project, pass Null to the +/// `params`. +/// ```json +/// { +/// "jsonrpc": "2.0", +/// "method": "bevy/list", +/// "id": 0, +/// "params": null +///} +///``` +/// +/// In Rust: +/// ```ignore +/// let req = BrpRequest { +/// jsonrpc: "2.0".to_string(), +/// method: BRP_LIST_METHOD.to_string(), // All the methods have consts +/// id: Some(ureq::json!(0)), +/// params: None, +/// }; +/// ``` #[derive(Debug, Serialize, Deserialize, Clone)] pub struct BrpRequest { /// This field is mandatory and must be set to `"2.0"` for the request to be accepted. @@ -672,6 +794,26 @@ impl BrpError { } } + /// Resource was not present in the world. + #[must_use] + pub fn resource_not_present(resource: &str) -> Self { + Self { + code: error_codes::RESOURCE_NOT_PRESENT, + message: format!("Resource `{resource}` not present in the world"), + data: None, + } + } + + /// An arbitrary resource error. 
Possibly related to reflection. + #[must_use] + pub fn resource_error(error: E) -> Self { + Self { + code: error_codes::RESOURCE_ERROR, + message: error.to_string(), + data: None, + } + } + /// An arbitrary internal error. #[must_use] pub fn internal(error: E) -> Self { @@ -726,6 +868,12 @@ pub mod error_codes { /// Cannot reparent an entity to itself. pub const SELF_REPARENT: i16 = -23404; + + /// Could not reflect or find resource. + pub const RESOURCE_ERROR: i16 = -23501; + + /// Could not find resource in the world. + pub const RESOURCE_NOT_PRESENT: i16 = -23502; } /// The result of a request. diff --git a/crates/bevy_remote/src/schemas/json_schema.rs b/crates/bevy_remote/src/schemas/json_schema.rs new file mode 100644 index 0000000000000..3fcc588f92ae4 --- /dev/null +++ b/crates/bevy_remote/src/schemas/json_schema.rs @@ -0,0 +1,543 @@ +//! Module with JSON Schema type for Bevy Registry Types. +//! It tries to follow this standard: +use bevy_ecs::reflect::{ReflectComponent, ReflectResource}; +use bevy_platform::collections::HashMap; +use bevy_reflect::{ + prelude::ReflectDefault, NamedField, OpaqueInfo, ReflectDeserialize, ReflectSerialize, + TypeInfo, TypeRegistration, VariantInfo, +}; +use core::any::TypeId; +use serde::{Deserialize, Serialize}; +use serde_json::{json, Map, Value}; + +/// Exports schema info for a given type +pub fn export_type(reg: &TypeRegistration) -> (String, JsonSchemaBevyType) { + (reg.type_info().type_path().to_owned(), reg.into()) +} + +fn get_registered_reflect_types(reg: &TypeRegistration) -> Vec { + // Vec could be moved to allow registering more types by game maker. 
+ let registered_reflect_types: [(TypeId, &str); 5] = [ + { (TypeId::of::(), "Component") }, + { (TypeId::of::(), "Resource") }, + { (TypeId::of::(), "Default") }, + { (TypeId::of::(), "Serialize") }, + { (TypeId::of::(), "Deserialize") }, + ]; + let mut result = Vec::new(); + for (id, name) in registered_reflect_types { + if reg.data_by_id(id).is_some() { + result.push(name.to_owned()); + } + } + result +} + +impl From<&TypeRegistration> for JsonSchemaBevyType { + fn from(reg: &TypeRegistration) -> Self { + let t = reg.type_info(); + let binding = t.type_path_table(); + + let short_path = binding.short_path(); + let type_path = binding.path(); + let mut typed_schema = JsonSchemaBevyType { + reflect_types: get_registered_reflect_types(reg), + short_path: short_path.to_owned(), + type_path: type_path.to_owned(), + crate_name: binding.crate_name().map(str::to_owned), + module_path: binding.module_path().map(str::to_owned), + ..Default::default() + }; + match t { + TypeInfo::Struct(info) => { + typed_schema.properties = info + .iter() + .map(|field| (field.name().to_owned(), field.ty().ref_type())) + .collect::>(); + typed_schema.required = info + .iter() + .filter(|field| !field.type_path().starts_with("core::option::Option")) + .map(|f| f.name().to_owned()) + .collect::>(); + typed_schema.additional_properties = Some(false); + typed_schema.schema_type = SchemaType::Object; + typed_schema.kind = SchemaKind::Struct; + } + TypeInfo::Enum(info) => { + typed_schema.kind = SchemaKind::Enum; + + let simple = info + .iter() + .all(|variant| matches!(variant, VariantInfo::Unit(_))); + if simple { + typed_schema.schema_type = SchemaType::String; + typed_schema.one_of = info + .iter() + .map(|variant| match variant { + VariantInfo::Unit(v) => v.name().into(), + _ => unreachable!(), + }) + .collect::>(); + } else { + typed_schema.schema_type = SchemaType::Object; + typed_schema.one_of = info + .iter() + .map(|variant| match variant { + VariantInfo::Struct(v) => json!({ + 
"type": "object", + "kind": "Struct", + "typePath": format!("{}::{}", type_path, v.name()), + "shortPath": v.name(), + "properties": v + .iter() + .map(|field| (field.name().to_owned(), field.ref_type())) + .collect::>(), + "additionalProperties": false, + "required": v + .iter() + .filter(|field| !field.type_path().starts_with("core::option::Option")) + .map(NamedField::name) + .collect::>(), + }), + VariantInfo::Tuple(v) => json!({ + "type": "array", + "kind": "Tuple", + "typePath": format!("{}::{}", type_path, v.name()), + "shortPath": v.name(), + "prefixItems": v + .iter() + .map(SchemaJsonReference::ref_type) + .collect::>(), + "items": false, + }), + VariantInfo::Unit(v) => json!({ + "typePath": format!("{}::{}", type_path, v.name()), + "shortPath": v.name(), + }), + }) + .collect::>(); + } + } + TypeInfo::TupleStruct(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::TupleStruct; + typed_schema.prefix_items = info + .iter() + .map(SchemaJsonReference::ref_type) + .collect::>(); + typed_schema.items = Some(false.into()); + } + TypeInfo::List(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::List; + typed_schema.items = info.item_ty().ref_type().into(); + } + TypeInfo::Array(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::Array; + typed_schema.items = info.item_ty().ref_type().into(); + } + TypeInfo::Map(info) => { + typed_schema.schema_type = SchemaType::Object; + typed_schema.kind = SchemaKind::Map; + typed_schema.key_type = info.key_ty().ref_type().into(); + typed_schema.value_type = info.value_ty().ref_type().into(); + } + TypeInfo::Tuple(info) => { + typed_schema.schema_type = SchemaType::Array; + typed_schema.kind = SchemaKind::Tuple; + typed_schema.prefix_items = info + .iter() + .map(SchemaJsonReference::ref_type) + .collect::>(); + typed_schema.items = Some(false.into()); + } + TypeInfo::Set(info) => { + 
typed_schema.schema_type = SchemaType::Set; + typed_schema.kind = SchemaKind::Set; + typed_schema.items = info.value_ty().ref_type().into(); + } + TypeInfo::Opaque(info) => { + typed_schema.schema_type = info.map_json_type(); + typed_schema.kind = SchemaKind::Value; + } + }; + typed_schema + } +} + +/// JSON Schema type for Bevy Registry Types +/// It tries to follow this standard: +/// +/// To take the full advantage from info provided by Bevy registry it provides extra fields +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "camelCase")] +pub struct JsonSchemaBevyType { + /// Bevy specific field, short path of the type. + pub short_path: String, + /// Bevy specific field, full path of the type. + pub type_path: String, + /// Bevy specific field, path of the module that type is part of. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub module_path: Option, + /// Bevy specific field, name of the crate that type is part of. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub crate_name: Option, + /// Bevy specific field, names of the types that type reflects. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub reflect_types: Vec, + /// Bevy specific field, [`TypeInfo`] type mapping. + pub kind: SchemaKind, + /// Bevy specific field, provided when [`SchemaKind`] `kind` field is equal to [`SchemaKind::Map`]. + /// + /// It contains type info of key of the Map. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub key_type: Option, + /// Bevy specific field, provided when [`SchemaKind`] `kind` field is equal to [`SchemaKind::Map`]. + /// + /// It contains type info of value of the Map. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub value_type: Option, + /// The type keyword is fundamental to JSON Schema. It specifies the data type for a schema. 
+ #[serde(rename = "type")] + pub schema_type: SchemaType, + /// The behavior of this keyword depends on the presence and annotation results of "properties" + /// and "patternProperties" within the same schema object. + /// Validation with "additionalProperties" applies only to the child + /// values of instance names that do not appear in the annotation results of either "properties" or "patternProperties". + #[serde(skip_serializing_if = "Option::is_none", default)] + pub additional_properties: Option, + /// Validation succeeds if, for each name that appears in both the instance and as a name + /// within this keyword's value, the child instance for that name successfully validates + /// against the corresponding schema. + #[serde(skip_serializing_if = "HashMap::is_empty", default)] + pub properties: HashMap, + /// An object instance is valid against this keyword if every item in the array is the name of a property in the instance. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub required: Vec, + /// An instance validates successfully against this keyword if it validates successfully against exactly one schema defined by this keyword's value. + #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub one_of: Vec, + /// Validation succeeds if each element of the instance validates against the schema at the same position, if any. This keyword does not constrain the length of the array. If the array is longer than this keyword's value, this keyword validates only the prefix of matching length. + /// + /// This keyword produces an annotation value which is the largest index to which this keyword + /// applied a subschema. The value MAY be a boolean true if a subschema was applied to every + /// index of the instance, such as is produced by the "items" keyword. + /// This annotation affects the behavior of "items" and "unevaluatedItems". 
+ #[serde(skip_serializing_if = "Vec::is_empty", default)] + pub prefix_items: Vec, + /// This keyword applies its subschema to all instance elements at indexes greater + /// than the length of the "prefixItems" array in the same schema object, + /// as reported by the annotation result of that "prefixItems" keyword. + /// If no such annotation result exists, "items" applies its subschema to all + /// instance array elements. + /// + /// If the "items" subschema is applied to any positions within the instance array, + /// it produces an annotation result of boolean true, indicating that all remaining + /// array elements have been evaluated against this keyword's subschema. + #[serde(skip_serializing_if = "Option::is_none", default)] + pub items: Option, +} + +/// Kind of json schema, maps [`TypeInfo`] type +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +pub enum SchemaKind { + /// Struct + #[default] + Struct, + /// Enum type + Enum, + /// A key-value map + Map, + /// Array + Array, + /// List + List, + /// Fixed size collection of items + Tuple, + /// Fixed size collection of items with named fields + TupleStruct, + /// Set of unique values + Set, + /// Single value, eg. primitive types + Value, +} + +/// Type of json schema +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] +#[serde(rename_all = "lowercase")] +pub enum SchemaType { + /// Represents a string value. + String, + + /// Represents a floating-point number. + Float, + + /// Represents an unsigned integer. + Uint, + + /// Represents a signed integer. + Int, + + /// Represents an object with key-value pairs. + Object, + + /// Represents an array of values. + Array, + + /// Represents a boolean value (true or false). + Boolean, + + /// Represents a set of unique values. + Set, + + /// Represents a null value. + #[default] + Null, +} + +/// Helper trait for generating json schema reference +trait SchemaJsonReference { + /// Reference to another type in schema. 
+ /// The value `$ref` is a URI-reference that is resolved against the schema. + fn ref_type(self) -> Value; +} + +/// Helper trait for mapping bevy type path into json schema type +pub trait SchemaJsonType { + /// Bevy Reflect type path + fn get_type_path(&self) -> &'static str; + + /// JSON Schema type keyword from Bevy reflect type path into + fn map_json_type(&self) -> SchemaType { + match self.get_type_path() { + "bool" => SchemaType::Boolean, + "u8" | "u16" | "u32" | "u64" | "u128" | "usize" => SchemaType::Uint, + "i8" | "i16" | "i32" | "i64" | "i128" | "isize" => SchemaType::Int, + "f32" | "f64" => SchemaType::Float, + "char" | "str" | "alloc::string::String" => SchemaType::String, + _ => SchemaType::Object, + } + } +} + +impl SchemaJsonType for OpaqueInfo { + fn get_type_path(&self) -> &'static str { + self.type_path() + } +} + +impl SchemaJsonReference for &bevy_reflect::Type { + fn ref_type(self) -> Value { + let path = self.path(); + json!({"type": json!({ "$ref": format!("#/$defs/{path}") })}) + } +} + +impl SchemaJsonReference for &bevy_reflect::UnnamedField { + fn ref_type(self) -> Value { + let path = self.type_path(); + json!({"type": json!({ "$ref": format!("#/$defs/{path}") })}) + } +} + +impl SchemaJsonReference for &NamedField { + fn ref_type(self) -> Value { + let type_path = self.type_path(); + json!({"type": json!({ "$ref": format!("#/$defs/{type_path}") }), "typePath": self.name()}) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use bevy_ecs::{component::Component, reflect::AppTypeRegistry, resource::Resource}; + use bevy_reflect::Reflect; + + #[test] + fn reflect_export_struct() { + #[derive(Reflect, Resource, Default, Deserialize, Serialize)] + #[reflect(Resource, Default, Serialize, Deserialize)] + struct Foo { + a: f32, + b: Option, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + 
.get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + + assert!( + !schema.reflect_types.contains(&"Component".to_owned()), + "Should not be a component" + ); + assert!( + schema.reflect_types.contains(&"Resource".to_owned()), + "Should be a resource" + ); + let _ = schema.properties.get("a").expect("Missing `a` field"); + let _ = schema.properties.get("b").expect("Missing `b` field"); + assert!( + schema.required.contains(&"a".to_owned()), + "Field a should be required" + ); + assert!( + !schema.required.contains(&"b".to_owned()), + "Field b should not be required" + ); + } + + #[test] + fn reflect_export_enum() { + #[derive(Reflect, Component, Default, Deserialize, Serialize)] + #[reflect(Component, Default, Serialize, Deserialize)] + enum EnumComponent { + ValueOne(i32), + ValueTwo { + test: i32, + }, + #[default] + NoValue, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + assert!( + schema.reflect_types.contains(&"Component".to_owned()), + "Should be a component" + ); + assert!( + !schema.reflect_types.contains(&"Resource".to_owned()), + "Should not be a resource" + ); + assert!(schema.properties.is_empty(), "Should not have any field"); + assert!(schema.one_of.len() == 3, "Should have 3 possible schemas"); + } + + #[test] + fn reflect_export_struct_without_reflect_types() { + #[derive(Reflect, Component, Default, Deserialize, Serialize)] + enum EnumComponent { + ValueOne(i32), + ValueTwo { + test: i32, + }, + #[default] + NoValue, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + 
.expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + assert!( + !schema.reflect_types.contains(&"Component".to_owned()), + "Should not be a component" + ); + assert!( + !schema.reflect_types.contains(&"Resource".to_owned()), + "Should not be a resource" + ); + assert!(schema.properties.is_empty(), "Should not have any field"); + assert!(schema.one_of.len() == 3, "Should have 3 possible schemas"); + } + + #[test] + fn reflect_export_tuple_struct() { + #[derive(Reflect, Component, Default, Deserialize, Serialize)] + #[reflect(Component, Default, Serialize, Deserialize)] + struct TupleStructType(usize, i32); + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + assert!( + schema.reflect_types.contains(&"Component".to_owned()), + "Should be a component" + ); + assert!( + !schema.reflect_types.contains(&"Resource".to_owned()), + "Should not be a resource" + ); + assert!(schema.properties.is_empty(), "Should not have any field"); + assert!(schema.prefix_items.len() == 2, "Should have 2 prefix items"); + } + + #[test] + fn reflect_export_serialization_check() { + #[derive(Reflect, Resource, Default, Deserialize, Serialize)] + #[reflect(Resource, Default)] + struct Foo { + a: f32, + } + + let atr = AppTypeRegistry::default(); + { + let mut register = atr.write(); + register.register::(); + } + let type_registry = atr.read(); + let foo_registration = type_registry + .get(TypeId::of::()) + .expect("SHOULD BE REGISTERED") + .clone(); + let (_, schema) = export_type(&foo_registration); + let schema_as_value = serde_json::to_value(&schema).expect("Should serialize"); + let value = json!({ + "shortPath": "Foo", + "typePath": "bevy_remote::schemas::json_schema::tests::Foo", + "modulePath": 
"bevy_remote::schemas::json_schema::tests", + "crateName": "bevy_remote", + "reflectTypes": [ + "Resource", + "Default", + ], + "kind": "Struct", + "type": "object", + "additionalProperties": false, + "properties": { + "a": { + "type": { + "$ref": "#/$defs/f32" + } + }, + }, + "required": [ + "a" + ] + }); + assert_eq!(schema_as_value, value); + } +} diff --git a/crates/bevy_remote/src/schemas/mod.rs b/crates/bevy_remote/src/schemas/mod.rs new file mode 100644 index 0000000000000..7104fd5547549 --- /dev/null +++ b/crates/bevy_remote/src/schemas/mod.rs @@ -0,0 +1,4 @@ +//! Module with schemas used for various BRP endpoints + +pub mod json_schema; +pub mod open_rpc; diff --git a/crates/bevy_remote/src/schemas/open_rpc.rs b/crates/bevy_remote/src/schemas/open_rpc.rs new file mode 100644 index 0000000000000..0ffda36bc375e --- /dev/null +++ b/crates/bevy_remote/src/schemas/open_rpc.rs @@ -0,0 +1,118 @@ +//! Module with trimmed down `OpenRPC` document structs. +//! It tries to follow this standard: +use bevy_platform::collections::HashMap; +use bevy_utils::default; +use serde::{Deserialize, Serialize}; + +use crate::RemoteMethods; + +use super::json_schema::JsonSchemaBevyType; + +/// Represents an `OpenRPC` document as defined by the `OpenRPC` specification. +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct OpenRpcDocument { + /// The version of the `OpenRPC` specification being used. + pub openrpc: String, + /// Informational metadata about the document. + pub info: InfoObject, + /// List of RPC methods defined in the document. + pub methods: Vec, + /// Optional list of server objects that provide the API endpoint details. + pub servers: Option>, +} + +/// Contains metadata information about the `OpenRPC` document. +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct InfoObject { + /// The title of the API or document. + pub title: String, + /// The version of the API. 
+ pub version: String, + /// An optional description providing additional details about the API. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// A collection of custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +impl Default for InfoObject { + fn default() -> Self { + Self { + title: "Bevy Remote Protocol".to_owned(), + version: env!("CARGO_PKG_VERSION").to_owned(), + description: None, + extensions: Default::default(), + } + } +} + +/// Describes a server hosting the API as specified in the `OpenRPC` document. +#[derive(Serialize, Deserialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct ServerObject { + /// The name of the server. + pub name: String, + /// The URL endpoint of the server. + pub url: String, + /// An optional description of the server. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// Additional custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +/// Represents an RPC method in the `OpenRPC` document. +#[derive(Serialize, Deserialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct MethodObject { + /// The method name (e.g., "/bevy/get") + pub name: String, + /// An optional short summary of the method. + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + /// An optional detailed description of the method. + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// Parameters for the RPC method + #[serde(default)] + pub params: Vec, + // /// The expected result of the method + // #[serde(skip_serializing_if = "Option::is_none")] + // pub result: Option, + /// Additional custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +/// Represents an RPC method parameter in the `OpenRPC` document. 
+#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct Parameter { + /// Parameter name + pub name: String, + /// Parameter description + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + /// JSON schema describing the parameter + pub schema: JsonSchemaBevyType, + /// Additional custom extension fields. + #[serde(flatten)] + pub extensions: HashMap, +} + +impl From<&RemoteMethods> for Vec { + fn from(value: &RemoteMethods) -> Self { + value + .methods() + .iter() + .map(|e| MethodObject { + name: e.to_owned(), + ..default() + }) + .collect() + } +} diff --git a/crates/bevy_render/Cargo.toml b/crates/bevy_render/Cargo.toml index 6e1407f326cbf..5da61a57dd850 100644 --- a/crates/bevy_render/Cargo.toml +++ b/crates/bevy_render/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_render" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides rendering functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,9 +9,20 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] +# Bevy users should _never_ turn this feature on. +# +# Bevy/wgpu developers can turn this feature on to test a newer version of wgpu without needing to also update naga_oil. +# +# When turning this feature on, you can add the following to bevy/Cargo.toml (not this file), and then run `cargo update`: +# [patch.crates-io] +# wgpu = { git = "https://github.com/gfx-rs/wgpu", rev = "..." } +# wgpu-core = { git = "https://github.com/gfx-rs/wgpu", rev = "..." } +# wgpu-hal = { git = "https://github.com/gfx-rs/wgpu", rev = "..." } +# wgpu-types = { git = "https://github.com/gfx-rs/wgpu", rev = "..." 
} +decoupled_naga = [] + # Texture formats (require more than just image support) basis-universal = ["bevy_image/basis-universal"] -dds = ["bevy_image/dds"] exr = ["bevy_image/exr"] hdr = ["bevy_image/hdr"] ktx2 = ["dep:ktx2", "bevy_image/ktx2"] @@ -20,43 +31,48 @@ multi_threaded = ["bevy_tasks/multi_threaded"] shader_format_glsl = ["naga/glsl-in", "naga/wgsl-out", "naga_oil/glsl"] shader_format_spirv = ["wgpu/spirv", "naga/spv-in", "naga/spv-out"] +shader_format_wesl = ["wesl"] # Enable SPIR-V shader passthrough spirv_shader_passthrough = ["wgpu/spirv"] +# Statically linked DXC shader compiler for DirectX 12 +# TODO: When wgpu switches to DirectX 12 instead of Vulkan by default on windows, make this a default feature +statically-linked-dxc = ["wgpu/static-dxc"] + trace = ["profiling"] -tracing-tracy = [] +tracing-tracy = ["dep:tracy-client"] ci_limits = [] webgl = ["wgpu/webgl"] webgpu = ["wgpu/webgpu"] -ios_simulator = [] detailed_trace = [] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev", features = [ +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev", features = [ "serialize", "wgpu-types", ] } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_encase_derive = { path = "../bevy_encase_derive", version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } 
+bevy_diagnostic = { path = "../bevy_diagnostic", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_encase_derive = { path = "../bevy_encase_derive", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render_macros = { path = "macros", version = "0.16.0-dev" } +bevy_time = { path = "../bevy_time", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_mesh = { path = "../bevy_mesh", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", + "serialize", ] } -bevy_render_macros = { path = "macros", version = "0.15.0-dev" } -bevy_time = { path = "../bevy_time", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_tasks = { path = "../bevy_tasks", version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_mesh = { path = "../bevy_mesh", version = "0.15.0-dev" } # rendering image = { version = "0.25.2", default-features = false } @@ -67,17 +83,17 @@ codespan-reporting = "0.11.0" # It is enabled for now to avoid having to do a significant overhaul of the renderer just for wasm. # When the 'atomics' feature is enabled `fragile-send-sync-non-atomic` does nothing # and Bevy instead wraps `wgpu` types to verify they are not used off their origin thread. 
-wgpu = { version = "23.0.1", default-features = false, features = [ +wgpu = { version = "24", default-features = false, features = [ "wgsl", "dx12", "metal", "naga-ir", "fragile-send-sync-non-atomic-wasm", ] } -naga = { version = "23", features = ["wgsl-in"] } +naga = { version = "24", features = ["wgsl-in"] } serde = { version = "1", features = ["derive"] } bytemuck = { version = "1.5", features = ["derive", "must_cast"] } -downcast-rs = "1.2.0" +downcast-rs = { version = "2", default-features = false, features = ["std"] } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } futures-lite = "2.0.1" @@ -92,15 +108,24 @@ nonmax = "0.5" smallvec = { version = "1.11", features = ["const_new"] } offset-allocator = "0.2" variadics_please = "1.1" +tracing = { version = "0.1", default-features = false, features = ["std"] } +tracy-client = { version = "0.18.0", optional = true } +indexmap = { version = "2" } +fixedbitset = { version = "0.5" } +bitflags = "2" +wesl = { version = "0.1.2", optional = true } [target.'cfg(not(target_arch = "wasm32"))'.dependencies] # Omit the `glsl` feature in non-WebAssembly by default. -naga_oil = { version = "0.16", default-features = false, features = [ +naga_oil = { version = "0.17", default-features = false, features = [ "test_shader", ] } +[dev-dependencies] +proptest = "1" + [target.'cfg(target_arch = "wasm32")'.dependencies] -naga_oil = "0.16" +naga_oil = "0.17" js-sys = "0.3" web-sys = { version = "0.3.67", features = [ 'Blob', @@ -112,6 +137,19 @@ web-sys = { version = "0.3.67", features = [ 'Window', ] } wasm-bindgen = "0.2" +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. 
+bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_tasks = { path = "../bevy_tasks", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, features = [ + "web", +] } [target.'cfg(all(target_arch = "wasm32", target_feature = "atomics"))'.dependencies] send_wrapper = "0.6.0" diff --git a/crates/bevy_render/LICENSE-APACHE b/crates/bevy_render/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_render/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_render/LICENSE-MIT b/crates/bevy_render/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_render/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_render/macros/Cargo.toml b/crates/bevy_render/macros/Cargo.toml index fab68977bc98b..c3fc40b23e2fb 100644 --- a/crates/bevy_render/macros/Cargo.toml +++ b/crates/bevy_render/macros/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_render_macros" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Derive implementations for bevy_render" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -12,7 +12,7 @@ keywords = ["bevy"] proc-macro = true [dependencies] -bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.15.0-dev" } +bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.16.0-dev" } syn = "2.0" proc-macro2 = "1.0" diff --git a/crates/bevy_render/macros/LICENSE-APACHE b/crates/bevy_render/macros/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_render/macros/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_render/macros/LICENSE-MIT b/crates/bevy_render/macros/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_render/macros/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_render/macros/src/as_bind_group.rs b/crates/bevy_render/macros/src/as_bind_group.rs index 96176e071ca6c..4252929170f19 100644 --- a/crates/bevy_render/macros/src/as_bind_group.rs +++ b/crates/bevy_render/macros/src/as_bind_group.rs @@ -3,10 +3,11 @@ use proc_macro::TokenStream; use proc_macro2::{Ident, Span}; use quote::{quote, ToTokens}; use syn::{ + parenthesized, parse::{Parse, ParseStream}, punctuated::Punctuated, - token::Comma, - Data, DataStruct, Error, Fields, Lit, LitInt, LitStr, Meta, MetaList, Result, + token::{Comma, DotDot}, + Data, DataStruct, Error, Fields, LitInt, LitStr, Meta, MetaList, Result, }; const UNIFORM_ATTRIBUTE_NAME: Symbol = Symbol("uniform"); @@ -16,6 +17,12 @@ const SAMPLER_ATTRIBUTE_NAME: Symbol = Symbol("sampler"); const STORAGE_ATTRIBUTE_NAME: Symbol = Symbol("storage"); const BIND_GROUP_DATA_ATTRIBUTE_NAME: Symbol = Symbol("bind_group_data"); const BINDLESS_ATTRIBUTE_NAME: Symbol = Symbol("bindless"); +const DATA_ATTRIBUTE_NAME: Symbol = Symbol("data"); +const BINDING_ARRAY_MODIFIER_NAME: Symbol = Symbol("binding_array"); +const LIMIT_MODIFIER_NAME: Symbol = Symbol("limit"); +const INDEX_TABLE_MODIFIER_NAME: Symbol = Symbol("index_table"); +const RANGE_MODIFIER_NAME: Symbol = Symbol("range"); +const BINDING_MODIFIER_NAME: Symbol = Symbol("binding"); #[derive(Copy, Clone, Debug)] enum BindingType { @@ -39,6 +46,17 @@ enum BindingState<'a> { }, } +enum BindlessSlabResourceLimitAttr { + Auto, + Limit(LitInt), +} + +// The `bindless(index_table(range(M..N)))` attribute. 
+struct BindlessIndexTableRangeAttr {
+    start: LitInt,
+    end: LitInt,
+}
+
 pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result<TokenStream> {
     let manifest = BevyManifest::shared();
     let render_path = manifest.get_path("bevy_render");
@@ -48,14 +66,22 @@
     let mut binding_states: Vec<BindingState> = Vec::new();
     let mut binding_impls = Vec::new();
-    let mut binding_layouts = Vec::new();
+    let mut bindless_binding_layouts = Vec::new();
+    let mut non_bindless_binding_layouts = Vec::new();
+    let mut bindless_resource_types = Vec::new();
+    let mut bindless_buffer_descriptors = Vec::new();
     let mut attr_prepared_data_ident = None;
+    // After the first attribute pass, this will be `None` if the object isn't
+    // bindless and `Some` if it is.
     let mut attr_bindless_count = None;
+    let mut attr_bindless_index_table_range = None;
+    let mut attr_bindless_index_table_binding = None;
 
     // `actual_bindless_slot_count` holds the actual number of bindless slots
     // per bind group, taking into account whether the current platform supports
     // bindless resources.
     let actual_bindless_slot_count = Ident::new("actual_bindless_slot_count", Span::call_site());
+    let bind_group_layout_entries = Ident::new("bind_group_layout_entries", Span::call_site());
 
     // The `BufferBindingType` and corresponding `BufferUsages` used for
     // uniforms. We need this because bindless uniforms don't exist, so in
@@ -63,7 +89,7 @@
     let uniform_binding_type = Ident::new("uniform_binding_type", Span::call_site());
     let uniform_buffer_usages = Ident::new("uniform_buffer_usages", Span::call_site());
 
-    // Read struct-level attributes
+    // Read struct-level attributes, first pass.
for attr in &ast.attrs { if let Some(attr_ident) = attr.path().get_ident() { if attr_ident == BIND_GROUP_DATA_ATTRIBUTE_NAME { @@ -72,35 +98,216 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { { attr_prepared_data_ident = Some(prepared_data_ident); } - } else if attr_ident == UNIFORM_ATTRIBUTE_NAME { - let (binding_index, converted_shader_type) = get_uniform_binding_attr(attr)?; - binding_impls.push(quote! {{ - use #render_path::render_resource::AsBindGroupShaderType; - let mut buffer = #render_path::render_resource::encase::UniformBuffer::new(Vec::new()); - let converted: #converted_shader_type = self.as_bind_group_shader_type(&images); - buffer.write(&converted).unwrap(); - ( - #binding_index, - #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( - &#render_path::render_resource::BufferInitDescriptor { - label: None, - usage: #uniform_buffer_usages, - contents: buffer.as_ref(), - }, + } else if attr_ident == BINDLESS_ATTRIBUTE_NAME { + attr_bindless_count = Some(BindlessSlabResourceLimitAttr::Auto); + if let Meta::List(_) = attr.meta { + // Parse bindless features. 
+ attr.parse_nested_meta(|submeta| { + if submeta.path.is_ident(&LIMIT_MODIFIER_NAME) { + let content; + parenthesized!(content in submeta.input); + let lit: LitInt = content.parse()?; + + attr_bindless_count = Some(BindlessSlabResourceLimitAttr::Limit(lit)); + return Ok(()); + } + + if submeta.path.is_ident(&INDEX_TABLE_MODIFIER_NAME) { + submeta.parse_nested_meta(|subsubmeta| { + if subsubmeta.path.is_ident(&RANGE_MODIFIER_NAME) { + let content; + parenthesized!(content in subsubmeta.input); + let start: LitInt = content.parse()?; + content.parse::()?; + let end: LitInt = content.parse()?; + attr_bindless_index_table_range = + Some(BindlessIndexTableRangeAttr { start, end }); + return Ok(()); + } + + if subsubmeta.path.is_ident(&BINDING_MODIFIER_NAME) { + let content; + parenthesized!(content in subsubmeta.input); + let lit: LitInt = content.parse()?; + + attr_bindless_index_table_binding = Some(lit); + return Ok(()); + } + + Err(Error::new_spanned( + attr, + "Expected `range(M..N)` or `binding(N)`", + )) + })?; + return Ok(()); + } + + Err(Error::new_spanned( + attr, + "Expected `limit` or `index_table`", )) - ) - }}); + })?; + } + } + } + } - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #render_path::render_resource::ShaderStages::all(), - ty: #render_path::render_resource::BindingType::Buffer { - ty: #uniform_binding_type, - has_dynamic_offset: false, - min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), - }, - count: #actual_bindless_slot_count, + // Read struct-level attributes, second pass. 
+ for attr in &ast.attrs { + if let Some(attr_ident) = attr.path().get_ident() { + if attr_ident == UNIFORM_ATTRIBUTE_NAME || attr_ident == DATA_ATTRIBUTE_NAME { + let UniformBindingAttr { + binding_type, + binding_index, + converted_shader_type, + binding_array: binding_array_binding, + } = get_uniform_binding_attr(attr)?; + match binding_type { + UniformBindingAttrType::Uniform => { + binding_impls.push(quote! {{ + use #render_path::render_resource::AsBindGroupShaderType; + let mut buffer = #render_path::render_resource::encase::UniformBuffer::new(Vec::new()); + let converted: #converted_shader_type = self.as_bind_group_shader_type(&images); + buffer.write(&converted).unwrap(); + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Buffer(render_device.create_buffer_with_data( + &#render_path::render_resource::BufferInitDescriptor { + label: None, + usage: #uniform_buffer_usages, + contents: buffer.as_ref(), + }, + )) + ) + }}); + + match (&binding_array_binding, &attr_bindless_count) { + (&None, &Some(_)) => { + return Err(Error::new_spanned( + attr, + "Must specify `binding_array(...)` with `#[uniform]` if the \ + object is bindless", + )); + } + (&Some(_), &None) => { + return Err(Error::new_spanned( + attr, + "`binding_array(...)` with `#[uniform]` requires the object to \ + be bindless", + )); + } + _ => {} + } + + let binding_array_binding = binding_array_binding.unwrap_or(0); + bindless_binding_layouts.push(quote! 
{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_array_binding, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), + }, + count: #actual_bindless_slot_count, + } + ); + }); + + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + quote! { #render_path::render_resource::BindlessResourceType::Buffer }, + ); + } + + UniformBindingAttrType::Data => { + binding_impls.push(quote! {{ + use #render_path::render_resource::AsBindGroupShaderType; + use #render_path::render_resource::encase::{ShaderType, internal::WriteInto}; + let mut buffer: Vec = Vec::new(); + let converted: #converted_shader_type = self.as_bind_group_shader_type(&images); + converted.write_into( + &mut #render_path::render_resource::encase::internal::Writer::new( + &converted, + &mut buffer, + 0, + ).unwrap(), + ); + let min_size = <#converted_shader_type as #render_path::render_resource::ShaderType>::min_size().get() as usize; + while buffer.len() < min_size { + buffer.push(0); + } + ( + #binding_index, + #render_path::render_resource::OwnedBindingResource::Data( + #render_path::render_resource::OwnedData(buffer) + ) + ) + }}); + + let binding_array_binding = binding_array_binding.unwrap_or(0); + bindless_binding_layouts.push(quote! 
{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_array_binding, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), + }, + count: None, + } + ); + }); + + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + quote! { #render_path::render_resource::BindlessResourceType::DataBuffer }, + ); + } + } + + // Push the non-bindless binding layout. + + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#converted_shader_type as #render_path::render_resource::ShaderType>::min_size()), + }, + count: None, + } + ); + }); + + bindless_buffer_descriptors.push(quote! { + #render_path::render_resource::BindlessBufferDescriptor { + // Note that, because this is bindless, *binding + // index* here refers to the index in the + // bindless index table (`bindless_index`), and + // the actual binding number is the *binding + // array binding*. 
+ binding_number: #render_path::render_resource::BindingNumber( + #binding_array_binding + ), + bindless_index: + #render_path::render_resource::BindlessIndex(#binding_index), + size: Some( + < + #converted_shader_type as + #render_path::render_resource::ShaderType + >::min_size().get() as usize + ), } }); @@ -109,12 +316,6 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { binding_states.resize(required_len, BindingState::Free); } binding_states[binding_index as usize] = BindingState::OccupiedConvertedUniform; - } else if attr_ident == BINDLESS_ATTRIBUTE_NAME { - if let Ok(count_lit) = - attr.parse_args_with(|input: ParseStream| input.parse::()) - { - attr_bindless_count = Some(count_lit); - } } } } @@ -132,6 +333,11 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { } }; + // Count the number of sampler fields needed. We might have to disable + // bindless if bindless arrays take the GPU over the maximum number of + // samplers. + let mut sampler_binding_count: u32 = 0; + // Read field-level attributes for field in fields { // Search ahead for texture attributes so we can use them with any @@ -220,11 +426,21 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { match binding_type { BindingType::Uniform => { + if attr_bindless_count.is_some() { + return Err(Error::new_spanned( + attr, + "Only structure-level `#[uniform]` attributes are supported in \ + bindless mode", + )); + } + // uniform codegen is deferred to account for combined uniform bindings } + BindingType::Storage => { let StorageAttrs { visibility, + binding_array: binding_array_binding, read_only, buffer, } = get_storage_binding_attr(nested_meta_items)?; @@ -254,20 +470,78 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { }); } - binding_layouts.push(quote! 
{ - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::Buffer { - ty: #render_path::render_resource::BufferBindingType::Storage { read_only: #read_only }, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: #actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote! { + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::Buffer { + ty: #render_path::render_resource::BufferBindingType::Storage { read_only: #read_only }, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: #actual_bindless_slot_count, + } + ); }); + + if let Some(binding_array_binding) = binding_array_binding { + // Add the storage buffer to the `BindlessResourceType` list + // in the bindless descriptor. + let bindless_resource_type = quote! { + #render_path::render_resource::BindlessResourceType::Buffer + }; + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + bindless_resource_type, + ); + + // Push the buffer descriptor. + bindless_buffer_descriptors.push(quote! { + #render_path::render_resource::BindlessBufferDescriptor { + // Note that, because this is bindless, *binding + // index* here refers to the index in the bindless + // index table (`bindless_index`), and the actual + // binding number is the *binding array binding*. + binding_number: #render_path::render_resource::BindingNumber( + #binding_array_binding + ), + bindless_index: + #render_path::render_resource::BindlessIndex(#binding_index), + size: None, + } + }); + + // Declare the binding array. 
+ bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_array_binding, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #render_path::render_resource::BufferBindingType::Storage { + read_only: #read_only + }, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: #actual_bindless_slot_count, + } + ); + }); + } } + BindingType::StorageTexture => { + if attr_bindless_count.is_some() { + return Err(Error::new_spanned( + attr, + "Storage textures are unsupported in bindless mode", + )); + } + let StorageTextureAttrs { dimension, image_format, @@ -284,7 +558,7 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { binding_impls.insert(0, quote! { ( #binding_index, #render_path::render_resource::OwnedBindingResource::TextureView( - #dimension, + #render_path::render_resource::#dimension, { let handle: Option<&#asset_path::Handle<#image_path::Image>> = (&self.#field_name).into(); if let Some(handle) = handle { @@ -297,19 +571,22 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }); - binding_layouts.push(quote! { - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::StorageTexture { - access: #render_path::render_resource::StorageTextureAccess::#access, - format: #render_path::render_resource::TextureFormat::#image_format, - view_dimension: #render_path::render_resource::#dimension, - }, - count: #actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote! 
{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::StorageTexture { + access: #render_path::render_resource::StorageTextureAccess::#access, + format: #render_path::render_resource::TextureFormat::#image_format, + view_dimension: #render_path::render_resource::#dimension, + }, + count: #actual_bindless_slot_count, + } + ); }); } + BindingType::Texture => { let TextureAttrs { dimension, @@ -341,19 +618,66 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }); - binding_layouts.push(quote! { - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::Texture { - multisampled: #multisampled, - sample_type: #render_path::render_resource::#sample_type, - view_dimension: #render_path::render_resource::#dimension, - }, - count: #actual_bindless_slot_count, - } + sampler_binding_count += 1; + + non_bindless_binding_layouts.push(quote! { + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::Texture { + multisampled: #multisampled, + sample_type: #render_path::render_resource::#sample_type, + view_dimension: #render_path::render_resource::#dimension, + }, + count: #actual_bindless_slot_count, + } + ); }); + + let bindless_resource_type = match *dimension { + BindingTextureDimension::D1 => { + quote! { + #render_path::render_resource::BindlessResourceType::Texture1d + } + } + BindingTextureDimension::D2 => { + quote! { + #render_path::render_resource::BindlessResourceType::Texture2d + } + } + BindingTextureDimension::D2Array => { + quote! { + #render_path::render_resource::BindlessResourceType::Texture2dArray + } + } + BindingTextureDimension::Cube => { + quote! 
{ + #render_path::render_resource::BindlessResourceType::TextureCube + } + } + BindingTextureDimension::CubeArray => { + quote! { + #render_path::render_resource::BindlessResourceType::TextureCubeArray + } + } + BindingTextureDimension::D3 => { + quote! { + #render_path::render_resource::BindlessResourceType::Texture3d + } + } + }; + + // Add the texture to the `BindlessResourceType` list in the + // bindless descriptor. + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + bindless_resource_type, + ); } + BindingType::Sampler => { let SamplerAttrs { sampler_binding_type, @@ -387,7 +711,10 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { binding_impls.insert(0, quote! { ( #binding_index, - #render_path::render_resource::OwnedBindingResource::Sampler({ + #render_path::render_resource::OwnedBindingResource::Sampler( + // TODO: Support other types. + #render_path::render_resource::WgpuSamplerBindingType::Filtering, + { let handle: Option<&#asset_path::Handle<#image_path::Image>> = (&self.#field_name).into(); if let Some(handle) = handle { let image = images.get(handle).ok_or_else(|| #render_path::render_resource::AsBindGroupError::RetryNextUpdate)?; @@ -417,14 +744,31 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }); - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #visibility, - ty: #render_path::render_resource::BindingType::Sampler(#render_path::render_resource::#sampler_binding_type), - count: #actual_bindless_slot_count, - } + sampler_binding_count += 1; + + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #visibility, + ty: #render_path::render_resource::BindingType::Sampler(#render_path::render_resource::#sampler_binding_type), + count: #actual_bindless_slot_count, + } + ); }); + 
+ // Add the sampler to the `BindlessResourceType` list in the + // bindless descriptor. + // + // TODO: Support other types of samplers. + add_bindless_resource_type( + &render_path, + &mut bindless_resource_types, + binding_index, + quote! { + #render_path::render_resource::BindlessResourceType::SamplerFiltering + }, + ); } } } @@ -440,11 +784,7 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { Some(_) => { quote! { let (#uniform_binding_type, #uniform_buffer_usages) = - if render_device.features().contains( - #render_path::settings::WgpuFeatures::BUFFER_BINDING_ARRAY | - #render_path::settings::WgpuFeatures::TEXTURE_BINDING_ARRAY - ) && render_device.limits().max_storage_buffers_per_shader_stage > 0 && - !force_no_bindless { + if Self::bindless_supported(render_device) && !force_no_bindless { ( #render_path::render_resource::BufferBindingType::Storage { read_only: true }, #render_path::render_resource::BufferUsages::STORAGE, @@ -490,17 +830,19 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }}); - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { - binding: #binding_index, - visibility: #render_path::render_resource::ShaderStages::all(), - ty: #render_path::render_resource::BindingType::Buffer { - ty: #uniform_binding_type, - has_dynamic_offset: false, - min_binding_size: Some(<#field_ty as #render_path::render_resource::ShaderType>::min_size()), - }, - count: actual_bindless_slot_count, - } + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push( + #render_path::render_resource::BindGroupLayoutEntry { + binding: #binding_index, + visibility: #render_path::render_resource::ShaderStages::all(), + ty: #render_path::render_resource::BindingType::Buffer { + ty: #uniform_binding_type, + has_dynamic_offset: false, + min_binding_size: Some(<#field_ty as #render_path::render_resource::ShaderType>::min_size()), + }, + count: #actual_bindless_slot_count, + } + ); }); // 
multi-field uniform bindings for a given index require an intermediate struct to derive ShaderType } else { @@ -536,8 +878,8 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { ) }}); - binding_layouts.push(quote!{ - #render_path::render_resource::BindGroupLayoutEntry { + non_bindless_binding_layouts.push(quote!{ + #bind_group_layout_entries.push(#render_path::render_resource::BindGroupLayoutEntry { binding: #binding_index, visibility: #render_path::render_resource::ShaderStages::all(), ty: #render_path::render_resource::BindingType::Buffer { @@ -545,8 +887,8 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { has_dynamic_offset: false, min_binding_size: Some(<#uniform_struct_name as #render_path::render_resource::ShaderType>::min_size()), }, - count: actual_bindless_slot_count, - } + count: #actual_bindless_slot_count, + }); }); } } @@ -563,33 +905,138 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { (prepared_data.clone(), prepared_data) }; - // Calculate the actual number of bindless slots, taking hardware - // limitations into account. - let (bindless_slot_count, actual_bindless_slot_count_declaration) = match attr_bindless_count { - Some(bindless_count) => ( + // Calculate the number of samplers that we need, so that we don't go over + // the limit on certain platforms. See + // https://github.com/bevyengine/bevy/issues/16988. + let bindless_count_syntax = match attr_bindless_count { + Some(BindlessSlabResourceLimitAttr::Auto) => { + quote! { #render_path::render_resource::AUTO_BINDLESS_SLAB_RESOURCE_LIMIT } + } + Some(BindlessSlabResourceLimitAttr::Limit(ref count)) => { + quote! { #count } + } + None => quote! { 0 }, + }; + + // Calculate the actual bindless index table range, taking the + // `#[bindless(index_table(range(M..N)))]` attribute into account. + let bindless_index_table_range = match attr_bindless_index_table_range { + None => { + let resource_count = bindless_resource_types.len() as u32; quote! 
{ - fn bindless_slot_count() -> Option { - Some(#bindless_count) - } - }, + #render_path::render_resource::BindlessIndex(0).. + #render_path::render_resource::BindlessIndex(#resource_count) + } + } + Some(BindlessIndexTableRangeAttr { start, end }) => { quote! { - let #actual_bindless_slot_count = if render_device.features().contains( - #render_path::settings::WgpuFeatures::BUFFER_BINDING_ARRAY | - #render_path::settings::WgpuFeatures::TEXTURE_BINDING_ARRAY - ) && render_device.limits().max_storage_buffers_per_shader_stage > 0 && - !force_no_bindless { - ::core::num::NonZeroU32::new(#bindless_count) - } else { - None - }; - }, - ), - None => ( - TokenStream::new().into(), - quote! { let #actual_bindless_slot_count: Option<::core::num::NonZeroU32> = None; }, - ), + #render_path::render_resource::BindlessIndex(#start).. + #render_path::render_resource::BindlessIndex(#end) + } + } + }; + + // Calculate the actual binding number of the bindless index table, taking + // the `#[bindless(index_table(binding(B)))]` into account. + let bindless_index_table_binding_number = match attr_bindless_index_table_binding { + None => quote! { #render_path::render_resource::BindingNumber(0) }, + Some(binding_number) => { + quote! { #render_path::render_resource::BindingNumber(#binding_number) } + } }; + // Calculate the actual number of bindless slots, taking hardware + // limitations into account. + let (bindless_slot_count, actual_bindless_slot_count_declaration, bindless_descriptor_syntax) = + match attr_bindless_count { + Some(ref bindless_count) => { + let bindless_supported_syntax = quote! 
{ + fn bindless_supported( + render_device: &#render_path::renderer::RenderDevice + ) -> bool { + render_device.features().contains( + #render_path::settings::WgpuFeatures::BUFFER_BINDING_ARRAY | + #render_path::settings::WgpuFeatures::TEXTURE_BINDING_ARRAY + ) && + render_device.limits().max_storage_buffers_per_shader_stage > 0 && + render_device.limits().max_samplers_per_shader_stage >= + (#sampler_binding_count * #bindless_count_syntax) + } + }; + let actual_bindless_slot_count_declaration = quote! { + let #actual_bindless_slot_count = if Self::bindless_supported(render_device) && + !force_no_bindless { + ::core::num::NonZeroU32::new(#bindless_count_syntax) + } else { + None + }; + }; + let bindless_slot_count_declaration = match bindless_count { + BindlessSlabResourceLimitAttr::Auto => { + quote! { + fn bindless_slot_count() -> Option< + #render_path::render_resource::BindlessSlabResourceLimit + > { + Some(#render_path::render_resource::BindlessSlabResourceLimit::Auto) + } + } + } + BindlessSlabResourceLimitAttr::Limit(lit) => { + quote! { + fn bindless_slot_count() -> Option< + #render_path::render_resource::BindlessSlabResourceLimit + > { + Some(#render_path::render_resource::BindlessSlabResourceLimit::Custom(#lit)) + } + } + } + }; + + let bindless_buffer_descriptor_count = bindless_buffer_descriptors.len(); + + // We use `LazyLock` so that we can call `min_size`, which isn't + // a `const fn`. + let bindless_descriptor_syntax = quote! 
{ + static RESOURCES: &[#render_path::render_resource::BindlessResourceType] = &[ + #(#bindless_resource_types),* + ]; + static BUFFERS: ::std::sync::LazyLock<[ + #render_path::render_resource::BindlessBufferDescriptor; + #bindless_buffer_descriptor_count + ]> = ::std::sync::LazyLock::new(|| { + [#(#bindless_buffer_descriptors),*] + }); + static INDEX_TABLES: &[ + #render_path::render_resource::BindlessIndexTableDescriptor + ] = &[ + #render_path::render_resource::BindlessIndexTableDescriptor { + indices: #bindless_index_table_range, + binding_number: #bindless_index_table_binding_number, + } + ]; + Some(#render_path::render_resource::BindlessDescriptor { + resources: ::std::borrow::Cow::Borrowed(RESOURCES), + buffers: ::std::borrow::Cow::Borrowed(&*BUFFERS), + index_tables: ::std::borrow::Cow::Borrowed(&*INDEX_TABLES), + }) + }; + + ( + quote! { + #bindless_slot_count_declaration + #bindless_supported_syntax + }, + actual_bindless_slot_count_declaration, + bindless_descriptor_syntax, + ) + } + None => ( + TokenStream::new().into(), + quote! { let #actual_bindless_slot_count: Option<::core::num::NonZeroU32> = None; }, + quote! { None }, + ), + }; + Ok(TokenStream::from(quote! 
{ #(#field_struct_impls)* @@ -632,12 +1079,57 @@ pub fn derive_as_bind_group(ast: syn::DeriveInput) -> Result { #actual_bindless_slot_count_declaration #uniform_binding_type_declarations - vec![#(#binding_layouts,)*] + let mut #bind_group_layout_entries = Vec::new(); + match #actual_bindless_slot_count { + Some(bindless_slot_count) => { + let bindless_index_table_range = #bindless_index_table_range; + #bind_group_layout_entries.extend( + #render_path::render_resource::create_bindless_bind_group_layout_entries( + bindless_index_table_range.end.0 - + bindless_index_table_range.start.0, + bindless_slot_count.into(), + #bindless_index_table_binding_number, + ).into_iter() + ); + #(#bindless_binding_layouts)*; + } + None => { + #(#non_bindless_binding_layouts)*; + } + }; + #bind_group_layout_entries + } + + fn bindless_descriptor() -> Option<#render_path::render_resource::BindlessDescriptor> { + #bindless_descriptor_syntax } } })) } +/// Adds a bindless resource type to the `BindlessResourceType` array in the +/// bindless descriptor we're building up. +/// +/// See the `bevy_render::render_resource::bindless::BindlessResourceType` +/// documentation for more information. +fn add_bindless_resource_type( + render_path: &syn::Path, + bindless_resource_types: &mut Vec, + binding_index: u32, + bindless_resource_type: proc_macro2::TokenStream, +) { + // If we need to grow the array, pad the unused fields with + // `BindlessResourceType::None`. + if bindless_resource_types.len() < (binding_index as usize + 1) { + bindless_resource_types.resize_with(binding_index as usize + 1, || { + quote! { #render_path::render_resource::BindlessResourceType::None } + }); + } + + // Assign the `BindlessResourceType`. 
+ bindless_resource_types[binding_index as usize] = bindless_resource_type; +} + fn get_fallback_image( render_path: &syn::Path, dimension: BindingTextureDimension, @@ -660,8 +1152,34 @@ fn get_fallback_image( /// like `#[uniform(LitInt, Ident)]` struct UniformBindingMeta { lit_int: LitInt, - _comma: Comma, ident: Ident, + binding_array: Option, +} + +/// The parsed structure-level `#[uniform]` or `#[data]` attribute. +/// +/// The corresponding syntax is `#[uniform(BINDING_INDEX, CONVERTED_SHADER_TYPE, +/// binding_array(BINDING_ARRAY)]`, optionally replacing `uniform` with `data`. +struct UniformBindingAttr { + /// Whether the declaration is `#[uniform]` or `#[data]`. + binding_type: UniformBindingAttrType, + /// The binding index. + binding_index: u32, + /// The uniform data type. + converted_shader_type: Ident, + /// The binding number of the binding array, if this is a bindless material. + binding_array: Option, +} + +/// Whether a structure-level shader type declaration is `#[uniform]` or +/// `#[data]`. +enum UniformBindingAttrType { + /// `#[uniform]`: i.e. in bindless mode, we need a separate buffer per data + /// instance. + Uniform, + /// `#[data]`: i.e. in bindless mode, we concatenate all instance data into + /// a single buffer. + Data, } /// Represents the arguments for any general binding attribute. @@ -703,22 +1221,62 @@ impl Parse for BindingIndexOptions { } impl Parse for UniformBindingMeta { + // Parse syntax like `#[uniform(0, StandardMaterial, binding_array(10))]`. fn parse(input: ParseStream) -> Result { + let lit_int = input.parse()?; + input.parse::()?; + let ident = input.parse()?; + + // Look for a `binding_array(BINDING_NUMBER)` declaration. + let mut binding_array: Option = None; + if input.parse::().is_ok() { + if input + .parse::()? 
+ .get_ident() + .is_none_or(|ident| *ident != BINDING_ARRAY_MODIFIER_NAME) + { + return Err(Error::new_spanned(ident, "Expected `binding_array`")); + } + let parser; + parenthesized!(parser in input); + binding_array = Some(parser.parse()?); + } + Ok(Self { - lit_int: input.parse()?, - _comma: input.parse()?, - ident: input.parse()?, + lit_int, + ident, + binding_array, }) } } -fn get_uniform_binding_attr(attr: &syn::Attribute) -> Result<(u32, Ident)> { +/// Parses a structure-level `#[uniform]` attribute (not a field-level +/// `#[uniform]` attribute). +fn get_uniform_binding_attr(attr: &syn::Attribute) -> Result { + let attr_ident = attr + .path() + .get_ident() + .expect("Shouldn't be here if we didn't have an attribute"); + let uniform_binding_meta = attr.parse_args_with(UniformBindingMeta::parse)?; let binding_index = uniform_binding_meta.lit_int.base10_parse()?; let ident = uniform_binding_meta.ident; + let binding_array = match uniform_binding_meta.binding_array { + None => None, + Some(binding_array) => Some(binding_array.base10_parse()?), + }; - Ok((binding_index, ident)) + Ok(UniformBindingAttr { + binding_type: if attr_ident == UNIFORM_ATTRIBUTE_NAME { + UniformBindingAttrType::Uniform + } else { + UniformBindingAttrType::Data + }, + binding_index, + converted_shader_type: ident, + binding_array, + }) } fn get_binding_nested_attr(attr: &syn::Attribute) -> Result<(u32, Vec)> { @@ -852,6 +1410,14 @@ fn get_visibility_flag_value(meta_list: &MetaList) -> Result Result { + meta_list + .parse_args_with(|input: ParseStream| input.parse::())? 
+ .base10_parse() +} + #[derive(Clone, Copy, Default)] enum BindingTextureDimension { D1, @@ -1192,6 +1758,7 @@ fn get_sampler_binding_type_value(lit_str: &LitStr) -> Result, read_only: bool, buffer: bool, } @@ -1201,6 +1768,7 @@ const BUFFER: Symbol = Symbol("buffer"); fn get_storage_binding_attr(metas: Vec) -> Result { let mut visibility = ShaderStageVisibility::vertex_fragment(); + let mut binding_array = None; let mut read_only = false; let mut buffer = false; @@ -1211,6 +1779,10 @@ fn get_storage_binding_attr(metas: Vec) -> Result { List(m) if m.path == VISIBILITY => { visibility = get_visibility_flag_value(&m)?; } + // Parse #[storage(0, binding_array(...))] for bindless mode. + List(m) if m.path == BINDING_ARRAY_MODIFIER_NAME => { + binding_array = Some(get_binding_array_flag_value(&m)?); + } Path(path) if path == READ_ONLY => { read_only = true; } @@ -1228,6 +1800,7 @@ fn get_storage_binding_attr(metas: Vec) -> Result { Ok(StorageAttrs { visibility, + binding_array, read_only, buffer, }) diff --git a/crates/bevy_render/macros/src/lib.rs b/crates/bevy_render/macros/src/lib.rs index 5f88f589ebca1..7a04932bcd0d6 100644 --- a/crates/bevy_render/macros/src/lib.rs +++ b/crates/bevy_render/macros/src/lib.rs @@ -11,10 +11,7 @@ use quote::format_ident; use syn::{parse_macro_input, DeriveInput}; pub(crate) fn bevy_render_path() -> syn::Path { - BevyManifest::shared() - .maybe_get_path("bevy_render") - // NOTE: If the derivation is within bevy_render, then we need to return 'crate' - .unwrap_or_else(|| BevyManifest::parse_str("crate")) + BevyManifest::shared().get_path("bevy_render") } #[proc_macro_derive(ExtractResource)] @@ -63,7 +60,8 @@ pub fn derive_extract_component(input: TokenStream) -> TokenStream { sampler, bind_group_data, storage, - bindless + bindless, + data ) )] pub fn derive_as_bind_group(input: TokenStream) -> TokenStream { diff --git a/crates/bevy_render/src/alpha.rs b/crates/bevy_render/src/alpha.rs index 12e1377eabaab..dd748811193d4 100644 --- 
a/crates/bevy_render/src/alpha.rs +++ b/crates/bevy_render/src/alpha.rs @@ -3,7 +3,7 @@ use bevy_reflect::{std_traits::ReflectDefault, Reflect}; // TODO: add discussion about performance. /// Sets how a material's base color alpha channel is used for transparency. #[derive(Debug, Default, Reflect, Copy, Clone, PartialEq)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum AlphaMode { /// Base color alpha values are overridden to be fully opaque (1.0). #[default] diff --git a/crates/bevy_render/src/batching/gpu_preprocessing.rs b/crates/bevy_render/src/batching/gpu_preprocessing.rs index cd1aed53e221b..7de55ee022587 100644 --- a/crates/bevy_render/src/batching/gpu_preprocessing.rs +++ b/crates/bevy_render/src/batching/gpu_preprocessing.rs @@ -1,36 +1,51 @@ //! Batching functionality when GPU preprocessing is in use. +use core::{any::TypeId, marker::PhantomData, mem}; + use bevy_app::{App, Plugin}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - entity::{Entity, EntityHashMap}, + prelude::Entity, query::{Has, With}, - schedule::IntoSystemConfigs as _, - system::{Query, Res, ResMut, Resource, StaticSystemParam}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{Query, Res, ResMut, StaticSystemParam}, world::{FromWorld, World}, }; use bevy_encase_derive::ShaderType; -use bevy_utils::{default, tracing::error}; +use bevy_math::UVec4; +use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; +use bevy_utils::{default, TypeIdMap}; use bytemuck::{Pod, Zeroable}; +use encase::{internal::WriteInto, ShaderSize}; +use indexmap::IndexMap; use nonmax::NonMaxU32; +use tracing::{error, info}; use wgpu::{BindingResource, BufferUsages, DownlevelFlags, Features}; use crate::{ + experimental::occlusion_culling::OcclusionCulling, render_phase::{ - BinnedPhaseItem, BinnedRenderPhaseBatch, BinnedRenderPhaseBatchSets, - CachedRenderPipelinePhaseItem, PhaseItemBinKey as _, PhaseItemExtraIndex, SortedPhaseItem, + BinnedPhaseItem, 
BinnedRenderPhaseBatch, BinnedRenderPhaseBatchSet, + BinnedRenderPhaseBatchSets, CachedRenderPipelinePhaseItem, PhaseItem, + PhaseItemBatchSetKey as _, PhaseItemExtraIndex, RenderBin, SortedPhaseItem, SortedRenderPhase, UnbatchableBinnedEntityIndices, ViewBinnedRenderPhases, ViewSortedRenderPhases, }, - render_resource::{BufferVec, GpuArrayBufferable, RawBufferVec, UninitBufferVec}, + render_resource::{Buffer, GpuArrayBufferable, RawBufferVec, UninitBufferVec}, renderer::{RenderAdapter, RenderDevice, RenderQueue}, - view::{ExtractedView, NoIndirectDrawing, ViewTarget}, - Render, RenderApp, RenderSet, + sync_world::MainEntity, + view::{ExtractedView, NoIndirectDrawing, RetainedViewEntity}, + Render, RenderApp, RenderDebugFlags, RenderSet, }; use super::{BatchMeta, GetBatchData, GetFullBatchData}; -pub struct BatchingPlugin; +#[derive(Default)] +pub struct BatchingPlugin { + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} impl Plugin for BatchingPlugin { fn build(&self, app: &mut App) { @@ -39,10 +54,17 @@ impl Plugin for BatchingPlugin { }; render_app - .insert_resource(IndirectParametersBuffer::new()) + .insert_resource(IndirectParametersBuffers::new( + self.debug_flags + .contains(RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS), + )) + .add_systems( + Render, + write_indirect_parameters_buffers.in_set(RenderSet::PrepareResourcesFlush), + ) .add_systems( Render, - write_indirect_parameters_buffer.in_set(RenderSet::PrepareResourcesFlush), + clear_indirect_parameters_buffers.in_set(RenderSet::ManageViews), ); } @@ -89,6 +111,11 @@ impl GpuPreprocessingSupport { } } } + + /// Returns true if GPU culling is supported on this platform. + pub fn is_culling_supported(&self) -> bool { + self.max_supported_mode == GpuPreprocessingMode::Culling + } } /// The amount of GPU preprocessing (compute and indirect draw) that we do. 
@@ -127,18 +154,6 @@ where BD: GpuArrayBufferable + Sync + Send + 'static, BDI: Pod + Default, { - /// A storage area for the buffer data that the GPU compute shader is - /// expected to write to. - /// - /// There will be one entry for each index. - pub data_buffer: UninitBufferVec, - - /// The index of the buffer data in the current input buffer that - /// corresponds to each instance. - /// - /// This is keyed off each view. Each view has a separate buffer. - pub work_item_buffers: EntityHashMap, - /// The uniform data inputs for the current frame. /// /// These are uploaded during the extraction phase. @@ -152,6 +167,101 @@ where /// data input uniform is expected to contain the index of the /// corresponding buffer data input uniform in this list. pub previous_input_buffer: InstanceInputUniformBuffer, + + /// The data needed to render buffers for each phase. + /// + /// The keys of this map are the type IDs of each phase: e.g. `Opaque3d`, + /// `AlphaMask3d`, etc. + pub phase_instance_buffers: TypeIdMap>, +} + +impl Default for BatchedInstanceBuffers +where + BD: GpuArrayBufferable + Sync + Send + 'static, + BDI: Pod + Sync + Send + Default + 'static, +{ + fn default() -> Self { + BatchedInstanceBuffers { + current_input_buffer: InstanceInputUniformBuffer::new(), + previous_input_buffer: InstanceInputUniformBuffer::new(), + phase_instance_buffers: HashMap::default(), + } + } +} + +/// The GPU buffers holding the data needed to render batches for a single +/// phase. +/// +/// These are split out per phase so that we can run the phases in parallel. +/// This is the version of the structure that has a type parameter, which +/// enables Bevy's scheduler to run the batching operations for the different +/// phases in parallel. +/// +/// See the documentation for [`BatchedInstanceBuffers`] for more information. 
+#[derive(Resource)] +pub struct PhaseBatchedInstanceBuffers +where + PI: PhaseItem, + BD: GpuArrayBufferable + Sync + Send + 'static, +{ + /// The buffers for this phase. + pub buffers: UntypedPhaseBatchedInstanceBuffers, + phantom: PhantomData, +} + +impl Default for PhaseBatchedInstanceBuffers +where + PI: PhaseItem, + BD: GpuArrayBufferable + Sync + Send + 'static, +{ + fn default() -> Self { + PhaseBatchedInstanceBuffers { + buffers: UntypedPhaseBatchedInstanceBuffers::default(), + phantom: PhantomData, + } + } +} + +/// The GPU buffers holding the data needed to render batches for a single +/// phase, without a type parameter for that phase. +/// +/// Since this structure doesn't have a type parameter, it can be placed in +/// [`BatchedInstanceBuffers::phase_instance_buffers`]. +pub struct UntypedPhaseBatchedInstanceBuffers +where + BD: GpuArrayBufferable + Sync + Send + 'static, +{ + /// A storage area for the buffer data that the GPU compute shader is + /// expected to write to. + /// + /// There will be one entry for each index. + pub data_buffer: UninitBufferVec, + + /// The index of the buffer data in the current input buffer that + /// corresponds to each instance. + /// + /// This is keyed off each view. Each view has a separate buffer. + pub work_item_buffers: HashMap, + + /// A buffer that holds the number of indexed meshes that weren't visible in + /// the previous frame, when GPU occlusion culling is in use. + /// + /// There's one set of [`LatePreprocessWorkItemIndirectParameters`] per + /// view. Bevy uses this value to determine how many threads to dispatch to + /// check meshes that weren't visible next frame to see if they became newly + /// visible this frame. + pub late_indexed_indirect_parameters_buffer: + RawBufferVec, + + /// A buffer that holds the number of non-indexed meshes that weren't + /// visible in the previous frame, when GPU occlusion culling is in use. 
+ /// + /// There's one set of [`LatePreprocessWorkItemIndirectParameters`] per + /// view. Bevy uses this value to determine how many threads to dispatch to + /// check meshes that weren't visible next frame to see if they became newly + /// visible this frame. + pub late_non_indexed_indirect_parameters_buffer: + RawBufferVec, } /// Holds the GPU buffer of instance input data, which is the data about each @@ -217,11 +327,31 @@ where } /// Returns the piece of buffered data at the given index. - pub fn get(&self, uniform_index: u32) -> BDI { + /// + /// Returns [`None`] if the index is out of bounds or the data is removed. + pub fn get(&self, uniform_index: u32) -> Option { + if (uniform_index as usize) >= self.buffer.len() + || self.free_uniform_indices.contains(&uniform_index) + { + None + } else { + Some(self.get_unchecked(uniform_index)) + } + } + + /// Returns the piece of buffered data at the given index. + /// Can return data that has previously been removed. + /// + /// # Panics + /// if `uniform_index` is not in bounds of [`Self::buffer`]. + pub fn get_unchecked(&self, uniform_index: u32) -> BDI { self.buffer.values()[uniform_index as usize] } /// Stores a piece of buffered data at the given index. + /// + /// # Panics + /// if `uniform_index` is not in bounds of [`Self::buffer`]. pub fn set(&mut self, uniform_index: u32, element: BDI) { self.buffer.values_mut()[uniform_index as usize] = element; } @@ -233,6 +363,23 @@ where self.buffer.push(default()); } } + + /// Returns the number of instances in this buffer. + pub fn len(&self) -> usize { + self.buffer.len() + } + + /// Returns true if this buffer has no instances or false if it contains any + /// instances. + pub fn is_empty(&self) -> bool { + self.buffer.is_empty() + } + + /// Consumes this [`InstanceInputUniformBuffer`] and returns the raw buffer + /// ready to be uploaded to the GPU. 
+ pub fn into_buffer(self) -> RawBufferVec { + self.buffer + } } impl Default for InstanceInputUniformBuffer @@ -245,107 +392,699 @@ where } /// The buffer of GPU preprocessing work items for a single view. -pub struct PreprocessWorkItemBuffer { - /// The buffer of work items. - pub buffer: BufferVec, - /// True if we're drawing directly instead of indirectly. - pub no_indirect_drawing: bool, +pub enum PreprocessWorkItemBuffers { + /// The work items we use if we aren't using indirect drawing. + /// + /// Because we don't have to separate indexed from non-indexed meshes in + /// direct mode, we only have a single buffer here. + Direct(RawBufferVec), + + /// The buffer of work items we use if we are using indirect drawing. + /// + /// We need to separate out indexed meshes from non-indexed meshes in this + /// case because the indirect parameters for these two types of meshes have + /// different sizes. + Indirect { + /// The buffer of work items corresponding to indexed meshes. + indexed: RawBufferVec, + /// The buffer of work items corresponding to non-indexed meshes. + non_indexed: RawBufferVec, + /// The work item buffers we use when GPU occlusion culling is in use. + gpu_occlusion_culling: Option, + }, +} + +/// The work item buffers we use when GPU occlusion culling is in use. +pub struct GpuOcclusionCullingWorkItemBuffers { + /// The buffer of work items corresponding to indexed meshes. + pub late_indexed: UninitBufferVec, + /// The buffer of work items corresponding to non-indexed meshes. + pub late_non_indexed: UninitBufferVec, + /// The offset into the + /// [`UntypedPhaseBatchedInstanceBuffers::late_indexed_indirect_parameters_buffer`] + /// where this view's indirect dispatch counts for indexed meshes live. + pub late_indirect_parameters_indexed_offset: u32, + /// The offset into the + /// [`UntypedPhaseBatchedInstanceBuffers::late_non_indexed_indirect_parameters_buffer`] + /// where this view's indirect dispatch counts for non-indexed meshes live. 
+ pub late_indirect_parameters_non_indexed_offset: u32, +} + +/// A GPU-side data structure that stores the number of workgroups to dispatch +/// for the second phase of GPU occlusion culling. +/// +/// The late mesh preprocessing phase checks meshes that weren't visible frame +/// to determine if they're potentially visible this frame. +#[derive(Clone, Copy, ShaderType, Pod, Zeroable)] +#[repr(C)] +pub struct LatePreprocessWorkItemIndirectParameters { + /// The number of workgroups to dispatch. + /// + /// This will be equal to `work_item_count / 64`, rounded *up*. + dispatch_x: u32, + /// The number of workgroups along the abstract Y axis to dispatch: always + /// 1. + dispatch_y: u32, + /// The number of workgroups along the abstract Z axis to dispatch: always + /// 1. + dispatch_z: u32, + /// The actual number of work items. + /// + /// The GPU indirect dispatch doesn't read this, but it's used internally to + /// determine the actual number of work items that exist in the late + /// preprocessing work item buffer. + work_item_count: u32, + /// Padding to 64-byte boundaries for some hardware. + pad: UVec4, +} + +impl Default for LatePreprocessWorkItemIndirectParameters { + fn default() -> LatePreprocessWorkItemIndirectParameters { + LatePreprocessWorkItemIndirectParameters { + dispatch_x: 0, + dispatch_y: 1, + dispatch_z: 1, + work_item_count: 0, + pad: default(), + } + } +} + +/// Returns the set of work item buffers for the given view, first creating it +/// if necessary. +/// +/// Bevy uses work item buffers to tell the mesh preprocessing compute shader +/// which meshes are to be drawn. +/// +/// You may need to call this function if you're implementing your own custom +/// render phases. See the `specialized_mesh_pipeline` example. 
+pub fn get_or_create_work_item_buffer<'a, I>( + work_item_buffers: &'a mut HashMap, + view: RetainedViewEntity, + no_indirect_drawing: bool, + enable_gpu_occlusion_culling: bool, +) -> &'a mut PreprocessWorkItemBuffers +where + I: 'static, +{ + let preprocess_work_item_buffers = match work_item_buffers.entry(view) { + Entry::Occupied(occupied_entry) => occupied_entry.into_mut(), + Entry::Vacant(vacant_entry) => { + if no_indirect_drawing { + vacant_entry.insert(PreprocessWorkItemBuffers::Direct(RawBufferVec::new( + BufferUsages::STORAGE, + ))) + } else { + vacant_entry.insert(PreprocessWorkItemBuffers::Indirect { + indexed: RawBufferVec::new(BufferUsages::STORAGE), + non_indexed: RawBufferVec::new(BufferUsages::STORAGE), + // We fill this in below if `enable_gpu_occlusion_culling` + // is set. + gpu_occlusion_culling: None, + }) + } + } + }; + + // Initialize the GPU occlusion culling buffers if necessary. + if let PreprocessWorkItemBuffers::Indirect { + ref mut gpu_occlusion_culling, + .. + } = *preprocess_work_item_buffers + { + match ( + enable_gpu_occlusion_culling, + gpu_occlusion_culling.is_some(), + ) { + (false, false) | (true, true) => {} + (false, true) => { + *gpu_occlusion_culling = None; + } + (true, false) => { + *gpu_occlusion_culling = Some(GpuOcclusionCullingWorkItemBuffers { + late_indexed: UninitBufferVec::new(BufferUsages::STORAGE), + late_non_indexed: UninitBufferVec::new(BufferUsages::STORAGE), + late_indirect_parameters_indexed_offset: 0, + late_indirect_parameters_non_indexed_offset: 0, + }); + } + } + } + + preprocess_work_item_buffers +} + +/// Initializes work item buffers for a phase in preparation for a new frame. 
+pub fn init_work_item_buffers( + work_item_buffers: &mut PreprocessWorkItemBuffers, + late_indexed_indirect_parameters_buffer: &'_ mut RawBufferVec< + LatePreprocessWorkItemIndirectParameters, + >, + late_non_indexed_indirect_parameters_buffer: &'_ mut RawBufferVec< + LatePreprocessWorkItemIndirectParameters, + >, +) { + // Add the offsets for indirect parameters that the late phase of mesh + // preprocessing writes to. + if let PreprocessWorkItemBuffers::Indirect { + gpu_occlusion_culling: + Some(GpuOcclusionCullingWorkItemBuffers { + ref mut late_indirect_parameters_indexed_offset, + ref mut late_indirect_parameters_non_indexed_offset, + .. + }), + .. + } = *work_item_buffers + { + *late_indirect_parameters_indexed_offset = late_indexed_indirect_parameters_buffer + .push(LatePreprocessWorkItemIndirectParameters::default()) + as u32; + *late_indirect_parameters_non_indexed_offset = late_non_indexed_indirect_parameters_buffer + .push(LatePreprocessWorkItemIndirectParameters::default()) + as u32; + } +} + +impl PreprocessWorkItemBuffers { + /// Adds a new work item to the appropriate buffer. + /// + /// `indexed` specifies whether the work item corresponds to an indexed + /// mesh. + pub fn push(&mut self, indexed: bool, preprocess_work_item: PreprocessWorkItem) { + match *self { + PreprocessWorkItemBuffers::Direct(ref mut buffer) => { + buffer.push(preprocess_work_item); + } + PreprocessWorkItemBuffers::Indirect { + indexed: ref mut indexed_buffer, + non_indexed: ref mut non_indexed_buffer, + ref mut gpu_occlusion_culling, + } => { + if indexed { + indexed_buffer.push(preprocess_work_item); + } else { + non_indexed_buffer.push(preprocess_work_item); + } + + if let Some(ref mut gpu_occlusion_culling) = *gpu_occlusion_culling { + if indexed { + gpu_occlusion_culling.late_indexed.add(); + } else { + gpu_occlusion_culling.late_non_indexed.add(); + } + } + } + } + } + + /// Clears out the GPU work item buffers in preparation for a new frame. 
+ pub fn clear(&mut self) { + match *self { + PreprocessWorkItemBuffers::Direct(ref mut buffer) => { + buffer.clear(); + } + PreprocessWorkItemBuffers::Indirect { + indexed: ref mut indexed_buffer, + non_indexed: ref mut non_indexed_buffer, + ref mut gpu_occlusion_culling, + } => { + indexed_buffer.clear(); + non_indexed_buffer.clear(); + + if let Some(ref mut gpu_occlusion_culling) = *gpu_occlusion_culling { + gpu_occlusion_culling.late_indexed.clear(); + gpu_occlusion_culling.late_non_indexed.clear(); + gpu_occlusion_culling.late_indirect_parameters_indexed_offset = 0; + gpu_occlusion_culling.late_indirect_parameters_non_indexed_offset = 0; + } + } + } + } } /// One invocation of the preprocessing shader: i.e. one mesh instance in a /// view. -#[derive(Clone, Copy, Pod, Zeroable, ShaderType)] +#[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] #[repr(C)] pub struct PreprocessWorkItem { /// The index of the batch input data in the input buffer that the shader /// reads from. pub input_index: u32, - /// In direct mode, this is the index of the `MeshUniform` in the output - /// buffer that we write to. In indirect mode, this is the index of the - /// [`IndirectParameters`]. - pub output_index: u32, + + /// In direct mode, the index of the mesh uniform; in indirect mode, the + /// index of the [`IndirectParametersGpuMetadata`]. + /// + /// In indirect mode, this is the index of the + /// [`IndirectParametersGpuMetadata`] in the + /// `IndirectParametersBuffers::indexed_metadata` or + /// `IndirectParametersBuffers::non_indexed_metadata`. + pub output_or_indirect_parameters_index: u32, } -/// The `wgpu` indirect parameters structure. +/// The `wgpu` indirect parameters structure that specifies a GPU draw command. /// -/// This is actually a union of the two following structures: +/// This is the variant for indexed meshes. We generate the instances of this +/// structure in the `build_indirect_params.wgsl` compute shader. 
+#[derive(Clone, Copy, Debug, Pod, Zeroable, ShaderType)] +#[repr(C)] +pub struct IndirectParametersIndexed { + /// The number of indices that this mesh has. + pub index_count: u32, + /// The number of instances we are to draw. + pub instance_count: u32, + /// The offset of the first index for this mesh in the index buffer slab. + pub first_index: u32, + /// The offset of the first vertex for this mesh in the vertex buffer slab. + pub base_vertex: u32, + /// The index of the first mesh instance in the `MeshUniform` buffer. + pub first_instance: u32, +} + +/// The `wgpu` indirect parameters structure that specifies a GPU draw command. /// -/// ``` -/// #[repr(C)] -/// struct ArrayIndirectParameters { -/// vertex_count: u32, -/// instance_count: u32, -/// first_vertex: u32, -/// first_instance: u32, -/// } +/// This is the variant for non-indexed meshes. We generate the instances of +/// this structure in the `build_indirect_params.wgsl` compute shader. +#[derive(Clone, Copy, Debug, Pod, Zeroable, ShaderType)] +#[repr(C)] +pub struct IndirectParametersNonIndexed { + /// The number of vertices that this mesh has. + pub vertex_count: u32, + /// The number of instances we are to draw. + pub instance_count: u32, + /// The offset of the first vertex for this mesh in the vertex buffer slab. + pub base_vertex: u32, + /// The index of the first mesh instance in the `Mesh` buffer. + pub first_instance: u32, +} + +/// A structure, initialized on CPU and read on GPU, that contains metadata +/// about each batch. /// -/// #[repr(C)] -/// struct ElementIndirectParameters { -/// index_count: u32, -/// instance_count: u32, -/// first_vertex: u32, -/// base_vertex: u32, -/// first_instance: u32, -/// } -/// ``` +/// Each batch will have one instance of this structure. +#[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] +#[repr(C)] +pub struct IndirectParametersCpuMetadata { + /// The index of the first instance of this mesh in the array of + /// `MeshUniform`s. 
+ /// + /// Note that this is the *first* output index in this batch. Since each + /// instance of this structure refers to arbitrarily many instances, the + /// `MeshUniform`s corresponding to this batch span the indices + /// `base_output_index..(base_output_index + instance_count)`. + pub base_output_index: u32, + + /// The index of the batch set that this batch belongs to in the + /// [`IndirectBatchSet`] buffer. + /// + /// A *batch set* is a set of meshes that may be multi-drawn together. + /// Multiple batches (and therefore multiple instances of + /// [`IndirectParametersGpuMetadata`] structures) can be part of the same + /// batch set. + pub batch_set_index: u32, +} + +/// A structure, written and read GPU, that records how many instances of each +/// mesh are actually to be drawn. /// -/// We actually generally treat these two variants identically in code. To do -/// that, we make the following two observations: +/// The GPU mesh preprocessing shader increments the +/// [`Self::early_instance_count`] and [`Self::late_instance_count`] as it +/// determines that meshes are visible. The indirect parameter building shader +/// reads this metadata in order to construct the indirect draw parameters. /// -/// 1. `instance_count` is in the same place in both structures. So we can -/// access it regardless of the structure we're looking at. +/// Each batch will have one instance of this structure. +#[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] +#[repr(C)] +pub struct IndirectParametersGpuMetadata { + /// The index of the first mesh in this batch in the array of + /// `MeshInputUniform`s. + pub mesh_index: u32, + + /// The number of instances that were judged visible last frame. + /// + /// The CPU sets this value to 0, and the GPU mesh preprocessing shader + /// increments it as it culls mesh instances. 
+ pub early_instance_count: u32, + + /// The number of instances that have been judged potentially visible this + /// frame that weren't in the last frame's potentially visible set. + /// + /// The CPU sets this value to 0, and the GPU mesh preprocessing shader + /// increments it as it culls mesh instances. + pub late_instance_count: u32, +} + +/// A structure, shared between CPU and GPU, that holds the number of on-GPU +/// indirect draw commands for each *batch set*. /// -/// 2. The second structure is one word larger than the first. Thus we need to -/// pad out the first structure by one word in order to place both structures in -/// an array. If we pad out `ArrayIndirectParameters` by copying the -/// `first_instance` field into the padding, then the resulting union structure -/// will always have a read-only copy of `first_instance` in the final word. We -/// take advantage of this in the shader to reduce branching. -#[derive(Clone, Copy, Pod, Zeroable, ShaderType)] +/// A *batch set* is a set of meshes that may be multi-drawn together. +/// +/// If the current hardware and driver support `multi_draw_indirect_count`, the +/// indirect parameters building shader increments +/// [`Self::indirect_parameters_count`] as it generates indirect parameters. The +/// `multi_draw_indirect_count` command reads +/// [`Self::indirect_parameters_count`] in order to determine how many commands +/// belong to each batch set. +#[derive(Clone, Copy, Default, Pod, Zeroable, ShaderType)] #[repr(C)] -pub struct IndirectParameters { - /// For `ArrayIndirectParameters`, `vertex_count`; for - /// `ElementIndirectParameters`, `index_count`. - pub vertex_or_index_count: u32, +pub struct IndirectBatchSet { + /// The number of indirect parameter commands (i.e. batches) in this batch + /// set. + /// + /// The CPU sets this value to 0 before uploading this structure to GPU. The + /// indirect parameters building shader increments this value as it creates + /// indirect parameters. 
Then the `multi_draw_indirect_count` command reads + /// this value in order to determine how many indirect draw commands to + /// process. + pub indirect_parameters_count: u32, + + /// The offset within the `IndirectParametersBuffers::indexed_data` or + /// `IndirectParametersBuffers::non_indexed_data` of the first indirect draw + /// command for this batch set. + /// + /// The CPU fills out this value. + pub indirect_parameters_base: u32, +} - /// The number of instances we're going to draw. +/// The buffers containing all the information that indirect draw commands +/// (`multi_draw_indirect`, `multi_draw_indirect_count`) use to draw the scene. +/// +/// In addition to the indirect draw buffers themselves, this structure contains +/// the buffers that store [`IndirectParametersGpuMetadata`], which are the +/// structures that culling writes to so that the indirect parameter building +/// pass can determine how many meshes are actually to be drawn. +/// +/// These buffers will remain empty if indirect drawing isn't in use. +#[derive(Resource, Deref, DerefMut)] +pub struct IndirectParametersBuffers { + /// A mapping from a phase type ID to the indirect parameters buffers for + /// that phase. /// - /// This field is in the same place in both structures. - pub instance_count: u32, + /// Examples of phase type IDs are `Opaque3d` and `AlphaMask3d`. + #[deref] + pub buffers: TypeIdMap, + /// If true, this sets the `COPY_SRC` flag on indirect draw parameters so + /// that they can be read back to CPU. + /// + /// This is a debugging feature that may reduce performance. It primarily + /// exists for the `occlusion_culling` example. + pub allow_copies_from_indirect_parameter_buffers: bool, +} + +impl IndirectParametersBuffers { + /// Initializes a new [`IndirectParametersBuffers`] resource. 
+ pub fn new(allow_copies_from_indirect_parameter_buffers: bool) -> IndirectParametersBuffers { + IndirectParametersBuffers { + buffers: TypeIdMap::default(), + allow_copies_from_indirect_parameter_buffers, + } + } +} + +/// The buffers containing all the information that indirect draw commands use +/// to draw the scene, for a single phase. +/// +/// This is the version of the structure that has a type parameter, so that the +/// batching for different phases can run in parallel. +/// +/// See the [`IndirectParametersBuffers`] documentation for more information. +#[derive(Resource)] +pub struct PhaseIndirectParametersBuffers +where + PI: PhaseItem, +{ + /// The indirect draw buffers for the phase. + pub buffers: UntypedPhaseIndirectParametersBuffers, + phantom: PhantomData, +} + +impl PhaseIndirectParametersBuffers +where + PI: PhaseItem, +{ + pub fn new(allow_copies_from_indirect_parameter_buffers: bool) -> Self { + PhaseIndirectParametersBuffers { + buffers: UntypedPhaseIndirectParametersBuffers::new( + allow_copies_from_indirect_parameter_buffers, + ), + phantom: PhantomData, + } + } +} - /// For `ArrayIndirectParameters`, `first_vertex`; for - /// `ElementIndirectParameters`, `first_index`. - pub first_vertex_or_first_index: u32, +/// The buffers containing all the information that indirect draw commands use +/// to draw the scene, for a single phase. +/// +/// This is the version of the structure that doesn't have a type parameter, so +/// that it can be inserted into [`IndirectParametersBuffers::buffers`] +/// +/// See the [`IndirectParametersBuffers`] documentation for more information. +pub struct UntypedPhaseIndirectParametersBuffers { + /// Information that indirect draw commands use to draw indexed meshes in + /// the scene. + pub indexed: MeshClassIndirectParametersBuffers, + /// Information that indirect draw commands use to draw non-indexed meshes + /// in the scene. 
+ pub non_indexed: MeshClassIndirectParametersBuffers, +} + +impl UntypedPhaseIndirectParametersBuffers { + /// Creates the indirect parameters buffers. + pub fn new( + allow_copies_from_indirect_parameter_buffers: bool, + ) -> UntypedPhaseIndirectParametersBuffers { + let mut indirect_parameter_buffer_usages = BufferUsages::STORAGE | BufferUsages::INDIRECT; + if allow_copies_from_indirect_parameter_buffers { + indirect_parameter_buffer_usages |= BufferUsages::COPY_SRC; + } + + UntypedPhaseIndirectParametersBuffers { + non_indexed: MeshClassIndirectParametersBuffers::new( + allow_copies_from_indirect_parameter_buffers, + ), + indexed: MeshClassIndirectParametersBuffers::new( + allow_copies_from_indirect_parameter_buffers, + ), + } + } - /// For `ArrayIndirectParameters`, `first_instance`; for - /// `ElementIndirectParameters`, `base_vertex`. - pub base_vertex_or_first_instance: u32, + /// Reserves space for `count` new batches. + /// + /// The `indexed` parameter specifies whether the meshes that these batches + /// correspond to are indexed or not. + pub fn allocate(&mut self, indexed: bool, count: u32) -> u32 { + if indexed { + self.indexed.allocate(count) + } else { + self.non_indexed.allocate(count) + } + } - /// For `ArrayIndirectParameters`, this is padding; for - /// `ElementIndirectParameters`, this is `first_instance`. + /// Returns the number of batches currently allocated. /// - /// Conventionally, we copy `first_instance` into this field when padding - /// out `ArrayIndirectParameters`. That way, shader code can read this value - /// at the same place, regardless of the specific structure this represents. - pub first_instance: u32, + /// The `indexed` parameter specifies whether the meshes that these batches + /// correspond to are indexed or not. + fn batch_count(&self, indexed: bool) -> usize { + if indexed { + self.indexed.batch_count() + } else { + self.non_indexed.batch_count() + } + } + + /// Returns the number of batch sets currently allocated. 
+ /// + /// The `indexed` parameter specifies whether the meshes that these batch + /// sets correspond to are indexed or not. + pub fn batch_set_count(&self, indexed: bool) -> usize { + if indexed { + self.indexed.batch_sets.len() + } else { + self.non_indexed.batch_sets.len() + } + } + + /// Adds a new batch set to `Self::indexed_batch_sets` or + /// `Self::non_indexed_batch_sets` as appropriate. + /// + /// `indexed` specifies whether the meshes that these batch sets correspond + /// to are indexed or not. `indirect_parameters_base` specifies the offset + /// within `Self::indexed_data` or `Self::non_indexed_data` of the first + /// batch in this batch set. + #[inline] + pub fn add_batch_set(&mut self, indexed: bool, indirect_parameters_base: u32) { + if indexed { + self.indexed.batch_sets.push(IndirectBatchSet { + indirect_parameters_base, + indirect_parameters_count: 0, + }); + } else { + self.non_indexed.batch_sets.push(IndirectBatchSet { + indirect_parameters_base, + indirect_parameters_count: 0, + }); + } + } + + /// Returns the index that a newly-added batch set will have. + /// + /// The `indexed` parameter specifies whether the meshes in such a batch set + /// are indexed or not. + pub fn get_next_batch_set_index(&self, indexed: bool) -> Option { + NonMaxU32::new(self.batch_set_count(indexed) as u32) + } + + /// Clears out the buffers in preparation for a new frame. + pub fn clear(&mut self) { + self.indexed.clear(); + self.non_indexed.clear(); + } } -/// The buffer containing the list of [`IndirectParameters`], for draw commands. -#[derive(Resource, Deref, DerefMut)] -pub struct IndirectParametersBuffer(pub BufferVec); +/// The buffers containing all the information that indirect draw commands use +/// to draw the scene, for a single mesh class (indexed or non-indexed), for a +/// single phase. 
+pub struct MeshClassIndirectParametersBuffers +where + IP: Clone + ShaderSize + WriteInto, +{ + /// The GPU buffer that stores the indirect draw parameters for the meshes. + /// + /// The indirect parameters building shader writes to this buffer, while the + /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from + /// it to perform the draws. + data: UninitBufferVec, + + /// The GPU buffer that holds the data used to construct indirect draw + /// parameters for meshes. + /// + /// The GPU mesh preprocessing shader writes to this buffer, and the + /// indirect parameters building shader reads this buffer to construct the + /// indirect draw parameters. + cpu_metadata: RawBufferVec, + + /// The GPU buffer that holds data built by the GPU used to construct + /// indirect draw parameters for meshes. + /// + /// The GPU mesh preprocessing shader writes to this buffer, and the + /// indirect parameters building shader reads this buffer to construct the + /// indirect draw parameters. + gpu_metadata: UninitBufferVec, + + /// The GPU buffer that holds the number of indirect draw commands for each + /// phase of each view, for meshes. + /// + /// The indirect parameters building shader writes to this buffer, and the + /// `multi_draw_indirect_count` command reads from it in order to know how + /// many indirect draw commands to process. + batch_sets: RawBufferVec, +} + +impl MeshClassIndirectParametersBuffers +where + IP: Clone + ShaderSize + WriteInto, +{ + fn new( + allow_copies_from_indirect_parameter_buffers: bool, + ) -> MeshClassIndirectParametersBuffers { + let mut indirect_parameter_buffer_usages = BufferUsages::STORAGE | BufferUsages::INDIRECT; + if allow_copies_from_indirect_parameter_buffers { + indirect_parameter_buffer_usages |= BufferUsages::COPY_SRC; + } -impl IndirectParametersBuffer { - /// Creates the indirect parameters buffer. 
- pub fn new() -> IndirectParametersBuffer { - IndirectParametersBuffer(BufferVec::new( - BufferUsages::STORAGE | BufferUsages::INDIRECT, - )) + MeshClassIndirectParametersBuffers { + data: UninitBufferVec::new(indirect_parameter_buffer_usages), + cpu_metadata: RawBufferVec::new(BufferUsages::STORAGE), + gpu_metadata: UninitBufferVec::new(BufferUsages::STORAGE), + batch_sets: RawBufferVec::new(indirect_parameter_buffer_usages), + } + } + + /// Returns the GPU buffer that stores the indirect draw parameters for + /// indexed meshes. + /// + /// The indirect parameters building shader writes to this buffer, while the + /// `multi_draw_indirect` or `multi_draw_indirect_count` commands read from + /// it to perform the draws. + #[inline] + pub fn data_buffer(&self) -> Option<&Buffer> { + self.data.buffer() + } + + /// Returns the GPU buffer that holds the CPU-constructed data used to + /// construct indirect draw parameters for meshes. + /// + /// The CPU writes to this buffer, and the indirect parameters building + /// shader reads this buffer to construct the indirect draw parameters. + #[inline] + pub fn cpu_metadata_buffer(&self) -> Option<&Buffer> { + self.cpu_metadata.buffer() + } + + /// Returns the GPU buffer that holds the GPU-constructed data used to + /// construct indirect draw parameters for meshes. + /// + /// The GPU mesh preprocessing shader writes to this buffer, and the + /// indirect parameters building shader reads this buffer to construct the + /// indirect draw parameters. + #[inline] + pub fn gpu_metadata_buffer(&self) -> Option<&Buffer> { + self.gpu_metadata.buffer() + } + + /// Returns the GPU buffer that holds the number of indirect draw commands + /// for each phase of each view. + /// + /// The indirect parameters building shader writes to this buffer, and the + /// `multi_draw_indirect_count` command reads from it in order to know how + /// many indirect draw commands to process. 
+ #[inline] + pub fn batch_sets_buffer(&self) -> Option<&Buffer> { + self.batch_sets.buffer() + } + + /// Reserves space for `count` new batches. + /// + /// This allocates in the [`Self::cpu_metadata`], [`Self::gpu_metadata`], + /// and [`Self::data`] buffers. + fn allocate(&mut self, count: u32) -> u32 { + let length = self.data.len(); + self.cpu_metadata.reserve_internal(count as usize); + self.gpu_metadata.add_multiple(count as usize); + for _ in 0..count { + self.data.add(); + self.cpu_metadata + .push(IndirectParametersCpuMetadata::default()); + } + length as u32 + } + + /// Sets the [`IndirectParametersCpuMetadata`] for the mesh at the given + /// index. + pub fn set(&mut self, index: u32, value: IndirectParametersCpuMetadata) { + self.cpu_metadata.set(index, value); + } + + /// Returns the number of batches corresponding to meshes that are currently + /// allocated. + #[inline] + pub fn batch_count(&self) -> usize { + self.data.len() + } + + /// Clears out all the buffers in preparation for a new frame. + pub fn clear(&mut self) { + self.data.clear(); + self.cpu_metadata.clear(); + self.gpu_metadata.clear(); + self.batch_sets.clear(); } } -impl Default for IndirectParametersBuffer { +impl Default for IndirectParametersBuffers { fn default() -> Self { - Self::new() + // By default, we don't allow GPU indirect parameter mapping, since + // that's a debugging option. + Self::new(false) } } @@ -354,25 +1093,46 @@ impl FromWorld for GpuPreprocessingSupport { let adapter = world.resource::(); let device = world.resource::(); - // Filter some Qualcomm devices on Android as they crash when using GPU - // preprocessing. - // We filter out Adreno 730 and earlier GPUs (except 720, as it's newer - // than 730). + // Filter Android drivers that are incompatible with GPU preprocessing: + // - We filter out Adreno 730 and earlier GPUs (except 720, as it's newer + // than 730). + // - We filter out Mali GPUs with driver versions lower than 48. 
fn is_non_supported_android_device(adapter: &RenderAdapter) -> bool { crate::get_adreno_model(adapter).is_some_and(|model| model != 720 && model <= 730) + || crate::get_mali_driver_version(adapter).is_some_and(|version| version < 48) } - let max_supported_mode = if device.limits().max_compute_workgroup_size_x == 0 || is_non_supported_android_device(adapter) + let culling_feature_support = device.features().contains( + Features::INDIRECT_FIRST_INSTANCE + | Features::MULTI_DRAW_INDIRECT + | Features::PUSH_CONSTANTS, + ); + // Depth downsampling for occlusion culling requires 12 textures + let limit_support = device.limits().max_storage_textures_per_shader_stage >= 12 && + // Even if the adapter supports compute, we might be simulating a lack of + // compute via device limits (see `WgpuSettingsPriority::WebGL2` and + // `wgpu::Limits::downlevel_webgl2_defaults()`). This will have set all the + // `max_compute_*` limits to zero, so we arbitrarily pick one as a canary. + device.limits().max_compute_workgroup_storage_size != 0; + + let downlevel_support = adapter.get_downlevel_capabilities().flags.contains( + DownlevelFlags::COMPUTE_SHADERS | + DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW + ); + + let max_supported_mode = if device.limits().max_compute_workgroup_size_x == 0 + || is_non_supported_android_device(adapter) { + info!( + "GPU preprocessing is not supported on this device. 
\
+                Falling back to CPU preprocessing.",
+            );
             GpuPreprocessingMode::None
-        } else if !device
-            .features()
-            .contains(Features::INDIRECT_FIRST_INSTANCE | Features::MULTI_DRAW_INDIRECT) ||
-            !adapter.get_downlevel_capabilities().flags.contains(
-                DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW)
-        {
+        } else if !(culling_feature_support && limit_support && downlevel_support) {
+            info!("Some GPU preprocessing features are limited on this device.");
             GpuPreprocessingMode::PreprocessingOnly
         } else {
+            info!("GPU preprocessing is fully supported on this device.");
             GpuPreprocessingMode::Culling
         };
 
@@ -383,15 +1143,35 @@ impl FromWorld for GpuPreprocessingSupport {
 impl BatchedInstanceBuffers
 where
     BD: GpuArrayBufferable + Sync + Send + 'static,
-    BDI: Pod + Default,
+    BDI: Pod + Sync + Send + Default + 'static,
 {
     /// Creates new buffers.
     pub fn new() -> Self {
-        BatchedInstanceBuffers {
+        Self::default()
+    }
+
+    /// Clears out the buffers in preparation for a new frame.
+    pub fn clear(&mut self) {
+        for phase_instance_buffer in self.phase_instance_buffers.values_mut() {
+            phase_instance_buffer.clear();
+        }
+    }
+}
+
+impl UntypedPhaseBatchedInstanceBuffers
+where
+    BD: GpuArrayBufferable + Sync + Send + 'static,
+{
+    pub fn new() -> Self {
+        UntypedPhaseBatchedInstanceBuffers {
             data_buffer: UninitBufferVec::new(BufferUsages::STORAGE),
-            work_item_buffers: EntityHashMap::default(),
-            current_input_buffer: InstanceInputUniformBuffer::new(),
-            previous_input_buffer: InstanceInputUniformBuffer::new(),
+            work_item_buffers: HashMap::default(),
+            late_indexed_indirect_parameters_buffer: RawBufferVec::new(
+                BufferUsages::STORAGE | BufferUsages::INDIRECT,
+            ),
+            late_non_indexed_indirect_parameters_buffer: RawBufferVec::new(
+                BufferUsages::STORAGE | BufferUsages::INDIRECT,
+            ),
+        }
+    }
 
@@ -407,16 +1187,20 @@ where
     /// Clears out the buffers in preparation for a new frame. 
pub fn clear(&mut self) { self.data_buffer.clear(); - for work_item_buffer in self.work_item_buffers.values_mut() { - work_item_buffer.buffer.clear(); + self.late_indexed_indirect_parameters_buffer.clear(); + self.late_non_indexed_indirect_parameters_buffer.clear(); + + // Clear each individual set of buffers, but don't depopulate the hash + // table. We want to avoid reallocating these vectors every frame. + for view_work_item_buffers in self.work_item_buffers.values_mut() { + view_work_item_buffers.clear(); } } } -impl Default for BatchedInstanceBuffers +impl Default for UntypedPhaseBatchedInstanceBuffers where BD: GpuArrayBufferable + Sync + Send + 'static, - BDI: Pod + Default, { fn default() -> Self { Self::new() @@ -436,8 +1220,11 @@ where /// The index of the first instance in this batch in the instance buffer. instance_start_index: u32, + /// True if the mesh in question has an index buffer; false otherwise. + indexed: bool, + /// The index of the indirect parameters for this batch in the - /// [`IndirectParametersBuffer`]. + /// [`IndirectParametersBuffers`]. /// /// If CPU culling is being used, then this will be `None`. indirect_parameters_index: Option, @@ -458,15 +1245,29 @@ where /// /// `instance_end_index` is the index of the last instance in this batch /// plus one. 
- fn flush(self, instance_end_index: u32, phase: &mut SortedRenderPhase) - where + fn flush( + self, + instance_end_index: u32, + phase: &mut SortedRenderPhase, + phase_indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers, + ) where I: CachedRenderPipelinePhaseItem + SortedPhaseItem, { let (batch_range, batch_extra_index) = phase.items[self.phase_item_start_index as usize].batch_range_and_extra_index_mut(); *batch_range = self.instance_start_index..instance_end_index; - *batch_extra_index = - PhaseItemExtraIndex::maybe_indirect_parameters_index(self.indirect_parameters_index); + *batch_extra_index = match self.indirect_parameters_index { + Some(indirect_parameters_index) => PhaseItemExtraIndex::IndirectParametersIndex { + range: u32::from(indirect_parameters_index) + ..(u32::from(indirect_parameters_index) + 1), + batch_set_index: None, + }, + None => PhaseItemExtraIndex::None, + }; + if let Some(indirect_parameters_index) = self.indirect_parameters_index { + phase_indirect_parameters_buffers + .add_batch_set(self.indexed, indirect_parameters_index.into()); + } } } @@ -483,71 +1284,97 @@ pub fn clear_batched_gpu_instance_buffers( ) where GFBD: GetFullBatchData, { + // Don't clear the entire table, because that would delete the buffers, and + // we want to reuse those allocations. if let Some(mut gpu_batched_instance_buffers) = gpu_batched_instance_buffers { gpu_batched_instance_buffers.clear(); } } /// A system that removes GPU preprocessing work item buffers that correspond to -/// deleted [`ViewTarget`]s. +/// deleted [`ExtractedView`]s. /// /// This is a separate system from [`clear_batched_gpu_instance_buffers`] -/// because [`ViewTarget`]s aren't created until after the extraction phase is -/// completed. +/// because [`ExtractedView`]s aren't created until after the extraction phase +/// is completed. 
pub fn delete_old_work_item_buffers( mut gpu_batched_instance_buffers: ResMut< BatchedInstanceBuffers, >, - view_targets: Query>, + extracted_views: Query<&ExtractedView>, ) where GFBD: GetFullBatchData, { - gpu_batched_instance_buffers - .work_item_buffers - .retain(|entity, _| view_targets.contains(*entity)); + let retained_view_entities: HashSet<_> = extracted_views + .iter() + .map(|extracted_view| extracted_view.retained_view_entity) + .collect(); + for phase_instance_buffers in gpu_batched_instance_buffers + .phase_instance_buffers + .values_mut() + { + phase_instance_buffers + .work_item_buffers + .retain(|retained_view_entity, _| { + retained_view_entities.contains(retained_view_entity) + }); + } } /// Batch the items in a sorted render phase, when GPU instance buffer building /// is in use. This means comparing metadata needed to draw each phase item and /// trying to combine the draws into a batch. pub fn batch_and_prepare_sorted_render_phase( - gpu_array_buffer: ResMut>, - mut indirect_parameters_buffer: ResMut, + mut phase_batched_instance_buffers: ResMut>, + mut phase_indirect_parameters_buffers: ResMut>, mut sorted_render_phases: ResMut>, - mut views: Query<(Entity, Has), With>, + mut views: Query<( + &ExtractedView, + Has, + Has, + )>, system_param_item: StaticSystemParam, ) where I: CachedRenderPipelinePhaseItem + SortedPhaseItem, GFBD: GetFullBatchData, { // We only process GPU-built batch data in this function. - let BatchedInstanceBuffers { + let UntypedPhaseBatchedInstanceBuffers { ref mut data_buffer, ref mut work_item_buffers, - .. 
- } = gpu_array_buffer.into_inner(); + ref mut late_indexed_indirect_parameters_buffer, + ref mut late_non_indexed_indirect_parameters_buffer, + } = phase_batched_instance_buffers.buffers; - for (view, no_indirect_drawing) in &mut views { - let Some(phase) = sorted_render_phases.get_mut(&view) else { + for (extracted_view, no_indirect_drawing, gpu_occlusion_culling) in &mut views { + let Some(phase) = sorted_render_phases.get_mut(&extracted_view.retained_view_entity) else { continue; }; // Create the work item buffer if necessary. - let work_item_buffer = - work_item_buffers - .entry(view) - .or_insert_with(|| PreprocessWorkItemBuffer { - buffer: BufferVec::new(BufferUsages::STORAGE), - no_indirect_drawing, - }); + let work_item_buffer = get_or_create_work_item_buffer::( + work_item_buffers, + extracted_view.retained_view_entity, + no_indirect_drawing, + gpu_occlusion_culling, + ); + + // Initialize those work item buffers in preparation for this new frame. + init_work_item_buffers( + work_item_buffer, + late_indexed_indirect_parameters_buffer, + late_non_indexed_indirect_parameters_buffer, + ); // Walk through the list of phase items, building up batches as we go. let mut batch: Option> = None; + for current_index in 0..phase.items.len() { // Get the index of the input data, and comparison metadata, for // this entity. let item = &phase.items[current_index]; - let entity = (item.entity(), item.main_entity()); + let entity = item.main_entity(); + let item_is_indexed = item.indexed(); let current_batch_input_index = GFBD::get_index_and_compare_data(&system_param_item, entity); @@ -558,7 +1385,11 @@ pub fn batch_and_prepare_sorted_render_phase( let Some((current_input_index, current_meta)) = current_batch_input_index else { // Break a batch if we need to. 
if let Some(batch) = batch.take() { - batch.flush(data_buffer.len() as u32, phase); + batch.flush( + data_buffer.len() as u32, + phase, + &mut phase_indirect_parameters_buffers.buffers, + ); } continue; @@ -577,32 +1408,53 @@ pub fn batch_and_prepare_sorted_render_phase( }); // Make space in the data buffer for this instance. - let item = &phase.items[current_index]; - let entity = (item.entity(), item.main_entity()); let output_index = data_buffer.add() as u32; // If we can't batch, break the existing batch and make a new one. if !can_batch { // Break a batch if we need to. if let Some(batch) = batch.take() { - batch.flush(output_index, phase); + batch.flush( + output_index, + phase, + &mut phase_indirect_parameters_buffers.buffers, + ); } - // Start a new batch. - let indirect_parameters_index = if !no_indirect_drawing { - GFBD::get_batch_indirect_parameters_index( - &system_param_item, - &mut indirect_parameters_buffer, - entity, - output_index, + let indirect_parameters_index = if no_indirect_drawing { + None + } else if item_is_indexed { + Some( + phase_indirect_parameters_buffers + .buffers + .indexed + .allocate(1), ) } else { - None + Some( + phase_indirect_parameters_buffers + .buffers + .non_indexed + .allocate(1), + ) }; + + // Start a new batch. + if let Some(indirect_parameters_index) = indirect_parameters_index { + GFBD::write_batch_indirect_parameters_metadata( + item_is_indexed, + output_index, + None, + &mut phase_indirect_parameters_buffers.buffers, + indirect_parameters_index, + ); + }; + batch = Some(SortedRenderBatch { phase_item_start_index: current_index as u32, instance_start_index: output_index, - indirect_parameters_index, + indexed: item_is_indexed, + indirect_parameters_index: indirect_parameters_index.and_then(NonMaxU32::new), meta: current_meta, }); } @@ -610,29 +1462,49 @@ pub fn batch_and_prepare_sorted_render_phase( // Add a new preprocessing work item so that the preprocessing // shader will copy the per-instance data over. 
if let Some(batch) = batch.as_ref() { - work_item_buffer.buffer.push(PreprocessWorkItem { - input_index: current_input_index.into(), - output_index: match batch.indirect_parameters_index { - Some(indirect_parameters_index) => indirect_parameters_index.into(), - None => output_index, + work_item_buffer.push( + item_is_indexed, + PreprocessWorkItem { + input_index: current_input_index.into(), + output_or_indirect_parameters_index: match ( + no_indirect_drawing, + batch.indirect_parameters_index, + ) { + (true, _) => output_index, + (false, Some(indirect_parameters_index)) => { + indirect_parameters_index.into() + } + (false, None) => 0, + }, }, - }); + ); } } // Flush the final batch if necessary. if let Some(batch) = batch.take() { - batch.flush(data_buffer.len() as u32, phase); + batch.flush( + data_buffer.len() as u32, + phase, + &mut phase_indirect_parameters_buffers.buffers, + ); } } } /// Creates batches for a render phase that uses bins. pub fn batch_and_prepare_binned_render_phase( - gpu_array_buffer: ResMut>, - mut indirect_parameters_buffer: ResMut, + mut phase_batched_instance_buffers: ResMut>, + phase_indirect_parameters_buffers: ResMut>, mut binned_render_phases: ResMut>, - mut views: Query<(Entity, Has), With>, + mut views: Query< + ( + &ExtractedView, + Has, + Has, + ), + With, + >, param: StaticSystemParam, ) where BPI: BinnedPhaseItem, @@ -640,91 +1512,188 @@ pub fn batch_and_prepare_binned_render_phase( { let system_param_item = param.into_inner(); - let BatchedInstanceBuffers { + let phase_indirect_parameters_buffers = phase_indirect_parameters_buffers.into_inner(); + + let UntypedPhaseBatchedInstanceBuffers { ref mut data_buffer, ref mut work_item_buffers, - .. 
- } = gpu_array_buffer.into_inner(); + ref mut late_indexed_indirect_parameters_buffer, + ref mut late_non_indexed_indirect_parameters_buffer, + } = phase_batched_instance_buffers.buffers; - for (view, no_indirect_drawing) in &mut views { - let Some(phase) = binned_render_phases.get_mut(&view) else { + for (extracted_view, no_indirect_drawing, gpu_occlusion_culling) in &mut views { + let Some(phase) = binned_render_phases.get_mut(&extracted_view.retained_view_entity) else { continue; }; // Create the work item buffer if necessary; otherwise, just mark it as // used this frame. - let work_item_buffer = - work_item_buffers - .entry(view) - .or_insert_with(|| PreprocessWorkItemBuffer { - buffer: BufferVec::new(BufferUsages::STORAGE), - no_indirect_drawing, - }); + let work_item_buffer = get_or_create_work_item_buffer::( + work_item_buffers, + extracted_view.retained_view_entity, + no_indirect_drawing, + gpu_occlusion_culling, + ); + + // Initialize those work item buffers in preparation for this new frame. + init_work_item_buffers( + work_item_buffer, + late_indexed_indirect_parameters_buffer, + late_non_indexed_indirect_parameters_buffer, + ); + + // Prepare multidrawables. + + if let ( + &mut BinnedRenderPhaseBatchSets::MultidrawIndirect(ref mut batch_sets), + &mut PreprocessWorkItemBuffers::Indirect { + indexed: ref mut indexed_work_item_buffer, + non_indexed: ref mut non_indexed_work_item_buffer, + gpu_occlusion_culling: ref mut gpu_occlusion_culling_buffers, + }, + ) = (&mut phase.batch_sets, &mut *work_item_buffer) + { + let mut output_index = data_buffer.len() as u32; + + // Initialize the state for both indexed and non-indexed meshes. 
+ let mut indexed_preparer: MultidrawableBatchSetPreparer = + MultidrawableBatchSetPreparer::new( + phase_indirect_parameters_buffers.buffers.batch_count(true) as u32, + phase_indirect_parameters_buffers + .buffers + .indexed + .batch_sets + .len() as u32, + ); + let mut non_indexed_preparer: MultidrawableBatchSetPreparer = + MultidrawableBatchSetPreparer::new( + phase_indirect_parameters_buffers.buffers.batch_count(false) as u32, + phase_indirect_parameters_buffers + .buffers + .non_indexed + .batch_sets + .len() as u32, + ); + + // Prepare each batch set. + for (batch_set_key, bins) in &phase.multidrawable_meshes { + if batch_set_key.indexed() { + indexed_preparer.prepare_multidrawable_binned_batch_set( + bins, + &mut output_index, + data_buffer, + indexed_work_item_buffer, + &mut phase_indirect_parameters_buffers.buffers.indexed, + batch_sets, + ); + } else { + non_indexed_preparer.prepare_multidrawable_binned_batch_set( + bins, + &mut output_index, + data_buffer, + non_indexed_work_item_buffer, + &mut phase_indirect_parameters_buffers.buffers.non_indexed, + batch_sets, + ); + } + } - // Prepare batchables. + // Reserve space in the occlusion culling buffers, if necessary. + if let Some(gpu_occlusion_culling_buffers) = gpu_occlusion_culling_buffers { + gpu_occlusion_culling_buffers + .late_indexed + .add_multiple(indexed_preparer.work_item_count); + gpu_occlusion_culling_buffers + .late_non_indexed + .add_multiple(non_indexed_preparer.work_item_count); + } + } - // If multi-draw is in use, as we step through the list of batchables, - // we gather adjacent batches that have the same *batch set* key into - // batch sets. This variable stores the last batch set key that we've - // seen. If our current batch set key is identical to this one, we can - // merge the current batch into the last batch set. - let mut maybe_last_multidraw_key = None; + // Prepare batchables. 
- for key in &phase.batchable_mesh_keys { + for (key, bin) in &phase.batchable_meshes { let mut batch: Option = None; - for &(entity, main_entity) in &phase.batchable_mesh_values[key] { - let Some(input_index) = - GFBD::get_binned_index(&system_param_item, (entity, main_entity)) - else { - continue; - }; + for (&main_entity, &input_index) in bin.entities() { let output_index = data_buffer.add() as u32; match batch { Some(ref mut batch) => { - // Append to the current batch. batch.instance_range.end = output_index + 1; - work_item_buffer.buffer.push(PreprocessWorkItem { - input_index: input_index.into(), - output_index: match batch.extra_index { - PhaseItemExtraIndex::IndirectParametersIndex(ref range) => { - range.start - } - PhaseItemExtraIndex::DynamicOffset(_) - | PhaseItemExtraIndex::None => output_index, + + // Append to the current batch. + // + // If we're in indirect mode, then we write the first + // output index of this batch, so that we have a + // tightly-packed buffer if GPU culling discards some of + // the instances. Otherwise, we can just write the + // output index directly. + work_item_buffer.push( + key.0.indexed(), + PreprocessWorkItem { + input_index: *input_index, + output_or_indirect_parameters_index: match ( + no_indirect_drawing, + &batch.extra_index, + ) { + (true, _) => output_index, + ( + false, + PhaseItemExtraIndex::IndirectParametersIndex { + range: indirect_parameters_range, + .. + }, + ) => indirect_parameters_range.start, + (false, &PhaseItemExtraIndex::DynamicOffset(_)) + | (false, &PhaseItemExtraIndex::None) => 0, + }, }, - }); + ); } None if !no_indirect_drawing => { // Start a new batch, in indirect mode. 
- let indirect_parameters_index = GFBD::get_batch_indirect_parameters_index( - &system_param_item, - &mut indirect_parameters_buffer, - (entity, main_entity), + let indirect_parameters_index = phase_indirect_parameters_buffers + .buffers + .allocate(key.0.indexed(), 1); + let batch_set_index = phase_indirect_parameters_buffers + .buffers + .get_next_batch_set_index(key.0.indexed()); + + GFBD::write_batch_indirect_parameters_metadata( + key.0.indexed(), output_index, + batch_set_index, + &mut phase_indirect_parameters_buffers.buffers, + indirect_parameters_index, + ); + work_item_buffer.push( + key.0.indexed(), + PreprocessWorkItem { + input_index: *input_index, + output_or_indirect_parameters_index: indirect_parameters_index, + }, ); - work_item_buffer.buffer.push(PreprocessWorkItem { - input_index: input_index.into(), - output_index: indirect_parameters_index.unwrap_or_default().into(), - }); batch = Some(BinnedRenderPhaseBatch { - representative_entity: (entity, main_entity), + representative_entity: (Entity::PLACEHOLDER, main_entity), instance_range: output_index..output_index + 1, - extra_index: PhaseItemExtraIndex::maybe_indirect_parameters_index( - indirect_parameters_index, - ), + extra_index: PhaseItemExtraIndex::IndirectParametersIndex { + range: indirect_parameters_index..(indirect_parameters_index + 1), + batch_set_index: None, + }, }); } None => { // Start a new batch, in direct mode. 
- work_item_buffer.buffer.push(PreprocessWorkItem { - input_index: input_index.into(), - output_index, - }); + work_item_buffer.push( + key.0.indexed(), + PreprocessWorkItem { + input_index: *input_index, + output_or_indirect_parameters_index: output_index, + }, + ); batch = Some(BinnedRenderPhaseBatch { - representative_entity: (entity, main_entity), + representative_entity: (Entity::PLACEHOLDER, main_entity), instance_range: output_index..output_index + 1, extra_index: PhaseItemExtraIndex::None, }); @@ -740,65 +1709,91 @@ pub fn batch_and_prepare_binned_render_phase( BinnedRenderPhaseBatchSets::Direct(ref mut vec) => { vec.push(batch); } - BinnedRenderPhaseBatchSets::MultidrawIndirect(ref mut batch_sets) => { - // We're in multi-draw mode. Check to see whether our - // batch set key is the same as the last one. If so, - // merge this batch into the preceding batch set. - match (&maybe_last_multidraw_key, key.get_batch_set_key()) { - (Some(ref last_multidraw_key), Some(this_multidraw_key)) - if *last_multidraw_key == this_multidraw_key => - { - batch_sets.last_mut().unwrap().push(batch); - } - (_, maybe_this_multidraw_key) => { - maybe_last_multidraw_key = maybe_this_multidraw_key; - batch_sets.push(vec![batch]); - } - } + BinnedRenderPhaseBatchSets::MultidrawIndirect(ref mut vec) => { + // The Bevy renderer will never mark a mesh as batchable + // but not multidrawable if multidraw is in use. + // However, custom render pipelines might do so, such as + // the `specialized_mesh_pipeline` example. + vec.push(BinnedRenderPhaseBatchSet { + first_batch: batch, + batch_count: 1, + bin_key: key.1.clone(), + index: phase_indirect_parameters_buffers + .buffers + .batch_set_count(key.0.indexed()) + as u32, + }); } } } } // Prepare unbatchables. 
- for key in &phase.unbatchable_mesh_keys { - let unbatchables = phase.unbatchable_mesh_values.get_mut(key).unwrap(); - for &(entity, main_entity) in &unbatchables.entities { - let Some(input_index) = - GFBD::get_binned_index(&system_param_item, (entity, main_entity)) + for (key, unbatchables) in &mut phase.unbatchable_meshes { + // Allocate the indirect parameters if necessary. + let mut indirect_parameters_offset = if no_indirect_drawing { + None + } else if key.0.indexed() { + Some( + phase_indirect_parameters_buffers + .buffers + .indexed + .allocate(unbatchables.entities.len() as u32), + ) + } else { + Some( + phase_indirect_parameters_buffers + .buffers + .non_indexed + .allocate(unbatchables.entities.len() as u32), + ) + }; + + for main_entity in unbatchables.entities.keys() { + let Some(input_index) = GFBD::get_binned_index(&system_param_item, *main_entity) else { continue; }; let output_index = data_buffer.add() as u32; - if !no_indirect_drawing { + if let Some(ref mut indirect_parameters_index) = indirect_parameters_offset { // We're in indirect mode, so add an indirect parameters // index. 
- let indirect_parameters_index = GFBD::get_batch_indirect_parameters_index( - &system_param_item, - &mut indirect_parameters_buffer, - (entity, main_entity), + GFBD::write_batch_indirect_parameters_metadata( + key.0.indexed(), output_index, - ) - .unwrap_or_default(); - work_item_buffer.buffer.push(PreprocessWorkItem { - input_index: input_index.into(), - output_index: indirect_parameters_index.into(), - }); + None, + &mut phase_indirect_parameters_buffers.buffers, + *indirect_parameters_index, + ); + work_item_buffer.push( + key.0.indexed(), + PreprocessWorkItem { + input_index: input_index.into(), + output_or_indirect_parameters_index: *indirect_parameters_index, + }, + ); unbatchables .buffer_indices .add(UnbatchableBinnedEntityIndices { - instance_index: indirect_parameters_index.into(), - extra_index: PhaseItemExtraIndex::IndirectParametersIndex( - u32::from(indirect_parameters_index) - ..(u32::from(indirect_parameters_index) + 1), - ), + instance_index: *indirect_parameters_index, + extra_index: PhaseItemExtraIndex::IndirectParametersIndex { + range: *indirect_parameters_index..(*indirect_parameters_index + 1), + batch_set_index: None, + }, }); + phase_indirect_parameters_buffers + .buffers + .add_batch_set(key.0.indexed(), *indirect_parameters_index); + *indirect_parameters_index += 1; } else { - work_item_buffer.buffer.push(PreprocessWorkItem { - input_index: input_index.into(), - output_index, - }); + work_item_buffer.push( + key.0.indexed(), + PreprocessWorkItem { + input_index: input_index.into(), + output_or_indirect_parameters_index: output_index, + }, + ); unbatchables .buffer_indices .add(UnbatchableBinnedEntityIndices { @@ -811,6 +1806,195 @@ pub fn batch_and_prepare_binned_render_phase( } } +/// The state that [`batch_and_prepare_binned_render_phase`] uses to construct +/// multidrawable batch sets. +/// +/// The [`batch_and_prepare_binned_render_phase`] system maintains two of these: +/// one for indexed meshes and one for non-indexed meshes. 
+struct MultidrawableBatchSetPreparer +where + BPI: BinnedPhaseItem, + GFBD: GetFullBatchData, +{ + /// The offset in the indirect parameters buffer at which the next indirect + /// parameters will be written. + indirect_parameters_index: u32, + /// The number of batch sets we've built so far for this mesh class. + batch_set_index: u32, + /// The number of work items we've emitted so far for this mesh class. + work_item_count: usize, + phantom: PhantomData<(BPI, GFBD)>, +} + +impl MultidrawableBatchSetPreparer +where + BPI: BinnedPhaseItem, + GFBD: GetFullBatchData, +{ + /// Creates a new [`MultidrawableBatchSetPreparer`] that will start writing + /// indirect parameters and batch sets at the given indices. + #[inline] + fn new(initial_indirect_parameters_index: u32, initial_batch_set_index: u32) -> Self { + MultidrawableBatchSetPreparer { + indirect_parameters_index: initial_indirect_parameters_index, + batch_set_index: initial_batch_set_index, + work_item_count: 0, + phantom: PhantomData, + } + } + + /// Creates batch sets and writes the GPU data needed to draw all visible + /// entities of one mesh class in the given batch set. + /// + /// The *mesh class* represents whether the mesh has indices or not. + #[inline] + fn prepare_multidrawable_binned_batch_set( + &mut self, + bins: &IndexMap, + output_index: &mut u32, + data_buffer: &mut UninitBufferVec, + indexed_work_item_buffer: &mut RawBufferVec, + mesh_class_buffers: &mut MeshClassIndirectParametersBuffers, + batch_sets: &mut Vec>, + ) where + IP: Clone + ShaderSize + WriteInto, + { + let current_indexed_batch_set_index = self.batch_set_index; + let current_output_index = *output_index; + + let indirect_parameters_base = self.indirect_parameters_index; + + // We're going to write the first entity into the batch set. Do this + // here so that we can preload the bin into cache as a side effect. 
+ let Some((first_bin_key, first_bin)) = bins.iter().next() else { + return; + }; + let first_bin_len = first_bin.entities().len(); + let first_bin_entity = first_bin + .entities() + .keys() + .next() + .copied() + .unwrap_or(MainEntity::from(Entity::PLACEHOLDER)); + + // Traverse the batch set, processing each bin. + for bin in bins.values() { + // Record the first output index for this batch, as well as its own + // index. + mesh_class_buffers + .cpu_metadata + .push(IndirectParametersCpuMetadata { + base_output_index: *output_index, + batch_set_index: self.batch_set_index, + }); + + // Traverse the bin, pushing `PreprocessWorkItem`s for each entity + // within it. This is a hot loop, so make it as fast as possible. + for &input_index in bin.entities().values() { + indexed_work_item_buffer.push(PreprocessWorkItem { + input_index: *input_index, + output_or_indirect_parameters_index: self.indirect_parameters_index, + }); + } + + // Reserve space for the appropriate number of entities in the data + // buffer. Also, advance the output index and work item count. + let bin_entity_count = bin.entities().len(); + data_buffer.add_multiple(bin_entity_count); + *output_index += bin_entity_count as u32; + self.work_item_count += bin_entity_count; + + self.indirect_parameters_index += 1; + } + + // Reserve space for the bins in this batch set in the GPU buffers. + let bin_count = bins.len(); + mesh_class_buffers.gpu_metadata.add_multiple(bin_count); + mesh_class_buffers.data.add_multiple(bin_count); + + // Write the information the GPU will need about this batch set. + mesh_class_buffers.batch_sets.push(IndirectBatchSet { + indirect_parameters_base, + indirect_parameters_count: 0, + }); + + self.batch_set_index += 1; + + // Record the batch set. The render node later processes this record to + // render the batches. 
+ batch_sets.push(BinnedRenderPhaseBatchSet { + first_batch: BinnedRenderPhaseBatch { + representative_entity: (Entity::PLACEHOLDER, first_bin_entity), + instance_range: current_output_index..(current_output_index + first_bin_len as u32), + extra_index: PhaseItemExtraIndex::maybe_indirect_parameters_index(NonMaxU32::new( + indirect_parameters_base, + )), + }, + bin_key: (*first_bin_key).clone(), + batch_count: self.indirect_parameters_index - indirect_parameters_base, + index: current_indexed_batch_set_index, + }); + } +} + +/// A system that gathers up the per-phase GPU buffers and inserts them into the +/// [`BatchedInstanceBuffers`] and [`IndirectParametersBuffers`] tables. +/// +/// This runs after the [`batch_and_prepare_binned_render_phase`] or +/// [`batch_and_prepare_sorted_render_phase`] systems. It takes the per-phase +/// [`PhaseBatchedInstanceBuffers`] and [`PhaseIndirectParametersBuffers`] +/// resources and inserts them into the global [`BatchedInstanceBuffers`] and +/// [`IndirectParametersBuffers`] tables. +/// +/// This system exists so that the [`batch_and_prepare_binned_render_phase`] and +/// [`batch_and_prepare_sorted_render_phase`] can run in parallel with one +/// another. If those two systems manipulated [`BatchedInstanceBuffers`] and +/// [`IndirectParametersBuffers`] directly, then they wouldn't be able to run in +/// parallel. +pub fn collect_buffers_for_phase( + mut phase_batched_instance_buffers: ResMut>, + mut phase_indirect_parameters_buffers: ResMut>, + mut batched_instance_buffers: ResMut< + BatchedInstanceBuffers, + >, + mut indirect_parameters_buffers: ResMut, +) where + PI: PhaseItem, + GFBD: GetFullBatchData + Send + Sync + 'static, +{ + // Insert the `PhaseBatchedInstanceBuffers` into the global table. Replace + // the contents of the per-phase resource with the old batched instance + // buffers in order to reuse allocations. 
+ let untyped_phase_batched_instance_buffers = + mem::take(&mut phase_batched_instance_buffers.buffers); + if let Some(mut old_untyped_phase_batched_instance_buffers) = batched_instance_buffers + .phase_instance_buffers + .insert(TypeId::of::(), untyped_phase_batched_instance_buffers) + { + old_untyped_phase_batched_instance_buffers.clear(); + phase_batched_instance_buffers.buffers = old_untyped_phase_batched_instance_buffers; + } + + // Insert the `PhaseIndirectParametersBuffers` into the global table. + // Replace the contents of the per-phase resource with the old indirect + // parameters buffers in order to reuse allocations. + let untyped_phase_indirect_parameters_buffers = mem::replace( + &mut phase_indirect_parameters_buffers.buffers, + UntypedPhaseIndirectParametersBuffers::new( + indirect_parameters_buffers.allow_copies_from_indirect_parameter_buffers, + ), + ); + if let Some(mut old_untyped_phase_indirect_parameters_buffers) = indirect_parameters_buffers + .insert( + TypeId::of::(), + untyped_phase_indirect_parameters_buffers, + ) + { + old_untyped_phase_indirect_parameters_buffers.clear(); + phase_indirect_parameters_buffers.buffers = old_untyped_phase_indirect_parameters_buffers; + } +} + /// A system that writes all instance buffers to the GPU. 
pub fn write_batched_instance_buffers( render_device: Res, @@ -820,13 +2004,11 @@ pub fn write_batched_instance_buffers( GFBD: GetFullBatchData, { let BatchedInstanceBuffers { - ref mut data_buffer, - work_item_buffers: ref mut index_buffers, - ref mut current_input_buffer, - ref mut previous_input_buffer, + current_input_buffer, + previous_input_buffer, + phase_instance_buffers, } = gpu_array_buffer.into_inner(); - data_buffer.write_buffer(&render_device); current_input_buffer .buffer .write_buffer(&render_device, &render_queue); @@ -834,18 +2016,117 @@ pub fn write_batched_instance_buffers( .buffer .write_buffer(&render_device, &render_queue); - for index_buffer in index_buffers.values_mut() { - index_buffer - .buffer - .write_buffer(&render_device, &render_queue); + for phase_instance_buffers in phase_instance_buffers.values_mut() { + let UntypedPhaseBatchedInstanceBuffers { + ref mut data_buffer, + ref mut work_item_buffers, + ref mut late_indexed_indirect_parameters_buffer, + ref mut late_non_indexed_indirect_parameters_buffer, + } = *phase_instance_buffers; + + data_buffer.write_buffer(&render_device); + late_indexed_indirect_parameters_buffer.write_buffer(&render_device, &render_queue); + late_non_indexed_indirect_parameters_buffer.write_buffer(&render_device, &render_queue); + + for phase_work_item_buffers in work_item_buffers.values_mut() { + match *phase_work_item_buffers { + PreprocessWorkItemBuffers::Direct(ref mut buffer_vec) => { + buffer_vec.write_buffer(&render_device, &render_queue); + } + PreprocessWorkItemBuffers::Indirect { + ref mut indexed, + ref mut non_indexed, + ref mut gpu_occlusion_culling, + } => { + indexed.write_buffer(&render_device, &render_queue); + non_indexed.write_buffer(&render_device, &render_queue); + + if let Some(GpuOcclusionCullingWorkItemBuffers { + ref mut late_indexed, + ref mut late_non_indexed, + late_indirect_parameters_indexed_offset: _, + late_indirect_parameters_non_indexed_offset: _, + }) = *gpu_occlusion_culling 
+ { + if !late_indexed.is_empty() { + late_indexed.write_buffer(&render_device); + } + if !late_non_indexed.is_empty() { + late_non_indexed.write_buffer(&render_device); + } + } + } + } + } } } -pub fn write_indirect_parameters_buffer( +pub fn clear_indirect_parameters_buffers( + mut indirect_parameters_buffers: ResMut, +) { + for phase_indirect_parameters_buffers in indirect_parameters_buffers.values_mut() { + phase_indirect_parameters_buffers.clear(); + } +} + +pub fn write_indirect_parameters_buffers( render_device: Res, render_queue: Res, - mut indirect_parameters_buffer: ResMut, + mut indirect_parameters_buffers: ResMut, ) { - indirect_parameters_buffer.write_buffer(&render_device, &render_queue); - indirect_parameters_buffer.clear(); + for phase_indirect_parameters_buffers in indirect_parameters_buffers.values_mut() { + phase_indirect_parameters_buffers + .indexed + .data + .write_buffer(&render_device); + phase_indirect_parameters_buffers + .non_indexed + .data + .write_buffer(&render_device); + + phase_indirect_parameters_buffers + .indexed + .cpu_metadata + .write_buffer(&render_device, &render_queue); + phase_indirect_parameters_buffers + .non_indexed + .cpu_metadata + .write_buffer(&render_device, &render_queue); + + phase_indirect_parameters_buffers + .non_indexed + .gpu_metadata + .write_buffer(&render_device); + phase_indirect_parameters_buffers + .indexed + .gpu_metadata + .write_buffer(&render_device); + + phase_indirect_parameters_buffers + .indexed + .batch_sets + .write_buffer(&render_device, &render_queue); + phase_indirect_parameters_buffers + .non_indexed + .batch_sets + .write_buffer(&render_device, &render_queue); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn instance_buffer_correct_behavior() { + let mut instance_buffer = InstanceInputUniformBuffer::new(); + + let index = instance_buffer.add(2); + instance_buffer.remove(index); + assert_eq!(instance_buffer.get_unchecked(index), 2); + 
assert_eq!(instance_buffer.get(index), None); + + instance_buffer.add(5); + assert_eq!(instance_buffer.buffer().len(), 1); + } } diff --git a/crates/bevy_render/src/batching/mod.rs b/crates/bevy_render/src/batching/mod.rs index 31adef26f62e8..40ce7ce3b4aa3 100644 --- a/crates/bevy_render/src/batching/mod.rs +++ b/crates/bevy_render/src/batching/mod.rs @@ -4,23 +4,23 @@ use bevy_ecs::{ system::{ResMut, SystemParam, SystemParamItem}, }; use bytemuck::Pod; +use gpu_preprocessing::UntypedPhaseIndirectParametersBuffers; use nonmax::NonMaxU32; -use self::gpu_preprocessing::IndirectParametersBuffer; -use crate::{render_phase::PhaseItemExtraIndex, sync_world::MainEntity}; use crate::{ render_phase::{ - BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId, SortedPhaseItem, - SortedRenderPhase, ViewBinnedRenderPhases, + BinnedPhaseItem, CachedRenderPipelinePhaseItem, DrawFunctionId, PhaseItemExtraIndex, + SortedPhaseItem, SortedRenderPhase, ViewBinnedRenderPhases, }, render_resource::{CachedRenderPipelineId, GpuArrayBufferable}, + sync_world::MainEntity, }; pub mod gpu_preprocessing; pub mod no_gpu_preprocessing; /// Add this component to mesh entities to disable automatic batching -#[derive(Component)] +#[derive(Component, Default)] pub struct NoAutomaticBatching; /// Data necessary to be equal for two draw commands to be mergeable @@ -58,7 +58,9 @@ impl BatchMeta { PhaseItemExtraIndex::DynamicOffset(dynamic_offset) => { NonMaxU32::new(dynamic_offset) } - PhaseItemExtraIndex::None | PhaseItemExtraIndex::IndirectParametersIndex(_) => None, + PhaseItemExtraIndex::None | PhaseItemExtraIndex::IndirectParametersIndex { .. } => { + None + } }, user_data, } @@ -114,7 +116,7 @@ pub trait GetFullBatchData: GetBatchData { /// [`GetFullBatchData::get_index_and_compare_data`] instead. 
fn get_binned_batch_data( param: &SystemParamItem, - query_item: (Entity, MainEntity), + query_item: MainEntity, ) -> Option; /// Returns the index of the [`GetFullBatchData::BufferInputData`] that the @@ -126,33 +128,54 @@ pub trait GetFullBatchData: GetBatchData { /// function will never be called. fn get_index_and_compare_data( param: &SystemParamItem, - query_item: (Entity, MainEntity), + query_item: MainEntity, ) -> Option<(NonMaxU32, Option)>; /// Returns the index of the [`GetFullBatchData::BufferInputData`] that the - /// GPU preprocessing phase will use, for the binning path. + /// GPU preprocessing phase will use. /// /// We already inserted the [`GetFullBatchData::BufferInputData`] during the /// extraction phase before we got here, so this function shouldn't need to - /// look up any render data. If CPU instance buffer building is in use, this - /// function will never be called. + /// look up any render data. + /// + /// This function is currently only called for unbatchable entities when GPU + /// instance buffer building is in use. For batchable entities, the uniform + /// index is written during queuing (e.g. in `queue_material_meshes`). In + /// the case of CPU instance buffer building, the CPU writes the uniforms, + /// so there's no index to return. fn get_binned_index( param: &SystemParamItem, - query_item: (Entity, MainEntity), + query_item: MainEntity, ) -> Option; - /// Pushes [`gpu_preprocessing::IndirectParameters`] necessary to draw this - /// batch onto the given [`IndirectParametersBuffer`], and returns its + /// Writes the [`gpu_preprocessing::IndirectParametersGpuMetadata`] + /// necessary to draw this batch into the given metadata buffer at the given /// index. /// /// This is only used if GPU culling is enabled (which requires GPU /// preprocessing). 
- fn get_batch_indirect_parameters_index( - param: &SystemParamItem, - indirect_parameters_buffer: &mut IndirectParametersBuffer, - entity: (Entity, MainEntity), - instance_index: u32, - ) -> Option; + /// + /// * `indexed` is true if the mesh is indexed or false if it's non-indexed. + /// + /// * `base_output_index` is the index of the first mesh instance in this + /// batch in the `MeshUniform` output buffer. + /// + /// * `batch_set_index` is the index of the batch set in the + /// [`gpu_preprocessing::IndirectBatchSet`] buffer, if this batch belongs to + /// a batch set. + /// + /// * `indirect_parameters_buffers` is the buffer in which to write the + /// metadata. + /// + /// * `indirect_parameters_offset` is the index in that buffer at which to + /// write the metadata. + fn write_batch_indirect_parameters_metadata( + indexed: bool, + base_output_index: u32, + batch_set_index: Option, + indirect_parameters_buffers: &mut UntypedPhaseIndirectParametersBuffers, + indirect_parameters_offset: u32, + ); } /// Sorts a render phase that uses bins. @@ -161,8 +184,10 @@ where BPI: BinnedPhaseItem, { for phase in phases.values_mut() { - phase.batchable_mesh_keys.sort_unstable(); - phase.unbatchable_mesh_keys.sort_unstable(); + phase.multidrawable_meshes.sort_unstable_keys(); + phase.batchable_meshes.sort_unstable_keys(); + phase.unbatchable_meshes.sort_unstable_keys(); + phase.non_mesh_items.sort_unstable_keys(); } } diff --git a/crates/bevy_render/src/batching/no_gpu_preprocessing.rs b/crates/bevy_render/src/batching/no_gpu_preprocessing.rs index 41ddea778cfd6..8bbbff8dd9e46 100644 --- a/crates/bevy_render/src/batching/no_gpu_preprocessing.rs +++ b/crates/bevy_render/src/batching/no_gpu_preprocessing.rs @@ -1,9 +1,11 @@ //! Batching functionality when GPU preprocessing isn't in use. 
use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::system::{Res, ResMut, Resource, StaticSystemParam}; -use bevy_utils::tracing::error; +use bevy_ecs::entity::Entity; +use bevy_ecs::resource::Resource; +use bevy_ecs::system::{Res, ResMut, StaticSystemParam}; use smallvec::{smallvec, SmallVec}; +use tracing::error; use wgpu::BindingResource; use crate::{ @@ -106,11 +108,11 @@ pub fn batch_and_prepare_binned_render_phase( for phase in phases.values_mut() { // Prepare batchables. - for key in &phase.batchable_mesh_keys { + for bin in phase.batchable_meshes.values_mut() { let mut batch_set: SmallVec<[BinnedRenderPhaseBatch; 1]> = smallvec![]; - for &(entity, main_entity) in &phase.batchable_mesh_values[key] { + for main_entity in bin.entities().keys() { let Some(buffer_data) = - GFBD::get_binned_batch_data(&system_param_item, (entity, main_entity)) + GFBD::get_binned_batch_data(&system_param_item, *main_entity) else { continue; }; @@ -127,7 +129,7 @@ pub fn batch_and_prepare_binned_render_phase( == PhaseItemExtraIndex::maybe_dynamic_offset(instance.dynamic_offset) }) { batch_set.push(BinnedRenderPhaseBatch { - representative_entity: (entity, main_entity), + representative_entity: (Entity::PLACEHOLDER, *main_entity), instance_range: instance.index..instance.index, extra_index: PhaseItemExtraIndex::maybe_dynamic_offset( instance.dynamic_offset, @@ -145,7 +147,7 @@ pub fn batch_and_prepare_binned_render_phase( batch_sets.push(batch_set); } BinnedRenderPhaseBatchSets::Direct(_) - | BinnedRenderPhaseBatchSets::MultidrawIndirect(_) => { + | BinnedRenderPhaseBatchSets::MultidrawIndirect { .. } => { error!( "Dynamic uniform batch sets should be used when GPU preprocessing is off" ); @@ -154,10 +156,10 @@ pub fn batch_and_prepare_binned_render_phase( } // Prepare unbatchables. 
- for key in &phase.unbatchable_mesh_keys { - let unbatchables = phase.unbatchable_mesh_values.get_mut(key).unwrap(); - for &entity in &unbatchables.entities { - let Some(buffer_data) = GFBD::get_binned_batch_data(&system_param_item, entity) + for unbatchables in phase.unbatchable_meshes.values_mut() { + for main_entity in unbatchables.entities.keys() { + let Some(buffer_data) = + GFBD::get_binned_batch_data(&system_param_item, *main_entity) else { continue; }; diff --git a/crates/bevy_render/src/bindless.wgsl b/crates/bevy_render/src/bindless.wgsl new file mode 100644 index 0000000000000..05517a1746d29 --- /dev/null +++ b/crates/bevy_render/src/bindless.wgsl @@ -0,0 +1,37 @@ +// Defines the common arrays used to access bindless resources. +// +// This needs to be kept up to date with the `BINDING_NUMBERS` table in +// `bindless.rs`. +// +// You access these by indexing into the bindless index table, and from there +// indexing into the appropriate binding array. For example, to access the base +// color texture of a `StandardMaterial` in bindless mode, write +// `bindless_textures_2d[materials[slot].base_color_texture]`, where +// `materials` is the bindless index table and `slot` is the index into that +// table (which can be found in the `Mesh`). + +#define_import_path bevy_render::bindless + +#ifdef BINDLESS + +// Binding 0 is the bindless index table. +// Filtering samplers. +@group(2) @binding(1) var bindless_samplers_filtering: binding_array; +// Non-filtering samplers (nearest neighbor). +@group(2) @binding(2) var bindless_samplers_non_filtering: binding_array; +// Comparison samplers (typically for shadow mapping). +@group(2) @binding(3) var bindless_samplers_comparison: binding_array; +// 1D textures. +@group(2) @binding(4) var bindless_textures_1d: binding_array>; +// 2D textures. +@group(2) @binding(5) var bindless_textures_2d: binding_array>; +// 2D array textures. +@group(2) @binding(6) var bindless_textures_2d_array: binding_array>; +// 3D textures. 
+@group(2) @binding(7) var bindless_textures_3d: binding_array>; +// Cubemap textures. +@group(2) @binding(8) var bindless_textures_cube: binding_array>; +// Cubemap array textures. +@group(2) @binding(9) var bindless_textures_cube_array: binding_array>; + +#endif // BINDLESS diff --git a/crates/bevy_render/src/camera/camera.rs b/crates/bevy_render/src/camera/camera.rs index 1dc75c8b16645..95218b7a593cd 100644 --- a/crates/bevy_render/src/camera/camera.rs +++ b/crates/bevy_render/src/camera/camera.rs @@ -1,3 +1,7 @@ +#![expect( + clippy::module_inception, + reason = "The parent module contains all things viewport-related, while this module handles cameras as a component. However, a rename/refactor which should clear up this lint is being discussed; see #17196." +)] use super::{ClearColorConfig, Projection}; use crate::{ batching::gpu_preprocessing::{GpuPreprocessingMode, GpuPreprocessingSupport}, @@ -10,7 +14,7 @@ use crate::{ texture::GpuImage, view::{ ColorGrading, ExtractedView, ExtractedWindows, Msaa, NoIndirectDrawing, RenderLayers, - RenderVisibleEntities, ViewUniformOffset, Visibility, VisibleEntities, + RenderVisibleEntities, RetainedViewEntity, ViewUniformOffset, Visibility, VisibleEntities, }, Extract, }; @@ -18,27 +22,30 @@ use bevy_asset::{AssetEvent, AssetId, Assets, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ change_detection::DetectChanges, - component::{Component, ComponentId, Mutable}, - entity::{Entity, EntityBorrow}, + component::{Component, HookContext}, + entity::{ContainsEntity, Entity}, event::EventReader, - prelude::{require, With}, + prelude::With, query::Has, reflect::ReflectComponent, - system::{Commands, Query, Res, ResMut, Resource}, + resource::Resource, + system::{Commands, Query, Res, ResMut}, world::DeferredWorld, }; use bevy_image::Image; use bevy_math::{ops, vec2, Dir3, FloatOrd, Mat4, Ray3d, Rect, URect, UVec2, UVec4, Vec2, Vec3}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_reflect::prelude::*; 
use bevy_render_macros::ExtractComponent; use bevy_transform::components::{GlobalTransform, Transform}; -use bevy_utils::{tracing::warn, HashMap, HashSet}; use bevy_window::{ NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized, WindowScaleFactorChanged, }; use core::ops::Range; use derive_more::derive::From; +use thiserror::Error; +use tracing::warn; use wgpu::{BlendState, TextureFormat, TextureUsages}; /// Render viewport configuration for the [`Camera`] component. @@ -47,7 +54,7 @@ use wgpu::{BlendState, TextureFormat, TextureUsages}; /// You can overlay multiple cameras in a single window using viewports to create effects like /// split screen, minimaps, and character viewers. #[derive(Reflect, Debug, Clone)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct Viewport { /// The physical position to render this viewport to within the [`RenderTarget`] of this [`Camera`]. /// (0,0) corresponds to the top-left corner @@ -69,6 +76,42 @@ impl Default for Viewport { } } +impl Viewport { + /// Cut the viewport rectangle so that it lies inside a rectangle of the + /// given size. + /// + /// If either of the viewport's position coordinates lies outside the given + /// dimensions, it will be moved just inside first. If either of the given + /// dimensions is zero, the position and size of the viewport rectangle will + /// both be set to zero in that dimension. + pub fn clamp_to_size(&mut self, size: UVec2) { + // If the origin of the viewport rect is outside, then adjust so that + // it's just barely inside. Then, cut off the part that is outside. 
+ if self.physical_size.x + self.physical_position.x > size.x { + if self.physical_position.x < size.x { + self.physical_size.x = size.x - self.physical_position.x; + } else if size.x > 0 { + self.physical_position.x = size.x - 1; + self.physical_size.x = 1; + } else { + self.physical_position.x = 0; + self.physical_size.x = 0; + } + } + if self.physical_size.y + self.physical_position.y > size.y { + if self.physical_position.y < size.y { + self.physical_size.y = size.y - self.physical_position.y; + } else if size.y > 0 { + self.physical_position.y = size.y - 1; + self.physical_size.y = 1; + } else { + self.physical_position.y = 0; + self.physical_size.y = 0; + } + } + } +} + /// Settings to define a camera sub view. /// /// When [`Camera::sub_camera_view`] is `Some`, only the sub-section of the @@ -98,6 +141,7 @@ impl Default for Viewport { /// example have the following values: /// `full_size` = 32x18, `size` = 16x9, `offset` = 16,9 #[derive(Debug, Clone, Copy, Reflect, PartialEq)] +#[reflect(Clone, PartialEq, Default)] pub struct SubCameraView { /// Size of the entire camera view pub full_size: UVec2, @@ -144,7 +188,7 @@ pub struct ComputedCameraValues { /// #[derive(Component, Clone, Copy, Reflect)] #[reflect(opaque)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct Exposure { /// pub ev100: f32, @@ -243,7 +287,7 @@ impl Default for PhysicalCameraParameters { /// Error returned when a conversion between world-space and viewport-space coordinates fails. /// /// See [`world_to_viewport`][Camera::world_to_viewport] and [`viewport_to_world`][Camera::viewport_to_world]. -#[derive(Debug, Eq, PartialEq, Copy, Clone)] +#[derive(Debug, Eq, PartialEq, Copy, Clone, Error)] pub enum ViewportConversionError { /// The pre-computed size of the viewport was not available. 
/// @@ -253,18 +297,22 @@ pub enum ViewportConversionError { /// - it references a [`Window`](RenderTarget::Window) entity that doesn't exist or doesn't actually have a `Window` component, /// - it references an [`Image`](RenderTarget::Image) that doesn't exist (invalid handle), /// - it references a [`TextureView`](RenderTarget::TextureView) that doesn't exist (invalid handle). + #[error("pre-computed size of viewport not available")] NoViewportSize, /// The computed coordinate was beyond the `Camera`'s near plane. /// /// Only applicable when converting from world-space to viewport-space. + #[error("computed coordinate beyond `Camera`'s near plane")] PastNearPlane, /// The computed coordinate was beyond the `Camera`'s far plane. /// /// Only applicable when converting from world-space to viewport-space. + #[error("computed coordinate beyond `Camera`'s far plane")] PastFarPlane, /// The Normalized Device Coordinates could not be computed because the `camera_transform`, the /// `world_position`, or the projection matrix defined by [`CameraProjection`] contained `NAN` /// (see [`world_to_ndc`][Camera::world_to_ndc] and [`ndc_to_world`][Camera::ndc_to_world]). + #[error("found NaN while computing NDC")] InvalidData, } @@ -284,7 +332,7 @@ pub enum ViewportConversionError { /// [`Camera2d`]: https://docs.rs/bevy/latest/bevy/core_pipeline/core_2d/struct.Camera2d.html /// [`Camera3d`]: https://docs.rs/bevy/latest/bevy/core_pipeline/core_3d/struct.Camera3d.html #[derive(Component, Debug, Reflect, Clone)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[component(on_add = warn_on_no_render_graph)] #[require( Frustum, @@ -304,7 +352,7 @@ pub struct Camera { /// camera will not be rendered. pub is_active: bool, /// Computed values for this camera, such as the projection matrix and the render target size. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub computed: ComputedCameraValues, /// The "target" that this camera will render to. 
pub target: RenderTarget, @@ -313,7 +361,7 @@ pub struct Camera { pub hdr: bool, // todo: reflect this when #6042 lands /// The [`CameraOutputMode`] for this camera. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub output_mode: CameraOutputMode, /// If this is enabled, a previous camera exists that shares this camera's render target, and this camera has MSAA enabled, then the previous camera's /// outputs will be written to the intermediate multi-sampled render target textures for this camera. This enables cameras with MSAA enabled to @@ -326,9 +374,9 @@ pub struct Camera { pub sub_camera_view: Option, } -fn warn_on_no_render_graph(world: DeferredWorld, entity: Entity, _: ComponentId) { +fn warn_on_no_render_graph(world: DeferredWorld, HookContext { entity, caller, .. }: HookContext) { if !world.entity(entity).contains::() { - warn!("Entity {entity} has a `Camera` component, but it doesn't have a render graph configured. Consider adding a `Camera2d` or `Camera3d` component, or manually adding a `CameraRenderGraph` component if you need a custom render graph."); + warn!("{}Entity {entity} has a `Camera` component, but it doesn't have a render graph configured. 
Consider adding a `Camera2d` or `Camera3d` component, or manually adding a `CameraRenderGraph` component if you need a custom render graph.", caller.map(|location|format!("{location}: ")).unwrap_or_default()); } } @@ -466,10 +514,10 @@ impl Camera { camera_transform: &GlobalTransform, world_position: Vec3, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; - let ndc_space_coords = self + let mut ndc_space_coords = self .world_to_ndc(camera_transform, world_position) .ok_or(ViewportConversionError::InvalidData)?; // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space @@ -480,10 +528,12 @@ impl Camera { return Err(ViewportConversionError::PastFarPlane); } - // Once in NDC space, we can discard the z element and rescale x/y to fit the screen - let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size; // Flip the Y co-ordinate origin from the bottom to the top. 
- viewport_position.y = target_size.y - viewport_position.y; + ndc_space_coords.y = -ndc_space_coords.y; + + // Once in NDC space, we can discard the z element and map x/y to the viewport rect + let viewport_position = + (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min; Ok(viewport_position) } @@ -502,10 +552,10 @@ impl Camera { camera_transform: &GlobalTransform, world_position: Vec3, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; - let ndc_space_coords = self + let mut ndc_space_coords = self .world_to_ndc(camera_transform, world_position) .ok_or(ViewportConversionError::InvalidData)?; // NDC z-values outside of 0 < z < 1 are outside the (implicit) camera frustum and are thus not in viewport-space @@ -519,10 +569,12 @@ impl Camera { // Stretching ndc depth to value via near plane and negating result to be in positive room again. let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z); - // Once in NDC space, we can discard the z element and rescale x/y to fit the screen - let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size; // Flip the Y co-ordinate origin from the bottom to the top. 
- viewport_position.y = target_size.y - viewport_position.y; + ndc_space_coords.y = -ndc_space_coords.y; + + // Once in NDC space, we can discard the z element and map x/y to the viewport rect + let viewport_position = + (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_rect.size() + target_rect.min; Ok(viewport_position.extend(depth)) } @@ -542,15 +594,16 @@ impl Camera { pub fn viewport_to_world( &self, camera_transform: &GlobalTransform, - mut viewport_position: Vec2, + viewport_position: Vec2, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; + let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size(); // Flip the Y co-ordinate origin from the top to the bottom. - viewport_position.y = target_size.y - viewport_position.y; - let ndc = viewport_position * 2. / target_size - Vec2::ONE; + rect_relative.y = 1.0 - rect_relative.y; + let ndc = rect_relative * 2. - Vec2::ONE; let ndc_to_world = camera_transform.compute_matrix() * self.computed.clip_from_view.inverse(); let world_near_plane = ndc_to_world.project_point3(ndc.extend(1.)); @@ -580,14 +633,17 @@ impl Camera { pub fn viewport_to_world_2d( &self, camera_transform: &GlobalTransform, - mut viewport_position: Vec2, + viewport_position: Vec2, ) -> Result { - let target_size = self - .logical_viewport_size() + let target_rect = self + .logical_viewport_rect() .ok_or(ViewportConversionError::NoViewportSize)?; + let mut rect_relative = (viewport_position - target_rect.min) / target_rect.size(); + // Flip the Y co-ordinate origin from the top to the bottom. - viewport_position.y = target_size.y - viewport_position.y; - let ndc = viewport_position * 2. / target_size - Vec2::ONE; + rect_relative.y = 1.0 - rect_relative.y; + + let ndc = rect_relative * 2. 
- Vec2::ONE; let world_near_plane = self .ndc_to_world(camera_transform, ndc.extend(1.)) @@ -694,7 +750,7 @@ impl Default for CameraOutputMode { /// Configures the [`RenderGraph`](crate::render_graph::RenderGraph) name assigned to be run for a given [`Camera`] entity. #[derive(Component, Debug, Deref, DerefMut, Reflect, Clone)] #[reflect(opaque)] -#[reflect(Component, Debug)] +#[reflect(Component, Debug, Clone)] pub struct CameraRenderGraph(InternedRenderSubGraph); impl CameraRenderGraph { @@ -714,6 +770,7 @@ impl CameraRenderGraph { /// The "target" that a [`Camera`] will render to. For example, this could be a [`Window`] /// swapchain or an [`Image`]. #[derive(Debug, Clone, Reflect, From)] +#[reflect(Clone)] pub enum RenderTarget { /// Window to which the camera's view is rendered. Window(WindowRef), @@ -726,6 +783,7 @@ pub enum RenderTarget { /// A render target that renders to an [`Image`]. #[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[reflect(Clone, PartialEq, Hash)] pub struct ImageRenderTarget { /// The image to render to. pub handle: Handle, @@ -759,6 +817,7 @@ impl Default for RenderTarget { /// /// Once we have this we shouldn't need to resolve it down anymore. #[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)] +#[reflect(Clone, PartialEq, Hash)] pub enum NormalizedRenderTarget { /// Window to which the camera's view is rendered. Window(NormalizedWindowRef), @@ -883,13 +942,7 @@ impl NormalizedRenderTarget { /// System in charge of updating a [`Camera`] when its window or projection changes. /// /// The system detects window creation, resize, and scale factor change events to update the camera -/// projection if needed. It also queries any [`CameraProjection`] component associated with the same -/// entity as the [`Camera`] one, to automatically update the camera projection matrix. 
-/// -/// The system function is generic over the camera projection type, and only instances of -/// [`OrthographicProjection`] and [`PerspectiveProjection`] are automatically added to -/// the app, as well as the runtime-selected [`Projection`]. -/// The system runs during [`PostUpdate`](bevy_app::PostUpdate). +/// [`Projection`] if needed. /// /// ## World Resources /// @@ -898,8 +951,7 @@ impl NormalizedRenderTarget { /// /// [`OrthographicProjection`]: crate::camera::OrthographicProjection /// [`PerspectiveProjection`]: crate::camera::PerspectiveProjection -#[allow(clippy::too_many_arguments)] -pub fn camera_system>( +pub fn camera_system( mut window_resized_events: EventReader, mut window_created_events: EventReader, mut window_scale_factor_changed_events: EventReader, @@ -908,7 +960,7 @@ pub fn camera_system>( windows: Query<(Entity, &Window)>, images: Res>, manual_texture_views: Res, - mut cameras: Query<(&mut Camera, &mut T)>, + mut cameras: Query<(&mut Camera, &mut Projection)>, ) { let primary_window = primary_window.iter().next(); @@ -943,7 +995,7 @@ pub fn camera_system>( || camera.computed.old_sub_camera_view != camera.sub_camera_view { let new_computed_target_info = normalized_target.get_render_target_info( - &windows, + windows, &images, &manual_texture_views, ); @@ -973,18 +1025,13 @@ pub fn camera_system>( } } } - // This check is needed because when changing WindowMode to SizedFullscreen, the viewport may have invalid + // This check is needed because when changing WindowMode to Fullscreen, the viewport may have invalid // arguments due to a sudden change on the window size to a lower value. // If the size of the window is lower, the viewport will match that lower value. 
if let Some(viewport) = &mut camera.viewport { let target_info = &new_computed_target_info; if let Some(target) = target_info { - if viewport.physical_size.x > target.physical_size.x { - viewport.physical_size.x = target.physical_size.x; - } - if viewport.physical_size.y > target.physical_size.y { - viewport.physical_size.y = target.physical_size.y; - } + viewport.clamp_to_size(target.physical_size); } } camera.computed.target_info = new_computed_target_info; @@ -1015,7 +1062,7 @@ pub fn camera_system>( /// This component lets you control the [`TextureUsages`] field of the main texture generated for the camera #[derive(Component, ExtractComponent, Clone, Copy, Reflect)] #[reflect(opaque)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct CameraMainTextureUsages(pub TextureUsages); impl Default for CameraMainTextureUsages { fn default() -> Self { @@ -1047,6 +1094,7 @@ pub fn extract_cameras( mut commands: Commands, query: Extract< Query<( + Entity, RenderEntity, &Camera, &CameraRenderGraph, @@ -1067,6 +1115,7 @@ pub fn extract_cameras( ) { let primary_window = primary_window.iter().next(); for ( + main_entity, render_entity, camera, camera_render_graph, @@ -1133,6 +1182,7 @@ pub fn extract_cameras( }) .collect(), }; + let mut commands = commands.entity(render_entity); commands.insert(( ExtractedCamera { @@ -1153,6 +1203,7 @@ pub fn extract_cameras( hdr: camera.hdr, }, ExtractedView { + retained_view_entity: RetainedViewEntity::new(main_entity.into(), None, 0), clip_from_view: camera.clip_from_view(), world_from_view: *transform, clip_from_world: None, @@ -1220,10 +1271,7 @@ pub fn sort_cameras( // sort by order and ensure within an order, RenderTargets of the same type are packed together sorted_cameras .0 - .sort_by(|c1, c2| match c1.order.cmp(&c2.order) { - core::cmp::Ordering::Equal => c1.target.cmp(&c2.target), - ord => ord, - }); + .sort_by(|c1, c2| (c1.order, &c1.target).cmp(&(c2.order, &c2.target))); let mut 
previous_order_target = None; let mut ambiguities = >::default(); let mut target_counts = >::default(); @@ -1265,7 +1313,7 @@ pub fn sort_cameras( /// /// [`OrthographicProjection`]: crate::camera::OrthographicProjection #[derive(Component, Clone, Default, Reflect)] -#[reflect(Default, Component)] +#[reflect(Default, Component, Clone)] pub struct TemporalJitter { /// Offset is in range [-0.5, 0.5]. pub offset: Vec2, diff --git a/crates/bevy_render/src/camera/camera_driver_node.rs b/crates/bevy_render/src/camera/camera_driver_node.rs index 99a988b9f32df..8be5a345b4c4a 100644 --- a/crates/bevy_render/src/camera/camera_driver_node.rs +++ b/crates/bevy_render/src/camera/camera_driver_node.rs @@ -4,8 +4,8 @@ use crate::{ renderer::RenderContext, view::ExtractedWindows, }; -use bevy_ecs::{entity::EntityBorrow, prelude::QueryState, world::World}; -use bevy_utils::HashSet; +use bevy_ecs::{entity::ContainsEntity, prelude::QueryState, world::World}; +use bevy_platform::collections::HashSet; use wgpu::{LoadOp, Operations, RenderPassColorAttachment, RenderPassDescriptor, StoreOp}; pub struct CameraDriverNode { @@ -71,7 +71,7 @@ impl Node for CameraDriverNode { }; #[cfg(feature = "trace")] - let _span = bevy_utils::tracing::info_span!("no_camera_clear_pass").entered(); + let _span = tracing::info_span!("no_camera_clear_pass").entered(); let pass_descriptor = RenderPassDescriptor { label: Some("no_camera_clear_pass"), color_attachments: &[Some(RenderPassColorAttachment { diff --git a/crates/bevy_render/src/camera/clear_color.rs b/crates/bevy_render/src/camera/clear_color.rs index 49dbdea76bf2a..157bcf899878a 100644 --- a/crates/bevy_render/src/camera/clear_color.rs +++ b/crates/bevy_render/src/camera/clear_color.rs @@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize}; /// For a camera, specifies the color used to clear the viewport before rendering. 
#[derive(Reflect, Serialize, Deserialize, Copy, Clone, Debug, Default, From)] -#[reflect(Serialize, Deserialize, Default)] +#[reflect(Serialize, Deserialize, Default, Clone)] pub enum ClearColorConfig { /// The clear color is taken from the world's [`ClearColor`] resource. #[default] @@ -26,7 +26,7 @@ pub enum ClearColorConfig { /// This color appears as the "background" color for simple apps, /// when there are portions of the screen with nothing rendered. #[derive(Resource, Clone, Debug, Deref, DerefMut, ExtractResource, Reflect)] -#[reflect(Resource, Default, Debug)] +#[reflect(Resource, Default, Debug, Clone)] pub struct ClearColor(pub Color); /// Match the dark gray bevy website code block color by default. diff --git a/crates/bevy_render/src/camera/manual_texture_view.rs b/crates/bevy_render/src/camera/manual_texture_view.rs index 11d82364a611a..56eff5612a905 100644 --- a/crates/bevy_render/src/camera/manual_texture_view.rs +++ b/crates/bevy_render/src/camera/manual_texture_view.rs @@ -1,14 +1,14 @@ use crate::{extract_resource::ExtractResource, render_resource::TextureView}; -use bevy_ecs::{prelude::Component, reflect::ReflectComponent, system::Resource}; +use bevy_ecs::{prelude::Component, reflect::ReflectComponent, resource::Resource}; use bevy_image::BevyDefault as _; use bevy_math::UVec2; +use bevy_platform::collections::HashMap; use bevy_reflect::prelude::*; -use bevy_utils::HashMap; use wgpu::TextureFormat; /// A unique id that corresponds to a specific [`ManualTextureView`] in the [`ManualTextureViews`] collection. #[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Component, Reflect)] -#[reflect(Component, Default, Debug, PartialEq, Hash)] +#[reflect(Component, Default, Debug, PartialEq, Hash, Clone)] pub struct ManualTextureViewHandle(pub u32); /// A manually managed [`TextureView`] for use as a [`crate::camera::RenderTarget`]. 
diff --git a/crates/bevy_render/src/camera/mod.rs b/crates/bevy_render/src/camera/mod.rs index 83c882cc3ed8e..4c77021bad762 100644 --- a/crates/bevy_render/src/camera/mod.rs +++ b/crates/bevy_render/src/camera/mod.rs @@ -1,4 +1,3 @@ -#[allow(clippy::module_inception)] mod camera; mod camera_driver_node; mod clear_color; @@ -16,7 +15,7 @@ use crate::{ render_graph::RenderGraph, ExtractSchedule, Render, RenderApp, RenderSet, }; use bevy_app::{App, Plugin}; -use bevy_ecs::schedule::IntoSystemConfigs; +use bevy_ecs::schedule::IntoScheduleConfigs; #[derive(Default)] pub struct CameraPlugin; @@ -33,9 +32,7 @@ impl Plugin for CameraPlugin { .init_resource::() .init_resource::() .add_plugins(( - CameraProjectionPlugin::::default(), - CameraProjectionPlugin::::default(), - CameraProjectionPlugin::::default(), + CameraProjectionPlugin, ExtractResourcePlugin::::default(), ExtractResourcePlugin::::default(), ExtractComponentPlugin::::default(), diff --git a/crates/bevy_render/src/camera/projection.rs b/crates/bevy_render/src/camera/projection.rs index fd3880c1db6bf..e3f95cb0361e5 100644 --- a/crates/bevy_render/src/camera/projection.rs +++ b/crates/bevy_render/src/camera/projection.rs @@ -1,12 +1,12 @@ -use core::marker::PhantomData; +use core::fmt::Debug; use crate::{primitives::Frustum, view::VisibilitySystems}; use bevy_app::{App, Plugin, PostStartup, PostUpdate}; -use bevy_ecs::{component::Mutable, prelude::*}; +use bevy_asset::AssetEvents; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::prelude::*; use bevy_math::{ops, AspectRatio, Mat4, Rect, Vec2, Vec3A, Vec4}; -use bevy_reflect::{ - std_traits::ReflectDefault, GetTypeRegistration, Reflect, ReflectDeserialize, ReflectSerialize, -}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect, ReflectDeserialize, ReflectSerialize}; use bevy_transform::{components::GlobalTransform, TransformSystem}; use derive_more::derive::From; use serde::{Deserialize, Serialize}; @@ -14,49 +14,33 @@ use serde::{Deserialize, Serialize}; 
/// Adds [`Camera`](crate::camera::Camera) driver systems for a given projection type. /// /// If you are using `bevy_pbr`, then you need to add `PbrProjectionPlugin` along with this. -pub struct CameraProjectionPlugin( - PhantomData, -); -impl + GetTypeRegistration> Plugin - for CameraProjectionPlugin -{ +#[derive(Default)] +pub struct CameraProjectionPlugin; + +impl Plugin for CameraProjectionPlugin { fn build(&self, app: &mut App) { - app.register_type::() + app.register_type::() + .register_type::() + .register_type::() + .register_type::() .add_systems( PostStartup, - crate::camera::camera_system:: - .in_set(CameraUpdateSystem) - // We assume that each camera will only have one projection, - // so we can ignore ambiguities with all other monomorphizations. - // FIXME: Add an archetype invariant for this https://github.com/bevyengine/bevy/issues/1481. - .ambiguous_with(CameraUpdateSystem), + crate::camera::camera_system.in_set(CameraUpdateSystem), ) .add_systems( PostUpdate, ( - crate::camera::camera_system:: + crate::camera::camera_system .in_set(CameraUpdateSystem) - // We assume that each camera will only have one projection, - // so we can ignore ambiguities with all other monomorphizations. - // FIXME: Add an archetype invariant for this https://github.com/bevyengine/bevy/issues/1481. - .ambiguous_with(CameraUpdateSystem), - crate::view::update_frusta:: + .before(AssetEvents), + crate::view::update_frusta .in_set(VisibilitySystems::UpdateFrusta) - .after(crate::camera::camera_system::) - .after(TransformSystem::TransformPropagate) - // We assume that no camera will have more than one projection component, - // so these systems will run independently of one another. - // FIXME: Add an archetype invariant for this https://github.com/bevyengine/bevy/issues/1481. 
- .ambiguous_with(VisibilitySystems::UpdateFrusta), + .after(crate::camera::camera_system) + .after(TransformSystem::TransformPropagate), ), ); } } -impl Default for CameraProjectionPlugin { - fn default() -> Self { - Self(Default::default()) - } -} /// Label for [`camera_system`], shared across all `T`. /// @@ -64,21 +48,40 @@ impl Default for CameraPr #[derive(SystemSet, Clone, Eq, PartialEq, Hash, Debug)] pub struct CameraUpdateSystem; -/// Trait to control the projection matrix of a camera. +/// Describes a type that can generate a projection matrix, allowing it to be added to a +/// [`Camera`]'s [`Projection`] component. /// -/// Components implementing this trait are automatically polled for changes, and used -/// to recompute the camera projection matrix of the [`Camera`] component attached to -/// the same entity as the component implementing this trait. +/// Once implemented, the projection can be added to a camera using [`Projection::custom`]. /// -/// Use the plugins [`CameraProjectionPlugin`] and `bevy::pbr::PbrProjectionPlugin` to setup the -/// systems for your [`CameraProjection`] implementation. +/// The projection will be automatically updated as the render area is resized. This is useful when, +/// for example, a projection type has a field like `fov` that should change when the window width +/// is changed but not when the height changes. +/// +/// This trait is implemented by bevy's built-in projections [`PerspectiveProjection`] and +/// [`OrthographicProjection`]. /// /// [`Camera`]: crate::camera::Camera pub trait CameraProjection { + /// Generate the projection matrix. fn get_clip_from_view(&self) -> Mat4; + + /// Generate the projection matrix for a [`SubCameraView`](super::SubCameraView). fn get_clip_from_view_for_sub(&self, sub_view: &super::SubCameraView) -> Mat4; + + /// When the area this camera renders to changes dimensions, this method will be automatically + /// called. 
Use this to update any projection properties that depend on the aspect ratio or + /// dimensions of the render area. fn update(&mut self, width: f32, height: f32); + + /// The far plane distance of the projection. fn far(&self) -> f32; + + /// The eight corners of the camera frustum, as defined by this projection. + /// + /// The corners should be provided in the following order: first the bottom right, top right, + /// top left, bottom left for the near plane, then similar for the far plane. + // TODO: This seems somewhat redundant with `compute_frustum`, and similarly should be possible + // to compute with a default impl. fn get_frustum_corners(&self, z_near: f32, z_far: f32) -> [Vec3A; 8]; /// Compute camera frustum for camera with given projection and transform. @@ -97,12 +100,152 @@ pub trait CameraProjection { } } -/// A configurable [`CameraProjection`] that can select its projection type at runtime. +mod sealed { + use super::CameraProjection; + + /// A wrapper trait to make it possible to implement Clone for boxed [`super::CameraProjection`] + /// trait objects, without breaking object safety rules by making it `Sized`. Additional bounds + /// are included for downcasting, and fulfilling the trait bounds on `Projection`. + pub trait DynCameraProjection: + CameraProjection + core::fmt::Debug + Send + Sync + downcast_rs::Downcast + { + fn clone_box(&self) -> Box; + } + + downcast_rs::impl_downcast!(DynCameraProjection); + + impl DynCameraProjection for T + where + T: 'static + CameraProjection + core::fmt::Debug + Send + Sync + Clone, + { + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } + } +} + +/// Holds a dynamic [`CameraProjection`] trait object. Use [`Projection::custom()`] to construct a +/// custom projection. +/// +/// The contained dynamic object can be downcast into a static type using [`CustomProjection::get`]. 
+#[derive(Component, Debug, Reflect, Deref, DerefMut)] +#[reflect(Default, Clone)] +pub struct CustomProjection { + #[reflect(ignore)] + #[deref] + dyn_projection: Box, +} + +impl Default for CustomProjection { + fn default() -> Self { + Self { + dyn_projection: Box::new(PerspectiveProjection::default()), + } + } +} + +impl Clone for CustomProjection { + fn clone(&self) -> Self { + Self { + dyn_projection: self.dyn_projection.clone_box(), + } + } +} + +impl CustomProjection { + /// Returns a reference to the [`CameraProjection`] `P`. + /// + /// Returns `None` if this dynamic object is not a projection of type `P`. + /// + /// ``` + /// # use bevy_render::prelude::{Projection, PerspectiveProjection}; + /// // For simplicity's sake, use perspective as a custom projection: + /// let projection = Projection::custom(PerspectiveProjection::default()); + /// let Projection::Custom(custom) = projection else { return }; + /// + /// // At this point the projection type is erased. + /// // We can use `get()` if we know what kind of projection we have. + /// let perspective = custom.get::().unwrap(); + /// + /// assert_eq!(perspective.fov, PerspectiveProjection::default().fov); + /// ``` + pub fn get

(&self) -> Option<&P> + where + P: CameraProjection + Debug + Send + Sync + Clone + 'static, + { + self.dyn_projection.downcast_ref() + } + + /// Returns a mutable reference to the [`CameraProjection`] `P`. + /// + /// Returns `None` if this dynamic object is not a projection of type `P`. + /// + /// ``` + /// # use bevy_render::prelude::{Projection, PerspectiveProjection}; + /// // For simplicity's sake, use perspective as a custom projection: + /// let mut projection = Projection::custom(PerspectiveProjection::default()); + /// let Projection::Custom(mut custom) = projection else { return }; + /// + /// // At this point the projection type is erased. + /// // We can use `get_mut()` if we know what kind of projection we have. + /// let perspective = custom.get_mut::().unwrap(); + /// + /// assert_eq!(perspective.fov, PerspectiveProjection::default().fov); + /// perspective.fov = 1.0; + /// ``` + pub fn get_mut

(&mut self) -> Option<&mut P> + where + P: CameraProjection + Debug + Send + Sync + Clone + 'static, + { + self.dyn_projection.downcast_mut() + } +} + +/// Component that defines how to compute a [`Camera`]'s projection matrix. +/// +/// Common projections, like perspective and orthographic, are provided out of the box to handle the +/// majority of use cases. Custom projections can be added using the [`CameraProjection`] trait and +/// the [`Projection::custom`] constructor. +/// +/// ## What's a projection? +/// +/// A camera projection essentially describes how 3d points from the point of view of a camera are +/// projected onto a 2d screen. This is where properties like a camera's field of view are defined. +/// More specifically, a projection is a 4x4 matrix that transforms points from view space (the +/// point of view of the camera) into clip space. Clip space is almost, but not quite, equivalent to +/// the rectangle that is rendered to your screen, with a depth axis. Any points that land outside +/// the bounds of this cuboid are "clipped" and not rendered. +/// +/// You can also think of the projection as the thing that describes the shape of a camera's +/// frustum: the volume in 3d space that is visible to a camera. +/// +/// [`Camera`]: crate::camera::Camera #[derive(Component, Debug, Clone, Reflect, From)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub enum Projection { Perspective(PerspectiveProjection), Orthographic(OrthographicProjection), + Custom(CustomProjection), +} + +impl Projection { + /// Construct a new custom camera projection from a type that implements [`CameraProjection`]. + pub fn custom

(projection: P) -> Self + where + // Implementation note: pushing these trait bounds all the way out to this function makes + // errors nice for users. If a trait is missing, they will get a helpful error telling them + // that, say, the `Debug` implementation is missing. Wrapping these traits behind a super + // trait or some other indirection will make the errors harder to understand. + // + // For example, we don't use the `DynCameraProjection`` trait bound, because it is not the + // trait the user should be implementing - they only need to worry about implementing + // `CameraProjection`. + P: CameraProjection + Debug + Send + Sync + Clone + 'static, + { + Projection::Custom(CustomProjection { + dyn_projection: Box::new(projection), + }) + } } impl CameraProjection for Projection { @@ -110,6 +253,7 @@ impl CameraProjection for Projection { match self { Projection::Perspective(projection) => projection.get_clip_from_view(), Projection::Orthographic(projection) => projection.get_clip_from_view(), + Projection::Custom(projection) => projection.get_clip_from_view(), } } @@ -117,6 +261,7 @@ impl CameraProjection for Projection { match self { Projection::Perspective(projection) => projection.get_clip_from_view_for_sub(sub_view), Projection::Orthographic(projection) => projection.get_clip_from_view_for_sub(sub_view), + Projection::Custom(projection) => projection.get_clip_from_view_for_sub(sub_view), } } @@ -124,6 +269,7 @@ impl CameraProjection for Projection { match self { Projection::Perspective(projection) => projection.update(width, height), Projection::Orthographic(projection) => projection.update(width, height), + Projection::Custom(projection) => projection.update(width, height), } } @@ -131,6 +277,7 @@ impl CameraProjection for Projection { match self { Projection::Perspective(projection) => projection.far(), Projection::Orthographic(projection) => projection.far(), + Projection::Custom(projection) => projection.far(), } } @@ -138,6 +285,7 @@ impl 
CameraProjection for Projection { match self { Projection::Perspective(projection) => projection.get_frustum_corners(z_near, z_far), Projection::Orthographic(projection) => projection.get_frustum_corners(z_near, z_far), + Projection::Custom(projection) => projection.get_frustum_corners(z_near, z_far), } } } @@ -149,8 +297,8 @@ impl Default for Projection { } /// A 3D camera projection in which distant objects appear smaller than close objects. -#[derive(Component, Debug, Clone, Reflect)] -#[reflect(Component, Default, Debug)] +#[derive(Debug, Clone, Reflect)] +#[reflect(Default, Debug, Clone)] pub struct PerspectiveProjection { /// The vertical field of view (FOV) in radians. /// @@ -285,7 +433,7 @@ impl Default for PerspectiveProjection { /// }); /// ``` #[derive(Default, Debug, Clone, Copy, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Default, Clone)] pub enum ScalingMode { /// Match the viewport size. /// @@ -341,8 +489,8 @@ pub enum ScalingMode { /// ..OrthographicProjection::default_2d() /// }); /// ``` -#[derive(Component, Debug, Clone, Reflect)] -#[reflect(Component, Debug, FromWorld)] +#[derive(Debug, Clone, Reflect)] +#[reflect(Debug, FromWorld, Clone)] pub struct OrthographicProjection { /// The distance of the near clipping plane in world units. 
/// @@ -492,8 +640,8 @@ impl CameraProjection for OrthographicProjection { ScalingMode::Fixed { width, height } => (width, height), }; - let origin_x = (projection_width * self.viewport_origin.x).round(); - let origin_y = (projection_height * self.viewport_origin.y).round(); + let origin_x = projection_width * self.viewport_origin.x; + let origin_y = projection_height * self.viewport_origin.y; self.area = Rect::new( self.scale * -origin_x, diff --git a/crates/bevy_render/src/diagnostic/internal.rs b/crates/bevy_render/src/diagnostic/internal.rs index 872323f80fc2d..ec226c760b96b 100644 --- a/crates/bevy_render/src/diagnostic/internal.rs +++ b/crates/bevy_render/src/diagnostic/internal.rs @@ -6,15 +6,16 @@ use core::{ use std::thread::{self, ThreadId}; use bevy_diagnostic::{Diagnostic, DiagnosticMeasurement, DiagnosticPath, DiagnosticsStore}; -use bevy_ecs::system::{Res, ResMut, Resource}; -use bevy_utils::{tracing, Instant}; +use bevy_ecs::resource::Resource; +use bevy_ecs::system::{Res, ResMut}; +use bevy_platform::time::Instant; use std::sync::Mutex; use wgpu::{ Buffer, BufferDescriptor, BufferUsages, CommandEncoder, ComputePass, Features, MapMode, - PipelineStatisticsTypes, QuerySet, QuerySetDescriptor, QueryType, Queue, RenderPass, + PipelineStatisticsTypes, QuerySet, QuerySetDescriptor, QueryType, RenderPass, }; -use crate::renderer::{RenderDevice, WgpuWrapper}; +use crate::renderer::{RenderAdapterInfo, RenderDevice, RenderQueue, WgpuWrapper}; use super::RecordDiagnostics; @@ -31,6 +32,8 @@ struct DiagnosticsRecorderInternal { current_frame: Mutex, submitted_frames: Vec, finished_frames: Vec, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context: tracy_client::GpuContext, } /// Records diagnostics into [`QuerySet`]'s keeping track of the mapping between @@ -40,21 +43,31 @@ pub struct DiagnosticsRecorder(WgpuWrapper); impl DiagnosticsRecorder { /// Creates the new `DiagnosticsRecorder`. 
- pub fn new(device: &RenderDevice, queue: &Queue) -> DiagnosticsRecorder { + pub fn new( + adapter_info: &RenderAdapterInfo, + device: &RenderDevice, + queue: &RenderQueue, + ) -> DiagnosticsRecorder { let features = device.features(); - let timestamp_period_ns = if features.contains(Features::TIMESTAMP_QUERY) { - queue.get_timestamp_period() - } else { - 0.0 - }; + #[cfg(feature = "tracing-tracy")] + let tracy_gpu_context = + super::tracy_gpu::new_tracy_gpu_context(adapter_info, device, queue); + let _ = adapter_info; // Prevent unused variable warnings when tracing-tracy is not enabled DiagnosticsRecorder(WgpuWrapper::new(DiagnosticsRecorderInternal { - timestamp_period_ns, + timestamp_period_ns: queue.get_timestamp_period(), features, - current_frame: Mutex::new(FrameData::new(device, features)), + current_frame: Mutex::new(FrameData::new( + device, + features, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context.clone(), + )), submitted_frames: Vec::new(), finished_frames: Vec::new(), + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context, })) } @@ -85,7 +98,7 @@ impl DiagnosticsRecorder { /// Copies data from [`QuerySet`]'s to a [`Buffer`], after which it can be downloaded to CPU. /// - /// Should be called before [`DiagnosticsRecorder::finish_frame`] + /// Should be called before [`DiagnosticsRecorder::finish_frame`]. 
pub fn resolve(&mut self, encoder: &mut CommandEncoder) { self.current_frame_mut().resolve(encoder); } @@ -101,6 +114,9 @@ impl DiagnosticsRecorder { device: &RenderDevice, callback: impl FnOnce(RenderDiagnostics) + Send + Sync + 'static, ) { + #[cfg(feature = "tracing-tracy")] + let tracy_gpu_context = self.0.tracy_gpu_context.clone(); + let internal = &mut self.0; internal .current_frame @@ -111,7 +127,12 @@ impl DiagnosticsRecorder { // reuse one of the finished frames, if we can let new_frame = match internal.finished_frames.pop() { Some(frame) => frame, - None => FrameData::new(device, internal.features), + None => FrameData::new( + device, + internal.features, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context, + ), }; let old_frame = core::mem::replace( @@ -168,10 +189,16 @@ struct FrameData { closed_spans: Vec, is_mapped: Arc, callback: Option>, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context: tracy_client::GpuContext, } impl FrameData { - fn new(device: &RenderDevice, features: Features) -> FrameData { + fn new( + device: &RenderDevice, + features: Features, + #[cfg(feature = "tracing-tracy")] tracy_gpu_context: tracy_client::GpuContext, + ) -> FrameData { let wgpu_device = device.wgpu_device(); let mut buffer_size = 0; @@ -236,6 +263,8 @@ impl FrameData { closed_spans: Vec::new(), is_mapped: Arc::new(AtomicBool::new(false)), callback: None, + #[cfg(feature = "tracing-tracy")] + tracy_gpu_context, } } @@ -298,7 +327,7 @@ impl FrameData { .open_spans .iter() .filter(|v| v.thread_id == thread_id) - .last(); + .next_back(); let path_range = match &parent { Some(parent) if parent.path_range.end == self.path_components.len() => { @@ -335,7 +364,7 @@ impl FrameData { let (index, _) = iter .enumerate() .filter(|(_, v)| v.thread_id == thread_id) - .last() + .next_back() .unwrap(); let span = self.open_spans.swap_remove(index); @@ -501,6 +530,19 @@ impl FrameData { let end = timestamps[end as usize] as f64; let value = (end - begin) * 
(timestamp_period_ns as f64) / 1e6; + #[cfg(feature = "tracing-tracy")] + { + // Calling span_alloc() and end_zone() here instead of in open_span() and close_span() means that tracy does not know where each GPU command was recorded on the CPU timeline. + // Unfortunately we must do it this way, because tracy does not play nicely with multithreaded command recording. The start/end pairs would get all mixed up. + // The GPU spans themselves are still accurate though, and it's probably safe to assume that each GPU span in frame N belongs to the corresponding CPU render node span from frame N-1. + let name = &self.path_components[span.path_range.clone()].join("/"); + let mut tracy_gpu_span = + self.tracy_gpu_context.span_alloc(name, "", "", 0).unwrap(); + tracy_gpu_span.end_zone(); + tracy_gpu_span.upload_timestamp_start(begin as i64); + tracy_gpu_span.upload_timestamp_end(end as i64); + } + diagnostics.push(RenderDiagnostic { path: self.diagnostic_path(&span.path_range, "elapsed_gpu"), suffix: "ms", diff --git a/crates/bevy_render/src/diagnostic/mod.rs b/crates/bevy_render/src/diagnostic/mod.rs index 6e91e2a736e79..7f046036a9be5 100644 --- a/crates/bevy_render/src/diagnostic/mod.rs +++ b/crates/bevy_render/src/diagnostic/mod.rs @@ -3,13 +3,15 @@ //! For more info, see [`RenderDiagnosticsPlugin`]. pub(crate) mod internal; +#[cfg(feature = "tracing-tracy")] +mod tracy_gpu; use alloc::{borrow::Cow, sync::Arc}; use core::marker::PhantomData; use bevy_app::{App, Plugin, PreUpdate}; -use crate::RenderApp; +use crate::{renderer::RenderAdapterInfo, RenderApp}; use self::internal::{ sync_diagnostics, DiagnosticsRecorder, Pass, RenderDiagnosticsMutex, WriteTimestamp, @@ -20,8 +22,8 @@ use super::{RenderDevice, RenderQueue}; /// Enables collecting render diagnostics, such as CPU/GPU elapsed time per render pass, /// as well as pipeline statistics (number of primitives, number of shader invocations, etc). 
/// -/// To access the diagnostics, you can use [`DiagnosticsStore`](bevy_diagnostic::DiagnosticsStore) resource, -/// or add [`LogDiagnosticsPlugin`](bevy_diagnostic::LogDiagnosticsPlugin). +/// To access the diagnostics, you can use the [`DiagnosticsStore`](bevy_diagnostic::DiagnosticsStore) resource, +/// add [`LogDiagnosticsPlugin`](bevy_diagnostic::LogDiagnosticsPlugin), or use [Tracy](https://github.com/bevyengine/bevy/blob/main/docs/profiling.md#tracy-renderqueue). /// /// To record diagnostics in your own passes: /// 1. First, obtain the diagnostic recorder using [`RenderContext::diagnostic_recorder`](crate::renderer::RenderContext::diagnostic_recorder). @@ -43,7 +45,6 @@ use super::{RenderDevice, RenderQueue}; /// # Supported platforms /// Timestamp queries and pipeline statistics are currently supported only on Vulkan and DX12. /// On other platforms (Metal, WebGPU, WebGL2) only CPU time will be recorded. -#[allow(clippy::doc_markdown)] #[derive(Default)] pub struct RenderDiagnosticsPlugin; @@ -63,9 +64,10 @@ impl Plugin for RenderDiagnosticsPlugin { return; }; + let adapter_info = render_app.world().resource::(); let device = render_app.world().resource::(); let queue = render_app.world().resource::(); - render_app.insert_resource(DiagnosticsRecorder::new(device, queue)); + render_app.insert_resource(DiagnosticsRecorder::new(adapter_info, device, queue)); } } diff --git a/crates/bevy_render/src/diagnostic/tracy_gpu.rs b/crates/bevy_render/src/diagnostic/tracy_gpu.rs new file mode 100644 index 0000000000000..c059b8baa5d00 --- /dev/null +++ b/crates/bevy_render/src/diagnostic/tracy_gpu.rs @@ -0,0 +1,67 @@ +use crate::renderer::{RenderAdapterInfo, RenderDevice, RenderQueue}; +use tracy_client::{Client, GpuContext, GpuContextType}; +use wgpu::{ + Backend, BufferDescriptor, BufferUsages, CommandEncoderDescriptor, Maintain, MapMode, + QuerySetDescriptor, QueryType, QUERY_SIZE, +}; + +pub fn new_tracy_gpu_context( + adapter_info: &RenderAdapterInfo, + device: 
&RenderDevice, + queue: &RenderQueue, +) -> GpuContext { + let tracy_gpu_backend = match adapter_info.backend { + Backend::Vulkan => GpuContextType::Vulkan, + Backend::Dx12 => GpuContextType::Direct3D12, + Backend::Gl => GpuContextType::OpenGL, + Backend::Metal | Backend::BrowserWebGpu | Backend::Empty => GpuContextType::Invalid, + }; + + let tracy_client = Client::running().unwrap(); + tracy_client + .new_gpu_context( + Some("RenderQueue"), + tracy_gpu_backend, + initial_timestamp(device, queue), + queue.get_timestamp_period(), + ) + .unwrap() +} + +// Code copied from https://github.com/Wumpf/wgpu-profiler/blob/f9de342a62cb75f50904a98d11dd2bbeb40ceab8/src/tracy.rs +fn initial_timestamp(device: &RenderDevice, queue: &RenderQueue) -> i64 { + let query_set = device.wgpu_device().create_query_set(&QuerySetDescriptor { + label: None, + ty: QueryType::Timestamp, + count: 1, + }); + + let resolve_buffer = device.create_buffer(&BufferDescriptor { + label: None, + size: QUERY_SIZE as _, + usage: BufferUsages::QUERY_RESOLVE | BufferUsages::COPY_SRC, + mapped_at_creation: false, + }); + + let map_buffer = device.create_buffer(&BufferDescriptor { + label: None, + size: QUERY_SIZE as _, + usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST, + mapped_at_creation: false, + }); + + let mut timestamp_encoder = device.create_command_encoder(&CommandEncoderDescriptor::default()); + timestamp_encoder.write_timestamp(&query_set, 0); + timestamp_encoder.resolve_query_set(&query_set, 0..1, &resolve_buffer, 0); + // Workaround for https://github.com/gfx-rs/wgpu/issues/6406 + // TODO when that bug is fixed, merge these encoders together again + let mut copy_encoder = device.create_command_encoder(&CommandEncoderDescriptor::default()); + copy_encoder.copy_buffer_to_buffer(&resolve_buffer, 0, &map_buffer, 0, QUERY_SIZE as _); + queue.submit([timestamp_encoder.finish(), copy_encoder.finish()]); + + map_buffer.slice(..).map_async(MapMode::Read, |_| ()); + device.poll(Maintain::Wait); + + 
let view = map_buffer.slice(..).get_mapped_range(); + i64::from_le_bytes((*view).try_into().unwrap()) +} diff --git a/crates/bevy_render/src/experimental/mod.rs b/crates/bevy_render/src/experimental/mod.rs new file mode 100644 index 0000000000000..40bb6cf1dcc4d --- /dev/null +++ b/crates/bevy_render/src/experimental/mod.rs @@ -0,0 +1,6 @@ +//! Experimental rendering features. +//! +//! Experimental features are features with known problems, but are included +//! nonetheless for testing purposes. + +pub mod occlusion_culling; diff --git a/crates/bevy_render/src/experimental/occlusion_culling/mesh_preprocess_types.wgsl b/crates/bevy_render/src/experimental/occlusion_culling/mesh_preprocess_types.wgsl new file mode 100644 index 0000000000000..a597fb0537228 --- /dev/null +++ b/crates/bevy_render/src/experimental/occlusion_culling/mesh_preprocess_types.wgsl @@ -0,0 +1,69 @@ +// Types needed for GPU mesh uniform building. + +#define_import_path bevy_pbr::mesh_preprocess_types + +// Per-frame data that the CPU supplies to the GPU. +struct MeshInput { + // The model transform. + world_from_local: mat3x4, + // The lightmap UV rect, packed into 64 bits. + lightmap_uv_rect: vec2, + // Various flags. + flags: u32, + previous_input_index: u32, + first_vertex_index: u32, + first_index_index: u32, + index_count: u32, + current_skin_index: u32, + // Low 16 bits: index of the material inside the bind group data. + // High 16 bits: index of the lightmap in the binding array. + material_and_lightmap_bind_group_slot: u32, + timestamp: u32, + // User supplied index to identify the mesh instance + tag: u32, + pad: u32, +} + +// The `wgpu` indirect parameters structure. This is a union of two structures. +// For more information, see the corresponding comment in +// `gpu_preprocessing.rs`. +struct IndirectParametersIndexed { + // `vertex_count` or `index_count`. + index_count: u32, + // `instance_count` in both structures. + instance_count: u32, + // `first_vertex` or `first_index`. 
+ first_index: u32, + // `base_vertex` or `first_instance`. + base_vertex: u32, + // A read-only copy of `instance_index`. + first_instance: u32, +} + +struct IndirectParametersNonIndexed { + vertex_count: u32, + instance_count: u32, + base_vertex: u32, + first_instance: u32, +} + +struct IndirectParametersCpuMetadata { + base_output_index: u32, + batch_set_index: u32, +} + +struct IndirectParametersGpuMetadata { + mesh_index: u32, +#ifdef WRITE_INDIRECT_PARAMETERS_METADATA + early_instance_count: atomic, + late_instance_count: atomic, +#else // WRITE_INDIRECT_PARAMETERS_METADATA + early_instance_count: u32, + late_instance_count: u32, +#endif // WRITE_INDIRECT_PARAMETERS_METADATA +} + +struct IndirectBatchSet { + indirect_parameters_count: atomic, + indirect_parameters_base: u32, +} diff --git a/crates/bevy_render/src/experimental/occlusion_culling/mod.rs b/crates/bevy_render/src/experimental/occlusion_culling/mod.rs new file mode 100644 index 0000000000000..a3b067e19f705 --- /dev/null +++ b/crates/bevy_render/src/experimental/occlusion_culling/mod.rs @@ -0,0 +1,116 @@ +//! GPU occlusion culling. +//! +//! See [`OcclusionCulling`] for a detailed description of occlusion culling in +//! Bevy. + +use bevy_app::{App, Plugin}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_ecs::{component::Component, entity::Entity, prelude::ReflectComponent}; +use bevy_reflect::{prelude::ReflectDefault, Reflect}; + +use crate::{ + extract_component::ExtractComponent, + render_resource::{Shader, TextureView}, +}; + +/// The handle to the `mesh_preprocess_types.wgsl` compute shader. +pub const MESH_PREPROCESS_TYPES_SHADER_HANDLE: Handle = + weak_handle!("7bf7bdb1-ec53-4417-987f-9ec36533287c"); + +/// Enables GPU occlusion culling. +/// +/// See [`OcclusionCulling`] for a detailed description of occlusion culling in +/// Bevy. 
+pub struct OcclusionCullingPlugin; + +impl Plugin for OcclusionCullingPlugin { + fn build(&self, app: &mut App) { + load_internal_asset!( + app, + MESH_PREPROCESS_TYPES_SHADER_HANDLE, + "mesh_preprocess_types.wgsl", + Shader::from_wgsl + ); + } +} + +/// Add this component to a view in order to enable experimental GPU occlusion +/// culling. +/// +/// *Bevy's occlusion culling is currently marked as experimental.* There are +/// known issues whereby, in rare circumstances, occlusion culling can result in +/// meshes being culled that shouldn't be (i.e. meshes that turn invisible). +/// Please try it out and report issues. +/// +/// *Occlusion culling* allows Bevy to avoid rendering objects that are fully +/// behind other opaque or alpha tested objects. This is different from, and +/// complements, depth fragment rejection as the `DepthPrepass` enables. While +/// depth rejection allows Bevy to avoid rendering *pixels* that are behind +/// other objects, the GPU still has to examine those pixels to reject them, +/// which requires transforming the vertices of the objects and performing +/// skinning if the objects were skinned. Occlusion culling allows the GPU to go +/// a step further, avoiding even transforming the vertices of objects that it +/// can quickly prove to be behind other objects. +/// +/// Occlusion culling inherently has some overhead, because Bevy must examine +/// the objects' bounding boxes, and create an acceleration structure +/// (hierarchical Z-buffer) to perform the occlusion tests. Therefore, occlusion +/// culling is disabled by default. Only enable it if you measure it to be a +/// speedup on your scene. Note that, because Bevy's occlusion culling runs on +/// the GPU and is quite efficient, it's rare for occlusion culling to result in +/// a significant slowdown. +/// +/// Occlusion culling currently requires a `DepthPrepass`. If no depth prepass +/// is present on the view, the [`OcclusionCulling`] component will be ignored. 
+/// Additionally, occlusion culling is currently incompatible with deferred +/// shading; including both `DeferredPrepass` and [`OcclusionCulling`] results +/// in unspecified behavior. +/// +/// The algorithm that Bevy uses is known as [*two-phase occlusion culling*]. +/// When you enable occlusion culling, Bevy splits the depth prepass into two: +/// an *early* depth prepass and a *late* depth prepass. The early depth prepass +/// renders all the meshes that were visible last frame to produce a +/// conservative approximation of the depth buffer. Then, after producing an +/// acceleration structure known as a hierarchical Z-buffer or depth pyramid, +/// Bevy tests the bounding boxes of all meshes against that depth buffer. Those +/// that can be quickly proven to be behind the geometry rendered during the +/// early depth prepass are skipped entirely. The other potentially-visible +/// meshes are rendered during the late prepass, and finally all the visible +/// meshes are rendered as usual during the opaque, transparent, etc. passes. +/// +/// Unlike other occlusion culling systems you may be familiar with, Bevy's +/// occlusion culling is fully dynamic and requires no baking step. The CPU +/// overhead is minimal. Large skinned meshes and other dynamic objects can +/// occlude other objects. +/// +/// [*two-phase occlusion culling*]: +/// https://medium.com/@mil_kru/two-pass-occlusion-culling-4100edcad501 +#[derive(Component, ExtractComponent, Clone, Copy, Default, Reflect)] +#[reflect(Component, Default, Clone)] +pub struct OcclusionCulling; + +/// A render-world component that contains resources necessary to perform +/// occlusion culling on any view other than a camera. +/// +/// Bevy automatically places this component on views created for shadow +/// mapping. You don't ordinarily need to add this component yourself. +#[derive(Clone, Component)] +pub struct OcclusionCullingSubview { + /// A texture view of the Z-buffer. 
+ pub depth_texture_view: TextureView, + /// The size of the texture along both dimensions. + /// + /// Because [`OcclusionCullingSubview`] is only currently used for shadow + /// maps, they're guaranteed to have sizes equal to a power of two, so we + /// don't have to store the two dimensions individually here. + pub depth_texture_size: u32, +} + +/// A render-world component placed on each camera that stores references to all +/// entities other than cameras that need occlusion culling. +/// +/// Bevy automatically places this component on cameras that are drawing +/// shadows, when those shadows come from lights with occlusion culling enabled. +/// You don't ordinarily need to add this component yourself. +#[derive(Clone, Component)] +pub struct OcclusionCullingSubviewEntities(pub Vec); diff --git a/crates/bevy_render/src/extract_component.rs b/crates/bevy_render/src/extract_component.rs index 64e744775ffaf..19d15a2b8607c 100644 --- a/crates/bevy_render/src/extract_component.rs +++ b/crates/bevy_render/src/extract_component.rs @@ -8,6 +8,7 @@ use crate::{ }; use bevy_app::{App, Plugin}; use bevy_ecs::{ + bundle::NoBundleEffect, component::Component, prelude::*, query::{QueryFilter, QueryItem, ReadOnlyQueryData}, @@ -53,7 +54,7 @@ pub trait ExtractComponent: Component { /// /// `Out` has a [`Bundle`] trait bound instead of a [`Component`] trait bound in order to allow use cases /// such as tuples of components as output. - type Out: Bundle; + type Out: Bundle; // TODO: https://github.com/rust-lang/rust/issues/29661 // type Out: Component = Self; @@ -153,7 +154,7 @@ fn prepare_uniform_components( ) }) .collect::>(); - commands.insert_or_spawn_batch(entities); + commands.try_insert_batch(entities); } /// This plugin extracts the components into the render world for synced entities. 
@@ -211,7 +212,7 @@ fn extract_components( } } *previous_len = values.len(); - commands.insert_or_spawn_batch(values); + commands.try_insert_batch(values); } /// This system extracts all components of the corresponding [`ExtractComponent`], for entities that are visible and synced via [`crate::sync_world::SyncToRenderWorld`]. @@ -231,5 +232,5 @@ fn extract_visible_components( } } *previous_len = values.len(); - commands.insert_or_spawn_batch(values); + commands.try_insert_batch(values); } diff --git a/crates/bevy_render/src/extract_instances.rs b/crates/bevy_render/src/extract_instances.rs index b1344c0e78648..a8e5a9ecbdf92 100644 --- a/crates/bevy_render/src/extract_instances.rs +++ b/crates/bevy_render/src/extract_instances.rs @@ -11,7 +11,8 @@ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ prelude::Entity, query::{QueryFilter, QueryItem, ReadOnlyQueryData}, - system::{Query, ResMut, Resource}, + resource::Resource, + system::{Query, ResMut}, }; use crate::sync_world::MainEntityHashMap; diff --git a/crates/bevy_render/src/extract_param.rs b/crates/bevy_render/src/extract_param.rs index 6ac7079bc5f49..f54309847426e 100644 --- a/crates/bevy_render/src/extract_param.rs +++ b/crates/bevy_render/src/extract_param.rs @@ -2,7 +2,10 @@ use crate::MainWorld; use bevy_ecs::{ component::Tick, prelude::*, - system::{ReadOnlySystemParam, SystemMeta, SystemParam, SystemParamItem, SystemState}, + system::{ + ReadOnlySystemParam, SystemMeta, SystemParam, SystemParamItem, SystemParamValidationError, + SystemState, + }, world::unsafe_world_cell::UnsafeWorldCell, }; use core::ops::{Deref, DerefMut}; @@ -79,14 +82,15 @@ where #[inline] unsafe fn validate_param( state: &Self::State, - system_meta: &SystemMeta, + _system_meta: &SystemMeta, world: UnsafeWorldCell, - ) -> bool { + ) -> Result<(), SystemParamValidationError> { // SAFETY: Read-only access to world data registered in `init_state`. 
let result = unsafe { world.get_resource_by_id(state.main_world_state) }; let Some(main_world) = result else { - system_meta.try_warn_param::<&World>(); - return false; + return Err(SystemParamValidationError::invalid::( + "`MainWorld` resource does not exist", + )); }; // SAFETY: Type is guaranteed by `SystemState`. let main_world: &World = unsafe { main_world.deref() }; diff --git a/crates/bevy_render/src/extract_resource.rs b/crates/bevy_render/src/extract_resource.rs index 2d36e694be7e5..cec8647ffc048 100644 --- a/crates/bevy_render/src/extract_resource.rs +++ b/crates/bevy_render/src/extract_resource.rs @@ -3,6 +3,7 @@ use core::marker::PhantomData; use bevy_app::{App, Plugin}; use bevy_ecs::prelude::*; pub use bevy_render_macros::ExtractResource; +use bevy_utils::once; use crate::{Extract, ExtractSchedule, RenderApp}; @@ -34,10 +35,10 @@ impl Plugin for ExtractResourcePlugin { if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app.add_systems(ExtractSchedule, extract_resource::); } else { - bevy_utils::error_once!( + once!(tracing::error!( "Render app did not exist when trying to add `extract_resource` for <{}>.", core::any::type_name::() - ); + )); } } } @@ -56,12 +57,13 @@ pub fn extract_resource( } else { #[cfg(debug_assertions)] if !main_resource.is_added() { - bevy_utils::warn_once!( + once!(tracing::warn!( "Removing resource {} from render world not expected, adding using `Commands`. 
This may decrease performance", core::any::type_name::() - ); + )); } + commands.insert_resource(R::extract_resource(main_resource)); } } diff --git a/crates/bevy_render/src/globals.rs b/crates/bevy_render/src/globals.rs index 65d5bbc28fd7f..c05d96c4c9300 100644 --- a/crates/bevy_render/src/globals.rs +++ b/crates/bevy_render/src/globals.rs @@ -6,13 +6,14 @@ use crate::{ Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_diagnostic::FrameCount; use bevy_ecs::prelude::*; use bevy_reflect::prelude::*; use bevy_time::Time; -pub const GLOBALS_TYPE_HANDLE: Handle = Handle::weak_from_u128(17924628719070609599); +pub const GLOBALS_TYPE_HANDLE: Handle = + weak_handle!("9e22a765-30ca-4070-9a4c-34ac08f1c0e7"); pub struct GlobalsPlugin; @@ -45,7 +46,7 @@ fn extract_time(mut commands: Commands, time: Extract>) { /// Contains global values useful when writing shaders. /// Currently only contains values related to time. #[derive(Default, Clone, Resource, ExtractResource, Reflect, ShaderType)] -#[reflect(Resource, Default)] +#[reflect(Resource, Default, Clone)] pub struct GlobalsUniform { /// The time since startup in seconds. /// Wraps to 0 after 1 hour. 
diff --git a/crates/bevy_render/src/gpu_component_array_buffer.rs b/crates/bevy_render/src/gpu_component_array_buffer.rs index ac0f471a4a235..b3f78f5bfbca4 100644 --- a/crates/bevy_render/src/gpu_component_array_buffer.rs +++ b/crates/bevy_render/src/gpu_component_array_buffer.rs @@ -6,7 +6,7 @@ use crate::{ use bevy_app::{App, Plugin}; use bevy_ecs::{ prelude::{Component, Entity}, - schedule::IntoSystemConfigs, + schedule::IntoScheduleConfigs, system::{Commands, Query, Res, ResMut}, }; use core::marker::PhantomData; @@ -53,7 +53,7 @@ fn prepare_gpu_component_array_buffers( .iter() .map(|(entity, component)| (entity, gpu_array_buffer.push(component.clone()))) .collect::>(); - commands.insert_or_spawn_batch(entities); + commands.try_insert_batch(entities); gpu_array_buffer.write_buffer(&render_device, &render_queue); } diff --git a/crates/bevy_render/src/gpu_readback.rs b/crates/bevy_render/src/gpu_readback.rs index d67db45e0986b..02f0c2d1db696 100644 --- a/crates/bevy_render/src/gpu_readback.rs +++ b/crates/bevy_render/src/gpu_readback.rs @@ -1,7 +1,10 @@ use crate::{ extract_component::ExtractComponentPlugin, render_asset::RenderAssets, - render_resource::{Buffer, BufferUsages, Extent3d, ImageDataLayout, Texture, TextureFormat}, + render_resource::{ + Buffer, BufferUsages, CommandEncoder, Extent3d, TexelCopyBufferLayout, Texture, + TextureFormat, + }, renderer::{render_system, RenderDevice}, storage::{GpuShaderStorageBuffer, ShaderStorageBuffer}, sync_world::MainEntity, @@ -12,7 +15,7 @@ use async_channel::{Receiver, Sender}; use bevy_app::{App, Plugin}; use bevy_asset::Handle; use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::schedule::IntoSystemConfigs; +use bevy_ecs::schedule::IntoScheduleConfigs; use bevy_ecs::{ change_detection::ResMut, entity::Entity, @@ -21,13 +24,13 @@ use bevy_ecs::{ system::{Query, Res}, }; use bevy_image::{Image, TextureFormatPixelInfo}; +use bevy_platform::collections::HashMap; use bevy_reflect::Reflect; use 
bevy_render_macros::ExtractComponent; -use bevy_utils::{tracing::warn, HashMap}; use encase::internal::ReadFrom; use encase::private::Reader; use encase::ShaderType; -use wgpu::{CommandEncoder, COPY_BYTES_PER_ROW_ALIGNMENT}; +use tracing::warn; /// A plugin that enables reading back gpu buffers and textures to the cpu. pub struct GpuReadbackPlugin { @@ -184,7 +187,7 @@ impl GpuReadbackBufferPool { enum ReadbackSource { Texture { texture: Texture, - layout: ImageDataLayout, + layout: TexelCopyBufferLayout, size: Extent3d, }, Buffer { @@ -239,16 +242,11 @@ fn prepare_buffers( match readback { Readback::Texture(image) => { if let Some(gpu_image) = gpu_images.get(image) { - let layout = layout_data( - gpu_image.size.width, - gpu_image.size.height, - gpu_image.texture_format, - ); + let layout = layout_data(gpu_image.size, gpu_image.texture_format); let buffer = buffer_pool.get( &render_device, get_aligned_size( - gpu_image.size.width, - gpu_image.size.height, + gpu_image.size, gpu_image.texture_format.pixel_size() as u32, ) as u64, ); @@ -299,7 +297,7 @@ pub(crate) fn submit_readback_commands(world: &World, command_encoder: &mut Comm } => { command_encoder.copy_texture_to_buffer( texture.as_image_copy(), - wgpu::ImageCopyBuffer { + wgpu::TexelCopyBufferInfo { buffer: &readback.buffer, layout: *layout, }, @@ -339,7 +337,7 @@ fn map_buffers(mut readbacks: ResMut) { drop(data); buffer.unmap(); if let Err(e) = tx.try_send((entity, buffer, result)) { - warn!("Failed to send readback result: {:?}", e); + warn!("Failed to send readback result: {}", e); } }); readbacks.mapped.push(readback); @@ -348,23 +346,38 @@ fn map_buffers(mut readbacks: ResMut) { // Utils -pub(crate) fn align_byte_size(value: u32) -> u32 { - value + (COPY_BYTES_PER_ROW_ALIGNMENT - (value % COPY_BYTES_PER_ROW_ALIGNMENT)) +/// Round up a given value to be a multiple of [`wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`]. 
+pub(crate) const fn align_byte_size(value: u32) -> u32 { + RenderDevice::align_copy_bytes_per_row(value as usize) as u32 } -pub(crate) fn get_aligned_size(width: u32, height: u32, pixel_size: u32) -> u32 { - height * align_byte_size(width * pixel_size) +/// Get the size of a image when the size of each row has been rounded up to [`wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`]. +pub(crate) const fn get_aligned_size(extent: Extent3d, pixel_size: u32) -> u32 { + extent.height * align_byte_size(extent.width * pixel_size) * extent.depth_or_array_layers } -pub(crate) fn layout_data(width: u32, height: u32, format: TextureFormat) -> ImageDataLayout { - ImageDataLayout { - bytes_per_row: if height > 1 { +/// Get a [`TexelCopyBufferLayout`] aligned such that the image can be copied into a buffer. +pub(crate) fn layout_data(extent: Extent3d, format: TextureFormat) -> TexelCopyBufferLayout { + TexelCopyBufferLayout { + bytes_per_row: if extent.height > 1 || extent.depth_or_array_layers > 1 { // 1 = 1 row - Some(get_aligned_size(width, 1, format.pixel_size() as u32)) + Some(get_aligned_size( + Extent3d { + width: extent.width, + height: 1, + depth_or_array_layers: 1, + }, + format.pixel_size() as u32, + )) + } else { + None + }, + rows_per_image: if extent.depth_or_array_layers > 1 { + let (_, block_dimension_y) = format.block_dimensions(); + Some(extent.height / block_dimension_y) } else { None }, - rows_per_image: None, - ..Default::default() + offset: 0, } } diff --git a/crates/bevy_render/src/lib.rs b/crates/bevy_render/src/lib.rs index 7e206ffd2fb25..843bb6828457c 100644 --- a/crates/bevy_render/src/lib.rs +++ b/crates/bevy_render/src/lib.rs @@ -1,5 +1,5 @@ #![expect(missing_docs, reason = "Not all docs are written yet, see #3492.")] -#![expect(unsafe_code)] +#![expect(unsafe_code, reason = "Unsafe code is used to improve performance.")] #![cfg_attr( any(docsrs, docsrs_dep), expect( @@ -19,10 +19,14 @@ compile_error!("bevy_render cannot compile for a 16-bit platform."); extern 
crate alloc; extern crate core; +// Required to make proc macros work in bevy itself. +extern crate self as bevy_render; + pub mod alpha; pub mod batching; pub mod camera; pub mod diagnostic; +pub mod experimental; pub mod extract_component; pub mod extract_instances; mod extract_param; @@ -40,7 +44,6 @@ pub mod render_phase; pub mod render_resource; pub mod renderer; pub mod settings; -mod spatial_bundle; pub mod storage; pub mod sync_component; pub mod sync_world; @@ -50,7 +53,6 @@ pub mod view; /// The render prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. -#[expect(deprecated)] pub mod prelude { #[doc(hidden)] pub use crate::{ @@ -64,9 +66,8 @@ pub mod prelude { Mesh3d, }, render_resource::Shader, - spatial_bundle::SpatialBundle, texture::ImagePlugin, - view::{InheritedVisibility, Msaa, ViewVisibility, Visibility, VisibilityBundle}, + view::{InheritedVisibility, Msaa, ViewVisibility, Visibility}, ExtractSchedule, }; } @@ -75,11 +76,13 @@ use bevy_ecs::schedule::ScheduleBuildSettings; use bevy_utils::prelude::default; pub use extract_param::Extract; -use bevy_hierarchy::ValidParentCheckPlugin; use bevy_window::{PrimaryWindow, RawHandleWrapperHolder}; -use extract_resource::ExtractResourcePlugin; +use experimental::occlusion_culling::OcclusionCullingPlugin; use globals::GlobalsPlugin; -use render_asset::RenderAssetBytesPerFrame; +use render_asset::{ + extract_render_asset_bytes_per_frame, reset_render_asset_bytes_per_frame, + RenderAssetBytesPerFrame, RenderAssetBytesPerFrameLimiter, +}; use renderer::{RenderAdapter, RenderDevice, RenderQueue}; use settings::RenderResources; use sync_world::{ @@ -99,11 +102,12 @@ use crate::{ }; use alloc::sync::Arc; use bevy_app::{App, AppLabel, Plugin, SubApp}; -use bevy_asset::{load_internal_asset, AssetApp, AssetServer, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, AssetApp, AssetServer, Handle}; use bevy_ecs::{prelude::*, schedule::ScheduleLabel}; -use 
bevy_utils::tracing::debug; +use bitflags::bitflags; use core::ops::{Deref, DerefMut}; use std::sync::Mutex; +use tracing::debug; /// Contains the default Bevy rendering backend based on wgpu. /// @@ -119,6 +123,21 @@ pub struct RenderPlugin { /// If `true`, disables asynchronous pipeline compilation. /// This has no effect on macOS, Wasm, iOS, or without the `multi_threaded` feature. pub synchronous_pipeline_compilation: bool, + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} + +bitflags! { + /// Debugging flags that can optionally be set when constructing the renderer. + #[derive(Clone, Copy, PartialEq, Default, Debug)] + pub struct RenderDebugFlags: u8 { + /// If true, this sets the `COPY_SRC` flag on indirect draw parameters + /// so that they can be read back to CPU. + /// + /// This is a debugging feature that may reduce performance. It + /// primarily exists for the `occlusion_culling` example. + const ALLOW_COPIES_FROM_INDIRECT_PARAMETERS = 1; + } } /// The systems sets of the default [`App`] rendering schedule. @@ -130,6 +149,8 @@ pub enum RenderSet { ExtractCommands, /// Prepare assets that have been created/modified/removed this frame. PrepareAssets, + /// Prepares extracted meshes. + PrepareMeshes, /// Create any additional views such as those used for shadow mapping. ManageViews, /// Queue drawable entities as phase items in render phases ready for @@ -137,6 +158,9 @@ pub enum RenderSet { Queue, /// A sub-set within [`Queue`](RenderSet::Queue) where mesh entity queue systems are executed. Ensures `prepare_assets::` is completed. QueueMeshes, + /// A sub-set within [`Queue`](RenderSet::Queue) where meshes that have + /// become invisible or changed phases are removed from the bins. 
+ QueueSweep, // TODO: This could probably be moved in favor of a system ordering // abstraction in `Render` or `Queue` /// Sort the [`SortedRenderPhase`](render_phase::SortedRenderPhase)s and @@ -147,6 +171,9 @@ pub enum RenderSet { Prepare, /// A sub-set within [`Prepare`](RenderSet::Prepare) for initializing buffers, textures and uniforms for use in bind groups. PrepareResources, + /// Collect phase buffers after + /// [`PrepareResources`](RenderSet::PrepareResources) has run. + PrepareResourcesCollectPhaseBuffers, /// Flush buffers after [`PrepareResources`](RenderSet::PrepareResources), but before [`PrepareBindGroups`](RenderSet::PrepareBindGroups). PrepareResourcesFlush, /// A sub-set within [`Prepare`](RenderSet::Prepare) for constructing bind groups, or other data that relies on render resources prepared in [`PrepareResources`](RenderSet::PrepareResources). @@ -163,7 +190,7 @@ pub enum RenderSet { } /// The main render schedule. -#[derive(ScheduleLabel, Debug, Hash, PartialEq, Eq, Clone)] +#[derive(ScheduleLabel, Debug, Hash, PartialEq, Eq, Clone, Default)] pub struct Render; impl Render { @@ -178,6 +205,7 @@ impl Render { schedule.configure_sets( ( ExtractCommands, + PrepareMeshes, ManageViews, Queue, PhaseSort, @@ -189,14 +217,20 @@ impl Render { .chain(), ); - schedule.configure_sets((ExtractCommands, PrepareAssets, Prepare).chain()); + schedule.configure_sets((ExtractCommands, PrepareAssets, PrepareMeshes, Prepare).chain()); schedule.configure_sets( - QueueMeshes + (QueueMeshes, QueueSweep) + .chain() .in_set(Queue) .after(prepare_assets::), ); schedule.configure_sets( - (PrepareResources, PrepareResourcesFlush, PrepareBindGroups) + ( + PrepareResources, + PrepareResourcesCollectPhaseBuffers, + PrepareResourcesFlush, + PrepareBindGroups, + ) .chain() .in_set(Prepare), ); @@ -212,7 +246,7 @@ impl Render { /// /// This schedule is run on the main world, but its buffers are not applied /// until it is returned to the render world. 
-#[derive(ScheduleLabel, PartialEq, Eq, Debug, Clone, Hash)] +#[derive(ScheduleLabel, PartialEq, Eq, Debug, Clone, Hash, Default)] pub struct ExtractSchedule; /// The simulation [`World`] of the application, stored as a resource. @@ -251,11 +285,12 @@ struct FutureRenderResources(Arc>>); #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, AppLabel)] pub struct RenderApp; -pub const INSTANCE_INDEX_SHADER_HANDLE: Handle = - Handle::weak_from_u128(10313207077636615845); -pub const MATHS_SHADER_HANDLE: Handle = Handle::weak_from_u128(10665356303104593376); +pub const MATHS_SHADER_HANDLE: Handle = + weak_handle!("d94d70d4-746d-49c4-bfc3-27d63f2acda0"); pub const COLOR_OPERATIONS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(1844674407370955161); + weak_handle!("33a80b2f-aaf7-4c86-b828-e7ae83b72f1a"); +pub const BINDLESS_SHADER_HANDLE: Handle = + weak_handle!("13f1baaa-41bf-448e-929e-258f9307a522"); impl Plugin for RenderPlugin { /// Initializes the renderer, sets up the [`RenderSet`] and creates the rendering sub-app. 
@@ -282,16 +317,22 @@ impl Plugin for RenderPlugin { let primary_window = app .world_mut() .query_filtered::<&RawHandleWrapperHolder, With>() - .get_single(app.world()) + .single(app.world()) .ok() .cloned(); let settings = render_creation.clone(); let async_renderer = async move { - let instance = wgpu::Instance::new(wgpu::InstanceDescriptor { + let instance = wgpu::Instance::new(&wgpu::InstanceDescriptor { backends, - dx12_shader_compiler: settings.dx12_shader_compiler.clone(), flags: settings.instance_flags, - gles_minor_version: settings.gles3_minor_version, + backend_options: wgpu::BackendOptions { + gl: wgpu::GlBackendOptions { + gles_minor_version: settings.gles3_minor_version, + }, + dx12: wgpu::Dx12BackendOptions { + shader_compiler: settings.dx12_shader_compiler.clone(), + }, + }, }); let surface = primary_window.and_then(|wrapper| { @@ -352,21 +393,33 @@ impl Plugin for RenderPlugin { }; app.add_plugins(( - ValidParentCheckPlugin::::default(), WindowRenderPlugin, CameraPlugin, ViewPlugin, MeshPlugin, GlobalsPlugin, MorphPlugin, - BatchingPlugin, + BatchingPlugin { + debug_flags: self.debug_flags, + }, SyncWorldPlugin, StoragePlugin, GpuReadbackPlugin::default(), + OcclusionCullingPlugin, + #[cfg(feature = "tracing-tracy")] + diagnostic::RenderDiagnosticsPlugin, )); - app.init_resource::() - .add_plugins(ExtractResourcePlugin::::default()); + app.init_resource::(); + if let Some(render_app) = app.get_sub_app_mut(RenderApp) { + render_app.init_resource::(); + render_app + .add_systems(ExtractSchedule, extract_render_asset_bytes_per_frame) + .add_systems( + Render, + reset_render_asset_bytes_per_frame.in_set(RenderSet::Cleanup), + ); + } app.register_type::() // These types cannot be registered in bevy_color, as it does not depend on the rest of Bevy @@ -393,6 +446,12 @@ impl Plugin for RenderPlugin { "color_operations.wgsl", Shader::from_wgsl ); + load_internal_asset!( + app, + BINDLESS_SHADER_HANDLE, + "bindless.wgsl", + Shader::from_wgsl + ); if let 
Some(future_render_resources) = app.world_mut().remove_resource::() { @@ -416,14 +475,7 @@ impl Plugin for RenderPlugin { .insert_resource(device) .insert_resource(queue) .insert_resource(render_adapter) - .insert_resource(adapter_info) - .add_systems( - Render, - (|mut bpf: ResMut| { - bpf.reset(); - }) - .in_set(RenderSet::Cleanup), - ); + .insert_resource(adapter_info); } } } @@ -477,10 +529,8 @@ unsafe fn initialize_render_app(app: &mut App) { // This set applies the commands from the extract schedule while the render schedule // is running in parallel with the main app. apply_extract_commands.in_set(RenderSet::ExtractCommands), - ( - PipelineCache::process_pipeline_queue_system.before(render_system), - render_system, - ) + (PipelineCache::process_pipeline_queue_system, render_system) + .chain() .in_set(RenderSet::Render), despawn_temporary_render_entities.in_set(RenderSet::PostCleanup), ), @@ -489,7 +539,7 @@ unsafe fn initialize_render_app(app: &mut App) { render_app.set_extract(|main_world, render_world| { { #[cfg(feature = "trace")] - let _stage_span = bevy_utils::tracing::info_span!("entity_sync").entered(); + let _stage_span = tracing::info_span!("entity_sync").entered(); entity_sync_system(main_world, render_world); } @@ -534,3 +584,26 @@ pub fn get_adreno_model(adapter: &RenderAdapter) -> Option { .fold(0, |acc, digit| acc * 10 + digit), ) } + +/// Get the Mali driver version if the adapter is a Mali GPU. 
+pub fn get_mali_driver_version(adapter: &RenderAdapter) -> Option { + if !cfg!(target_os = "android") { + return None; + } + + let driver_name = adapter.get_info().name; + if !driver_name.contains("Mali") { + return None; + } + let driver_info = adapter.get_info().driver_info; + if let Some(start_pos) = driver_info.find("v1.r") { + if let Some(end_pos) = driver_info[start_pos..].find('p') { + let start_idx = start_pos + 4; // Skip "v1.r" + let end_idx = start_pos + end_pos; + + return driver_info[start_idx..end_idx].parse::().ok(); + } + } + + None +} diff --git a/crates/bevy_render/src/maths.wgsl b/crates/bevy_render/src/maths.wgsl index a9cb80c0fcabb..b492dd6bb28e8 100644 --- a/crates/bevy_render/src/maths.wgsl +++ b/crates/bevy_render/src/maths.wgsl @@ -93,3 +93,64 @@ fn sphere_intersects_plane_half_space( fn powsafe(color: vec3, power: f32) -> vec3 { return pow(abs(color), vec3(power)) * sign(color); } + +// https://en.wikipedia.org/wiki/Vector_projection#Vector_projection_2 +fn project_onto(lhs: vec3, rhs: vec3) -> vec3 { + let other_len_sq_rcp = 1.0 / dot(rhs, rhs); + return rhs * dot(lhs, rhs) * other_len_sq_rcp; +} + +// Below are fast approximations of common irrational and trig functions. These +// are likely most useful when raymarching, for example, where complete numeric +// accuracy can be sacrificed for greater sample count. + +fn fast_sqrt(x: f32) -> f32 { + let n = bitcast(0x1fbd1df5 + (bitcast(x) >> 1u)); + // One Newton's method iteration for better precision + return 0.5 * (n + x / n); +} + +// Slightly less accurate than fast_acos_4, but much simpler. +fn fast_acos(in_x: f32) -> f32 { + let x = abs(in_x); + var res = -0.156583 * x + HALF_PI; + res *= fast_sqrt(1.0 - x); + return select(PI - res, res, in_x >= 0.0); +} + +// 4th order polynomial approximation +// 4 VGRP, 16 ALU Full Rate +// 7 * 10^-5 radians precision +// Reference : Handbook of Mathematical Functions (chapter : Elementary Transcendental Functions), M. Abramowitz and I.A. 
Stegun, Ed. +fn fast_acos_4(x: f32) -> f32 { + let x1 = abs(x); + let x2 = x1 * x1; + let x3 = x2 * x1; + var s: f32; + + s = -0.2121144 * x1 + 1.5707288; + s = 0.0742610 * x2 + s; + s = -0.0187293 * x3 + s; + s = fast_sqrt(1.0 - x1) * s; + + // acos function mirroring + return select(PI - s, s, x >= 0.0); +} + +fn fast_atan2(y: f32, x: f32) -> f32 { + var t0 = max(abs(x), abs(y)); + var t1 = min(abs(x), abs(y)); + var t3 = t1 / t0; + var t4 = t3 * t3; + + t0 = 0.0872929; + t0 = t0 * t4 - 0.301895; + t0 = t0 * t4 + 1.0; + t3 = t0 * t3; + + t3 = select(t3, (0.5 * PI) - t3, abs(y) > abs(x)); + t3 = select(t3, PI - t3, x < 0); + t3 = select(-t3, t3, y > 0); + + return t3; +} diff --git a/crates/bevy_render/src/mesh/allocator.rs b/crates/bevy_render/src/mesh/allocator.rs index 6e7afaa4d7cde..bc638859ed3ad 100644 --- a/crates/bevy_render/src/mesh/allocator.rs +++ b/crates/bevy_render/src/mesh/allocator.rs @@ -11,12 +11,15 @@ use bevy_app::{App, Plugin}; use bevy_asset::AssetId; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - schedule::IntoSystemConfigs as _, - system::{Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{Res, ResMut}, world::{FromWorld, World}, }; -use bevy_utils::{default, tracing::error, HashMap, HashSet}; +use bevy_platform::collections::{hash_map::Entry, HashMap, HashSet}; +use bevy_utils::default; use offset_allocator::{Allocation, Allocator}; +use tracing::error; use wgpu::{ BufferDescriptor, BufferSize, BufferUsages, CommandEncoderDescriptor, DownlevelFlags, COPY_BUFFER_ALIGNMENT, @@ -155,7 +158,6 @@ pub struct MeshBufferSlice<'a> { pub struct SlabId(pub NonMaxU32); /// Data for a single slab. -#[allow(clippy::large_enum_variant)] enum Slab { /// A slab that can contain multiple objects. General(GeneralSlab), @@ -194,7 +196,7 @@ struct GeneralSlab { element_layout: ElementLayout, /// The size of this slab in slots. 
- slot_capacity: u32, + current_slot_capacity: u32, } /// A slab that contains a single object. @@ -222,6 +224,18 @@ enum ElementClass { Index, } +/// The results of [`GeneralSlab::grow_if_necessary`]. +enum SlabGrowthResult { + /// The mesh data already fits in the slab; the slab doesn't need to grow. + NoGrowthNeeded, + /// The slab needed to grow. + /// + /// The [`SlabToReallocate`] contains the old capacity of the slab. + NeededGrowth(SlabToReallocate), + /// The slab wanted to grow but couldn't because it hit its maximum size. + CantGrow, +} + /// Information about the size of individual elements (vertices or indices) /// within a slab. /// @@ -276,9 +290,8 @@ struct SlabsToReallocate(HashMap); /// reallocated. #[derive(Default)] struct SlabToReallocate { - /// Maps all allocations that need to be relocated to their positions within - /// the *new* slab. - allocations_to_copy: HashMap, SlabAllocation>, + /// The capacity of the slab before we decided to grow it. + old_slot_capacity: u32, } impl Display for SlabId { @@ -345,7 +358,10 @@ pub fn allocate_and_free_meshes( render_device: Res, render_queue: Res, ) { - // Process newly-added meshes. + // Process removed or modified meshes. + mesh_allocator.free_meshes(&extracted_meshes); + + // Process newly-added or modified meshes. mesh_allocator.allocate_meshes( &mesh_allocator_settings, &extracted_meshes, @@ -353,9 +369,6 @@ pub fn allocate_and_free_meshes( &render_device, &render_queue, ); - - // Process removed meshes. - mesh_allocator.free_meshes(&extracted_meshes); } impl MeshAllocator { @@ -396,7 +409,7 @@ impl MeshAllocator { slab_id: SlabId, ) -> Option { match self.slabs.get(&slab_id)? 
{ - Slab::General(ref general_slab) => { + Slab::General(general_slab) => { let slab_allocation = general_slab.resident_allocations.get(mesh_id)?; Some(MeshBufferSlice { buffer: general_slab.buffer.as_ref()?, @@ -407,7 +420,7 @@ impl MeshAllocator { }) } - Slab::LargeObject(ref large_object_slab) => { + Slab::LargeObject(large_object_slab) => { let buffer = large_object_slab.buffer.as_ref()?; Some(MeshBufferSlice { buffer, @@ -526,7 +539,6 @@ impl MeshAllocator { } /// A generic function that copies either vertex or index data into a slab. - #[allow(clippy::too_many_arguments)] fn copy_element_data( &mut self, mesh_id: &AssetId, @@ -543,7 +555,7 @@ impl MeshAllocator { match *slab { Slab::General(ref mut general_slab) => { - let (Some(ref buffer), Some(allocated_range)) = ( + let (Some(buffer), Some(allocated_range)) = ( &general_slab.buffer, general_slab.pending_allocations.remove(mesh_id), ) else { @@ -595,9 +607,17 @@ impl MeshAllocator { } } + /// Frees allocations for meshes that were removed or modified this frame. fn free_meshes(&mut self, extracted_meshes: &ExtractedAssets) { let mut empty_slabs = >::default(); - for mesh_id in &extracted_meshes.removed { + + // TODO: Consider explicitly reusing allocations for changed meshes of the same size + let meshes_to_free = extracted_meshes + .removed + .iter() + .chain(extracted_meshes.modified.iter()); + + for mesh_id in meshes_to_free { if let Some(slab_id) = self.mesh_id_to_vertex_slab.remove(mesh_id) { self.free_allocation_in_slab(mesh_id, slab_id, &mut empty_slabs); } @@ -693,32 +713,39 @@ impl MeshAllocator { // and try to allocate the mesh inside them. We go with the first one // that succeeds. 
let mut mesh_allocation = None; - 'slab: for &slab_id in &*candidate_slabs { - loop { - let Some(Slab::General(ref mut slab)) = self.slabs.get_mut(&slab_id) else { - unreachable!("Slab not found") - }; + for &slab_id in &*candidate_slabs { + let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else { + unreachable!("Slab not found") + }; - if let Some(allocation) = slab.allocator.allocate(data_slot_count) { - mesh_allocation = Some(MeshAllocation { - slab_id, - slab_allocation: SlabAllocation { - allocation, - slot_count: data_slot_count, - }, - }); - break 'slab; - } + let Some(allocation) = slab.allocator.allocate(data_slot_count) else { + continue; + }; - // Try to grow the slab. If this fails, the slab is full; go on - // to the next slab. - match slab.try_grow(settings) { - Ok(new_mesh_allocation_records) => { - slabs_to_grow.insert(slab_id, new_mesh_allocation_records); + // Try to fit the object in the slab, growing if necessary. + match slab.grow_if_necessary(allocation.offset + data_slot_count, settings) { + SlabGrowthResult::NoGrowthNeeded => {} + SlabGrowthResult::NeededGrowth(slab_to_reallocate) => { + // If we already grew the slab this frame, don't replace the + // `SlabToReallocate` entry. We want to keep the entry + // corresponding to the size that the slab had at the start + // of the frame, so that we can copy only the used portion + // of the initial buffer to the new one. + if let Entry::Vacant(vacant_entry) = slabs_to_grow.entry(slab_id) { + vacant_entry.insert(slab_to_reallocate); } - Err(()) => continue 'slab, } + SlabGrowthResult::CantGrow => continue, } + + mesh_allocation = Some(MeshAllocation { + slab_id, + slab_allocation: SlabAllocation { + allocation, + slot_count: data_slot_count, + }, + }); + break; } // If we still have no allocation, make a new slab. @@ -744,9 +771,7 @@ impl MeshAllocator { // Mark the allocation as pending. 
Don't copy it in just yet; further // meshes loaded this frame may result in its final allocation location // changing. - if let Some(Slab::General(ref mut general_slab)) = - self.slabs.get_mut(&mesh_allocation.slab_id) - { + if let Some(Slab::General(general_slab)) = self.slabs.get_mut(&mesh_allocation.slab_id) { general_slab .pending_allocations .insert(*mesh_id, mesh_allocation.slab_allocation); @@ -773,10 +798,11 @@ impl MeshAllocator { /// Reallocates a slab that needs to be resized, or allocates a new slab. /// - /// This performs the actual growth operation that [`GeneralSlab::try_grow`] - /// scheduled. We do the growth in two phases so that, if a slab grows - /// multiple times in the same frame, only one new buffer is reallocated, - /// rather than reallocating the buffer multiple times. + /// This performs the actual growth operation that + /// [`GeneralSlab::grow_if_necessary`] scheduled. We do the growth in two + /// phases so that, if a slab grows multiple times in the same frame, only + /// one new buffer is reallocated, rather than reallocating the buffer + /// multiple times. fn reallocate_slab( &mut self, render_device: &RenderDevice, @@ -785,7 +811,7 @@ impl MeshAllocator { slab_to_grow: SlabToReallocate, ) { let Some(Slab::General(slab)) = self.slabs.get_mut(&slab_id) else { - error!("Couldn't find slab {:?} to grow", slab_id); + error!("Couldn't find slab {} to grow", slab_id); return; }; @@ -804,38 +830,28 @@ impl MeshAllocator { slab_id, buffer_usages_to_str(buffer_usages) )), - size: slab.slot_capacity as u64 * slab.element_layout.slot_size(), + size: slab.current_slot_capacity as u64 * slab.element_layout.slot_size(), usage: buffer_usages, mapped_at_creation: false, }); slab.buffer = Some(new_buffer.clone()); + let Some(old_buffer) = old_buffer else { return }; + // In order to do buffer copies, we need a command encoder. 
let mut encoder = render_device.create_command_encoder(&CommandEncoderDescriptor { label: Some("slab resize encoder"), }); - // If we have no objects to copy over, we're done. - let Some(old_buffer) = old_buffer else { - return; - }; - - for (mesh_id, src_slab_allocation) in &mut slab.resident_allocations { - let Some(dest_slab_allocation) = slab_to_grow.allocations_to_copy.get(mesh_id) else { - continue; - }; - - encoder.copy_buffer_to_buffer( - &old_buffer, - src_slab_allocation.allocation.offset as u64 * slab.element_layout.slot_size(), - &new_buffer, - dest_slab_allocation.allocation.offset as u64 * slab.element_layout.slot_size(), - dest_slab_allocation.slot_count as u64 * slab.element_layout.slot_size(), - ); - // Now that we've done the copy, we can update the allocation record. - *src_slab_allocation = dest_slab_allocation.clone(); - } + // Copy the data from the old buffer into the new one. + encoder.copy_buffer_to_buffer( + &old_buffer, + 0, + &new_buffer, + 0, + slab_to_grow.old_slot_capacity as u64 * slab.element_layout.slot_size(), + ); let command_buffer = encoder.finish(); render_queue.submit([command_buffer]); @@ -862,7 +878,7 @@ impl MeshAllocator { } impl GeneralSlab { - /// Creates a new growable slab big enough to hold an single element of + /// Creates a new growable slab big enough to hold a single element of /// `data_slot_count` size with the given `layout`. 
fn new( new_slab_id: SlabId, @@ -871,16 +887,19 @@ impl GeneralSlab { layout: ElementLayout, data_slot_count: u32, ) -> GeneralSlab { - let slab_slot_capacity = (settings.min_slab_size.div_ceil(layout.slot_size()) as u32) + let initial_slab_slot_capacity = (settings.min_slab_size.div_ceil(layout.slot_size()) + as u32) + .max(offset_allocator::ext::min_allocator_size(data_slot_count)); + let max_slab_slot_capacity = (settings.max_slab_size.div_ceil(layout.slot_size()) as u32) .max(offset_allocator::ext::min_allocator_size(data_slot_count)); let mut new_slab = GeneralSlab { - allocator: Allocator::new(slab_slot_capacity), + allocator: Allocator::new(max_slab_slot_capacity), buffer: None, resident_allocations: HashMap::default(), pending_allocations: HashMap::default(), element_layout: layout, - slot_capacity: slab_slot_capacity, + current_slot_capacity: initial_slab_slot_capacity, }; // This should never fail. @@ -897,68 +916,40 @@ impl GeneralSlab { new_slab } - /// Attempts to grow a slab that's just run out of space. + /// Checks to see if the size of this slab is at least `new_size_in_slots` + /// and grows the slab if it isn't. /// - /// Returns a structure the allocations that need to be relocated if the - /// growth succeeded. If the slab is full, returns `Err`. - fn try_grow(&mut self, settings: &MeshAllocatorSettings) -> Result { - // In extremely rare cases due to allocator fragmentation, it may happen - // that we fail to re-insert every object that was in the slab after - // growing it. Even though this will likely never happen, we use this - // loop to handle this unlikely event properly if it does. - 'grow: loop { - let new_slab_slot_capacity = ((self.slot_capacity as f64 * settings.growth_factor) - .ceil() as u32) - .min((settings.max_slab_size / self.element_layout.slot_size()) as u32); - if new_slab_slot_capacity == self.slot_capacity { - // The slab is full. - return Err(()); - } - - // Grow the slab. 
- self.allocator = Allocator::new(new_slab_slot_capacity); - self.slot_capacity = new_slab_slot_capacity; - - let mut slab_to_grow = SlabToReallocate::default(); - - // Place every resident allocation that was in the old slab in the - // new slab. - for (allocated_mesh_id, old_allocation_range) in &self.resident_allocations { - let allocation_size = old_allocation_range.slot_count; - match self.allocator.allocate(allocation_size) { - Some(allocation) => { - slab_to_grow.allocations_to_copy.insert( - *allocated_mesh_id, - SlabAllocation { - allocation, - slot_count: allocation_size, - }, - ); - } - None => { - // We failed to insert one of the allocations that we - // had before. - continue 'grow; - } - } - } + /// The returned [`SlabGrowthResult`] describes whether the slab needed to + /// grow and whether, if so, it was successful in doing so. + fn grow_if_necessary( + &mut self, + new_size_in_slots: u32, + settings: &MeshAllocatorSettings, + ) -> SlabGrowthResult { + // Is the slab big enough already? + let initial_slot_capacity = self.current_slot_capacity; + if self.current_slot_capacity >= new_size_in_slots { + return SlabGrowthResult::NoGrowthNeeded; + } - // Move every allocation that was pending in the old slab to the new - // slab. - for slab_allocation in self.pending_allocations.values_mut() { - let allocation_size = slab_allocation.slot_count; - match self.allocator.allocate(allocation_size) { - Some(allocation) => slab_allocation.allocation = allocation, - None => { - // We failed to insert one of the allocations that we - // had before. - continue 'grow; - } - } + // Try to grow in increments of `MeshAllocatorSettings::growth_factor` + // until we're big enough. 
+ while self.current_slot_capacity < new_size_in_slots { + let new_slab_slot_capacity = + ((self.current_slot_capacity as f64 * settings.growth_factor).ceil() as u32) + .min((settings.max_slab_size / self.element_layout.slot_size()) as u32); + if new_slab_slot_capacity == self.current_slot_capacity { + // The slab is full. + return SlabGrowthResult::CantGrow; } - return Ok(slab_to_grow); + self.current_slot_capacity = new_slab_slot_capacity; } + + // Tell our caller what we did. + SlabGrowthResult::NeededGrowth(SlabToReallocate { + old_slot_capacity: initial_slot_capacity, + }) } } diff --git a/crates/bevy_render/src/mesh/components.rs b/crates/bevy_render/src/mesh/components.rs index 10229be41210d..000de324e3702 100644 --- a/crates/bevy_render/src/mesh/components.rs +++ b/crates/bevy_render/src/mesh/components.rs @@ -2,9 +2,13 @@ use crate::{ mesh::Mesh, view::{self, Visibility, VisibilityClass}, }; -use bevy_asset::{AssetId, Handle}; +use bevy_asset::{AsAssetId, AssetEvent, AssetId, Handle}; use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::{component::Component, prelude::require, reflect::ReflectComponent}; +use bevy_ecs::{ + change_detection::DetectChangesMut, component::Component, event::EventReader, + reflect::ReflectComponent, system::Query, +}; +use bevy_platform::{collections::HashSet, hash::FixedHasher}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_transform::components::Transform; use derive_more::derive::From; @@ -37,7 +41,7 @@ use derive_more::derive::From; /// } /// ``` #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone, PartialEq)] #[require(Transform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct Mesh2d(pub Handle); @@ -54,6 +58,14 @@ impl From<&Mesh2d> for AssetId { } } +impl AsAssetId for Mesh2d { + type Asset = Mesh; + + fn as_asset_id(&self) -> AssetId { + self.id() + } 
+} + /// A component for 3D meshes. Requires a [`MeshMaterial3d`] to be rendered, commonly using a [`StandardMaterial`]. /// /// [`MeshMaterial3d`]: @@ -85,7 +97,7 @@ impl From<&Mesh2d> for AssetId { /// } /// ``` #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone, PartialEq)] #[require(Transform, Visibility, VisibilityClass)] #[component(on_add = view::add_visibility_class::)] pub struct Mesh3d(pub Handle); @@ -101,3 +113,45 @@ impl From<&Mesh3d> for AssetId { mesh.id() } } + +impl AsAssetId for Mesh3d { + type Asset = Mesh; + + fn as_asset_id(&self) -> AssetId { + self.id() + } +} + +/// A system that marks a [`Mesh3d`] as changed if the associated [`Mesh`] asset +/// has changed. +/// +/// This is needed because the systems that extract meshes, such as +/// `extract_meshes_for_gpu_building`, write some metadata about the mesh (like +/// the location within each slab) into the GPU structures that they build that +/// needs to be kept up to date if the contents of the mesh change. +pub fn mark_3d_meshes_as_changed_if_their_assets_changed( + mut meshes_3d: Query<&mut Mesh3d>, + mut mesh_asset_events: EventReader>, +) { + let mut changed_meshes: HashSet, FixedHasher> = HashSet::default(); + for mesh_asset_event in mesh_asset_events.read() { + if let AssetEvent::Modified { id } = mesh_asset_event { + changed_meshes.insert(*id); + } + } + + if changed_meshes.is_empty() { + return; + } + + for mut mesh_3d in &mut meshes_3d { + if changed_meshes.contains(&mesh_3d.0.id()) { + mesh_3d.set_changed(); + } + } +} + +/// A component that stores an arbitrary index used to identify the mesh instance when rendering. 
+#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)] +#[reflect(Component, Default, Clone, PartialEq)] +pub struct MeshTag(pub u32); diff --git a/crates/bevy_render/src/mesh/mod.rs b/crates/bevy_render/src/mesh/mod.rs index 7a7829e0f4ef1..fbd530c14da42 100644 --- a/crates/bevy_render/src/mesh/mod.rs +++ b/crates/bevy_render/src/mesh/mod.rs @@ -1,4 +1,3 @@ -use bevy_hierarchy::Children; use bevy_math::Vec3; pub use bevy_mesh::*; use morph::{MeshMorphWeights, MorphWeights}; @@ -9,26 +8,52 @@ use crate::{ render_asset::{PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets}, render_resource::TextureView, texture::GpuImage, + view::VisibilitySystems, RenderApp, }; use allocator::MeshAllocatorPlugin; use bevy_app::{App, Plugin, PostUpdate}; -use bevy_asset::{AssetApp, AssetId, RenderAssetUsages}; +use bevy_asset::{AssetApp, AssetEvents, AssetId, RenderAssetUsages}; use bevy_ecs::{ - entity::Entity, - query::{Changed, With}, - system::Query, -}; -use bevy_ecs::{ - query::Without, + prelude::*, system::{ lifetimeless::{SRes, SResMut}, SystemParamItem, }, }; -pub use components::{Mesh2d, Mesh3d}; +pub use components::{mark_3d_meshes_as_changed_if_their_assets_changed, Mesh2d, Mesh3d, MeshTag}; use wgpu::IndexFormat; +/// Registers all [`MeshBuilder`] types. +pub struct MeshBuildersPlugin; + +impl Plugin for MeshBuildersPlugin { + fn build(&self, app: &mut App) { + // 2D Mesh builders + app.register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + // 3D Mesh builders + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::() + .register_type::(); + } +} + /// Adds the [`Mesh`] as an asset and makes sure that they are extracted and prepared for the GPU. 
pub struct MeshPlugin; @@ -40,9 +65,16 @@ impl Plugin for MeshPlugin { .register_type::() .register_type::() .register_type::>() + .add_plugins(MeshBuildersPlugin) // 'Mesh' must be prepared after 'Image' as meshes rely on the morph target image being ready .add_plugins(RenderAssetPlugin::::default()) - .add_plugins(MeshAllocatorPlugin); + .add_plugins(MeshAllocatorPlugin) + .add_systems( + PostUpdate, + mark_3d_meshes_as_changed_if_their_assets_changed + .ambiguous_with(VisibilitySystems::CalculateBounds) + .before(AssetEvents), + ); let Some(render_app) = app.get_sub_app_mut(RenderApp) else { return; @@ -130,6 +162,12 @@ impl RenderMesh { pub fn primitive_topology(&self) -> PrimitiveTopology { self.key_bits.primitive_topology() } + + /// Returns true if this mesh uses an index buffer or false otherwise. + #[inline] + pub fn indexed(&self) -> bool { + matches!(self.buffer_info, RenderMeshBufferInfo::Indexed { .. }) + } } /// The index/vertex buffer info of a [`RenderMesh`]. @@ -158,7 +196,7 @@ impl RenderAsset for RenderMesh { let mut vertex_size = 0; for attribute_data in mesh.attributes() { let vertex_format = attribute_data.0.format; - vertex_size += vertex_format.get_size() as usize; + vertex_size += vertex_format.size() as usize; } let vertex_count = mesh.count_vertices(); @@ -170,7 +208,7 @@ impl RenderAsset for RenderMesh { fn prepare_asset( mesh: Self::SourceAsset, _: AssetId, - (images, ref mut mesh_vertex_buffer_layouts): &mut SystemParamItem, + (images, mesh_vertex_buffer_layouts): &mut SystemParamItem, ) -> Result> { let morph_targets = match mesh.morph_targets() { Some(mt) => { diff --git a/crates/bevy_render/src/pipelined_rendering.rs b/crates/bevy_render/src/pipelined_rendering.rs index 41279e7d25db1..fb665e469d559 100644 --- a/crates/bevy_render/src/pipelined_rendering.rs +++ b/crates/bevy_render/src/pipelined_rendering.rs @@ -2,8 +2,8 @@ use async_channel::{Receiver, Sender}; use bevy_app::{App, AppExit, AppLabel, Plugin, SubApp}; use bevy_ecs::{ 
+ resource::Resource, schedule::MainThreadExecutor, - system::Resource, world::{Mut, World}, }; use bevy_tasks::ComputeTaskPool; @@ -92,14 +92,14 @@ impl Drop for RenderAppChannels { /// ``` /// /// - `sync` is the step where the entity-entity mapping between the main and render world is updated. -/// This is run on the main app's thread. For more information checkout [`SyncWorldPlugin`]. +/// This is run on the main app's thread. For more information checkout [`SyncWorldPlugin`]. /// - `extract` is the step where data is copied from the main world to the render world. -/// This is run on the main app's thread. +/// This is run on the main app's thread. /// - On the render thread, we first apply the `extract commands`. This is not run during extract, so the -/// main schedule can start sooner. +/// main schedule can start sooner. /// - Then the `rendering schedule` is run. See [`RenderSet`](crate::RenderSet) for the standard steps in this process. /// - In parallel to the rendering thread the [`RenderExtractApp`] schedule runs. By -/// default, this schedule is empty. But it is useful if you need something to run before I/O processing. +/// default, this schedule is empty. But it is useful if you need something to run before I/O processing. /// - Next all the `winit events` are processed. /// - And finally the `main app schedule` is run. /// - Once both the `main app schedule` and the `render schedule` are finished running, `extract` is run again. 
@@ -148,7 +148,7 @@ impl Plugin for PipelinedRenderingPlugin { std::thread::spawn(move || { #[cfg(feature = "trace")] - let _span = bevy_utils::tracing::info_span!("render thread").entered(); + let _span = tracing::info_span!("render thread").entered(); let compute_task_pool = ComputeTaskPool::get(); loop { @@ -164,8 +164,7 @@ impl Plugin for PipelinedRenderingPlugin { { #[cfg(feature = "trace")] - let _sub_app_span = - bevy_utils::tracing::info_span!("sub app", name = ?RenderApp).entered(); + let _sub_app_span = tracing::info_span!("sub app", name = ?RenderApp).entered(); render_app.update(); } @@ -174,7 +173,7 @@ impl Plugin for PipelinedRenderingPlugin { } } - bevy_utils::tracing::debug!("exiting pipelined rendering thread"); + tracing::debug!("exiting pipelined rendering thread"); }); } } diff --git a/crates/bevy_render/src/primitives/mod.rs b/crates/bevy_render/src/primitives/mod.rs index 48c0652fef3b9..ca664fc338c77 100644 --- a/crates/bevy_render/src/primitives/mod.rs +++ b/crates/bevy_render/src/primitives/mod.rs @@ -7,7 +7,7 @@ use bevy_reflect::prelude::*; /// An axis-aligned bounding box, defined by: /// - a center, /// - the distances from the center to each faces along the axis, -/// the faces are orthogonal to the axis. +/// the faces are orthogonal to the axis. /// /// It is typically used as a component on an entity to represent the local space /// occupied by this entity, with faces orthogonal to its local axis. @@ -18,7 +18,7 @@ use bevy_reflect::prelude::*; /// /// It will be added automatically by the systems in [`CalculateBounds`] to entities that: /// - could be subject to frustum culling, for example with a [`Mesh3d`] -/// or `Sprite` component, +/// or `Sprite` component, /// - don't have the [`NoFrustumCulling`] component. 
/// /// It won't be updated automatically if the space occupied by the entity changes, @@ -29,7 +29,7 @@ use bevy_reflect::prelude::*; /// [`CalculateBounds`]: crate::view::visibility::VisibilitySystems::CalculateBounds /// [`Mesh3d`]: crate::mesh::Mesh #[derive(Component, Clone, Copy, Debug, Default, Reflect, PartialEq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct Aabb { pub center: Vec3A, pub half_extents: Vec3A, @@ -224,9 +224,9 @@ impl HalfSpace { /// [`CameraProjection`]: crate::camera::CameraProjection /// [`GlobalTransform`]: bevy_transform::components::GlobalTransform #[derive(Component, Clone, Copy, Debug, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct Frustum { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub half_spaces: [HalfSpace; 6], } @@ -327,9 +327,9 @@ impl Frustum { } #[derive(Component, Clone, Debug, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct CubemapFrusta { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub frusta: [Frustum; 6], } @@ -343,9 +343,9 @@ impl CubemapFrusta { } #[derive(Component, Debug, Default, Reflect, Clone)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct CascadesFrusta { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub frusta: EntityHashMap>, } diff --git a/crates/bevy_render/src/render_asset.rs b/crates/bevy_render/src/render_asset.rs index 2757dceb9fa81..6626cb7797947 100644 --- a/crates/bevy_render/src/render_asset.rs +++ b/crates/bevy_render/src/render_asset.rs @@ -1,22 +1,21 @@ use crate::{ - render_resource::AsBindGroupError, ExtractSchedule, MainWorld, Render, RenderApp, RenderSet, + render_resource::AsBindGroupError, Extract, ExtractSchedule, MainWorld, Render, RenderApp, + RenderSet, Res, }; use bevy_app::{App, Plugin, SubApp}; pub use 
bevy_asset::RenderAssetUsages; use bevy_asset::{Asset, AssetEvent, AssetId, Assets}; use bevy_ecs::{ - prelude::{Commands, EventReader, IntoSystemConfigs, ResMut, Resource}, - schedule::{SystemConfigs, SystemSet}, - system::{StaticSystemParam, SystemParam, SystemParamItem, SystemState}, + prelude::{Commands, EventReader, IntoScheduleConfigs, ResMut, Resource}, + schedule::{ScheduleConfigs, SystemSet}, + system::{ScheduleSystem, StaticSystemParam, SystemParam, SystemParamItem, SystemState}, world::{FromWorld, Mut}, }; -use bevy_render_macros::ExtractResource; -use bevy_utils::{ - tracing::{debug, error}, - HashMap, HashSet, -}; +use bevy_platform::collections::{HashMap, HashSet}; use core::marker::PhantomData; +use core::sync::atomic::{AtomicUsize, Ordering}; use thiserror::Error; +use tracing::{debug, error}; #[derive(Debug, Error)] pub enum PrepareAssetError { @@ -55,7 +54,10 @@ pub trait RenderAsset: Send + Sync + 'static + Sized { /// Size of the data the asset will upload to the gpu. Specifying a return value /// will allow the asset to be throttled via [`RenderAssetBytesPerFrame`]. #[inline] - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." 
+ )] fn byte_len(source_asset: &Self::SourceAsset) -> Option { None } @@ -130,17 +132,17 @@ impl Plugin // helper to allow specifying dependencies between render assets pub trait RenderAssetDependency { - fn register_system(render_app: &mut SubApp, system: SystemConfigs); + fn register_system(render_app: &mut SubApp, system: ScheduleConfigs); } impl RenderAssetDependency for () { - fn register_system(render_app: &mut SubApp, system: SystemConfigs) { + fn register_system(render_app: &mut SubApp, system: ScheduleConfigs) { render_app.add_systems(Render, system); } } impl RenderAssetDependency for A { - fn register_system(render_app: &mut SubApp, system: SystemConfigs) { + fn register_system(render_app: &mut SubApp, system: ScheduleConfigs) { render_app.add_systems(Render, system.after(prepare_assets::)); } } @@ -149,14 +151,19 @@ impl RenderAssetDependency for A { #[derive(Resource)] pub struct ExtractedAssets { /// The assets extracted this frame. + /// + /// These are assets that were either added or modified this frame. pub extracted: Vec<(AssetId, A::SourceAsset)>, - /// IDs of the assets removed this frame. + /// IDs of the assets that were removed this frame. /// /// These assets will not be present in [`ExtractedAssets::extracted`]. pub removed: HashSet>, - /// IDs of the assets added this frame. + /// IDs of the assets that were modified this frame. + pub modified: HashSet>, + + /// IDs of the assets that were added this frame. 
pub added: HashSet>, } @@ -165,6 +172,7 @@ impl Default for ExtractedAssets { Self { extracted: Default::default(), removed: Default::default(), + modified: Default::default(), added: Default::default(), } } @@ -233,18 +241,30 @@ pub(crate) fn extract_render_asset( |world, mut cached_state: Mut>| { let (mut events, mut assets) = cached_state.state.get_mut(world); - let mut changed_assets = >::default(); + let mut needs_extracting = >::default(); let mut removed = >::default(); + let mut modified = >::default(); for event in events.read() { - #[allow(clippy::match_same_arms)] + #[expect( + clippy::match_same_arms, + reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon." + )] match event { - AssetEvent::Added { id } | AssetEvent::Modified { id } => { - changed_assets.insert(*id); + AssetEvent::Added { id } => { + needs_extracting.insert(*id); + } + AssetEvent::Modified { id } => { + needs_extracting.insert(*id); + modified.insert(*id); + } + AssetEvent::Removed { .. } => { + // We don't care that the asset was removed from Assets in the main world. + // An asset is only removed from RenderAssets when its last handle is dropped (AssetEvent::Unused). } - AssetEvent::Removed { .. } => {} AssetEvent::Unused { id } => { - changed_assets.remove(id); + needs_extracting.remove(id); + modified.remove(id); removed.insert(*id); } AssetEvent::LoadedWithDependencies { .. 
} => { @@ -255,7 +275,7 @@ pub(crate) fn extract_render_asset( let mut extracted_assets = Vec::new(); let mut added = >::default(); - for id in changed_assets.drain() { + for id in needs_extracting.drain() { if let Some(asset) = assets.get(id) { let asset_usage = A::asset_usage(asset); if asset_usage.contains(RenderAssetUsages::RENDER_WORLD) { @@ -275,6 +295,7 @@ pub(crate) fn extract_render_asset( commands.insert_resource(ExtractedAssets:: { extracted: extracted_assets, removed, + modified, added, }); cached_state.state.apply(world); @@ -304,7 +325,7 @@ pub fn prepare_assets( mut render_assets: ResMut>, mut prepare_next_frame: ResMut>, param: StaticSystemParam<::Param>, - mut bpf: ResMut, + bpf: Res, ) { let mut wrote_asset_count = 0; @@ -397,54 +418,94 @@ pub fn prepare_assets( } } -/// A resource that attempts to limit the amount of data transferred from cpu to gpu -/// each frame, preventing choppy frames at the cost of waiting longer for gpu assets -/// to become available -#[derive(Resource, Default, Debug, Clone, Copy, ExtractResource)] +pub fn reset_render_asset_bytes_per_frame( + mut bpf_limiter: ResMut, +) { + bpf_limiter.reset(); +} + +pub fn extract_render_asset_bytes_per_frame( + bpf: Extract>, + mut bpf_limiter: ResMut, +) { + bpf_limiter.max_bytes = bpf.max_bytes; +} + +/// A resource that defines the amount of data allowed to be transferred from CPU to GPU +/// each frame, preventing choppy frames at the cost of waiting longer for GPU assets +/// to become available. +#[derive(Resource, Default)] pub struct RenderAssetBytesPerFrame { pub max_bytes: Option, - pub available: usize, } impl RenderAssetBytesPerFrame { /// `max_bytes`: the number of bytes to write per frame. - /// this is a soft limit: only full assets are written currently, uploading stops + /// + /// This is a soft limit: only full assets are written currently, uploading stops /// after the first asset that exceeds the limit. 
+ /// /// To participate, assets should implement [`RenderAsset::byte_len`]. If the default /// is not overridden, the assets are assumed to be small enough to upload without restriction. pub fn new(max_bytes: usize) -> Self { Self { max_bytes: Some(max_bytes), - available: 0, } } +} - /// Reset the available bytes. Called once per frame by the [`crate::RenderPlugin`]. +/// A render-world resource that facilitates limiting the data transferred from CPU to GPU +/// each frame, preventing choppy frames at the cost of waiting longer for GPU assets +/// to become available. +#[derive(Resource, Default)] +pub struct RenderAssetBytesPerFrameLimiter { + /// Populated by [`RenderAssetBytesPerFrame`] during extraction. + pub max_bytes: Option, + /// Bytes written this frame. + pub bytes_written: AtomicUsize, +} + +impl RenderAssetBytesPerFrameLimiter { + /// Reset the available bytes. Called once per frame during extraction by [`crate::RenderPlugin`]. pub fn reset(&mut self) { - self.available = self.max_bytes.unwrap_or(usize::MAX); + if self.max_bytes.is_none() { + return; + } + self.bytes_written.store(0, Ordering::Relaxed); } - /// check how many bytes are available since the last reset + /// Check how many bytes are available for writing. pub fn available_bytes(&self, required_bytes: usize) -> usize { - if self.max_bytes.is_none() { - return required_bytes; + if let Some(max_bytes) = self.max_bytes { + let total_bytes = self + .bytes_written + .fetch_add(required_bytes, Ordering::Relaxed); + + // The bytes available is the inverse of the amount we overshot max_bytes + if total_bytes >= max_bytes { + required_bytes.saturating_sub(total_bytes - max_bytes) + } else { + required_bytes + } + } else { + required_bytes } - - required_bytes.min(self.available) } - /// decrease the available bytes for the current frame - fn write_bytes(&mut self, bytes: usize) { - if self.max_bytes.is_none() { - return; + /// Decreases the available bytes for the current frame. 
+ fn write_bytes(&self, bytes: usize) { + if self.max_bytes.is_some() && bytes > 0 { + self.bytes_written.fetch_add(bytes, Ordering::Relaxed); } - - let write_bytes = bytes.min(self.available); - self.available -= write_bytes; } - // check if any bytes remain available for writing this frame + /// Returns `true` if there are no remaining bytes available for writing this frame. fn exhausted(&self) -> bool { - self.max_bytes.is_some() && self.available == 0 + if let Some(max_bytes) = self.max_bytes { + let bytes_written = self.bytes_written.load(Ordering::Relaxed); + bytes_written >= max_bytes + } else { + false + } } } diff --git a/crates/bevy_render/src/render_graph/app.rs b/crates/bevy_render/src/render_graph/app.rs index 80ffcdb2a1f8d..338ae75d7a284 100644 --- a/crates/bevy_render/src/render_graph/app.rs +++ b/crates/bevy_render/src/render_graph/app.rs @@ -1,6 +1,6 @@ use bevy_app::{App, SubApp}; use bevy_ecs::world::FromWorld; -use bevy_utils::tracing::warn; +use tracing::warn; use super::{IntoRenderNodeArray, Node, RenderGraph, RenderLabel, RenderSubGraph}; diff --git a/crates/bevy_render/src/render_graph/context.rs b/crates/bevy_render/src/render_graph/context.rs index c27f269d0ba45..4c6ecd30c1d67 100644 --- a/crates/bevy_render/src/render_graph/context.rs +++ b/crates/bevy_render/src/render_graph/context.rs @@ -3,10 +3,10 @@ use crate::{ render_resource::{Buffer, Sampler, TextureView}, }; use alloc::borrow::Cow; -use bevy_ecs::entity::Entity; +use bevy_ecs::{entity::Entity, intern::Interned}; use thiserror::Error; -use super::{InternedRenderSubGraph, RenderSubGraph}; +use super::{InternedRenderSubGraph, RenderLabel, RenderSubGraph}; /// A command that signals the graph runner to run the sub graph corresponding to the `sub_graph` /// with the specified `inputs` next. @@ -224,6 +224,11 @@ impl<'a> RenderGraphContext<'a> { Ok(()) } + /// Returns a human-readable label for this node, for debugging purposes. 
+ pub fn label(&self) -> Interned { + self.node.label + } + /// Finishes the context for this [`Node`](super::Node) by /// returning the sub graphs to run next. pub fn finish(self) -> Vec { diff --git a/crates/bevy_render/src/render_graph/graph.rs b/crates/bevy_render/src/render_graph/graph.rs index 4b315e22a0869..d1a7020bcfcab 100644 --- a/crates/bevy_render/src/render_graph/graph.rs +++ b/crates/bevy_render/src/render_graph/graph.rs @@ -5,8 +5,8 @@ use crate::{ }, renderer::RenderContext, }; -use bevy_ecs::{define_label, intern::Interned, prelude::World, system::Resource}; -use bevy_utils::HashMap; +use bevy_ecs::{define_label, intern::Interned, prelude::World, resource::Resource}; +use bevy_platform::collections::HashMap; use core::fmt::Debug; use super::{EdgeExistence, InternedRenderLabel, IntoRenderNodeArray}; @@ -14,6 +14,9 @@ use super::{EdgeExistence, InternedRenderLabel, IntoRenderNodeArray}; pub use bevy_render_macros::RenderSubGraph; define_label!( + #[diagnostic::on_unimplemented( + note = "consider annotating `{Self}` with `#[derive(RenderSubGraph)]`" + )] /// A strongly-typed class of labels used to identify a [`SubGraph`] in a render graph. 
RenderSubGraph, RENDER_SUB_GRAPH_INTERNER @@ -677,7 +680,7 @@ mod tests { renderer::RenderContext, }; use bevy_ecs::world::{FromWorld, World}; - use bevy_utils::HashSet; + use bevy_platform::collections::HashSet; #[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)] enum TestLabel { diff --git a/crates/bevy_render/src/render_graph/node.rs b/crates/bevy_render/src/render_graph/node.rs index 91775fef7ccef..0a634c2598461 100644 --- a/crates/bevy_render/src/render_graph/node.rs +++ b/crates/bevy_render/src/render_graph/node.rs @@ -23,6 +23,9 @@ pub use bevy_render_macros::RenderLabel; use super::{InternedRenderSubGraph, RenderSubGraph}; define_label!( + #[diagnostic::on_unimplemented( + note = "consider annotating `{Self}` with `#[derive(RenderLabel)]`" + )] /// A strongly-typed class of labels used to identify a [`Node`] in a render graph. RenderLabel, RENDER_LABEL_INTERNER @@ -238,7 +241,7 @@ pub struct NodeState { impl Debug for NodeState { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - writeln!(f, "{:?} ({:?})", self.label, self.type_name) + writeln!(f, "{:?} ({})", self.label, self.type_name) } } diff --git a/crates/bevy_render/src/render_phase/draw.rs b/crates/bevy_render/src/render_phase/draw.rs index 6f45e1b39783f..a12d336018508 100644 --- a/crates/bevy_render/src/render_phase/draw.rs +++ b/crates/bevy_render/src/render_phase/draw.rs @@ -3,7 +3,8 @@ use bevy_app::{App, SubApp}; use bevy_ecs::{ entity::Entity, query::{QueryEntityError, QueryState, ROQueryItem, ReadOnlyQueryData}, - system::{ReadOnlySystemParam, Resource, SystemParam, SystemParamItem, SystemState}, + resource::Resource, + system::{ReadOnlySystemParam, SystemParam, SystemParamItem, SystemState}, world::World, }; use bevy_utils::TypeIdMap; @@ -22,7 +23,10 @@ pub trait Draw: Send + Sync + 'static { /// Prepares the draw function to be used. This is called once and only once before the phase /// begins. 
There may be zero or more [`draw`](Draw::draw) calls following a call to this function. /// Implementing this is optional. - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." + )] fn prepare(&mut self, world: &'_ World) {} /// Draws a [`PhaseItem`] by issuing zero or more `draw` calls via the [`TrackedRenderPass`]. @@ -232,7 +236,14 @@ macro_rules! render_command_tuple_impl { type ViewQuery = ($($name::ViewQuery,)*); type ItemQuery = ($($name::ItemQuery,)*); - #[allow(non_snake_case)] + #[expect( + clippy::allow_attributes, + reason = "We are in a macro; as such, `non_snake_case` may not always lint." + )] + #[allow( + non_snake_case, + reason = "Parameter and variable names are provided by the macro invocation, not by us." + )] fn render<'w>( _item: &P, ($($view,)*): ROQueryItem<'w, Self::ViewQuery>, @@ -321,7 +332,9 @@ where let view = match self.view.get_manual(world, view) { Ok(view) => view, Err(err) => match err { - QueryEntityError::NoSuchEntity(_, _) => return Err(DrawError::ViewEntityNotFound), + QueryEntityError::EntityDoesNotExist(_) => { + return Err(DrawError::ViewEntityNotFound) + } QueryEntityError::QueryDoesNotMatch(_, _) | QueryEntityError::AliasedMutability(_) => { return Err(DrawError::InvalidViewQuery) diff --git a/crates/bevy_render/src/render_phase/draw_state.rs b/crates/bevy_render/src/render_phase/draw_state.rs index 4ba3e410870a5..a7b8acdc00393 100644 --- a/crates/bevy_render/src/render_phase/draw_state.rs +++ b/crates/bevy_render/src/render_phase/draw_state.rs @@ -8,10 +8,13 @@ use crate::{ renderer::RenderDevice, }; use bevy_color::LinearRgba; -use bevy_utils::{default, detailed_trace}; +use bevy_utils::default; use core::ops::Range; use wgpu::{IndexFormat, QuerySet, RenderPass}; +#[cfg(feature = "detailed_trace")] +use 
tracing::trace; + /// Tracks the state of a [`TrackedRenderPass`]. /// /// This is used to skip redundant operations on the [`TrackedRenderPass`] (e.g. setting an already @@ -164,7 +167,8 @@ impl<'a> TrackedRenderPass<'a> { /// /// Subsequent draw calls will exhibit the behavior defined by the `pipeline`. pub fn set_render_pipeline(&mut self, pipeline: &'a RenderPipeline) { - detailed_trace!("set pipeline: {:?}", pipeline); + #[cfg(feature = "detailed_trace")] + trace!("set pipeline: {:?}", pipeline); if self.state.is_pipeline_set(pipeline.id()) { return; } @@ -189,7 +193,8 @@ impl<'a> TrackedRenderPass<'a> { .state .is_bind_group_set(index, bind_group.id(), dynamic_uniform_indices) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "set bind_group {} (already set): {:?} ({:?})", index, bind_group, @@ -197,7 +202,8 @@ impl<'a> TrackedRenderPass<'a> { ); return; } - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "set bind_group {}: {:?} ({:?})", index, bind_group, @@ -222,7 +228,8 @@ impl<'a> TrackedRenderPass<'a> { /// [`draw_indexed`]: TrackedRenderPass::draw_indexed pub fn set_vertex_buffer(&mut self, slot_index: usize, buffer_slice: BufferSlice<'a>) { if self.state.is_vertex_buffer_set(slot_index, &buffer_slice) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "set vertex buffer {} (already set): {:?} (offset = {}, size = {})", slot_index, buffer_slice.id(), @@ -231,7 +238,8 @@ impl<'a> TrackedRenderPass<'a> { ); return; } - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "set vertex buffer {}: {:?} (offset = {}, size = {})", slot_index, buffer_slice.id(), @@ -258,14 +266,16 @@ impl<'a> TrackedRenderPass<'a> { .state .is_index_buffer_set(buffer_slice.id(), offset, index_format) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "set index buffer (already set): {:?} ({})", buffer_slice.id(), offset ); return; } - detailed_trace!("set index buffer: {:?} ({})", buffer_slice.id(), 
offset); + #[cfg(feature = "detailed_trace")] + trace!("set index buffer: {:?} ({})", buffer_slice.id(), offset); self.pass.set_index_buffer(*buffer_slice, index_format); self.state .set_index_buffer(buffer_slice.id(), offset, index_format); @@ -275,7 +285,8 @@ impl<'a> TrackedRenderPass<'a> { /// /// The active vertex buffer(s) can be set with [`TrackedRenderPass::set_vertex_buffer`]. pub fn draw(&mut self, vertices: Range, instances: Range) { - detailed_trace!("draw: {:?} {:?}", vertices, instances); + #[cfg(feature = "detailed_trace")] + trace!("draw: {:?} {:?}", vertices, instances); self.pass.draw(vertices, instances); } @@ -284,7 +295,8 @@ impl<'a> TrackedRenderPass<'a> { /// The active index buffer can be set with [`TrackedRenderPass::set_index_buffer`], while the /// active vertex buffer(s) can be set with [`TrackedRenderPass::set_vertex_buffer`]. pub fn draw_indexed(&mut self, indices: Range, base_vertex: i32, instances: Range) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "draw indexed: {:?} {} {:?}", indices, base_vertex, @@ -311,7 +323,8 @@ impl<'a> TrackedRenderPass<'a> { /// } /// ``` pub fn draw_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: u64) { - detailed_trace!("draw indirect: {:?} {}", indirect_buffer, indirect_offset); + #[cfg(feature = "detailed_trace")] + trace!("draw indirect: {:?} {}", indirect_buffer, indirect_offset); self.pass.draw_indirect(indirect_buffer, indirect_offset); } @@ -335,7 +348,8 @@ impl<'a> TrackedRenderPass<'a> { /// } /// ``` pub fn draw_indexed_indirect(&mut self, indirect_buffer: &'a Buffer, indirect_offset: u64) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "draw indexed indirect: {:?} {}", indirect_buffer, indirect_offset @@ -367,7 +381,8 @@ impl<'a> TrackedRenderPass<'a> { indirect_offset: u64, count: u32, ) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "multi draw indirect: {:?} {}, {}x", indirect_buffer, indirect_offset, @@ 
-407,7 +422,8 @@ impl<'a> TrackedRenderPass<'a> { count_offset: u64, max_count: u32, ) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "multi draw indirect count: {:?} {}, ({:?} {})x, max {}x", indirect_buffer, indirect_offset, @@ -449,7 +465,8 @@ impl<'a> TrackedRenderPass<'a> { indirect_offset: u64, count: u32, ) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "multi draw indexed indirect: {:?} {}, {}x", indirect_buffer, indirect_offset, @@ -491,7 +508,8 @@ impl<'a> TrackedRenderPass<'a> { count_offset: u64, max_count: u32, ) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "multi draw indexed indirect count: {:?} {}, ({:?} {})x, max {}x", indirect_buffer, indirect_offset, @@ -512,7 +530,8 @@ impl<'a> TrackedRenderPass<'a> { /// /// Subsequent stencil tests will test against this value. pub fn set_stencil_reference(&mut self, reference: u32) { - detailed_trace!("set stencil reference: {}", reference); + #[cfg(feature = "detailed_trace")] + trace!("set stencil reference: {}", reference); self.pass.set_stencil_reference(reference); } @@ -520,7 +539,8 @@ impl<'a> TrackedRenderPass<'a> { /// /// Subsequent draw calls will discard any fragments that fall outside this region. pub fn set_scissor_rect(&mut self, x: u32, y: u32, width: u32, height: u32) { - detailed_trace!("set_scissor_rect: {} {} {} {}", x, y, width, height); + #[cfg(feature = "detailed_trace")] + trace!("set_scissor_rect: {} {} {} {}", x, y, width, height); self.pass.set_scissor_rect(x, y, width, height); } @@ -528,7 +548,8 @@ impl<'a> TrackedRenderPass<'a> { /// /// `Features::PUSH_CONSTANTS` must be enabled on the device in order to call these functions. 
pub fn set_push_constants(&mut self, stages: ShaderStages, offset: u32, data: &[u8]) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "set push constants: {:?} offset: {} data.len: {}", stages, offset, @@ -549,7 +570,8 @@ impl<'a> TrackedRenderPass<'a> { min_depth: f32, max_depth: f32, ) { - detailed_trace!( + #[cfg(feature = "detailed_trace")] + trace!( "set viewport: {} {} {} {} {} {}", x, y, @@ -580,7 +602,8 @@ impl<'a> TrackedRenderPass<'a> { /// /// This is a GPU debugging feature. This has no effect on the rendering itself. pub fn insert_debug_marker(&mut self, label: &str) { - detailed_trace!("insert debug marker: {}", label); + #[cfg(feature = "detailed_trace")] + trace!("insert debug marker: {}", label); self.pass.insert_debug_marker(label); } @@ -605,7 +628,8 @@ impl<'a> TrackedRenderPass<'a> { /// [`push_debug_group`]: TrackedRenderPass::push_debug_group /// [`pop_debug_group`]: TrackedRenderPass::pop_debug_group pub fn push_debug_group(&mut self, label: &str) { - detailed_trace!("push_debug_group marker: {}", label); + #[cfg(feature = "detailed_trace")] + trace!("push_debug_group marker: {}", label); self.pass.push_debug_group(label); } @@ -622,7 +646,8 @@ impl<'a> TrackedRenderPass<'a> { /// [`push_debug_group`]: TrackedRenderPass::push_debug_group /// [`pop_debug_group`]: TrackedRenderPass::pop_debug_group pub fn pop_debug_group(&mut self) { - detailed_trace!("pop_debug_group"); + #[cfg(feature = "detailed_trace")] + trace!("pop_debug_group"); self.pass.pop_debug_group(); } @@ -630,7 +655,8 @@ impl<'a> TrackedRenderPass<'a> { /// /// Subsequent blending tests will test against this value. 
pub fn set_blend_constant(&mut self, color: LinearRgba) { - detailed_trace!("set blend constant: {:?}", color); + #[cfg(feature = "detailed_trace")] + trace!("set blend constant: {:?}", color); self.pass.set_blend_constant(wgpu::Color::from(color)); } } diff --git a/crates/bevy_render/src/render_phase/mod.rs b/crates/bevy_render/src/render_phase/mod.rs index 5899bc9c01ea2..a4eb4a944f4f3 100644 --- a/crates/bevy_render/src/render_phase/mod.rs +++ b/crates/bevy_render/src/render_phase/mod.rs @@ -30,15 +30,27 @@ mod rangefinder; use bevy_app::{App, Plugin}; use bevy_derive::{Deref, DerefMut}; -use bevy_utils::{default, hashbrown::hash_map::Entry, HashMap}; +use bevy_ecs::component::Tick; +use bevy_ecs::entity::EntityHash; +use bevy_platform::collections::{hash_map::Entry, HashMap}; +use bevy_utils::default; pub use draw::*; pub use draw_state::*; use encase::{internal::WriteInto, ShaderSize}; +use fixedbitset::{Block, FixedBitSet}; +use indexmap::IndexMap; use nonmax::NonMaxU32; pub use rangefinder::*; +use wgpu::Features; -use crate::batching::gpu_preprocessing::GpuPreprocessingMode; -use crate::sync_world::MainEntity; +use crate::batching::gpu_preprocessing::{ + GpuPreprocessingMode, GpuPreprocessingSupport, PhaseBatchedInstanceBuffers, + PhaseIndirectParametersBuffers, +}; +use crate::renderer::RenderDevice; +use crate::sync_world::{MainEntity, MainEntityHashMap}; +use crate::view::RetainedViewEntity; +use crate::RenderDebugFlags; use crate::{ batching::{ self, @@ -50,12 +62,12 @@ use crate::{ Render, RenderApp, RenderSet, }; use bevy_ecs::{ - entity::EntityHashMap, prelude::*, system::{lifetimeless::SRes, SystemParamItem}, }; use core::{fmt::Debug, hash::Hash, iter, marker::PhantomData, ops::Range, slice::SliceIndex}; use smallvec::SmallVec; +use tracing::warn; /// Stores the rendering instructions for a single phase that uses bins in all /// views. 
@@ -63,7 +75,7 @@ use smallvec::SmallVec; /// They're cleared out every frame, but storing them in a resource like this /// allows us to reuse allocations. #[derive(Resource, Deref, DerefMut)] -pub struct ViewBinnedRenderPhases(pub EntityHashMap>) +pub struct ViewBinnedRenderPhases(pub HashMap>) where BPI: BinnedPhaseItem; @@ -85,28 +97,31 @@ pub struct BinnedRenderPhase where BPI: BinnedPhaseItem, { - /// A list of `BinKey`s for batchable items. + /// The multidrawable bins. /// - /// These are accumulated in `queue_material_meshes` and then sorted in - /// `batch_and_prepare_binned_render_phase`. - pub batchable_mesh_keys: Vec, - - /// The batchable bins themselves. + /// Each batch set key maps to a *batch set*, which in this case is a set of + /// meshes that can be drawn together in one multidraw call. Each batch set + /// is subdivided into *bins*, each of which represents a particular mesh. + /// Each bin contains the entity IDs of instances of that mesh. /// - /// Each bin corresponds to a single batch set. For unbatchable entities, - /// prefer `unbatchable_values` instead. - pub batchable_mesh_values: HashMap>, - - /// A list of `BinKey`s for unbatchable items. + /// So, for example, if there are two cubes and a sphere present in the + /// scene, we would generally have one batch set containing two bins, + /// assuming that the cubes and sphere meshes are allocated together and use + /// the same pipeline. The first bin, corresponding to the cubes, will have + /// two entities in it. The second bin, corresponding to the sphere, will + /// have one entity in it. + pub multidrawable_meshes: IndexMap>, + + /// The bins corresponding to batchable items that aren't multidrawable. /// - /// These are accumulated in `queue_material_meshes` and then sorted in - /// `batch_and_prepare_binned_render_phase`. - pub unbatchable_mesh_keys: Vec, + /// For multidrawable entities, use `multidrawable_meshes`; for + /// unbatchable entities, use `unbatchable_values`. 
+ pub batchable_meshes: IndexMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, /// The unbatchable bins. /// /// Each entity here is rendered in a separate drawcall. - pub unbatchable_mesh_values: HashMap, + pub unbatchable_meshes: IndexMap<(BPI::BatchSetKey, BPI::BinKey), UnbatchableBinnedEntities>, /// Items in the bin that aren't meshes at all. /// @@ -115,7 +130,7 @@ where /// entity are simply called in order at rendering time. /// /// See the `custom_phase_item` example for an example of how to use this. - pub non_mesh_items: Vec<(BPI::BinKey, (Entity, MainEntity))>, + pub non_mesh_items: IndexMap<(BPI::BatchSetKey, BPI::BinKey), NonMeshEntities>, /// Information on each batch set. /// @@ -125,15 +140,125 @@ where /// platforms that support storage buffers, a batch set always consists of /// at most one batch. /// - /// The unbatchable entities immediately follow the batches in the storage - /// buffers. - pub(crate) batch_sets: BinnedRenderPhaseBatchSets, + /// Multidrawable entities come first, then batchable entities, then + /// unbatchable entities. + pub(crate) batch_sets: BinnedRenderPhaseBatchSets, + + /// The batch and bin key for each entity. + /// + /// We retain these so that, when the entity changes, + /// [`Self::sweep_old_entities`] can quickly find the bin it was located in + /// and remove it. + cached_entity_bin_keys: IndexMap, EntityHash>, + + /// The set of indices in [`Self::cached_entity_bin_keys`] that are + /// confirmed to be up to date. + /// + /// Note that each bit in this bit set refers to an *index* in the + /// [`IndexMap`] (i.e. a bucket in the hash table). They aren't entity IDs. + valid_cached_entity_bin_keys: FixedBitSet, + + /// The set of entities that changed bins this frame. + /// + /// An entity will only be present in this list if it was in one bin on the + /// previous frame and is in a new bin on this frame. Each list entry + /// specifies the bin the entity used to be in. 
We use this in order to + /// remove the entity from the old bin during + /// [`BinnedRenderPhase::sweep_old_entities`]. + entities_that_changed_bins: Vec>, + /// The gpu preprocessing mode configured for the view this phase is associated + /// with. + gpu_preprocessing_mode: GpuPreprocessingMode, +} + +/// All entities that share a mesh and a material and can be batched as part of +/// a [`BinnedRenderPhase`]. +#[derive(Default)] +pub struct RenderBin { + /// A list of the entities in each bin, along with their cached + /// [`InputUniformIndex`]. + entities: IndexMap, +} + +/// Information that we track about an entity that was in one bin on the +/// previous frame and is in a different bin this frame. +struct EntityThatChangedBins +where + BPI: BinnedPhaseItem, +{ + /// The entity. + main_entity: MainEntity, + /// The key that identifies the bin that this entity used to be in. + old_cached_binned_entity: CachedBinnedEntity, +} + +/// Information that we keep about an entity currently within a bin. +pub struct CachedBinnedEntity +where + BPI: BinnedPhaseItem, +{ + /// Information that we use to identify a cached entity in a bin. + pub cached_bin_key: Option>, + /// The last modified tick of the entity. + /// + /// We use this to detect when the entity needs to be invalidated. + pub change_tick: Tick, +} + +/// Information that we use to identify a cached entity in a bin. +pub struct CachedBinKey +where + BPI: BinnedPhaseItem, +{ + /// The key of the batch set containing the entity. + pub batch_set_key: BPI::BatchSetKey, + /// The key of the bin containing the entity. + pub bin_key: BPI::BinKey, + /// The type of render phase that we use to render the entity: multidraw, + /// plain batch, etc. 
+ pub phase_type: BinnedRenderPhaseType, +} + +impl Clone for CachedBinnedEntity +where + BPI: BinnedPhaseItem, +{ + fn clone(&self) -> Self { + CachedBinnedEntity { + cached_bin_key: self.cached_bin_key.clone(), + change_tick: self.change_tick, + } + } +} + +impl Clone for CachedBinKey +where + BPI: BinnedPhaseItem, +{ + fn clone(&self) -> Self { + CachedBinKey { + batch_set_key: self.batch_set_key.clone(), + bin_key: self.bin_key.clone(), + phase_type: self.phase_type, + } + } +} + +impl PartialEq for CachedBinKey +where + BPI: BinnedPhaseItem, +{ + fn eq(&self, other: &Self) -> bool { + self.batch_set_key == other.batch_set_key + && self.bin_key == other.bin_key + && self.phase_type == other.phase_type + } } /// How we store and render the batch sets. /// /// Each one of these corresponds to a [`GpuPreprocessingMode`]. -pub enum BinnedRenderPhaseBatchSets { +pub enum BinnedRenderPhaseBatchSets { /// Batches are grouped into batch sets based on dynamic uniforms. /// /// This corresponds to [`GpuPreprocessingMode::None`]. @@ -148,10 +273,23 @@ pub enum BinnedRenderPhaseBatchSets { /// be multi-drawn together. /// /// This corresponds to [`GpuPreprocessingMode::Culling`]. - MultidrawIndirect(Vec>), + MultidrawIndirect(Vec>), } -impl BinnedRenderPhaseBatchSets { +/// A group of entities that will be batched together into a single multi-draw +/// call. +pub struct BinnedRenderPhaseBatchSet { + /// The first batch in this batch set. + pub(crate) first_batch: BinnedRenderPhaseBatch, + /// The key of the bin that the first batch corresponds to. + pub(crate) bin_key: BK, + /// The number of batches. + pub(crate) batch_count: u32, + /// The index of the batch set in the GPU buffer. + pub(crate) index: u32, +} + +impl BinnedRenderPhaseBatchSets { fn clear(&mut self) { match *self { BinnedRenderPhaseBatchSets::DynamicUniforms(ref mut vec) => vec.clear(), @@ -182,12 +320,18 @@ pub struct BinnedRenderPhaseBatch { /// Information about the unbatchable entities in a bin. 
pub struct UnbatchableBinnedEntities { /// The entities. - pub entities: Vec<(Entity, MainEntity)>, + pub entities: MainEntityHashMap, /// The GPU array buffer indices of each unbatchable binned entity. pub(crate) buffer_indices: UnbatchableBinnedEntityIndexSet, } +/// Information about [`BinnedRenderPhaseType::NonMesh`] entities. +pub struct NonMeshEntities { + /// The entities. + pub entities: MainEntityHashMap, +} + /// Stores instance indices and dynamic offsets for unbatchable entities in a /// binned render phase. /// @@ -237,14 +381,16 @@ pub(crate) struct UnbatchableBinnedEntityIndices { /// placed in. #[derive(Clone, Copy, PartialEq, Debug)] pub enum BinnedRenderPhaseType { - /// The item is a mesh that's eligible for indirect rendering and can be - /// batched with other meshes of the same type. + /// The item is a mesh that's eligible for multi-draw indirect rendering and + /// can be batched with other meshes of the same type. + MultidrawableMesh, + + /// The item is a mesh that can be batched with other meshes of the same type and + /// drawn in a single draw call. BatchableMesh, /// The item is a mesh that's eligible for indirect rendering, but can't be /// batched with other meshes of the same type. - /// - /// At the moment, this is used for skinned meshes. UnbatchableMesh, /// The item isn't a mesh at all. 
@@ -282,9 +428,13 @@ impl ViewBinnedRenderPhases where BPI: BinnedPhaseItem, { - pub fn insert_or_clear(&mut self, entity: Entity, gpu_preprocessing: GpuPreprocessingMode) { - match self.entry(entity) { - Entry::Occupied(mut entry) => entry.get_mut().clear(), + pub fn prepare_for_new_frame( + &mut self, + retained_view_entity: RetainedViewEntity, + gpu_preprocessing: GpuPreprocessingMode, + ) { + match self.entry(retained_view_entity) { + Entry::Occupied(mut entry) => entry.get_mut().prepare_for_new_frame(), Entry::Vacant(entry) => { entry.insert(BinnedRenderPhase::::new(gpu_preprocessing)); } @@ -292,6 +442,19 @@ where } } +/// The index of the uniform describing this object in the GPU buffer, when GPU +/// preprocessing is enabled. +/// +/// For example, for 3D meshes, this is the index of the `MeshInputUniform` in +/// the buffer. +/// +/// This field is ignored if GPU preprocessing isn't in use, such as (currently) +/// in the case of 2D meshes. In that case, it can be safely set to +/// [`core::default::Default::default`]. +#[derive(Clone, Copy, PartialEq, Default, Deref, DerefMut)] +#[repr(transparent)] +pub struct InputUniformIndex(pub u32); + impl BinnedRenderPhase where BPI: BinnedPhaseItem, @@ -303,28 +466,69 @@ where /// type. pub fn add( &mut self, - key: BPI::BinKey, - entity: (Entity, MainEntity), - phase_type: BinnedRenderPhaseType, + batch_set_key: BPI::BatchSetKey, + bin_key: BPI::BinKey, + (entity, main_entity): (Entity, MainEntity), + input_uniform_index: InputUniformIndex, + mut phase_type: BinnedRenderPhaseType, + change_tick: Tick, ) { + // If the user has overridden indirect drawing for this view, we need to + // force the phase type to be batchable instead. 
+ if self.gpu_preprocessing_mode == GpuPreprocessingMode::PreprocessingOnly + && phase_type == BinnedRenderPhaseType::MultidrawableMesh + { + phase_type = BinnedRenderPhaseType::BatchableMesh; + } + match phase_type { + BinnedRenderPhaseType::MultidrawableMesh => { + match self.multidrawable_meshes.entry(batch_set_key.clone()) { + indexmap::map::Entry::Occupied(mut entry) => { + entry + .get_mut() + .entry(bin_key.clone()) + .or_default() + .insert(main_entity, input_uniform_index); + } + indexmap::map::Entry::Vacant(entry) => { + let mut new_batch_set = IndexMap::default(); + new_batch_set.insert( + bin_key.clone(), + RenderBin::from_entity(main_entity, input_uniform_index), + ); + entry.insert(new_batch_set); + } + } + } + BinnedRenderPhaseType::BatchableMesh => { - match self.batchable_mesh_values.entry(key.clone()) { - Entry::Occupied(mut entry) => entry.get_mut().push(entity), - Entry::Vacant(entry) => { - self.batchable_mesh_keys.push(key); - entry.insert(vec![entity]); + match self + .batchable_meshes + .entry((batch_set_key.clone(), bin_key.clone()).clone()) + { + indexmap::map::Entry::Occupied(mut entry) => { + entry.get_mut().insert(main_entity, input_uniform_index); + } + indexmap::map::Entry::Vacant(entry) => { + entry.insert(RenderBin::from_entity(main_entity, input_uniform_index)); } } } BinnedRenderPhaseType::UnbatchableMesh => { - match self.unbatchable_mesh_values.entry(key.clone()) { - Entry::Occupied(mut entry) => entry.get_mut().entities.push(entity), - Entry::Vacant(entry) => { - self.unbatchable_mesh_keys.push(key); + match self + .unbatchable_meshes + .entry((batch_set_key.clone(), bin_key.clone())) + { + indexmap::map::Entry::Occupied(mut entry) => { + entry.get_mut().entities.insert(main_entity, entity); + } + indexmap::map::Entry::Vacant(entry) => { + let mut entities = MainEntityHashMap::default(); + entities.insert(main_entity, entity); entry.insert(UnbatchableBinnedEntities { - entities: vec![entity], + entities, buffer_indices: 
default(), }); } @@ -333,9 +537,63 @@ where BinnedRenderPhaseType::NonMesh => { // We don't process these items further. - self.non_mesh_items.push((key, entity)); + match self + .non_mesh_items + .entry((batch_set_key.clone(), bin_key.clone()).clone()) + { + indexmap::map::Entry::Occupied(mut entry) => { + entry.get_mut().entities.insert(main_entity, entity); + } + indexmap::map::Entry::Vacant(entry) => { + let mut entities = MainEntityHashMap::default(); + entities.insert(main_entity, entity); + entry.insert(NonMeshEntities { entities }); + } + } + } + } + + // Update the cache. + self.update_cache( + main_entity, + Some(CachedBinKey { + batch_set_key, + bin_key, + phase_type, + }), + change_tick, + ); + } + + /// Inserts an entity into the cache with the given change tick. + pub fn update_cache( + &mut self, + main_entity: MainEntity, + cached_bin_key: Option>, + change_tick: Tick, + ) { + let new_cached_binned_entity = CachedBinnedEntity { + cached_bin_key, + change_tick, + }; + + let (index, old_cached_binned_entity) = self + .cached_entity_bin_keys + .insert_full(main_entity, new_cached_binned_entity.clone()); + + // If the entity changed bins, record its old bin so that we can remove + // the entity from it. + if let Some(old_cached_binned_entity) = old_cached_binned_entity { + if old_cached_binned_entity.cached_bin_key != new_cached_binned_entity.cached_bin_key { + self.entities_that_changed_bins.push(EntityThatChangedBins { + main_entity, + old_cached_binned_entity, + }); } } + + // Mark the entity as valid. + self.valid_cached_entity_bin_keys.grow_and_insert(index); } /// Encodes the GPU commands needed to render all entities in this phase. 
@@ -370,14 +628,22 @@ where let draw_functions = world.resource::>(); let mut draw_functions = draw_functions.write(); + let render_device = world.resource::(); + let multi_draw_indirect_count_supported = render_device + .features() + .contains(Features::MULTI_DRAW_INDIRECT_COUNT); + match self.batch_sets { BinnedRenderPhaseBatchSets::DynamicUniforms(ref batch_sets) => { - debug_assert_eq!(self.batchable_mesh_keys.len(), batch_sets.len()); + debug_assert_eq!(self.batchable_meshes.len(), batch_sets.len()); - for (key, batch_set) in self.batchable_mesh_keys.iter().zip(batch_sets.iter()) { + for ((batch_set_key, bin_key), batch_set) in + self.batchable_meshes.keys().zip(batch_sets.iter()) + { for batch in batch_set { let binned_phase_item = BPI::new( - key.clone(), + batch_set_key.clone(), + bin_key.clone(), batch.representative_entity, batch.instance_range.clone(), batch.extra_index.clone(), @@ -396,9 +662,12 @@ where } BinnedRenderPhaseBatchSets::Direct(ref batch_set) => { - for (batch, key) in batch_set.iter().zip(self.batchable_mesh_keys.iter()) { + for (batch, (batch_set_key, bin_key)) in + batch_set.iter().zip(self.batchable_meshes.keys()) + { let binned_phase_item = BPI::new( - key.clone(), + batch_set_key.clone(), + bin_key.clone(), batch.representative_entity, batch.instance_range.clone(), batch.extra_index.clone(), @@ -416,17 +685,27 @@ where } BinnedRenderPhaseBatchSets::MultidrawIndirect(ref batch_sets) => { - let mut batchable_mesh_key_index = 0; - for batch_set in batch_sets.iter() { - let Some(batch) = batch_set.first() else { - continue; + for (batch_set_key, batch_set) in self + .multidrawable_meshes + .keys() + .chain( + self.batchable_meshes + .keys() + .map(|(batch_set_key, _)| batch_set_key), + ) + .zip(batch_sets.iter()) + { + let batch = &batch_set.first_batch; + + let batch_set_index = if multi_draw_indirect_count_supported { + NonMaxU32::new(batch_set.index) + } else { + None }; - let key = &self.batchable_mesh_keys[batchable_mesh_key_index]; 
- batchable_mesh_key_index += batch_set.len(); - let binned_phase_item = BPI::new( - key.clone(), + batch_set_key.clone(), + batch_set.bin_key.clone(), batch.representative_entity, batch.instance_range.clone(), match batch.extra_index { @@ -434,10 +713,11 @@ where PhaseItemExtraIndex::DynamicOffset(ref dynamic_offset) => { PhaseItemExtraIndex::DynamicOffset(*dynamic_offset) } - PhaseItemExtraIndex::IndirectParametersIndex(ref range) => { - PhaseItemExtraIndex::IndirectParametersIndex( - range.start..(range.start + batch_set.len() as u32), - ) + PhaseItemExtraIndex::IndirectParametersIndex { ref range, .. } => { + PhaseItemExtraIndex::IndirectParametersIndex { + range: range.start..(range.start + batch_set.batch_count), + batch_set_index, + } } }, ); @@ -467,9 +747,10 @@ where let draw_functions = world.resource::>(); let mut draw_functions = draw_functions.write(); - for key in &self.unbatchable_mesh_keys { - let unbatchable_entities = &self.unbatchable_mesh_values[key]; - for (entity_index, &entity) in unbatchable_entities.entities.iter().enumerate() { + for (batch_set_key, bin_key) in self.unbatchable_meshes.keys() { + let unbatchable_entities = + &self.unbatchable_meshes[&(batch_set_key.clone(), bin_key.clone())]; + for (entity_index, entity) in unbatchable_entities.entities.iter().enumerate() { let unbatchable_dynamic_offset = match &unbatchable_entities.buffer_indices { UnbatchableBinnedEntityIndexSet::NoEntities => { // Shouldn't happen… @@ -486,21 +767,23 @@ where let first_indirect_parameters_index_for_entity = u32::from(*first_indirect_parameters_index) + entity_index as u32; - PhaseItemExtraIndex::IndirectParametersIndex( - first_indirect_parameters_index_for_entity + PhaseItemExtraIndex::IndirectParametersIndex { + range: first_indirect_parameters_index_for_entity ..(first_indirect_parameters_index_for_entity + 1), - ) + batch_set_index: None, + } } }, }, - UnbatchableBinnedEntityIndexSet::Dense(ref dynamic_offsets) => { + 
UnbatchableBinnedEntityIndexSet::Dense(dynamic_offsets) => { dynamic_offsets[entity_index].clone() } }; let binned_phase_item = BPI::new( - key.clone(), - entity, + batch_set_key.clone(), + bin_key.clone(), + (*entity.1, *entity.0), unbatchable_dynamic_offset.instance_index ..(unbatchable_dynamic_offset.instance_index + 1), unbatchable_dynamic_offset.extra_index, @@ -530,35 +813,207 @@ where let draw_functions = world.resource::>(); let mut draw_functions = draw_functions.write(); - for &(ref key, entity) in &self.non_mesh_items { - // Come up with a fake batch range and extra index. The draw - // function is expected to manage any sort of batching logic itself. - let binned_phase_item = BPI::new(key.clone(), entity, 0..1, PhaseItemExtraIndex::None); + for ((batch_set_key, bin_key), non_mesh_entities) in &self.non_mesh_items { + for (main_entity, entity) in non_mesh_entities.entities.iter() { + // Come up with a fake batch range and extra index. The draw + // function is expected to manage any sort of batching logic itself. 
+ let binned_phase_item = BPI::new( + batch_set_key.clone(), + bin_key.clone(), + (*entity, *main_entity), + 0..1, + PhaseItemExtraIndex::None, + ); - let Some(draw_function) = draw_functions.get_mut(binned_phase_item.draw_function()) - else { - continue; - }; + let Some(draw_function) = draw_functions.get_mut(binned_phase_item.draw_function()) + else { + continue; + }; - draw_function.draw(world, render_pass, view, &binned_phase_item)?; + draw_function.draw(world, render_pass, view, &binned_phase_item)?; + } } Ok(()) } pub fn is_empty(&self) -> bool { - self.batchable_mesh_keys.is_empty() - && self.unbatchable_mesh_keys.is_empty() + self.multidrawable_meshes.is_empty() + && self.batchable_meshes.is_empty() + && self.unbatchable_meshes.is_empty() && self.non_mesh_items.is_empty() } - pub fn clear(&mut self) { - self.batchable_mesh_keys.clear(); - self.batchable_mesh_values.clear(); - self.unbatchable_mesh_keys.clear(); - self.unbatchable_mesh_values.clear(); - self.non_mesh_items.clear(); + pub fn prepare_for_new_frame(&mut self) { self.batch_sets.clear(); + + self.valid_cached_entity_bin_keys.clear(); + self.valid_cached_entity_bin_keys + .grow(self.cached_entity_bin_keys.len()); + self.valid_cached_entity_bin_keys + .set_range(self.cached_entity_bin_keys.len().., true); + + self.entities_that_changed_bins.clear(); + + for unbatchable_bin in self.unbatchable_meshes.values_mut() { + unbatchable_bin.buffer_indices.clear(); + } + } + + /// Checks to see whether the entity is in a bin and returns true if it's + /// both in a bin and up to date. + /// + /// If this function returns true, we also add the entry to the + /// `valid_cached_entity_bin_keys` list. 
+ pub fn validate_cached_entity( + &mut self, + visible_entity: MainEntity, + current_change_tick: Tick, + ) -> bool { + if let indexmap::map::Entry::Occupied(entry) = + self.cached_entity_bin_keys.entry(visible_entity) + { + if entry.get().change_tick == current_change_tick { + self.valid_cached_entity_bin_keys.insert(entry.index()); + return true; + } + } + + false + } + + /// Removes all entities not marked as clean from the bins. + /// + /// During `queue_material_meshes`, we process all visible entities and mark + /// each as clean as we come to it. Then, in [`sweep_old_entities`], we call + /// this method, which removes entities that aren't marked as clean from the + /// bins. + pub fn sweep_old_entities(&mut self) { + // Search for entities not marked as valid. We have to do this in + // reverse order because `swap_remove_index` will potentially invalidate + // all indices after the one we remove. + for index in ReverseFixedBitSetZeroesIterator::new(&self.valid_cached_entity_bin_keys) { + let Some((entity, cached_binned_entity)) = + self.cached_entity_bin_keys.swap_remove_index(index) + else { + continue; + }; + + if let Some(ref cached_bin_key) = cached_binned_entity.cached_bin_key { + remove_entity_from_bin( + entity, + cached_bin_key, + &mut self.multidrawable_meshes, + &mut self.batchable_meshes, + &mut self.unbatchable_meshes, + &mut self.non_mesh_items, + ); + } + } + + // If an entity changed bins, we need to remove it from its old bin. + for entity_that_changed_bins in self.entities_that_changed_bins.drain(..) { + let Some(ref old_cached_bin_key) = entity_that_changed_bins + .old_cached_binned_entity + .cached_bin_key + else { + continue; + }; + remove_entity_from_bin( + entity_that_changed_bins.main_entity, + old_cached_bin_key, + &mut self.multidrawable_meshes, + &mut self.batchable_meshes, + &mut self.unbatchable_meshes, + &mut self.non_mesh_items, + ); + } + } +} + +/// Removes an entity from a bin. 
+/// +/// If this makes the bin empty, this function removes the bin as well. +/// +/// This is a standalone function instead of a method on [`BinnedRenderPhase`] +/// for borrow check reasons. +fn remove_entity_from_bin( + entity: MainEntity, + entity_bin_key: &CachedBinKey, + multidrawable_meshes: &mut IndexMap>, + batchable_meshes: &mut IndexMap<(BPI::BatchSetKey, BPI::BinKey), RenderBin>, + unbatchable_meshes: &mut IndexMap<(BPI::BatchSetKey, BPI::BinKey), UnbatchableBinnedEntities>, + non_mesh_items: &mut IndexMap<(BPI::BatchSetKey, BPI::BinKey), NonMeshEntities>, +) where + BPI: BinnedPhaseItem, +{ + match entity_bin_key.phase_type { + BinnedRenderPhaseType::MultidrawableMesh => { + if let indexmap::map::Entry::Occupied(mut batch_set_entry) = + multidrawable_meshes.entry(entity_bin_key.batch_set_key.clone()) + { + if let indexmap::map::Entry::Occupied(mut bin_entry) = batch_set_entry + .get_mut() + .entry(entity_bin_key.bin_key.clone()) + { + bin_entry.get_mut().remove(entity); + + // If the bin is now empty, remove the bin. + if bin_entry.get_mut().is_empty() { + bin_entry.swap_remove(); + } + } + + // If the batch set is now empty, remove it. This will perturb + // the order, but that's OK because we're going to sort the bin + // afterwards. + if batch_set_entry.get_mut().is_empty() { + batch_set_entry.swap_remove(); + } + } + } + + BinnedRenderPhaseType::BatchableMesh => { + if let indexmap::map::Entry::Occupied(mut bin_entry) = batchable_meshes.entry(( + entity_bin_key.batch_set_key.clone(), + entity_bin_key.bin_key.clone(), + )) { + bin_entry.get_mut().remove(entity); + + // If the bin is now empty, remove the bin. 
+ if bin_entry.get_mut().is_empty() { + bin_entry.swap_remove(); + } + } + } + + BinnedRenderPhaseType::UnbatchableMesh => { + if let indexmap::map::Entry::Occupied(mut bin_entry) = unbatchable_meshes.entry(( + entity_bin_key.batch_set_key.clone(), + entity_bin_key.bin_key.clone(), + )) { + bin_entry.get_mut().entities.remove(&entity); + + // If the bin is now empty, remove the bin. + if bin_entry.get_mut().entities.is_empty() { + bin_entry.swap_remove(); + } + } + } + + BinnedRenderPhaseType::NonMesh => { + if let indexmap::map::Entry::Occupied(mut bin_entry) = non_mesh_items.entry(( + entity_bin_key.batch_set_key.clone(), + entity_bin_key.bin_key.clone(), + )) { + bin_entry.get_mut().entities.remove(&entity); + + // If the bin is now empty, remove the bin. + if bin_entry.get_mut().entities.is_empty() { + bin_entry.swap_remove(); + } + } + } } } @@ -568,11 +1023,10 @@ where { fn new(gpu_preprocessing: GpuPreprocessingMode) -> Self { Self { - batchable_mesh_keys: vec![], - batchable_mesh_values: HashMap::default(), - unbatchable_mesh_keys: vec![], - unbatchable_mesh_values: HashMap::default(), - non_mesh_items: vec![], + multidrawable_meshes: IndexMap::default(), + batchable_meshes: IndexMap::default(), + unbatchable_meshes: IndexMap::default(), + non_mesh_items: IndexMap::default(), batch_sets: match gpu_preprocessing { GpuPreprocessingMode::Culling => { BinnedRenderPhaseBatchSets::MultidrawIndirect(vec![]) @@ -582,6 +1036,10 @@ where } GpuPreprocessingMode::None => BinnedRenderPhaseBatchSets::DynamicUniforms(vec![]), }, + cached_entity_bin_keys: IndexMap::default(), + valid_cached_entity_bin_keys: FixedBitSet::new(), + entities_that_changed_bins: vec![], + gpu_preprocessing_mode: gpu_preprocessing, } } } @@ -614,13 +1072,14 @@ impl UnbatchableBinnedEntityIndexSet { u32::from(*first_indirect_parameters_index) + entity_index; Some(UnbatchableBinnedEntityIndices { instance_index: instance_range.start + entity_index, - extra_index: 
PhaseItemExtraIndex::IndirectParametersIndex( - first_indirect_parameters_index_for_this_batch + extra_index: PhaseItemExtraIndex::IndirectParametersIndex { + range: first_indirect_parameters_index_for_this_batch ..(first_indirect_parameters_index_for_this_batch + 1), - ), + batch_set_index: None, + }, }) } - UnbatchableBinnedEntityIndexSet::Dense(ref indices) => { + UnbatchableBinnedEntityIndexSet::Dense(indices) => { indices.get(entity_index as usize).cloned() } } @@ -632,18 +1091,26 @@ impl UnbatchableBinnedEntityIndexSet { /// /// This is the version used when the pipeline supports GPU preprocessing: e.g. /// 3D PBR meshes. -pub struct BinnedRenderPhasePlugin(PhantomData<(BPI, GFBD)>) +pub struct BinnedRenderPhasePlugin where BPI: BinnedPhaseItem, - GFBD: GetFullBatchData; + GFBD: GetFullBatchData, +{ + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, + phantom: PhantomData<(BPI, GFBD)>, +} -impl Default for BinnedRenderPhasePlugin +impl BinnedRenderPhasePlugin where BPI: BinnedPhaseItem, GFBD: GetFullBatchData, { - fn default() -> Self { - Self(PhantomData) + pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { + debug_flags, + phantom: PhantomData, + } } } @@ -659,6 +1126,11 @@ where render_app .init_resource::>() + .init_resource::>() + .insert_resource(PhaseIndirectParametersBuffers::::new( + self.debug_flags + .contains(RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS), + )) .add_systems( Render, ( @@ -674,6 +1146,14 @@ where ), ) .in_set(RenderSet::PrepareResources), + sweep_old_entities::.in_set(RenderSet::QueueSweep), + gpu_preprocessing::collect_buffers_for_phase:: + .run_if( + resource_exists::< + BatchedInstanceBuffers, + >, + ) + .in_set(RenderSet::PrepareResourcesCollectPhaseBuffers), ), ); } @@ -685,7 +1165,7 @@ where /// They're cleared out every frame, but storing them in a resource like this /// allows us to reuse allocations. 
#[derive(Resource, Deref, DerefMut)] -pub struct ViewSortedRenderPhases(pub EntityHashMap>) +pub struct ViewSortedRenderPhases(pub HashMap>) where SPI: SortedPhaseItem; @@ -702,8 +1182,8 @@ impl ViewSortedRenderPhases where SPI: SortedPhaseItem, { - pub fn insert_or_clear(&mut self, entity: Entity) { - match self.entry(entity) { + pub fn insert_or_clear(&mut self, retained_view_entity: RetainedViewEntity) { + match self.entry(retained_view_entity) { Entry::Occupied(mut entry) => entry.get_mut().clear(), Entry::Vacant(entry) => { entry.insert(default()); @@ -717,18 +1197,26 @@ where /// /// This is the version used when the pipeline supports GPU preprocessing: e.g. /// 3D PBR meshes. -pub struct SortedRenderPhasePlugin(PhantomData<(SPI, GFBD)>) +pub struct SortedRenderPhasePlugin where SPI: SortedPhaseItem, - GFBD: GetFullBatchData; + GFBD: GetFullBatchData, +{ + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, + phantom: PhantomData<(SPI, GFBD)>, +} -impl Default for SortedRenderPhasePlugin +impl SortedRenderPhasePlugin where SPI: SortedPhaseItem, GFBD: GetFullBatchData, { - fn default() -> Self { - Self(PhantomData) + pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { + debug_flags, + phantom: PhantomData, + } } } @@ -744,18 +1232,33 @@ where render_app .init_resource::>() + .init_resource::>() + .insert_resource(PhaseIndirectParametersBuffers::::new( + self.debug_flags + .contains(RenderDebugFlags::ALLOW_COPIES_FROM_INDIRECT_PARAMETERS), + )) .add_systems( Render, ( - no_gpu_preprocessing::batch_and_prepare_sorted_render_phase:: - .run_if(resource_exists::>), - gpu_preprocessing::batch_and_prepare_sorted_render_phase::.run_if( - resource_exists::< - BatchedInstanceBuffers, - >, - ), - ) - .in_set(RenderSet::PrepareResources), + ( + no_gpu_preprocessing::batch_and_prepare_sorted_render_phase:: + .run_if(resource_exists::>), + gpu_preprocessing::batch_and_prepare_sorted_render_phase:: + 
.run_if( + resource_exists::< + BatchedInstanceBuffers, + >, + ), + ) + .in_set(RenderSet::PrepareResources), + gpu_preprocessing::collect_buffers_for_phase:: + .run_if( + resource_exists::< + BatchedInstanceBuffers, + >, + ) + .in_set(RenderSet::PrepareResourcesCollectPhaseBuffers), + ), ); } } @@ -779,26 +1282,34 @@ impl UnbatchableBinnedEntityIndexSet { first_indirect_parameters_index: None, } } - PhaseItemExtraIndex::IndirectParametersIndex(ref range) => { + PhaseItemExtraIndex::IndirectParametersIndex { + range: ref indirect_parameters_index, + .. + } => { // This is the first entity we've seen, and we have compute // shaders. Initialize the fast path. *self = UnbatchableBinnedEntityIndexSet::Sparse { instance_range: indices.instance_index..indices.instance_index + 1, - first_indirect_parameters_index: NonMaxU32::new(range.start), + first_indirect_parameters_index: NonMaxU32::new( + indirect_parameters_index.start, + ), } } } } UnbatchableBinnedEntityIndexSet::Sparse { - ref mut instance_range, + instance_range, first_indirect_parameters_index, } if instance_range.end == indices.instance_index && ((first_indirect_parameters_index.is_none() && indices.extra_index == PhaseItemExtraIndex::None) || first_indirect_parameters_index.is_some_and( |first_indirect_parameters_index| match indices.extra_index { - PhaseItemExtraIndex::IndirectParametersIndex(ref this_range) => { + PhaseItemExtraIndex::IndirectParametersIndex { + range: ref this_range, + .. + } => { u32::from(first_indirect_parameters_index) + instance_range.end - instance_range.start == this_range.start @@ -819,6 +1330,10 @@ impl UnbatchableBinnedEntityIndexSet { // but let's go ahead and do the sensible thing anyhow: demote // the compressed `NoDynamicOffsets` field to the full // `DynamicOffsets` array. + warn!( + "Unbatchable binned entity index set was demoted from sparse to dense. \ + This is a bug in the renderer. 
Please report it.", + ); let new_dynamic_offsets = (0..instance_range.len() as u32) .flat_map(|entity_index| self.indices_for_entity_index(entity_index)) .chain(iter::once(indices)) @@ -826,11 +1341,22 @@ impl UnbatchableBinnedEntityIndexSet { *self = UnbatchableBinnedEntityIndexSet::Dense(new_dynamic_offsets); } - UnbatchableBinnedEntityIndexSet::Dense(ref mut dense_indices) => { + UnbatchableBinnedEntityIndexSet::Dense(dense_indices) => { dense_indices.push(indices); } } } + + /// Clears the unbatchable binned entity index set. + fn clear(&mut self) { + match self { + UnbatchableBinnedEntityIndexSet::Dense(dense_indices) => dense_indices.clear(), + UnbatchableBinnedEntityIndexSet::Sparse { .. } => { + *self = UnbatchableBinnedEntityIndexSet::NoEntities; + } + _ => {} + } + } } /// A collection of all items to be rendered that will be encoded to GPU @@ -948,15 +1474,15 @@ where /// [`SortedPhaseItem`]s. /// /// * Binned phase items have a `BinKey` which specifies what bin they're to be -/// placed in. All items in the same bin are eligible to be batched together. -/// The `BinKey`s are sorted, but the individual bin items aren't. Binned phase -/// items are good for opaque meshes, in which the order of rendering isn't -/// important. Generally, binned phase items are faster than sorted phase items. +/// placed in. All items in the same bin are eligible to be batched together. +/// The `BinKey`s are sorted, but the individual bin items aren't. Binned phase +/// items are good for opaque meshes, in which the order of rendering isn't +/// important. Generally, binned phase items are faster than sorted phase items. /// /// * Sorted phase items, on the other hand, are placed into one large buffer -/// and then sorted all at once. This is needed for transparent meshes, which -/// have to be sorted back-to-front to render with the painter's algorithm. -/// These types of phase items are generally slower than binned phase items. +/// and then sorted all at once. 
This is needed for transparent meshes, which +/// have to be sorted back-to-front to render with the painter's algorithm. +/// These types of phase items are generally slower than binned phase items. pub trait PhaseItem: Sized + Send + Sync + 'static { /// Whether or not this `PhaseItem` should be subjected to automatic batching. (Default: `true`) const AUTOMATIC_BATCHING: bool = true; @@ -997,12 +1523,12 @@ pub trait PhaseItem: Sized + Send + Sync + 'static { /// instances they already have. These can be: /// /// * The *dynamic offset*: a `wgpu` dynamic offset into the uniform buffer of -/// instance data. This is used on platforms that don't support storage -/// buffers, to work around uniform buffer size limitations. +/// instance data. This is used on platforms that don't support storage +/// buffers, to work around uniform buffer size limitations. /// /// * The *indirect parameters index*: an index into the buffer that specifies -/// the indirect parameters for this [`PhaseItem`]'s drawcall. This is used when -/// indirect mode is on (as used for GPU culling). +/// the indirect parameters for this [`PhaseItem`]'s drawcall. This is used when +/// indirect mode is on (as used for GPU culling). /// /// Note that our indirect draw functionality requires storage buffers, so it's /// impossible to have both a dynamic offset and an indirect parameters index. @@ -1018,7 +1544,22 @@ pub enum PhaseItemExtraIndex { /// An index into the buffer that specifies the indirect parameters for this /// [`PhaseItem`]'s drawcall. This is used when indirect mode is on (as used /// for GPU culling). - IndirectParametersIndex(Range), + IndirectParametersIndex { + /// The range of indirect parameters within the indirect parameters array. + /// + /// If we're using `multi_draw_indirect_count`, this specifies the + /// maximum range of indirect parameters within that array. 
If batches + /// are ultimately culled out on the GPU, the actual number of draw + /// commands might be lower than the length of this range. + range: Range, + /// If `multi_draw_indirect_count` is in use, and this phase item is + /// part of a batch set, specifies the index of the batch set that this + /// phase item is a part of. + /// + /// If `multi_draw_indirect_count` isn't in use, or this phase item + /// isn't part of a batch set, this is `None`. + batch_set_index: Option, + }, } impl PhaseItemExtraIndex { @@ -1028,9 +1569,11 @@ impl PhaseItemExtraIndex { indirect_parameters_index: Option, ) -> PhaseItemExtraIndex { match indirect_parameters_index { - Some(indirect_parameters_index) => PhaseItemExtraIndex::IndirectParametersIndex( - u32::from(indirect_parameters_index)..(u32::from(indirect_parameters_index) + 1), - ), + Some(indirect_parameters_index) => PhaseItemExtraIndex::IndirectParametersIndex { + range: u32::from(indirect_parameters_index) + ..(u32::from(indirect_parameters_index) + 1), + batch_set_index: None, + }, None => PhaseItemExtraIndex::None, } } @@ -1059,7 +1602,13 @@ pub trait BinnedPhaseItem: PhaseItem { /// lowest variable bind group id such as the material bind group id, and /// its dynamic offsets if any, next bind group and offsets, etc. This /// reduces the need for rebinding between bins and improves performance. - type BinKey: PhaseItemBinKey; + type BinKey: Clone + Send + Sync + PartialEq + Eq + Ord + Hash; + + /// The key used to combine batches into batch sets. + /// + /// A *batch set* is a set of meshes that can potentially be multi-drawn + /// together. + type BatchSetKey: PhaseItemBatchSetKey; /// Creates a new binned phase item from the key and per-entity data. /// @@ -1067,31 +1616,25 @@ pub trait BinnedPhaseItem: PhaseItem { /// before rendering. The resulting phase item isn't stored in any data /// structures, resulting in significant memory savings. 
fn new( - key: Self::BinKey, + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, representative_entity: (Entity, MainEntity), batch_range: Range, extra_index: PhaseItemExtraIndex, ) -> Self; } -/// A trait that allows fetching the *batch set key* from a bin key. +/// A key used to combine batches into batch sets. /// -/// A *batch set* is a set of mesh batches that will be rendered with multi-draw -/// if multi-draw is in use. The *batch set key* is the data that has to be -/// identical between meshes in order to place them in the same batch set. A -/// batch set can therefore span multiple bins. -/// -/// The batch set key should be at the beginning of the bin key structure so -/// that batches in the same batch set will be adjacent to one another in the -/// sorted list of bins. -pub trait PhaseItemBinKey: Clone + Send + Sync + PartialEq + Eq + Ord + Hash { - type BatchSetKey: Clone + PartialEq; - - /// Returns the batch set key, if applicable. +/// A *batch set* is a set of meshes that can potentially be multi-drawn +/// together. +pub trait PhaseItemBatchSetKey: Clone + Send + Sync + PartialEq + Eq + Ord + Hash { + /// Returns true if this batch set key describes indexed meshes or false if + /// it describes non-indexed meshes. /// - /// If this returns `None`, no batches in this phase item can be grouped - /// together into batch sets. - fn get_batch_set_key(&self) -> Option; + /// Bevy uses this in order to determine which kind of indirect draw + /// parameters to use, if indirect drawing is enabled. + fn indexed(&self) -> bool; } /// Represents phase items that must be sorted. The `SortKey` specifies the @@ -1112,7 +1655,7 @@ pub trait SortedPhaseItem: PhaseItem { /// Sorts a slice of phase items into render order. Generally if the same type /// is batched this should use a stable sort like [`slice::sort_by_key`]. 
/// In almost all other cases, this should not be altered from the default, - /// which uses a unstable sort, as this provides the best balance of CPU and GPU + /// which uses an unstable sort, as this provides the best balance of CPU and GPU /// performance. /// /// Implementers can optionally not sort the list at all. This is generally advisable if and @@ -1125,6 +1668,17 @@ pub trait SortedPhaseItem: PhaseItem { fn sort(items: &mut [Self]) { items.sort_unstable_by_key(Self::sort_key); } + + /// Whether this phase item targets indexed meshes (those with both vertex + /// and index buffers as opposed to just vertex buffers). + /// + /// Bevy needs this information in order to properly group phase items + /// together for multi-draw indirect, because the GPU layout of indirect + /// commands differs between indexed and non-indexed meshes. + /// + /// If you're implementing a custom phase item that doesn't describe a mesh, + /// you can safely return false here. + fn indexed(&self) -> bool; } /// A [`PhaseItem`] item, that automatically sets the appropriate render pipeline, @@ -1175,14 +1729,152 @@ where } } +/// Removes entities that became invisible or changed phases from the bins. +/// +/// This must run after queuing. +pub fn sweep_old_entities(mut render_phases: ResMut>) +where + BPI: BinnedPhaseItem, +{ + for phase in render_phases.0.values_mut() { + phase.sweep_old_entities(); + } +} + impl BinnedRenderPhaseType { - /// Creates the appropriate [`BinnedRenderPhaseType`] for a mesh, given its - /// batchability. 
- pub fn mesh(batchable: bool) -> BinnedRenderPhaseType { - if batchable { - BinnedRenderPhaseType::BatchableMesh - } else { - BinnedRenderPhaseType::UnbatchableMesh + pub fn mesh( + batchable: bool, + gpu_preprocessing_support: &GpuPreprocessingSupport, + ) -> BinnedRenderPhaseType { + match (batchable, gpu_preprocessing_support.max_supported_mode) { + (true, GpuPreprocessingMode::Culling) => BinnedRenderPhaseType::MultidrawableMesh, + (true, _) => BinnedRenderPhaseType::BatchableMesh, + (false, _) => BinnedRenderPhaseType::UnbatchableMesh, + } + } +} + +impl RenderBin { + /// Creates a [`RenderBin`] containing a single entity. + fn from_entity(entity: MainEntity, uniform_index: InputUniformIndex) -> RenderBin { + let mut entities = IndexMap::default(); + entities.insert(entity, uniform_index); + RenderBin { entities } + } + + /// Inserts an entity into the bin. + fn insert(&mut self, entity: MainEntity, uniform_index: InputUniformIndex) { + self.entities.insert(entity, uniform_index); + } + + /// Removes an entity from the bin. + fn remove(&mut self, entity_to_remove: MainEntity) { + self.entities.swap_remove(&entity_to_remove); + } + + /// Returns true if the bin contains no entities. + fn is_empty(&self) -> bool { + self.entities.is_empty() + } + + /// Returns the [`IndexMap`] containing all the entities in the bin, along + /// with the cached [`InputUniformIndex`] of each. + #[inline] + pub fn entities(&self) -> &IndexMap { + &self.entities + } +} + +/// An iterator that efficiently finds the indices of all zero bits in a +/// [`FixedBitSet`] and returns them in reverse order. +/// +/// [`FixedBitSet`] doesn't natively offer this functionality, so we have to +/// implement it ourselves. +#[derive(Debug)] +struct ReverseFixedBitSetZeroesIterator<'a> { + /// The bit set. + bitset: &'a FixedBitSet, + /// The next bit index we're going to scan when [`Iterator::next`] is + /// called. 
+ bit_index: isize, +} + +impl<'a> ReverseFixedBitSetZeroesIterator<'a> { + fn new(bitset: &'a FixedBitSet) -> ReverseFixedBitSetZeroesIterator<'a> { + ReverseFixedBitSetZeroesIterator { + bitset, + bit_index: (bitset.len() as isize) - 1, + } + } +} + +impl<'a> Iterator for ReverseFixedBitSetZeroesIterator<'a> { + type Item = usize; + + fn next(&mut self) -> Option { + while self.bit_index >= 0 { + // Unpack the bit index into block and bit. + let block_index = self.bit_index / (Block::BITS as isize); + let bit_pos = self.bit_index % (Block::BITS as isize); + + // Grab the block. Mask off all bits above the one we're scanning + // from by setting them all to 1. + let mut block = self.bitset.as_slice()[block_index as usize]; + if bit_pos + 1 < (Block::BITS as isize) { + block |= (!0) << (bit_pos + 1); + } + + // Search for the next unset bit. Note that the `leading_ones` + // function counts from the MSB to the LSB, so we need to flip it to + // get the bit number. + let pos = (Block::BITS as isize) - (block.leading_ones() as isize) - 1; + + // If we found an unset bit, return it. + if pos != -1 { + let result = block_index * (Block::BITS as isize) + pos; + self.bit_index = result - 1; + return Some(result as usize); + } + + // Otherwise, go to the previous block. + self.bit_index = block_index * (Block::BITS as isize) - 1; + } + + None + } +} + +#[cfg(test)] +mod test { + use super::ReverseFixedBitSetZeroesIterator; + use fixedbitset::FixedBitSet; + use proptest::{collection::vec, prop_assert_eq, proptest}; + + proptest! { + #[test] + fn reverse_fixed_bit_set_zeroes_iterator( + bits in vec(0usize..1024usize, 0usize..1024usize), + size in 0usize..1024usize, + ) { + // Build a random bit set. + let mut bitset = FixedBitSet::new(); + bitset.grow(size); + for bit in bits { + if bit < size { + bitset.set(bit, true); + } + } + + // Iterate over the bit set backwards in a naive way, and check that + // that iteration sequence corresponds to the optimized one. 
+ let mut iter = ReverseFixedBitSetZeroesIterator::new(&bitset); + for bit_index in (0..size).rev() { + if !bitset.contains(bit_index) { + prop_assert_eq!(iter.next(), Some(bit_index)); + } + } + + prop_assert_eq!(iter.next(), None); } } } diff --git a/crates/bevy_render/src/render_resource/bind_group.rs b/crates/bevy_render/src/render_resource/bind_group.rs index 8d0ed47f8395f..2c8e984bfdcaf 100644 --- a/crates/bevy_render/src/render_resource/bind_group.rs +++ b/crates/bevy_render/src/render_resource/bind_group.rs @@ -6,14 +6,17 @@ use crate::{ renderer::RenderDevice, texture::GpuImage, }; -use alloc::sync::Arc; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::system::{SystemParam, SystemParamItem}; pub use bevy_render_macros::AsBindGroup; use core::ops::Deref; use encase::ShaderType; use thiserror::Error; -use wgpu::{BindGroupEntry, BindGroupLayoutEntry, BindingResource, TextureViewDimension}; +use wgpu::{ + BindGroupEntry, BindGroupLayoutEntry, BindingResource, SamplerBindingType, TextureViewDimension, +}; + +use super::{BindlessDescriptor, BindlessSlabResourceLimit}; define_atomic_id!(BindGroupId); @@ -21,27 +24,44 @@ define_atomic_id!(BindGroupId); /// to a [`TrackedRenderPass`](crate::render_phase::TrackedRenderPass). /// This makes them accessible in the pipeline (shaders) as uniforms. /// -/// May be converted from and dereferences to a wgpu [`BindGroup`](wgpu::BindGroup). +/// This is a lightweight thread-safe wrapper around wgpu's own [`BindGroup`](wgpu::BindGroup), +/// which can be cloned as needed to workaround lifetime management issues. It may be converted +/// from and dereferences to wgpu's [`BindGroup`](wgpu::BindGroup). +/// /// Can be created via [`RenderDevice::create_bind_group`](RenderDevice::create_bind_group). #[derive(Clone, Debug)] pub struct BindGroup { id: BindGroupId, - value: Arc>, + value: WgpuWrapper, } impl BindGroup { - /// Returns the [`BindGroupId`]. 
+ /// Returns the [`BindGroupId`] representing the unique ID of the bind group. #[inline] pub fn id(&self) -> BindGroupId { self.id } } +impl PartialEq for BindGroup { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + } +} + +impl Eq for BindGroup {} + +impl core::hash::Hash for BindGroup { + fn hash(&self, state: &mut H) { + self.id.0.hash(state); + } +} + impl From for BindGroup { fn from(value: wgpu::BindGroup) -> Self { BindGroup { id: BindGroupId::new(), - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } @@ -125,17 +145,19 @@ impl Deref for BindGroup { /// /// The following field-level attributes are supported: /// -/// * `uniform(BINDING_INDEX)` -/// * The field will be converted to a shader-compatible type using the [`ShaderType`] trait, written to a [`Buffer`], and bound as a uniform. -/// [`ShaderType`] is implemented for most math types already, such as [`f32`], [`Vec4`](bevy_math::Vec4), and -/// [`LinearRgba`](bevy_color::LinearRgba). It can also be derived for custom structs. +/// ## `uniform(BINDING_INDEX)` +/// +/// * The field will be converted to a shader-compatible type using the [`ShaderType`] trait, written to a [`Buffer`], and bound as a uniform. +/// [`ShaderType`] is implemented for most math types already, such as [`f32`], [`Vec4`](bevy_math::Vec4), and +/// [`LinearRgba`](bevy_color::LinearRgba). It can also be derived for custom structs. +/// +/// ## `texture(BINDING_INDEX, arguments)` /// -/// * `texture(BINDING_INDEX, arguments)` -/// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) -/// GPU resource, which will be bound as a texture in shaders. The field will be assumed to implement [`Into>>`]. In practice, -/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is -/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. 
This attribute can be used in conjunction with a `sampler` binding attribute -/// (with a different binding index) if a binding of the sampler for the [`Image`](bevy_image::Image) is also required. +/// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) +/// GPU resource, which will be bound as a texture in shaders. The field will be assumed to implement [`Into>>`]. In practice, +/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is +/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `sampler` binding attribute +/// (with a different binding index) if a binding of the sampler for the [`Image`](bevy_image::Image) is also required. /// /// | Arguments | Values | Default | /// |-----------------------|-------------------------------------------------------------------------|----------------------| @@ -145,11 +167,12 @@ impl Deref for BindGroup { /// | `multisampled` = ... | `true`, `false` | `false` | /// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | /// -/// * `storage_texture(BINDING_INDEX, arguments)` -/// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) -/// GPU resource, which will be bound as a storage texture in shaders. The field will be assumed to implement [`Into>>`]. In practice, -/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is -/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. 
+/// ## `storage_texture(BINDING_INDEX, arguments)` +/// +/// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Texture`](crate::render_resource::Texture) +/// GPU resource, which will be bound as a storage texture in shaders. The field will be assumed to implement [`Into>>`]. In practice, +/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is +/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. /// /// | Arguments | Values | Default | /// |------------------------|--------------------------------------------------------------------------------------------|---------------| @@ -158,28 +181,34 @@ impl Deref for BindGroup { /// | `access` = ... | any member of [`StorageTextureAccess`](crate::render_resource::StorageTextureAccess) | `ReadWrite` | /// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `compute` | /// -/// * `sampler(BINDING_INDEX, arguments)` -/// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Sampler`] GPU -/// resource, which will be bound as a sampler in shaders. The field will be assumed to implement [`Into>>`]. In practice, -/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is -/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `texture` binding attribute -/// (with a different binding index) if a binding of the texture for the [`Image`](bevy_image::Image) is also required. +/// ## `sampler(BINDING_INDEX, arguments)` +/// +/// * This field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Sampler`] GPU +/// resource, which will be bound as a sampler in shaders. The field will be assumed to implement [`Into>>`]. 
In practice, +/// most fields should be a [`Handle`](bevy_asset::Handle) or [`Option>`]. If the value of an [`Option>`] is +/// [`None`], the [`crate::texture::FallbackImage`] resource will be used instead. This attribute can be used in conjunction with a `texture` binding attribute +/// (with a different binding index) if a binding of the texture for the [`Image`](bevy_image::Image) is also required. /// /// | Arguments | Values | Default | /// |------------------------|-------------------------------------------------------------------------|------------------------| /// | `sampler_type` = "..." | `"filtering"`, `"non_filtering"`, `"comparison"`. | `"filtering"` | /// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | -/// * `storage(BINDING_INDEX, arguments)` -/// * The field's [`Handle`](bevy_asset::Handle) will be used to look up the matching [`Buffer`] GPU resource, which -/// will be bound as a storage buffer in shaders. If the `storage` attribute is used, the field is expected a raw -/// buffer, and the buffer will be bound as a storage buffer in shaders. -/// * It supports an optional `read_only` parameter. Defaults to false if not present. -/// -/// | Arguments | Values | Default | -/// |------------------------|-------------------------------------------------------------------------|----------------------| -/// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | -/// | `read_only` | if present then value is true, otherwise false | `false` | -/// | `buffer` | if present then the field will be assumed to be a raw wgpu buffer | | +/// +/// ## `storage(BINDING_INDEX, arguments)` +/// +/// * The field's [`Handle`](bevy_asset::Handle) will be used to look +/// up the matching [`Buffer`] GPU resource, which will be bound as a storage +/// buffer in shaders. 
If the `storage` attribute is used, the field is expected +/// a raw buffer, and the buffer will be bound as a storage buffer in shaders. +/// In bindless mode, `binding_array()` argument that specifies the binding +/// number of the resulting storage buffer binding array must be present. +/// +/// | Arguments | Values | Default | +/// |------------------------|-------------------------------------------------------------------------|------------------------| +/// | `visibility(...)` | `all`, `none`, or a list-combination of `vertex`, `fragment`, `compute` | `vertex`, `fragment` | +/// | `read_only` | if present then value is true, otherwise false | `false` | +/// | `buffer` | if present then the field will be assumed to be a raw wgpu buffer | | +/// | `binding_array(...)` | the binding number of the binding array, for bindless mode | bindless mode disabled | /// /// Note that fields without field-level binding attributes will be ignored. /// ``` @@ -236,41 +265,181 @@ impl Deref for BindGroup { /// ``` /// /// Some less common scenarios will require "struct-level" attributes. These are the currently supported struct-level attributes: -/// * `uniform(BINDING_INDEX, ConvertedShaderType)` -/// * This also creates a [`Buffer`] using [`ShaderType`] and binds it as a uniform, much -/// like the field-level `uniform` attribute. The difference is that the entire [`AsBindGroup`] value is converted to `ConvertedShaderType`, -/// which must implement [`ShaderType`], instead of a specific field implementing [`ShaderType`]. This is useful if more complicated conversion -/// logic is required. The conversion is done using the [`AsBindGroupShaderType`] trait, which is automatically implemented -/// if `&Self` implements [`Into`]. Only use [`AsBindGroupShaderType`] if access to resources like [`RenderAssets`] is -/// required. 
-/// * `bind_group_data(DataType)` -/// * The [`AsBindGroup`] type will be converted to some `DataType` using [`Into`] and stored -/// as [`AsBindGroup::Data`] as part of the [`AsBindGroup::as_bind_group`] call. This is useful if data needs to be stored alongside -/// the generated bind group, such as a unique identifier for a material's bind group. The most common use case for this attribute -/// is "shader pipeline specialization". See [`SpecializedRenderPipeline`](crate::render_resource::SpecializedRenderPipeline). -/// * `bindless(COUNT)` -/// * This switch enables *bindless resources*, which changes the way Bevy -/// supplies resources (uniforms, textures, and samplers) to the shader. -/// When bindless resources are enabled, and the current platform supports -/// them, instead of presenting a single instance of a resource to your -/// shader Bevy will instead present a *binding array* of `COUNT` elements. -/// In your shader, the index of the element of each binding array -/// corresponding to the mesh currently being drawn can be retrieved with -/// `mesh[in.instance_index].material_and_lightmap_bind_group_slot & -/// 0xffffu`. -/// * Bindless uniforms don't exist, so in bindless mode all uniforms and -/// uniform buffers are automatically replaced with read-only storage -/// buffers. -/// * The purpose of bindless mode is to improve performance by reducing -/// state changes. By grouping resources together into binding arrays, Bevy -/// doesn't have to modify GPU state as often, decreasing API and driver -/// overhead. -/// * If bindless mode is enabled, the `BINDLESS` definition will be -/// available. Because not all platforms support bindless resources, you -/// should check for the presence of this definition via `#ifdef` and fall -/// back to standard bindings if it isn't present. -/// * See the `shaders/shader_material_bindless` example for an example of -/// how to use bindless mode. 
+/// ## `uniform(BINDING_INDEX, ConvertedShaderType)` +/// +/// * This also creates a [`Buffer`] using [`ShaderType`] and binds it as a +/// uniform, much like the field-level `uniform` attribute. The difference is +/// that the entire [`AsBindGroup`] value is converted to `ConvertedShaderType`, +/// which must implement [`ShaderType`], instead of a specific field +/// implementing [`ShaderType`]. This is useful if more complicated conversion +/// logic is required, or when using bindless mode (see below). The conversion +/// is done using the [`AsBindGroupShaderType`] trait, +/// which is automatically implemented if `&Self` implements +/// [`Into`]. Outside of bindless mode, only use +/// [`AsBindGroupShaderType`] if access to resources like +/// [`RenderAssets`] is required. +/// +/// * In bindless mode (see `bindless`), this attribute becomes +/// `uniform(BINDLESS_INDEX, ConvertedShaderType, +/// binding_array(BINDING_INDEX))`. The resulting uniform buffers will be +/// available in the shader as a binding array at the given `BINDING_INDEX`. The +/// `BINDLESS_INDEX` specifies the offset of the buffer in the bindless index +/// table. +/// +/// For example, suppose that the material slot is stored in a variable named +/// `slot`, the bindless index table is named `material_indices`, and that the +/// first field (index 0) of the bindless index table type is named +/// `material`. Then specifying `#[uniform(0, StandardMaterialUniform, +/// binding_array(10))]` will create a binding array buffer declared in the +/// shader as `var material_array: +/// binding_array` and accessible as +/// `material_array[material_indices[slot].material]`. +/// +/// ## `data(BINDING_INDEX, ConvertedShaderType, binding_array(BINDING_INDEX))` +/// +/// * This is very similar to `uniform(BINDING_INDEX, ConvertedShaderType, +/// binding_array(BINDING_INDEX))` and in fact is identical if bindless mode +/// isn't being used.
The difference is that, in bindless mode, the `data` +/// attribute produces a single buffer containing an array, not an array of +/// buffers. For example, suppose you had the following declaration: +/// +/// ```ignore +/// #[uniform(0, StandardMaterialUniform, binding_array(10))] +/// struct StandardMaterial { ... } +/// ``` +/// +/// In bindless mode, this will produce a binding matching the following WGSL +/// declaration: +/// +/// ```wgsl +/// @group(2) @binding(10) var material_array: binding_array; +/// ``` +/// +/// On the other hand, if you write this declaration: +/// +/// ```ignore +/// #[data(0, StandardMaterialUniform, binding_array(10))] +/// struct StandardMaterial { ... } +/// ``` +/// +/// Then Bevy produces a binding that matches this WGSL declaration instead: +/// +/// ```wgsl +/// @group(2) @binding(10) var material_array: array; +/// ``` +/// +/// * Just as with the structure-level `uniform` attribute, Bevy converts the +/// entire [`AsBindGroup`] to `ConvertedShaderType`, using the +/// [`AsBindGroupShaderType`] trait. +/// +/// * In non-bindless mode, the structure-level `data` attribute is the same as +/// the structure-level `uniform` attribute and produces a single uniform buffer +/// in the shader. The above example would result in a binding that looks like +/// this in WGSL in non-bindless mode: +/// +/// ```wgsl +/// @group(2) @binding(0) var material: StandardMaterial; +/// ``` +/// +/// * For efficiency reasons, `data` is generally preferred over `uniform` +/// unless you need to place your data in individual buffers. +/// +/// ## `bind_group_data(DataType)` +/// +/// * The [`AsBindGroup`] type will be converted to some `DataType` using [`Into`] and stored +/// as [`AsBindGroup::Data`] as part of the [`AsBindGroup::as_bind_group`] call. This is useful if data needs to be stored alongside +/// the generated bind group, such as a unique identifier for a material's bind group. 
The most common use case for this attribute +/// is "shader pipeline specialization". See [`SpecializedRenderPipeline`](crate::render_resource::SpecializedRenderPipeline). +/// +/// ## `bindless` +/// +/// * This switch enables *bindless resources*, which changes the way Bevy +/// supplies resources (textures, and samplers) to the shader. When bindless +/// resources are enabled, and the current platform supports them, Bevy will +/// allocate textures, and samplers into *binding arrays*, separated based on +/// type and will supply your shader with indices into those arrays. +/// * Bindless textures and samplers are placed into the appropriate global +/// array defined in `bevy_render::bindless` (`bindless.wgsl`). +/// * Bevy doesn't currently support bindless buffers, except for those created +/// with the `uniform(BINDLESS_INDEX, ConvertedShaderType, +/// binding_array(BINDING_INDEX))` attribute. If you need to include a buffer in +/// your object, and you can't create the data in that buffer with the `uniform` +/// attribute, consider a non-bindless object instead. +/// * If bindless mode is enabled, the `BINDLESS` definition will be +/// available. Because not all platforms support bindless resources, you +/// should check for the presence of this definition via `#ifdef` and fall +/// back to standard bindings if it isn't present. +/// * By default, in bindless mode, binding 0 becomes the *bindless index +/// table*, which is an array of structures, each of which contains as many +/// fields of type `u32` as the highest binding number in the structure +/// annotated with `#[derive(AsBindGroup)]`. Again by default, the *i*th field +/// of the bindless index table contains the index of the resource with binding +/// *i* within the appropriate binding array. 
+/// * In the case of materials, the index of the applicable table within the +/// bindless index table list corresponding to the mesh currently being drawn +/// can be retrieved with +/// `mesh[in.instance_index].material_and_lightmap_bind_group_slot & 0xffffu`. +/// * You can limit the size of the bindless slabs to N resources with the +/// `limit(N)` declaration. For example, `#[bindless(limit(16))]` ensures that +/// each slab will have no more than 16 total resources in it. If you don't +/// specify a limit, Bevy automatically picks a reasonable one for the current +/// platform. +/// * The `index_table(range(M..N), binding(B))` declaration allows you to +/// customize the layout of the bindless index table. This is useful for +/// materials that are composed of multiple bind groups, such as +/// `ExtendedMaterial`. In such cases, there will be multiple bindless index +/// tables, so they can't both be assigned to binding 0 or their bindings will +/// conflict. +/// - The `binding(B)` attribute of the `index_table` attribute allows you to +/// customize the binding (`@binding(B)`, in the shader) at which the index +/// table will be bound. +/// - The `range(M..N)` attribute of the `index_table` attribute allows you to +/// change the mapping from the field index in the bindless index table to the +/// bindless index. Instead of the field at index $i$ being mapped to the +/// bindless index $i$, with the `range(M..N)` attribute the field at index +/// $i$ in the bindless index table is mapped to the bindless index $i$ + M. +/// The size of the index table will be set to N - M. Note that this may +/// result in the table being too small to contain all the bindless bindings. +/// * The purpose of bindless mode is to improve performance by reducing +/// state changes. By grouping resources together into binding arrays, Bevy +/// doesn't have to modify GPU state as often, decreasing API and driver +/// overhead.
+/// * See the `shaders/shader_material_bindless` example for an example of how +/// to use bindless mode. See the `shaders/extended_material_bindless` example +/// for a more exotic example of bindless mode that demonstrates the +/// `index_table` attribute. +/// * The following diagram illustrates how bindless mode works using a subset +/// of `StandardMaterial`: +/// +/// ```text +/// Shader Bindings Sampler Binding Array +/// +----+-----------------------------+ +-----------+-----------+-----+ +/// +---| 0 | material_indices | +->| sampler 0 | sampler 1 | ... | +/// | +----+-----------------------------+ | +-----------+-----------+-----+ +/// | | 1 | bindless_samplers_filtering +--+ ^ +/// | +----+-----------------------------+ +-------------------------------+ +/// | | .. | ... | | +/// | +----+-----------------------------+ Texture Binding Array | +/// | | 5 | bindless_textures_2d +--+ +-----------+-----------+-----+ | +/// | +----+-----------------------------+ +->| texture 0 | texture 1 | ... | | +/// | | .. | ... | +-----------+-----------+-----+ | +/// | +----+-----------------------------+ ^ | +/// | + 10 | material_array +--+ +---------------------------+ | +/// | +----+-----------------------------+ | | | +/// | | Buffer Binding Array | | +/// | | +----------+----------+-----+ | | +/// | +->| buffer 0 | buffer 1 | ... | | | +/// | Material Bindless Indices +----------+----------+-----+ | | +/// | +----+-----------------------------+ ^ | | +/// +-->| 0 | material +----------+ | | +/// +----+-----------------------------+ | | +/// | 1 | base_color_texture +---------------------------------------+ | +/// +----+-----------------------------+ | +/// | 2 | base_color_sampler +-------------------------------------------+ +/// +----+-----------------------------+ +/// | .. | ... 
| +/// +----+-----------------------------+ +/// ``` /// /// The previous `CoolMaterial` example illustrating "combining multiple field-level uniform attributes with the same binding index" can /// also be equivalently represented with a single struct-level uniform attribute: @@ -338,10 +507,19 @@ pub trait AsBindGroup { /// Note that the *actual* slot count may be different from this value, due /// to platform limitations. For example, if bindless resources aren't /// supported on this platform, the actual slot count will be 1. - fn bindless_slot_count() -> Option { + fn bindless_slot_count() -> Option { None } + /// True if the hardware *actually* supports bindless textures for this + /// type, taking the device and driver capabilities into account. + /// + /// If this type doesn't use bindless textures, then the return value from + /// this function is meaningless. + fn bindless_supported(_: &RenderDevice) -> bool { + true + } + /// label fn label() -> Option<&'static str> { None @@ -405,7 +583,7 @@ pub trait AsBindGroup { ) } - /// Returns a vec of bind group layout entries + /// Returns a vec of bind group layout entries. /// /// Set `force_no_bindless` to true to require that bindless textures *not* /// be used. `ExtendedMaterial` uses this in order to ensure that the base @@ -416,6 +594,10 @@ pub trait AsBindGroup { ) -> Vec where Self: Sized; + + fn bindless_descriptor() -> Option { + None + } } /// An error that occurs during [`AsBindGroup::as_bind_group`] calls. @@ -455,15 +637,30 @@ pub struct BindingResources(pub Vec<(u32, OwnedBindingResource)>); pub enum OwnedBindingResource { Buffer(Buffer), TextureView(TextureViewDimension, TextureView), - Sampler(Sampler), + Sampler(SamplerBindingType, Sampler), + Data(OwnedData), } +/// Data that will be copied into a GPU buffer. +/// +/// This corresponds to the `#[data]` attribute in `AsBindGroup`. 
+#[derive(Debug, Deref, DerefMut)] +pub struct OwnedData(pub Vec); + impl OwnedBindingResource { + /// Creates a [`BindingResource`] reference to this + /// [`OwnedBindingResource`]. + /// + /// Note that this operation panics if passed a + /// [`OwnedBindingResource::Data`], because [`OwnedData`] doesn't itself + /// correspond to any binding and instead requires the + /// `MaterialBindGroupAllocator` to pack it into a buffer. pub fn get_binding(&self) -> BindingResource { match self { OwnedBindingResource::Buffer(buffer) => buffer.as_entire_binding(), OwnedBindingResource::TextureView(_, view) => BindingResource::TextureView(view), - OwnedBindingResource::Sampler(sampler) => BindingResource::Sampler(sampler), + OwnedBindingResource::Sampler(_, sampler) => BindingResource::Sampler(sampler), + OwnedBindingResource::Data(_) => panic!("`OwnedData` has no binding resource"), } } } @@ -493,7 +690,6 @@ where #[cfg(test)] mod test { use super::*; - use crate as bevy_render; use bevy_asset::Handle; use bevy_image::Image; diff --git a/crates/bevy_render/src/render_resource/bind_group_layout.rs b/crates/bevy_render/src/render_resource/bind_group_layout.rs index c7f01cd8b381e..e19f5b969fc03 100644 --- a/crates/bevy_render/src/render_resource/bind_group_layout.rs +++ b/crates/bevy_render/src/render_resource/bind_group_layout.rs @@ -1,14 +1,21 @@ use crate::define_atomic_id; use crate::renderer::WgpuWrapper; -use alloc::sync::Arc; use core::ops::Deref; define_atomic_id!(BindGroupLayoutId); +/// Bind group layouts define the interface of resources (e.g. buffers, textures, samplers) +/// for a shader. The actual resource binding is done via a [`BindGroup`](super::BindGroup). +/// +/// This is a lightweight thread-safe wrapper around wgpu's own [`BindGroupLayout`](wgpu::BindGroupLayout), +/// which can be cloned as needed to workaround lifetime management issues. It may be converted +/// from and dereferences to wgpu's [`BindGroupLayout`](wgpu::BindGroupLayout). 
+/// +/// Can be created via [`RenderDevice::create_bind_group_layout`](crate::RenderDevice::create_bind_group_layout). #[derive(Clone, Debug)] pub struct BindGroupLayout { id: BindGroupLayoutId, - value: Arc>, + value: WgpuWrapper, } impl PartialEq for BindGroupLayout { @@ -17,7 +24,16 @@ impl PartialEq for BindGroupLayout { } } +impl Eq for BindGroupLayout {} + +impl core::hash::Hash for BindGroupLayout { + fn hash(&self, state: &mut H) { + self.id.0.hash(state); + } +} + impl BindGroupLayout { + /// Returns the [`BindGroupLayoutId`] representing the unique ID of the bind group layout. #[inline] pub fn id(&self) -> BindGroupLayoutId { self.id @@ -33,7 +49,7 @@ impl From for BindGroupLayout { fn from(value: wgpu::BindGroupLayout) -> Self { BindGroupLayout { id: BindGroupLayoutId::new(), - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } diff --git a/crates/bevy_render/src/render_resource/bind_group_layout_entries.rs b/crates/bevy_render/src/render_resource/bind_group_layout_entries.rs index 3a811a5dbe41b..bc4a7d306da4b 100644 --- a/crates/bevy_render/src/render_resource/bind_group_layout_entries.rs +++ b/crates/bevy_render/src/render_resource/bind_group_layout_entries.rs @@ -222,7 +222,7 @@ impl IntoBindGroupLayoutEntryBuilder for BindingType { impl IntoBindGroupLayoutEntryBuilder for BindGroupLayoutEntry { fn into_bind_group_layout_entry_builder(self) -> BindGroupLayoutEntryBuilder { if self.binding != u32::MAX { - bevy_utils::tracing::warn!("The BindGroupLayoutEntries api ignores the binding index when converting a raw wgpu::BindGroupLayoutEntry. You can ignore this warning by setting it to u32::MAX."); + tracing::warn!("The BindGroupLayoutEntries api ignores the binding index when converting a raw wgpu::BindGroupLayoutEntry. 
You can ignore this warning by setting it to u32::MAX."); } BindGroupLayoutEntryBuilder { ty: self.ty, @@ -556,4 +556,16 @@ pub mod binding_types { } .into_bind_group_layout_entry_builder() } + + pub fn texture_storage_3d( + format: TextureFormat, + access: StorageTextureAccess, + ) -> BindGroupLayoutEntryBuilder { + BindingType::StorageTexture { + access, + format, + view_dimension: TextureViewDimension::D3, + } + .into_bind_group_layout_entry_builder() + } } diff --git a/crates/bevy_render/src/render_resource/bindless.rs b/crates/bevy_render/src/render_resource/bindless.rs new file mode 100644 index 0000000000000..64a0fa2c1fe84 --- /dev/null +++ b/crates/bevy_render/src/render_resource/bindless.rs @@ -0,0 +1,344 @@ +//! Types and functions relating to bindless resources. + +use alloc::borrow::Cow; +use core::{ + num::{NonZeroU32, NonZeroU64}, + ops::Range, +}; + +use bevy_derive::{Deref, DerefMut}; +use wgpu::{ + BindGroupLayoutEntry, SamplerBindingType, ShaderStages, TextureSampleType, TextureViewDimension, +}; + +use crate::render_resource::binding_types::storage_buffer_read_only_sized; + +use super::binding_types::{ + sampler, texture_1d, texture_2d, texture_2d_array, texture_3d, texture_cube, texture_cube_array, +}; + +/// The default value for the number of resources that can be stored in a slab +/// on this platform. +/// +/// See the documentation for [`BindlessSlabResourceLimit`] for more +/// information. +#[cfg(any(target_os = "macos", target_os = "ios"))] +pub const AUTO_BINDLESS_SLAB_RESOURCE_LIMIT: u32 = 64; +/// The default value for the number of resources that can be stored in a slab +/// on this platform. +/// +/// See the documentation for [`BindlessSlabResourceLimit`] for more +/// information. +#[cfg(not(any(target_os = "macos", target_os = "ios")))] +pub const AUTO_BINDLESS_SLAB_RESOURCE_LIMIT: u32 = 2048; + +/// The binding numbers for the built-in binding arrays of each bindless +/// resource type. 
+/// +/// In the case of materials, the material allocator manages these binding +/// arrays. +/// +/// `bindless.wgsl` contains declarations of these arrays for use in your +/// shaders. If you change these, make sure to update that file as well. +pub static BINDING_NUMBERS: [(BindlessResourceType, BindingNumber); 9] = [ + (BindlessResourceType::SamplerFiltering, BindingNumber(1)), + (BindlessResourceType::SamplerNonFiltering, BindingNumber(2)), + (BindlessResourceType::SamplerComparison, BindingNumber(3)), + (BindlessResourceType::Texture1d, BindingNumber(4)), + (BindlessResourceType::Texture2d, BindingNumber(5)), + (BindlessResourceType::Texture2dArray, BindingNumber(6)), + (BindlessResourceType::Texture3d, BindingNumber(7)), + (BindlessResourceType::TextureCube, BindingNumber(8)), + (BindlessResourceType::TextureCubeArray, BindingNumber(9)), +]; + +/// The maximum number of resources that can be stored in a slab. +/// +/// This limit primarily exists in order to work around `wgpu` performance +/// problems involving large numbers of bindless resources. Also, some +/// platforms, such as Metal, currently enforce limits on the number of +/// resources in use. +/// +/// This corresponds to `LIMIT` in the `#[bindless(LIMIT)]` attribute when +/// deriving [`crate::render_resource::AsBindGroup`]. +#[derive(Clone, Copy, Default, PartialEq, Debug)] +pub enum BindlessSlabResourceLimit { + /// Allows the renderer to choose a reasonable value for the resource limit + /// based on the platform. + /// + /// This value has been tuned, so you should default to this value unless + /// you have special platform-specific considerations that prevent you from + /// using it. + #[default] + Auto, + + /// A custom value for the resource limit. + /// + /// Bevy will allocate no more than this number of resources in a slab, + /// unless exceeding this value is necessary in order to allocate at all + /// (i.e. 
unless the number of bindless resources in your bind group exceeds + /// this value), in which case Bevy can exceed it. + Custom(u32), +} + +/// Information about the bindless resources in this object. +/// +/// The material bind group allocator uses this descriptor in order to create +/// and maintain bind groups. The fields within this bindless descriptor are +/// [`Cow`]s in order to support both the common case in which the fields are +/// simply `static` constants and the more unusual case in which the fields are +/// dynamically generated efficiently. An example of the latter case is +/// `ExtendedMaterial`, which needs to assemble a bindless descriptor from those +/// of the base material and the material extension at runtime. +/// +/// This structure will only be present if this object is bindless. +pub struct BindlessDescriptor { + /// The bindless resource types that this object uses, in order of bindless + /// index. + /// + /// The resource assigned to binding index 0 will be at index 0, the + /// resource assigned to binding index will be at index 1 in this array, and + /// so on. Unused binding indices are set to [`BindlessResourceType::None`]. + pub resources: Cow<'static, [BindlessResourceType]>, + /// The [`BindlessBufferDescriptor`] for each bindless buffer that this + /// object uses. + /// + /// The order of this array is irrelevant. + pub buffers: Cow<'static, [BindlessBufferDescriptor]>, + /// The [`BindlessIndexTableDescriptor`]s describing each bindless index + /// table. + /// + /// This list must be sorted by the first bindless index. + pub index_tables: Cow<'static, [BindlessIndexTableDescriptor]>, +} + +/// The type of potentially-bindless resource. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +pub enum BindlessResourceType { + /// No bindless resource. + /// + /// This is used as a placeholder to fill holes in the + /// [`BindlessDescriptor::resources`] list. + None, + /// A storage buffer. 
+ Buffer, + /// A filtering sampler. + SamplerFiltering, + /// A non-filtering sampler (nearest neighbor). + SamplerNonFiltering, + /// A comparison sampler (typically used for shadow maps). + SamplerComparison, + /// A 1D texture. + Texture1d, + /// A 2D texture. + Texture2d, + /// A 2D texture array. + /// + /// Note that this differs from a binding array. 2D texture arrays must all + /// have the same size and format. + Texture2dArray, + /// A 3D texture. + Texture3d, + /// A cubemap texture. + TextureCube, + /// A cubemap texture array. + /// + /// Note that this differs from a binding array. Cubemap texture arrays must + /// all have the same size and format. + TextureCubeArray, + /// Multiple instances of plain old data concatenated into a single buffer. + /// + /// This corresponds to the `#[data]` declaration in + /// [`crate::render_resource::AsBindGroup`]. + /// + /// Note that this resource doesn't itself map to a GPU-level binding + /// resource and instead depends on the `MaterialBindGroupAllocator` to + /// create a binding resource for it. + DataBuffer, +} + +/// Describes a bindless buffer. +/// +/// Unlike samplers and textures, each buffer in a bind group gets its own +/// unique bind group entry. That is, there isn't any `bindless_buffers` binding +/// array to go along with `bindless_textures_2d`, +/// `bindless_samplers_filtering`, etc. Therefore, this descriptor contains two +/// indices: the *binding number* and the *bindless index*. The binding number +/// is the `@binding` number used in the shader, while the bindless index is the +/// index of the buffer in the bindless index table (which is itself +/// conventionally bound to binding number 0). 
+///
+/// When declaring the buffer in a derived implementation of
+/// [`crate::render_resource::AsBindGroup`] with syntax like
+/// `#[uniform(BINDLESS_INDEX, StandardMaterialUniform,
+/// bindless(BINDING_NUMBER))]`, the bindless index is `BINDLESS_INDEX`, and the
+/// binding number is `BINDING_NUMBER`. Note the order.
+#[derive(Clone, Copy, Debug)]
+pub struct BindlessBufferDescriptor {
+    /// The actual binding number of the buffer.
+    ///
+    /// This is declared with `@binding` in WGSL. When deriving
+    /// [`crate::render_resource::AsBindGroup`], this is the `BINDING_NUMBER` in
+    /// `#[uniform(BINDLESS_INDEX, StandardMaterialUniform,
+    /// bindless(BINDING_NUMBER))]`.
+    pub binding_number: BindingNumber,
+    /// The index of the buffer in the bindless index table.
+    ///
+    /// In the shader, this is the index into the table bound to binding 0. When
+    /// deriving [`crate::render_resource::AsBindGroup`], this is the
+    /// `BINDLESS_INDEX` in `#[uniform(BINDLESS_INDEX, StandardMaterialUniform,
+    /// bindless(BINDING_NUMBER))]`.
+    pub bindless_index: BindlessIndex,
+    /// The size of the buffer in bytes, if known.
+    pub size: Option,
+}
+
+/// Describes the layout of the bindless index table, which maps bindless
+/// indices to indices within the binding arrays.
+#[derive(Clone)]
+pub struct BindlessIndexTableDescriptor {
+    /// The range of bindless indices that this descriptor covers.
+    pub indices: Range,
+    /// The binding at which the index table itself will be bound.
+    ///
+    /// By default, this is binding 0, but it can be changed with the
+    /// `#[bindless(index_table(binding(B)))]` attribute.
+    pub binding_number: BindingNumber,
+}
+
+/// The index of the actual binding in the bind group.
+///
+/// This is the value specified in WGSL as `@binding`.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Deref, DerefMut)]
+pub struct BindingNumber(pub u32);
+
+/// The index in the bindless index table.
+///
+/// This table is conventionally bound to binding number 0.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Hash, Debug, Deref, DerefMut)] +pub struct BindlessIndex(pub u32); + +/// Creates the bind group layout entries common to all shaders that use +/// bindless bind groups. +/// +/// `bindless_resource_count` specifies the total number of bindless resources. +/// `bindless_slab_resource_limit` specifies the resolved +/// [`BindlessSlabResourceLimit`] value. +pub fn create_bindless_bind_group_layout_entries( + bindless_index_table_length: u32, + bindless_slab_resource_limit: u32, + bindless_index_table_binding_number: BindingNumber, +) -> Vec { + let bindless_slab_resource_limit = + NonZeroU32::new(bindless_slab_resource_limit).expect("Bindless slot count must be nonzero"); + + // The maximum size of a binding array is the + // `bindless_slab_resource_limit`, which would occur if all of the bindless + // resources were of the same type. So we create our binding arrays with + // that size. + + vec![ + // Start with the bindless index table, bound to binding number 0. + storage_buffer_read_only_sized( + false, + NonZeroU64::new(bindless_index_table_length as u64 * size_of::() as u64), + ) + .build(*bindless_index_table_binding_number, ShaderStages::all()), + // Continue with the common bindless resource arrays. 
+ sampler(SamplerBindingType::Filtering) + .count(bindless_slab_resource_limit) + .build(1, ShaderStages::all()), + sampler(SamplerBindingType::NonFiltering) + .count(bindless_slab_resource_limit) + .build(2, ShaderStages::all()), + sampler(SamplerBindingType::Comparison) + .count(bindless_slab_resource_limit) + .build(3, ShaderStages::all()), + texture_1d(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(4, ShaderStages::all()), + texture_2d(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(5, ShaderStages::all()), + texture_2d_array(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(6, ShaderStages::all()), + texture_3d(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(7, ShaderStages::all()), + texture_cube(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(8, ShaderStages::all()), + texture_cube_array(TextureSampleType::Float { filterable: true }) + .count(bindless_slab_resource_limit) + .build(9, ShaderStages::all()), + ] +} + +impl BindlessSlabResourceLimit { + /// Determines the actual bindless slab resource limit on this platform. + pub fn resolve(&self) -> u32 { + match *self { + BindlessSlabResourceLimit::Auto => AUTO_BINDLESS_SLAB_RESOURCE_LIMIT, + BindlessSlabResourceLimit::Custom(limit) => limit, + } + } +} + +impl BindlessResourceType { + /// Returns the binding number for the common array of this resource type. + /// + /// For example, if you pass `BindlessResourceType::Texture2d`, this will + /// return 5, in order to match the `@group(2) @binding(5) var + /// bindless_textures_2d: binding_array>` declaration in + /// `bindless.wgsl`. + /// + /// Not all resource types have fixed binding numbers. If you call + /// [`Self::binding_number`] on such a resource type, it returns `None`. 
+ /// + /// Note that this returns a static reference to the binding number, not the + /// binding number itself. This is to conform to an idiosyncratic API in + /// `wgpu` whereby binding numbers for binding arrays are taken by `&u32` + /// *reference*, not by `u32` value. + pub fn binding_number(&self) -> Option<&'static BindingNumber> { + match BINDING_NUMBERS.binary_search_by_key(self, |(key, _)| *key) { + Ok(binding_number) => Some(&BINDING_NUMBERS[binding_number].1), + Err(_) => None, + } + } +} + +impl From for BindlessResourceType { + fn from(texture_view_dimension: TextureViewDimension) -> Self { + match texture_view_dimension { + TextureViewDimension::D1 => BindlessResourceType::Texture1d, + TextureViewDimension::D2 => BindlessResourceType::Texture2d, + TextureViewDimension::D2Array => BindlessResourceType::Texture2dArray, + TextureViewDimension::Cube => BindlessResourceType::TextureCube, + TextureViewDimension::CubeArray => BindlessResourceType::TextureCubeArray, + TextureViewDimension::D3 => BindlessResourceType::Texture3d, + } + } +} + +impl From for BindlessResourceType { + fn from(sampler_binding_type: SamplerBindingType) -> Self { + match sampler_binding_type { + SamplerBindingType::Filtering => BindlessResourceType::SamplerFiltering, + SamplerBindingType::NonFiltering => BindlessResourceType::SamplerNonFiltering, + SamplerBindingType::Comparison => BindlessResourceType::SamplerComparison, + } + } +} + +impl From for BindlessIndex { + fn from(value: u32) -> Self { + Self(value) + } +} + +impl From for BindingNumber { + fn from(value: u32) -> Self { + Self(value) + } +} diff --git a/crates/bevy_render/src/render_resource/buffer.rs b/crates/bevy_render/src/render_resource/buffer.rs index ea2e6b4c6838c..9b7bb2c41f487 100644 --- a/crates/bevy_render/src/render_resource/buffer.rs +++ b/crates/bevy_render/src/render_resource/buffer.rs @@ -1,6 +1,5 @@ use crate::define_atomic_id; use crate::renderer::WgpuWrapper; -use alloc::sync::Arc; use 
core::ops::{Bound, Deref, RangeBounds}; define_atomic_id!(BufferId); @@ -8,8 +7,7 @@ define_atomic_id!(BufferId); #[derive(Clone, Debug)] pub struct Buffer { id: BufferId, - value: Arc>, - size: wgpu::BufferAddress, + value: WgpuWrapper, } impl Buffer { @@ -28,7 +26,7 @@ impl Buffer { let size = match bounds.end_bound() { Bound::Included(&bound) => bound + 1, Bound::Excluded(&bound) => bound, - Bound::Unbounded => self.size, + Bound::Unbounded => self.value.size(), } - offset; BufferSlice { id: self.id, @@ -48,8 +46,7 @@ impl From for Buffer { fn from(value: wgpu::Buffer) -> Self { Buffer { id: BufferId::new(), - size: value.size(), - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } diff --git a/crates/bevy_render/src/render_resource/buffer_vec.rs b/crates/bevy_render/src/render_resource/buffer_vec.rs index 3a04e5c45be1d..4e6c787fba299 100644 --- a/crates/bevy_render/src/render_resource/buffer_vec.rs +++ b/crates/bevy_render/src/render_resource/buffer_vec.rs @@ -29,13 +29,13 @@ use super::GpuArrayBufferable; /// from system RAM to VRAM. /// /// Other options for storing GPU-accessible data are: -/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) +/// * [`BufferVec`] /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) -/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) -/// * [`BufferVec`] +/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) +/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) pub struct RawBufferVec { values: Vec, buffer: Option, @@ -103,6 +103,25 @@ impl RawBufferVec { self.values.append(&mut other.values); } + /// Returns the value at the given index. 
+ pub fn get(&self, index: u32) -> Option<&T> { + self.values.get(index as usize) + } + + /// Sets the value at the given index. + /// + /// The index must be less than [`RawBufferVec::len`]. + pub fn set(&mut self, index: u32, value: T) { + self.values[index as usize] = value; + } + + /// Preallocates space for `count` elements in the internal CPU-side buffer. + /// + /// Unlike [`RawBufferVec::reserve`], this doesn't have any effect on the GPU buffer. + pub fn reserve_internal(&mut self, count: usize) { + self.values.reserve(count); + } + /// Changes the debugging label of the buffer. /// /// The next time the buffer is updated (via [`reserve`](Self::reserve)), Bevy will inform @@ -188,6 +207,18 @@ impl RawBufferVec { } } +impl RawBufferVec +where + T: NoUninit + Default, +{ + pub fn grow_set(&mut self, index: u32, value: T) { + while index as usize + 1 > self.len() { + self.values.push(T::default()); + } + self.values[index as usize] = value; + } +} + impl Extend for RawBufferVec { #[inline] fn extend>(&mut self, iter: I) { @@ -207,6 +238,15 @@ impl Extend for RawBufferVec { /// CPU access to the data after it's been added via [`BufferVec::push`]. If you /// need CPU access to the data, consider another type, such as /// [`StorageBuffer`][super::StorageBuffer]. +/// +/// Other options for storing GPU-accessible data are: +/// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) +/// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) +/// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) +/// * [`RawBufferVec`] +/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) +/// * [`Texture`](crate::render_resource::Texture) +/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) pub struct BufferVec where T: ShaderType + WriteInto, @@ -276,7 +316,7 @@ where // TODO: Consider using unsafe code to push uninitialized, to prevent // the zeroing. It shows up in profiles. 
- self.data.extend(iter::repeat(0).take(element_size)); + self.data.extend(iter::repeat_n(0, element_size)); // Take a slice of the new data for `write_into` to use. This is // important: it hoists the bounds check up here so that the compiler @@ -416,8 +456,14 @@ where /// Reserves space for one more element in the buffer and returns its index. pub fn add(&mut self) -> usize { + self.add_multiple(1) + } + + /// Reserves space for the given number of elements in the buffer and + /// returns the index of the first one. + pub fn add_multiple(&mut self, count: usize) -> usize { let index = self.len; - self.len += 1; + self.len += count; index } diff --git a/crates/bevy_render/src/render_resource/gpu_array_buffer.rs b/crates/bevy_render/src/render_resource/gpu_array_buffer.rs index c4c5906373029..195920ee0ceb7 100644 --- a/crates/bevy_render/src/render_resource/gpu_array_buffer.rs +++ b/crates/bevy_render/src/render_resource/gpu_array_buffer.rs @@ -6,7 +6,7 @@ use crate::{ render_resource::batched_uniform_buffer::BatchedUniformBuffer, renderer::{RenderDevice, RenderQueue}, }; -use bevy_ecs::{prelude::Component, system::Resource}; +use bevy_ecs::{prelude::Component, resource::Resource}; use core::marker::PhantomData; use encase::{private::WriteInto, ShaderSize, ShaderType}; use nonmax::NonMaxU32; @@ -24,13 +24,13 @@ impl GpuArrayBufferable for T {} /// binding (within reasonable limits). 
/// /// Other options for storing GPU-accessible data are: -/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) +/// * [`BufferVec`] /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) -/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) -/// * [`BufferVec`] +/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) +/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) #[derive(Resource)] pub enum GpuArrayBuffer { Uniform(BatchedUniformBuffer), diff --git a/crates/bevy_render/src/render_resource/mod.rs b/crates/bevy_render/src/render_resource/mod.rs index 27a08851122c6..b777d96290ccd 100644 --- a/crates/bevy_render/src/render_resource/mod.rs +++ b/crates/bevy_render/src/render_resource/mod.rs @@ -3,6 +3,7 @@ mod bind_group; mod bind_group_entries; mod bind_group_layout; mod bind_group_layout_entries; +mod bindless; mod buffer; mod buffer_vec; mod gpu_array_buffer; @@ -19,6 +20,7 @@ pub use bind_group::*; pub use bind_group_entries::*; pub use bind_group_layout::*; pub use bind_group_layout_entries::*; +pub use bindless::*; pub use buffer::*; pub use buffer_vec::*; pub use gpu_array_buffer::*; @@ -43,19 +45,20 @@ pub use wgpu::{ ColorWrites, CommandEncoder, CommandEncoderDescriptor, CompareFunction, ComputePass, ComputePassDescriptor, ComputePipelineDescriptor as RawComputePipelineDescriptor, DepthBiasState, DepthStencilState, DownlevelFlags, Extent3d, Face, Features as WgpuFeatures, - FilterMode, FragmentState as RawFragmentState, FrontFace, ImageCopyBuffer, ImageCopyBufferBase, - ImageCopyTexture, ImageCopyTextureBase, ImageDataLayout, ImageSubresourceRange, IndexFormat, + FilterMode, FragmentState as RawFragmentState, FrontFace, ImageSubresourceRange, IndexFormat, Limits as WgpuLimits, LoadOp, Maintain, MapMode, 
MultisampleState, Operations, Origin3d, PipelineCompilationOptions, PipelineLayout, PipelineLayoutDescriptor, PolygonMode, PrimitiveState, PrimitiveTopology, PushConstantRange, RenderPassColorAttachment, RenderPassDepthStencilAttachment, RenderPassDescriptor, RenderPipelineDescriptor as RawRenderPipelineDescriptor, Sampler as WgpuSampler, - SamplerBindingType, SamplerDescriptor, ShaderModule, ShaderModuleDescriptor, ShaderSource, - ShaderStages, StencilFaceState, StencilOperation, StencilState, StorageTextureAccess, StoreOp, - TextureAspect, TextureDescriptor, TextureDimension, TextureFormat, TextureSampleType, - TextureUsages, TextureView as WgpuTextureView, TextureViewDescriptor, TextureViewDimension, - VertexAttribute, VertexBufferLayout as RawVertexBufferLayout, VertexFormat, - VertexState as RawVertexState, VertexStepMode, COPY_BUFFER_ALIGNMENT, + SamplerBindingType, SamplerBindingType as WgpuSamplerBindingType, SamplerDescriptor, + ShaderModule, ShaderModuleDescriptor, ShaderSource, ShaderStages, StencilFaceState, + StencilOperation, StencilState, StorageTextureAccess, StoreOp, TexelCopyBufferInfo, + TexelCopyBufferLayout, TexelCopyTextureInfo, TextureAspect, TextureDescriptor, + TextureDimension, TextureFormat, TextureFormatFeatureFlags, TextureFormatFeatures, + TextureSampleType, TextureUsages, TextureView as WgpuTextureView, TextureViewDescriptor, + TextureViewDimension, VertexAttribute, VertexBufferLayout as RawVertexBufferLayout, + VertexFormat, VertexState as RawVertexState, VertexStepMode, COPY_BUFFER_ALIGNMENT, }; pub use crate::mesh::VertexBufferLayout; diff --git a/crates/bevy_render/src/render_resource/pipeline.rs b/crates/bevy_render/src/render_resource/pipeline.rs index be400a02e9b0f..30f9a974b853f 100644 --- a/crates/bevy_render/src/render_resource/pipeline.rs +++ b/crates/bevy_render/src/render_resource/pipeline.rs @@ -6,7 +6,6 @@ use crate::{ render_resource::{BindGroupLayout, Shader}, }; use alloc::borrow::Cow; -use alloc::sync::Arc; use 
bevy_asset::Handle; use core::ops::Deref; use wgpu::{ @@ -22,7 +21,7 @@ define_atomic_id!(RenderPipelineId); #[derive(Clone, Debug)] pub struct RenderPipeline { id: RenderPipelineId, - value: Arc>, + value: WgpuWrapper, } impl RenderPipeline { @@ -36,7 +35,7 @@ impl From for RenderPipeline { fn from(value: wgpu::RenderPipeline) -> Self { RenderPipeline { id: RenderPipelineId::new(), - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } @@ -59,7 +58,7 @@ define_atomic_id!(ComputePipelineId); #[derive(Clone, Debug)] pub struct ComputePipeline { id: ComputePipelineId, - value: Arc>, + value: WgpuWrapper, } impl ComputePipeline { @@ -74,7 +73,7 @@ impl From for ComputePipeline { fn from(value: wgpu::ComputePipeline) -> Self { ComputePipeline { id: ComputePipelineId::new(), - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } diff --git a/crates/bevy_render/src/render_resource/pipeline_cache.rs b/crates/bevy_render/src/render_resource/pipeline_cache.rs index 0e041968de1cc..653ae70b1c77c 100644 --- a/crates/bevy_render/src/render_resource/pipeline_cache.rs +++ b/crates/bevy_render/src/render_resource/pipeline_cache.rs @@ -8,19 +8,17 @@ use alloc::{borrow::Cow, sync::Arc}; use bevy_asset::{AssetEvent, AssetId, Assets}; use bevy_ecs::{ event::EventReader, - system::{Res, ResMut, Resource}, + resource::Resource, + system::{Res, ResMut}, }; +use bevy_platform::collections::{hash_map::EntryRef, HashMap, HashSet}; use bevy_tasks::Task; -use bevy_utils::{ - default, - hashbrown::hash_map::EntryRef, - tracing::{debug, error}, - HashMap, HashSet, -}; +use bevy_utils::default; use core::{future::Future, hash::Hash, mem, ops::Deref}; use naga::valid::Capabilities; use std::sync::{Mutex, PoisonError}; use thiserror::Error; +use tracing::{debug, error}; #[cfg(feature = "shader_format_spirv")] use wgpu::util::make_spirv; use wgpu::{ @@ -30,7 +28,7 @@ use wgpu::{ /// A descriptor for a [`Pipeline`]. 
/// -/// Used to store an heterogenous collection of render and compute pipeline descriptors together. +/// Used to store a heterogenous collection of render and compute pipeline descriptors together. #[derive(Debug)] pub enum PipelineDescriptor { RenderPipelineDescriptor(Box), @@ -39,7 +37,7 @@ pub enum PipelineDescriptor { /// A pipeline defining the data layout and shader logic for a specific GPU task. /// -/// Used to store an heterogenous collection of render and compute pipelines together. +/// Used to store a heterogenous collection of render and compute pipelines together. #[derive(Debug)] pub enum Pipeline { RenderPipeline(RenderPipeline), @@ -129,6 +127,8 @@ struct ShaderData { struct ShaderCache { data: HashMap, ShaderData>, + #[cfg(feature = "shader_format_wesl")] + asset_paths: HashMap>, shaders: HashMap, Shader>, import_path_shaders: HashMap>, waiting_on_import: HashMap>>, @@ -181,6 +181,8 @@ impl ShaderCache { Self { composer, data: Default::default(), + #[cfg(feature = "shader_format_wesl")] + asset_paths: Default::default(), shaders: Default::default(), import_path_shaders: Default::default(), waiting_on_import: Default::default(), @@ -214,7 +216,6 @@ impl ShaderCache { Ok(()) } - #[allow(clippy::result_large_err)] fn get( &mut self, render_device: &RenderDevice, @@ -226,6 +227,7 @@ impl ShaderCache { .shaders .get(&id) .ok_or(PipelineCacheError::ShaderNotLoaded(id))?; + let data = self.data.entry(id).or_default(); let n_asset_imports = shader .imports() @@ -254,7 +256,7 @@ impl ShaderCache { shader_defs.push("SIXTEEN_BYTE_ALIGNMENT".into()); } - if cfg!(feature = "ios_simulator") { + if cfg!(target_abi = "sim") { shader_defs.push("NO_CUBE_ARRAY_TEXTURES_SUPPORT".into()); } @@ -264,12 +266,50 @@ impl ShaderCache { )); debug!( - "processing shader {:?}, with shader defs {:?}", + "processing shader {}, with shader defs {:?}", id, shader_defs ); let shader_source = match &shader.source { #[cfg(feature = "shader_format_spirv")] Source::SpirV(data) => 
make_spirv(data), + #[cfg(feature = "shader_format_wesl")] + Source::Wesl(_) => { + if let ShaderImport::AssetPath(path) = shader.import_path() { + let shader_resolver = + ShaderResolver::new(&self.asset_paths, &self.shaders); + let module_path = wesl::syntax::ModulePath::from_path(path); + let mut compiler_options = wesl::CompileOptions { + imports: true, + condcomp: true, + lower: true, + ..default() + }; + + for shader_def in shader_defs { + match shader_def { + ShaderDefVal::Bool(key, value) => { + compiler_options.features.insert(key.clone(), value); + } + _ => debug!( + "ShaderDefVal::Int and ShaderDefVal::UInt are not supported in wesl", + ), + } + } + + let compiled = wesl::compile( + &module_path, + &shader_resolver, + &wesl::EscapeMangler, + &compiler_options, + ) + .unwrap(); + + let naga = naga::front::wgsl::parse_str(&compiled.to_string()).unwrap(); + ShaderSource::Naga(Cow::Owned(naga)) + } else { + panic!("Wesl shaders must be imported from a file"); + } + } #[cfg(not(feature = "shader_format_spirv"))] Source::SpirV(_) => { unimplemented!( @@ -309,7 +349,28 @@ impl ShaderCache { }, )?; - ShaderSource::Naga(Cow::Owned(naga)) + #[cfg(not(feature = "decoupled_naga"))] + { + ShaderSource::Naga(Cow::Owned(naga)) + } + + #[cfg(feature = "decoupled_naga")] + { + let mut validator = naga::valid::Validator::new( + naga::valid::ValidationFlags::all(), + self.composer.capabilities, + ); + let module_info = validator.validate(&naga).unwrap(); + let wgsl = Cow::Owned( + naga::back::wgsl::write_string( + &naga, + &module_info, + naga::back::wgsl::WriterFlags::empty(), + ) + .unwrap(), + ); + ShaderSource::Wgsl(wgsl) + } } }; @@ -321,7 +382,19 @@ impl ShaderCache { render_device .wgpu_device() .push_error_scope(wgpu::ErrorFilter::Validation); - let shader_module = render_device.create_shader_module(module_descriptor); + + let shader_module = match shader.validate_shader { + ValidateShader::Enabled => { + 
render_device.create_and_validate_shader_module(module_descriptor) + } + // SAFETY: we are interfacing with shader code, which may contain undefined behavior, + // such as indexing out of bounds. + // The checks required are prohibitively expensive and a poor default for game engines. + ValidateShader::Disabled => unsafe { + render_device.create_shader_module(module_descriptor) + }, + }; + let error = render_device.wgpu_device().pop_error_scope(); // `now_or_never` will return Some if the future is ready and None otherwise. @@ -329,7 +402,7 @@ impl ShaderCache { // So to keep the complexity of the ShaderCache low, we will only catch this error early on native platforms, // and on wasm the error will be handled by wgpu and crash the application. if let Some(Some(wgpu::Error::Validation { description, .. })) = - bevy_utils::futures::now_or_never(error) + bevy_tasks::futures::now_or_never(error) { return Err(PipelineCacheError::CreateShaderModule(description)); } @@ -389,6 +462,13 @@ impl ShaderCache { } } + #[cfg(feature = "shader_format_wesl")] + if let Source::Wesl(_) = shader.source { + if let ShaderImport::AssetPath(path) = shader.import_path() { + self.asset_paths + .insert(wesl::syntax::ModulePath::from_path(path), id); + } + } self.shaders.insert(id, shader); pipelines_to_queue } @@ -403,6 +483,40 @@ impl ShaderCache { } } +#[cfg(feature = "shader_format_wesl")] +pub struct ShaderResolver<'a> { + asset_paths: &'a HashMap>, + shaders: &'a HashMap, Shader>, +} + +#[cfg(feature = "shader_format_wesl")] +impl<'a> ShaderResolver<'a> { + pub fn new( + asset_paths: &'a HashMap>, + shaders: &'a HashMap, Shader>, + ) -> Self { + Self { + asset_paths, + shaders, + } + } +} + +#[cfg(feature = "shader_format_wesl")] +impl<'a> wesl::Resolver for ShaderResolver<'a> { + fn resolve_source( + &self, + module_path: &wesl::syntax::ModulePath, + ) -> Result, wesl::ResolveError> { + let asset_id = self.asset_paths.get(module_path).ok_or_else(|| { + 
wesl::ResolveError::ModuleNotFound(module_path.clone(), "Invalid asset id".to_string()) + })?; + + let shader = self.shaders.get(asset_id).unwrap(); + Ok(Cow::Borrowed(shader.source.as_str())) + } +} + type LayoutCacheKey = (Vec, Vec); #[derive(Default)] struct LayoutCache { @@ -494,7 +608,10 @@ impl PipelineCache { /// See [`PipelineCache::queue_render_pipeline()`]. #[inline] pub fn get_render_pipeline_state(&self, id: CachedRenderPipelineId) -> &CachedPipelineState { - &self.pipelines[id.0].state + // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines` + self.pipelines + .get(id.0) + .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state) } /// Get the state of a cached compute pipeline. @@ -502,12 +619,18 @@ impl PipelineCache { /// See [`PipelineCache::queue_compute_pipeline()`]. #[inline] pub fn get_compute_pipeline_state(&self, id: CachedComputePipelineId) -> &CachedPipelineState { - &self.pipelines[id.0].state + // If the pipeline id isn't in `pipelines`, it's queued in `new_pipelines` + self.pipelines + .get(id.0) + .map_or(&CachedPipelineState::Queued, |pipeline| &pipeline.state) } /// Get the render pipeline descriptor a cached render pipeline was inserted from. /// /// See [`PipelineCache::queue_render_pipeline()`]. + /// + /// **Note**: Be careful calling this method. It will panic if called with a pipeline that + /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`]. #[inline] pub fn get_render_pipeline_descriptor( &self, @@ -522,6 +645,9 @@ impl PipelineCache { /// Get the compute pipeline descriptor a cached render pipeline was inserted from. /// /// See [`PipelineCache::queue_compute_pipeline()`]. + /// + /// **Note**: Be careful calling this method. It will panic if called with a pipeline that + /// has been queued but has not yet been processed by [`PipelineCache::process_queue()`]. 
#[inline] pub fn get_compute_pipeline_descriptor( &self, @@ -543,7 +669,7 @@ impl PipelineCache { #[inline] pub fn get_render_pipeline(&self, id: CachedRenderPipelineId) -> Option<&RenderPipeline> { if let CachedPipelineState::Ok(Pipeline::RenderPipeline(pipeline)) = - &self.pipelines[id.0].state + &self.pipelines.get(id.0)?.state { Some(pipeline) } else { @@ -577,7 +703,7 @@ impl PipelineCache { #[inline] pub fn get_compute_pipeline(&self, id: CachedComputePipelineId) -> Option<&ComputePipeline> { if let CachedPipelineState::Ok(Pipeline::ComputePipeline(pipeline)) = - &self.pipelines[id.0].state + &self.pipelines.get(id.0)?.state { Some(pipeline) } else { @@ -873,16 +999,14 @@ impl PipelineCache { }; } - CachedPipelineState::Creating(ref mut task) => { - match bevy_utils::futures::check_ready(task) { - Some(Ok(pipeline)) => { - cached_pipeline.state = CachedPipelineState::Ok(pipeline); - return; - } - Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err), - _ => (), + CachedPipelineState::Creating(task) => match bevy_tasks::futures::check_ready(task) { + Some(Ok(pipeline)) => { + cached_pipeline.state = CachedPipelineState::Ok(pipeline); + return; } - } + Some(Err(err)) => cached_pipeline.state = CachedPipelineState::Err(err), + _ => (), + }, CachedPipelineState::Err(err) => match err { // Retry @@ -921,7 +1045,10 @@ impl PipelineCache { mut events: Extract>>, ) { for event in events.read() { - #[allow(clippy::match_same_arms)] + #[expect( + clippy::match_same_arms, + reason = "LoadedWithDependencies is marked as a TODO, so it's likely this will no longer lint soon." 
+ )] match event { // PERF: Instead of blocking waiting for the shader cache lock, try again next frame if the lock is currently held AssetEvent::Added { id } | AssetEvent::Modified { id } => { @@ -1069,6 +1196,18 @@ fn get_capabilities(features: Features, downlevel: DownlevelFlags) -> Capabiliti Capabilities::SUBGROUP_VERTEX_STAGE, features.contains(Features::SUBGROUP_VERTEX), ); + capabilities.set( + Capabilities::SHADER_FLOAT32_ATOMIC, + features.contains(Features::SHADER_FLOAT32_ATOMIC), + ); + capabilities.set( + Capabilities::TEXTURE_ATOMIC, + features.contains(Features::TEXTURE_ATOMIC), + ); + capabilities.set( + Capabilities::TEXTURE_INT64_ATOMIC, + features.contains(Features::TEXTURE_INT64_ATOMIC), + ); capabilities } diff --git a/crates/bevy_render/src/render_resource/pipeline_specializer.rs b/crates/bevy_render/src/render_resource/pipeline_specializer.rs index 3ee7a78ed7793..e017242ea0b9b 100644 --- a/crates/bevy_render/src/render_resource/pipeline_specializer.rs +++ b/crates/bevy_render/src/render_resource/pipeline_specializer.rs @@ -5,15 +5,18 @@ use crate::{ RenderPipelineDescriptor, }, }; -use bevy_ecs::system::Resource; -use bevy_utils::{ - default, - hashbrown::hash_map::{RawEntryMut, VacantEntry}, - tracing::error, - Entry, FixedHasher, HashMap, +use bevy_ecs::resource::Resource; +use bevy_platform::{ + collections::{ + hash_map::{Entry, RawEntryMut, VacantEntry}, + HashMap, + }, + hash::FixedHasher, }; +use bevy_utils::default; use core::{fmt::Debug, hash::Hash}; use thiserror::Error; +use tracing::error; pub trait SpecializedRenderPipeline { type Key: Clone + Hash + PartialEq + Eq; diff --git a/crates/bevy_render/src/render_resource/resource_macros.rs b/crates/bevy_render/src/render_resource/resource_macros.rs index 86a0bf285f31a..6cdf3b69794f5 100644 --- a/crates/bevy_render/src/render_resource/resource_macros.rs +++ b/crates/bevy_render/src/render_resource/resource_macros.rs @@ -4,9 +4,11 @@ macro_rules! 
define_atomic_id {
     #[derive(Copy, Clone, Hash, Eq, PartialEq, PartialOrd, Ord, Debug)]
     pub struct $atomic_id_type(core::num::NonZero);

-        // We use new instead of default to indicate that each ID created will be unique.
-        #[allow(clippy::new_without_default)]
         impl $atomic_id_type {
+            #[expect(
+                clippy::new_without_default,
+                reason = "Implementing the `Default` trait on atomic IDs would imply that two `::default()` equal each other. By only implementing `new()`, we indicate that each atomic ID created will be unique."
+            )]
             pub fn new() -> Self {
                 use core::sync::atomic::{AtomicU32, Ordering};
diff --git a/crates/bevy_render/src/render_resource/shader.rs b/crates/bevy_render/src/render_resource/shader.rs
index 36f71ed3bf611..005fb07c05bee 100644
--- a/crates/bevy_render/src/render_resource/shader.rs
+++ b/crates/bevy_render/src/render_resource/shader.rs
@@ -21,6 +21,30 @@ pub enum ShaderReflectError {
     #[error(transparent)]
     Validation(#[from] naga::WithSpan),
 }
+
+/// Describes whether or not to perform runtime checks on shaders.
+/// Runtime checks can be enabled for safety at the cost of speed.
+/// By default no runtime checks will be performed.
+///
+/// # Panics
+/// Because no runtime checks are performed for spirv,
+/// enabling `ValidateShader` for spirv will cause a panic.
+#[derive(Clone, Debug, Default)]
+pub enum ValidateShader {
+    #[default]
+    /// No runtime checks for soundness (e.g. bound checking) are performed.
+    ///
+    /// This is suitable for trusted shaders, written by your program or dependencies you trust.
+    Disabled,
+    /// Enables runtime checks for soundness (e.g. bound checking).
+    ///
+    /// While this can have a meaningful impact on performance,
+    /// this setting should *always* be enabled when loading untrusted shaders.
+    /// This might occur if you are creating a shader playground, running user-generated shaders
+    /// (as in `VRChat`), or writing a web browser in Bevy.
+ Enabled, +} + /// A shader, as defined by its [`ShaderSource`](wgpu::ShaderSource) and [`ShaderStage`](naga::ShaderStage) /// This is an "unprocessed" shader. It can contain preprocessor directives. #[derive(Asset, TypePath, Debug, Clone)] @@ -36,6 +60,10 @@ pub struct Shader { // we must store strong handles to our dependencies to stop them // from being immediately dropped if we are the only user. pub file_dependencies: Vec>, + /// Enable or disable runtime shader validation, trading safety against speed. + /// + /// Please read the [`ValidateShader`] docs for a discussion of the tradeoffs involved. + pub validate_shader: ValidateShader, } impl Shader { @@ -78,6 +106,7 @@ impl Shader { additional_imports: Default::default(), shader_defs: Default::default(), file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, } } @@ -108,6 +137,7 @@ impl Shader { additional_imports: Default::default(), shader_defs: Default::default(), file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, } } @@ -121,6 +151,43 @@ impl Shader { additional_imports: Default::default(), shader_defs: Default::default(), file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, + } + } + + #[cfg(feature = "shader_format_wesl")] + pub fn from_wesl(source: impl Into>, path: impl Into) -> Shader { + let source = source.into(); + let path = path.into(); + let (import_path, imports) = Shader::preprocess(&source, &path); + + match import_path { + ShaderImport::AssetPath(asset_path) => { + // Create the shader import path - always starting with "/" + let shader_path = std::path::Path::new("/").join(&asset_path); + + // Convert to a string with forward slashes and without extension + let import_path_str = shader_path + .with_extension("") + .to_string_lossy() + .replace('\\', "/"); + + let import_path = ShaderImport::AssetPath(import_path_str.to_string()); + + Shader { + path, + imports, + import_path, + source: 
Source::Wesl(source), + additional_imports: Default::default(), + shader_defs: Default::default(), + file_dependencies: Default::default(), + validate_shader: ValidateShader::Disabled, + } + } + ShaderImport::Custom(_) => { + panic!("Wesl shaders must be imported from an asset path"); + } } } @@ -192,6 +259,7 @@ impl<'a> From<&'a Shader> for naga_oil::compose::NagaModuleDescriptor<'a> { #[derive(Debug, Clone)] pub enum Source { Wgsl(Cow<'static, str>), + Wesl(Cow<'static, str>), Glsl(Cow<'static, str>, naga::ShaderStage), SpirV(Cow<'static, [u8]>), // TODO: consider the following @@ -202,7 +270,7 @@ pub enum Source { impl Source { pub fn as_str(&self) -> &str { match self { - Source::Wgsl(s) | Source::Glsl(s, _) => s, + Source::Wgsl(s) | Source::Wesl(s) | Source::Glsl(s, _) => s, Source::SpirV(_) => panic!("spirv not yet implemented"), } } @@ -219,6 +287,7 @@ impl From<&Source> for naga_oil::compose::ShaderLanguage { "GLSL is not supported in this configuration; use the feature `shader_format_glsl`" ), Source::SpirV(_) => panic!("spirv not yet implemented"), + Source::Wesl(_) => panic!("wesl not yet implemented"), } } } @@ -238,6 +307,7 @@ impl From<&Source> for naga_oil::compose::ShaderType { "GLSL is not supported in this configuration; use the feature `shader_format_glsl`" ), Source::SpirV(_) => panic!("spirv not yet implemented"), + Source::Wesl(_) => panic!("wesl not yet implemented"), } } } @@ -281,6 +351,8 @@ impl AssetLoader for ShaderLoader { "comp" => { Shader::from_glsl(String::from_utf8(bytes)?, naga::ShaderStage::Compute, path) } + #[cfg(feature = "shader_format_wesl")] + "wesl" => Shader::from_wesl(String::from_utf8(bytes)?, path), _ => panic!("unhandled extension: {ext}"), }; @@ -294,7 +366,7 @@ impl AssetLoader for ShaderLoader { } fn extensions(&self) -> &[&str] { - &["spv", "wgsl", "vert", "frag", "comp"] + &["spv", "wgsl", "vert", "frag", "comp", "wesl"] } } diff --git a/crates/bevy_render/src/render_resource/storage_buffer.rs 
b/crates/bevy_render/src/render_resource/storage_buffer.rs index c559712a76389..b407e22d8f9e6 100644 --- a/crates/bevy_render/src/render_resource/storage_buffer.rs +++ b/crates/bevy_render/src/render_resource/storage_buffer.rs @@ -22,14 +22,13 @@ use super::IntoBinding; /// is automatically enforced by this structure. /// /// Other options for storing GPU-accessible data are: +/// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicStorageBuffer`] -/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) -/// * [`BufferVec`](crate::render_resource::BufferVec) -/// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`Texture`](crate::render_resource::Texture) +/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) /// /// [std430 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-storage pub struct StorageBuffer { @@ -156,6 +155,8 @@ impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a StorageBuffer { /// Stores data to be transferred to the GPU and made accessible to shaders as a dynamic storage buffer. /// +/// This is just a [`StorageBuffer`], but also allows you to set dynamic offsets. +/// /// Dynamic storage buffers can be made available to shaders in some combination of read/write mode, and can store large amounts /// of data. Note however that WebGL2 does not support storage buffers, so consider alternative options in this case. Dynamic /// storage buffers support multiple separate bindings at dynamic byte offsets and so have a @@ -168,14 +169,13 @@ impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a StorageBuffer { /// will additionally be aligned to meet dynamic offset alignment requirements. 
/// /// Other options for storing GPU-accessible data are: -/// * [`StorageBuffer`] -/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) +/// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) -/// * [`BufferVec`](crate::render_resource::BufferVec) -/// * [`BufferVec`](crate::render_resource::BufferVec) +/// * [`StorageBuffer`] /// * [`Texture`](crate::render_resource::Texture) +/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) /// /// [std430 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-storage pub struct DynamicStorageBuffer { diff --git a/crates/bevy_render/src/render_resource/texture.rs b/crates/bevy_render/src/render_resource/texture.rs index ca8d26b085f43..f975fc18f31e6 100644 --- a/crates/bevy_render/src/render_resource/texture.rs +++ b/crates/bevy_render/src/render_resource/texture.rs @@ -1,8 +1,7 @@ use crate::define_atomic_id; use crate::renderer::WgpuWrapper; -use alloc::sync::Arc; use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::system::Resource; +use bevy_ecs::resource::Resource; use core::ops::Deref; define_atomic_id!(TextureId); @@ -11,10 +10,19 @@ define_atomic_id!(TextureId); /// /// May be converted from and dereferences to a wgpu [`Texture`](wgpu::Texture). /// Can be created via [`RenderDevice::create_texture`](crate::renderer::RenderDevice::create_texture). 
+/// +/// Other options for storing GPU-accessible data are: +/// * [`BufferVec`](crate::render_resource::BufferVec) +/// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) +/// * [`DynamicUniformBuffer`](crate::render_resource::DynamicUniformBuffer) +/// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) +/// * [`RawBufferVec`](crate::render_resource::RawBufferVec) +/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) +/// * [`UniformBuffer`](crate::render_resource::UniformBuffer) #[derive(Clone, Debug)] pub struct Texture { id: TextureId, - value: Arc>, + value: WgpuWrapper, } impl Texture { @@ -34,7 +42,7 @@ impl From for Texture { fn from(value: wgpu::Texture) -> Self { Texture { id: TextureId::new(), - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } @@ -54,18 +62,16 @@ define_atomic_id!(TextureViewId); #[derive(Clone, Debug)] pub struct TextureView { id: TextureViewId, - value: Arc>, + value: WgpuWrapper, } pub struct SurfaceTexture { - value: Arc>, + value: WgpuWrapper, } impl SurfaceTexture { - pub fn try_unwrap(self) -> Option { - Arc::try_unwrap(self.value) - .map(WgpuWrapper::into_inner) - .ok() + pub fn present(self) { + self.value.into_inner().present(); } } @@ -81,7 +87,7 @@ impl From for TextureView { fn from(value: wgpu::TextureView) -> Self { TextureView { id: TextureViewId::new(), - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } @@ -89,7 +95,7 @@ impl From for TextureView { impl From for SurfaceTexture { fn from(value: wgpu::SurfaceTexture) -> Self { SurfaceTexture { - value: Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } @@ -122,7 +128,7 @@ define_atomic_id!(SamplerId); #[derive(Clone, Debug)] pub struct Sampler { id: SamplerId, - value: Arc>, + value: WgpuWrapper, } impl Sampler { @@ -137,7 +143,7 @@ impl From for Sampler { fn from(value: wgpu::Sampler) -> Self { Sampler { id: SamplerId::new(), - value: 
Arc::new(WgpuWrapper::new(value)), + value: WgpuWrapper::new(value), } } } diff --git a/crates/bevy_render/src/render_resource/uniform_buffer.rs b/crates/bevy_render/src/render_resource/uniform_buffer.rs index 95e1a0a56664e..b7d22972df469 100644 --- a/crates/bevy_render/src/render_resource/uniform_buffer.rs +++ b/crates/bevy_render/src/render_resource/uniform_buffer.rs @@ -27,12 +27,12 @@ use super::IntoBinding; /// (vectors), or structures with fields that are vectors. /// /// Other options for storing GPU-accessible data are: -/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) +/// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) /// * [`DynamicUniformBuffer`] /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) -/// * [`BufferVec`](crate::render_resource::BufferVec) +/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) /// /// [std140 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-uniform @@ -164,14 +164,13 @@ impl<'a, T: ShaderType + WriteInto> IntoBinding<'a> for &'a UniformBuffer { /// (vectors), or structures with fields that are vectors. 
/// /// Other options for storing GPU-accessible data are: -/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) +/// * [`BufferVec`](crate::render_resource::BufferVec) /// * [`DynamicStorageBuffer`](crate::render_resource::DynamicStorageBuffer) -/// * [`UniformBuffer`] -/// * [`DynamicUniformBuffer`] /// * [`GpuArrayBuffer`](crate::render_resource::GpuArrayBuffer) /// * [`RawBufferVec`](crate::render_resource::RawBufferVec) -/// * [`BufferVec`](crate::render_resource::BufferVec) +/// * [`StorageBuffer`](crate::render_resource::StorageBuffer) /// * [`Texture`](crate::render_resource::Texture) +/// * [`UniformBuffer`] /// /// [std140 alignment/padding requirements]: https://www.w3.org/TR/WGSL/#address-spaces-uniform pub struct DynamicUniformBuffer { @@ -279,11 +278,11 @@ impl DynamicUniformBuffer { device: &RenderDevice, queue: &'a RenderQueue, ) -> Option> { - let alignment = if cfg!(feature = "ios_simulator") { + let alignment = if cfg!(target_abi = "sim") { // On iOS simulator on silicon macs, metal validation check that the host OS alignment // is respected, but the device reports the correct value for iOS, which is smaller. // Use the larger value. - // See https://github.com/bevyengine/bevy/pull/10178 - remove if it's not needed anymore. + // See https://github.com/gfx-rs/wgpu/issues/7057 - remove if it's not needed anymore. 
AlignmentValue::new(256) } else { AlignmentValue::new(device.limits().min_uniform_buffer_offset_alignment as u64) diff --git a/crates/bevy_render/src/renderer/graph_runner.rs b/crates/bevy_render/src/renderer/graph_runner.rs index 15b3240638e7c..39f05ca6a85c6 100644 --- a/crates/bevy_render/src/renderer/graph_runner.rs +++ b/crates/bevy_render/src/renderer/graph_runner.rs @@ -1,7 +1,7 @@ use bevy_ecs::{prelude::Entity, world::World}; +use bevy_platform::collections::HashMap; #[cfg(feature = "trace")] -use bevy_utils::tracing::info_span; -use bevy_utils::HashMap; +use tracing::info_span; use alloc::{borrow::Cow, collections::VecDeque}; use smallvec::{smallvec, SmallVec}; @@ -68,6 +68,7 @@ impl RenderGraphRunner { render_device: RenderDevice, mut diagnostics_recorder: Option, queue: &wgpu::Queue, + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] adapter: &wgpu::Adapter, world: &World, finalizer: impl FnOnce(&mut wgpu::CommandEncoder), @@ -76,16 +77,20 @@ impl RenderGraphRunner { recorder.begin_frame(); } - let mut render_context = - RenderContext::new(render_device, adapter.get_info(), diagnostics_recorder); + let mut render_context = RenderContext::new( + render_device, + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] + adapter.get_info(), + diagnostics_recorder, + ); Self::run_graph(graph, None, &mut render_context, world, &[], None)?; finalizer(render_context.command_encoder()); let (render_device, mut diagnostics_recorder) = { + let (commands, render_device, diagnostics_recorder) = render_context.finish(); + #[cfg(feature = "trace")] let _span = info_span!("submit_graph_commands").entered(); - - let (commands, render_device, diagnostics_recorder) = render_context.finish(); queue.submit(commands); (render_device, diagnostics_recorder) diff --git a/crates/bevy_render/src/renderer/mod.rs b/crates/bevy_render/src/renderer/mod.rs index 85213476bf4c4..1691911c2cbbe 100644 --- a/crates/bevy_render/src/renderer/mod.rs +++ 
b/crates/bevy_render/src/renderer/mod.rs @@ -2,10 +2,11 @@ mod graph_runner; mod render_device; use bevy_derive::{Deref, DerefMut}; +#[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] use bevy_tasks::ComputeTaskPool; -use bevy_utils::tracing::{error, info, info_span, warn}; pub use graph_runner::*; pub use render_device::*; +use tracing::{error, info, info_span, warn}; use crate::{ diagnostic::{internal::DiagnosticsRecorder, RecordDiagnostics}, @@ -17,8 +18,8 @@ use crate::{ }; use alloc::sync::Arc; use bevy_ecs::{prelude::*, system::SystemState}; +use bevy_platform::time::Instant; use bevy_time::TimeSender; -use bevy_utils::Instant; use wgpu::{ Adapter, AdapterInfo, CommandBuffer, CommandEncoder, DeviceType, Instance, Queue, RequestAdapterOptions, @@ -35,6 +36,7 @@ pub fn render_system(world: &mut World, state: &mut SystemState(); let render_device = world.resource::(); let render_queue = world.resource::(); + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] let render_adapter = world.resource::(); let res = RenderGraphRunner::run( @@ -42,6 +44,7 @@ pub fn render_system(world: &mut World, state: &mut SystemState(); for window in windows.values_mut() { - if let Some(wrapped_texture) = window.swap_chain_texture.take() { - if let Some(surface_texture) = wrapped_texture.try_unwrap() { - // TODO(clean): winit docs recommends calling pre_present_notify before this. - // though `present()` doesn't present the frame, it schedules it to be presented - // by wgpu. - // https://docs.rs/winit/0.29.9/wasm32-unknown-unknown/winit/window/struct.Window.html#method.pre_present_notify - surface_texture.present(); - } + if let Some(surface_texture) = window.swap_chain_texture.take() { + // TODO(clean): winit docs recommends calling pre_present_notify before this. + // though `present()` doesn't present the frame, it schedules it to be presented + // by wgpu. 
+ // https://docs.rs/winit/0.29.9/wasm32-unknown-unknown/winit/window/struct.Window.html#method.pre_present_notify + surface_texture.present(); } } #[cfg(feature = "tracing-tracy")] - bevy_utils::tracing::event!( - bevy_utils::tracing::Level::INFO, + tracing::event!( + tracing::Level::INFO, message = "finished frame", tracy.frame_mark = true ); @@ -221,10 +222,8 @@ pub async fn initialize_renderer( // RAY_QUERY and RAY_TRACING_ACCELERATION STRUCTURE will sometimes cause DeviceLost failures on platforms // that report them as supported: // - // WGPU also currently doesn't actually support these features yet, so we should disable - // them until they are safe to enable. - features -= wgpu::Features::RAY_QUERY; - features -= wgpu::Features::RAY_TRACING_ACCELERATION_STRUCTURE; + features -= wgpu::Features::EXPERIMENTAL_RAY_QUERY; + features -= wgpu::Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE; limits = adapter.limits(); } @@ -379,6 +378,7 @@ pub struct RenderContext<'w> { render_device: RenderDevice, command_encoder: Option, command_buffer_queue: Vec>, + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] force_serial: bool, diagnostics_recorder: Option>, } @@ -387,14 +387,19 @@ impl<'w> RenderContext<'w> { /// Creates a new [`RenderContext`] from a [`RenderDevice`]. 
pub fn new( render_device: RenderDevice, + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] adapter_info: AdapterInfo, diagnostics_recorder: Option, ) -> Self { - // HACK: Parallel command encoding is currently bugged on AMD + Windows + Vulkan with wgpu 0.19.1 - #[cfg(target_os = "windows")] + // HACK: Parallel command encoding is currently bugged on AMD + Windows/Linux + Vulkan + #[cfg(any(target_os = "windows", target_os = "linux"))] let force_serial = adapter_info.driver.contains("AMD") && adapter_info.backend == wgpu::Backend::Vulkan; - #[cfg(not(target_os = "windows"))] + #[cfg(not(any( + target_os = "windows", + target_os = "linux", + all(target_arch = "wasm32", target_feature = "atomics") + )))] let force_serial = { drop(adapter_info); false @@ -404,6 +409,7 @@ impl<'w> RenderContext<'w> { render_device, command_encoder: None, command_buffer_queue: Vec::new(), + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] force_serial, diagnostics_recorder: diagnostics_recorder.map(Arc::new), } @@ -416,7 +422,7 @@ impl<'w> RenderContext<'w> { /// Gets the diagnostics recorder, used to track elapsed time and pipeline statistics /// of various render and compute passes. 
- pub fn diagnostic_recorder(&self) -> impl RecordDiagnostics { + pub fn diagnostic_recorder(&self) -> impl RecordDiagnostics + use<> { self.diagnostics_recorder.clone() } @@ -492,6 +498,10 @@ impl<'w> RenderContext<'w> { let mut command_buffers = Vec::with_capacity(self.command_buffer_queue.len()); + #[cfg(feature = "trace")] + let _command_buffer_generation_tasks_span = + info_span!("command_buffer_generation_tasks").entered(); + #[cfg(not(all(target_arch = "wasm32", target_feature = "atomics")))] { let mut task_based_command_buffers = ComputeTaskPool::get().scope(|task_pool| { @@ -531,6 +541,9 @@ impl<'w> RenderContext<'w> { } } + #[cfg(feature = "trace")] + drop(_command_buffer_generation_tasks_span); + command_buffers.sort_unstable_by_key(|(i, _)| *i); let mut command_buffers = command_buffers diff --git a/crates/bevy_render/src/renderer/render_device.rs b/crates/bevy_render/src/renderer/render_device.rs index 407d1b361b0e5..d33139745baf8 100644 --- a/crates/bevy_render/src/renderer/render_device.rs +++ b/crates/bevy_render/src/renderer/render_device.rs @@ -4,8 +4,7 @@ use crate::render_resource::{ RenderPipeline, Sampler, Texture, }; use crate::WgpuWrapper; -use alloc::sync::Arc; -use bevy_ecs::system::Resource; +use bevy_ecs::resource::Resource; use wgpu::{ util::DeviceExt, BindGroupDescriptor, BindGroupEntry, BindGroupLayoutDescriptor, BindGroupLayoutEntry, BufferAsyncError, BufferBindingType, MaintainResult, @@ -14,17 +13,17 @@ use wgpu::{ /// This GPU device is responsible for the creation of most rendering and compute resources. 
#[derive(Resource, Clone)] pub struct RenderDevice { - device: Arc>, + device: WgpuWrapper, } impl From for RenderDevice { fn from(device: wgpu::Device) -> Self { - Self::new(Arc::new(WgpuWrapper::new(device))) + Self::new(WgpuWrapper::new(device)) } } impl RenderDevice { - pub fn new(device: Arc>) -> Self { + pub fn new(device: WgpuWrapper) -> Self { Self { device } } @@ -45,8 +44,18 @@ impl RenderDevice { } /// Creates a [`ShaderModule`](wgpu::ShaderModule) from either SPIR-V or WGSL source code. + /// + /// # Safety + /// + /// Creates a shader module with user-customizable runtime checks which allows shaders to + /// perform operations which can lead to undefined behavior like indexing out of bounds. + /// To avoid UB, ensure any unchecked shaders are sound! + /// This method should never be called for user-supplied shaders. #[inline] - pub fn create_shader_module(&self, desc: wgpu::ShaderModuleDescriptor) -> wgpu::ShaderModule { + pub unsafe fn create_shader_module( + &self, + desc: wgpu::ShaderModuleDescriptor, + ) -> wgpu::ShaderModule { #[cfg(feature = "spirv_shader_passthrough")] match &desc.source { wgpu::ShaderSource::SpirV(source) @@ -65,9 +74,36 @@ impl RenderDevice { }) } } - _ => self.device.create_shader_module(desc), + // SAFETY: + // + // This call passes binary data to the backend as-is and can potentially result in a driver crash or bogus behavior. + // No attempt is made to ensure that data is valid SPIR-V. + _ => unsafe { + self.device + .create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked()) + }, + } + #[cfg(not(feature = "spirv_shader_passthrough"))] + // SAFETY: the caller is responsible for upholding the safety requirements + unsafe { + self.device + .create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked()) } + } + /// Creates and validates a [`ShaderModule`](wgpu::ShaderModule) from either SPIR-V or WGSL source code. 
+ /// + /// See [`ValidateShader`](bevy_render::render_resource::ValidateShader) for more information on the tradeoffs involved with shader validation. + #[inline] + pub fn create_and_validate_shader_module( + &self, + desc: wgpu::ShaderModuleDescriptor, + ) -> wgpu::ShaderModule { + #[cfg(feature = "spirv_shader_passthrough")] + match &desc.source { + wgpu::ShaderSource::SpirV(_source) => panic!("no safety checks are performed for spirv shaders. use `create_shader_module` instead"), + _ => self.device.create_shader_module(desc), + } #[cfg(not(feature = "spirv_shader_passthrough"))] self.device.create_shader_module(desc) } @@ -231,10 +267,16 @@ impl RenderDevice { buffer.map_async(map_mode, callback); } - pub fn align_copy_bytes_per_row(row_bytes: usize) -> usize { + // Rounds up `row_bytes` to be a multiple of [`wgpu::COPY_BYTES_PER_ROW_ALIGNMENT`]. + pub const fn align_copy_bytes_per_row(row_bytes: usize) -> usize { let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize; - let padded_bytes_per_row_padding = (align - row_bytes % align) % align; - row_bytes + padded_bytes_per_row_padding + + // If row_bytes is aligned calculate a value just under the next aligned value. + // Otherwise calculate a value greater than the next aligned value. + let over_aligned = row_bytes + align - 1; + + // Round the number *down* to the nearest aligned value. 
+ (over_aligned / align) * align } pub fn get_supported_read_only_binding_type( @@ -248,3 +290,19 @@ impl RenderDevice { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn align_copy_bytes_per_row() { + // Test for https://github.com/bevyengine/bevy/issues/16992 + let align = wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as usize; + + assert_eq!(RenderDevice::align_copy_bytes_per_row(0), 0); + assert_eq!(RenderDevice::align_copy_bytes_per_row(1), align); + assert_eq!(RenderDevice::align_copy_bytes_per_row(align + 1), align * 2); + assert_eq!(RenderDevice::align_copy_bytes_per_row(align), align); + } +} diff --git a/crates/bevy_render/src/settings.rs b/crates/bevy_render/src/settings.rs index fe8933656a28c..d4eb9b7680cfa 100644 --- a/crates/bevy_render/src/settings.rs +++ b/crates/bevy_render/src/settings.rs @@ -71,10 +71,10 @@ impl Default for WgpuSettings { Backends::all() }; - let backends = Some(wgpu::util::backend_bits_from_env().unwrap_or(default_backends)); + let backends = Some(Backends::from_env().unwrap_or(default_backends)); let power_preference = - wgpu::util::power_preference_from_env().unwrap_or(PowerPreference::HighPerformance); + PowerPreference::from_env().unwrap_or(PowerPreference::HighPerformance); let priority = settings_priority_from_env().unwrap_or(WgpuSettingsPriority::Functionality); @@ -86,7 +86,11 @@ impl Default for WgpuSettings { { wgpu::Limits::downlevel_webgl2_defaults() } else { - #[allow(unused_mut)] + #[expect(clippy::allow_attributes, reason = "`unused_mut` is not always linted")] + #[allow( + unused_mut, + reason = "This variable needs to be mutable if the `ci_limits` feature is enabled" + )] let mut limits = wgpu::Limits::default(); #[cfg(feature = "ci_limits")] { @@ -96,13 +100,27 @@ impl Default for WgpuSettings { limits }; - let dx12_compiler = - wgpu::util::dx12_shader_compiler_from_env().unwrap_or(Dx12Compiler::Dxc { - dxil_path: None, - dxc_path: None, + let dx12_shader_compiler = + 
Dx12Compiler::from_env().unwrap_or(if cfg!(feature = "statically-linked-dxc") { + Dx12Compiler::StaticDxc + } else { + let dxc = "dxcompiler.dll"; + let dxil = "dxil.dll"; + + if cfg!(target_os = "windows") + && std::fs::metadata(dxc).is_ok() + && std::fs::metadata(dxil).is_ok() + { + Dx12Compiler::DynamicDxc { + dxc_path: String::from(dxc), + dxil_path: String::from(dxil), + } + } else { + Dx12Compiler::Fxc + } }); - let gles3_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default(); + let gles3_minor_version = Gles3MinorVersion::from_env().unwrap_or_default(); let instance_flags = InstanceFlags::default().with_env(); @@ -115,7 +133,7 @@ impl Default for WgpuSettings { disabled_features: None, limits, constrained_limits: None, - dx12_shader_compiler: dx12_compiler, + dx12_shader_compiler, gles3_minor_version, instance_flags, memory_hints: MemoryHints::default(), diff --git a/crates/bevy_render/src/spatial_bundle.rs b/crates/bevy_render/src/spatial_bundle.rs deleted file mode 100644 index d50bd31dfd3fd..0000000000000 --- a/crates/bevy_render/src/spatial_bundle.rs +++ /dev/null @@ -1,72 +0,0 @@ -#![expect(deprecated)] -use bevy_ecs::prelude::Bundle; -use bevy_transform::prelude::{GlobalTransform, Transform}; - -use crate::view::{InheritedVisibility, ViewVisibility, Visibility}; - -/// A [`Bundle`] that allows the correct positional rendering of an entity. -/// -/// It consists of transform components, -/// controlling position, rotation and scale of the entity, -/// but also visibility components, -/// which determine whether the entity is visible or not. -/// -/// Parent-child hierarchies of entities must contain -/// all the [`Component`]s in this `Bundle` -/// to be rendered correctly. -/// -/// [`Component`]: bevy_ecs::component::Component -#[derive(Bundle, Clone, Debug, Default)] -#[deprecated( - since = "0.15.0", - note = "Use the `Transform` and `Visibility` components instead. 
- Inserting `Transform` will now also insert a `GlobalTransform` automatically. - Inserting 'Visibility' will now also insert `InheritedVisibility` and `ViewVisibility` automatically." -)] -pub struct SpatialBundle { - /// The visibility of the entity. - pub visibility: Visibility, - /// The inherited visibility of the entity. - pub inherited_visibility: InheritedVisibility, - /// The view visibility of the entity. - pub view_visibility: ViewVisibility, - /// The transform of the entity. - pub transform: Transform, - /// The global transform of the entity. - pub global_transform: GlobalTransform, -} - -impl SpatialBundle { - /// Creates a new [`SpatialBundle`] from a [`Transform`]. - /// - /// This initializes [`GlobalTransform`] as identity, and visibility as visible - #[inline] - pub const fn from_transform(transform: Transform) -> Self { - SpatialBundle { - transform, - ..Self::INHERITED_IDENTITY - } - } - - /// A [`SpatialBundle`] with inherited visibility and identity transform. - pub const INHERITED_IDENTITY: Self = SpatialBundle { - visibility: Visibility::Inherited, - inherited_visibility: InheritedVisibility::HIDDEN, - view_visibility: ViewVisibility::HIDDEN, - transform: Transform::IDENTITY, - global_transform: GlobalTransform::IDENTITY, - }; - - /// An invisible [`SpatialBundle`] with identity transform. - pub const HIDDEN_IDENTITY: Self = SpatialBundle { - visibility: Visibility::Hidden, - ..Self::INHERITED_IDENTITY - }; -} - -impl From for SpatialBundle { - #[inline] - fn from(transform: Transform) -> Self { - Self::from_transform(transform) - } -} diff --git a/crates/bevy_render/src/storage.rs b/crates/bevy_render/src/storage.rs index 7434f3999f3dc..0046b4e6ac2bc 100644 --- a/crates/bevy_render/src/storage.rs +++ b/crates/bevy_render/src/storage.rs @@ -27,7 +27,7 @@ impl Plugin for StoragePlugin { /// A storage buffer that is prepared as a [`RenderAsset`] and uploaded to the GPU. 
#[derive(Asset, Reflect, Debug, Clone)] #[reflect(opaque)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub struct ShaderStorageBuffer { /// Optional data used to initialize the buffer. pub data: Option>, diff --git a/crates/bevy_render/src/sync_component.rs b/crates/bevy_render/src/sync_component.rs index 0fac10b409a17..dd7eca1a2840c 100644 --- a/crates/bevy_render/src/sync_component.rs +++ b/crates/bevy_render/src/sync_component.rs @@ -32,11 +32,11 @@ impl Plugin for SyncComponentPlugin { fn build(&self, app: &mut App) { app.register_required_components::(); - app.world_mut().register_component_hooks::().on_remove( - |mut world, entity, _component_id| { + app.world_mut() + .register_component_hooks::() + .on_remove(|mut world, context| { let mut pending = world.resource_mut::(); - pending.push(EntityRecord::ComponentRemoved(entity)); - }, - ); + pending.push(EntityRecord::ComponentRemoved(context.entity)); + }); } } diff --git a/crates/bevy_render/src/sync_world.rs b/crates/bevy_render/src/sync_world.rs index 15fb582fb7682..ce0408833366c 100644 --- a/crates/bevy_render/src/sync_world.rs +++ b/crates/bevy_render/src/sync_world.rs @@ -1,17 +1,19 @@ use bevy_app::Plugin; use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::{ComponentCloneBehavior, Mutable, StorageType}; use bevy_ecs::entity::EntityHash; use bevy_ecs::{ component::Component, - entity::{Entity, EntityBorrow, TrustedEntityBorrow}, + entity::{ContainsEntity, Entity, EntityEquivalent}, observer::Trigger, query::With, reflect::ReflectComponent, - system::{Local, Query, ResMut, Resource, SystemState}, + resource::Resource, + system::{Local, Query, ResMut, SystemState}, world::{Mut, OnAdd, OnRemove, World}, }; -use bevy_reflect::Reflect; -use bevy_utils::hashbrown; +use bevy_platform::collections::{HashMap, HashSet}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// A plugin that synchronizes entities with [`SyncToRenderWorld`] between the main world and the render 
world. /// @@ -118,14 +120,14 @@ impl Plugin for SyncWorldPlugin { /// [`ExtractComponentPlugin`]: crate::extract_component::ExtractComponentPlugin /// [`SyncComponentPlugin`]: crate::sync_component::SyncComponentPlugin #[derive(Component, Copy, Clone, Debug, Default, Reflect)] -#[reflect[Component]] +#[reflect[Component, Default, Clone]] #[component(storage = "SparseSet")] pub struct SyncToRenderWorld; /// Component added on the main world entities that are synced to the Render World in order to keep track of the corresponding render world entity. /// /// Can also be used as a newtype wrapper for render world entities. -#[derive(Component, Deref, Copy, Clone, Debug, Eq, Hash, PartialEq)] +#[derive(Deref, Copy, Clone, Debug, Eq, Hash, PartialEq)] pub struct RenderEntity(Entity); impl RenderEntity { #[inline] @@ -134,25 +136,35 @@ impl RenderEntity { } } +impl Component for RenderEntity { + const STORAGE_TYPE: StorageType = StorageType::Table; + + type Mutability = Mutable; + + fn clone_behavior() -> ComponentCloneBehavior { + ComponentCloneBehavior::Ignore + } +} + impl From for RenderEntity { fn from(entity: Entity) -> Self { RenderEntity(entity) } } -impl EntityBorrow for RenderEntity { +impl ContainsEntity for RenderEntity { fn entity(&self) -> Entity { self.id() } } // SAFETY: RenderEntity is a newtype around Entity that derives its comparison traits. -unsafe impl TrustedEntityBorrow for RenderEntity {} +unsafe impl EntityEquivalent for RenderEntity {} /// Component added on the render world entities to keep track of the corresponding main world entity. /// /// Can also be used as a newtype wrapper for main world entities. 
-#[derive(Component, Deref, Copy, Clone, Debug, Eq, Hash, PartialEq)] +#[derive(Component, Deref, Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] pub struct MainEntity(Entity); impl MainEntity { #[inline] @@ -167,24 +179,24 @@ impl From for MainEntity { } } -impl EntityBorrow for MainEntity { +impl ContainsEntity for MainEntity { fn entity(&self) -> Entity { self.id() } } // SAFETY: RenderEntity is a newtype around Entity that derives its comparison traits. -unsafe impl TrustedEntityBorrow for MainEntity {} +unsafe impl EntityEquivalent for MainEntity {} -/// A [`HashMap`](hashbrown::HashMap) pre-configured to use [`EntityHash`] hashing with a [`MainEntity`]. -pub type MainEntityHashMap = hashbrown::HashMap; +/// A [`HashMap`] pre-configured to use [`EntityHash`] hashing with a [`MainEntity`]. +pub type MainEntityHashMap = HashMap; -/// A [`HashSet`](hashbrown::HashSet) pre-configured to use [`EntityHash`] hashing with a [`MainEntity`].. -pub type MainEntityHashSet = hashbrown::HashSet; +/// A [`HashSet`] pre-configured to use [`EntityHash`] hashing with a [`MainEntity`].. +pub type MainEntityHashSet = HashSet; /// Marker component that indicates that its entity needs to be despawned at the end of the frame. #[derive(Component, Copy, Clone, Debug, Default, Reflect)] -#[reflect(Component)] +#[reflect(Component, Default, Clone)] pub struct TemporaryRenderEntity; /// A record enum to what entities with [`SyncToRenderWorld`] have been added or removed. @@ -284,14 +296,9 @@ mod render_entities_world_query_impls { /// SAFETY: defers completely to `&RenderEntity` implementation, /// and then only modifies the output safely. 
unsafe impl WorldQuery for RenderEntity { - type Item<'w> = Entity; type Fetch<'w> = <&'static RenderEntity as WorldQuery>::Fetch<'w>; type State = <&'static RenderEntity as WorldQuery>::State; - fn shrink<'wlong: 'wshort, 'wshort>(item: Entity) -> Entity { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>( fetch: Self::Fetch<'wlong>, ) -> Self::Fetch<'wshort> { @@ -336,18 +343,6 @@ mod render_entities_world_query_impls { unsafe { <&RenderEntity as WorldQuery>::set_table(fetch, &component_id, table) } } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Self::Item<'w> { - // SAFETY: defers to the `&T` implementation, with T set to `RenderEntity`. - let component = - unsafe { <&RenderEntity as WorldQuery>::fetch(fetch, entity, table_row) }; - component.id() - } - fn update_component_access( &component_id: &ComponentId, access: &mut FilteredAccess, @@ -374,7 +369,25 @@ mod render_entities_world_query_impls { // SAFETY: Component access of Self::ReadOnly is a subset of Self. // Self::ReadOnly matches exactly the same archetypes/tables as Self. unsafe impl QueryData for RenderEntity { + const IS_READ_ONLY: bool = true; type ReadOnly = RenderEntity; + type Item<'w> = Entity; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Entity) -> Entity { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + table_row: TableRow, + ) -> Self::Item<'w> { + // SAFETY: defers to the `&T` implementation, with T set to `RenderEntity`. + let component = + unsafe { <&RenderEntity as QueryData>::fetch(fetch, entity, table_row) }; + component.id() + } } // SAFETY: the underlying `Entity` is copied, and no mutable access is provided. @@ -383,14 +396,9 @@ mod render_entities_world_query_impls { /// SAFETY: defers completely to `&RenderEntity` implementation, /// and then only modifies the output safely. 
unsafe impl WorldQuery for MainEntity { - type Item<'w> = Entity; type Fetch<'w> = <&'static MainEntity as WorldQuery>::Fetch<'w>; type State = <&'static MainEntity as WorldQuery>::State; - fn shrink<'wlong: 'wshort, 'wshort>(item: Entity) -> Entity { - item - } - fn shrink_fetch<'wlong: 'wshort, 'wshort>( fetch: Self::Fetch<'wlong>, ) -> Self::Fetch<'wshort> { @@ -435,17 +443,6 @@ mod render_entities_world_query_impls { unsafe { <&MainEntity as WorldQuery>::set_table(fetch, &component_id, table) } } - #[inline(always)] - unsafe fn fetch<'w>( - fetch: &mut Self::Fetch<'w>, - entity: Entity, - table_row: TableRow, - ) -> Self::Item<'w> { - // SAFETY: defers to the `&T` implementation, with T set to `MainEntity`. - let component = unsafe { <&MainEntity as WorldQuery>::fetch(fetch, entity, table_row) }; - component.id() - } - fn update_component_access( &component_id: &ComponentId, access: &mut FilteredAccess, @@ -472,7 +469,24 @@ mod render_entities_world_query_impls { // SAFETY: Component access of Self::ReadOnly is a subset of Self. // Self::ReadOnly matches exactly the same archetypes/tables as Self. unsafe impl QueryData for MainEntity { + const IS_READ_ONLY: bool = true; type ReadOnly = MainEntity; + type Item<'w> = Entity; + + fn shrink<'wlong: 'wshort, 'wshort>(item: Entity) -> Entity { + item + } + + #[inline(always)] + unsafe fn fetch<'w>( + fetch: &mut Self::Fetch<'w>, + entity: Entity, + table_row: TableRow, + ) -> Self::Item<'w> { + // SAFETY: defers to the `&T` implementation, with T set to `MainEntity`. + let component = unsafe { <&MainEntity as QueryData>::fetch(fetch, entity, table_row) }; + component.id() + } } // SAFETY: the underlying `Entity` is copied, and no mutable access is provided. 
@@ -538,7 +552,7 @@ mod tests { // Only one synchronized entity assert!(q.iter(&render_world).count() == 1); - let render_entity = q.get_single(&render_world).unwrap(); + let render_entity = q.single(&render_world).unwrap(); let render_entity_component = main_world.get::(main_entity).unwrap(); assert!(render_entity_component.id() == render_entity); diff --git a/crates/bevy_render/src/texture/fallback_image.rs b/crates/bevy_render/src/texture/fallback_image.rs index fa61b88abfc8d..18c83414bd7d6 100644 --- a/crates/bevy_render/src/texture/fallback_image.rs +++ b/crates/bevy_render/src/texture/fallback_image.rs @@ -7,10 +7,11 @@ use crate::{ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ prelude::{FromWorld, Res, ResMut}, - system::{Resource, SystemParam}, + resource::Resource, + system::SystemParam, }; use bevy_image::{BevyDefault, Image, ImageSampler, TextureFormatPixelInfo}; -use bevy_utils::HashMap; +use bevy_platform::collections::HashMap; /// A [`RenderApp`](crate::RenderApp) resource that contains the default "fallback image", /// which can be used in situations where an image was not explicitly defined. 
The most common @@ -97,7 +98,7 @@ fn fallback_image_new( RenderAssetUsages::RENDER_WORLD, ) } else { - let mut image = Image::default(); + let mut image = Image::default_uninit(); image.texture_descriptor.dimension = TextureDimension::D2; image.texture_descriptor.size = extents; image.texture_descriptor.format = format; @@ -113,7 +114,7 @@ fn fallback_image_new( render_queue, &image.texture_descriptor, TextureDataOrder::default(), - &image.data, + &image.data.expect("Image has no data"), ) } else { render_device.create_texture(&image.texture_descriptor) diff --git a/crates/bevy_render/src/texture/gpu_image.rs b/crates/bevy_render/src/texture/gpu_image.rs index f1ee1ade7e8f5..551bd3ee02e09 100644 --- a/crates/bevy_render/src/texture/gpu_image.rs +++ b/crates/bevy_render/src/texture/gpu_image.rs @@ -36,7 +36,7 @@ impl RenderAsset for GpuImage { #[inline] fn byte_len(image: &Self::SourceAsset) -> Option { - Some(image.data.len()) + image.data.as_ref().map(Vec::len) } /// Converts the extracted image into a [`GpuImage`]. @@ -45,13 +45,17 @@ impl RenderAsset for GpuImage { _: AssetId, (render_device, render_queue, default_sampler): &mut SystemParamItem, ) -> Result> { - let texture = render_device.create_texture_with_data( - render_queue, - &image.texture_descriptor, - // TODO: Is this correct? Do we need to use `MipMajor` if it's a ktx2 file? - wgpu::util::TextureDataOrder::default(), - &image.data, - ); + let texture = if let Some(ref data) = image.data { + render_device.create_texture_with_data( + render_queue, + &image.texture_descriptor, + // TODO: Is this correct? Do we need to use `MipMajor` if it's a ktx2 file? 
+ wgpu::util::TextureDataOrder::default(), + data, + ) + } else { + render_device.create_texture(&image.texture_descriptor) + }; let texture_view = texture.create_view( image diff --git a/crates/bevy_render/src/texture/mod.rs b/crates/bevy_render/src/texture/mod.rs index 3671b7a0c83db..6955de7ff4fc2 100644 --- a/crates/bevy_render/src/texture/mod.rs +++ b/crates/bevy_render/src/texture/mod.rs @@ -18,7 +18,7 @@ use crate::{ render_asset::RenderAssetPlugin, renderer::RenderDevice, Render, RenderApp, RenderSet, }; use bevy_app::{App, Plugin}; -use bevy_asset::{AssetApp, Assets, Handle}; +use bevy_asset::{weak_handle, AssetApp, Assets, Handle}; use bevy_ecs::prelude::*; /// A handle to a 1 x 1 transparent white image. @@ -27,7 +27,7 @@ use bevy_ecs::prelude::*; /// While that handle points to an opaque white 1 x 1 image, this handle points to a transparent 1 x 1 white image. // Number randomly selected by fair WolframAlpha query. Totally arbitrary. pub const TRANSPARENT_IMAGE_HANDLE: Handle = - Handle::weak_from_u128(154728948001857810431816125397303024160); + weak_handle!("d18ad97e-a322-4981-9505-44c59a4b5e46"); // TODO: replace Texture names with Image names? /// Adds the [`Image`] as an asset and makes sure that they are extracted and prepared for the GPU. 
diff --git a/crates/bevy_render/src/texture/texture_cache.rs b/crates/bevy_render/src/texture/texture_cache.rs index 898d3d7511c3b..ca1ef9b31b35f 100644 --- a/crates/bevy_render/src/texture/texture_cache.rs +++ b/crates/bevy_render/src/texture/texture_cache.rs @@ -2,8 +2,8 @@ use crate::{ render_resource::{Texture, TextureView}, renderer::RenderDevice, }; -use bevy_ecs::{prelude::ResMut, system::Resource}; -use bevy_utils::{Entry, HashMap}; +use bevy_ecs::{prelude::ResMut, resource::Resource}; +use bevy_platform::collections::{hash_map::Entry, HashMap}; use wgpu::{TextureDescriptor, TextureViewDescriptor}; /// The internal representation of a [`CachedTexture`] used to track whether it was recently used diff --git a/crates/bevy_render/src/view/mod.rs b/crates/bevy_render/src/view/mod.rs index 06e2db79f28df..c392dcaaebe76 100644 --- a/crates/bevy_render/src/view/mod.rs +++ b/crates/bevy_render/src/view/mod.rs @@ -1,7 +1,8 @@ pub mod visibility; pub mod window; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; +use bevy_diagnostic::FrameCount; pub use visibility::*; pub use window::*; @@ -10,6 +11,7 @@ use crate::{ CameraMainTextureUsages, ClearColor, ClearColorConfig, Exposure, ExtractedCamera, ManualTextureViews, MipBias, NormalizedRenderTarget, TemporalJitter, }, + experimental::occlusion_culling::OcclusionCulling, extract_component::ExtractComponentPlugin, prelude::Shader, primitives::Frustum, @@ -17,6 +19,7 @@ use crate::{ render_phase::ViewRangefinder3d, render_resource::{DynamicUniformBuffer, ShaderType, Texture, TextureView}, renderer::{RenderDevice, RenderQueue}, + sync_world::MainEntity, texture::{ CachedTexture, ColorAttachment, DepthAttachment, GpuImage, OutputColorAttachment, TextureCache, @@ -30,10 +33,10 @@ use bevy_derive::{Deref, DerefMut}; use bevy_ecs::prelude::*; use bevy_image::BevyDefault as _; use bevy_math::{mat3, vec2, vec3, Mat3, Mat4, UVec4, Vec2, Vec3, Vec4, Vec4Swizzles}; +use 
bevy_platform::collections::{hash_map::Entry, HashMap}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render_macros::ExtractComponent; use bevy_transform::components::GlobalTransform; -use bevy_utils::{hashbrown::hash_map::Entry, HashMap}; use core::{ ops::Range, sync::atomic::{AtomicUsize, Ordering}, @@ -43,7 +46,7 @@ use wgpu::{ TextureDescriptor, TextureDimension, TextureFormat, TextureUsages, }; -pub const VIEW_TYPE_HANDLE: Handle = Handle::weak_from_u128(15421373904451797197); +pub const VIEW_TYPE_HANDLE: Handle = weak_handle!("7234423c-38bb-411c-acec-f67730f6db5b"); /// The matrix that converts from the RGB to the LMS color space. /// @@ -108,9 +111,11 @@ impl Plugin for ViewPlugin { .register_type::() .register_type::() .register_type::() + .register_type::() // NOTE: windows.is_changed() handles cases where a window was resized .add_plugins(( ExtractComponentPlugin::::default(), + ExtractComponentPlugin::::default(), VisibilityPlugin, VisibilityRangePlugin, )); @@ -182,10 +187,81 @@ impl Msaa { pub fn samples(&self) -> u32 { *self as u32 } + + pub fn from_samples(samples: u32) -> Self { + match samples { + 1 => Msaa::Off, + 2 => Msaa::Sample2, + 4 => Msaa::Sample4, + 8 => Msaa::Sample8, + _ => panic!("Unsupported MSAA sample count: {}", samples), + } + } +} + +/// An identifier for a view that is stable across frames. +/// +/// We can't use [`Entity`] for this because render world entities aren't +/// stable, and we can't use just [`MainEntity`] because some main world views +/// extract to multiple render world views. For example, a directional light +/// extracts to one render world view per cascade, and a point light extracts to +/// one render world view per cubemap face. So we pair the main entity with an +/// *auxiliary entity* and a *subview index*, which *together* uniquely identify +/// a view in the render world in a way that's stable from frame to frame. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub struct RetainedViewEntity { + /// The main entity that this view corresponds to. + pub main_entity: MainEntity, + + /// Another entity associated with the view entity. + /// + /// This is currently used for shadow cascades. If there are multiple + /// cameras, each camera needs to have its own set of shadow cascades. Thus + /// the light and subview index aren't themselves enough to uniquely + /// identify a shadow cascade: we need the camera that the cascade is + /// associated with as well. This entity stores that camera. + /// + /// If not present, this will be `MainEntity(Entity::PLACEHOLDER)`. + pub auxiliary_entity: MainEntity, + + /// The index of the view corresponding to the entity. + /// + /// For example, for point lights that cast shadows, this is the index of + /// the cubemap face (0 through 5 inclusive). For directional lights, this + /// is the index of the cascade. + pub subview_index: u32, } +impl RetainedViewEntity { + /// Creates a new [`RetainedViewEntity`] from the given main world entity, + /// auxiliary main world entity, and subview index. + /// + /// See [`RetainedViewEntity::subview_index`] for an explanation of what + /// `auxiliary_entity` and `subview_index` are. + pub fn new( + main_entity: MainEntity, + auxiliary_entity: Option, + subview_index: u32, + ) -> Self { + Self { + main_entity, + auxiliary_entity: auxiliary_entity.unwrap_or(Entity::PLACEHOLDER.into()), + subview_index, + } + } +} + +/// Describes a camera in the render world. +/// +/// Each entity in the main world can potentially extract to multiple subviews, +/// each of which has a [`RetainedViewEntity::subview_index`]. For instance, 3D +/// cameras extract to both a 3D camera subview with index 0 and a special UI +/// subview with index 1. Likewise, point lights with shadows extract to 6 +/// subviews, one for each side of the shadow cubemap. 
#[derive(Component)] pub struct ExtractedView { + /// The entity in the main world corresponding to this render world view. + pub retained_view_entity: RetainedViewEntity, /// Typically a right-handed projection matrix, one of either: /// /// Perspective (infinite reverse z) @@ -241,7 +317,7 @@ impl ExtractedView { /// `post_saturation` value in [`ColorGradingGlobal`], which is applied after /// tonemapping. #[derive(Component, Reflect, Debug, Default, Clone)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct ColorGrading { /// Filmic color grading values applied to the image as a whole (as opposed /// to individual sections, like shadows and highlights). @@ -270,7 +346,7 @@ pub struct ColorGrading { /// Filmic color grading values applied to the image as a whole (as opposed to /// individual sections, like shadows and highlights). #[derive(Clone, Debug, Reflect)] -#[reflect(Default)] +#[reflect(Default, Clone)] pub struct ColorGradingGlobal { /// Exposure value (EV) offset, measured in stops. pub exposure: f32, @@ -336,6 +412,7 @@ pub struct ColorGradingUniform { /// A section of color grading values that can be selectively applied to /// shadows, midtones, and highlights. #[derive(Reflect, Debug, Copy, Clone, PartialEq)] +#[reflect(Clone, PartialEq)] pub struct ColorGradingSection { /// Values below 1.0 desaturate, with a value of 0.0 resulting in a grayscale image /// with luminance defined by ITU-R BT.709. 
@@ -493,6 +570,7 @@ pub struct ViewUniform { pub frustum: [Vec4; 6], pub color_grading: ColorGradingUniform, pub mip_bias: f32, + pub frame_count: u32, } #[derive(Resource)] @@ -538,7 +616,9 @@ pub struct ViewTargetAttachments(HashMap { pub source: &'a TextureView, + pub source_texture: &'a Texture, pub destination: &'a TextureView, + pub destination_texture: &'a Texture, } impl From for ColorGradingUniform { @@ -631,10 +711,13 @@ impl From for ColorGradingUniform { /// /// The vast majority of applications will not need to use this component, as it /// generally reduces rendering performance. -#[derive(Component)] +/// +/// Note: This component should only be added when initially spawning a camera. Adding +/// or removing after spawn can result in unspecified behavior. +#[derive(Component, Default)] pub struct NoIndirectDrawing; -#[derive(Component)] +#[derive(Component, Default)] pub struct NoCpuCulling; impl ViewTarget { @@ -766,13 +849,17 @@ impl ViewTarget { self.main_textures.b.mark_as_cleared(); PostProcessWrite { source: &self.main_textures.a.texture.default_view, + source_texture: &self.main_textures.a.texture.texture, destination: &self.main_textures.b.texture.default_view, + destination_texture: &self.main_textures.b.texture.texture, } } else { self.main_textures.a.mark_as_cleared(); PostProcessWrite { source: &self.main_textures.b.texture.default_view, + source_texture: &self.main_textures.b.texture.texture, destination: &self.main_textures.a.texture.default_view, + destination_texture: &self.main_textures.a.texture.texture, } } } @@ -814,6 +901,7 @@ pub fn prepare_view_uniforms( Option<&TemporalJitter>, Option<&MipBias>, )>, + frame_count: Res, ) { let view_iter = views.iter(); let view_count = view_iter.len(); @@ -867,6 +955,7 @@ pub fn prepare_view_uniforms( frustum, color_grading: extracted_view.color_grading.clone().into(), mip_bias: mip_bias.unwrap_or(&MipBias(0.0)).0, + frame_count: frame_count.0, }), }; @@ -964,7 +1053,7 @@ pub fn 
prepare_view_targets( }; let (a, b, sampled, main_texture) = textures - .entry((camera.target.clone(), view.hdr, msaa)) + .entry((camera.target.clone(), texture_usage.0, view.hdr, msaa)) .or_insert_with(|| { let descriptor = TextureDescriptor { label: None, diff --git a/crates/bevy_render/src/view/view.wgsl b/crates/bevy_render/src/view/view.wgsl index ed08599758a07..317de2eb88073 100644 --- a/crates/bevy_render/src/view/view.wgsl +++ b/crates/bevy_render/src/view/view.wgsl @@ -60,4 +60,5 @@ struct View { frustum: array, 6>, color_grading: ColorGrading, mip_bias: f32, + frame_count: u32, }; diff --git a/crates/bevy_render/src/view/visibility/mod.rs b/crates/bevy_render/src/view/visibility/mod.rs index 3e991a6cbdde0..63c931a8b035d 100644 --- a/crates/bevy_render/src/view/visibility/mod.rs +++ b/crates/bevy_render/src/view/visibility/mod.rs @@ -1,11 +1,9 @@ -#![expect(deprecated)] - mod range; mod render_layers; use core::any::TypeId; -use bevy_ecs::component::ComponentId; +use bevy_ecs::component::HookContext; use bevy_ecs::entity::EntityHashSet; use bevy_ecs::world::DeferredWorld; use derive_more::derive::{Deref, DerefMut}; @@ -14,19 +12,18 @@ pub use render_layers::*; use bevy_app::{Plugin, PostUpdate}; use bevy_asset::Assets; -use bevy_ecs::prelude::*; -use bevy_hierarchy::{Children, Parent}; +use bevy_ecs::{hierarchy::validate_parent_has_component, prelude::*}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_transform::{components::GlobalTransform, TransformSystem}; use bevy_utils::{Parallel, TypeIdMap}; use smallvec::SmallVec; use super::NoCpuCulling; -use crate::sync_world::MainEntity; use crate::{ - camera::{Camera, CameraProjection}, + camera::{Camera, CameraProjection, Projection}, mesh::{Mesh, Mesh3d, MeshAabb}, primitives::{Aabb, Frustum, Sphere}, + sync_world::MainEntity, }; /// User indication of whether an entity is visible. Propagates down the entity hierarchy. 
@@ -37,10 +34,10 @@ use crate::{ /// This is done by the `visibility_propagate_system` which uses the entity hierarchy and /// `Visibility` to set the values of each entity's [`InheritedVisibility`] component. #[derive(Component, Clone, Copy, Reflect, Debug, PartialEq, Eq, Default)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[require(InheritedVisibility, ViewVisibility)] pub enum Visibility { - /// An entity with `Visibility::Inherited` will inherit the Visibility of its [`Parent`]. + /// An entity with `Visibility::Inherited` will inherit the Visibility of its [`ChildOf`] target. /// /// A root-level entity that is set to `Inherited` will be visible. #[default] @@ -50,7 +47,7 @@ pub enum Visibility { /// An entity with `Visibility::Visible` will be unconditionally visible. /// /// Note that an entity with `Visibility::Visible` will be visible regardless of whether the - /// [`Parent`] entity is hidden. + /// [`ChildOf`] target entity is hidden. Visible, } @@ -112,7 +109,8 @@ impl PartialEq<&Visibility> for Visibility { /// /// [`VisibilityPropagate`]: VisibilitySystems::VisibilityPropagate #[derive(Component, Deref, Debug, Default, Clone, Copy, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] +#[component(on_insert = validate_parent_has_component::)] pub struct InheritedVisibility(bool); impl InheritedVisibility { @@ -154,7 +152,7 @@ impl InheritedVisibility { // Note: This can't be a `ComponentId` because the visibility classes are copied // into the render world, and component IDs are per-world. #[derive(Clone, Component, Default, Reflect, Deref, DerefMut)] -#[reflect(Component, Default)] +#[reflect(Component, Default, Clone)] pub struct VisibilityClass(pub SmallVec<[TypeId; 1]>); /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering. 
@@ -168,7 +166,7 @@ pub struct VisibilityClass(pub SmallVec<[TypeId; 1]>); /// [`VisibilityPropagate`]: VisibilitySystems::VisibilityPropagate /// [`CheckVisibility`]: VisibilitySystems::CheckVisibility #[derive(Component, Deref, Debug, Default, Clone, Copy, Reflect, PartialEq, Eq)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct ViewVisibility(bool); impl ViewVisibility { @@ -200,35 +198,13 @@ impl ViewVisibility { } } -/// A [`Bundle`] of the [`Visibility`], [`InheritedVisibility`], and [`ViewVisibility`] -/// [`Component`]s, which describe the visibility of an entity. -/// -/// * To show or hide an entity, you should set its [`Visibility`]. -/// * To get the inherited visibility of an entity, you should get its [`InheritedVisibility`]. -/// * For visibility hierarchies to work correctly, you must have both all of [`Visibility`], [`InheritedVisibility`], and [`ViewVisibility`]. -/// * ~~You may use the [`VisibilityBundle`] to guarantee this.~~ [`VisibilityBundle`] is now deprecated. -/// [`InheritedVisibility`] and [`ViewVisibility`] are automatically inserted whenever [`Visibility`] is inserted. -#[derive(Bundle, Debug, Clone, Default)] -#[deprecated( - since = "0.15.0", - note = "Use the `Visibility` component instead. Inserting it will now also insert `InheritedVisibility` and `ViewVisibility` automatically." -)] -pub struct VisibilityBundle { - /// The visibility of the entity. - pub visibility: Visibility, - // The inherited visibility of the entity. - pub inherited_visibility: InheritedVisibility, - // The computed visibility of the entity. - pub view_visibility: ViewVisibility, -} - /// Use this component to opt-out of built-in frustum culling for entities, see /// [`Frustum`]. 
/// /// It can be used for example: /// - when a [`Mesh`] is updated but its [`Aabb`] is not, which might happen with animations, /// - when using some light effects, like wanting a [`Mesh`] out of the [`Frustum`] -/// to appear in the reflection of a [`Mesh`] within. +/// to appear in the reflection of a [`Mesh`] within. #[derive(Debug, Component, Default, Reflect)] #[reflect(Component, Default, Debug)] pub struct NoFrustumCulling; @@ -243,9 +219,9 @@ pub struct NoFrustumCulling; /// This component is intended to be attached to the same entity as the [`Camera`] and /// the [`Frustum`] defining the view. #[derive(Clone, Component, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct VisibleEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: TypeIdMap>, } @@ -293,9 +269,9 @@ impl VisibleEntities { /// /// This component is extracted from [`VisibleEntities`]. #[derive(Clone, Component, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct RenderVisibleEntities { - #[reflect(ignore)] + #[reflect(ignore, clone)] pub entities: TypeIdMap>, } @@ -340,7 +316,7 @@ pub enum VisibilitySystems { /// Label for [`update_frusta`] in [`CameraProjectionPlugin`](crate::camera::CameraProjectionPlugin). UpdateFrusta, /// Label for the system propagating the [`InheritedVisibility`] in a - /// [`hierarchy`](bevy_hierarchy). + /// [`ChildOf`] / [`Children`] hierarchy. VisibilityPropagate, /// Label for the [`check_visibility`] system updating [`ViewVisibility`] /// of each entity and the [`VisibleEntities`] of each view.\ @@ -349,6 +325,10 @@ pub enum VisibilitySystems { /// the order of systems within this set is irrelevant, as [`check_visibility`] /// assumes that its operations are irreversible during the frame. 
CheckVisibility, + /// Label for the `mark_newly_hidden_entities_invisible` system, which sets + /// [`ViewVisibility`] to [`ViewVisibility::HIDDEN`] for entities that no + /// view has marked as visible. + MarkNewlyHiddenEntitiesInvisible, } pub struct VisibilityPlugin; @@ -364,6 +344,10 @@ impl Plugin for VisibilityPlugin { .before(CheckVisibility) .after(TransformSystem::TransformPropagate), ) + .configure_sets( + PostUpdate, + MarkNewlyHiddenEntitiesInvisible.after(CheckVisibility), + ) .init_resource::() .add_systems( PostUpdate, @@ -372,6 +356,7 @@ impl Plugin for VisibilityPlugin { (visibility_propagate_system, reset_view_visibility) .in_set(VisibilityPropagate), check_visibility.in_set(CheckVisibility), + mark_newly_hidden_entities_invisible.in_set(MarkNewlyHiddenEntitiesInvisible), ), ); } @@ -398,10 +383,10 @@ pub fn calculate_bounds( /// Updates [`Frustum`]. /// /// This system is used in [`CameraProjectionPlugin`](crate::camera::CameraProjectionPlugin). -pub fn update_frusta( +pub fn update_frusta( mut views: Query< - (&GlobalTransform, &T, &mut Frustum), - Or<(Changed, Changed)>, + (&GlobalTransform, &Projection, &mut Frustum), + Or<(Changed, Changed)>, >, ) { for (transform, projection, mut frustum) in &mut views { @@ -411,20 +396,23 @@ pub fn update_frusta( fn visibility_propagate_system( changed: Query< - (Entity, &Visibility, Option<&Parent>, Option<&Children>), - (With, Changed), + (Entity, &Visibility, Option<&ChildOf>, Option<&Children>), + ( + With, + Or<(Changed, Changed)>, + ), >, mut visibility_query: Query<(&Visibility, &mut InheritedVisibility)>, children_query: Query<&Children, (With, With)>, ) { - for (entity, visibility, parent, children) in &changed { + for (entity, visibility, child_of, children) in &changed { let is_visible = match visibility { Visibility::Visible => true, Visibility::Hidden => false, // fall back to true if no parent is found or parent lacks components - Visibility::Inherited => parent - .and_then(|p| 
visibility_query.get(p.get()).ok()) - .map_or(true, |(_, x)| x.get()), + Visibility::Inherited => child_of + .and_then(|c| visibility_query.get(c.parent()).ok()) + .is_none_or(|(_, x)| x.get()), }; let (_, mut inherited_visibility) = visibility_query .get_mut(entity) @@ -477,6 +465,10 @@ fn propagate_recursive( } /// Stores all entities that were visible in the previous frame. +/// +/// As systems that check visibility judge entities visible, they remove them +/// from this set. Afterward, the `mark_newly_hidden_entities_invisible` system +/// runs and marks every mesh still remaining in this set as hidden. #[derive(Resource, Default, Deref, DerefMut)] pub struct PreviousVisibleEntities(EntityHashSet); @@ -628,13 +620,23 @@ pub fn check_visibility( } } } +} - // Now whatever previous visible entities are left are entities that were +/// Marks any entities that weren't judged visible this frame as invisible. +/// +/// As visibility-determining systems run, they remove entities that they judge +/// visible from [`PreviousVisibleEntities`]. At the end of visibility +/// determination, all entities that remain in [`PreviousVisibleEntities`] must +/// be invisible. This system goes through those entities and marks them newly +/// invisible (which sets the change flag for them). +fn mark_newly_hidden_entities_invisible( + mut view_visibilities: Query<&mut ViewVisibility>, + mut previous_visible_entities: ResMut, +) { + // Whatever previous visible entities are left are entities that were // visible last frame but just became invisible. for entity in previous_visible_entities.drain() { - if let Ok((_, _, mut view_visibility, _, _, _, _, _, _)) = - visible_aabb_query.get_mut(entity) - { + if let Ok(mut view_visibility) = view_visibilities.get_mut(entity) { *view_visibility = ViewVisibility::HIDDEN; } } @@ -653,8 +655,10 @@ pub fn check_visibility( /// ... 
/// } /// ``` -pub fn add_visibility_class(mut world: DeferredWorld<'_>, entity: Entity, _: ComponentId) -where +pub fn add_visibility_class( + mut world: DeferredWorld<'_>, + HookContext { entity, .. }: HookContext, +) where C: 'static, { if let Some(mut visibility_class) = world.get_mut::(entity) { @@ -666,7 +670,6 @@ where mod test { use super::*; use bevy_app::prelude::*; - use bevy_hierarchy::BuildChildren; #[test] fn visibility_propagation() { @@ -757,6 +760,58 @@ mod test { ); } + #[test] + fn test_visibility_propagation_on_parent_change() { + // Setup the world and schedule + let mut app = App::new(); + + app.add_systems(Update, visibility_propagate_system); + + // Create entities with visibility and hierarchy + let parent1 = app.world_mut().spawn((Visibility::Hidden,)).id(); + let parent2 = app.world_mut().spawn((Visibility::Visible,)).id(); + let child1 = app.world_mut().spawn((Visibility::Inherited,)).id(); + let child2 = app.world_mut().spawn((Visibility::Inherited,)).id(); + + // Build hierarchy + app.world_mut() + .entity_mut(parent1) + .add_children(&[child1, child2]); + + // Run the system initially to set up visibility + app.update(); + + // Change parent visibility to Hidden + app.world_mut() + .entity_mut(parent2) + .insert(Visibility::Visible); + // Simulate a change in the parent component + app.world_mut().entity_mut(child2).insert(ChildOf(parent2)); // example of changing parent + + // Run the system again to propagate changes + app.update(); + + let is_visible = |e: Entity| { + app.world() + .entity(e) + .get::() + .unwrap() + .get() + }; + + // Retrieve and assert visibility + + assert!( + !is_visible(child1), + "Child1 should inherit visibility from parent" + ); + + assert!( + is_visible(child2), + "Child2 should inherit visibility from parent" + ); + } + #[test] fn visibility_propagation_unconditional_visible() { use Visibility::{Hidden, Inherited, Visible}; diff --git a/crates/bevy_render/src/view/visibility/range.rs 
b/crates/bevy_render/src/view/visibility/range.rs index d62a9ffb69c5e..4c264e0778543 100644 --- a/crates/bevy_render/src/view/visibility/range.rs +++ b/crates/bevy_render/src/view/visibility/range.rs @@ -13,13 +13,15 @@ use bevy_ecs::{ query::{Changed, With}, reflect::ReflectComponent, removal_detection::RemovedComponents, - schedule::IntoSystemConfigs as _, - system::{Query, Res, ResMut, Resource}, + resource::Resource, + schedule::IntoScheduleConfigs as _, + system::{Local, Query, Res, ResMut}, }; use bevy_math::{vec4, FloatOrd, Vec4}; +use bevy_platform::collections::HashMap; use bevy_reflect::Reflect; use bevy_transform::components::GlobalTransform; -use bevy_utils::{prelude::default, HashMap}; +use bevy_utils::{prelude::default, Parallel}; use nonmax::NonMaxU16; use wgpu::{BufferBindingType, BufferUsages}; @@ -112,7 +114,7 @@ impl Plugin for VisibilityRangePlugin { /// `start_margin` of the next lower LOD; this is important for the crossfade /// effect to function properly. #[derive(Component, Clone, PartialEq, Default, Reflect)] -#[reflect(Component, PartialEq, Hash)] +#[reflect(Component, PartialEq, Hash, Clone)] pub struct VisibilityRange { /// The range of distances, in world units, between which this entity will /// smoothly fade into view as the camera zooms out. @@ -383,7 +385,8 @@ impl VisibleEntityRanges { pub fn check_visibility_ranges( mut visible_entity_ranges: ResMut, view_query: Query<(Entity, &GlobalTransform), With>, - mut entity_query: Query<(Entity, &GlobalTransform, Option<&Aabb>, &VisibilityRange)>, + mut par_local: Local>>, + entity_query: Query<(Entity, &GlobalTransform, Option<&Aabb>, &VisibilityRange)>, ) { visible_entity_ranges.clear(); @@ -402,30 +405,34 @@ pub fn check_visibility_ranges( // Check each entity/view pair. Only consider entities with // [`VisibilityRange`] components. 
- for (entity, entity_transform, maybe_model_aabb, visibility_range) in entity_query.iter_mut() { - let mut visibility = 0; - for (view_index, &(_, view_position)) in views.iter().enumerate() { - // If instructed to use the AABB and the model has one, use its - // center as the model position. Otherwise, use the model's - // translation. - let model_position = match (visibility_range.use_aabb, maybe_model_aabb) { - (true, Some(model_aabb)) => entity_transform - .affine() - .transform_point3a(model_aabb.center), - _ => entity_transform.translation_vec3a(), - }; - - if visibility_range.is_visible_at_all((view_position - model_position).length()) { - visibility |= 1 << view_index; + entity_query.par_iter().for_each( + |(entity, entity_transform, maybe_model_aabb, visibility_range)| { + let mut visibility = 0; + for (view_index, &(_, view_position)) in views.iter().enumerate() { + // If instructed to use the AABB and the model has one, use its + // center as the model position. Otherwise, use the model's + // translation. + let model_position = match (visibility_range.use_aabb, maybe_model_aabb) { + (true, Some(model_aabb)) => entity_transform + .affine() + .transform_point3a(model_aabb.center), + _ => entity_transform.translation_vec3a(), + }; + + if visibility_range.is_visible_at_all((view_position - model_position).length()) { + visibility |= 1 << view_index; + } } - } - // Invisible entities have no entry at all in the hash map. This speeds - // up checks slightly in this common case. - if visibility != 0 { - visible_entity_ranges.entities.insert(entity, visibility); - } - } + // Invisible entities have no entry at all in the hash map. This speeds + // up checks slightly in this common case. 
+ if visibility != 0 { + par_local.borrow_local_mut().push((entity, visibility)); + } + }, + ); + + visible_entity_ranges.entities.extend(par_local.drain()); } /// Extracts all [`VisibilityRange`] components from the main world to the diff --git a/crates/bevy_render/src/view/visibility/render_layers.rs b/crates/bevy_render/src/view/visibility/render_layers.rs index 1932abdf71fba..a5a58453e830b 100644 --- a/crates/bevy_render/src/view/visibility/render_layers.rs +++ b/crates/bevy_render/src/view/visibility/render_layers.rs @@ -20,7 +20,7 @@ pub type Layer = usize; /// /// Entities without this component belong to layer `0`. #[derive(Component, Clone, Reflect, PartialEq, Eq, PartialOrd, Ord)] -#[reflect(Component, Default, PartialEq, Debug)] +#[reflect(Component, Default, PartialEq, Debug, Clone)] pub struct RenderLayers(SmallVec<[u64; INLINE_BLOCKS]>); /// The number of memory blocks stored inline diff --git a/crates/bevy_render/src/view/window/mod.rs b/crates/bevy_render/src/view/window/mod.rs index 14c0fe45befc7..c3fc6e551634f 100644 --- a/crates/bevy_render/src/view/window/mod.rs +++ b/crates/bevy_render/src/view/window/mod.rs @@ -5,11 +5,8 @@ use crate::{ }; use bevy_app::{App, Plugin}; use bevy_ecs::{entity::EntityHashMap, prelude::*}; -use bevy_utils::{ - default, - tracing::{debug, warn}, - HashSet, -}; +use bevy_platform::collections::HashSet; +use bevy_utils::default; use bevy_window::{ CompositeAlphaMode, PresentMode, PrimaryWindow, RawHandleWrapper, Window, WindowClosing, }; @@ -17,6 +14,7 @@ use core::{ num::NonZero, ops::{Deref, DerefMut}, }; +use tracing::{debug, warn}; use wgpu::{ SurfaceConfiguration, SurfaceTargetUnsafe, TextureFormat, TextureUsages, TextureViewDescriptor, }; @@ -216,7 +214,6 @@ impl WindowSurfaces { /// another alternative is to try to use [`ANGLE`](https://github.com/gfx-rs/wgpu#angle) and /// [`Backends::GL`](crate::settings::Backends::GL) if your GPU/drivers support `OpenGL 4.3` / `OpenGL ES 3.0` or /// later. 
-#[allow(clippy::too_many_arguments)] pub fn prepare_windows( mut windows: ResMut, mut window_surfaces: ResMut, @@ -269,7 +266,7 @@ pub fn prepare_windows( } #[cfg(target_os = "linux")] Err(wgpu::SurfaceError::Timeout) if may_erroneously_timeout() => { - bevy_utils::tracing::trace!( + tracing::trace!( "Couldn't get swap chain texture. This is probably a quirk \ of your Linux GPU driver, so it can be safely ignored." ); @@ -307,9 +304,7 @@ const DEFAULT_DESIRED_MAXIMUM_FRAME_LATENCY: u32 = 2; pub fn create_surfaces( // By accessing a NonSend resource, we tell the scheduler to put this system on the main thread, // which is necessary for some OS's - #[cfg(any(target_os = "macos", target_os = "ios"))] _marker: Option< - NonSend, - >, + #[cfg(any(target_os = "macos", target_os = "ios"))] _marker: bevy_ecs::system::NonSendMarker, windows: Res, mut window_surfaces: ResMut, render_instance: Res, @@ -322,8 +317,8 @@ pub fn create_surfaces( .entry(window.entity) .or_insert_with(|| { let surface_target = SurfaceTargetUnsafe::RawHandle { - raw_display_handle: window.handle.display_handle, - raw_window_handle: window.handle.window_handle, + raw_display_handle: window.handle.get_display_handle(), + raw_window_handle: window.handle.get_window_handle(), }; // SAFETY: The window handles in ExtractedWindows will always be valid objects to create surfaces on let surface = unsafe { diff --git a/crates/bevy_render/src/view/window/screenshot.rs b/crates/bevy_render/src/view/window/screenshot.rs index aab9b08c680d7..6e223eedaf047 100644 --- a/crates/bevy_render/src/view/window/screenshot.rs +++ b/crates/bevy_render/src/view/window/screenshot.rs @@ -17,20 +17,16 @@ use crate::{ }; use alloc::{borrow::Cow, sync::Arc}; use bevy_app::{First, Plugin, Update}; -use bevy_asset::{load_internal_asset, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Handle}; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ entity::EntityHashMap, event::event_update_system, prelude::*, 
system::SystemState, }; -use bevy_hierarchy::DespawnRecursiveExt; use bevy_image::{Image, TextureFormatPixelInfo}; +use bevy_platform::collections::HashSet; use bevy_reflect::Reflect; use bevy_tasks::AsyncComputeTaskPool; -use bevy_utils::{ - default, - tracing::{error, info, warn}, - HashSet, -}; +use bevy_utils::default; use bevy_window::{PrimaryWindow, WindowRef}; use core::ops::Deref; use std::{ @@ -40,6 +36,7 @@ use std::{ Mutex, }, }; +use tracing::{error, info, warn}; use wgpu::{CommandEncoder, Extent3d, TextureFormat}; #[derive(Event, Deref, DerefMut, Reflect, Debug)] @@ -74,12 +71,12 @@ pub struct ScreenshotCaptured(pub Image); pub struct Screenshot(pub RenderTarget); /// A marker component that indicates that a screenshot is currently being captured. -#[derive(Component)] +#[derive(Component, Default)] pub struct Capturing; /// A marker component that indicates that a screenshot has been captured, the image is ready, and /// the screenshot entity can be despawned. -#[derive(Component)] +#[derive(Component, Default)] pub struct Captured; impl Screenshot { @@ -188,7 +185,7 @@ pub fn save_to_disk(path: impl AsRef) -> impl FnMut(Trigger>) { for entity in screenshots.iter() { - commands.entity(entity).despawn_recursive(); + commands.entity(entity).despawn(); } } @@ -239,11 +236,11 @@ fn extract_screenshots( }; if seen_targets.contains(&render_target) { warn!( - "Duplicate render target for screenshot, skipping entity {:?}: {:?}", + "Duplicate render target for screenshot, skipping entity {}: {:?}", entity, render_target ); // If we don't despawn the entity here, it will be captured again in the next frame - commands.entity(entity).despawn_recursive(); + commands.entity(entity).despawn(); continue; } seen_targets.insert(render_target.clone()); @@ -254,7 +251,6 @@ fn extract_screenshots( system_state.apply(&mut main_world); } -#[allow(clippy::too_many_arguments)] fn prepare_screenshots( targets: Res, mut prepared: ResMut, @@ -273,7 +269,7 @@ fn 
prepare_screenshots( NormalizedRenderTarget::Window(window) => { let window = window.entity(); let Some(surface_data) = window_surfaces.surfaces.get(&window) else { - warn!("Unknown window for screenshot, skipping: {:?}", window); + warn!("Unknown window for screenshot, skipping: {}", window); continue; }; let format = surface_data.configuration.format.add_srgb_suffix(); @@ -371,8 +367,7 @@ fn prepare_screenshot_state( let texture_view = texture.create_view(&Default::default()); let buffer = render_device.create_buffer(&wgpu::BufferDescriptor { label: Some("screenshot-transfer-buffer"), - size: gpu_readback::get_aligned_size(size.width, size.height, format.pixel_size() as u32) - as u64, + size: gpu_readback::get_aligned_size(size, format.pixel_size() as u32) as u64, usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST, mapped_at_creation: false, }); @@ -397,7 +392,8 @@ fn prepare_screenshot_state( pub struct ScreenshotPlugin; -const SCREENSHOT_SHADER_HANDLE: Handle = Handle::weak_from_u128(11918575842344596158); +const SCREENSHOT_SHADER_HANDLE: Handle = + weak_handle!("c31753d6-326a-47cb-a359-65c97a471fda"); impl Plugin for ScreenshotPlugin { fn build(&self, app: &mut bevy_app::App) { @@ -579,7 +575,6 @@ pub(crate) fn submit_screenshot_commands(world: &World, encoder: &mut CommandEnc } } -#[allow(clippy::too_many_arguments)] fn render_screenshot( encoder: &mut CommandEncoder, prepared: &RenderScreenshotsPrepared, @@ -591,17 +586,18 @@ fn render_screenshot( texture_view: &wgpu::TextureView, ) { if let Some(prepared_state) = &prepared.get(entity) { + let extent = Extent3d { + width, + height, + depth_or_array_layers: 1, + }; encoder.copy_texture_to_buffer( prepared_state.texture.as_image_copy(), - wgpu::ImageCopyBuffer { + wgpu::TexelCopyBufferInfo { buffer: &prepared_state.buffer, - layout: gpu_readback::layout_data(width, height, texture_format), - }, - Extent3d { - width, - height, - ..Default::default() + layout: gpu_readback::layout_data(extent, 
texture_format), }, + extent, ); if let Some(pipeline) = pipelines.get_render_pipeline(prepared_state.pipeline_id) { @@ -628,7 +624,7 @@ fn render_screenshot( pub(crate) fn collect_screenshots(world: &mut World) { #[cfg(feature = "trace")] - let _span = bevy_utils::tracing::info_span!("collect_screenshots").entered(); + let _span = tracing::info_span!("collect_screenshots").entered(); let sender = world.resource::().deref().clone(); let prepared = world.resource::(); @@ -690,7 +686,7 @@ pub(crate) fn collect_screenshots(world: &mut World) { RenderAssetUsages::RENDER_WORLD, ), )) { - error!("Failed to send screenshot: {:?}", e); + error!("Failed to send screenshot: {}", e); } }; diff --git a/crates/bevy_scene/Cargo.toml b/crates/bevy_scene/Cargo.toml index 91eecb563b69b..3bb913c85908b 100644 --- a/crates/bevy_scene/Cargo.toml +++ b/crates/bevy_scene/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_scene" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides scene functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -10,31 +10,40 @@ keywords = ["bevy"] [features] default = ["serialize"] -serialize = ["dep:serde", "uuid/serde", "bevy_ecs/serialize"] +serialize = [ + "dep:serde", + "uuid/serde", + "bevy_ecs/serialize", + "bevy_platform/serialize", +] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = 
"0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev", optional = true } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", ] } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev", optional = true } # other serde = { version = "1.0", features = ["derive"], optional = true } -uuid = { version = "1.1", features = ["v4"] } +uuid = { version = "1.13.1", features = ["v4"] } thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } +[target.'cfg(target_arch = "wasm32")'.dependencies] +# TODO: Assuming all wasm builds are for the browser. Require `no_std` support to break assumption. +uuid = { version = "1.13.1", default-features = false, features = ["js"] } + [dev-dependencies] postcard = { version = "1.0", features = ["alloc"] } -bincode = "1.3" +bincode = { version = "2.0", features = ["serde"] } rmp-serde = "1.1" [lints] diff --git a/crates/bevy_scene/LICENSE-APACHE b/crates/bevy_scene/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_scene/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_scene/LICENSE-MIT b/crates/bevy_scene/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_scene/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_scene/src/bundle.rs b/crates/bevy_scene/src/bundle.rs deleted file mode 100644 index 0024b2f77729b..0000000000000 --- a/crates/bevy_scene/src/bundle.rs +++ /dev/null @@ -1,188 +0,0 @@ -#![expect(deprecated)] - -use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::{ - bundle::Bundle, - change_detection::ResMut, - entity::Entity, - prelude::{Changed, Component, Without}, - system::{Commands, Query}, -}; -#[cfg(feature = "bevy_render")] -use bevy_render::prelude::{InheritedVisibility, ViewVisibility, Visibility}; -use bevy_transform::components::{GlobalTransform, Transform}; - -use crate::{DynamicSceneRoot, InstanceId, SceneRoot, SceneSpawner}; - -/// [`InstanceId`] of a spawned scene. 
It can be used with the [`SceneSpawner`] to -/// interact with the spawned scene. -#[derive(Component, Deref, DerefMut)] -pub struct SceneInstance(pub(crate) InstanceId); - -/// A component bundle for a [`Scene`](crate::Scene) root. -/// -/// The scene from `scene` will be spawned as a child of the entity with this component. -/// Once it's spawned, the entity will have a [`SceneInstance`] component. -#[derive(Default, Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `SceneRoot` component instead. Inserting `SceneRoot` will also insert the other components required by scenes automatically." -)] -pub struct SceneBundle { - /// Handle to the scene to spawn. - pub scene: SceneRoot, - /// Transform of the scene root entity. - pub transform: Transform, - /// Global transform of the scene root entity. - pub global_transform: GlobalTransform, - - /// User-driven visibility of the scene root entity. - #[cfg(feature = "bevy_render")] - pub visibility: Visibility, - /// Inherited visibility of the scene root entity. - #[cfg(feature = "bevy_render")] - pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed visibility of the scene root entity for rendering. - #[cfg(feature = "bevy_render")] - pub view_visibility: ViewVisibility, -} - -/// A component bundle for a [`DynamicScene`](crate::DynamicScene) root. -/// -/// The dynamic scene from `scene` will be spawn as a child of the entity with this component. -/// Once it's spawned, the entity will have a [`SceneInstance`] component. -#[derive(Default, Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `DynamicSceneRoot` component instead. Inserting `DynamicSceneRoot` will also insert the other components required by scenes automatically." -)] -pub struct DynamicSceneBundle { - /// Handle to the scene to spawn. - pub scene: DynamicSceneRoot, - /// Transform of the scene root entity. - pub transform: Transform, - /// Global transform of the scene root entity. 
- pub global_transform: GlobalTransform, - - /// User-driven visibility of the scene root entity. - #[cfg(feature = "bevy_render")] - pub visibility: Visibility, - /// Inherited visibility of the scene root entity. - #[cfg(feature = "bevy_render")] - pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed visibility of the scene root entity for rendering. - #[cfg(feature = "bevy_render")] - pub view_visibility: ViewVisibility, -} - -/// System that will spawn scenes from the [`SceneRoot`] and [`DynamicSceneRoot`] components. -pub fn scene_spawner( - mut commands: Commands, - mut scene_to_spawn: Query< - (Entity, &SceneRoot, Option<&mut SceneInstance>), - (Changed, Without), - >, - mut dynamic_scene_to_spawn: Query< - (Entity, &DynamicSceneRoot, Option<&mut SceneInstance>), - (Changed, Without), - >, - mut scene_spawner: ResMut, -) { - for (entity, scene, instance) in &mut scene_to_spawn { - let new_instance = scene_spawner.spawn_as_child(scene.0.clone(), entity); - if let Some(mut old_instance) = instance { - scene_spawner.despawn_instance(**old_instance); - *old_instance = SceneInstance(new_instance); - } else { - commands.entity(entity).insert(SceneInstance(new_instance)); - } - } - for (entity, dynamic_scene, instance) in &mut dynamic_scene_to_spawn { - let new_instance = scene_spawner.spawn_dynamic_as_child(dynamic_scene.0.clone(), entity); - if let Some(mut old_instance) = instance { - scene_spawner.despawn_instance(**old_instance); - *old_instance = SceneInstance(new_instance); - } else { - commands.entity(entity).insert(SceneInstance(new_instance)); - } - } -} - -#[cfg(test)] -mod tests { - use crate::{DynamicScene, DynamicSceneRoot, ScenePlugin, SceneSpawner}; - use bevy_app::{App, ScheduleRunnerPlugin}; - use bevy_asset::{AssetPlugin, Assets}; - use bevy_ecs::{ - component::Component, - entity::Entity, - prelude::{AppTypeRegistry, ReflectComponent, World}, - }; - use bevy_hierarchy::{Children, HierarchyPlugin}; - use 
bevy_reflect::Reflect; - - #[derive(Component, Reflect, Default)] - #[reflect(Component)] - struct ComponentA { - pub x: f32, - pub y: f32, - } - - #[test] - fn spawn_and_delete() { - let mut app = App::new(); - - app.add_plugins(ScheduleRunnerPlugin::default()) - .add_plugins(HierarchyPlugin) - .add_plugins(AssetPlugin::default()) - .add_plugins(ScenePlugin) - .register_type::(); - app.update(); - - let mut scene_world = World::new(); - - // create a new DynamicScene manually - let type_registry = app.world().resource::().clone(); - scene_world.insert_resource(type_registry); - scene_world.spawn(ComponentA { x: 3.0, y: 4.0 }); - let scene = DynamicScene::from_world(&scene_world); - let scene_handle = app - .world_mut() - .resource_mut::>() - .add(scene); - - // spawn the scene as a child of `entity` using `DynamicSceneRoot` - let entity = app - .world_mut() - .spawn(DynamicSceneRoot(scene_handle.clone())) - .id(); - - // run the app's schedule once, so that the scene gets spawned - app.update(); - - // make sure that the scene was added as a child of the root entity - let (scene_entity, scene_component_a) = app - .world_mut() - .query::<(Entity, &ComponentA)>() - .single(app.world()); - assert_eq!(scene_component_a.x, 3.0); - assert_eq!(scene_component_a.y, 4.0); - assert_eq!( - app.world().entity(entity).get::().unwrap().len(), - 1 - ); - - // let's try to delete the scene - let mut scene_spawner = app.world_mut().resource_mut::(); - scene_spawner.despawn(&scene_handle); - - // run the scene spawner system to despawn the scene - app.update(); - - // the scene entity does not exist anymore - assert!(app.world().get_entity(scene_entity).is_err()); - - // the root entity does not have any children anymore - assert!(app.world().entity(entity).get::().is_none()); - } -} diff --git a/crates/bevy_scene/src/components.rs b/crates/bevy_scene/src/components.rs index 8709c7990fd50..d4d42c3a1c98c 100644 --- a/crates/bevy_scene/src/components.rs +++ 
b/crates/bevy_scene/src/components.rs @@ -1,9 +1,6 @@ use bevy_asset::Handle; use bevy_derive::{Deref, DerefMut}; -use bevy_ecs::{ - component::{require, Component}, - prelude::ReflectComponent, -}; +use bevy_ecs::{component::Component, prelude::ReflectComponent}; use bevy_reflect::{prelude::ReflectDefault, Reflect}; use bevy_transform::components::Transform; use derive_more::derive::From; @@ -16,7 +13,7 @@ use crate::{DynamicScene, Scene}; /// Adding this component will spawn the scene as a child of that entity. /// Once it's spawned, the entity will have a [`SceneInstance`](crate::SceneInstance) component. #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[require(Transform)] #[cfg_attr(feature = "bevy_render", require(Visibility))] pub struct SceneRoot(pub Handle); @@ -24,7 +21,7 @@ pub struct SceneRoot(pub Handle); /// Adding this component will spawn the scene as a child of that entity. /// Once it's spawned, the entity will have a [`SceneInstance`](crate::SceneInstance) component. 
#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default, Debug, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] #[require(Transform)] #[cfg_attr(feature = "bevy_render", require(Visibility))] pub struct DynamicSceneRoot(pub Handle); diff --git a/crates/bevy_scene/src/dynamic_scene.rs b/crates/bevy_scene/src/dynamic_scene.rs index babc45f7f119f..f0cf3960d60ea 100644 --- a/crates/bevy_scene/src/dynamic_scene.rs +++ b/crates/bevy_scene/src/dynamic_scene.rs @@ -1,17 +1,23 @@ -use crate::{ron, DynamicSceneBuilder, Scene, SceneSpawnError}; +use crate::{DynamicSceneBuilder, Scene, SceneSpawnError}; use bevy_asset::Asset; -use bevy_ecs::reflect::ReflectResource; +use bevy_ecs::reflect::{ReflectMapEntities, ReflectResource}; use bevy_ecs::{ entity::{Entity, EntityHashMap, SceneEntityMapper}, - reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities}, + reflect::{AppTypeRegistry, ReflectComponent}, world::World, }; -use bevy_reflect::{PartialReflect, TypePath, TypeRegistry}; +use bevy_reflect::{PartialReflect, TypePath}; + +use crate::reflect_utils::clone_reflect_value; +use bevy_ecs::component::ComponentCloneBehavior; +use bevy_ecs::relationship::RelationshipHookMode; #[cfg(feature = "serialize")] -use crate::serde::SceneSerializer; -#[cfg(feature = "serialize")] -use serde::Serialize; +use { + crate::{ron, serde::SceneSerializer}, + bevy_reflect::TypeRegistry, + serde::Serialize, +}; /// A collection of serializable resources and dynamic entities. /// @@ -85,7 +91,6 @@ impl DynamicScene { // Apply/ add each component to the given entity. 
for component in &scene_entity.components { - let mut component = component.clone_value(); let type_info = component.get_represented_type_info().ok_or_else(|| { SceneSpawnError::NoRepresentedType { type_path: component.reflect_type_path().to_string(), @@ -103,26 +108,32 @@ impl DynamicScene { } })?; - // If this component references entities in the scene, update - // them to the entities in the world. - if let Some(map_entities) = registration.data::() { - SceneEntityMapper::world_scope(entity_map, world, |_, mapper| { - map_entities.map_entities(component.as_partial_reflect_mut(), mapper); - }); + { + let component_id = reflect_component.register_component(world); + // SAFETY: we registered the component above. the info exists + #[expect(unsafe_code, reason = "this is faster")] + let component_info = + unsafe { world.components().get_info_unchecked(component_id) }; + if *component_info.clone_behavior() == ComponentCloneBehavior::Ignore { + continue; + } } - reflect_component.apply_or_insert( - &mut world.entity_mut(entity), - component.as_partial_reflect(), - &type_registry, - ); + SceneEntityMapper::world_scope(entity_map, world, |world, mapper| { + reflect_component.apply_or_insert_mapped( + &mut world.entity_mut(entity), + component.as_partial_reflect(), + &type_registry, + mapper, + RelationshipHookMode::Skip, + ); + }); } } // Insert resources after all entities have been added to the world. // This ensures the entities are available for the resources to reference during mapping. for resource in &self.resources { - let mut resource = resource.clone_value(); let type_info = resource.get_represented_type_info().ok_or_else(|| { SceneSpawnError::NoRepresentedType { type_path: resource.reflect_type_path().to_string(), @@ -141,15 +152,22 @@ impl DynamicScene { // If this component references entities in the scene, update // them to the entities in the world. 
- if let Some(map_entities) = registration.data::() { + let mut cloned_resource; + let partial_reflect_resource = if let Some(map_entities) = + registration.data::() + { + cloned_resource = clone_reflect_value(resource.as_partial_reflect(), registration); SceneEntityMapper::world_scope(entity_map, world, |_, mapper| { - map_entities.map_entities(resource.as_partial_reflect_mut(), mapper); + map_entities.map_entities(cloned_resource.as_partial_reflect_mut(), mapper); }); - } + cloned_resource.as_partial_reflect() + } else { + resource.as_partial_reflect() + }; // If the world already contains an instance of the given resource // just apply the (possibly) new value, otherwise insert the resource - reflect_resource.apply_or_insert(world, resource.as_partial_reflect(), &type_registry); + reflect_resource.apply_or_insert(world, partial_reflect_resource, &type_registry); } Ok(()) @@ -199,23 +217,24 @@ where mod tests { use bevy_ecs::{ component::Component, - entity::{ - Entity, EntityHashMap, EntityMapper, MapEntities, VisitEntities, VisitEntitiesMut, - }, + entity::{Entity, EntityHashMap, EntityMapper, MapEntities}, + hierarchy::ChildOf, reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities, ReflectResource}, - system::Resource, - world::{Command, World}, + resource::Resource, + world::World, }; - use bevy_hierarchy::{AddChild, Parent}; + use bevy_reflect::Reflect; use crate::dynamic_scene::DynamicScene; use crate::dynamic_scene_builder::DynamicSceneBuilder; - #[derive(Resource, Reflect, Debug, VisitEntities, VisitEntitiesMut)] + #[derive(Resource, Reflect, MapEntities, Debug)] #[reflect(Resource, MapEntities)] struct TestResource { + #[entities] entity_a: Entity, + #[entities] entity_b: Entity, } @@ -268,14 +287,12 @@ mod tests { world .resource_mut::() .write() - .register::(); + .register::(); let original_parent_entity = world.spawn_empty().id(); let original_child_entity = world.spawn_empty().id(); - AddChild { - parent: original_parent_entity, - child: 
original_child_entity, - } - .apply(&mut world); + world + .entity_mut(original_parent_entity) + .add_child(original_child_entity); // We then write this relationship to a new scene, and then write that scene back to the // world to create another parent and child relationship @@ -292,15 +309,13 @@ mod tests { // We then add the parent from the scene as a child of the original child // Hierarchy should look like: // Original Parent <- Original Child <- Scene Parent <- Scene Child - AddChild { - parent: original_child_entity, - child: from_scene_parent_entity, - } - .apply(&mut world); + world + .entity_mut(original_child_entity) + .add_child(from_scene_parent_entity); // We then reload the scene to make sure that from_scene_parent_entity's parent component // isn't updated with the entity map, since this component isn't defined in the scene. - // With bevy_hierarchy, this can cause serious errors and malformed hierarchies. + // With [`bevy_ecs::hierarchy`], this can cause serious errors and malformed hierarchies. 
scene.write_to_world(&mut world, &mut entity_map).unwrap(); assert_eq!( @@ -308,9 +323,9 @@ mod tests { world .get_entity(original_child_entity) .unwrap() - .get::() + .get::() .unwrap() - .get(), + .parent(), "something about reloading the scene is touching entities with the same scene Ids" ); assert_eq!( @@ -318,9 +333,9 @@ mod tests { world .get_entity(from_scene_parent_entity) .unwrap() - .get::() + .get::() .unwrap() - .get(), + .parent(), "something about reloading the scene is touching components not defined in the scene but on entities defined in the scene" ); assert_eq!( @@ -328,9 +343,9 @@ mod tests { world .get_entity(from_scene_child_entity) .unwrap() - .get::() + .get::() .expect("something is wrong with this test, and the scene components don't have a parent/child relationship") - .get(), + .parent(), "something is wrong with this test or the code reloading scenes since the relationship between scene entities is broken" ); } @@ -343,13 +358,13 @@ mod tests { #[reflect(Component)] struct A; - #[derive(Component, Reflect, VisitEntities)] - #[reflect(Component, MapEntities)] + #[derive(Component, Reflect)] + #[reflect(Component)] struct B(pub Entity); impl MapEntities for B { - fn map_entities(&mut self, entity_mapper: &mut M) { - self.0 = entity_mapper.map_entity(self.0); + fn map_entities(&mut self, entity_mapper: &mut E) { + self.0 = entity_mapper.get_mapped(self.0); } } @@ -368,7 +383,7 @@ mod tests { let mut dst_world = World::new(); dst_world .register_component_hooks::() - .on_add(|mut world, _, _| { + .on_add(|mut world, _| { world.commands().spawn_empty(); }); dst_world.insert_resource(reg.clone()); diff --git a/crates/bevy_scene/src/dynamic_scene_builder.rs b/crates/bevy_scene/src/dynamic_scene_builder.rs index 208702dd81b3b..5deac09aaa073 100644 --- a/crates/bevy_scene/src/dynamic_scene_builder.rs +++ b/crates/bevy_scene/src/dynamic_scene_builder.rs @@ -1,13 +1,17 @@ +use core::any::TypeId; + +use crate::reflect_utils::clone_reflect_value; use 
crate::{DynamicEntity, DynamicScene, SceneFilter}; use alloc::collections::BTreeMap; use bevy_ecs::{ component::{Component, ComponentId}, + entity_disabling::DefaultQueryFilters, prelude::Entity, reflect::{AppTypeRegistry, ReflectComponent, ReflectResource}, - system::Resource, + resource::Resource, world::World, }; -use bevy_reflect::{PartialReflect, ReflectFromReflect}; +use bevy_reflect::PartialReflect; use bevy_utils::default; /// A [`DynamicScene`] builder, used to build a scene from a [`World`] by extracting some entities and resources. @@ -300,14 +304,8 @@ impl<'w> DynamicSceneBuilder<'w> { .data::()? .reflect(original_entity)?; - // Clone via `FromReflect`. Unlike `PartialReflect::clone_value` this - // retains the original type and `ReflectSerialize` type data which is needed to - // deserialize. - let component = type_registration - .data::() - .and_then(|fr| fr.from_reflect(component.as_partial_reflect())) - .map(PartialReflect::into_partial_reflect) - .unwrap_or_else(|| component.clone_value()); + let component = + clone_reflect_value(component.as_partial_reflect(), type_registration); entry.components.push(component); Some(()) @@ -348,9 +346,18 @@ impl<'w> DynamicSceneBuilder<'w> { /// [`deny_resource`]: Self::deny_resource #[must_use] pub fn extract_resources(mut self) -> Self { + // Don't extract the DefaultQueryFilters resource + let original_world_dqf_id = self + .original_world + .components() + .get_resource_id(TypeId::of::()); + let type_registry = self.original_world.resource::().read(); for (component_id, _) in self.original_world.storages().resources.iter() { + if Some(component_id) == original_world_dqf_id { + continue; + } let mut extract_and_push = || { let type_id = self .original_world @@ -369,13 +376,11 @@ impl<'w> DynamicSceneBuilder<'w> { let resource = type_registration .data::()? 
- .reflect(self.original_world)?; + .reflect(self.original_world) + .ok()?; - let resource = type_registration - .data::() - .and_then(|fr| fr.from_reflect(resource.as_partial_reflect())) - .map(PartialReflect::into_partial_reflect) - .unwrap_or_else(|| resource.clone_value()); + let resource = + clone_reflect_value(resource.as_partial_reflect(), type_registration); self.extracted_resources.insert(component_id, resource); Some(()) diff --git a/crates/bevy_scene/src/lib.rs b/crates/bevy_scene/src/lib.rs index 8a21b2040d78e..a507a58aafb1c 100644 --- a/crates/bevy_scene/src/lib.rs +++ b/crates/bevy_scene/src/lib.rs @@ -1,5 +1,4 @@ #![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![forbid(unsafe_code)] #![doc( html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" @@ -13,10 +12,10 @@ extern crate alloc; -mod bundle; mod components; mod dynamic_scene; mod dynamic_scene_builder; +mod reflect_utils; mod scene; mod scene_filter; mod scene_loader; @@ -28,8 +27,6 @@ pub mod serde; /// Rusty Object Notation, a crate used to serialize and deserialize bevy scenes. pub use bevy_asset::ron; -use bevy_ecs::schedule::IntoSystemConfigs; -pub use bundle::*; pub use components::*; pub use dynamic_scene::*; pub use dynamic_scene_builder::*; @@ -41,17 +38,18 @@ pub use scene_spawner::*; /// The scene prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. -#[expect(deprecated)] pub mod prelude { #[doc(hidden)] pub use crate::{ - DynamicScene, DynamicSceneBuilder, DynamicSceneBundle, DynamicSceneRoot, Scene, - SceneBundle, SceneFilter, SceneRoot, SceneSpawner, + DynamicScene, DynamicSceneBuilder, DynamicSceneRoot, Scene, SceneFilter, SceneRoot, + SceneSpawner, }; } use bevy_app::prelude::*; -use bevy_asset::AssetApp; + +#[cfg(feature = "serialize")] +use {bevy_asset::AssetApp, bevy_ecs::schedule::IntoScheduleConfigs}; /// Plugin that provides scene functionality to an [`App`]. 
#[derive(Default)] @@ -71,31 +69,35 @@ impl Plugin for ScenePlugin { // Register component hooks for DynamicSceneRoot app.world_mut() .register_component_hooks::() - .on_remove(|mut world, entity, _| { - let Some(handle) = world.get::(entity) else { + .on_remove(|mut world, context| { + let Some(handle) = world.get::(context.entity) else { return; }; let id = handle.id(); - if let Some(&SceneInstance(scene_instance)) = world.get::(entity) { + if let Some(&SceneInstance(scene_instance)) = + world.get::(context.entity) + { let Some(mut scene_spawner) = world.get_resource_mut::() else { return; }; if let Some(instance_ids) = scene_spawner.spawned_dynamic_scenes.get_mut(&id) { instance_ids.remove(&scene_instance); } - scene_spawner.despawn_instance(scene_instance); + scene_spawner.unregister_instance(scene_instance); } }); // Register component hooks for SceneRoot app.world_mut() .register_component_hooks::() - .on_remove(|mut world, entity, _| { - if let Some(&SceneInstance(scene_instance)) = world.get::(entity) { + .on_remove(|mut world, context| { + if let Some(&SceneInstance(scene_instance)) = + world.get::(context.entity) + { let Some(mut scene_spawner) = world.get_resource_mut::() else { return; }; - scene_spawner.despawn_instance(scene_instance); + scene_spawner.unregister_instance(scene_instance); } }); } diff --git a/crates/bevy_scene/src/reflect_utils.rs b/crates/bevy_scene/src/reflect_utils.rs new file mode 100644 index 0000000000000..bf69dd035295a --- /dev/null +++ b/crates/bevy_scene/src/reflect_utils.rs @@ -0,0 +1,25 @@ +use bevy_reflect::{PartialReflect, ReflectFromReflect, TypeRegistration}; + +/// Attempts to clone a [`PartialReflect`] value using various methods. +/// +/// This first attempts to clone via [`PartialReflect::reflect_clone`]. +/// then falls back to [`ReflectFromReflect::from_reflect`], +/// and finally [`PartialReflect::to_dynamic`] if the first two methods fail. 
+/// +/// This helps ensure that the original type and type data is retained, +/// and only returning a dynamic type if all other methods fail. +pub(super) fn clone_reflect_value( + value: &dyn PartialReflect, + type_registration: &TypeRegistration, +) -> Box { + value + .reflect_clone() + .map(PartialReflect::into_partial_reflect) + .unwrap_or_else(|_| { + type_registration + .data::() + .and_then(|fr| fr.from_reflect(value.as_partial_reflect())) + .map(PartialReflect::into_partial_reflect) + .unwrap_or_else(|| value.to_dynamic()) + }) +} diff --git a/crates/bevy_scene/src/scene.rs b/crates/bevy_scene/src/scene.rs index aba8a55d46796..1d684c9dac2bb 100644 --- a/crates/bevy_scene/src/scene.rs +++ b/crates/bevy_scene/src/scene.rs @@ -1,11 +1,17 @@ +use core::any::TypeId; + +use crate::reflect_utils::clone_reflect_value; use crate::{DynamicScene, SceneSpawnError}; use bevy_asset::Asset; use bevy_ecs::{ + component::ComponentCloneBehavior, entity::{Entity, EntityHashMap, SceneEntityMapper}, - reflect::{AppTypeRegistry, ReflectComponent, ReflectMapEntities, ReflectResource}, + entity_disabling::DefaultQueryFilters, + reflect::{AppTypeRegistry, ReflectComponent, ReflectResource}, + relationship::RelationshipHookMode, world::World, }; -use bevy_reflect::{PartialReflect, TypePath}; +use bevy_reflect::TypePath; /// A composition of [`World`] objects. 
/// @@ -59,8 +65,16 @@ impl Scene { ) -> Result<(), SceneSpawnError> { let type_registry = type_registry.read(); + let self_dqf_id = self + .world + .components() + .get_resource_id(TypeId::of::()); + // Resources archetype for (component_id, resource_data) in self.world.storages().resources.iter() { + if Some(component_id) == self_dqf_id { + continue; + } if !resource_data.is_present() { continue; } @@ -112,6 +126,10 @@ impl Scene { .get_info(component_id) .expect("component_ids in archetypes should have ComponentInfo"); + if *component_info.clone_behavior() == ComponentCloneBehavior::Ignore { + continue; + } + let registration = type_registry .get(component_info.type_id().unwrap()) .ok_or_else(|| SceneSpawnError::UnregisteredType { @@ -124,25 +142,26 @@ impl Scene { } })?; - let Some(mut component) = reflect_component + let Some(component) = reflect_component .reflect(self.world.entity(scene_entity.id())) - .map(PartialReflect::clone_value) + .map(|component| { + clone_reflect_value(component.as_partial_reflect(), registration) + }) else { continue; }; // If this component references entities in the scene, // update them to the entities in the world. 
- if let Some(map_entities) = registration.data::() { - SceneEntityMapper::world_scope(entity_map, world, |_, mapper| { - map_entities.map_entities(component.as_partial_reflect_mut(), mapper); - }); - } - reflect_component.apply_or_insert( - &mut world.entity_mut(entity), - component.as_partial_reflect(), - &type_registry, - ); + SceneEntityMapper::world_scope(entity_map, world, |world, mapper| { + reflect_component.apply_or_insert_mapped( + &mut world.entity_mut(entity), + component.as_partial_reflect(), + &type_registry, + mapper, + RelationshipHookMode::Skip, + ); + }); } } } diff --git a/crates/bevy_scene/src/scene_filter.rs b/crates/bevy_scene/src/scene_filter.rs index 062b218165c0c..a3154c37e79ff 100644 --- a/crates/bevy_scene/src/scene_filter.rs +++ b/crates/bevy_scene/src/scene_filter.rs @@ -1,4 +1,4 @@ -use bevy_utils::{hashbrown::hash_set::IntoIter, HashSet}; +use bevy_platform::collections::{hash_set::IntoIter, HashSet}; use core::any::{Any, TypeId}; /// A filter used to control which types can be added to a [`DynamicScene`]. diff --git a/crates/bevy_scene/src/scene_loader.rs b/crates/bevy_scene/src/scene_loader.rs index 481b7ebc04124..d74dff84f5a6d 100644 --- a/crates/bevy_scene/src/scene_loader.rs +++ b/crates/bevy_scene/src/scene_loader.rs @@ -1,21 +1,27 @@ -#[cfg(feature = "serialize")] -use crate::serde::SceneDeserializer; -use crate::{ron, DynamicScene}; -use bevy_asset::{io::Reader, AssetLoader, LoadContext}; +use crate::ron; use bevy_ecs::{ reflect::AppTypeRegistry, world::{FromWorld, World}, }; use bevy_reflect::TypeRegistryArc; -#[cfg(feature = "serialize")] -use serde::de::DeserializeSeed; use thiserror::Error; +#[cfg(feature = "serialize")] +use { + crate::{serde::SceneDeserializer, DynamicScene}, + bevy_asset::{io::Reader, AssetLoader, LoadContext}, + serde::de::DeserializeSeed, +}; + /// Asset loader for a Bevy dynamic scene (`.scn` / `.scn.ron`). /// /// The loader handles assets serialized with [`DynamicScene::serialize`]. 
#[derive(Debug)] pub struct SceneLoader { + #[cfg_attr( + not(feature = "serialize"), + expect(dead_code, reason = "only used with `serialize` feature") + )] type_registry: TypeRegistryArc, } diff --git a/crates/bevy_scene/src/scene_spawner.rs b/crates/bevy_scene/src/scene_spawner.rs index 32d829b8c726a..dce5ad971e105 100644 --- a/crates/bevy_scene/src/scene_spawner.rs +++ b/crates/bevy_scene/src/scene_spawner.rs @@ -3,23 +3,30 @@ use bevy_asset::{AssetEvent, AssetId, Assets, Handle}; use bevy_ecs::{ entity::{Entity, EntityHashMap}, event::{Event, EventCursor, Events}, + hierarchy::ChildOf, reflect::AppTypeRegistry, - system::Resource, - world::{Command, Mut, World}, + resource::Resource, + world::{Mut, World}, }; -use bevy_hierarchy::{AddChild, BuildChildren, DespawnRecursiveExt, Parent}; +use bevy_platform::collections::{HashMap, HashSet}; use bevy_reflect::Reflect; -use bevy_utils::{HashMap, HashSet}; use thiserror::Error; use uuid::Uuid; +use crate::{DynamicSceneRoot, SceneRoot}; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{ + change_detection::ResMut, + prelude::{Changed, Component, Without}, + system::{Commands, Query}, +}; /// Triggered on a scene's parent entity when [`crate::SceneInstance`] becomes ready to use. /// /// See also [`Trigger`], [`SceneSpawner::instance_is_ready`]. /// /// [`Trigger`]: bevy_ecs::observer::Trigger #[derive(Clone, Copy, Debug, Eq, PartialEq, Event, Reflect)] -#[reflect(Debug, PartialEq)] +#[reflect(Debug, PartialEq, Clone)] pub struct SceneInstanceReady { /// Instance which has been spawned. pub instance_id: InstanceId, @@ -34,7 +41,7 @@ pub struct InstanceInfo { /// Unique id identifying a scene instance. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, Reflect)] -#[reflect(Debug, PartialEq, Hash)] +#[reflect(Debug, PartialEq, Hash, Clone)] pub struct InstanceId(Uuid); impl InstanceId { @@ -172,10 +179,19 @@ impl SceneSpawner { } /// Schedule the despawn of a scene instance, removing all its entities from the world. 
+ /// + /// Note: this will despawn _all_ entities associated with this instance, including those + /// that have been removed from the scene hierarchy. To despawn _only_ entities still in the hierarchy, + /// despawn the relevant root entity directly. pub fn despawn_instance(&mut self, instance_id: InstanceId) { self.instances_to_despawn.push(instance_id); } + /// This will remove all records of this instance, without despawning any entities. + pub fn unregister_instance(&mut self, instance_id: InstanceId) { + self.spawned_instances.remove(&instance_id); + } + /// Immediately despawns all instances of a dynamic scene. pub fn despawn_sync( &mut self, @@ -194,9 +210,8 @@ impl SceneSpawner { pub fn despawn_instance_sync(&mut self, world: &mut World, instance_id: &InstanceId) { if let Some(instance) = self.spawned_instances.remove(instance_id) { for &entity in instance.entity_map.values() { - if let Ok(mut entity_mut) = world.get_entity_mut(entity) { - entity_mut.remove_parent(); - entity_mut.despawn_recursive(); + if let Ok(entity_mut) = world.get_entity_mut(entity) { + entity_mut.despawn(); }; } } @@ -316,10 +331,7 @@ impl SceneSpawner { Ok(_) => { self.spawned_instances .insert(instance_id, InstanceInfo { entity_map }); - let spawned = self - .spawned_dynamic_scenes - .entry(handle.id()) - .or_insert_with(HashSet::default); + let spawned = self.spawned_dynamic_scenes.entry(handle.id()).or_default(); spawned.insert(instance_id); // Scenes with parents need more setup before they are ready. 
@@ -371,22 +383,18 @@ impl SceneSpawner { for (instance_id, parent) in scenes_with_parent { if let Some(instance) = self.spawned_instances.get(&instance_id) { for &entity in instance.entity_map.values() { - // Add the `Parent` component to the scene root, and update the `Children` component of + // Add the `ChildOf` component to the scene root, and update the `Children` component of // the scene parent if !world .get_entity(entity) + .ok() // This will filter only the scene root entity, as all other from the // scene have a parent - .map(|entity| entity.contains::()) - // Default is true so that it won't run on an entity that wouldn't exist anymore + // Entities that wouldn't exist anymore are also skipped // this case shouldn't happen anyway - .unwrap_or(true) + .is_none_or(|entity| entity.contains::()) { - AddChild { - parent, - child: entity, - } - .apply(world); + world.entity_mut(parent).add_child(entity); } } @@ -400,7 +408,7 @@ impl SceneSpawner { } } - /// Check that an scene instance spawned previously is ready to use + /// Check that a scene instance spawned previously is ready to use pub fn instance_is_ready(&self, instance_id: InstanceId) -> bool { self.spawned_instances.contains_key(&instance_id) } @@ -472,12 +480,51 @@ pub fn scene_spawner_system(world: &mut World) { }); } +/// [`InstanceId`] of a spawned scene. It can be used with the [`SceneSpawner`] to +/// interact with the spawned scene. +#[derive(Component, Deref, DerefMut)] +pub struct SceneInstance(pub(crate) InstanceId); + +/// System that will spawn scenes from the [`SceneRoot`] and [`DynamicSceneRoot`] components. 
+pub fn scene_spawner( + mut commands: Commands, + mut scene_to_spawn: Query< + (Entity, &SceneRoot, Option<&mut SceneInstance>), + (Changed, Without), + >, + mut dynamic_scene_to_spawn: Query< + (Entity, &DynamicSceneRoot, Option<&mut SceneInstance>), + (Changed, Without), + >, + mut scene_spawner: ResMut, +) { + for (entity, scene, instance) in &mut scene_to_spawn { + let new_instance = scene_spawner.spawn_as_child(scene.0.clone(), entity); + if let Some(mut old_instance) = instance { + scene_spawner.despawn_instance(**old_instance); + *old_instance = SceneInstance(new_instance); + } else { + commands.entity(entity).insert(SceneInstance(new_instance)); + } + } + for (entity, dynamic_scene, instance) in &mut dynamic_scene_to_spawn { + let new_instance = scene_spawner.spawn_dynamic_as_child(dynamic_scene.0.clone(), entity); + if let Some(mut old_instance) = instance { + scene_spawner.despawn_instance(**old_instance); + *old_instance = SceneInstance(new_instance); + } else { + commands.entity(entity).insert(SceneInstance(new_instance)); + } + } +} + #[cfg(test)] mod tests { use bevy_app::App; use bevy_asset::{AssetPlugin, AssetServer, Handle}; use bevy_ecs::{ component::Component, + hierarchy::Children, observer::Trigger, prelude::ReflectComponent, query::With, @@ -488,6 +535,78 @@ mod tests { use crate::{DynamicSceneBuilder, DynamicSceneRoot, ScenePlugin}; use super::*; + use crate::{DynamicScene, SceneSpawner}; + use bevy_app::ScheduleRunnerPlugin; + use bevy_asset::Assets; + use bevy_ecs::{ + entity::Entity, + prelude::{AppTypeRegistry, World}, + }; + + #[derive(Component, Reflect, Default)] + #[reflect(Component)] + struct ComponentA { + pub x: f32, + pub y: f32, + } + + #[test] + fn spawn_and_delete() { + let mut app = App::new(); + + app.add_plugins(ScheduleRunnerPlugin::default()) + .add_plugins(AssetPlugin::default()) + .add_plugins(ScenePlugin) + .register_type::(); + app.update(); + + let mut scene_world = World::new(); + + // create a new DynamicScene 
manually + let type_registry = app.world().resource::().clone(); + scene_world.insert_resource(type_registry); + scene_world.spawn(ComponentA { x: 3.0, y: 4.0 }); + let scene = DynamicScene::from_world(&scene_world); + let scene_handle = app + .world_mut() + .resource_mut::>() + .add(scene); + + // spawn the scene as a child of `entity` using `DynamicSceneRoot` + let entity = app + .world_mut() + .spawn(DynamicSceneRoot(scene_handle.clone())) + .id(); + + // run the app's schedule once, so that the scene gets spawned + app.update(); + + // make sure that the scene was added as a child of the root entity + let (scene_entity, scene_component_a) = app + .world_mut() + .query::<(Entity, &ComponentA)>() + .single(app.world()) + .unwrap(); + assert_eq!(scene_component_a.x, 3.0); + assert_eq!(scene_component_a.y, 4.0); + assert_eq!( + app.world().entity(entity).get::().unwrap().len(), + 1 + ); + + // let's try to delete the scene + let mut scene_spawner = app.world_mut().resource_mut::(); + scene_spawner.despawn(&scene_handle); + + // run the scene spawner system to despawn the scene + app.update(); + + // the scene entity does not exist anymore + assert!(app.world().get_entity(scene_entity).is_err()); + + // the root entity does not have any children anymore + assert!(app.world().entity(entity).get::().is_none()); + } #[derive(Reflect, Component, Debug, PartialEq, Eq, Clone, Copy, Default)] #[reflect(Component)] @@ -510,7 +629,10 @@ mod tests { // clone only existing entity let mut scene_spawner = SceneSpawner::default(); - let entity = world.query_filtered::>().single(&world); + let entity = world + .query_filtered::>() + .single(&world) + .unwrap(); let scene = DynamicSceneBuilder::from_world(&world) .extract_entity(entity) .build(); @@ -542,7 +664,7 @@ mod tests { #[derive(Component, Reflect, Default)] #[reflect(Component)] - struct ComponentA; + struct ComponentF; #[derive(Resource, Default)] struct TriggerCount(u32); @@ -552,9 +674,9 @@ mod tests { 
app.add_plugins((AssetPlugin::default(), ScenePlugin)); app.init_resource::(); - app.register_type::(); - app.world_mut().spawn(ComponentA); - app.world_mut().spawn(ComponentA); + app.register_type::(); + app.world_mut().spawn(ComponentF); + app.world_mut().spawn(ComponentF); app } @@ -708,7 +830,7 @@ mod tests { fn despawn_scene() { let mut app = App::new(); app.add_plugins((AssetPlugin::default(), ScenePlugin)); - app.register_type::(); + app.register_type::(); let asset_server = app.world().resource::(); @@ -729,7 +851,7 @@ mod tests { // Spawn scene. for _ in 0..count { app.world_mut() - .spawn((ComponentA, DynamicSceneRoot(scene.clone()))); + .spawn((ComponentF, DynamicSceneRoot(scene.clone()))); } app.update(); @@ -738,9 +860,9 @@ mod tests { // Despawn scene. app.world_mut() .run_system_once( - |mut commands: Commands, query: Query>| { + |mut commands: Commands, query: Query>| { for entity in query.iter() { - commands.entity(entity).despawn_recursive(); + commands.entity(entity).despawn(); } }, ) @@ -749,4 +871,58 @@ mod tests { app.update(); check(app.world_mut(), 0); } + + #[test] + fn scene_child_order_preserved_when_archetype_order_mismatched() { + let mut app = App::new(); + + app.add_plugins(ScheduleRunnerPlugin::default()) + .add_plugins(AssetPlugin::default()) + .add_plugins(ScenePlugin) + .register_type::() + .register_type::(); + app.update(); + + let mut scene_world = World::new(); + let root = scene_world.spawn_empty().id(); + let temporary_root = scene_world.spawn_empty().id(); + // Spawn entities with different parent first before parenting them to the actual root, allowing us + // to decouple child order from archetype-creation-order + let child1 = scene_world + .spawn((ChildOf(temporary_root), ComponentA { x: 1.0, y: 1.0 })) + .id(); + let child2 = scene_world + .spawn((ChildOf(temporary_root), ComponentA { x: 2.0, y: 2.0 })) + .id(); + // the "first" child is intentionally spawned with a different component to force it into a "newer" 
archetype, + // meaning it will be iterated later in the spawn code. + let child0 = scene_world + .spawn((ChildOf(temporary_root), ComponentF)) + .id(); + + scene_world + .entity_mut(root) + .add_children(&[child0, child1, child2]); + + let scene = Scene::new(scene_world); + let scene_handle = app.world_mut().resource_mut::>().add(scene); + + let spawned = app.world_mut().spawn(SceneRoot(scene_handle.clone())).id(); + + app.update(); + let world = app.world_mut(); + + let spawned_root = world.entity(spawned).get::().unwrap()[0]; + let children = world.entity(spawned_root).get::().unwrap(); + assert_eq!(children.len(), 3); + assert!(world.entity(children[0]).get::().is_some()); + assert_eq!( + world.entity(children[1]).get::().unwrap().x, + 1.0 + ); + assert_eq!( + world.entity(children[2]).get::().unwrap().x, + 2.0 + ); + } } diff --git a/crates/bevy_scene/src/serde.rs b/crates/bevy_scene/src/serde.rs index 0c74b8e7ee894..ead8933a49966 100644 --- a/crates/bevy_scene/src/serde.rs +++ b/crates/bevy_scene/src/serde.rs @@ -2,6 +2,7 @@ use crate::{DynamicEntity, DynamicScene}; use bevy_ecs::entity::Entity; +use bevy_platform::collections::HashSet; use bevy_reflect::{ serde::{ ReflectDeserializer, TypeRegistrationDeserializer, TypedReflectDeserializer, @@ -9,7 +10,6 @@ use bevy_reflect::{ }, PartialReflect, ReflectFromReflect, TypeRegistry, }; -use bevy_utils::HashSet; use core::fmt::Formatter; use serde::{ de::{DeserializeSeed, Error, MapAccess, SeqAccess, Visitor}, @@ -180,7 +180,7 @@ impl<'a> Serialize for SceneMapSerializer<'a> { ) }) .collect::>(); - entries.sort_by_key(|(type_path, _partial_reflect)| *type_path); + entries.sort_by_key(|(type_path, _)| *type_path); entries }; @@ -515,14 +515,13 @@ mod tests { DynamicScene, DynamicSceneBuilder, }; use bevy_ecs::{ - entity::{Entity, EntityHashMap, VisitEntities, VisitEntitiesMut}, + entity::{Entity, EntityHashMap}, prelude::{Component, ReflectComponent, ReflectResource, Resource, World}, query::{With, Without}, - 
reflect::{AppTypeRegistry, ReflectMapEntities}, + reflect::AppTypeRegistry, world::FromWorld, }; use bevy_reflect::{Reflect, ReflectDeserialize, ReflectSerialize}; - use bincode::Options; use serde::{de::DeserializeSeed, Deserialize, Serialize}; use std::io::BufReader; @@ -584,9 +583,9 @@ mod tests { foo: i32, } - #[derive(Clone, Component, Reflect, PartialEq, VisitEntities, VisitEntitiesMut)] - #[reflect(Component, MapEntities, PartialEq)] - struct MyEntityRef(Entity); + #[derive(Clone, Component, Reflect, PartialEq)] + #[reflect(Component, PartialEq)] + struct MyEntityRef(#[entities] Entity); impl FromWorld for MyEntityRef { fn from_world(_world: &mut World) -> Self { @@ -763,12 +762,12 @@ mod tests { let bar_to_foo = dst_world .query_filtered::<&MyEntityRef, Without>() - .get_single(&dst_world) + .single(&dst_world) .cloned() .unwrap(); let foo = dst_world .query_filtered::>() - .get_single(&dst_world) + .single(&dst_world) .unwrap(); assert_eq!(foo, bar_to_foo.0); @@ -793,7 +792,7 @@ mod tests { deserialized_scene .write_to_world(&mut world, &mut EntityHashMap::default()) .unwrap(); - assert_eq!(&qux, world.query::<&Qux>().single(&world)); + assert_eq!(&qux, world.query::<&Qux>().single(&world).unwrap()); } #[test] @@ -894,8 +893,9 @@ mod tests { let scene = DynamicScene::from_world(&world); + let config = bincode::config::standard().with_fixed_int_encoding(); let scene_serializer = SceneSerializer::new(&scene, registry); - let serialized_scene = bincode::serialize(&scene_serializer).unwrap(); + let serialized_scene = bincode::serde::encode_to_vec(&scene_serializer, config).unwrap(); assert_eq!( vec![ @@ -913,10 +913,9 @@ mod tests { type_registry: registry, }; - let deserialized_scene = bincode::DefaultOptions::new() - .with_fixint_encoding() - .deserialize_seed(scene_deserializer, &serialized_scene) - .unwrap(); + let (deserialized_scene, _read_bytes) = + bincode::serde::seed_decode_from_slice(scene_deserializer, &serialized_scene, config) + .unwrap(); 
assert_eq!(1, deserialized_scene.entities.len()); assert_scene_eq(&scene, &deserialized_scene); @@ -935,7 +934,7 @@ mod tests { .entities .iter() .find(|dynamic_entity| dynamic_entity.entity == expected.entity) - .unwrap_or_else(|| panic!("missing entity (expected: `{:?}`)", expected.entity)); + .unwrap_or_else(|| panic!("missing entity (expected: `{}`)", expected.entity)); assert_eq!(expected.entity, received.entity, "entities did not match"); diff --git a/crates/bevy_sprite/Cargo.toml b/crates/bevy_sprite/Cargo.toml index 8800f6cb6e115..8fa5bae2ccefe 100644 --- a/crates/bevy_sprite/Cargo.toml +++ b/crates/bevy_sprite/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_sprite" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides sprite functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -10,40 +10,37 @@ keywords = ["bevy"] [features] bevy_sprite_picking_backend = ["bevy_picking", "bevy_window"] -serialize = ["dep:serde"] webgl = [] webgpu = [] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev" } -bevy_core_pipeline = { path = "../bevy_core_pipeline", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_picking = { path = "../bevy_picking", version = "0.15.0-dev", optional = true } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = "../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_core_pipeline = { path = "../bevy_core_pipeline", version = 
"0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_picking = { path = "../bevy_picking", version = "0.16.0-dev", optional = true } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev", optional = true } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", ] } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev", optional = true } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } # other bytemuck = { version = "1", features = ["derive", "must_cast"] } fixedbitset = "0.5" -guillotiere = "0.6.0" -thiserror = { version = "2", default-features = false } derive_more = { version = "1", default-features = false, features = ["from"] } -rectangle-pack = "0.4" bitflags = "2.3" radsort = "0.1" nonmax = "0.5" -serde = { version = "1", features = ["derive"], optional = true } +tracing = { version = "0.1", default-features = false, features = ["std"] } [lints] workspace = true diff --git a/crates/bevy_sprite/LICENSE-APACHE b/crates/bevy_sprite/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_sprite/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND 
CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_sprite/LICENSE-MIT b/crates/bevy_sprite/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_sprite/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_sprite/src/bundle.rs b/crates/bevy_sprite/src/bundle.rs deleted file mode 100644 index fdc2f8ed515d1..0000000000000 --- a/crates/bevy_sprite/src/bundle.rs +++ /dev/null @@ -1,31 +0,0 @@ -#![expect(deprecated)] -use crate::Sprite; -use bevy_ecs::bundle::Bundle; -use bevy_render::{ - sync_world::SyncToRenderWorld, - view::{InheritedVisibility, ViewVisibility, Visibility}, -}; -use bevy_transform::components::{GlobalTransform, Transform}; - -/// A [`Bundle`] of components for drawing a single sprite from an image. -#[derive(Bundle, Clone, Debug, Default)] -#[deprecated( - since = "0.15.0", - note = "Use the `Sprite` component instead. 
Inserting it will now also insert `Transform` and `Visibility` automatically." -)] -pub struct SpriteBundle { - /// Specifies the rendering properties of the sprite, such as color tint and flip. - pub sprite: Sprite, - /// The local transform of the sprite, relative to its parent. - pub transform: Transform, - /// The absolute transform of the sprite. This should generally not be written to directly. - pub global_transform: GlobalTransform, - /// User indication of whether an entity is visible - pub visibility: Visibility, - /// Inherited visibility of an entity. - pub inherited_visibility: InheritedVisibility, - /// Algorithmically-computed indication of whether an entity is visible and should be extracted for rendering - pub view_visibility: ViewVisibility, - /// Marker component that indicates that its entity needs to be synchronized to the render world - pub sync: SyncToRenderWorld, -} diff --git a/crates/bevy_sprite/src/lib.rs b/crates/bevy_sprite/src/lib.rs index 39204eb4cc9e0..37d4d2d6e48ef 100644 --- a/crates/bevy_sprite/src/lib.rs +++ b/crates/bevy_sprite/src/lib.rs @@ -10,78 +10,60 @@ extern crate alloc; -mod bundle; -mod dynamic_texture_atlas_builder; mod mesh2d; #[cfg(feature = "bevy_sprite_picking_backend")] mod picking_backend; mod render; mod sprite; -mod texture_atlas; -mod texture_atlas_builder; mod texture_slice; /// The sprite prelude. /// /// This includes the most common types in this crate, re-exported for your convenience. 
-#[expect(deprecated)] pub mod prelude { + #[cfg(feature = "bevy_sprite_picking_backend")] + #[doc(hidden)] + pub use crate::picking_backend::{ + SpritePickingCamera, SpritePickingMode, SpritePickingPlugin, SpritePickingSettings, + }; #[doc(hidden)] pub use crate::{ - bundle::SpriteBundle, sprite::{Sprite, SpriteImageMode}, - texture_atlas::{TextureAtlas, TextureAtlasLayout, TextureAtlasSources}, texture_slice::{BorderRect, SliceScaleMode, TextureSlice, TextureSlicer}, - ColorMaterial, ColorMesh2dBundle, MeshMaterial2d, TextureAtlasBuilder, + ColorMaterial, MeshMaterial2d, ScalingMode, }; } -use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -pub use bundle::*; -pub use dynamic_texture_atlas_builder::*; pub use mesh2d::*; #[cfg(feature = "bevy_sprite_picking_backend")] pub use picking_backend::*; pub use render::*; pub use sprite::*; -pub use texture_atlas::*; -pub use texture_atlas_builder::*; pub use texture_slice::*; use bevy_app::prelude::*; -use bevy_asset::{load_internal_asset, AssetApp, Assets, Handle}; -use bevy_core_pipeline::core_2d::Transparent2d; -use bevy_ecs::{prelude::*, query::QueryItem}; -use bevy_image::Image; +use bevy_asset::{load_internal_asset, weak_handle, AssetEvents, Assets, Handle}; +use bevy_core_pipeline::core_2d::{AlphaMask2d, Opaque2d, Transparent2d}; +use bevy_ecs::prelude::*; +use bevy_image::{prelude::*, TextureAtlasPlugin}; use bevy_render::{ - extract_component::{ExtractComponent, ExtractComponentPlugin}, + batching::sort_binned_render_phase, mesh::{Mesh, Mesh2d, MeshAabb}, primitives::Aabb, render_phase::AddRenderCommand, render_resource::{Shader, SpecializedRenderPipelines}, - view::{self, NoFrustumCulling, VisibilityClass, VisibilitySystems}, + view::{NoFrustumCulling, VisibilitySystems}, ExtractSchedule, Render, RenderApp, RenderSet, }; /// Adds support for 2D sprite rendering. -pub struct SpritePlugin { - /// Whether to add the sprite picking backend to the app. 
- #[cfg(feature = "bevy_sprite_picking_backend")] - pub add_picking: bool, -} +#[derive(Default)] +pub struct SpritePlugin; -impl Default for SpritePlugin { - fn default() -> Self { - Self { - #[cfg(feature = "bevy_sprite_picking_backend")] - add_picking: true, - } - } -} - -pub const SPRITE_SHADER_HANDLE: Handle = Handle::weak_from_u128(2763343953151597127); +pub const SPRITE_SHADER_HANDLE: Handle = + weak_handle!("ed996613-54c0-49bd-81be-1c2d1a0d03c2"); pub const SPRITE_VIEW_BINDINGS_SHADER_HANDLE: Handle = - Handle::weak_from_u128(8846920112458963210); + weak_handle!("43947210-8df6-459a-8f2a-12f350d174cc"); /// System set for sprite rendering. #[derive(Debug, Hash, PartialEq, Eq, Clone, SystemSet)] @@ -90,16 +72,6 @@ pub enum SpriteSystem { ComputeSlices, } -/// A component that marks entities that aren't themselves sprites but become -/// sprites during rendering. -/// -/// Right now, this is used for `Text`. -#[derive(Component, Reflect, Clone, Copy, Debug, Default)] -#[reflect(Component, Default, Debug)] -#[require(VisibilityClass)] -#[component(on_add = view::add_visibility_class::)] -pub struct SpriteSource; - impl Plugin for SpritePlugin { fn build(&self, app: &mut App) { load_internal_asset!( @@ -114,26 +86,23 @@ impl Plugin for SpritePlugin { "render/sprite_view_bindings.wgsl", Shader::from_wgsl ); - app.init_asset::() - .register_asset_reflect::() - .register_type::() + + if !app.is_plugin_added::() { + app.add_plugins(TextureAtlasPlugin); + } + + app.register_type::() .register_type::() .register_type::() .register_type::() - .register_type::() .register_type::() - .register_type::() - .add_plugins(( - Mesh2dRenderPlugin, - ColorMaterialPlugin, - ExtractComponentPlugin::::default(), - )) + .add_plugins((Mesh2dRenderPlugin, ColorMaterialPlugin)) .add_systems( PostUpdate, ( calculate_bounds_2d.in_set(VisibilitySystems::CalculateBounds), ( - compute_slices_on_asset_event, + compute_slices_on_asset_event.before(AssetEvents), 
compute_slices_on_sprite_change, ) .in_set(SpriteSystem::ComputeSlices), @@ -141,9 +110,7 @@ impl Plugin for SpritePlugin { ); #[cfg(feature = "bevy_sprite_picking_backend")] - if self.add_picking { - app.add_plugins(SpritePickingPlugin); - } + app.add_plugins(SpritePickingPlugin); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app @@ -151,6 +118,7 @@ impl Plugin for SpritePlugin { .init_resource::>() .init_resource::() .init_resource::() + .init_resource::() .init_resource::() .add_render_command::() .add_systems( @@ -168,6 +136,8 @@ impl Plugin for SpritePlugin { .ambiguous_with(queue_material2d_meshes::), prepare_sprite_image_bind_groups.in_set(RenderSet::PrepareBindGroups), prepare_sprite_view_bind_groups.in_set(RenderSet::PrepareBindGroups), + sort_binned_render_phase::.in_set(RenderSet::PhaseSort), + sort_binned_render_phase::.in_set(RenderSet::PhaseSort), ), ); }; @@ -175,7 +145,9 @@ impl Plugin for SpritePlugin { fn finish(&self, app: &mut App) { if let Some(render_app) = app.get_sub_app_mut(RenderApp) { - render_app.init_resource::(); + render_app + .init_resource::() + .init_resource::(); } } } @@ -183,7 +155,7 @@ impl Plugin for SpritePlugin { /// System calculating and inserting an [`Aabb`] component to entities with either: /// - a `Mesh2d` component, /// - a `Sprite` and `Handle` components, -/// and without a [`NoFrustumCulling`] component. +/// and without a [`NoFrustumCulling`] component. /// /// Used in system set [`VisibilitySystems::CalculateBounds`]. 
pub fn calculate_bounds_2d( @@ -229,18 +201,6 @@ pub fn calculate_bounds_2d( } } -impl ExtractComponent for SpriteSource { - type QueryData = (); - - type QueryFilter = With; - - type Out = SpriteSource; - - fn extract_component(_: QueryItem<'_, Self::QueryData>) -> Option { - Some(SpriteSource) - } -} - #[cfg(test)] mod test { @@ -372,7 +332,7 @@ mod test { .world_mut() .spawn(Sprite { rect: Some(Rect::new(0., 0., 0.5, 1.)), - anchor: Anchor::TopRight, + anchor: Anchor::TOP_RIGHT, image: image_handle, ..default() }) diff --git a/crates/bevy_sprite/src/mesh2d/color_material.rs b/crates/bevy_sprite/src/mesh2d/color_material.rs index 8c3267c40ba4d..83b69307769cb 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.rs +++ b/crates/bevy_sprite/src/mesh2d/color_material.rs @@ -1,16 +1,14 @@ -#![expect(deprecated)] - -use crate::{AlphaMode2d, Material2d, Material2dPlugin, MaterialMesh2dBundle}; +use crate::{AlphaMode2d, Material2d, Material2dPlugin}; use bevy_app::{App, Plugin}; -use bevy_asset::{load_internal_asset, Asset, AssetApp, Assets, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, Asset, AssetApp, Assets, Handle}; use bevy_color::{Alpha, Color, ColorToComponents, LinearRgba}; use bevy_image::Image; -use bevy_math::Vec4; +use bevy_math::{Affine2, Mat3, Vec4}; use bevy_reflect::prelude::*; use bevy_render::{render_asset::RenderAssets, render_resource::*, texture::GpuImage}; pub const COLOR_MATERIAL_SHADER_HANDLE: Handle = - Handle::weak_from_u128(3253086872234592509); + weak_handle!("92e0e6e9-ed0b-4db3-89ab-5f65d3678250"); #[derive(Default)] pub struct ColorMaterialPlugin; @@ -42,11 +40,12 @@ impl Plugin for ColorMaterialPlugin { /// A [2d material](Material2d) that renders [2d meshes](crate::Mesh2d) with a texture tinted by a uniform color #[derive(Asset, AsBindGroup, Reflect, Debug, Clone)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] #[uniform(0, ColorMaterialUniform)] pub struct ColorMaterial { pub color: Color, pub 
alpha_mode: AlphaMode2d, + pub uv_transform: Affine2, #[texture(1)] #[sampler(2)] pub texture: Option>, @@ -63,6 +62,7 @@ impl Default for ColorMaterial { fn default() -> Self { ColorMaterial { color: Color::WHITE, + uv_transform: Affine2::default(), texture: None, // TODO should probably default to AlphaMask once supported? alpha_mode: AlphaMode2d::Blend, @@ -119,6 +119,7 @@ impl ColorMaterialFlags { #[derive(Clone, Default, ShaderType)] pub struct ColorMaterialUniform { pub color: Vec4, + pub uv_transform: Mat3, pub flags: u32, pub alpha_cutoff: f32, } @@ -142,6 +143,7 @@ impl AsBindGroupShaderType for ColorMaterial { }; ColorMaterialUniform { color: LinearRgba::from(self.color).to_f32_array().into(), + uv_transform: self.uv_transform.into(), flags: flags.bits(), alpha_cutoff, } @@ -157,10 +159,3 @@ impl Material2d for ColorMaterial { self.alpha_mode } } - -/// A component bundle for entities with a [`Mesh2d`](crate::Mesh2d) and a [`ColorMaterial`]. -#[deprecated( - since = "0.15.0", - note = "Use the `Mesh3d` and `MeshMaterial3d` components instead. Inserting them will now also insert the other components required by them automatically." -)] -pub type ColorMesh2dBundle = MaterialMesh2dBundle; diff --git a/crates/bevy_sprite/src/mesh2d/color_material.wgsl b/crates/bevy_sprite/src/mesh2d/color_material.wgsl index a166ce453099f..a2dbe4e055937 100644 --- a/crates/bevy_sprite/src/mesh2d/color_material.wgsl +++ b/crates/bevy_sprite/src/mesh2d/color_material.wgsl @@ -9,6 +9,7 @@ struct ColorMaterial { color: vec4, + uv_transform: mat3x3, // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. 
flags: u32, alpha_cutoff: f32, @@ -34,8 +35,10 @@ fn fragment( output_color = output_color * mesh.color; #endif + let uv = (material.uv_transform * vec3(mesh.uv, 1.0)).xy; + if ((material.flags & COLOR_MATERIAL_FLAGS_TEXTURE_BIT) != 0u) { - output_color = output_color * textureSample(texture, texture_sampler, mesh.uv); + output_color = output_color * textureSample(texture, texture_sampler, uv); } output_color = alpha_discard(material, output_color); diff --git a/crates/bevy_sprite/src/mesh2d/material.rs b/crates/bevy_sprite/src/mesh2d/material.rs index fcc830301c554..e34595f138eb5 100644 --- a/crates/bevy_sprite/src/mesh2d/material.rs +++ b/crates/bevy_sprite/src/mesh2d/material.rs @@ -1,22 +1,29 @@ -#![expect(deprecated)] - use crate::{ DrawMesh2d, Mesh2d, Mesh2dPipeline, Mesh2dPipelineKey, RenderMesh2dInstances, - SetMesh2dBindGroup, SetMesh2dViewBindGroup, + SetMesh2dBindGroup, SetMesh2dViewBindGroup, ViewKeyCache, ViewSpecializationTicks, }; -use bevy_app::{App, Plugin}; -use bevy_asset::{Asset, AssetApp, AssetId, AssetServer, Handle}; +use bevy_app::{App, Plugin, PostUpdate}; +use bevy_asset::prelude::AssetChanged; +use bevy_asset::{AsAssetId, Asset, AssetApp, AssetEvents, AssetId, AssetServer, Handle}; use bevy_core_pipeline::{ - core_2d::{AlphaMask2d, AlphaMask2dBinKey, Opaque2d, Opaque2dBinKey, Transparent2d}, - tonemapping::{DebandDither, Tonemapping}, + core_2d::{ + AlphaMask2d, AlphaMask2dBinKey, BatchSetKey2d, Opaque2d, Opaque2dBinKey, Transparent2d, + }, + tonemapping::Tonemapping, }; use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::Tick; +use bevy_ecs::system::SystemChangeTick; use bevy_ecs::{ prelude::*, system::{lifetimeless::SRes, SystemParamItem}, }; use bevy_math::FloatOrd; +use bevy_platform::collections::HashMap; use bevy_reflect::{prelude::ReflectDefault, Reflect}; +use bevy_render::camera::extract_cameras; +use bevy_render::render_phase::{DrawFunctionId, InputUniformIndex}; +use 
bevy_render::render_resource::CachedRenderPipelineId; use bevy_render::view::RenderVisibleEntities; use bevy_render::{ mesh::{MeshVertexBufferLayoutRef, RenderMesh}, @@ -29,19 +36,19 @@ use bevy_render::{ ViewBinnedRenderPhases, ViewSortedRenderPhases, }, render_resource::{ - AsBindGroup, AsBindGroupError, BindGroup, BindGroupId, BindGroupLayout, PipelineCache, - RenderPipelineDescriptor, Shader, ShaderRef, SpecializedMeshPipeline, + AsBindGroup, AsBindGroupError, BindGroup, BindGroupId, BindGroupLayout, BindingResources, + PipelineCache, RenderPipelineDescriptor, Shader, ShaderRef, SpecializedMeshPipeline, SpecializedMeshPipelineError, SpecializedMeshPipelines, }, renderer::RenderDevice, - view::{ExtractedView, InheritedVisibility, Msaa, ViewVisibility, Visibility}, + sync_world::{MainEntity, MainEntityHashMap}, + view::{ExtractedView, ViewVisibility}, Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; -use bevy_render::{render_resource::BindingResources, sync_world::MainEntityHashMap}; -use bevy_transform::components::{GlobalTransform, Transform}; -use bevy_utils::tracing::error; +use bevy_utils::Parallel; use core::{hash::Hash, marker::PhantomData}; use derive_more::derive::From; +use tracing::error; /// Materials are used alongside [`Material2dPlugin`], [`Mesh2d`], and [`MeshMaterial2d`] /// to spawn entities that are rendered with a specific [`Material2d`] type. They serve as an easy to use high level @@ -139,7 +146,10 @@ pub trait Material2d: AsBindGroup + Asset + Clone + Sized { } /// Customizes the default [`RenderPipelineDescriptor`]. - #[allow(unused_variables)] + #[expect( + unused_variables, + reason = "The parameters here are intentionally unused by the default implementation; however, putting underscores here will result in the underscores being copied by rust-analyzer's tab completion." 
+ )] #[inline] fn specialize( descriptor: &mut RenderPipelineDescriptor, @@ -178,8 +188,8 @@ pub trait Material2d: AsBindGroup + Asset + Clone + Sized { /// ``` /// /// [`MeshMaterial2d`]: crate::MeshMaterial2d -#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, PartialEq, Eq, From)] -#[reflect(Component, Default)] +#[derive(Component, Clone, Debug, Deref, DerefMut, Reflect, From)] +#[reflect(Component, Default, Clone)] pub struct MeshMaterial2d(pub Handle); impl Default for MeshMaterial2d { @@ -188,6 +198,14 @@ impl Default for MeshMaterial2d { } } +impl PartialEq for MeshMaterial2d { + fn eq(&self, other: &Self) -> bool { + self.0 == other.0 + } +} + +impl Eq for MeshMaterial2d {} + impl From> for AssetId { fn from(material: MeshMaterial2d) -> Self { material.id() @@ -200,13 +218,21 @@ impl From<&MeshMaterial2d> for AssetId { } } +impl AsAssetId for MeshMaterial2d { + type Asset = M; + + fn as_asset_id(&self) -> AssetId { + self.id() + } +} + /// Sets how a 2d material's base color alpha channel is used for transparency. /// Currently, this only works with [`Mesh2d`]. Sprites are always transparent. /// /// This is very similar to [`AlphaMode`](bevy_render::alpha::AlphaMode) but this only applies to 2d meshes. /// We use a separate type because 2d doesn't support all the transparency modes that 3d does. #[derive(Debug, Default, Reflect, Copy, Clone, PartialEq)] -#[reflect(Default, Debug)] +#[reflect(Default, Debug, Clone)] pub enum AlphaMode2d { /// Base color alpha values are overridden to be fully opaque (1.0). 
#[default] @@ -242,22 +268,41 @@ where { fn build(&self, app: &mut App) { app.init_asset::() + .init_resource::>() .register_type::>() - .add_plugins(RenderAssetPlugin::>::default()); + .add_plugins(RenderAssetPlugin::>::default()) + .add_systems( + PostUpdate, + check_entities_needing_specialization::.after(AssetEvents), + ); if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app + .init_resource::>() + .init_resource::>() .add_render_command::>() .add_render_command::>() .add_render_command::>() .init_resource::>() .init_resource::>>() - .add_systems(ExtractSchedule, extract_mesh_materials_2d::) + .add_systems( + ExtractSchedule, + ( + extract_entities_needs_specialization::.after(extract_cameras), + extract_mesh_materials_2d::, + ), + ) .add_systems( Render, - queue_material2d_meshes:: - .in_set(RenderSet::QueueMeshes) - .after(prepare_assets::>), + ( + specialize_material2d_meshes:: + .in_set(RenderSet::PrepareMeshes) + .after(prepare_assets::>) + .after(prepare_assets::), + queue_material2d_meshes:: + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::>), + ), ); } } @@ -278,17 +323,58 @@ impl Default for RenderMaterial2dInstances { } } -fn extract_mesh_materials_2d( +pub fn extract_mesh_materials_2d( mut material_instances: ResMut>, - query: Extract), With>>, + changed_meshes_query: Extract< + Query< + (Entity, &ViewVisibility, &MeshMaterial2d), + Or<(Changed, Changed>)>, + >, + >, + mut removed_visibilities_query: Extract>, + mut removed_materials_query: Extract>>, ) { - material_instances.clear(); - - for (entity, view_visibility, material) in &query { + for (entity, view_visibility, material) in &changed_meshes_query { if view_visibility.get() { - material_instances.insert(entity.into(), material.id()); + add_mesh_instance(entity, material, &mut material_instances); + } else { + remove_mesh_instance(entity, &mut material_instances); } } + + for entity in removed_visibilities_query + .read() + .chain(removed_materials_query.read()) + { 
+ // Only queue a mesh for removal if we didn't pick it up above. + // It's possible that a necessary component was removed and re-added in + // the same frame. + if !changed_meshes_query.contains(entity) { + remove_mesh_instance(entity, &mut material_instances); + } + } + + // Adds or updates a mesh instance in the [`RenderMaterial2dInstances`] + // array. + fn add_mesh_instance( + entity: Entity, + material: &MeshMaterial2d, + material_instances: &mut RenderMaterial2dInstances, + ) where + M: Material2d, + { + material_instances.insert(entity.into(), material.id()); + } + + // Removes a mesh instance from the [`RenderMaterial2dInstances`] array. + fn remove_mesh_instance( + entity: Entity, + material_instances: &mut RenderMaterial2dInstances, + ) where + M: Material2d, + { + material_instances.remove(&MainEntity::from(entity)); + } } /// Render pipeline data for a given [`Material2d`] @@ -468,29 +554,148 @@ pub const fn tonemapping_pipeline_key(tonemapping: Tonemapping) -> Mesh2dPipelin } } -#[allow(clippy::too_many_arguments)] -pub fn queue_material2d_meshes( - opaque_draw_functions: Res>, - alpha_mask_draw_functions: Res>, - transparent_draw_functions: Res>, +pub fn extract_entities_needs_specialization( + entities_needing_specialization: Extract>>, + mut entity_specialization_ticks: ResMut>, + mut removed_mesh_material_components: Extract>>, + mut specialized_material2d_pipeline_cache: ResMut>, + views: Query<&MainEntity, With>, + ticks: SystemChangeTick, +) where + M: Material2d, +{ + // Clean up any despawned entities, we do this first in case the removed material was re-added + // the same frame, thus will appear both in the removed components list and have been added to + // the `EntitiesNeedingSpecialization` collection by triggering the `Changed` filter + for entity in removed_mesh_material_components.read() { + entity_specialization_ticks.remove(&MainEntity::from(entity)); + for view in views { + if let Some(cache) = 
specialized_material2d_pipeline_cache.get_mut(view) { + cache.remove(&MainEntity::from(entity)); + } + } + } + for entity in entities_needing_specialization.iter() { + // Update the entity's specialization tick with this run's tick + entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); + } +} + +#[derive(Clone, Resource, Deref, DerefMut, Debug)] +pub struct EntitiesNeedingSpecialization { + #[deref] + pub entities: Vec, + _marker: PhantomData, +} + +impl Default for EntitiesNeedingSpecialization { + fn default() -> Self { + Self { + entities: Default::default(), + _marker: Default::default(), + } + } +} + +#[derive(Clone, Resource, Deref, DerefMut, Debug)] +pub struct EntitySpecializationTicks { + #[deref] + pub entities: MainEntityHashMap, + _marker: PhantomData, +} + +impl Default for EntitySpecializationTicks { + fn default() -> Self { + Self { + entities: MainEntityHashMap::default(), + _marker: Default::default(), + } + } +} + +/// Stores the [`SpecializedMaterial2dViewPipelineCache`] for each view. +#[derive(Resource, Deref, DerefMut)] +pub struct SpecializedMaterial2dPipelineCache { + // view_entity -> view pipeline cache + #[deref] + map: MainEntityHashMap>, + marker: PhantomData, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. 
+#[derive(Deref, DerefMut)] +pub struct SpecializedMaterial2dViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, + marker: PhantomData, +} + +impl Default for SpecializedMaterial2dPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, + } + } +} + +impl Default for SpecializedMaterial2dViewPipelineCache { + fn default() -> Self { + Self { + map: HashMap::default(), + marker: PhantomData, + } + } +} + +pub fn check_entities_needing_specialization( + needs_specialization: Query< + Entity, + ( + Or<( + Changed, + AssetChanged, + Changed>, + AssetChanged>, + )>, + With>, + ), + >, + mut par_local: Local>>, + mut entities_needing_specialization: ResMut>, +) where + M: Material2d, +{ + entities_needing_specialization.clear(); + + needs_specialization + .par_iter() + .for_each(|entity| par_local.borrow_local_mut().push(entity)); + + par_local.drain_into(&mut entities_needing_specialization); +} + +pub fn specialize_material2d_meshes( material2d_pipeline: Res>, mut pipelines: ResMut>>, pipeline_cache: Res, - render_meshes: Res>, - render_materials: Res>>, + (render_meshes, render_materials): ( + Res>, + Res>>, + ), mut render_mesh_instances: ResMut, render_material_instances: Res>, - mut transparent_render_phases: ResMut>, - mut opaque_render_phases: ResMut>, - mut alpha_mask_render_phases: ResMut>, - views: Query<( - Entity, - &ExtractedView, - &RenderVisibleEntities, - &Msaa, - Option<&Tonemapping>, - Option<&DebandDither>, - )>, + transparent_render_phases: Res>, + opaque_render_phases: Res>, + alpha_mask_render_phases: Res>, + views: Query<(&MainEntity, &ExtractedView, &RenderVisibleEntities)>, + view_key_cache: Res, + entity_specialization_ticks: Res>, + view_specialization_ticks: Res, + ticks: SystemChangeTick, + mut specialized_material_pipeline_cache: ResMut>, ) where M::Data: PartialEq + Eq + Hash + Clone, { @@ -498,47 +703,48 @@ pub fn 
queue_material2d_meshes( return; } - for (view_entity, view, visible_entities, msaa, tonemapping, dither) in &views { - let Some(transparent_phase) = transparent_render_phases.get_mut(&view_entity) else { + for (view_entity, view, visible_entities) in &views { + if !transparent_render_phases.contains_key(&view.retained_view_entity) + && !opaque_render_phases.contains_key(&view.retained_view_entity) + && !alpha_mask_render_phases.contains_key(&view.retained_view_entity) + { continue; - }; - let Some(opaque_phase) = opaque_render_phases.get_mut(&view_entity) else { - continue; - }; - let Some(alpha_mask_phase) = alpha_mask_render_phases.get_mut(&view_entity) else { + } + + let Some(view_key) = view_key_cache.get(view_entity) else { continue; }; - let draw_transparent_2d = transparent_draw_functions.read().id::>(); - let draw_opaque_2d = opaque_draw_functions.read().id::>(); - let draw_alpha_mask_2d = alpha_mask_draw_functions.read().id::>(); - - let mut view_key = Mesh2dPipelineKey::from_msaa_samples(msaa.samples()) - | Mesh2dPipelineKey::from_hdr(view.hdr); + let view_tick = view_specialization_ticks.get(view_entity).unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(*view_entity) + .or_default(); - if !view.hdr { - if let Some(tonemapping) = tonemapping { - view_key |= Mesh2dPipelineKey::TONEMAP_IN_SHADER; - view_key |= tonemapping_pipeline_key(*tonemapping); - } - if let Some(DebandDither::Enabled) = dither { - view_key |= Mesh2dPipelineKey::DEBAND_DITHER; - } - } - for (render_entity, visible_entity) in visible_entities.iter::() { + for (_, visible_entity) in visible_entities.iter::() { let Some(material_asset_id) = render_material_instances.get(visible_entity) else { continue; }; let Some(mesh_instance) = render_mesh_instances.get_mut(visible_entity) else { continue; }; + let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); + let last_specialized_tick = 
view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } let Some(material_2d) = render_materials.get(*material_asset_id) else { continue; }; let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { continue; }; - let mesh_key = view_key + let mesh_key = *view_key | Mesh2dPipelineKey::from_primitive_topology(mesh.primitive_topology()) | material_2d.properties.mesh_pipeline_key_bits; @@ -560,40 +766,133 @@ pub fn queue_material2d_meshes( } }; + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); + } + } +} + +pub fn queue_material2d_meshes( + (render_meshes, render_materials): ( + Res>, + Res>>, + ), + mut render_mesh_instances: ResMut, + render_material_instances: Res>, + mut transparent_render_phases: ResMut>, + mut opaque_render_phases: ResMut>, + mut alpha_mask_render_phases: ResMut>, + views: Query<(&MainEntity, &ExtractedView, &RenderVisibleEntities)>, + specialized_material_pipeline_cache: ResMut>, +) where + M::Data: PartialEq + Eq + Hash + Clone, +{ + if render_material_instances.is_empty() { + return; + } + + for (view_entity, view, visible_entities) in &views { + let Some(view_specialized_material_pipeline_cache) = + specialized_material_pipeline_cache.get(view_entity) + else { + continue; + }; + + let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) + else { + continue; + }; + let Some(opaque_phase) = opaque_render_phases.get_mut(&view.retained_view_entity) else { + continue; + }; + let Some(alpha_mask_phase) = alpha_mask_render_phases.get_mut(&view.retained_view_entity) + else { + continue; + }; + + for (render_entity, visible_entity) in visible_entities.iter::() { + let 
Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. + if opaque_phase.validate_cached_entity(*visible_entity, current_change_tick) + || alpha_mask_phase.validate_cached_entity(*visible_entity, current_change_tick) + { + continue; + } + + let Some(material_asset_id) = render_material_instances.get(visible_entity) else { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.get_mut(visible_entity) else { + continue; + }; + let Some(material_2d) = render_materials.get(*material_asset_id) else { + continue; + }; + let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { + continue; + }; + mesh_instance.material_bind_group_id = material_2d.get_bind_group_id(); let mesh_z = mesh_instance.transforms.world_from_local.translation.z; + // We don't support multidraw yet for 2D meshes, so we use this + // custom logic to generate the `BinnedRenderPhaseType` instead of + // `BinnedRenderPhaseType::mesh`, which can return + // `BinnedRenderPhaseType::MultidrawableMesh` if the hardware + // supports multidraw. 
+ let binned_render_phase_type = if mesh_instance.automatic_batching { + BinnedRenderPhaseType::BatchableMesh + } else { + BinnedRenderPhaseType::UnbatchableMesh + }; + match material_2d.properties.alpha_mode { AlphaMode2d::Opaque => { let bin_key = Opaque2dBinKey { pipeline: pipeline_id, - draw_function: draw_opaque_2d, + draw_function: material_2d.properties.draw_function_id, asset_id: mesh_instance.mesh_asset_id.into(), material_bind_group_id: material_2d.get_bind_group_id().0, }; opaque_phase.add( + BatchSetKey2d { + indexed: mesh.indexed(), + }, bin_key, (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.automatic_batching), + InputUniformIndex::default(), + binned_render_phase_type, + current_change_tick, ); } AlphaMode2d::Mask(_) => { let bin_key = AlphaMask2dBinKey { pipeline: pipeline_id, - draw_function: draw_alpha_mask_2d, + draw_function: material_2d.properties.draw_function_id, asset_id: mesh_instance.mesh_asset_id.into(), material_bind_group_id: material_2d.get_bind_group_id().0, }; alpha_mask_phase.add( + BatchSetKey2d { + indexed: mesh.indexed(), + }, bin_key, (*render_entity, *visible_entity), - BinnedRenderPhaseType::mesh(mesh_instance.automatic_batching), + InputUniformIndex::default(), + binned_render_phase_type, + current_change_tick, ); } AlphaMode2d::Blend => { transparent_phase.add(Transparent2d { entity: (*render_entity, *visible_entity), - draw_function: draw_transparent_2d, + draw_function: material_2d.properties.draw_function_id, pipeline: pipeline_id, // NOTE: Back-to-front ordering for transparent with ascending sort means far should have the // lowest sort key and getting closer should increase. 
As we have @@ -603,6 +902,8 @@ pub fn queue_material2d_meshes( // Batching is done in batch_and_prepare_render_phase batch_range: 0..1, extra_index: PhaseItemExtraIndex::None, + extracted_index: usize::MAX, + indexed: mesh.indexed(), }); } } @@ -626,6 +927,7 @@ pub struct Material2dProperties { /// These are precalculated so that we can just "or" them together in /// [`queue_material2d_meshes`]. pub mesh_pipeline_key_bits: Mesh2dPipelineKey, + pub draw_function_id: DrawFunctionId, } /// Data prepared for a [`Material2d`] instance. @@ -645,17 +947,42 @@ impl PreparedMaterial2d { impl RenderAsset for PreparedMaterial2d { type SourceAsset = M; - type Param = (SRes, SRes>, M::Param); + type Param = ( + SRes, + SRes>, + SRes>, + SRes>, + SRes>, + M::Param, + ); fn prepare_asset( material: Self::SourceAsset, _: AssetId, - (render_device, pipeline, material_param): &mut SystemParamItem, + ( + render_device, + pipeline, + opaque_draw_functions, + alpha_mask_draw_functions, + transparent_draw_functions, + material_param, + ): &mut SystemParamItem, ) -> Result> { match material.as_bind_group(&pipeline.material2d_layout, render_device, material_param) { Ok(prepared) => { let mut mesh_pipeline_key_bits = Mesh2dPipelineKey::empty(); mesh_pipeline_key_bits.insert(alpha_mode_pipeline_key(material.alpha_mode())); + + let draw_function_id = match material.alpha_mode() { + AlphaMode2d::Opaque => opaque_draw_functions.read().id::>(), + AlphaMode2d::Mask(_) => { + alpha_mask_draw_functions.read().id::>() + } + AlphaMode2d::Blend => { + transparent_draw_functions.read().id::>() + } + }; + Ok(PreparedMaterial2d { bindings: prepared.bindings, bind_group: prepared.bind_group, @@ -664,6 +991,7 @@ impl RenderAsset for PreparedMaterial2d { depth_bias: material.depth_bias(), alpha_mode: material.alpha_mode(), mesh_pipeline_key_bits, + draw_function_id, }, }) } @@ -674,36 +1002,3 @@ impl RenderAsset for PreparedMaterial2d { } } } - -/// A component bundle for entities with a [`Mesh2d`] and a 
[`MeshMaterial2d`]. -#[derive(Bundle, Clone)] -#[deprecated( - since = "0.15.0", - note = "Use the `Mesh2d` and `MeshMaterial2d` components instead. Inserting them will now also insert the other components required by them automatically." -)] -pub struct MaterialMesh2dBundle { - pub mesh: Mesh2d, - pub material: MeshMaterial2d, - pub transform: Transform, - pub global_transform: GlobalTransform, - /// User indication of whether an entity is visible - pub visibility: Visibility, - // Inherited visibility of an entity. - pub inherited_visibility: InheritedVisibility, - // Indication of whether an entity is visible in any view. - pub view_visibility: ViewVisibility, -} - -impl Default for MaterialMesh2dBundle { - fn default() -> Self { - Self { - mesh: Default::default(), - material: Default::default(), - transform: Default::default(), - global_transform: Default::default(), - visibility: Default::default(), - inherited_visibility: Default::default(), - view_visibility: Default::default(), - } - } -} diff --git a/crates/bevy_sprite/src/mesh2d/mesh.rs b/crates/bevy_sprite/src/mesh2d/mesh.rs index bc6a5e9556e94..5822d47ed6231 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh.rs +++ b/crates/bevy_sprite/src/mesh2d/mesh.rs @@ -1,7 +1,8 @@ use bevy_app::Plugin; -use bevy_asset::{load_internal_asset, AssetId, Handle}; +use bevy_asset::{load_internal_asset, weak_handle, AssetId, Handle}; -use crate::Material2dBindGroupId; +use crate::{tonemapping_pipeline_key, Material2dBindGroupId}; +use bevy_core_pipeline::tonemapping::DebandDither; use bevy_core_pipeline::{ core_2d::{AlphaMask2d, Camera2d, Opaque2d, Transparent2d, CORE_2D_DEPTH_FORMAT}, tonemapping::{ @@ -9,6 +10,8 @@ use bevy_core_pipeline::{ }, }; use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::component::Tick; +use bevy_ecs::system::SystemChangeTick; use bevy_ecs::{ prelude::*, query::ROQueryItem, @@ -16,9 +19,12 @@ use bevy_ecs::{ }; use bevy_image::{BevyDefault, Image, ImageSampler, TextureFormatPixelInfo}; use 
bevy_math::{Affine3, Vec4}; +use bevy_render::mesh::MeshTag; +use bevy_render::prelude::Msaa; +use bevy_render::RenderSet::PrepareAssets; use bevy_render::{ batching::{ - gpu_preprocessing::IndirectParameters, + gpu_preprocessing::IndirectParametersCpuMetadata, no_gpu_preprocessing::{ self, batch_and_prepare_binned_render_phase, batch_and_prepare_sorted_render_phase, write_batched_instance_buffer, BatchedInstanceBuffer, @@ -32,7 +38,8 @@ use bevy_render::{ }, render_asset::RenderAssets, render_phase::{ - PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, TrackedRenderPass, + sweep_old_entities, PhaseItem, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, + TrackedRenderPass, }, render_resource::{binding_types::uniform_buffer, *}, renderer::{RenderDevice, RenderQueue}, @@ -44,19 +51,26 @@ use bevy_render::{ Extract, ExtractSchedule, Render, RenderApp, RenderSet, }; use bevy_transform::components::GlobalTransform; -use bevy_utils::tracing::error; use nonmax::NonMaxU32; +use tracing::error; #[derive(Default)] pub struct Mesh2dRenderPlugin; -pub const MESH2D_VERTEX_OUTPUT: Handle = Handle::weak_from_u128(7646632476603252194); -pub const MESH2D_VIEW_TYPES_HANDLE: Handle = Handle::weak_from_u128(12677582416765805110); -pub const MESH2D_VIEW_BINDINGS_HANDLE: Handle = Handle::weak_from_u128(6901431444735842434); -pub const MESH2D_TYPES_HANDLE: Handle = Handle::weak_from_u128(8994673400261890424); -pub const MESH2D_BINDINGS_HANDLE: Handle = Handle::weak_from_u128(8983617858458862856); -pub const MESH2D_FUNCTIONS_HANDLE: Handle = Handle::weak_from_u128(4976379308250389413); -pub const MESH2D_SHADER_HANDLE: Handle = Handle::weak_from_u128(2971387252468633715); +pub const MESH2D_VERTEX_OUTPUT: Handle = + weak_handle!("71e279c7-85a0-46ac-9a76-1586cbf506d0"); +pub const MESH2D_VIEW_TYPES_HANDLE: Handle = + weak_handle!("01087b0d-91e9-46ac-8628-dfe19a7d4b83"); +pub const MESH2D_VIEW_BINDINGS_HANDLE: Handle = + 
weak_handle!("fbdd8b80-503d-4688-bcec-db29ab4620b2"); +pub const MESH2D_TYPES_HANDLE: Handle = + weak_handle!("199f2089-6e99-4348-9bb1-d82816640a7f"); +pub const MESH2D_BINDINGS_HANDLE: Handle = + weak_handle!("a7bd44cc-0580-4427-9a00-721cf386b6e4"); +pub const MESH2D_FUNCTIONS_HANDLE: Handle = + weak_handle!("0d08ff71-68c1-4017-83e2-bfc34d285c51"); +pub const MESH2D_SHADER_HANDLE: Handle = + weak_handle!("91a7602b-df95-4ea3-9d97-076abcb69d91"); impl Plugin for Mesh2dRenderPlugin { fn build(&self, app: &mut bevy_app::App) { @@ -94,12 +108,18 @@ impl Plugin for Mesh2dRenderPlugin { if let Some(render_app) = app.get_sub_app_mut(RenderApp) { render_app + .init_resource::() .init_resource::() .init_resource::>() .add_systems(ExtractSchedule, extract_mesh2d) .add_systems( Render, ( + ( + sweep_old_entities::, + sweep_old_entities::, + ) + .in_set(RenderSet::QueueSweep), batch_and_prepare_binned_render_phase:: .in_set(RenderSet::PrepareResources), batch_and_prepare_binned_render_phase:: @@ -137,7 +157,13 @@ impl Plugin for Mesh2dRenderPlugin { render_app .insert_resource(batched_instance_buffer) - .init_resource::(); + .init_resource::() + .init_resource::() + .init_resource::() + .add_systems( + Render, + check_views_need_specialization.in_set(PrepareAssets), + ); } // Load the mesh_bindings shader module here as it depends on runtime information about @@ -152,6 +178,48 @@ impl Plugin for Mesh2dRenderPlugin { } } +#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] +pub struct ViewKeyCache(MainEntityHashMap); + +#[derive(Resource, Deref, DerefMut, Default, Debug, Clone)] +pub struct ViewSpecializationTicks(MainEntityHashMap); + +pub fn check_views_need_specialization( + mut view_key_cache: ResMut, + mut view_specialization_ticks: ResMut, + views: Query<( + &MainEntity, + &ExtractedView, + &Msaa, + Option<&Tonemapping>, + Option<&DebandDither>, + )>, + ticks: SystemChangeTick, +) { + for (view_entity, view, msaa, tonemapping, dither) in &views { + let mut 
view_key = Mesh2dPipelineKey::from_msaa_samples(msaa.samples()) + | Mesh2dPipelineKey::from_hdr(view.hdr); + + if !view.hdr { + if let Some(tonemapping) = tonemapping { + view_key |= Mesh2dPipelineKey::TONEMAP_IN_SHADER; + view_key |= tonemapping_pipeline_key(*tonemapping); + } + if let Some(DebandDither::Enabled) = dither { + view_key |= Mesh2dPipelineKey::DEBAND_DITHER; + } + } + + if !view_key_cache + .get_mut(view_entity) + .is_some_and(|current_key| *current_key == view_key) + { + view_key_cache.insert(*view_entity, view_key); + view_specialization_ticks.insert(*view_entity, ticks.this_run()); + } + } +} + #[derive(Component)] pub struct Mesh2dTransforms { pub world_from_local: Affine3, @@ -169,10 +237,11 @@ pub struct Mesh2dUniform { pub local_from_world_transpose_a: [Vec4; 2], pub local_from_world_transpose_b: f32, pub flags: u32, + pub tag: u32, } -impl From<&Mesh2dTransforms> for Mesh2dUniform { - fn from(mesh_transforms: &Mesh2dTransforms) -> Self { +impl Mesh2dUniform { + fn from_components(mesh_transforms: &Mesh2dTransforms, tag: u32) -> Self { let (local_from_world_transpose_a, local_from_world_transpose_b) = mesh_transforms.world_from_local.inverse_transpose_3x3(); Self { @@ -180,6 +249,7 @@ impl From<&Mesh2dTransforms> for Mesh2dUniform { local_from_world_transpose_a, local_from_world_transpose_b, flags: mesh_transforms.flags, + tag, } } } @@ -198,12 +268,13 @@ pub struct RenderMesh2dInstance { pub mesh_asset_id: AssetId, pub material_bind_group_id: Material2dBindGroupId, pub automatic_batching: bool, + pub tag: u32, } #[derive(Default, Resource, Deref, DerefMut)] pub struct RenderMesh2dInstances(MainEntityHashMap); -#[derive(Component)] +#[derive(Component, Default)] pub struct Mesh2dMarker; pub fn extract_mesh2d( @@ -214,13 +285,14 @@ pub fn extract_mesh2d( &ViewVisibility, &GlobalTransform, &Mesh2d, + Option<&MeshTag>, Has, )>, >, ) { render_mesh_instances.clear(); - for (entity, view_visibility, transform, handle, no_automatic_batching) in &query 
{ + for (entity, view_visibility, transform, handle, tag, no_automatic_batching) in &query { if !view_visibility.get() { continue; } @@ -234,6 +306,7 @@ pub fn extract_mesh2d( mesh_asset_id: handle.0.id(), material_bind_group_id: Material2dBindGroupId::default(), automatic_batching: !no_automatic_batching, + tag: tag.map_or(0, |i| **i), }, ); } @@ -298,8 +371,8 @@ impl FromWorld for Mesh2dPipeline { let format_size = image.texture_descriptor.format.pixel_size(); render_queue.write_texture( texture.as_image_copy(), - &image.data, - ImageDataLayout { + image.data.as_ref().expect("Image has no data"), + TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: None, @@ -361,7 +434,7 @@ impl GetBatchData for Mesh2dPipeline { ) -> Option<(Self::BufferData, Option)> { let mesh_instance = mesh_instances.get(&main_entity)?; Some(( - (&mesh_instance.transforms).into(), + Mesh2dUniform::from_components(&mesh_instance.transforms, mesh_instance.tag), mesh_instance.automatic_batching.then_some(( mesh_instance.material_bind_group_id, mesh_instance.mesh_asset_id, @@ -375,15 +448,18 @@ impl GetFullBatchData for Mesh2dPipeline { fn get_binned_batch_data( (mesh_instances, _, _): &SystemParamItem, - (_entity, main_entity): (Entity, MainEntity), + main_entity: MainEntity, ) -> Option { let mesh_instance = mesh_instances.get(&main_entity)?; - Some((&mesh_instance.transforms).into()) + Some(Mesh2dUniform::from_components( + &mesh_instance.transforms, + mesh_instance.tag, + )) } fn get_index_and_compare_data( _: &SystemParamItem, - _query_item: (Entity, MainEntity), + _query_item: MainEntity, ) -> Option<(NonMaxU32, Option)> { error!( "`get_index_and_compare_data` is only intended for GPU mesh uniform building, \ @@ -394,7 +470,7 @@ impl GetFullBatchData for Mesh2dPipeline { fn get_binned_index( _: &SystemParamItem, - _query_item: (Entity, MainEntity), + _query_item: MainEntity, ) -> Option { error!( "`get_binned_index` is only intended 
for GPU mesh uniform building, \ @@ -403,45 +479,33 @@ impl GetFullBatchData for Mesh2dPipeline { None } - fn get_batch_indirect_parameters_index( - (mesh_instances, meshes, mesh_allocator): &SystemParamItem, - indirect_parameters_buffer: &mut bevy_render::batching::gpu_preprocessing::IndirectParametersBuffer, - (_entity, main_entity): (Entity, MainEntity), - instance_index: u32, - ) -> Option { - let mesh_instance = mesh_instances.get(&main_entity)?; - let mesh = meshes.get(mesh_instance.mesh_asset_id)?; - let vertex_buffer_slice = mesh_allocator.mesh_vertex_slice(&mesh_instance.mesh_asset_id)?; - + fn write_batch_indirect_parameters_metadata( + indexed: bool, + base_output_index: u32, + batch_set_index: Option, + indirect_parameters_buffer: &mut bevy_render::batching::gpu_preprocessing::UntypedPhaseIndirectParametersBuffers, + indirect_parameters_offset: u32, + ) { // Note that `IndirectParameters` covers both of these structures, even // though they actually have distinct layouts. See the comment above that // type for more information. - let indirect_parameters = match mesh.buffer_info { - RenderMeshBufferInfo::Indexed { - count: index_count, .. 
- } => { - let index_buffer_slice = - mesh_allocator.mesh_index_slice(&mesh_instance.mesh_asset_id)?; - IndirectParameters { - vertex_or_index_count: index_count, - instance_count: 0, - first_vertex_or_first_index: index_buffer_slice.range.start, - base_vertex_or_first_instance: vertex_buffer_slice.range.start, - first_instance: instance_index, - } - } - RenderMeshBufferInfo::NonIndexed => IndirectParameters { - vertex_or_index_count: mesh.vertex_count, - instance_count: 0, - first_vertex_or_first_index: vertex_buffer_slice.range.start, - base_vertex_or_first_instance: instance_index, - first_instance: instance_index, + let indirect_parameters = IndirectParametersCpuMetadata { + base_output_index, + batch_set_index: match batch_set_index { + None => !0, + Some(batch_set_index) => u32::from(batch_set_index), }, }; - (indirect_parameters_buffer.push(indirect_parameters) as u32) - .try_into() - .ok() + if indexed { + indirect_parameters_buffer + .indexed + .set(indirect_parameters_offset, indirect_parameters); + } else { + indirect_parameters_buffer + .non_indexed + .set(indirect_parameters_offset, indirect_parameters); + } } } @@ -706,7 +770,6 @@ pub struct Mesh2dViewBindGroup { pub value: BindGroup, } -#[allow(clippy::too_many_arguments)] pub fn prepare_mesh2d_view_bind_groups( mut commands: Commands, render_device: Res, diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl index 0b994822112d8..dbd73fb171f3f 100644 --- a/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_functions.wgsl @@ -43,3 +43,7 @@ fn mesh2d_tangent_local_to_world(world_from_local: mat4x4, vertex_tangent: vertex_tangent.w ); } + +fn get_tag(instance_index: u32) -> u32 { + return mesh[instance_index].tag; +} \ No newline at end of file diff --git a/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl b/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl index d5038c818d58e..e29264e0bf4f3 100644 
--- a/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl +++ b/crates/bevy_sprite/src/mesh2d/mesh2d_types.wgsl @@ -13,4 +13,5 @@ struct Mesh2d { local_from_world_transpose_b: f32, // 'flags' is a bit field indicating various options. u32 is 32 bits so we have up to 32 options. flags: u32, + tag: u32, }; diff --git a/crates/bevy_sprite/src/mesh2d/wireframe2d.rs b/crates/bevy_sprite/src/mesh2d/wireframe2d.rs index 6f2659fbaaf91..63e52805199de 100644 --- a/crates/bevy_sprite/src/mesh2d/wireframe2d.rs +++ b/crates/bevy_sprite/src/mesh2d/wireframe2d.rs @@ -1,17 +1,61 @@ -use crate::{Material2d, Material2dKey, Material2dPlugin, Mesh2d}; -use bevy_app::{Plugin, Startup, Update}; -use bevy_asset::{load_internal_asset, Asset, Assets, Handle}; -use bevy_color::{Color, LinearRgba}; -use bevy_ecs::prelude::*; -use bevy_reflect::{std_traits::ReflectDefault, Reflect, TypePath}; +use crate::{ + DrawMesh2d, Mesh2dPipeline, Mesh2dPipelineKey, RenderMesh2dInstances, SetMesh2dBindGroup, + SetMesh2dViewBindGroup, ViewKeyCache, ViewSpecializationTicks, +}; +use bevy_app::{App, Plugin, PostUpdate, Startup, Update}; +use bevy_asset::{ + load_internal_asset, prelude::AssetChanged, weak_handle, AsAssetId, Asset, AssetApp, + AssetEvents, AssetId, Assets, Handle, UntypedAssetId, +}; +use bevy_color::{Color, ColorToComponents}; +use bevy_core_pipeline::core_2d::{ + graph::{Core2d, Node2d}, + Camera2d, +}; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{ + component::Tick, + prelude::*, + query::QueryItem, + system::{lifetimeless::SRes, SystemChangeTick, SystemParamItem}, +}; +use bevy_platform::{ + collections::{HashMap, HashSet}, + hash::FixedHasher, +}; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ - extract_resource::ExtractResource, mesh::MeshVertexBufferLayoutRef, prelude::*, + batching::gpu_preprocessing::GpuPreprocessingMode, + camera::ExtractedCamera, + extract_resource::ExtractResource, + mesh::{ + allocator::{MeshAllocator, SlabId}, + Mesh2d, 
MeshVertexBufferLayoutRef, RenderMesh, + }, + prelude::*, + render_asset::{ + prepare_assets, PrepareAssetError, RenderAsset, RenderAssetPlugin, RenderAssets, + }, + render_graph::{NodeRunError, RenderGraphApp, RenderGraphContext, ViewNode, ViewNodeRunner}, + render_phase::{ + AddRenderCommand, BinnedPhaseItem, BinnedRenderPhasePlugin, BinnedRenderPhaseType, + CachedRenderPipelinePhaseItem, DrawFunctionId, DrawFunctions, InputUniformIndex, PhaseItem, + PhaseItemBatchSetKey, PhaseItemExtraIndex, RenderCommand, RenderCommandResult, + SetItemPipeline, TrackedRenderPass, ViewBinnedRenderPhases, + }, render_resource::*, + renderer::RenderContext, + sync_world::{MainEntity, MainEntityHashMap}, + view::{ + ExtractedView, RenderVisibleEntities, RetainedViewEntity, ViewDepthTexture, ViewTarget, + }, + Extract, Render, RenderApp, RenderDebugFlags, RenderSet, }; +use core::{hash::Hash, ops::Range}; +use tracing::error; -use super::MeshMaterial2d; - -pub const WIREFRAME_2D_SHADER_HANDLE: Handle = Handle::weak_from_u128(6920362697190520314); +pub const WIREFRAME_2D_SHADER_HANDLE: Handle = + weak_handle!("2d8a3853-2927-4de2-9dc7-3971e7e40970"); /// A [`Plugin`] that draws wireframes for 2D meshes. /// @@ -23,9 +67,20 @@ pub const WIREFRAME_2D_SHADER_HANDLE: Handle = Handle::weak_from_u128(69 /// /// This is a native only feature. #[derive(Debug, Default)] -pub struct Wireframe2dPlugin; +pub struct Wireframe2dPlugin { + /// Debugging flags that can optionally be set when constructing the renderer. + pub debug_flags: RenderDebugFlags, +} + +impl Wireframe2dPlugin { + /// Creates a new [`Wireframe2dPlugin`] with the given debug flags. 
+ pub fn new(debug_flags: RenderDebugFlags) -> Self { + Self { debug_flags } + } +} + impl Plugin for Wireframe2dPlugin { - fn build(&self, app: &mut bevy_app::App) { + fn build(&self, app: &mut App) { load_internal_asset!( app, WIREFRAME_2D_SHADER_HANDLE, @@ -33,24 +88,83 @@ impl Plugin for Wireframe2dPlugin { Shader::from_wgsl ); - app.register_type::() - .register_type::() - .register_type::() - .register_type::() - .init_resource::() - .add_plugins(Material2dPlugin::::default()) - .add_systems(Startup, setup_global_wireframe_material) + app.add_plugins(( + BinnedRenderPhasePlugin::::new(self.debug_flags), + RenderAssetPlugin::::default(), + )) + .init_asset::() + .init_resource::>() + .register_type::() + .register_type::() + .register_type::() + .init_resource::() + .init_resource::() + .add_systems(Startup, setup_global_wireframe_material) + .add_systems( + Update, + ( + global_color_changed.run_if(resource_changed::), + wireframe_color_changed, + // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global + // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed. 
+ (apply_wireframe_material, apply_global_wireframe_material).chain(), + ), + ) + .add_systems( + PostUpdate, + check_wireframe_entities_needing_specialization + .after(AssetEvents) + .run_if(resource_exists::), + ); + + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + + render_app + .init_resource::() + .init_resource::() + .init_resource::>() + .add_render_command::() + .init_resource::() + .init_resource::>() + .add_render_graph_node::>(Core2d, Node2d::Wireframe) + .add_render_graph_edges( + Core2d, + ( + Node2d::EndMainPass, + Node2d::Wireframe, + Node2d::PostProcessing, + ), + ) + .add_systems( + ExtractSchedule, + ( + extract_wireframe_2d_camera, + extract_wireframe_entities_needing_specialization, + extract_wireframe_materials, + ), + ) .add_systems( - Update, + Render, ( - global_color_changed.run_if(resource_changed::), - wireframe_color_changed, - // Run `apply_global_wireframe_material` after `apply_wireframe_material` so that the global - // wireframe setting is applied to a mesh on the same frame its wireframe marker component is removed. - (apply_wireframe_material, apply_global_wireframe_material).chain(), + specialize_wireframes + .in_set(RenderSet::PrepareMeshes) + .after(prepare_assets::) + .after(prepare_assets::), + queue_wireframes + .in_set(RenderSet::QueueMeshes) + .after(prepare_assets::), ), ); } + + fn finish(&self, app: &mut App) { + let Some(render_app) = app.get_sub_app_mut(RenderApp) else { + return; + }; + render_app.init_resource::(); + } } /// Enables wireframe rendering for any entity it is attached to. @@ -61,6 +175,245 @@ impl Plugin for Wireframe2dPlugin { #[reflect(Component, Default, Debug, PartialEq)] pub struct Wireframe2d; +pub struct Wireframe2dPhaseItem { + /// Determines which objects can be placed into a *batch set*. + /// + /// Objects in a single batch set can potentially be multi-drawn together, + /// if it's enabled and the current platform supports it. 
+ pub batch_set_key: Wireframe2dBatchSetKey, + /// The key, which determines which can be batched. + pub bin_key: Wireframe2dBinKey, + /// An entity from which data will be fetched, including the mesh if + /// applicable. + pub representative_entity: (Entity, MainEntity), + /// The ranges of instances. + pub batch_range: Range, + /// An extra index, which is either a dynamic offset or an index in the + /// indirect parameters list. + pub extra_index: PhaseItemExtraIndex, +} + +impl PhaseItem for Wireframe2dPhaseItem { + fn entity(&self) -> Entity { + self.representative_entity.0 + } + + fn main_entity(&self) -> MainEntity { + self.representative_entity.1 + } + + fn draw_function(&self) -> DrawFunctionId { + self.batch_set_key.draw_function + } + + fn batch_range(&self) -> &Range { + &self.batch_range + } + + fn batch_range_mut(&mut self) -> &mut Range { + &mut self.batch_range + } + + fn extra_index(&self) -> PhaseItemExtraIndex { + self.extra_index.clone() + } + + fn batch_range_and_extra_index_mut(&mut self) -> (&mut Range, &mut PhaseItemExtraIndex) { + (&mut self.batch_range, &mut self.extra_index) + } +} + +impl CachedRenderPipelinePhaseItem for Wireframe2dPhaseItem { + fn cached_pipeline(&self) -> CachedRenderPipelineId { + self.batch_set_key.pipeline + } +} + +impl BinnedPhaseItem for Wireframe2dPhaseItem { + type BinKey = Wireframe2dBinKey; + type BatchSetKey = Wireframe2dBatchSetKey; + + fn new( + batch_set_key: Self::BatchSetKey, + bin_key: Self::BinKey, + representative_entity: (Entity, MainEntity), + batch_range: Range, + extra_index: PhaseItemExtraIndex, + ) -> Self { + Self { + batch_set_key, + bin_key, + representative_entity, + batch_range, + extra_index, + } + } +} + +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe2dBatchSetKey { + /// The identifier of the render pipeline. + pub pipeline: CachedRenderPipelineId, + + /// The wireframe material asset ID. + pub asset_id: UntypedAssetId, + + /// The function used to draw. 
+ pub draw_function: DrawFunctionId, + /// The ID of the slab of GPU memory that contains vertex data. + /// + /// For non-mesh items, you can fill this with 0 if your items can be + /// multi-drawn, or with a unique value if they can't. + pub vertex_slab: SlabId, + + /// The ID of the slab of GPU memory that contains index data, if present. + /// + /// For non-mesh items, you can safely fill this with `None`. + pub index_slab: Option, +} + +impl PhaseItemBatchSetKey for Wireframe2dBatchSetKey { + fn indexed(&self) -> bool { + self.index_slab.is_some() + } +} + +/// Data that must be identical in order to *batch* phase items together. +/// +/// Note that a *batch set* (if multi-draw is in use) contains multiple batches. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Wireframe2dBinKey { + /// The wireframe mesh asset ID. + pub asset_id: UntypedAssetId, +} + +pub struct SetWireframe2dPushConstants; + +impl RenderCommand

for SetWireframe2dPushConstants { + type Param = ( + SRes, + SRes>, + ); + type ViewQuery = (); + type ItemQuery = (); + + #[inline] + fn render<'w>( + item: &P, + _view: (), + _item_query: Option<()>, + (wireframe_instances, wireframe_assets): SystemParamItem<'w, '_, Self::Param>, + pass: &mut TrackedRenderPass<'w>, + ) -> RenderCommandResult { + let Some(wireframe_material) = wireframe_instances.get(&item.main_entity()) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + let Some(wireframe_material) = wireframe_assets.get(*wireframe_material) else { + return RenderCommandResult::Failure("No wireframe material found for entity"); + }; + + pass.set_push_constants( + ShaderStages::FRAGMENT, + 0, + bytemuck::bytes_of(&wireframe_material.color), + ); + RenderCommandResult::Success + } +} + +pub type DrawWireframe2d = ( + SetItemPipeline, + SetMesh2dViewBindGroup<0>, + SetMesh2dBindGroup<1>, + SetWireframe2dPushConstants, + DrawMesh2d, +); + +#[derive(Resource, Clone)] +pub struct Wireframe2dPipeline { + mesh_pipeline: Mesh2dPipeline, + shader: Handle, +} + +impl FromWorld for Wireframe2dPipeline { + fn from_world(render_world: &mut World) -> Self { + Wireframe2dPipeline { + mesh_pipeline: render_world.resource::().clone(), + shader: WIREFRAME_2D_SHADER_HANDLE, + } + } +} + +impl SpecializedMeshPipeline for Wireframe2dPipeline { + type Key = Mesh2dPipelineKey; + + fn specialize( + &self, + key: Self::Key, + layout: &MeshVertexBufferLayoutRef, + ) -> Result { + let mut descriptor = self.mesh_pipeline.specialize(key, layout)?; + descriptor.label = Some("wireframe_2d_pipeline".into()); + descriptor.push_constant_ranges.push(PushConstantRange { + stages: ShaderStages::FRAGMENT, + range: 0..16, + }); + let fragment = descriptor.fragment.as_mut().unwrap(); + fragment.shader = self.shader.clone(); + descriptor.primitive.polygon_mode = PolygonMode::Line; + descriptor.depth_stencil.as_mut().unwrap().bias.slope_scale = 1.0; + 
Ok(descriptor) + } +} + +#[derive(Default)] +struct Wireframe2dNode; +impl ViewNode for Wireframe2dNode { + type ViewQuery = ( + &'static ExtractedCamera, + &'static ExtractedView, + &'static ViewTarget, + &'static ViewDepthTexture, + ); + + fn run<'w>( + &self, + graph: &mut RenderGraphContext, + render_context: &mut RenderContext<'w>, + (camera, view, target, depth): QueryItem<'w, Self::ViewQuery>, + world: &'w World, + ) -> Result<(), NodeRunError> { + let Some(wireframe_phase) = + world.get_resource::>() + else { + return Ok(()); + }; + + let Some(wireframe_phase) = wireframe_phase.get(&view.retained_view_entity) else { + return Ok(()); + }; + + let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor { + label: Some("wireframe_2d_pass"), + color_attachments: &[Some(target.get_color_attachment())], + depth_stencil_attachment: Some(depth.get_attachment(StoreOp::Store)), + timestamp_writes: None, + occlusion_query_set: None, + }); + + if let Some(viewport) = camera.viewport.as_ref() { + render_pass.set_camera_viewport(viewport); + } + + if let Err(err) = wireframe_phase.render(&mut render_pass, world, graph.view_entity()) { + error!("Error encountered while rendering the stencil phase {err:?}"); + return Err(NodeRunError::DrawError(err)); + } + + Ok(()) + } +} + /// Sets the color of the [`Wireframe2d`] of the entity it is attached to. /// /// If this component is present but there's no [`Wireframe2d`] component, @@ -73,6 +426,11 @@ pub struct Wireframe2dColor { pub color: Color, } +#[derive(Component, Debug, Clone, Default)] +pub struct ExtractedWireframeColor { + pub color: [f32; 4], +} + /// Disables wireframe rendering for any entity it is attached to. /// It will ignore the [`Wireframe2dConfig`] global setting. 
/// @@ -84,7 +442,7 @@ pub struct NoWireframe2d; #[derive(Resource, Debug, Clone, Default, ExtractResource, Reflect)] #[reflect(Resource, Debug, Default)] pub struct Wireframe2dConfig { - /// Whether to show wireframes for all 2D meshes. + /// Whether to show wireframes for all meshes. /// Can be overridden for individual meshes by adding a [`Wireframe2d`] or [`NoWireframe2d`] component. pub global: bool, /// If [`Self::global`] is set, any [`Entity`] that does not have a [`Wireframe2d`] component attached to it will have @@ -93,21 +451,121 @@ pub struct Wireframe2dConfig { pub default_color: Color, } +#[derive(Asset, Reflect, Clone, Debug, Default)] +#[reflect(Clone, Default)] +pub struct Wireframe2dMaterial { + pub color: Color, +} + +pub struct RenderWireframeMaterial { + pub color: [f32; 4], +} + +#[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect, PartialEq, Eq)] +#[reflect(Component, Default, Clone, PartialEq)] +pub struct Mesh2dWireframe(pub Handle); + +impl AsAssetId for Mesh2dWireframe { + type Asset = Wireframe2dMaterial; + + fn as_asset_id(&self) -> AssetId { + self.0.id() + } +} + +impl RenderAsset for RenderWireframeMaterial { + type SourceAsset = Wireframe2dMaterial; + type Param = (); + + fn prepare_asset( + source_asset: Self::SourceAsset, + _asset_id: AssetId, + _param: &mut SystemParamItem, + ) -> Result> { + Ok(RenderWireframeMaterial { + color: source_asset.color.to_linear().to_f32_array(), + }) + } +} + +#[derive(Resource, Deref, DerefMut, Default)] +pub struct RenderWireframeInstances(MainEntityHashMap>); + +#[derive(Clone, Resource, Deref, DerefMut, Debug, Default)] +pub struct WireframeEntitiesNeedingSpecialization { + #[deref] + pub entities: Vec, +} + +#[derive(Resource, Deref, DerefMut, Clone, Debug, Default)] +pub struct WireframeEntitySpecializationTicks { + pub entities: MainEntityHashMap, +} + +/// Stores the [`SpecializedWireframeViewPipelineCache`] for each view. 
+#[derive(Resource, Deref, DerefMut, Default)] +pub struct SpecializedWireframePipelineCache { + // view entity -> view pipeline cache + #[deref] + map: HashMap, +} + +/// Stores the cached render pipeline ID for each entity in a single view, as +/// well as the last time it was changed. +#[derive(Deref, DerefMut, Default)] +pub struct SpecializedWireframeViewPipelineCache { + // material entity -> (tick, pipeline_id) + #[deref] + map: MainEntityHashMap<(Tick, CachedRenderPipelineId)>, +} + #[derive(Resource)] -struct GlobalWireframe2dMaterial { +struct GlobalWireframeMaterial { // This handle will be reused when the global config is enabled handle: Handle, } +pub fn extract_wireframe_materials( + mut material_instances: ResMut, + changed_meshes_query: Extract< + Query< + (Entity, &ViewVisibility, &Mesh2dWireframe), + Or<(Changed, Changed)>, + >, + >, + mut removed_visibilities_query: Extract>, + mut removed_materials_query: Extract>, +) { + for (entity, view_visibility, material) in &changed_meshes_query { + if view_visibility.get() { + material_instances.insert(entity.into(), material.id()); + } else { + material_instances.remove(&MainEntity::from(entity)); + } + } + + for entity in removed_visibilities_query + .read() + .chain(removed_materials_query.read()) + { + // Only queue a mesh for removal if we didn't pick it up above. + // It's possible that a necessary component was removed and re-added in + // the same frame. 
+ if !changed_meshes_query.contains(entity) { + material_instances.remove(&MainEntity::from(entity)); + } + } +} + fn setup_global_wireframe_material( mut commands: Commands, mut materials: ResMut>, config: Res, ) { // Create the handle used for the global material - commands.insert_resource(GlobalWireframe2dMaterial { + commands.insert_resource(GlobalWireframeMaterial { handle: materials.add(Wireframe2dMaterial { - color: config.default_color.into(), + color: config.default_color, }), }); } @@ -116,25 +574,24 @@ fn setup_global_wireframe_material( fn global_color_changed( config: Res, mut materials: ResMut>, - global_material: Res, + global_material: Res, ) { if let Some(global_material) = materials.get_mut(&global_material.handle) { - global_material.color = config.default_color.into(); + global_material.color = config.default_color; } } /// Updates the wireframe material when the color in [`Wireframe2dColor`] changes -#[allow(clippy::type_complexity)] fn wireframe_color_changed( mut materials: ResMut>, mut colors_changed: Query< - (&mut MeshMaterial2d, &Wireframe2dColor), + (&mut Mesh2dWireframe, &Wireframe2dColor), (With, Changed), >, ) { for (mut handle, wireframe_color) in &mut colors_changed { handle.0 = materials.add(Wireframe2dMaterial { - color: wireframe_color.color.into(), + color: wireframe_color.color, }); } } @@ -146,99 +603,277 @@ fn apply_wireframe_material( mut materials: ResMut>, wireframes: Query< (Entity, Option<&Wireframe2dColor>), - ( - With, - Without>, - ), - >, - no_wireframes: Query< - Entity, - ( - With, - With>, - ), + (With, Without), >, + no_wireframes: Query, With)>, mut removed_wireframes: RemovedComponents, - global_material: Res, + global_material: Res, ) { for e in removed_wireframes.read().chain(no_wireframes.iter()) { - if let Some(mut commands) = commands.get_entity(e) { - commands.remove::>(); + if let Ok(mut commands) = commands.get_entity(e) { + commands.remove::(); } } - let mut wireframes_to_spawn = vec![]; - for (e, 
wireframe_color) in &wireframes { - let material = if let Some(wireframe_color) = wireframe_color { - materials.add(Wireframe2dMaterial { - color: wireframe_color.color.into(), - }) - } else { - // If there's no color specified we can use the global material since it's already set to use the default_color - global_material.handle.clone() - }; - wireframes_to_spawn.push((e, MeshMaterial2d(material))); + let mut material_to_spawn = vec![]; + for (e, maybe_color) in &wireframes { + let material = get_wireframe_material(maybe_color, &mut materials, &global_material); + material_to_spawn.push((e, Mesh2dWireframe(material))); } - commands.insert_or_spawn_batch(wireframes_to_spawn); + commands.try_insert_batch(material_to_spawn); } -type Wireframe2dFilter = (With, Without, Without); +type WireframeFilter = (With, Without, Without); /// Applies or removes a wireframe material on any mesh without a [`Wireframe2d`] or [`NoWireframe2d`] component. fn apply_global_wireframe_material( mut commands: Commands, config: Res, meshes_without_material: Query< - Entity, - ( - Wireframe2dFilter, - Without>, - ), - >, - meshes_with_global_material: Query< - Entity, - (Wireframe2dFilter, With>), + (Entity, Option<&Wireframe2dColor>), + (WireframeFilter, Without), >, - global_material: Res, + meshes_with_global_material: Query)>, + global_material: Res, + mut materials: ResMut>, ) { if config.global { let mut material_to_spawn = vec![]; - for e in &meshes_without_material { + for (e, maybe_color) in &meshes_without_material { + let material = get_wireframe_material(maybe_color, &mut materials, &global_material); // We only add the material handle but not the Wireframe component // This makes it easy to detect which mesh is using the global material and which ones are user specified - material_to_spawn.push((e, MeshMaterial2d(global_material.handle.clone()))); + material_to_spawn.push((e, Mesh2dWireframe(material))); } - commands.insert_or_spawn_batch(material_to_spawn); + 
commands.try_insert_batch(material_to_spawn); } else { for e in &meshes_with_global_material { - commands - .entity(e) - .remove::>(); + commands.entity(e).remove::(); } } } -#[derive(Default, AsBindGroup, TypePath, Debug, Clone, Asset)] -pub struct Wireframe2dMaterial { - #[uniform(0)] - pub color: LinearRgba, +/// Gets a handle to a wireframe material with a fallback on the default material +fn get_wireframe_material( + maybe_color: Option<&Wireframe2dColor>, + wireframe_materials: &mut Assets, + global_material: &GlobalWireframeMaterial, +) -> Handle { + if let Some(wireframe_color) = maybe_color { + wireframe_materials.add(Wireframe2dMaterial { + color: wireframe_color.color, + }) + } else { + // If there's no color specified we can use the global material since it's already set to use the default_color + global_material.handle.clone() + } } -impl Material2d for Wireframe2dMaterial { - fn fragment_shader() -> ShaderRef { - WIREFRAME_2D_SHADER_HANDLE.into() +fn extract_wireframe_2d_camera( + mut wireframe_2d_phases: ResMut>, + cameras: Extract>>, + mut live_entities: Local>, +) { + live_entities.clear(); + for (main_entity, camera) in &cameras { + if !camera.is_active { + continue; + } + let retained_view_entity = RetainedViewEntity::new(main_entity.into(), None, 0); + wireframe_2d_phases.prepare_for_new_frame(retained_view_entity, GpuPreprocessingMode::None); + live_entities.insert(retained_view_entity); } - fn depth_bias(&self) -> f32 { - 1.0 + // Clear out all dead views. 
+ wireframe_2d_phases.retain(|camera_entity, _| live_entities.contains(camera_entity)); +} + +pub fn extract_wireframe_entities_needing_specialization( + entities_needing_specialization: Extract>, + mut entity_specialization_ticks: ResMut, + views: Query<&ExtractedView>, + mut specialized_wireframe_pipeline_cache: ResMut, + mut removed_meshes_query: Extract>, + ticks: SystemChangeTick, +) { + for entity in entities_needing_specialization.iter() { + // Update the entity's specialization tick with this run's tick + entity_specialization_ticks.insert((*entity).into(), ticks.this_run()); } - fn specialize( - descriptor: &mut RenderPipelineDescriptor, - _layout: &MeshVertexBufferLayoutRef, - _key: Material2dKey, - ) -> Result<(), SpecializedMeshPipelineError> { - descriptor.primitive.polygon_mode = PolygonMode::Line; - Ok(()) + for entity in removed_meshes_query.read() { + for view in &views { + if let Some(specialized_wireframe_pipeline_cache) = + specialized_wireframe_pipeline_cache.get_mut(&view.retained_view_entity) + { + specialized_wireframe_pipeline_cache.remove(&MainEntity::from(entity)); + } + } + } +} + +pub fn check_wireframe_entities_needing_specialization( + needs_specialization: Query< + Entity, + Or<( + Changed, + AssetChanged, + Changed, + AssetChanged, + )>, + >, + mut entities_needing_specialization: ResMut, +) { + entities_needing_specialization.clear(); + for entity in &needs_specialization { + entities_needing_specialization.push(entity); + } +} + +pub fn specialize_wireframes( + render_meshes: Res>, + render_mesh_instances: Res, + render_wireframe_instances: Res, + wireframe_phases: Res>, + views: Query<(&ExtractedView, &RenderVisibleEntities)>, + view_key_cache: Res, + entity_specialization_ticks: Res, + view_specialization_ticks: Res, + mut specialized_material_pipeline_cache: ResMut, + mut pipelines: ResMut>, + pipeline: Res, + pipeline_cache: Res, + ticks: SystemChangeTick, +) { + // Record the retained IDs of all views so that we can expire 
old + // pipeline IDs. + let mut all_views: HashSet = HashSet::default(); + + for (view, visible_entities) in &views { + all_views.insert(view.retained_view_entity); + + if !wireframe_phases.contains_key(&view.retained_view_entity) { + continue; + } + + let Some(view_key) = view_key_cache.get(&view.retained_view_entity.main_entity) else { + continue; + }; + + let view_tick = view_specialization_ticks + .get(&view.retained_view_entity.main_entity) + .unwrap(); + let view_specialized_material_pipeline_cache = specialized_material_pipeline_cache + .entry(view.retained_view_entity) + .or_default(); + + for (_, visible_entity) in visible_entities.iter::() { + if !render_wireframe_instances.contains_key(visible_entity) { + continue; + }; + let Some(mesh_instance) = render_mesh_instances.get(visible_entity) else { + continue; + }; + let entity_tick = entity_specialization_ticks.get(visible_entity).unwrap(); + let last_specialized_tick = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(tick, _)| *tick); + let needs_specialization = last_specialized_tick.is_none_or(|tick| { + view_tick.is_newer_than(tick, ticks.this_run()) + || entity_tick.is_newer_than(tick, ticks.this_run()) + }); + if !needs_specialization { + continue; + } + let Some(mesh) = render_meshes.get(mesh_instance.mesh_asset_id) else { + continue; + }; + + let mut mesh_key = *view_key; + mesh_key |= Mesh2dPipelineKey::from_primitive_topology(mesh.primitive_topology()); + + let pipeline_id = + pipelines.specialize(&pipeline_cache, &pipeline, mesh_key, &mesh.layout); + let pipeline_id = match pipeline_id { + Ok(id) => id, + Err(err) => { + error!("{}", err); + continue; + } + }; + + view_specialized_material_pipeline_cache + .insert(*visible_entity, (ticks.this_run(), pipeline_id)); + } + } + + // Delete specialized pipelines belonging to views that have expired. 
+ specialized_material_pipeline_cache + .retain(|retained_view_entity, _| all_views.contains(retained_view_entity)); +} + +fn queue_wireframes( + custom_draw_functions: Res>, + render_mesh_instances: Res, + mesh_allocator: Res, + specialized_wireframe_pipeline_cache: Res, + render_wireframe_instances: Res, + mut wireframe_2d_phases: ResMut>, + mut views: Query<(&ExtractedView, &RenderVisibleEntities)>, +) { + for (view, visible_entities) in &mut views { + let Some(wireframe_phase) = wireframe_2d_phases.get_mut(&view.retained_view_entity) else { + continue; + }; + let draw_wireframe = custom_draw_functions.read().id::(); + + let Some(view_specialized_material_pipeline_cache) = + specialized_wireframe_pipeline_cache.get(&view.retained_view_entity) + else { + continue; + }; + + for (render_entity, visible_entity) in visible_entities.iter::() { + let Some(wireframe_instance) = render_wireframe_instances.get(visible_entity) else { + continue; + }; + let Some((current_change_tick, pipeline_id)) = view_specialized_material_pipeline_cache + .get(visible_entity) + .map(|(current_change_tick, pipeline_id)| (*current_change_tick, *pipeline_id)) + else { + continue; + }; + + // Skip the entity if it's cached in a bin and up to date. 
+ if wireframe_phase.validate_cached_entity(*visible_entity, current_change_tick) { + continue; + } + let Some(mesh_instance) = render_mesh_instances.get(visible_entity) else { + continue; + }; + let (vertex_slab, index_slab) = mesh_allocator.mesh_slabs(&mesh_instance.mesh_asset_id); + let bin_key = Wireframe2dBinKey { + asset_id: mesh_instance.mesh_asset_id.untyped(), + }; + let batch_set_key = Wireframe2dBatchSetKey { + pipeline: pipeline_id, + asset_id: wireframe_instance.untyped(), + draw_function: draw_wireframe, + vertex_slab: vertex_slab.unwrap_or_default(), + index_slab, + }; + wireframe_phase.add( + batch_set_key, + bin_key, + (*render_entity, *visible_entity), + InputUniformIndex::default(), + if mesh_instance.automatic_batching { + BinnedRenderPhaseType::BatchableMesh + } else { + BinnedRenderPhaseType::UnbatchableMesh + }, + current_change_tick, + ); + } } } diff --git a/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl b/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl index fac02d6456a86..c7bb3aa791b18 100644 --- a/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl +++ b/crates/bevy_sprite/src/mesh2d/wireframe2d.wgsl @@ -1,11 +1,12 @@ #import bevy_sprite::mesh2d_vertex_output::VertexOutput -struct WireframeMaterial { - color: vec4, -}; +struct PushConstants { + color: vec4 +} + +var push_constants: PushConstants; -@group(2) @binding(0) var material: WireframeMaterial; @fragment fn fragment(in: VertexOutput) -> @location(0) vec4 { - return material.color; + return push_constants.color; } diff --git a/crates/bevy_sprite/src/picking_backend.rs b/crates/bevy_sprite/src/picking_backend.rs index bd57aaf202036..a0298381476e7 100644 --- a/crates/bevy_sprite/src/picking_backend.rs +++ b/crates/bevy_sprite/src/picking_backend.rs @@ -1,24 +1,36 @@ //! A [`bevy_picking`] backend for sprites. Works for simple sprites and sprite atlases. Works for //! sprites with arbitrary transforms. Picking is done based on sprite bounds, not visible pixels. //! 
This means a partially transparent sprite is pickable even in its transparent areas. +//! +//! ## Implementation Notes +//! +//! - The `position` reported in `HitData` in in world space, and the `normal` is a normalized +//! vector provided by the target's `GlobalTransform::back()`. -use core::cmp::Reverse; - -use crate::{Sprite, TextureAtlasLayout}; +use crate::Sprite; use bevy_app::prelude::*; use bevy_asset::prelude::*; use bevy_color::Alpha; use bevy_ecs::prelude::*; -use bevy_image::Image; -use bevy_math::{prelude::*, FloatExt, FloatOrd}; +use bevy_image::prelude::*; +use bevy_math::{prelude::*, FloatExt}; use bevy_picking::backend::prelude::*; use bevy_reflect::prelude::*; use bevy_render::prelude::*; use bevy_transform::prelude::*; use bevy_window::PrimaryWindow; +/// An optional component that marks cameras that should be used in the [`SpritePickingPlugin`]. +/// +/// Only needed if [`SpritePickingSettings::require_markers`] is set to `true`, and ignored +/// otherwise. +#[derive(Debug, Clone, Default, Component, Reflect)] +#[reflect(Debug, Default, Component, Clone)] +pub struct SpritePickingCamera; + /// How should the [`SpritePickingPlugin`] handle picking and how should it handle transparent pixels #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Debug, Clone)] pub enum SpritePickingMode { /// Even if a sprite is picked on a transparent pixel, it should still count within the backend. /// Only consider the rect of a given sprite. @@ -32,34 +44,50 @@ pub enum SpritePickingMode { #[derive(Resource, Reflect)] #[reflect(Resource, Default)] pub struct SpritePickingSettings { + /// When set to `true` sprite picking will only consider cameras marked with + /// [`SpritePickingCamera`]. + /// + /// This setting is provided to give you fine-grained control over which cameras and entities + /// should be used by the sprite picking backend at runtime. 
+ pub require_markers: bool, /// Should the backend count transparent pixels as part of the sprite for picking purposes or should it use the bounding box of the sprite alone. /// - /// Defaults to an incusive alpha threshold of 0.1 + /// Defaults to an inclusive alpha threshold of 0.1 pub picking_mode: SpritePickingMode, } impl Default for SpritePickingSettings { fn default() -> Self { Self { + require_markers: false, picking_mode: SpritePickingMode::AlphaThreshold(0.1), } } } +/// Enables the sprite picking backend, allowing you to click on, hover over and drag sprites. #[derive(Clone)] pub struct SpritePickingPlugin; impl Plugin for SpritePickingPlugin { fn build(&self, app: &mut App) { app.init_resource::() + .register_type::() + .register_type::() + .register_type::() .add_systems(PreUpdate, sprite_picking.in_set(PickSet::Backend)); } } -#[allow(clippy::too_many_arguments)] fn sprite_picking( pointers: Query<(&PointerId, &PointerLocation)>, - cameras: Query<(Entity, &Camera, &GlobalTransform, &OrthographicProjection)>, + cameras: Query<( + Entity, + &Camera, + &GlobalTransform, + &Projection, + Has, + )>, primary_window: Query>, images: Res>, texture_atlas_layout: Res>, @@ -68,39 +96,46 @@ fn sprite_picking( Entity, &Sprite, &GlobalTransform, - Option<&PickingBehavior>, + &Pickable, &ViewVisibility, )>, mut output: EventWriter, ) { let mut sorted_sprites: Vec<_> = sprite_query .iter() - .filter_map(|(entity, sprite, transform, picking_behavior, vis)| { + .filter_map(|(entity, sprite, transform, pickable, vis)| { if !transform.affine().is_nan() && vis.get() { - Some((entity, sprite, transform, picking_behavior)) + Some((entity, sprite, transform, pickable)) } else { None } }) .collect(); - sorted_sprites.sort_by_key(|x| Reverse(FloatOrd(x.2.translation().z))); - let primary_window = primary_window.get_single().ok(); + // radsort is a stable radix sort that performed better than `slice::sort_by_key` + radsort::sort_by_key(&mut sorted_sprites, |(_, _, transform, 
_)| { + -transform.translation().z + }); + + let primary_window = primary_window.single().ok(); for (pointer, location) in pointers.iter().filter_map(|(pointer, pointer_location)| { pointer_location.location().map(|loc| (pointer, loc)) }) { let mut blocked = false; - let Some((cam_entity, camera, cam_transform, cam_ortho)) = cameras - .iter() - .filter(|(_, camera, _, _)| camera.is_active) - .find(|(_, camera, _, _)| { - camera - .target - .normalize(primary_window) - .map(|x| x == location.target) - .unwrap_or(false) - }) + let Some((cam_entity, camera, cam_transform, Projection::Orthographic(cam_ortho), _)) = + cameras + .iter() + .filter(|(_, camera, _, _, cam_can_pick)| { + let marker_requirement = !settings.require_markers || *cam_can_pick; + camera.is_active && marker_requirement + }) + .find(|(_, camera, _, _, _)| { + camera + .target + .normalize(primary_window) + .is_some_and(|x| x == location.target) + }) else { continue; }; @@ -120,7 +155,7 @@ fn sprite_picking( let picks: Vec<(Entity, HitData)> = sorted_sprites .iter() .copied() - .filter_map(|(entity, sprite, sprite_transform, picking_behavior)| { + .filter_map(|(entity, sprite, sprite_transform, pickable)| { if blocked { return None; } @@ -185,10 +220,7 @@ fn sprite_picking( } }; - blocked = cursor_in_valid_pixels_of_sprite - && picking_behavior - .map(|p| p.should_block_lower) - .unwrap_or(true); + blocked = cursor_in_valid_pixels_of_sprite && pickable.should_block_lower; cursor_in_valid_pixels_of_sprite.then(|| { let hit_pos_world = @@ -214,6 +246,6 @@ fn sprite_picking( .collect(); let order = camera.order as f32; - output.send(PointerHits::new(*pointer, picks, order)); + output.write(PointerHits::new(*pointer, picks, order)); } } diff --git a/crates/bevy_sprite/src/render/mod.rs b/crates/bevy_sprite/src/render/mod.rs index 51e5f41b97fd1..de57f43536631 100644 --- a/crates/bevy_sprite/src/render/mod.rs +++ b/crates/bevy_sprite/src/render/mod.rs @@ -1,8 +1,6 @@ use core::ops::Range; -use crate::{ - 
texture_atlas::TextureAtlasLayout, ComputedTextureSlices, Sprite, SPRITE_SHADER_HANDLE, -}; +use crate::{ComputedTextureSlices, ScalingMode, Sprite, SPRITE_SHADER_HANDLE}; use bevy_asset::{AssetEvent, AssetId, Assets}; use bevy_color::{ColorToComponents, LinearRgba}; use bevy_core_pipeline::{ @@ -12,15 +10,16 @@ use bevy_core_pipeline::{ TonemappingLuts, }, }; +use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ prelude::*, query::ROQueryItem, system::{lifetimeless::*, SystemParamItem, SystemState}, }; -use bevy_image::{BevyDefault, Image, ImageSampler, TextureFormatPixelInfo}; +use bevy_image::{BevyDefault, Image, ImageSampler, TextureAtlasLayout, TextureFormatPixelInfo}; use bevy_math::{Affine3A, FloatOrd, Quat, Rect, Vec2, Vec4}; -use bevy_render::sync_world::MainEntity; -use bevy_render::view::RenderVisibleEntities; +use bevy_platform::collections::HashMap; +use bevy_render::view::{RenderVisibleEntities, RetainedViewEntity}; use bevy_render::{ render_asset::RenderAssets, render_phase::{ @@ -32,7 +31,7 @@ use bevy_render::{ *, }, renderer::{RenderDevice, RenderQueue}, - sync_world::{RenderEntity, TemporaryRenderEntity}, + sync_world::RenderEntity, texture::{DefaultImageSampler, FallbackImage, GpuImage}, view::{ ExtractedView, Msaa, ViewTarget, ViewUniform, ViewUniformOffset, ViewUniforms, @@ -41,7 +40,6 @@ use bevy_render::{ Extract, }; use bevy_transform::components::GlobalTransform; -use bevy_utils::HashMap; use bytemuck::{Pod, Zeroable}; use fixedbitset::FixedBitSet; @@ -103,8 +101,8 @@ impl FromWorld for SpritePipeline { let format_size = image.texture_descriptor.format.pixel_size(); render_queue.write_texture( texture.as_image_copy(), - &image.data, - ImageDataLayout { + image.data.as_ref().expect("Image has no data"), + TexelCopyBufferLayout { offset: 0, bytes_per_row: Some(image.width() * format_size as u32), rows_per_image: None, @@ -325,27 +323,47 @@ impl SpecializedRenderPipeline for SpritePipeline { } } +pub struct ExtractedSlice { + pub offset: Vec2, 
+ pub rect: Rect, + pub size: Vec2, +} + pub struct ExtractedSprite { + pub main_entity: Entity, + pub render_entity: Entity, pub transform: GlobalTransform, pub color: LinearRgba, - /// Select an area of the texture - pub rect: Option, /// Change the on-screen size of the sprite - pub custom_size: Option, /// Asset ID of the [`Image`] of this sprite /// PERF: storing an `AssetId` instead of `Handle` enables some optimizations (`ExtractedSprite` becomes `Copy` and doesn't need to be dropped) pub image_handle_id: AssetId, pub flip_x: bool, pub flip_y: bool, - pub anchor: Vec2, - /// For cases where additional [`ExtractedSprites`] are created during extraction, this stores the - /// entity that caused that creation for use in determining visibility. - pub original_entity: Option, + pub kind: ExtractedSpriteKind, +} + +pub enum ExtractedSpriteKind { + /// A single sprite with custom sizing and scaling options + Single { + anchor: Vec2, + rect: Option, + scaling_mode: Option, + custom_size: Option, + }, + /// Indexes into the list of [`ExtractedSlice`]s stored in the [`ExtractedSlices`] resource + /// Used for elements composed from multiple sprites such as text or nine-patched borders + Slices { indices: Range }, } #[derive(Resource, Default)] pub struct ExtractedSprites { - pub sprites: HashMap<(Entity, MainEntity), ExtractedSprite>, + pub sprites: Vec, +} + +#[derive(Resource, Default)] +pub struct ExtractedSlices { + pub slices: Vec, } #[derive(Resource, Default)] @@ -366,8 +384,8 @@ pub fn extract_sprite_events( } pub fn extract_sprites( - mut commands: Commands, mut extracted_sprites: ResMut, + mut extracted_slices: ResMut, texture_atlases: Extract>>, sprite_query: Extract< Query<( @@ -381,26 +399,32 @@ pub fn extract_sprites( >, ) { extracted_sprites.sprites.clear(); - for (original_entity, entity, view_visibility, sprite, transform, slices) in sprite_query.iter() + extracted_slices.slices.clear(); + for (main_entity, render_entity, view_visibility, sprite, 
transform, slices) in + sprite_query.iter() { if !view_visibility.get() { continue; } if let Some(slices) = slices { - extracted_sprites.sprites.extend( - slices - .extract_sprites(transform, original_entity, sprite) - .map(|e| { - ( - ( - commands.spawn(TemporaryRenderEntity).id(), - original_entity.into(), - ), - e, - ) - }), - ); + let start = extracted_slices.slices.len(); + extracted_slices + .slices + .extend(slices.extract_slices(sprite)); + let end = extracted_slices.slices.len(); + extracted_sprites.sprites.push(ExtractedSprite { + main_entity, + render_entity, + color: sprite.color.into(), + transform: *transform, + flip_x: sprite.flip_x, + flip_y: sprite.flip_y, + image_handle_id: sprite.image.id(), + kind: ExtractedSpriteKind::Slices { + indices: start..end, + }, + }); } else { let atlas_rect = sprite .texture_atlas @@ -413,27 +437,27 @@ pub fn extract_sprites( (Some(atlas_rect), Some(mut sprite_rect)) => { sprite_rect.min += atlas_rect.min; sprite_rect.max += atlas_rect.min; - Some(sprite_rect) } }; // PERF: we don't check in this function that the `Image` asset is ready, since it should be in most cases and hashing the handle is expensive - extracted_sprites.sprites.insert( - (entity, original_entity.into()), - ExtractedSprite { - color: sprite.color.into(), - transform: *transform, + extracted_sprites.sprites.push(ExtractedSprite { + main_entity, + render_entity, + color: sprite.color.into(), + transform: *transform, + flip_x: sprite.flip_x, + flip_y: sprite.flip_y, + image_handle_id: sprite.image.id(), + kind: ExtractedSpriteKind::Single { + anchor: sprite.anchor.as_vec(), rect, + scaling_mode: sprite.image_mode.scale(), // Pass the custom size custom_size: sprite.custom_size, - flip_x: sprite.flip_x, - flip_y: sprite.flip_y, - image_handle_id: sprite.image.id(), - anchor: sprite.anchor.as_vec(), - original_entity: Some(original_entity), }, - ); + }); } } } @@ -483,7 +507,10 @@ pub struct SpriteViewBindGroup { pub value: BindGroup, } 
-#[derive(Component, PartialEq, Eq, Clone)] +#[derive(Resource, Deref, DerefMut, Default)] +pub struct SpriteBatches(HashMap<(RetainedViewEntity, Entity), SpriteBatch>); + +#[derive(PartialEq, Eq, Clone, Debug)] pub struct SpriteBatch { image_handle_id: AssetId, range: Range, @@ -494,7 +521,6 @@ pub struct ImageBindGroups { values: HashMap, BindGroup>, } -#[allow(clippy::too_many_arguments)] pub fn queue_sprites( mut view_entities: Local, draw_functions: Res>, @@ -504,7 +530,6 @@ pub fn queue_sprites( extracted_sprites: Res, mut transparent_render_phases: ResMut>, mut views: Query<( - Entity, &RenderVisibleEntities, &ExtractedView, &Msaa, @@ -514,8 +539,9 @@ pub fn queue_sprites( ) { let draw_sprite_function = draw_functions.read().id::(); - for (view_entity, visible_entities, view, msaa, tonemapping, dither) in &mut views { - let Some(transparent_phase) = transparent_render_phases.get_mut(&view_entity) else { + for (visible_entities, view, msaa, tonemapping, dither) in &mut views { + let Some(transparent_phase) = transparent_render_phases.get_mut(&view.retained_view_entity) + else { continue; }; @@ -558,10 +584,10 @@ pub fn queue_sprites( .items .reserve(extracted_sprites.sprites.len()); - for ((entity, main_entity), extracted_sprite) in extracted_sprites.sprites.iter() { - let index = extracted_sprite.original_entity.unwrap_or(*entity).index(); + for (index, extracted_sprite) in extracted_sprites.sprites.iter().enumerate() { + let view_index = extracted_sprite.main_entity.index(); - if !view_entities.contains(index as usize) { + if !view_entities.contains(view_index as usize) { continue; } @@ -572,17 +598,21 @@ pub fn queue_sprites( transparent_phase.add(Transparent2d { draw_function: draw_sprite_function, pipeline, - entity: (*entity, *main_entity), + entity: ( + extracted_sprite.render_entity, + extracted_sprite.main_entity.into(), + ), sort_key, - // batch_range and dynamic_offset will be calculated in prepare_sprites + // `batch_range` is calculated in 
`prepare_sprite_image_bind_groups` batch_range: 0..0, extra_index: PhaseItemExtraIndex::None, + extracted_index: index, + indexed: true, }); } } } -#[allow(clippy::too_many_arguments)] pub fn prepare_sprite_view_bind_groups( mut commands: Commands, render_device: Res, @@ -616,10 +646,7 @@ pub fn prepare_sprite_view_bind_groups( } } -#[allow(clippy::too_many_arguments)] pub fn prepare_sprite_image_bind_groups( - mut commands: Commands, - mut previous_len: Local, render_device: Res, render_queue: Res, mut sprite_meta: ResMut, @@ -627,8 +654,10 @@ pub fn prepare_sprite_image_bind_groups( mut image_bind_groups: ResMut, gpu_images: Res>, extracted_sprites: Res, + extracted_slices: Res, mut phases: ResMut>, events: Res, + mut batches: ResMut, ) { // If an image has changed, the GpuImage has (probably) changed for event in &events.images { @@ -642,7 +671,7 @@ pub fn prepare_sprite_image_bind_groups( }; } - let mut batches: Vec<(Entity, SpriteBatch)> = Vec::with_capacity(*previous_len); + batches.clear(); // Clear the sprite instances sprite_meta.sprite_instance_buffer.clear(); @@ -652,7 +681,8 @@ pub fn prepare_sprite_image_bind_groups( let image_bind_groups = &mut *image_bind_groups; - for transparent_phase in phases.values_mut() { + for (retained_view, transparent_phase) in phases.iter_mut() { + let mut current_batch = None; let mut batch_item_index = 0; let mut batch_image_size = Vec2::ZERO; let mut batch_image_handle = AssetId::invalid(); @@ -662,7 +692,12 @@ pub fn prepare_sprite_image_bind_groups( // Compatible items share the same entity. 
for item_index in 0..transparent_phase.items.len() { let item = &transparent_phase.items[item_index]; - let Some(extracted_sprite) = extracted_sprites.sprites.get(&item.entity) else { + + let Some(extracted_sprite) = extracted_sprites + .sprites + .get(item.extracted_index) + .filter(|extracted_sprite| extracted_sprite.render_entity == item.entity()) + else { // If there is a phase item that is not a sprite, then we must start a new // batch to draw the other phase item(s) and to respect draw order. This can be // done by invalidating the batch_image_handle @@ -670,8 +705,7 @@ pub fn prepare_sprite_image_bind_groups( continue; }; - let batch_image_changed = batch_image_handle != extracted_sprite.image_handle_id; - if batch_image_changed { + if batch_image_handle != extracted_sprite.image_handle_id { let Some(gpu_image) = gpu_images.get(extracted_sprite.image_handle_id) else { continue; }; @@ -691,108 +725,170 @@ pub fn prepare_sprite_image_bind_groups( )), ) }); - } - - // By default, the size of the quad is the size of the texture - let mut quad_size = batch_image_size; - - // Calculate vertex data for this item - let mut uv_offset_scale: Vec4; - - // If a rect is specified, adjust UVs and the size of the quad - if let Some(rect) = extracted_sprite.rect { - let rect_size = rect.size(); - uv_offset_scale = Vec4::new( - rect.min.x / batch_image_size.x, - rect.max.y / batch_image_size.y, - rect_size.x / batch_image_size.x, - -rect_size.y / batch_image_size.y, - ); - quad_size = rect_size; - } else { - uv_offset_scale = Vec4::new(0.0, 1.0, 1.0, -1.0); - } - if extracted_sprite.flip_x { - uv_offset_scale.x += uv_offset_scale.z; - uv_offset_scale.z *= -1.0; - } - if extracted_sprite.flip_y { - uv_offset_scale.y += uv_offset_scale.w; - uv_offset_scale.w *= -1.0; - } - - // Override the size if a custom one is specified - if let Some(custom_size) = extracted_sprite.custom_size { - quad_size = custom_size; - } - let transform = extracted_sprite.transform.affine() - * 
Affine3A::from_scale_rotation_translation( - quad_size.extend(1.0), - Quat::IDENTITY, - (quad_size * (-extracted_sprite.anchor - Vec2::splat(0.5))).extend(0.0), - ); - - // Store the vertex data and add the item to the render phase - sprite_meta - .sprite_instance_buffer - .push(SpriteInstance::from( - &transform, - &extracted_sprite.color, - &uv_offset_scale, - )); - - if batch_image_changed { batch_item_index = item_index; - - batches.push(( - item.entity(), + current_batch = Some(batches.entry((*retained_view, item.entity())).insert( SpriteBatch { image_handle_id: batch_image_handle, range: index..index, }, )); } + match extracted_sprite.kind { + ExtractedSpriteKind::Single { + anchor, + rect, + scaling_mode, + custom_size, + } => { + // By default, the size of the quad is the size of the texture + let mut quad_size = batch_image_size; + let mut texture_size = batch_image_size; + + // Calculate vertex data for this item + // If a rect is specified, adjust UVs and the size of the quad + let mut uv_offset_scale = if let Some(rect) = rect { + let rect_size = rect.size(); + quad_size = rect_size; + // Update texture size to the rect size + // It will help scale properly only portion of the image + texture_size = rect_size; + Vec4::new( + rect.min.x / batch_image_size.x, + rect.max.y / batch_image_size.y, + rect_size.x / batch_image_size.x, + -rect_size.y / batch_image_size.y, + ) + } else { + Vec4::new(0.0, 1.0, 1.0, -1.0) + }; + if extracted_sprite.flip_x { + uv_offset_scale.x += uv_offset_scale.z; + uv_offset_scale.z *= -1.0; + } + if extracted_sprite.flip_y { + uv_offset_scale.y += uv_offset_scale.w; + uv_offset_scale.w *= -1.0; + } + + // Override the size if a custom one is specified + quad_size = custom_size.unwrap_or(quad_size); + + // Used for translation of the quad if `TextureScale::Fit...` is specified. + let mut quad_translation = Vec2::ZERO; + + // Scales the texture based on the `texture_scale` field. 
+ if let Some(scaling_mode) = scaling_mode { + apply_scaling( + scaling_mode, + texture_size, + &mut quad_size, + &mut quad_translation, + &mut uv_offset_scale, + ); + } + + let transform = extracted_sprite.transform.affine() + * Affine3A::from_scale_rotation_translation( + quad_size.extend(1.0), + Quat::IDENTITY, + ((quad_size + quad_translation) * (-anchor - Vec2::splat(0.5))) + .extend(0.0), + ); + + // Store the vertex data and add the item to the render phase + sprite_meta + .sprite_instance_buffer + .push(SpriteInstance::from( + &transform, + &extracted_sprite.color, + &uv_offset_scale, + )); + + current_batch.as_mut().unwrap().get_mut().range.end += 1; + index += 1; + } + ExtractedSpriteKind::Slices { ref indices } => { + for i in indices.clone() { + let slice = &extracted_slices.slices[i]; + let rect = slice.rect; + let rect_size = rect.size(); + + // Calculate vertex data for this item + let mut uv_offset_scale: Vec4; + + // If a rect is specified, adjust UVs and the size of the quad + uv_offset_scale = Vec4::new( + rect.min.x / batch_image_size.x, + rect.max.y / batch_image_size.y, + rect_size.x / batch_image_size.x, + -rect_size.y / batch_image_size.y, + ); + + if extracted_sprite.flip_x { + uv_offset_scale.x += uv_offset_scale.z; + uv_offset_scale.z *= -1.0; + } + if extracted_sprite.flip_y { + uv_offset_scale.y += uv_offset_scale.w; + uv_offset_scale.w *= -1.0; + } + + let transform = extracted_sprite.transform.affine() + * Affine3A::from_scale_rotation_translation( + slice.size.extend(1.0), + Quat::IDENTITY, + (slice.size * -Vec2::splat(0.5) + slice.offset).extend(0.0), + ); + + // Store the vertex data and add the item to the render phase + sprite_meta + .sprite_instance_buffer + .push(SpriteInstance::from( + &transform, + &extracted_sprite.color, + &uv_offset_scale, + )); + + current_batch.as_mut().unwrap().get_mut().range.end += 1; + index += 1; + } + } + } transparent_phase.items[batch_item_index] .batch_range_mut() .end += 1; - 
batches.last_mut().unwrap().1.range.end += 1; - index += 1; } - } - sprite_meta - .sprite_instance_buffer - .write_buffer(&render_device, &render_queue); - - if sprite_meta.sprite_index_buffer.len() != 6 { - sprite_meta.sprite_index_buffer.clear(); - - // NOTE: This code is creating 6 indices pointing to 4 vertices. - // The vertices form the corners of a quad based on their two least significant bits. - // 10 11 - // - // 00 01 - // The sprite shader can then use the two least significant bits as the vertex index. - // The rest of the properties to transform the vertex positions and UVs (which are - // implicit) are baked into the instance transform, and UV offset and scale. - // See bevy_sprite/src/render/sprite.wgsl for the details. - sprite_meta.sprite_index_buffer.push(2); - sprite_meta.sprite_index_buffer.push(0); - sprite_meta.sprite_index_buffer.push(1); - sprite_meta.sprite_index_buffer.push(1); - sprite_meta.sprite_index_buffer.push(3); - sprite_meta.sprite_index_buffer.push(2); - sprite_meta - .sprite_index_buffer + .sprite_instance_buffer .write_buffer(&render_device, &render_queue); - } - *previous_len = batches.len(); - commands.insert_or_spawn_batch(batches); -} + if sprite_meta.sprite_index_buffer.len() != 6 { + sprite_meta.sprite_index_buffer.clear(); + + // NOTE: This code is creating 6 indices pointing to 4 vertices. + // The vertices form the corners of a quad based on their two least significant bits. + // 10 11 + // + // 00 01 + // The sprite shader can then use the two least significant bits as the vertex index. + // The rest of the properties to transform the vertex positions and UVs (which are + // implicit) are baked into the instance transform, and UV offset and scale. + // See bevy_sprite/src/render/sprite.wgsl for the details. 
+ sprite_meta.sprite_index_buffer.push(2); + sprite_meta.sprite_index_buffer.push(0); + sprite_meta.sprite_index_buffer.push(1); + sprite_meta.sprite_index_buffer.push(1); + sprite_meta.sprite_index_buffer.push(3); + sprite_meta.sprite_index_buffer.push(2); + sprite_meta + .sprite_index_buffer + .write_buffer(&render_device, &render_queue); + } + } +} /// [`RenderCommand`] for sprite rendering. pub type DrawSprite = ( SetItemPipeline, @@ -820,19 +916,19 @@ impl RenderCommand

for SetSpriteViewBindGroup; impl RenderCommand

for SetSpriteTextureBindGroup { - type Param = SRes; - type ViewQuery = (); - type ItemQuery = Read; + type Param = (SRes, SRes); + type ViewQuery = Read; + type ItemQuery = (); fn render<'w>( - _item: &P, - _view: (), - batch: Option<&'_ SpriteBatch>, - image_bind_groups: SystemParamItem<'w, '_, Self::Param>, + item: &P, + view: ROQueryItem<'w, Self::ViewQuery>, + _entity: Option<()>, + (image_bind_groups, batches): SystemParamItem<'w, '_, Self::Param>, pass: &mut TrackedRenderPass<'w>, ) -> RenderCommandResult { let image_bind_groups = image_bind_groups.into_inner(); - let Some(batch) = batch else { + let Some(batch) = batches.get(&(view.retained_view_entity, item.entity())) else { return RenderCommandResult::Skip; }; @@ -850,19 +946,19 @@ impl RenderCommand

for SetSpriteTextureBindGrou pub struct DrawSpriteBatch; impl RenderCommand

for DrawSpriteBatch { - type Param = SRes; - type ViewQuery = (); - type ItemQuery = Read; + type Param = (SRes, SRes); + type ViewQuery = Read; + type ItemQuery = (); fn render<'w>( - _item: &P, - _view: (), - batch: Option<&'_ SpriteBatch>, - sprite_meta: SystemParamItem<'w, '_, Self::Param>, + item: &P, + view: ROQueryItem<'w, Self::ViewQuery>, + _entity: Option<()>, + (sprite_meta, batches): SystemParamItem<'w, '_, Self::Param>, pass: &mut TrackedRenderPass<'w>, ) -> RenderCommandResult { let sprite_meta = sprite_meta.into_inner(); - let Some(batch) = batch else { + let Some(batch) = batches.get(&(view.retained_view_entity, item.entity())) else { return RenderCommandResult::Skip; }; @@ -883,3 +979,89 @@ impl RenderCommand

for DrawSpriteBatch { RenderCommandResult::Success } } + +/// Scales a texture to fit within a given quad size with keeping the aspect ratio. +fn apply_scaling( + scaling_mode: ScalingMode, + texture_size: Vec2, + quad_size: &mut Vec2, + quad_translation: &mut Vec2, + uv_offset_scale: &mut Vec4, +) { + let quad_ratio = quad_size.x / quad_size.y; + let texture_ratio = texture_size.x / texture_size.y; + let tex_quad_scale = texture_ratio / quad_ratio; + let quad_tex_scale = quad_ratio / texture_ratio; + + match scaling_mode { + ScalingMode::FillCenter => { + if quad_ratio > texture_ratio { + // offset texture to center by y coordinate + uv_offset_scale.y += (uv_offset_scale.w - uv_offset_scale.w * tex_quad_scale) * 0.5; + // sum up scales + uv_offset_scale.w *= tex_quad_scale; + } else { + // offset texture to center by x coordinate + uv_offset_scale.x += (uv_offset_scale.z - uv_offset_scale.z * quad_tex_scale) * 0.5; + uv_offset_scale.z *= quad_tex_scale; + }; + } + ScalingMode::FillStart => { + if quad_ratio > texture_ratio { + uv_offset_scale.y += uv_offset_scale.w - uv_offset_scale.w * tex_quad_scale; + uv_offset_scale.w *= tex_quad_scale; + } else { + uv_offset_scale.z *= quad_tex_scale; + } + } + ScalingMode::FillEnd => { + if quad_ratio > texture_ratio { + uv_offset_scale.w *= tex_quad_scale; + } else { + uv_offset_scale.x += uv_offset_scale.z - uv_offset_scale.z * quad_tex_scale; + uv_offset_scale.z *= quad_tex_scale; + } + } + ScalingMode::FitCenter => { + if texture_ratio > quad_ratio { + // Scale based on width + quad_size.y *= quad_tex_scale; + } else { + // Scale based on height + quad_size.x *= tex_quad_scale; + } + } + ScalingMode::FitStart => { + if texture_ratio > quad_ratio { + // The quad is scaled to match the image ratio, and the quad translation is adjusted + // to start of the quad within the original quad size. 
+ let scale = Vec2::new(1.0, quad_tex_scale); + let new_quad = *quad_size * scale; + let offset = *quad_size - new_quad; + *quad_translation = Vec2::new(0.0, -offset.y); + *quad_size = new_quad; + } else { + let scale = Vec2::new(tex_quad_scale, 1.0); + let new_quad = *quad_size * scale; + let offset = *quad_size - new_quad; + *quad_translation = Vec2::new(offset.x, 0.0); + *quad_size = new_quad; + } + } + ScalingMode::FitEnd => { + if texture_ratio > quad_ratio { + let scale = Vec2::new(1.0, quad_tex_scale); + let new_quad = *quad_size * scale; + let offset = *quad_size - new_quad; + *quad_translation = Vec2::new(0.0, offset.y); + *quad_size = new_quad; + } else { + let scale = Vec2::new(tex_quad_scale, 1.0); + let new_quad = *quad_size * scale; + let offset = *quad_size - new_quad; + *quad_translation = Vec2::new(-offset.x, 0.0); + *quad_size = new_quad; + } + } + } +} diff --git a/crates/bevy_sprite/src/sprite.rs b/crates/bevy_sprite/src/sprite.rs index c6550f51d7e62..32b8ebb49e6f3 100644 --- a/crates/bevy_sprite/src/sprite.rs +++ b/crates/bevy_sprite/src/sprite.rs @@ -1,10 +1,8 @@ use bevy_asset::{Assets, Handle}; use bevy_color::Color; -use bevy_ecs::{ - component::{require, Component}, - reflect::ReflectComponent, -}; -use bevy_image::Image; +use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::{component::Component, reflect::ReflectComponent}; +use bevy_image::{Image, TextureAtlas, TextureAtlasLayout}; use bevy_math::{Rect, UVec2, Vec2}; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; use bevy_render::{ @@ -13,12 +11,12 @@ use bevy_render::{ }; use bevy_transform::components::Transform; -use crate::{TextureAtlas, TextureAtlasLayout, TextureSlicer}; +use crate::TextureSlicer; /// Describes a sprite to be rendered to a 2D camera #[derive(Component, Debug, Default, Clone, Reflect)] #[require(Transform, Visibility, SyncToRenderWorld, VisibilityClass)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[component(on_add = 
view::add_visibility_class::)] pub struct Sprite { /// The image used to render the sprite @@ -157,11 +155,14 @@ impl From> for Sprite { /// Controls how the image is altered when scaled. #[derive(Default, Debug, Clone, Reflect, PartialEq)] -#[reflect(Debug)] +#[reflect(Debug, Default, Clone)] pub enum SpriteImageMode { /// The sprite will take on the size of the image by default, and will be stretched or shrunk if [`Sprite::custom_size`] is set. #[default] Auto, + /// The texture will be scaled to fit the rect bounds defined in [`Sprite::custom_size`]. + /// Otherwise no scaling will be applied. + Scale(ScalingMode), /// The texture will be cut in 9 slices, keeping the texture in proportions on resize Sliced(TextureSlicer), /// The texture will be repeated if stretched beyond `stretched_value` @@ -185,43 +186,92 @@ impl SpriteImageMode { SpriteImageMode::Sliced(..) | SpriteImageMode::Tiled { .. } ) } + + /// Returns [`ScalingMode`] if scale is presented or [`Option::None`] otherwise. + #[inline] + #[must_use] + pub const fn scale(&self) -> Option { + if let SpriteImageMode::Scale(scale) = self { + Some(*scale) + } else { + None + } + } } -/// How a sprite is positioned relative to its [`Transform`]. -/// It defaults to `Anchor::Center`. -#[derive(Component, Debug, Clone, Copy, PartialEq, Default, Reflect)] -#[reflect(Component, Default, Debug, PartialEq)] -#[doc(alias = "pivot")] -pub enum Anchor { +/// Represents various modes for proportional scaling of a texture. +/// +/// Can be used in [`SpriteImageMode::Scale`]. +#[derive(Debug, Clone, Copy, PartialEq, Default, Reflect)] +#[reflect(Debug, Default, Clone)] +pub enum ScalingMode { + /// Scale the texture uniformly (maintain the texture's aspect ratio) + /// so that both dimensions (width and height) of the texture will be equal + /// to or larger than the corresponding dimension of the target rectangle. + /// Fill sprite with a centered texture. 
#[default] - Center, - BottomLeft, - BottomCenter, - BottomRight, - CenterLeft, - CenterRight, - TopLeft, - TopCenter, - TopRight, - /// Custom anchor point. Top left is `(-0.5, 0.5)`, center is `(0.0, 0.0)`. The value will - /// be scaled with the sprite size. - Custom(Vec2), + FillCenter, + /// Scales the texture to fill the target rectangle while maintaining its aspect ratio. + /// One dimension of the texture will match the rectangle's size, + /// while the other dimension may exceed it. + /// The exceeding portion is aligned to the start: + /// * Horizontal overflow is left-aligned if the width exceeds the rectangle. + /// * Vertical overflow is top-aligned if the height exceeds the rectangle. + FillStart, + /// Scales the texture to fill the target rectangle while maintaining its aspect ratio. + /// One dimension of the texture will match the rectangle's size, + /// while the other dimension may exceed it. + /// The exceeding portion is aligned to the end: + /// * Horizontal overflow is right-aligned if the width exceeds the rectangle. + /// * Vertical overflow is bottom-aligned if the height exceeds the rectangle. + FillEnd, + /// Scaling the texture will maintain the original aspect ratio + /// and ensure that the original texture fits entirely inside the rect. + /// At least one axis (x or y) will fit exactly. The result is centered inside the rect. + FitCenter, + /// Scaling the texture will maintain the original aspect ratio + /// and ensure that the original texture fits entirely inside rect. + /// At least one axis (x or y) will fit exactly. + /// Aligns the result to the left and top edges of rect. + FitStart, + /// Scaling the texture will maintain the original aspect ratio + /// and ensure that the original texture fits entirely inside rect. + /// At least one axis (x or y) will fit exactly. + /// Aligns the result to the right and bottom edges of rect. 
+ FitEnd, } +/// Normalized (relative to its size) offset of a 2d renderable entity from its [`Transform`]. +#[derive(Component, Debug, Clone, Copy, PartialEq, Deref, DerefMut, Reflect)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] +#[doc(alias = "pivot")] +pub struct Anchor(pub Vec2); + impl Anchor { + pub const BOTTOM_LEFT: Self = Self(Vec2::new(-0.5, -0.5)); + pub const BOTTOM_CENTER: Self = Self(Vec2::new(0.0, -0.5)); + pub const BOTTOM_RIGHT: Self = Self(Vec2::new(0.5, -0.5)); + pub const CENTER_LEFT: Self = Self(Vec2::new(-0.5, 0.0)); + pub const CENTER: Self = Self(Vec2::ZERO); + pub const CENTER_RIGHT: Self = Self(Vec2::new(0.5, 0.0)); + pub const TOP_LEFT: Self = Self(Vec2::new(-0.5, 0.5)); + pub const TOP_CENTER: Self = Self(Vec2::new(0.0, 0.5)); + pub const TOP_RIGHT: Self = Self(Vec2::new(0.5, 0.5)); + pub fn as_vec(&self) -> Vec2 { - match self { - Anchor::Center => Vec2::ZERO, - Anchor::BottomLeft => Vec2::new(-0.5, -0.5), - Anchor::BottomCenter => Vec2::new(0.0, -0.5), - Anchor::BottomRight => Vec2::new(0.5, -0.5), - Anchor::CenterLeft => Vec2::new(-0.5, 0.0), - Anchor::CenterRight => Vec2::new(0.5, 0.0), - Anchor::TopLeft => Vec2::new(-0.5, 0.5), - Anchor::TopCenter => Vec2::new(0.0, 0.5), - Anchor::TopRight => Vec2::new(0.5, 0.5), - Anchor::Custom(point) => *point, - } + self.0 + } +} + +impl Default for Anchor { + fn default() -> Self { + Self::CENTER + } +} + +impl From for Anchor { + fn from(value: Vec2) -> Self { + Self(value) } } @@ -230,10 +280,11 @@ mod tests { use bevy_asset::{Assets, RenderAssetUsages}; use bevy_color::Color; use bevy_image::Image; + use bevy_image::{TextureAtlas, TextureAtlasLayout}; use bevy_math::{Rect, URect, UVec2, Vec2}; use bevy_render::render_resource::{Extent3d, TextureDimension, TextureFormat}; - use crate::{Anchor, TextureAtlas, TextureAtlasLayout}; + use crate::Anchor; use super::Sprite; @@ -304,7 +355,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: 
Anchor::BOTTOM_LEFT, ..Default::default() }; @@ -326,7 +377,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::TopRight, + anchor: Anchor::TOP_RIGHT, ..Default::default() }; @@ -348,7 +399,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, flip_x: true, ..Default::default() }; @@ -371,7 +422,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::TopRight, + anchor: Anchor::TOP_RIGHT, flip_y: true, ..Default::default() }; @@ -395,7 +446,7 @@ mod tests { let sprite = Sprite { image, rect: Some(Rect::new(1.5, 3.0, 3.0, 9.5)), - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, ..Default::default() }; @@ -419,7 +470,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, texture_atlas: Some(TextureAtlas { layout: texture_atlas, index: 0, @@ -447,7 +498,7 @@ mod tests { let sprite = Sprite { image, - anchor: Anchor::BottomLeft, + anchor: Anchor::BOTTOM_LEFT, texture_atlas: Some(TextureAtlas { layout: texture_atlas, index: 0, diff --git a/crates/bevy_sprite/src/texture_slice/border_rect.rs b/crates/bevy_sprite/src/texture_slice/border_rect.rs index adc90a626a859..00e4fcb8b135f 100644 --- a/crates/bevy_sprite/src/texture_slice/border_rect.rs +++ b/crates/bevy_sprite/src/texture_slice/border_rect.rs @@ -1,10 +1,11 @@ -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// Defines the extents of the border of a rectangle. /// /// This struct is used to represent thickness or offsets from the edges /// of a rectangle (left, right, top, and bottom), with values increasing inwards. 
#[derive(Default, Copy, Clone, PartialEq, Debug, Reflect)] +#[reflect(Clone, PartialEq, Default)] pub struct BorderRect { /// Extent of the border along the left edge pub left: f32, diff --git a/crates/bevy_sprite/src/texture_slice/computed_slices.rs b/crates/bevy_sprite/src/texture_slice/computed_slices.rs index 490071a6005ed..f36cf4bfacc78 100644 --- a/crates/bevy_sprite/src/texture_slice/computed_slices.rs +++ b/crates/bevy_sprite/src/texture_slice/computed_slices.rs @@ -1,12 +1,11 @@ -use crate::{ExtractedSprite, Sprite, SpriteImageMode, TextureAtlasLayout}; +use crate::{ExtractedSlice, Sprite, SpriteImageMode, TextureAtlasLayout}; use super::TextureSlice; use bevy_asset::{AssetEvent, Assets}; use bevy_ecs::prelude::*; use bevy_image::Image; use bevy_math::{Rect, Vec2}; -use bevy_transform::prelude::*; -use bevy_utils::HashSet; +use bevy_platform::collections::HashSet; /// Component storing texture slices for tiled or sliced sprite entities /// @@ -15,58 +14,33 @@ use bevy_utils::HashSet; pub struct ComputedTextureSlices(Vec); impl ComputedTextureSlices { - /// Computes [`ExtractedSprite`] iterator from the sprite slices + /// Computes [`ExtractedSlice`] iterator from the sprite slices /// /// # Arguments /// - /// * `transform` - the sprite entity global transform - /// * `original_entity` - the sprite entity /// * `sprite` - The sprite component - /// * `handle` - The sprite texture handle #[must_use] - pub(crate) fn extract_sprites<'a>( + pub(crate) fn extract_slices<'a>( &'a self, - transform: &'a GlobalTransform, - original_entity: Entity, sprite: &'a Sprite, - ) -> impl ExactSizeIterator + 'a { + ) -> impl ExactSizeIterator + 'a { let mut flip = Vec2::ONE; - let [mut flip_x, mut flip_y] = [false; 2]; if sprite.flip_x { flip.x *= -1.0; - flip_x = true; } if sprite.flip_y { flip.y *= -1.0; - flip_y = true; } - self.0.iter().map(move |slice| { - let offset = (slice.offset * flip).extend(0.0); - let transform = 
transform.mul_transform(Transform::from_translation(offset)); - ExtractedSprite { - original_entity: Some(original_entity), - color: sprite.color.into(), - transform, - rect: Some(slice.texture_rect), - custom_size: Some(slice.draw_size), - flip_x, - flip_y, - image_handle_id: sprite.image.id(), - anchor: Self::redepend_anchor_from_sprite_to_slice(sprite, slice), - } + let anchor = sprite.anchor.as_vec() + * sprite + .custom_size + .unwrap_or(sprite.rect.unwrap_or_default().size()); + self.0.iter().map(move |slice| ExtractedSlice { + offset: slice.offset * flip - anchor, + rect: slice.texture_rect, + size: slice.draw_size, }) } - - fn redepend_anchor_from_sprite_to_slice(sprite: &Sprite, slice: &TextureSlice) -> Vec2 { - let sprite_size = sprite - .custom_size - .unwrap_or(sprite.rect.unwrap_or_default().size()); - if sprite_size == Vec2::ZERO { - sprite.anchor.as_vec() - } else { - sprite.anchor.as_vec() * sprite_size / slice.draw_size - } - } } /// Generates sprite slices for a [`Sprite`] with [`SpriteImageMode::Sliced`] or [`SpriteImageMode::Sliced`]. The slices @@ -123,6 +97,9 @@ fn compute_sprite_slices( SpriteImageMode::Auto => { unreachable!("Slices should not be computed for SpriteImageMode::Stretch") } + SpriteImageMode::Scale(_) => { + unreachable!("Slices should not be computed for SpriteImageMode::Scale") + } }; Some(ComputedTextureSlices(slices)) } diff --git a/crates/bevy_sprite/src/texture_slice/mod.rs b/crates/bevy_sprite/src/texture_slice/mod.rs index 2dea51adc6d41..7b1a1e33e2168 100644 --- a/crates/bevy_sprite/src/texture_slice/mod.rs +++ b/crates/bevy_sprite/src/texture_slice/mod.rs @@ -22,12 +22,12 @@ pub struct TextureSlice { } impl TextureSlice { - /// Transforms the given slice in an collection of tiled subdivisions. + /// Transforms the given slice in a collection of tiled subdivisions. 
/// /// # Arguments /// /// * `stretch_value` - The slice will repeat when the ratio between the *drawing dimensions* of texture and the - /// *original texture size* (rect) are above `stretch_value`. + /// *original texture size* (rect) are above `stretch_value`. /// * `tile_x` - should the slice be tiled horizontally /// * `tile_y` - should the slice be tiled vertically #[must_use] @@ -86,7 +86,7 @@ impl TextureSlice { remaining_columns -= size_y; } if slices.len() > 1_000 { - bevy_utils::tracing::warn!("One of your tiled textures has generated {} slices. You might want to use higher stretch values to avoid a great performance cost", slices.len()); + tracing::warn!("One of your tiled textures has generated {} slices. You might want to use higher stretch values to avoid a great performance cost", slices.len()); } slices } diff --git a/crates/bevy_sprite/src/texture_slice/slicer.rs b/crates/bevy_sprite/src/texture_slice/slicer.rs index 310be429796a5..3f8ea1c0b4abd 100644 --- a/crates/bevy_sprite/src/texture_slice/slicer.rs +++ b/crates/bevy_sprite/src/texture_slice/slicer.rs @@ -1,6 +1,6 @@ use super::{BorderRect, TextureSlice}; use bevy_math::{vec2, Rect, Vec2}; -use bevy_reflect::Reflect; +use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// Slices a texture using the **9-slicing** technique. This allows to reuse an image at various sizes /// without needing to prepare multiple assets. The associated texture will be split into nine portions, @@ -11,6 +11,7 @@ use bevy_reflect::Reflect; /// /// See [9-sliced](https://en.wikipedia.org/wiki/9-slice_scaling) textures. #[derive(Debug, Clone, Reflect, PartialEq)] +#[reflect(Clone, PartialEq)] pub struct TextureSlicer { /// Inset values in pixels that define the four slicing lines dividing the texture into nine sections. 
pub border: BorderRect, @@ -24,6 +25,7 @@ pub struct TextureSlicer { /// Defines how a texture slice scales when resized #[derive(Debug, Copy, Clone, Default, Reflect, PartialEq)] +#[reflect(Clone, PartialEq, Default)] pub enum SliceScaleMode { /// The slice will be stretched to fit the area #[default] @@ -217,7 +219,7 @@ impl TextureSlicer { if self.border.left + self.border.right >= rect.size().x || self.border.top + self.border.bottom >= rect.size().y { - bevy_utils::tracing::error!( + tracing::error!( "TextureSlicer::border has out of bounds values. No slicing will be applied" ); return vec![TextureSlice { diff --git a/crates/bevy_state/Cargo.toml b/crates/bevy_state/Cargo.toml index 30ddcfd6eddc2..1ae52fa571670 100644 --- a/crates/bevy_state/Cargo.toml +++ b/crates/bevy_state/Cargo.toml @@ -1,30 +1,64 @@ [package] name = "bevy_state" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Finite state machines for Bevy" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" license = "MIT OR Apache-2.0" keywords = ["bevy"] -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - [features] -default = ["bevy_reflect", "bevy_app", "bevy_hierarchy"] -bevy_reflect = ["dep:bevy_reflect", "bevy_ecs/bevy_reflect"] +default = ["std", "bevy_reflect", "bevy_app"] + +# Functionality + +## Adds runtime reflection support using `bevy_reflect`. +bevy_reflect = [ + "dep:bevy_reflect", + "bevy_ecs/bevy_reflect", + "bevy_app?/bevy_reflect", +] + +## Adds integration with the `bevy_app` plugin API. bevy_app = ["dep:bevy_app"] -bevy_hierarchy = ["dep:bevy_hierarchy"] + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. 
+std = [ + "bevy_ecs/std", + "bevy_utils/std", + "bevy_reflect?/std", + "bevy_app?/std", + "bevy_platform/std", +] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = [ + "bevy_ecs/critical-section", + "bevy_utils/critical-section", + "bevy_app?/critical-section", + "bevy_reflect?/critical-section", + "bevy_platform/critical-section", +] [dependencies] -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_state_macros = { path = "macros", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", optional = true } -bevy_app = { path = "../bevy_app", version = "0.15.0-dev", optional = true } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev", optional = true } +# bevy +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } +bevy_state_macros = { path = "macros", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false, optional = true } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } variadics_please = "1.1" +# other +log = { version = "0.4", default-features = false } + [lints] workspace = true diff --git a/crates/bevy_state/LICENSE-APACHE b/crates/bevy_state/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_state/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_state/LICENSE-MIT b/crates/bevy_state/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_state/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/crates/bevy_state/macros/Cargo.toml b/crates/bevy_state/macros/Cargo.toml index 50a4d468b1508..2f569f395ef03 100644 --- a/crates/bevy_state/macros/Cargo.toml +++ b/crates/bevy_state/macros/Cargo.toml @@ -1,15 +1,15 @@ [package] name = "bevy_state_macros" -version = "0.15.0-dev" +version = "0.16.0-dev" description = "Macros for bevy_state" -edition = "2021" +edition = "2024" license = "MIT OR Apache-2.0" [lib] proc-macro = true [dependencies] -bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.15.0-dev" } +bevy_macro_utils = { path = "../../bevy_macro_utils", version = "0.16.0-dev" } syn = { version = "2.0", features = ["full"] } quote = "1.0" diff --git a/crates/bevy_state/macros/LICENSE-APACHE b/crates/bevy_state/macros/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_state/macros/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_state/macros/LICENSE-MIT b/crates/bevy_state/macros/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_state/macros/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_state/src/app.rs b/crates/bevy_state/src/app.rs index c1aaf0933337a..46a23c9f9ac7d 100644 --- a/crates/bevy_state/src/app.rs +++ b/crates/bevy_state/src/app.rs @@ -1,6 +1,7 @@ use bevy_app::{App, MainScheduleOrder, Plugin, PreStartup, PreUpdate, SubApp}; -use bevy_ecs::{event::Events, schedule::IntoSystemConfigs, world::FromWorld}; -use bevy_utils::{tracing::warn, warn_once}; +use bevy_ecs::{event::Events, schedule::IntoScheduleConfigs, world::FromWorld}; +use bevy_utils::once; +use log::warn; use crate::{ state::{ @@ -87,7 +88,9 @@ pub trait AppExtStates { /// Separate function to only warn once for all state installation methods. 
fn warn_if_no_states_plugin_installed(app: &SubApp) { if !app.is_plugin_added::() { - warn_once!("States were added to the app, but `StatesPlugin` is not installed."); + once!(warn!( + "States were added to the app, but `StatesPlugin` is not installed." + )); } } @@ -313,7 +316,6 @@ impl Plugin for StatesPlugin { #[cfg(test)] mod tests { use crate::{ - self as bevy_state, app::StatesPlugin, state::{State, StateTransition, StateTransitionEvent}, }; diff --git a/crates/bevy_state/src/commands.rs b/crates/bevy_state/src/commands.rs index 036e35d6033ee..d9da362b628f4 100644 --- a/crates/bevy_state/src/commands.rs +++ b/crates/bevy_state/src/commands.rs @@ -1,5 +1,5 @@ use bevy_ecs::{system::Commands, world::World}; -use bevy_utils::tracing::debug; +use log::debug; use crate::state::{FreelyMutableState, NextState}; diff --git a/crates/bevy_state/src/condition.rs b/crates/bevy_state/src/condition.rs index dac281b7a3201..faede71be55c1 100644 --- a/crates/bevy_state/src/condition.rs +++ b/crates/bevy_state/src/condition.rs @@ -9,11 +9,14 @@ use bevy_ecs::{change_detection::DetectChanges, system::Res}; /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_state::prelude::*; +/// # use bevy_app::{App, Update}; +/// # use bevy_state::app::StatesPlugin; /// # #[derive(Resource, Default)] /// # struct Counter(u8); -/// # let mut app = Schedule::default(); -/// # let mut world = World::new(); -/// # world.init_resource::(); +/// # let mut app = App::new(); +/// # app +/// # .init_resource::() +/// # .add_plugins(StatesPlugin); /// #[derive(States, Clone, Copy, Default, Eq, PartialEq, Hash, Debug)] /// enum GameState { /// #[default] @@ -21,7 +24,7 @@ use bevy_ecs::{change_detection::DetectChanges, system::Res}; /// Paused, /// } /// -/// app.add_systems( +/// app.add_systems(Update, /// // `state_exists` will only return true if the /// // given state exists /// my_system.run_if(state_exists::), @@ -31,15 +34,15 @@ use bevy_ecs::{change_detection::DetectChanges, system::Res}; 
/// counter.0 += 1; /// } /// -/// // `GameState` does not yet exist `my_system` won't run -/// app.run(&mut world); -/// assert_eq!(world.resource::().0, 0); +/// // `GameState` does not yet exist so `my_system` won't run +/// app.update(); +/// assert_eq!(app.world().resource::().0, 0); /// -/// world.init_resource::>(); +/// app.init_state::(); /// /// // `GameState` now exists so `my_system` will run -/// app.run(&mut world); -/// assert_eq!(world.resource::().0, 1); +/// app.update(); +/// assert_eq!(app.world().resource::().0, 1); /// ``` pub fn state_exists(current_state: Option>>) -> bool { current_state.is_some() @@ -55,11 +58,14 @@ pub fn state_exists(current_state: Option>>) -> bool { /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_state::prelude::*; +/// # use bevy_app::{App, Update}; +/// # use bevy_state::app::StatesPlugin; /// # #[derive(Resource, Default)] /// # struct Counter(u8); -/// # let mut app = Schedule::default(); -/// # let mut world = World::new(); -/// # world.init_resource::(); +/// # let mut app = App::new(); +/// # app +/// # .init_resource::() +/// # .add_plugins(StatesPlugin); /// #[derive(States, Clone, Copy, Default, Eq, PartialEq, Hash, Debug)] /// enum GameState { /// #[default] @@ -67,14 +73,14 @@ pub fn state_exists(current_state: Option>>) -> bool { /// Paused, /// } /// -/// world.init_resource::>(); -/// -/// app.add_systems(( -/// // `in_state` will only return true if the -/// // given state equals the given value -/// play_system.run_if(in_state(GameState::Playing)), -/// pause_system.run_if(in_state(GameState::Paused)), -/// )); +/// app +/// .init_state::() +/// .add_systems(Update, ( +/// // `in_state` will only return true if the +/// // given state equals the given value +/// play_system.run_if(in_state(GameState::Playing)), +/// pause_system.run_if(in_state(GameState::Paused)), +/// )); /// /// fn play_system(mut counter: ResMut) { /// counter.0 += 1; @@ -85,14 +91,14 @@ pub fn state_exists(current_state: 
Option>>) -> bool { /// } /// /// // We default to `GameState::Playing` so `play_system` runs -/// app.run(&mut world); -/// assert_eq!(world.resource::().0, 1); +/// app.update(); +/// assert_eq!(app.world().resource::().0, 1); /// -/// *world.resource_mut::>() = State::new(GameState::Paused); +/// app.insert_state(GameState::Paused); /// /// // Now that we are in `GameState::Pause`, `pause_system` will run -/// app.run(&mut world); -/// assert_eq!(world.resource::().0, 0); +/// app.update(); +/// assert_eq!(app.world().resource::().0, 0); /// ``` pub fn in_state(state: S) -> impl FnMut(Option>>) -> bool + Clone { move |current_state: Option>>| match current_state { @@ -114,11 +120,14 @@ pub fn in_state(state: S) -> impl FnMut(Option>>) -> boo /// ``` /// # use bevy_ecs::prelude::*; /// # use bevy_state::prelude::*; +/// # use bevy_state::app::StatesPlugin; +/// # use bevy_app::{App, Update}; /// # #[derive(Resource, Default)] /// # struct Counter(u8); -/// # let mut app = Schedule::default(); -/// # let mut world = World::new(); -/// # world.init_resource::(); +/// # let mut app = App::new(); +/// # app +/// # .init_resource::() +/// # .add_plugins(StatesPlugin); /// #[derive(States, Clone, Copy, Default, Eq, PartialEq, Hash, Debug)] /// enum GameState { /// #[default] @@ -126,32 +135,32 @@ pub fn in_state(state: S) -> impl FnMut(Option>>) -> boo /// Paused, /// } /// -/// world.init_resource::>(); -/// -/// app.add_systems( -/// // `state_changed` will only return true if the -/// // given states value has just been updated or -/// // the state has just been added -/// my_system.run_if(state_changed::), -/// ); +/// app +/// .init_state::() +/// .add_systems(Update, +/// // `state_changed` will only return true if the +/// // given states value has just been updated or +/// // the state has just been added +/// my_system.run_if(state_changed::), +/// ); /// /// fn my_system(mut counter: ResMut) { /// counter.0 += 1; /// } /// /// // `GameState` has just been 
added so `my_system` will run -/// app.run(&mut world); -/// assert_eq!(world.resource::().0, 1); +/// app.update(); +/// assert_eq!(app.world().resource::().0, 1); /// /// // `GameState` has not been updated so `my_system` will not run -/// app.run(&mut world); -/// assert_eq!(world.resource::().0, 1); +/// app.update(); +/// assert_eq!(app.world().resource::().0, 1); /// -/// *world.resource_mut::>() = State::new(GameState::Paused); +/// app.insert_state(GameState::Paused); /// /// // Now that `GameState` has been updated `my_system` will run -/// app.run(&mut world); -/// assert_eq!(world.resource::().0, 2); +/// app.update(); +/// assert_eq!(app.world().resource::().0, 2); /// ``` pub fn state_changed(current_state: Option>>) -> bool { let Some(current_state) = current_state else { @@ -162,9 +171,7 @@ pub fn state_changed(current_state: Option>>) -> bool { #[cfg(test)] mod tests { - use crate as bevy_state; - - use bevy_ecs::schedule::{Condition, IntoSystemConfigs, Schedule}; + use bevy_ecs::schedule::{Condition, IntoScheduleConfigs, Schedule}; use crate::prelude::*; use bevy_state_macros::States; diff --git a/crates/bevy_state/src/lib.rs b/crates/bevy_state/src/lib.rs index 796516f5a6b9e..b2714b50c5025 100644 --- a/crates/bevy_state/src/lib.rs +++ b/crates/bevy_state/src/lib.rs @@ -1,10 +1,12 @@ +#![no_std] + //! In Bevy, states are app-wide interdependent, finite state machines that are generally used to model the large scale structure of your program: whether a game is paused, if the player is in combat, if assets are loaded and so on. //! //! This module provides 3 distinct types of state, all of which implement the [`States`](state::States) trait: //! //! - Standard [`States`](state::States) can only be changed by manually setting the [`NextState`](state::NextState) resource. //! These states are the baseline on which the other state types are built, and can be used on -//! their own for many simple patterns. 
See the [state example](https://github.com/bevyengine/bevy/blob/latest/examples/state/state.rs) +//! their own for many simple patterns. See the [states example](https://github.com/bevyengine/bevy/blob/latest/examples/state/states.rs) //! for a simple use case. //! - [`SubStates`](state::SubStates) are children of other states - they can be changed manually using [`NextState`](state::NextState), //! but are removed from the [`World`](bevy_ecs::prelude::World) if the source states aren't in the right state. See the [sub_states example](https://github.com/bevyengine/bevy/blob/latest/examples/state/sub_states.rs) @@ -36,6 +38,14 @@ )] #![cfg_attr(any(docsrs, docsrs_dep), feature(rustdoc_internals))] +#[cfg(feature = "std")] +extern crate std; + +extern crate alloc; + +// Required to make proc macros work in bevy itself. +extern crate self as bevy_state; + #[cfg(feature = "bevy_app")] /// Provides [`App`](bevy_app::App) and [`SubApp`](bevy_app::SubApp) with state installation methods pub mod app; diff --git a/crates/bevy_state/src/reflect.rs b/crates/bevy_state/src/reflect.rs index c620cd4638466..0ff09c3759d21 100644 --- a/crates/bevy_state/src/reflect.rs +++ b/crates/bevy_state/src/reflect.rs @@ -98,7 +98,6 @@ impl FromType for ReflectFreelyMu #[cfg(test)] mod tests { - use crate as bevy_state; use crate::{ app::{AppExtStates, StatesPlugin}, reflect::{ReflectFreelyMutableState, ReflectState}, diff --git a/crates/bevy_state/src/state/freely_mutable_state.rs b/crates/bevy_state/src/state/freely_mutable_state.rs index 2bc186ebe0fb2..aef72e15fafb9 100644 --- a/crates/bevy_state/src/state/freely_mutable_state.rs +++ b/crates/bevy_state/src/state/freely_mutable_state.rs @@ -1,7 +1,7 @@ use bevy_ecs::{ event::EventWriter, prelude::Schedule, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs}, + schedule::IntoScheduleConfigs, system::{Commands, IntoSystem, ResMut}, }; diff --git a/crates/bevy_state/src/state/mod.rs b/crates/bevy_state/src/state/mod.rs index 
d02d3a32ed452..9267478281575 100644 --- a/crates/bevy_state/src/state/mod.rs +++ b/crates/bevy_state/src/state/mod.rs @@ -17,11 +17,11 @@ pub use transitions::*; #[cfg(test)] mod tests { + use alloc::vec::Vec; use bevy_ecs::{event::EventRegistry, prelude::*}; use bevy_state_macros::{States, SubStates}; use super::*; - use crate as bevy_state; #[derive(States, PartialEq, Eq, Debug, Default, Hash, Clone)] enum SimpleState { diff --git a/crates/bevy_state/src/state/resources.rs b/crates/bevy_state/src/state/resources.rs index 71c192cb2a3e7..4bbe6d1b1f24e 100644 --- a/crates/bevy_state/src/state/resources.rs +++ b/crates/bevy_state/src/state/resources.rs @@ -2,7 +2,8 @@ use core::ops::Deref; use bevy_ecs::{ change_detection::DetectChangesMut, - system::{ResMut, Resource}, + resource::Resource, + system::ResMut, world::{FromWorld, World}, }; diff --git a/crates/bevy_state/src/state/state_set.rs b/crates/bevy_state/src/state/state_set.rs index ca167d4970596..5199662027e1c 100644 --- a/crates/bevy_state/src/state/state_set.rs +++ b/crates/bevy_state/src/state/state_set.rs @@ -1,6 +1,6 @@ use bevy_ecs::{ event::{EventReader, EventWriter}, - schedule::{IntoSystemConfigs, IntoSystemSetConfigs, Schedule}, + schedule::{IntoScheduleConfigs, Schedule}, system::{Commands, IntoSystem, Res, ResMut}, }; use variadics_please::all_tuples; diff --git a/crates/bevy_state/src/state/states.rs b/crates/bevy_state/src/state/states.rs index 8e2422d46a361..163e689f0a754 100644 --- a/crates/bevy_state/src/state/states.rs +++ b/crates/bevy_state/src/state/states.rs @@ -21,8 +21,8 @@ use core::hash::Hash; /// /// ``` /// use bevy_state::prelude::*; -/// use bevy_ecs::prelude::IntoSystemConfigs; -/// use bevy_ecs::system::ResMut; +/// use bevy_ecs::prelude::IntoScheduleConfigs; +/// use bevy_ecs::system::{ResMut, ScheduleSystem}; /// /// /// #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Default, States)] @@ -46,7 +46,7 @@ use core::hash::Hash; /// /// # struct AppMock; /// # impl AppMock { 
-/// # fn add_systems(&mut self, schedule: S, systems: impl IntoSystemConfigs) {} +/// # fn add_systems(&mut self, schedule: S, systems: impl IntoScheduleConfigs) {} /// # } /// # struct Update; /// # let mut app = AppMock; diff --git a/crates/bevy_state/src/state/transitions.rs b/crates/bevy_state/src/state/transitions.rs index 4c4311a9d53a7..be289260543e6 100644 --- a/crates/bevy_state/src/state/transitions.rs +++ b/crates/bevy_state/src/state/transitions.rs @@ -2,7 +2,7 @@ use core::{marker::PhantomData, mem}; use bevy_ecs::{ event::{Event, EventReader, EventWriter}, - schedule::{IntoSystemSetConfigs, Schedule, ScheduleLabel, Schedules, SystemSet}, + schedule::{IntoScheduleConfigs, Schedule, ScheduleLabel, Schedules, SystemSet}, system::{Commands, In, ResMut}, world::World, }; @@ -12,13 +12,13 @@ use super::{resources::State, states::States}; /// The label of a [`Schedule`] that **only** runs whenever [`State`] enters the provided state. /// /// This schedule ignores identity transitions. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct OnEnter(pub S); /// The label of a [`Schedule`] that **only** runs whenever [`State`] exits the provided state. /// /// This schedule ignores identity transitions. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct OnExit(pub S); /// The label of a [`Schedule`] that **only** runs whenever [`State`] @@ -27,7 +27,7 @@ pub struct OnExit(pub S); /// Systems added to this schedule are always ran *after* [`OnExit`], and *before* [`OnEnter`]. /// /// This schedule will run on identity transitions. -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct OnTransition { /// The state being exited. 
pub exited: S, @@ -37,7 +37,7 @@ pub struct OnTransition { /// Runs [state transitions](States). /// -/// By default, it will be triggered after `PreUpdate`, but +/// By default, it will be triggered once before [`PreStartup`] and then each frame after [`PreUpdate`], but /// you can manually trigger it at arbitrary times by creating an exclusive /// system to run the schedule. /// @@ -49,7 +49,10 @@ pub struct OnTransition { /// let _ = world.try_run_schedule(StateTransition); /// } /// ``` -#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash)] +/// +/// [`PreStartup`]: https://docs.rs/bevy/latest/bevy/prelude/struct.PreStartup.html +/// [`PreUpdate`]: https://docs.rs/bevy/latest/bevy/prelude/struct.PreUpdate.html +#[derive(ScheduleLabel, Clone, Debug, PartialEq, Eq, Hash, Default)] pub struct StateTransition; /// Event sent when any state transition of `S` happens. @@ -145,7 +148,7 @@ pub(crate) fn internal_apply_state_transition( // Transition events are sent even for same state transitions // Although enter and exit schedules are not run by default. - event.send(StateTransitionEvent { + event.write(StateTransitionEvent { exited: Some(exited.clone()), entered: Some(entered.clone()), }); @@ -154,7 +157,7 @@ pub(crate) fn internal_apply_state_transition( // If the [`State`] resource does not exist, we create it, compute dependent states, send a transition event and register the `OnEnter` schedule. 
commands.insert_resource(State(entered.clone())); - event.send(StateTransitionEvent { + event.write(StateTransitionEvent { exited: None, entered: Some(entered.clone()), }); @@ -166,7 +169,7 @@ pub(crate) fn internal_apply_state_transition( if let Some(resource) = current_state { commands.remove_resource::>(); - event.send(StateTransitionEvent { + event.write(StateTransitionEvent { exited: Some(resource.get().clone()), entered: None, }); diff --git a/crates/bevy_state/src/state_scoped.rs b/crates/bevy_state/src/state_scoped.rs index 3e9f49f51734d..b58017d6e347f 100644 --- a/crates/bevy_state/src/state_scoped.rs +++ b/crates/bevy_state/src/state_scoped.rs @@ -6,8 +6,6 @@ use bevy_ecs::{ event::EventReader, system::{Commands, Query}, }; -#[cfg(feature = "bevy_hierarchy")] -use bevy_hierarchy::DespawnRecursiveExt; #[cfg(feature = "bevy_reflect")] use bevy_reflect::prelude::*; @@ -19,11 +17,10 @@ use crate::state::{StateTransitionEvent, States}; /// To enable this feature remember to add the attribute `#[states(scoped_entities)]` when deriving [`States`]. /// It's also possible to enable it when adding the state to an app with [`enable_state_scoped_entities`](crate::app::AppExtStates::enable_state_scoped_entities). /// -/// If `bevy_hierarchy` feature is enabled, which it is by default, the despawn will be recursive. 
-/// /// ``` /// use bevy_state::prelude::*; /// use bevy_ecs::prelude::*; +/// use bevy_ecs::system::ScheduleSystem; /// /// #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Default, States)] /// #[states(scoped_entities)] @@ -48,7 +45,7 @@ use crate::state::{StateTransitionEvent, States}; /// # impl AppMock { /// # fn init_state(&mut self) {} /// # fn enable_state_scoped_entities(&mut self) {} -/// # fn add_systems(&mut self, schedule: S, systems: impl IntoSystemConfigs) {} +/// # fn add_systems(&mut self, schedule: S, systems: impl IntoScheduleConfigs) {} /// # } /// # struct Update; /// # let mut app = AppMock; @@ -57,13 +54,20 @@ use crate::state::{StateTransitionEvent, States}; /// app.add_systems(OnEnter(GameState::InGame), spawn_player); /// ``` #[derive(Component, Clone)] -#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component))] +#[cfg_attr(feature = "bevy_reflect", derive(Reflect), reflect(Component, Clone))] pub struct StateScoped(pub S); +impl Default for StateScoped +where + S: States + Default, +{ + fn default() -> Self { + Self(S::default()) + } +} + /// Removes entities marked with [`StateScoped`] /// when their state no longer matches the world state. -/// -/// If `bevy_hierarchy` feature is enabled, which it is by default, the despawn will be recursive. 
pub fn clear_state_scoped_entities( mut commands: Commands, mut transitions: EventReader>, @@ -83,9 +87,6 @@ pub fn clear_state_scoped_entities( }; for (entity, binding) in &query { if binding.0 == *exited { - #[cfg(feature = "bevy_hierarchy")] - commands.entity(entity).despawn_recursive(); - #[cfg(not(feature = "bevy_hierarchy"))] commands.entity(entity).despawn(); } } diff --git a/crates/bevy_state/src/state_scoped_events.rs b/crates/bevy_state/src/state_scoped_events.rs index fbeafe545310b..c84f5c60bf0a0 100644 --- a/crates/bevy_state/src/state_scoped_events.rs +++ b/crates/bevy_state/src/state_scoped_events.rs @@ -1,12 +1,14 @@ +use alloc::vec::Vec; use core::marker::PhantomData; use bevy_app::{App, SubApp}; use bevy_ecs::{ event::{Event, EventReader, Events}, - system::{Commands, Resource}, + resource::Resource, + system::Commands, world::World, }; -use bevy_utils::HashMap; +use bevy_platform::collections::HashMap; use crate::state::{FreelyMutableState, OnExit, StateTransitionEvent}; diff --git a/crates/bevy_tasks/Cargo.toml b/crates/bevy_tasks/Cargo.toml index 00ec38e4005c2..07c20b9750f5b 100644 --- a/crates/bevy_tasks/Cargo.toml +++ b/crates/bevy_tasks/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_tasks" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "A task executor for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -10,64 +10,75 @@ keywords = ["bevy"] [features] default = ["std", "async_executor"] -std = [ - "futures-lite/std", - "async-task/std", - "spin/std", - "edge-executor?/std", - "portable-atomic-util?/std", -] + +# Functionality + +## Enables multi-threading support. +## Without this feature, all tasks will be run on a single thread. multi_threaded = ["std", "dep:async-channel", "dep:concurrent-queue"] + +## Uses `async-executor` as a task execution backend. +## This backend is incompatible with `no_std` targets. 
async_executor = ["std", "dep:async-executor"] -edge_executor = ["dep:edge-executor"] -critical-section = [ - "dep:critical-section", - "edge-executor?/critical-section", - "portable-atomic?/critical-section", -] -portable-atomic = [ - "dep:portable-atomic", - "dep:portable-atomic-util", - "edge-executor?/portable-atomic", - "async-task/portable-atomic", - "spin/portable_atomic", + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. +std = ["futures-lite/std", "async-task/std", "bevy_platform/std"] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = ["bevy_platform/critical-section"] + +## Enables use of browser APIs. +## Note this is currently only applicable on `wasm32` architectures. +web = [ + "bevy_platform/web", + "dep:wasm-bindgen-futures", + "dep:pin-project", + "dep:futures-channel", ] [dependencies] +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "alloc", +] } + futures-lite = { version = "2.0.1", default-features = false, features = [ "alloc", ] } async-task = { version = "4.4.0", default-features = false } -spin = { version = "0.9.8", default-features = false, features = [ - "spin_mutex", - "rwlock", - "once", -] } derive_more = { version = "1", default-features = false, features = [ "deref", "deref_mut", ] } - +cfg-if = "1.0.0" async-executor = { version = "1.11", optional = true } -edge-executor = { version = "0.4.1", default-features = false, optional = true } async-channel = { version = "2.3.0", optional = true } async-io = { version = "2.0.0", optional = true } concurrent-queue = { version = "2.0.0", optional = true } -critical-section = { version = "1.2.0", optional = true } -portable-atomic = { version = "1", default-features = 
false, features = [ - "fallback", -], optional = true } -portable-atomic-util = { version = "0.2.4", features = [ +atomic-waker = { version = "1", default-features = false } +crossbeam-queue = { version = "0.3", default-features = false, features = [ "alloc", -], optional = true } +] } [target.'cfg(target_arch = "wasm32")'.dependencies] -wasm-bindgen-futures = "0.4" -pin-project = "1" -futures-channel = "0.3" +wasm-bindgen-futures = { version = "0.4", optional = true } +pin-project = { version = "1", optional = true } +futures-channel = { version = "0.3", optional = true } -[dev-dependencies] -web-time = { version = "1.1" } +[target.'cfg(not(all(target_has_atomic = "8", target_has_atomic = "16", target_has_atomic = "32", target_has_atomic = "64", target_has_atomic = "ptr")))'.dependencies] +async-task = { version = "4.4.0", default-features = false, features = [ + "portable-atomic", +] } +heapless = { version = "0.8", default-features = false, features = [ + "portable-atomic", +] } +atomic-waker = { version = "1", default-features = false, features = [ + "portable-atomic", +] } [lints] workspace = true diff --git a/crates/bevy_tasks/LICENSE-APACHE b/crates/bevy_tasks/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_tasks/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_tasks/LICENSE-MIT b/crates/bevy_tasks/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_tasks/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_tasks/README.md b/crates/bevy_tasks/README.md index 2af6a606f65fa..b03d2fcf979da 100644 --- a/crates/bevy_tasks/README.md +++ b/crates/bevy_tasks/README.md @@ -36,7 +36,7 @@ The determining factor for what kind of work should go in each pool is latency r ## `no_std` Support -To enable `no_std` support in this crate, you will need to disable default features, and enable the `edge_executor` and `critical-section` features. For platforms without full support for Rust atomics, you may also need to enable the `portable-atomic` feature. +To enable `no_std` support in this crate, you will need to disable default features, and enable the `edge_executor` and `critical-section` features. 
[bevy]: https://bevyengine.org [rayon]: https://github.com/rayon-rs/rayon diff --git a/crates/bevy_tasks/examples/busy_behavior.rs b/crates/bevy_tasks/examples/busy_behavior.rs index ee92ec359382c..8dc56172df2a5 100644 --- a/crates/bevy_tasks/examples/busy_behavior.rs +++ b/crates/bevy_tasks/examples/busy_behavior.rs @@ -2,8 +2,11 @@ //! for 100ms. It's expected to take about a second to run (assuming the machine has >= 4 logical //! cores) +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] + +use bevy_platform::time::Instant; use bevy_tasks::TaskPoolBuilder; -use web_time::{Duration, Instant}; +use core::time::Duration; fn main() { let pool = TaskPoolBuilder::new() diff --git a/crates/bevy_tasks/examples/idle_behavior.rs b/crates/bevy_tasks/examples/idle_behavior.rs index 2887163170875..06276e916d1fb 100644 --- a/crates/bevy_tasks/examples/idle_behavior.rs +++ b/crates/bevy_tasks/examples/idle_behavior.rs @@ -2,8 +2,11 @@ //! spinning. Other than the one thread, the system should remain idle, demonstrating good behavior //! for small workloads. +#![expect(clippy::print_stdout, reason = "Allowed in examples.")] + +use bevy_platform::time::Instant; use bevy_tasks::TaskPoolBuilder; -use web_time::{Duration, Instant}; +use core::time::Duration; fn main() { let pool = TaskPoolBuilder::new() diff --git a/crates/bevy_tasks/src/edge_executor.rs b/crates/bevy_tasks/src/edge_executor.rs new file mode 100644 index 0000000000000..70e11c8a433cf --- /dev/null +++ b/crates/bevy_tasks/src/edge_executor.rs @@ -0,0 +1,652 @@ +//! Alternative to `async_executor` based on [`edge_executor`] by Ivan Markov. +//! +//! It has been vendored along with its tests to update several outdated dependencies. +//! +//! [`async_executor`]: https://github.com/smol-rs/async-executor +//! 
[`edge_executor`]: https://github.com/ivmarkov/edge-executor + +#![expect(unsafe_code, reason = "original implementation relies on unsafe")] +#![expect( + dead_code, + reason = "keeping methods from original implementation for transparency" +)] + +// TODO: Create a more tailored replacement, possibly integrating [Fotre](https://github.com/NthTensor/Forte) + +use alloc::rc::Rc; +use core::{ + future::{poll_fn, Future}, + marker::PhantomData, + task::{Context, Poll}, +}; + +use async_task::{Runnable, Task}; +use atomic_waker::AtomicWaker; +use bevy_platform::sync::{Arc, LazyLock}; +use futures_lite::FutureExt; + +/// An async executor. +/// +/// # Examples +/// +/// A multi-threaded executor: +/// +/// ```ignore +/// use async_channel::unbounded; +/// use easy_parallel::Parallel; +/// +/// use edge_executor::{Executor, block_on}; +/// +/// let ex: Executor = Default::default(); +/// let (signal, shutdown) = unbounded::<()>(); +/// +/// Parallel::new() +/// // Run four executor threads. +/// .each(0..4, |_| block_on(ex.run(shutdown.recv()))) +/// // Run the main future on the current thread. +/// .finish(|| block_on(async { +/// println!("Hello world!"); +/// drop(signal); +/// })); +/// ``` +pub struct Executor<'a, const C: usize = 64> { + state: LazyLock>>, + _invariant: PhantomData>, +} + +impl<'a, const C: usize> Executor<'a, C> { + /// Creates a new executor. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::Executor; + /// + /// let ex: Executor = Default::default(); + /// ``` + pub const fn new() -> Self { + Self { + state: LazyLock::new(|| Arc::new(State::new())), + _invariant: PhantomData, + } + } + + /// Spawns a task onto the executor. 
+ /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::Executor; + /// + /// let ex: Executor = Default::default(); + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// ``` + /// + /// Note that if the executor's queue size is equal to the number of currently + /// spawned and running tasks, spawning this additional task might cause the executor to panic + /// later, when the task is scheduled for polling. + pub fn spawn(&self, fut: F) -> Task + where + F: Future + Send + 'a, + F::Output: Send + 'a, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.spawn_unchecked(fut) } + } + + /// Attempts to run a task if at least one is scheduled. + /// + /// Running a scheduled task means simply polling its future once. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::Executor; + /// + /// let ex: Executor = Default::default(); + /// assert!(!ex.try_tick()); // no tasks to run + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// assert!(ex.try_tick()); // a task was found + /// ``` + pub fn try_tick(&self) -> bool { + if let Some(runnable) = self.try_runnable() { + runnable.run(); + + true + } else { + false + } + } + + /// Runs a single task asynchronously. + /// + /// Running a task means simply polling its future once. + /// + /// If no tasks are scheduled when this method is called, it will wait until one is scheduled. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{Executor, block_on}; + /// + /// let ex: Executor = Default::default(); + /// + /// let task = ex.spawn(async { + /// println!("Hello world"); + /// }); + /// block_on(ex.tick()); // runs the task + /// ``` + pub async fn tick(&self) { + self.runnable().await.run(); + } + + /// Runs the executor asynchronously until the given future completes. 
+ /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{Executor, block_on}; + /// + /// let ex: Executor = Default::default(); + /// + /// let task = ex.spawn(async { 1 + 2 }); + /// let res = block_on(ex.run(async { task.await * 2 })); + /// + /// assert_eq!(res, 6); + /// ``` + pub async fn run(&self, fut: F) -> F::Output + where + F: Future + Send + 'a, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.run_unchecked(fut).await } + } + + /// Waits for the next runnable task to run. + async fn runnable(&self) -> Runnable { + poll_fn(|ctx| self.poll_runnable(ctx)).await + } + + /// Polls the first task scheduled for execution by the executor. + fn poll_runnable(&self, ctx: &Context<'_>) -> Poll { + self.state().waker.register(ctx.waker()); + + if let Some(runnable) = self.try_runnable() { + Poll::Ready(runnable) + } else { + Poll::Pending + } + } + + /// Pops the first task scheduled for execution by the executor. + /// + /// Returns + /// - `None` - if no task was scheduled for execution + /// - `Some(Runnnable)` - the first task scheduled for execution. Calling `Runnable::run` will + /// execute the task. In other words, it will poll its future. 
+ fn try_runnable(&self) -> Option { + let runnable; + + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + { + runnable = self.state().queue.pop(); + } + + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + { + runnable = self.state().queue.dequeue(); + } + + runnable + } + + /// # Safety + /// + /// Original implementation missing safety documentation + unsafe fn spawn_unchecked(&self, fut: F) -> Task + where + F: Future, + { + let schedule = { + let state = self.state().clone(); + + move |runnable| { + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + { + state.queue.push(runnable).unwrap(); + } + + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + { + state.queue.enqueue(runnable).unwrap(); + } + + if let Some(waker) = state.waker.take() { + waker.wake(); + } + } + }; + + // SAFETY: Original implementation missing safety documentation + let (runnable, task) = unsafe { async_task::spawn_unchecked(fut, schedule) }; + + runnable.schedule(); + + task + } + + /// # Safety + /// + /// Original implementation missing safety documentation + async unsafe fn run_unchecked(&self, fut: F) -> F::Output + where + F: Future, + { + let run_forever = async { + loop { + self.tick().await; + } + }; + + run_forever.or(fut).await + } + + /// Returns a reference to the inner state. 
+ fn state(&self) -> &Arc> { + &self.state + } +} + +impl<'a, const C: usize> Default for Executor<'a, C> { + fn default() -> Self { + Self::new() + } +} + +// SAFETY: Original implementation missing safety documentation +unsafe impl<'a, const C: usize> Send for Executor<'a, C> {} +// SAFETY: Original implementation missing safety documentation +unsafe impl<'a, const C: usize> Sync for Executor<'a, C> {} + +/// A thread-local executor. +/// +/// The executor can only be run on the thread that created it. +/// +/// # Examples +/// +/// ```ignore +/// use edge_executor::{LocalExecutor, block_on}; +/// +/// let local_ex: LocalExecutor = Default::default(); +/// +/// block_on(local_ex.run(async { +/// println!("Hello world!"); +/// })); +/// ``` +pub struct LocalExecutor<'a, const C: usize = 64> { + executor: Executor<'a, C>, + _not_send: PhantomData>>, +} + +impl<'a, const C: usize> LocalExecutor<'a, C> { + /// Creates a single-threaded executor. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::LocalExecutor; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// ``` + pub const fn new() -> Self { + Self { + executor: Executor::::new(), + _not_send: PhantomData, + } + } + + /// Spawns a task onto the executor. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::LocalExecutor; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// + /// let task = local_ex.spawn(async { + /// println!("Hello world"); + /// }); + /// ``` + /// + /// Note that if the executor's queue size is equal to the number of currently + /// spawned and running tasks, spawning this additional task might cause the executor to panic + /// later, when the task is scheduled for polling. 
+ pub fn spawn(&self, fut: F) -> Task + where + F: Future + 'a, + F::Output: 'a, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.executor.spawn_unchecked(fut) } + } + + /// Attempts to run a task if at least one is scheduled. + /// + /// Running a scheduled task means simply polling its future once. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::LocalExecutor; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// assert!(!local_ex.try_tick()); // no tasks to run + /// + /// let task = local_ex.spawn(async { + /// println!("Hello world"); + /// }); + /// assert!(local_ex.try_tick()); // a task was found + /// ``` + pub fn try_tick(&self) -> bool { + self.executor.try_tick() + } + + /// Runs a single task asynchronously. + /// + /// Running a task means simply polling its future once. + /// + /// If no tasks are scheduled when this method is called, it will wait until one is scheduled. + /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{LocalExecutor, block_on}; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// + /// let task = local_ex.spawn(async { + /// println!("Hello world"); + /// }); + /// block_on(local_ex.tick()); // runs the task + /// ``` + pub async fn tick(&self) { + self.executor.tick().await; + } + + /// Runs the executor asynchronously until the given future completes. 
+ /// + /// # Examples + /// + /// ```ignore + /// use edge_executor::{LocalExecutor, block_on}; + /// + /// let local_ex: LocalExecutor = Default::default(); + /// + /// let task = local_ex.spawn(async { 1 + 2 }); + /// let res = block_on(local_ex.run(async { task.await * 2 })); + /// + /// assert_eq!(res, 6); + /// ``` + pub async fn run(&self, fut: F) -> F::Output + where + F: Future, + { + // SAFETY: Original implementation missing safety documentation + unsafe { self.executor.run_unchecked(fut) }.await + } +} + +impl<'a, const C: usize> Default for LocalExecutor<'a, C> { + fn default() -> Self { + Self::new() + } +} + +struct State { + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + queue: crossbeam_queue::ArrayQueue, + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + queue: heapless::mpmc::MpMcQueue, + waker: AtomicWaker, +} + +impl State { + fn new() -> Self { + Self { + #[cfg(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + ))] + queue: crossbeam_queue::ArrayQueue::new(C), + #[cfg(not(all( + target_has_atomic = "8", + target_has_atomic = "16", + target_has_atomic = "32", + target_has_atomic = "64", + target_has_atomic = "ptr" + )))] + queue: heapless::mpmc::MpMcQueue::new(), + waker: AtomicWaker::new(), + } + } +} + +#[cfg(test)] +mod different_executor_tests { + use core::cell::Cell; + + use futures_lite::future::{block_on, pending, poll_once}; + use futures_lite::pin; + + use super::LocalExecutor; + + #[test] + fn shared_queue_slot() { + block_on(async { + let was_polled = Cell::new(false); + let future = async { + was_polled.set(true); + pending::<()>().await; + }; + + let ex1: LocalExecutor = Default::default(); + let ex2: LocalExecutor = 
Default::default(); + + // Start the futures for running forever. + let (run1, run2) = (ex1.run(pending::<()>()), ex2.run(pending::<()>())); + pin!(run1); + pin!(run2); + assert!(poll_once(run1.as_mut()).await.is_none()); + assert!(poll_once(run2.as_mut()).await.is_none()); + + // Spawn the future on executor one and then poll executor two. + ex1.spawn(future).detach(); + assert!(poll_once(run2).await.is_none()); + assert!(!was_polled.get()); + + // Poll the first one. + assert!(poll_once(run1).await.is_none()); + assert!(was_polled.get()); + }); + } +} + +#[cfg(test)] +mod drop_tests { + use alloc::string::String; + use core::mem; + use core::sync::atomic::{AtomicUsize, Ordering}; + use core::task::{Poll, Waker}; + use std::sync::Mutex; + + use bevy_platform::sync::LazyLock; + use futures_lite::future; + + use super::{Executor, Task}; + + #[test] + fn leaked_executor_leaks_everything() { + static DROP: AtomicUsize = AtomicUsize::new(0); + static WAKER: LazyLock>> = LazyLock::new(Default::default); + + let ex: Executor = Default::default(); + + let task = ex.spawn(async { + let _guard = CallOnDrop(|| { + DROP.fetch_add(1, Ordering::SeqCst); + }); + + future::poll_fn(|cx| { + *WAKER.lock().unwrap() = Some(cx.waker().clone()); + Poll::Pending::<()> + }) + .await; + }); + + future::block_on(ex.tick()); + assert!(WAKER.lock().unwrap().is_some()); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + + mem::forget(ex); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + + assert!(future::block_on(future::poll_once(task)).is_none()); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + } + + #[test] + fn await_task_after_dropping_executor() { + let s: String = "hello".into(); + + let ex: Executor = Default::default(); + let task: Task<&str> = ex.spawn(async { &*s }); + assert!(ex.try_tick()); + + drop(ex); + assert_eq!(future::block_on(task), "hello"); + drop(s); + } + + #[test] + fn drop_executor_and_then_drop_finished_task() { + static DROP: AtomicUsize = AtomicUsize::new(0); + + let 
ex: Executor = Default::default(); + let task = ex.spawn(async { + CallOnDrop(|| { + DROP.fetch_add(1, Ordering::SeqCst); + }) + }); + assert!(ex.try_tick()); + + assert_eq!(DROP.load(Ordering::SeqCst), 0); + drop(ex); + assert_eq!(DROP.load(Ordering::SeqCst), 0); + drop(task); + assert_eq!(DROP.load(Ordering::SeqCst), 1); + } + + #[test] + fn drop_finished_task_and_then_drop_executor() { + static DROP: AtomicUsize = AtomicUsize::new(0); + + let ex: Executor = Default::default(); + let task = ex.spawn(async { + CallOnDrop(|| { + DROP.fetch_add(1, Ordering::SeqCst); + }) + }); + assert!(ex.try_tick()); + + assert_eq!(DROP.load(Ordering::SeqCst), 0); + drop(task); + assert_eq!(DROP.load(Ordering::SeqCst), 1); + drop(ex); + assert_eq!(DROP.load(Ordering::SeqCst), 1); + } + + struct CallOnDrop(F); + + impl Drop for CallOnDrop { + fn drop(&mut self) { + (self.0)(); + } + } +} + +#[cfg(test)] +mod local_queue { + use alloc::boxed::Box; + + use futures_lite::{future, pin}; + + use super::Executor; + + #[test] + fn two_queues() { + future::block_on(async { + // Create an executor with two runners. + let ex: Executor = Default::default(); + let (run1, run2) = ( + ex.run(future::pending::<()>()), + ex.run(future::pending::<()>()), + ); + let mut run1 = Box::pin(run1); + pin!(run2); + + // Poll them both. + assert!(future::poll_once(run1.as_mut()).await.is_none()); + assert!(future::poll_once(run2.as_mut()).await.is_none()); + + // Drop the first one, which should leave the local queue in the `None` state. + drop(run1); + assert!(future::poll_once(run2.as_mut()).await.is_none()); + }); + } +} diff --git a/crates/bevy_tasks/src/executor.rs b/crates/bevy_tasks/src/executor.rs index 04667c1b16d59..9a9f4f9dfa8b2 100644 --- a/crates/bevy_tasks/src/executor.rs +++ b/crates/bevy_tasks/src/executor.rs @@ -8,27 +8,24 @@ //! [`async-executor`]: https://crates.io/crates/async-executor //! 
[`edge-executor`]: https://crates.io/crates/edge-executor -pub use async_task::Task; use core::{ fmt, panic::{RefUnwindSafe, UnwindSafe}, }; use derive_more::{Deref, DerefMut}; -#[cfg(feature = "multi_threaded")] -pub use async_task::FallibleTask; - -#[cfg(feature = "async_executor")] -type ExecutorInner<'a> = async_executor::Executor<'a>; - -#[cfg(feature = "async_executor")] -type LocalExecutorInner<'a> = async_executor::LocalExecutor<'a>; - -#[cfg(all(not(feature = "async_executor"), feature = "edge_executor"))] -type ExecutorInner<'a> = edge_executor::Executor<'a, 64>; +cfg_if::cfg_if! { + if #[cfg(feature = "async_executor")] { + type ExecutorInner<'a> = async_executor::Executor<'a>; + type LocalExecutorInner<'a> = async_executor::LocalExecutor<'a>; + } else { + type ExecutorInner<'a> = crate::edge_executor::Executor<'a, 64>; + type LocalExecutorInner<'a> = crate::edge_executor::LocalExecutor<'a, 64>; + } +} -#[cfg(all(not(feature = "async_executor"), feature = "edge_executor"))] -type LocalExecutorInner<'a> = edge_executor::LocalExecutor<'a, 64>; +#[cfg(all(feature = "multi_threaded", not(target_arch = "wasm32")))] +pub use async_task::FallibleTask; /// Wrapper around a multi-threading-aware async executor. 
/// Spawning will generally require tasks to be `Send` and `Sync` to allow multiple @@ -51,6 +48,7 @@ pub struct LocalExecutor<'a>(LocalExecutorInner<'a>); impl Executor<'_> { /// Construct a new [`Executor`] + #[expect(clippy::allow_attributes, reason = "This lint may not always trigger.")] #[allow(dead_code, reason = "not all feature flags require this function")] pub const fn new() -> Self { Self(ExecutorInner::new()) @@ -59,6 +57,7 @@ impl Executor<'_> { impl LocalExecutor<'_> { /// Construct a new [`LocalExecutor`] + #[expect(clippy::allow_attributes, reason = "This lint may not always trigger.")] #[allow(dead_code, reason = "not all feature flags require this function")] pub const fn new() -> Self { Self(LocalExecutorInner::new()) diff --git a/crates/bevy_utils/src/futures.rs b/crates/bevy_tasks/src/futures.rs similarity index 96% rename from crates/bevy_utils/src/futures.rs rename to crates/bevy_tasks/src/futures.rs index 6a4f9ff9cc9e4..a28138e0ecaa2 100644 --- a/crates/bevy_utils/src/futures.rs +++ b/crates/bevy_tasks/src/futures.rs @@ -1,3 +1,5 @@ +#![expect(unsafe_code, reason = "Futures require unsafe code.")] + //! Utilities for working with [`Future`]s. use core::{ future::Future, diff --git a/crates/bevy_tasks/src/lib.rs b/crates/bevy_tasks/src/lib.rs index 3f3db301bbb00..ae684a4eb5124 100644 --- a/crates/bevy_tasks/src/lib.rs +++ b/crates/bevy_tasks/src/lib.rs @@ -4,47 +4,85 @@ html_logo_url = "https://bevyengine.org/assets/icon.png", html_favicon_url = "https://bevyengine.org/assets/icon.png" )] -#![cfg_attr(not(feature = "std"), no_std)] +#![no_std] + +#[cfg(feature = "std")] +extern crate std; extern crate alloc; +mod conditional_send { + cfg_if::cfg_if! { + if #[cfg(target_arch = "wasm32")] { + /// Use [`ConditionalSend`] to mark an optional Send trait bound. Useful as on certain platforms (eg. Wasm), + /// futures aren't Send. 
+ pub trait ConditionalSend {} + impl ConditionalSend for T {} + } else { + /// Use [`ConditionalSend`] to mark an optional Send trait bound. Useful as on certain platforms (eg. Wasm), + /// futures aren't Send. + pub trait ConditionalSend: Send {} + impl ConditionalSend for T {} + } + } +} + +pub use conditional_send::*; + +/// Use [`ConditionalSendFuture`] for a future with an optional Send trait bound, as on certain platforms (eg. Wasm), +/// futures aren't Send. +pub trait ConditionalSendFuture: Future + ConditionalSend {} +impl ConditionalSendFuture for T {} + +use alloc::boxed::Box; + +/// An owned and dynamically typed Future used when you can't statically type your result or need to add some indirection. +pub type BoxedFuture<'a, T> = core::pin::Pin + 'a>>; + +pub mod futures; + +#[cfg(not(feature = "async_executor"))] +mod edge_executor; + mod executor; mod slice; pub use slice::{ParallelSlice, ParallelSliceMut}; -#[cfg_attr(target_arch = "wasm32", path = "wasm_task.rs")] +#[cfg_attr(all(target_arch = "wasm32", feature = "web"), path = "wasm_task.rs")] mod task; pub use task::Task; -#[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] -mod task_pool; +cfg_if::cfg_if! 
{ + if #[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] { + mod task_pool; + mod thread_executor; -#[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] -pub use task_pool::{Scope, TaskPool, TaskPoolBuilder}; + pub use task_pool::{Scope, TaskPool, TaskPoolBuilder}; + pub use thread_executor::{ThreadExecutor, ThreadExecutorTicker}; + } else if #[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] { + mod single_threaded_task_pool; -#[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] -mod single_threaded_task_pool; - -#[cfg(any(target_arch = "wasm32", not(feature = "multi_threaded")))] -pub use single_threaded_task_pool::{Scope, TaskPool, TaskPoolBuilder, ThreadExecutor}; + pub use single_threaded_task_pool::{Scope, TaskPool, TaskPoolBuilder, ThreadExecutor}; + } +} mod usages; -#[cfg(not(target_arch = "wasm32"))] -pub use usages::tick_global_task_pools_on_main_thread; +pub use futures_lite::future::poll_once; pub use usages::{AsyncComputeTaskPool, ComputeTaskPool, IoTaskPool}; -#[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] -mod thread_executor; -#[cfg(all(not(target_arch = "wasm32"), feature = "multi_threaded"))] -pub use thread_executor::{ThreadExecutor, ThreadExecutorTicker}; +#[cfg(not(all(target_arch = "wasm32", feature = "web")))] +pub use usages::tick_global_task_pools_on_main_thread; -#[cfg(all(feature = "async-io", feature = "std"))] -pub use async_io::block_on; -#[cfg(all(not(feature = "async-io"), feature = "std"))] -pub use futures_lite::future::block_on; -pub use futures_lite::future::poll_once; +#[cfg(feature = "std")] +cfg_if::cfg_if! 
{ + if #[cfg(feature = "async-io")] { + pub use async_io::block_on; + } else { + pub use futures_lite::future::block_on; + } +} mod iter; pub use iter::ParallelIterator; @@ -67,27 +105,28 @@ pub mod prelude { pub use crate::block_on; } -#[cfg(feature = "std")] -use core::num::NonZero; - -/// Gets the logical CPU core count available to the current process. -/// -/// This is identical to [`std::thread::available_parallelism`], except -/// it will return a default value of 1 if it internally errors out. -/// -/// This will always return at least 1. -#[cfg(feature = "std")] -pub fn available_parallelism() -> usize { - std::thread::available_parallelism() - .map(NonZero::::get) - .unwrap_or(1) -} - -/// Gets the logical CPU core count available to the current process. -/// -/// This will always return at least 1. -#[cfg(not(feature = "std"))] -pub fn available_parallelism() -> usize { - // Without access to std, assume a single thread is available - 1 +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + use core::num::NonZero; + + /// Gets the logical CPU core count available to the current process. + /// + /// This is identical to [`std::thread::available_parallelism`], except + /// it will return a default value of 1 if it internally errors out. + /// + /// This will always return at least 1. + pub fn available_parallelism() -> usize { + std::thread::available_parallelism() + .map(NonZero::::get) + .unwrap_or(1) + } + } else { + /// Gets the logical CPU core count available to the current process. + /// + /// This will always return at least 1. 
+ pub fn available_parallelism() -> usize { + // Without access to std, assume a single thread is available + 1 + } + } } diff --git a/crates/bevy_tasks/src/single_threaded_task_pool.rs b/crates/bevy_tasks/src/single_threaded_task_pool.rs index 51adc739c1f8c..0f9488bcd0f33 100644 --- a/crates/bevy_tasks/src/single_threaded_task_pool.rs +++ b/crates/bevy_tasks/src/single_threaded_task_pool.rs @@ -1,13 +1,14 @@ use alloc::{string::String, vec::Vec}; +use bevy_platform::sync::Arc; use core::{cell::RefCell, future::Future, marker::PhantomData, mem}; use crate::Task; -#[cfg(feature = "portable-atomic")] -use portable_atomic_util::Arc; +#[cfg(feature = "std")] +use std::thread_local; -#[cfg(not(feature = "portable-atomic"))] -use alloc::sync::Arc; +#[cfg(not(feature = "std"))] +use bevy_platform::sync::{Mutex, PoisonError}; #[cfg(feature = "std")] use crate::executor::LocalExecutor; @@ -27,7 +28,7 @@ static LOCAL_EXECUTOR: LocalExecutor<'static> = const { LocalExecutor::new() }; type ScopeResult = alloc::rc::Rc>>; #[cfg(not(feature = "std"))] -type ScopeResult = Arc>>; +type ScopeResult = Arc>>; /// Used to create a [`TaskPool`]. #[derive(Debug, Default, Clone)] @@ -178,7 +179,7 @@ impl TaskPool { #[cfg(not(feature = "std"))] { - let mut lock = result.lock(); + let mut lock = result.lock().unwrap_or_else(PoisonError::into_inner); lock.take().unwrap() } }) @@ -198,26 +199,27 @@ impl TaskPool { where T: 'static + MaybeSend + MaybeSync, { - #[cfg(all(target_arch = "wasm32", feature = "std"))] - return Task::wrap_future(future); - - #[cfg(all(not(target_arch = "wasm32"), feature = "std"))] - return LOCAL_EXECUTOR.with(|executor| { - let task = executor.spawn(future); - // Loop until all tasks are done - while executor.try_tick() {} - - Task::new(task) - }); - - #[cfg(not(feature = "std"))] - return { - let task = LOCAL_EXECUTOR.spawn(future); - // Loop until all tasks are done - while LOCAL_EXECUTOR.try_tick() {} + cfg_if::cfg_if! 
{ + if #[cfg(all(target_arch = "wasm32", feature = "web"))] { + Task::wrap_future(future) + } else if #[cfg(feature = "std")] { + LOCAL_EXECUTOR.with(|executor| { + let task = executor.spawn(future); + // Loop until all tasks are done + while executor.try_tick() {} + + Task::new(task) + }) + } else { + { + let task = LOCAL_EXECUTOR.spawn(future); + // Loop until all tasks are done + while LOCAL_EXECUTOR.try_tick() {} - Task::new(task) - }; + Task::new(task) + } + } + } } /// Spawns a static future on the JS event loop. This is exactly the same as [`TaskPool::spawn`]. @@ -307,7 +309,7 @@ impl<'scope, 'env, T: Send + 'env> Scope<'scope, 'env, T> { #[cfg(not(feature = "std"))] { - let mut lock = result.lock(); + let mut lock = result.lock().unwrap_or_else(PoisonError::into_inner); *lock = Some(temp_result); } }; diff --git a/crates/bevy_tasks/src/slice.rs b/crates/bevy_tasks/src/slice.rs index 5f964a4561778..a705314a34502 100644 --- a/crates/bevy_tasks/src/slice.rs +++ b/crates/bevy_tasks/src/slice.rs @@ -215,6 +215,7 @@ impl ParallelSliceMut for S where S: AsMut<[T]> {} #[cfg(test)] mod tests { use crate::*; + use alloc::vec; #[test] fn test_par_chunks_map() { diff --git a/crates/bevy_tasks/src/task.rs b/crates/bevy_tasks/src/task.rs index cf5095408b0f3..d4afb775f2e01 100644 --- a/crates/bevy_tasks/src/task.rs +++ b/crates/bevy_tasks/src/task.rs @@ -14,11 +14,11 @@ use core::{ /// Tasks that panic get immediately canceled. Awaiting a canceled task also causes a panic. 
#[derive(Debug)] #[must_use = "Tasks are canceled when dropped, use `.detach()` to run them in the background."] -pub struct Task(crate::executor::Task); +pub struct Task(async_task::Task); impl Task { /// Creates a new task from a given `async_executor::Task` - pub fn new(task: crate::executor::Task) -> Self { + pub fn new(task: async_task::Task) -> Self { Self(task) } diff --git a/crates/bevy_tasks/src/task_pool.rs b/crates/bevy_tasks/src/task_pool.rs index 215981215f2f7..25255a1e5d1d3 100644 --- a/crates/bevy_tasks/src/task_pool.rs +++ b/crates/bevy_tasks/src/task_pool.rs @@ -1,16 +1,15 @@ +use alloc::{boxed::Box, format, string::String, vec::Vec}; use core::{future::Future, marker::PhantomData, mem, panic::AssertUnwindSafe}; -use std::thread::{self, JoinHandle}; +use std::{ + thread::{self, JoinHandle}, + thread_local, +}; use crate::executor::FallibleTask; +use bevy_platform::sync::Arc; use concurrent_queue::ConcurrentQueue; use futures_lite::FutureExt; -#[cfg(feature = "portable-atomic")] -use {alloc::boxed::Box, portable_atomic_util::Arc}; - -#[cfg(not(feature = "portable-atomic"))] -use alloc::sync::Arc; - use crate::{ block_on, thread_executor::{ThreadExecutor, ThreadExecutorTicker}, @@ -75,16 +74,20 @@ impl TaskPoolBuilder { /// This is called on the thread itself and has access to all thread-local storage. /// This will block running async tasks on the thread until the callback completes. 
pub fn on_thread_spawn(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { - #[cfg(feature = "portable-atomic")] - let arc = { - let boxed = Box::new(f); - let boxed: Box = boxed; - Arc::from(boxed) - }; - - #[cfg(not(feature = "portable-atomic"))] let arc = Arc::new(f); + #[cfg(not(target_has_atomic = "ptr"))] + #[expect( + unsafe_code, + reason = "unsized coercion is an unstable feature for non-std types" + )] + // SAFETY: + // - Coercion from `impl Fn` to `dyn Fn` is valid + // - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw` + let arc = unsafe { + Arc::from_raw(Arc::into_raw(arc) as *const (dyn Fn() + Send + Sync + 'static)) + }; + self.on_thread_spawn = Some(arc); self } @@ -94,16 +97,20 @@ impl TaskPoolBuilder { /// This is called on the thread itself and has access to all thread-local storage. /// This will block thread termination until the callback completes. pub fn on_thread_destroy(mut self, f: impl Fn() + Send + Sync + 'static) -> Self { - #[cfg(feature = "portable-atomic")] - let arc = { - let boxed = Box::new(f); - let boxed: Box = boxed; - Arc::from(boxed) - }; - - #[cfg(not(feature = "portable-atomic"))] let arc = Arc::new(f); + #[cfg(not(target_has_atomic = "ptr"))] + #[expect( + unsafe_code, + reason = "unsized coercion is an unstable feature for non-std types" + )] + // SAFETY: + // - Coercion from `impl Fn` to `dyn Fn` is valid + // - `Arc::from_raw` receives a valid pointer from a previous call to `Arc::into_raw` + let arc = unsafe { + Arc::from_raw(Arc::into_raw(arc) as *const (dyn Fn() + Send + Sync + 'static)) + }; + self.on_thread_destroy = Some(arc); self } @@ -330,7 +337,7 @@ impl TaskPool { T: Send + 'static, { Self::THREAD_EXECUTOR.with(|scope_executor| { - // If a `external_executor` is passed use that. Otherwise get the executor stored + // If an `external_executor` is passed, use that. Otherwise, get the executor stored // in the `THREAD_EXECUTOR` thread local. 
if let Some(external_executor) = external_executor { self.scope_with_executor_inner( @@ -479,7 +486,7 @@ impl TaskPool { .is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } #[inline] @@ -498,7 +505,7 @@ impl TaskPool { let _result = AssertUnwindSafe(tick_forever).catch_unwind().await.is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } #[inline] @@ -520,7 +527,7 @@ impl TaskPool { .is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } #[inline] @@ -538,7 +545,7 @@ impl TaskPool { let _result = AssertUnwindSafe(tick_forever).catch_unwind().await.is_ok(); } }; - execute_forever.or(get_results).await + get_results.or(execute_forever).await } /// Spawns a static future onto the thread pool. The returned [`Task`] is a @@ -694,7 +701,6 @@ where } #[cfg(test)] -#[allow(clippy::disallowed_types)] mod tests { use super::*; use core::sync::atomic::{AtomicBool, AtomicI32, Ordering}; diff --git a/crates/bevy_tasks/src/thread_executor.rs b/crates/bevy_tasks/src/thread_executor.rs index 0f8a9c3be9038..48fb3e2861c05 100644 --- a/crates/bevy_tasks/src/thread_executor.rs +++ b/crates/bevy_tasks/src/thread_executor.rs @@ -1,7 +1,8 @@ use core::marker::PhantomData; use std::thread::{self, ThreadId}; -use crate::executor::{Executor, Task}; +use crate::executor::Executor; +use async_task::Task; use futures_lite::Future; /// An executor that can only be ticked on the thread it was instantiated on. But diff --git a/crates/bevy_tasks/src/usages.rs b/crates/bevy_tasks/src/usages.rs index 385b30fdb402d..8b08d5941c6bd 100644 --- a/crates/bevy_tasks/src/usages.rs +++ b/crates/bevy_tasks/src/usages.rs @@ -1,20 +1,11 @@ use super::TaskPool; +use bevy_platform::sync::OnceLock; use core::ops::Deref; -#[cfg(feature = "std")] -use std::sync::OnceLock; - -#[cfg(not(feature = "std"))] -use spin::Once; - macro_rules! 
taskpool { ($(#[$attr:meta])* ($static:ident, $type:ident)) => { - #[cfg(feature = "std")] static $static: OnceLock<$type> = OnceLock::new(); - #[cfg(not(feature = "std"))] - static $static: Once<$type> = Once::new(); - $(#[$attr])* #[derive(Debug)] pub struct $type(TaskPool); @@ -22,15 +13,7 @@ macro_rules! taskpool { impl $type { #[doc = concat!(" Gets the global [`", stringify!($type), "`] instance, or initializes it with `f`.")] pub fn get_or_init(f: impl FnOnce() -> TaskPool) -> &'static Self { - #[cfg(feature = "std")] - { - $static.get_or_init(|| Self(f())) - } - - #[cfg(not(feature = "std"))] - { - $static.call_once(|| Self(f())) - } + $static.get_or_init(|| Self(f())) } #[doc = concat!(" Attempts to get the global [`", stringify!($type), "`] instance, \ @@ -98,7 +81,7 @@ taskpool! { /// # Warning /// /// This function *must* be called on the main thread, or the task pools will not be updated appropriately. -#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(all(target_arch = "wasm32", feature = "web")))] pub fn tick_global_task_pools_on_main_thread() { COMPUTE_TASK_POOL .get() diff --git a/crates/bevy_tasks/src/wasm_task.rs b/crates/bevy_tasks/src/wasm_task.rs index cdf805b2b840c..0cc569c47913d 100644 --- a/crates/bevy_tasks/src/wasm_task.rs +++ b/crates/bevy_tasks/src/wasm_task.rs @@ -1,3 +1,4 @@ +use alloc::boxed::Box; use core::{ any::Any, future::{Future, IntoFuture}, @@ -59,7 +60,12 @@ impl Future for Task { // NOTE: Propagating the panic here sorta has parity with the async_executor behavior. // For those tasks, polling them after a panic returns a `None` which gets `unwrap`ed, so // using `resume_unwind` here is essentially keeping the same behavior while adding more information. 
+ #[cfg(feature = "std")] Poll::Ready(Ok(Err(panic))) => std::panic::resume_unwind(panic), + #[cfg(not(feature = "std"))] + Poll::Ready(Ok(Err(_panic))) => { + unreachable!("catching a panic is only possible with std") + } Poll::Ready(Err(_)) => panic!("Polled a task after it was cancelled"), Poll::Pending => Poll::Pending, } @@ -74,6 +80,14 @@ struct CatchUnwind(#[pin] F); impl Future for CatchUnwind { type Output = Result; fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - std::panic::catch_unwind(AssertUnwindSafe(|| self.project().0.poll(cx)))?.map(Ok) + let f = AssertUnwindSafe(|| self.project().0.poll(cx)); + + #[cfg(feature = "std")] + let result = std::panic::catch_unwind(f)?; + + #[cfg(not(feature = "std"))] + let result = f(); + + result.map(Ok) } } diff --git a/crates/bevy_text/Cargo.toml b/crates/bevy_text/Cargo.toml index 80f89a6e5d1e4..2a1a40a74f52e 100644 --- a/crates/bevy_text/Cargo.toml +++ b/crates/bevy_text/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_text" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides text functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -13,30 +13,33 @@ default_font = [] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_asset = { path = "../bevy_asset", version = "0.15.0-dev" } -bevy_color = { path = "../bevy_color", version = "0.15.0-dev" } -bevy_derive = { path = "../bevy_derive", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev" } -bevy_hierarchy = { path = "../bevy_hierarchy", version = "0.15.0-dev" } -bevy_image = { path = "../bevy_image", version = "0.15.0-dev" } -bevy_math = { path = "../bevy_math", version = "0.15.0-dev" } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", +bevy_app = { path = "../bevy_app", version = "0.16.0-dev" } +bevy_asset = { path = 
"../bevy_asset", version = "0.16.0-dev" } +bevy_color = { path = "../bevy_color", version = "0.16.0-dev" } +bevy_derive = { path = "../bevy_derive", version = "0.16.0-dev" } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev" } +bevy_image = { path = "../bevy_image", version = "0.16.0-dev" } +bevy_log = { path = "../bevy_log", version = "0.16.0-dev" } +bevy_math = { path = "../bevy_math", version = "0.16.0-dev" } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev" } +bevy_render = { path = "../bevy_render", version = "0.16.0-dev" } +bevy_sprite = { path = "../bevy_sprite", version = "0.16.0-dev" } +bevy_transform = { path = "../bevy_transform", version = "0.16.0-dev" } +bevy_window = { path = "../bevy_window", version = "0.16.0-dev" } +bevy_utils = { path = "../bevy_utils", version = "0.16.0-dev" } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false, features = [ + "std", + "serialize", ] } -bevy_render = { path = "../bevy_render", version = "0.15.0-dev" } -bevy_sprite = { path = "../bevy_sprite", version = "0.15.0-dev" } -bevy_transform = { path = "../bevy_transform", version = "0.15.0-dev" } -bevy_window = { path = "../bevy_window", version = "0.15.0-dev" } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } # other -cosmic-text = { version = "0.12", features = ["shape-run-cache"] } +cosmic-text = { version = "0.13", features = ["shape-run-cache"] } thiserror = { version = "2", default-features = false } serde = { version = "1", features = ["derive"] } smallvec = "1.13" unicode-bidi = "0.3.13" sys-locale = "0.3.0" +tracing = { version = "0.1", default-features = false, features = ["std"] } [dev-dependencies] approx = "0.5.1" diff --git a/crates/bevy_text/LICENSE-APACHE b/crates/bevy_text/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_text/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + 
http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_text/LICENSE-MIT b/crates/bevy_text/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_text/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_text/src/bounds.rs b/crates/bevy_text/src/bounds.rs index 98d42e3a34b28..db2ceb0b28f57 100644 --- a/crates/bevy_text/src/bounds.rs +++ b/crates/bevy_text/src/bounds.rs @@ -11,7 +11,7 @@ use bevy_reflect::{std_traits::ReflectDefault, Reflect}; /// reliable limit if it is necessary to contain the text strictly in the bounds. Currently this /// component is mainly useful for text wrapping only. #[derive(Component, Copy, Clone, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct TextBounds { /// The maximum width of text in logical pixels. /// If `None`, the width is unbounded. 
diff --git a/crates/bevy_text/src/font_atlas.rs b/crates/bevy_text/src/font_atlas.rs index 4ce4ea62072db..a10dee5923a32 100644 --- a/crates/bevy_text/src/font_atlas.rs +++ b/crates/bevy_text/src/font_atlas.rs @@ -1,12 +1,11 @@ use bevy_asset::{Assets, Handle}; -use bevy_image::{Image, ImageSampler}; +use bevy_image::{prelude::*, ImageSampler}; use bevy_math::{IVec2, UVec2}; +use bevy_platform::collections::HashMap; use bevy_render::{ render_asset::RenderAssetUsages, render_resource::{Extent3d, TextureDimension, TextureFormat}, }; -use bevy_sprite::{DynamicTextureAtlasBuilder, TextureAtlasLayout}; -use bevy_utils::HashMap; use crate::{FontSmoothing, GlyphAtlasLocation, TextError}; @@ -21,7 +20,7 @@ use crate::{FontSmoothing, GlyphAtlasLocation, TextError}; /// providing a trade-off between visual quality and performance. /// /// A [`CacheKey`](cosmic_text::CacheKey) encodes all of the information of a subpixel-offset glyph and is used to -/// find that glyphs raster in a [`TextureAtlas`](bevy_sprite::TextureAtlas) through its corresponding [`GlyphAtlasLocation`]. +/// find that glyph's raster in a [`TextureAtlas`] through its corresponding [`GlyphAtlasLocation`]. pub struct FontAtlas { /// Used to update the [`TextureAtlasLayout`].
pub dynamic_texture_atlas_builder: DynamicTextureAtlasBuilder, @@ -98,7 +97,7 @@ impl FontAtlas { let atlas_layout = atlas_layouts.get_mut(&self.texture_atlas).unwrap(); let atlas_texture = textures.get_mut(&self.texture).unwrap(); - if let Some(glyph_index) = + if let Ok(glyph_index) = self.dynamic_texture_atlas_builder .add_texture(atlas_layout, texture, atlas_texture) { diff --git a/crates/bevy_text/src/font_atlas_set.rs b/crates/bevy_text/src/font_atlas_set.rs index 60374daf49bfc..1a498127ba49d 100644 --- a/crates/bevy_text/src/font_atlas_set.rs +++ b/crates/bevy_text/src/font_atlas_set.rs @@ -1,17 +1,13 @@ use bevy_asset::{Asset, AssetEvent, AssetId, Assets}; -use bevy_ecs::{ - event::EventReader, - system::{ResMut, Resource}, -}; -use bevy_image::Image; +use bevy_ecs::{event::EventReader, resource::Resource, system::ResMut}; +use bevy_image::prelude::*; use bevy_math::{IVec2, UVec2}; +use bevy_platform::collections::HashMap; use bevy_reflect::TypePath; use bevy_render::{ render_asset::RenderAssetUsages, render_resource::{Extent3d, TextureDimension, TextureFormat}, }; -use bevy_sprite::TextureAtlasLayout; -use bevy_utils::HashMap; use crate::{error::TextError, Font, FontAtlas, FontSmoothing, GlyphAtlasInfo}; @@ -92,9 +88,7 @@ impl FontAtlasSet { pub fn has_glyph(&self, cache_key: cosmic_text::CacheKey, font_size: &FontAtlasKey) -> bool { self.font_atlases .get(font_size) - .map_or(false, |font_atlas| { - font_atlas.iter().any(|atlas| atlas.has_glyph(cache_key)) - }) + .is_some_and(|font_atlas| font_atlas.iter().any(|atlas| atlas.has_glyph(cache_key))) } /// Adds the given subpixel-offset glyph to the [`FontAtlas`]es in this set @@ -181,22 +175,15 @@ impl FontAtlasSet { self.font_atlases .get(&FontAtlasKey(cache_key.font_size_bits, font_smoothing)) .and_then(|font_atlases| { - font_atlases - .iter() - .find_map(|atlas| { - atlas.get_glyph_index(cache_key).map(|location| { - ( - location, - atlas.texture_atlas.clone_weak(), - atlas.texture.clone_weak(), - ) + 
font_atlases.iter().find_map(|atlas| { + atlas + .get_glyph_index(cache_key) + .map(|location| GlyphAtlasInfo { + location, + texture_atlas: atlas.texture_atlas.clone_weak(), + texture: atlas.texture.clone_weak(), }) - }) - .map(|(location, texture_atlas, texture)| GlyphAtlasInfo { - texture_atlas, - location, - texture, - }) + }) }) } diff --git a/crates/bevy_text/src/glyph.rs b/crates/bevy_text/src/glyph.rs index b5295c655e76a..c761bc00336e0 100644 --- a/crates/bevy_text/src/glyph.rs +++ b/crates/bevy_text/src/glyph.rs @@ -1,10 +1,9 @@ //! This module exports types related to rendering glyphs. use bevy_asset::Handle; -use bevy_image::Image; +use bevy_image::prelude::*; use bevy_math::{IVec2, Vec2}; use bevy_reflect::Reflect; -use bevy_sprite::TextureAtlasLayout; /// A glyph of a font, typically representing a single character, positioned in screen space. /// @@ -12,6 +11,7 @@ use bevy_sprite::TextureAtlasLayout; /// /// Used in [`TextPipeline::queue_text`](crate::TextPipeline::queue_text) and [`crate::TextLayoutInfo`] for rendering glyphs. #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct PositionedGlyph { /// The position of the glyph in the text block's bounding box. pub position: Vec2, @@ -21,24 +21,12 @@ pub struct PositionedGlyph { pub atlas_info: GlyphAtlasInfo, /// The index of the glyph in the [`ComputedTextBlock`](crate::ComputedTextBlock)'s tracked spans. pub span_index: usize, - /// TODO: In order to do text editing, we need access to the size of glyphs and their index in the associated String. - /// For example, to figure out where to place the cursor in an input box from the mouse's position. - /// Without this, it's only possible in texts where each glyph is one byte. 
Cosmic text has methods for this - /// cosmic-texts [hit detection](https://pop-os.github.io/cosmic-text/cosmic_text/struct.Buffer.html#method.hit) - byte_index: usize, -} - -impl PositionedGlyph { - /// Creates a new [`PositionedGlyph`] - pub fn new(position: Vec2, size: Vec2, atlas_info: GlyphAtlasInfo, span_index: usize) -> Self { - Self { - position, - size, - atlas_info, - span_index, - byte_index: 0, - } - } + /// The index of the glyph's line. + pub line_index: usize, + /// The byte index of the glyph in its line. + pub byte_index: usize, + /// The byte length of the glyph. + pub byte_length: usize, } /// Information about a glyph in an atlas. @@ -48,6 +36,7 @@ impl PositionedGlyph { /// /// Used in [`PositionedGlyph`] and [`FontAtlasSet`](crate::FontAtlasSet). #[derive(Debug, Clone, Reflect)] +#[reflect(Clone)] pub struct GlyphAtlasInfo { /// A handle to the [`Image`] data for the texture atlas this glyph was placed in. /// @@ -66,6 +55,7 @@ pub struct GlyphAtlasInfo { /// /// Used in [`GlyphAtlasInfo`] and [`FontAtlas`](crate::FontAtlas). #[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Clone)] pub struct GlyphAtlasLocation { /// The index of the glyph in the atlas pub glyph_index: usize, diff --git a/crates/bevy_text/src/lib.rs b/crates/bevy_text/src/lib.rs index ad9feb7fdcf0f..670f793c31c15 100644 --- a/crates/bevy_text/src/lib.rs +++ b/crates/bevy_text/src/lib.rs @@ -29,8 +29,6 @@ //! 3. [`PositionedGlyph`]s are stored in a [`TextLayoutInfo`], //! which contains all the information that downstream systems need for rendering. -#![allow(clippy::type_complexity)] - extern crate alloc; mod bounds; @@ -61,9 +59,6 @@ pub use text_access::*; /// /// This includes the most common types in this crate, re-exported for your convenience.
pub mod prelude { - #[doc(hidden)] - #[allow(deprecated)] - pub use crate::Text2dBundle; #[doc(hidden)] pub use crate::{ Font, JustifyText, LineBreak, Text2d, Text2dReader, Text2dWriter, TextColor, TextError, @@ -72,9 +67,9 @@ pub mod prelude { } use bevy_app::{prelude::*, Animation}; -use bevy_asset::AssetApp; #[cfg(feature = "default_font")] use bevy_asset::{load_internal_binary_asset, Handle}; +use bevy_asset::{AssetApp, AssetEvents}; use bevy_ecs::prelude::*; use bevy_render::{ camera::CameraUpdateSystem, view::VisibilitySystems, ExtractSchedule, RenderApp, @@ -113,6 +108,7 @@ impl Plugin for TextPlugin { app.init_asset::() .register_type::() .register_type::() + .register_type::() .register_type::() .register_type::() .register_type::() @@ -128,7 +124,7 @@ impl Plugin for TextPlugin { .add_systems( PostUpdate, ( - remove_dropped_font_atlas_sets, + remove_dropped_font_atlas_sets.before(AssetEvents), detect_text_needs_rerender::, update_text2d_layout // Potential conflict: `Assets` diff --git a/crates/bevy_text/src/pipeline.rs b/crates/bevy_text/src/pipeline.rs index f8e7274ef31f2..c1f9ca7fe4920 100644 --- a/crates/bevy_text/src/pipeline.rs +++ b/crates/bevy_text/src/pipeline.rs @@ -4,16 +4,14 @@ use bevy_asset::{AssetId, Assets}; use bevy_color::Color; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{ - component::Component, - entity::Entity, - reflect::ReflectComponent, - system::{ResMut, Resource}, + component::Component, entity::Entity, reflect::ReflectComponent, resource::Resource, + system::ResMut, }; -use bevy_image::Image; +use bevy_image::prelude::*; +use bevy_log::{once, warn}; use bevy_math::{UVec2, Vec2}; +use bevy_platform::collections::HashMap; use bevy_reflect::{std_traits::ReflectDefault, Reflect}; -use bevy_sprite::TextureAtlasLayout; -use bevy_utils::HashMap; use cosmic_text::{Attrs, Buffer, Family, Metrics, Shaping, Wrap}; @@ -81,7 +79,6 @@ impl TextPipeline { /// Utilizes [`cosmic_text::Buffer`] to shape and layout text /// /// Negative or 
0.0 font sizes will not be laid out. - #[allow(clippy::too_many_arguments)] pub fn update_buffer<'a>( &mut self, fonts: &Assets, @@ -98,6 +95,7 @@ impl TextPipeline { // Collect span information into a vec. This is necessary because font loading requires mut access // to FontSystem, which the cosmic-text Buffer also needs. let mut font_size: f32 = 0.; + let mut line_height: f32 = 0.0; let mut spans: Vec<(usize, &str, &TextFont, FontFaceInfo, Color)> = core::mem::take(&mut self.spans_buffer) .into_iter() @@ -130,6 +128,7 @@ impl TextPipeline { // Get max font size for use in cosmic Metrics. font_size = font_size.max(text_font.font_size); + line_height = line_height.max(text_font.line_height.eval(text_font.font_size)); // Load Bevy fonts into cosmic-text's font system. let face_info = load_font_to_fontdb( @@ -141,12 +140,15 @@ impl TextPipeline { // Save spans that aren't zero-sized. if scale_factor <= 0.0 || text_font.font_size <= 0.0 { + once!(warn!( + "Text span {entity} has a font size <= 0.0. Nothing will be displayed.", + )); + continue; } spans.push((span_index, span, text_font, face_info, color)); } - let line_height = font_size * 1.2; let mut metrics = Metrics::new(font_size, line_height).scale(scale_factor as f32); // Metrics of 0.0 cause `Buffer::set_metrics` to panic. We hack around this by 'falling // through' to call `Buffer::set_rich_text` with zero spans so any cached text will be cleared without @@ -171,8 +173,7 @@ impl TextPipeline { // Update the buffer. 
let buffer = &mut computed.buffer; - buffer.set_metrics(font_system, metrics); - buffer.set_size(font_system, bounds.width, bounds.height); + buffer.set_metrics_and_size(font_system, metrics, bounds.width, bounds.height); buffer.set_wrap( font_system, @@ -184,15 +185,24 @@ impl TextPipeline { }, ); - buffer.set_rich_text(font_system, spans_iter, Attrs::new(), Shaping::Advanced); + buffer.set_rich_text( + font_system, + spans_iter, + Attrs::new(), + Shaping::Advanced, + Some(justify.into()), + ); - // PERF: https://github.com/pop-os/cosmic-text/issues/166: - // Setting alignment afterwards appears to invalidate some layouting performed by `set_text` which is presumably not free? - for buffer_line in buffer.lines.iter_mut() { - buffer_line.set_align(Some(justify.into())); - } buffer.shape_until_scroll(font_system, false); + // Workaround for alignment not working for unbounded text. + // See https://github.com/pop-os/cosmic-text/issues/343 + if bounds.width.is_none() && justify != JustifyText::Left { + let dimensions = buffer_dimensions(buffer); + // `set_size` causes a re-layout to occur. + buffer.set_size(font_system, Some(dimensions.x), bounds.height); + } + // Recover the spans buffer. spans.clear(); self.spans_buffer = spans @@ -207,7 +217,6 @@ impl TextPipeline { /// /// Produces a [`TextLayoutInfo`], containing [`PositionedGlyph`]s /// which contain information for rendering the text. 
- #[allow(clippy::too_many_arguments)] pub fn queue_text<'a>( &mut self, layout_info: &mut TextLayoutInfo, @@ -255,77 +264,84 @@ impl TextPipeline { let buffer = &mut computed.buffer; let box_size = buffer_dimensions(buffer); - let result = buffer - .layout_runs() - .flat_map(|run| { - run.glyphs - .iter() - .map(move |layout_glyph| (layout_glyph, run.line_y)) - }) - .try_for_each(|(layout_glyph, line_y)| { - let mut temp_glyph; - let span_index = layout_glyph.metadata; - let font_id = glyph_info[span_index].0; - let font_smoothing = glyph_info[span_index].1; - - let layout_glyph = if font_smoothing == FontSmoothing::None { - // If font smoothing is disabled, round the glyph positions and sizes, - // effectively discarding all subpixel layout. - temp_glyph = layout_glyph.clone(); - temp_glyph.x = temp_glyph.x.round(); - temp_glyph.y = temp_glyph.y.round(); - temp_glyph.w = temp_glyph.w.round(); - temp_glyph.x_offset = temp_glyph.x_offset.round(); - temp_glyph.y_offset = temp_glyph.y_offset.round(); - temp_glyph.line_height_opt = temp_glyph.line_height_opt.map(f32::round); - - &temp_glyph - } else { - layout_glyph - }; - - let font_atlas_set = font_atlas_sets.sets.entry(font_id).or_default(); - - let physical_glyph = layout_glyph.physical((0., 0.), 1.); - - let atlas_info = font_atlas_set - .get_glyph_atlas_info(physical_glyph.cache_key, font_smoothing) - .map(Ok) - .unwrap_or_else(|| { - font_atlas_set.add_glyph_to_atlas( - texture_atlases, - textures, - &mut font_system.0, - &mut swash_cache.0, - layout_glyph, - font_smoothing, - ) - })?; - - let texture_atlas = texture_atlases.get(&atlas_info.texture_atlas).unwrap(); - let location = atlas_info.location; - let glyph_rect = texture_atlas.textures[location.glyph_index]; - let left = location.offset.x as f32; - let top = location.offset.y as f32; - let glyph_size = UVec2::new(glyph_rect.width(), glyph_rect.height()); - - // offset by half the size because the origin is center - let x = glyph_size.x as f32 / 2.0 + 
left + physical_glyph.x as f32; - let y = line_y.round() + physical_glyph.y as f32 - top + glyph_size.y as f32 / 2.0; - let y = match y_axis_orientation { - YAxisOrientation::TopToBottom => y, - YAxisOrientation::BottomToTop => box_size.y - y, - }; - - let position = Vec2::new(x, y); - - // TODO: recreate the byte index, that keeps track of where a cursor is, - // when glyphs are not limited to single byte representation, relevant for #1319 - let pos_glyph = - PositionedGlyph::new(position, glyph_size.as_vec2(), atlas_info, span_index); - layout_info.glyphs.push(pos_glyph); - Ok(()) - }); + let result = buffer.layout_runs().try_for_each(|run| { + let result = run + .glyphs + .iter() + .map(move |layout_glyph| (layout_glyph, run.line_y, run.line_i)) + .try_for_each(|(layout_glyph, line_y, line_i)| { + let mut temp_glyph; + let span_index = layout_glyph.metadata; + let font_id = glyph_info[span_index].0; + let font_smoothing = glyph_info[span_index].1; + + let layout_glyph = if font_smoothing == FontSmoothing::None { + // If font smoothing is disabled, round the glyph positions and sizes, + // effectively discarding all subpixel layout. 
+ temp_glyph = layout_glyph.clone(); + temp_glyph.x = temp_glyph.x.round(); + temp_glyph.y = temp_glyph.y.round(); + temp_glyph.w = temp_glyph.w.round(); + temp_glyph.x_offset = temp_glyph.x_offset.round(); + temp_glyph.y_offset = temp_glyph.y_offset.round(); + temp_glyph.line_height_opt = temp_glyph.line_height_opt.map(f32::round); + + &temp_glyph + } else { + layout_glyph + }; + + let font_atlas_set = font_atlas_sets.sets.entry(font_id).or_default(); + + let physical_glyph = layout_glyph.physical((0., 0.), 1.); + + let atlas_info = font_atlas_set + .get_glyph_atlas_info(physical_glyph.cache_key, font_smoothing) + .map(Ok) + .unwrap_or_else(|| { + font_atlas_set.add_glyph_to_atlas( + texture_atlases, + textures, + &mut font_system.0, + &mut swash_cache.0, + layout_glyph, + font_smoothing, + ) + })?; + + let texture_atlas = texture_atlases.get(&atlas_info.texture_atlas).unwrap(); + let location = atlas_info.location; + let glyph_rect = texture_atlas.textures[location.glyph_index]; + let left = location.offset.x as f32; + let top = location.offset.y as f32; + let glyph_size = UVec2::new(glyph_rect.width(), glyph_rect.height()); + + // offset by half the size because the origin is center + let x = glyph_size.x as f32 / 2.0 + left + physical_glyph.x as f32; + let y = + line_y.round() + physical_glyph.y as f32 - top + glyph_size.y as f32 / 2.0; + let y = match y_axis_orientation { + YAxisOrientation::TopToBottom => y, + YAxisOrientation::BottomToTop => box_size.y - y, + }; + + let position = Vec2::new(x, y); + + let pos_glyph = PositionedGlyph { + position, + size: glyph_size.as_vec2(), + atlas_info, + span_index, + byte_index: layout_glyph.start, + byte_length: layout_glyph.end - layout_glyph.start, + line_index: line_i, + }; + layout_info.glyphs.push(pos_glyph); + Ok(()) + }); + + result + }); // Return the scratch vec. 
self.glyph_info = glyph_info; @@ -341,7 +357,6 @@ impl TextPipeline { /// /// Produces a [`TextMeasureInfo`] which can be used by a layout system /// to measure the text area on demand. - #[allow(clippy::too_many_arguments)] pub fn create_text_measure<'a>( &mut self, entity: Entity, @@ -399,7 +414,7 @@ impl TextPipeline { /// Contains scaled glyphs and their size. Generated via [`TextPipeline::queue_text`] when an entity has /// [`TextLayout`] and [`ComputedTextBlock`] components. #[derive(Component, Clone, Default, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct TextLayoutInfo { /// Scaled and positioned glyphs in screenspace pub glyphs: Vec, @@ -486,7 +501,13 @@ fn get_attrs<'a>( .stretch(face_info.stretch) .style(face_info.style) .weight(face_info.weight) - .metrics(Metrics::relative(text_font.font_size, 1.2).scale(scale_factor as f32)) + .metrics( + Metrics { + font_size: text_font.font_size, + line_height: text_font.line_height.eval(text_font.font_size), + } + .scale(scale_factor as f32), + ) .color(cosmic_text::Color(color.to_linear().as_u32())); attrs } diff --git a/crates/bevy_text/src/text.rs b/crates/bevy_text/src/text.rs index 06b9eeb9902d4..faa5d93dc9cee 100644 --- a/crates/bevy_text/src/text.rs +++ b/crates/bevy_text/src/text.rs @@ -8,12 +8,12 @@ use bevy_asset::Handle; use bevy_color::Color; use bevy_derive::{Deref, DerefMut}; use bevy_ecs::{prelude::*, reflect::ReflectComponent}; -use bevy_hierarchy::{Children, Parent}; use bevy_reflect::prelude::*; -use bevy_utils::warn_once; +use bevy_utils::once; use cosmic_text::{Buffer, Metrics}; use serde::{Deserialize, Serialize}; use smallvec::SmallVec; +use tracing::warn; /// Wrapper for [`cosmic_text::Buffer`] #[derive(Deref, DerefMut, Debug, Clone)] @@ -29,7 +29,7 @@ impl Default for CosmicBuffer { /// /// Returned by [`ComputedTextBlock::entities`]. 
#[derive(Debug, Copy, Clone, Reflect)] -#[reflect(Debug)] +#[reflect(Debug, Clone)] pub struct TextEntity { /// The entity. pub entity: Entity, @@ -43,7 +43,7 @@ pub struct TextEntity { /// /// Automatically updated by 2d and UI text systems. #[derive(Component, Debug, Clone, Reflect)] -#[reflect(Component, Debug, Default)] +#[reflect(Component, Debug, Default, Clone)] pub struct ComputedTextBlock { /// Buffer for managing text layout and creating [`TextLayoutInfo`]. /// @@ -51,7 +51,7 @@ pub struct ComputedTextBlock { /// `TextLayoutInfo`. If you want to control the buffer contents manually or use the `cosmic-text` /// editor, then you need to not use `TextLayout` and instead manually implement the conversion to /// `TextLayoutInfo`. - #[reflect(ignore)] + #[reflect(ignore, clone)] pub(crate) buffer: CosmicBuffer, /// Entities for all text spans in the block, including the root-level text. /// @@ -86,6 +86,16 @@ impl ComputedTextBlock { pub fn needs_rerender(&self) -> bool { self.needs_rerender } + /// Accesses the underlying buffer which can be used for `cosmic-text` APIs such as accessing layout information + /// or calculating a cursor position. + /// + /// Mutable access is not offered because changes would be overwritten during the automated layout calculation. + /// If you want to control the buffer contents manually or use the `cosmic-text` + /// editor, then you need to not use `TextLayout` and instead manually implement the conversion to + /// `TextLayoutInfo`. + pub fn buffer(&self) -> &CosmicBuffer { + &self.buffer + } } impl Default for ComputedTextBlock { @@ -106,7 +116,7 @@ impl Default for ComputedTextBlock { /// /// See [`Text2d`](crate::Text2d) for the core component of 2d text, and `Text` in `bevy_ui` for UI text. 
#[derive(Component, Debug, Copy, Clone, Default, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(ComputedTextBlock, TextLayoutInfo)] pub struct TextLayout { /// The text's internal alignment. @@ -158,7 +168,10 @@ impl TextLayout { } } -/// A span of UI text in a tree of spans under an entity with [`TextLayout`] and `Text` or `Text2d`. +/// A span of text in a tree of spans. +/// +/// `TextSpan` is only valid as a child of an entity with [`TextLayout`], which is provided by `Text` +/// for text in `bevy_ui` or `Text2d` for text in 2d world-space. /// /// Spans are collected in hierarchy traversal order into a [`ComputedTextBlock`] for layout. /// @@ -168,12 +181,13 @@ impl TextLayout { /// # use bevy_color::palettes::basic::{RED, BLUE}; /// # use bevy_ecs::world::World; /// # use bevy_text::{Font, TextLayout, TextFont, TextSpan, TextColor}; -/// # use bevy_hierarchy::BuildChildren; /// /// # let font_handle: Handle = Default::default(); /// # let mut world = World::default(); /// # /// world.spawn(( +/// // `Text` or `Text2d` are needed, and will provide default instances +/// // of the following components. /// TextLayout::default(), /// TextFont { /// font: font_handle.clone().into(), @@ -183,6 +197,7 @@ impl TextLayout { /// TextColor(BLUE.into()), /// )) /// .with_child(( +/// // Children must be `TextSpan`, not `Text` or `Text2d`. /// TextSpan::new("Hello!"), /// TextFont { /// font: font_handle.into(), @@ -193,7 +208,7 @@ impl TextLayout { /// )); /// ``` #[derive(Component, Debug, Default, Clone, Deref, DerefMut, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require(TextFont, TextColor)] pub struct TextSpan(pub String); @@ -235,7 +250,7 @@ impl From for TextSpan { /// _Has no affect on a single line text entity_, unless used together with a /// [`TextBounds`](super::bounds::TextBounds) component with an explicit `width` value. 
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, PartialEq, Hash)] pub enum JustifyText { /// Leftmost character is immediately to the right of the render position. /// Bounds start from the render position and advance rightwards. @@ -267,7 +282,7 @@ impl From for cosmic_text::Align { /// `TextFont` determines the style of a text span within a [`ComputedTextBlock`], specifically /// the font face, the font size, and the color. #[derive(Component, Clone, Debug, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] pub struct TextFont { /// The specific font face to use, as a `Handle` to a [`Font`] asset. /// @@ -285,6 +300,11 @@ pub struct TextFont { /// A new font atlas is generated for every combination of font handle and scaled font size /// which can have a strong performance impact. pub font_size: f32, + /// The vertical height of a line of text, from the top of one line to the top of the + /// next. + /// + /// Defaults to `LineHeight::RelativeToFont(1.2)` + pub line_height: LineHeight, /// The antialiasing method to use when rendering text. pub font_smoothing: FontSmoothing, } @@ -317,6 +337,12 @@ impl TextFont { self.font_smoothing = font_smoothing; self } + + /// Returns this [`TextFont`] with the specified [`LineHeight`]. 
+ pub const fn with_line_height(mut self, line_height: LineHeight) -> Self { + self.line_height = line_height; + self + } } impl Default for TextFont { @@ -324,14 +350,42 @@ impl Default for TextFont { Self { font: Default::default(), font_size: 20.0, + line_height: LineHeight::default(), font_smoothing: Default::default(), } } } +/// Specifies the height of each line of text for `Text` and `Text2d` +/// +/// Default is 1.2x the font size +#[derive(Debug, Clone, Copy, Reflect)] +#[reflect(Debug, Clone)] +pub enum LineHeight { + /// Set line height to a specific number of pixels + Px(f32), + /// Set line height to a multiple of the font size + RelativeToFont(f32), +} + +impl LineHeight { + pub(crate) fn eval(self, font_size: f32) -> f32 { + match self { + LineHeight::Px(px) => px, + LineHeight::RelativeToFont(scale) => scale * font_size, + } + } +} + +impl Default for LineHeight { + fn default() -> Self { + LineHeight::RelativeToFont(1.2) + } +} + /// The color of the text for this section. -#[derive(Component, Copy, Clone, Debug, Deref, DerefMut, Reflect)] -#[reflect(Component, Default, Debug)] +#[derive(Component, Copy, Clone, Debug, Deref, DerefMut, Reflect, PartialEq)] +#[reflect(Component, Default, Debug, PartialEq, Clone)] pub struct TextColor(pub Color); impl Default for TextColor { @@ -355,7 +409,7 @@ impl TextColor { /// Determines how lines will be broken when preventing text from running out of bounds. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, PartialEq, Hash, Default)] pub enum LineBreak { /// Uses the [Unicode Line Breaking Algorithm](https://www.unicode.org/reports/tr14/). /// Lines will be broken up at the nearest suitable word boundary, usually a space. @@ -378,7 +432,7 @@ pub enum LineBreak { /// /// **Note:** Subpixel antialiasing is not currently supported. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Reflect, Serialize, Deserialize)] -#[reflect(Serialize, Deserialize)] +#[reflect(Serialize, Deserialize, Clone, PartialEq, Hash, Default)] #[doc(alias = "antialiasing")] #[doc(alias = "pixelated")] pub enum FontSmoothing { @@ -417,13 +471,13 @@ pub fn detect_text_needs_rerender( ), >, changed_spans: Query< - (Entity, Option<&Parent>, Has), + (Entity, Option<&ChildOf>, Has), ( Or<( Changed, Changed, Changed, - Changed, // Included to detect broken text block hierarchies. + Changed, // Included to detect broken text block hierarchies. Added, )>, With, @@ -431,7 +485,7 @@ pub fn detect_text_needs_rerender( ), >, mut computed: Query<( - Option<&Parent>, + Option<&ChildOf>, Option<&mut ComputedTextBlock>, Has, )>, @@ -443,8 +497,8 @@ pub fn detect_text_needs_rerender( // - Root children changed (can include additions and removals). for root in changed_roots.iter() { let Ok((_, Some(mut computed), _)) = computed.get_mut(root) else { - warn_once!("found entity {:?} with a root text component ({}) but no ComputedTextBlock; this warning only \ - prints once", root, core::any::type_name::()); + once!(warn!("found entity {} with a root text component ({}) but no ComputedTextBlock; this warning only \ + prints once", root, core::any::type_name::())); continue; }; computed.needs_rerender = true; @@ -454,32 +508,32 @@ pub fn detect_text_needs_rerender( // - Span component changed. // - Span TextFont changed. // - Span children changed (can include additions and removals). 
- for (entity, maybe_span_parent, has_text_block) in changed_spans.iter() { + for (entity, maybe_span_child_of, has_text_block) in changed_spans.iter() { if has_text_block { - warn_once!("found entity {:?} with a TextSpan that has a TextLayout, which should only be on root \ + once!(warn!("found entity {} with a TextSpan that has a TextLayout, which should only be on root \ text entities (that have {}); this warning only prints once", - entity, core::any::type_name::()); + entity, core::any::type_name::())); } - let Some(span_parent) = maybe_span_parent else { - warn_once!( - "found entity {:?} with a TextSpan that has no parent; it should have an ancestor \ + let Some(span_child_of) = maybe_span_child_of else { + once!(warn!( + "found entity {} with a TextSpan that has no parent; it should have an ancestor \ with a root text component ({}); this warning only prints once", entity, core::any::type_name::() - ); + )); continue; }; - let mut parent: Entity = **span_parent; + let mut parent: Entity = span_child_of.parent(); // Search for the nearest ancestor with ComputedTextBlock. // Note: We assume the perf cost from duplicate visits in the case that multiple spans in a block are visited // is outweighed by the expense of tracking visited spans. 
loop { - let Ok((maybe_parent, maybe_computed, has_span)) = computed.get_mut(parent) else { - warn_once!("found entity {:?} with a TextSpan that is part of a broken hierarchy with a Parent \ - component that points at non-existent entity {:?}; this warning only prints once", - entity, parent); + let Ok((maybe_child_of, maybe_computed, has_span)) = computed.get_mut(parent) else { + once!(warn!("found entity {} with a TextSpan that is part of a broken hierarchy with a ChildOf \ + component that points at non-existent entity {}; this warning only prints once", + entity, parent)); break; }; if let Some(mut computed) = maybe_computed { @@ -487,21 +541,21 @@ pub fn detect_text_needs_rerender( break; } if !has_span { - warn_once!("found entity {:?} with a TextSpan that has an ancestor ({}) that does not have a text \ + once!(warn!("found entity {} with a TextSpan that has an ancestor ({}) that does not have a text \ span component or a ComputedTextBlock component; this warning only prints once", - entity, parent); + entity, parent)); break; } - let Some(next_parent) = maybe_parent else { - warn_once!( - "found entity {:?} with a TextSpan that has no ancestor with the root text \ + let Some(next_child_of) = maybe_child_of else { + once!(warn!( + "found entity {} with a TextSpan that has no ancestor with the root text \ component ({}); this warning only prints once", entity, core::any::type_name::() - ); + )); break; }; - parent = **next_parent; + parent = next_child_of.parent(); } } } diff --git a/crates/bevy_text/src/text2d.rs b/crates/bevy_text/src/text2d.rs index f0e1a9fa441ac..a9419e89c0f0c 100644 --- a/crates/bevy_text/src/text2d.rs +++ b/crates/bevy_text/src/text2d.rs @@ -7,42 +7,32 @@ use crate::{ use bevy_asset::Assets; use bevy_color::LinearRgba; use bevy_derive::{Deref, DerefMut}; +use bevy_ecs::entity::EntityHashSet; use bevy_ecs::{ change_detection::{DetectChanges, Ref}, - component::{require, Component}, + component::Component, entity::Entity, 
prelude::{ReflectComponent, With}, query::{Changed, Without}, system::{Commands, Local, Query, Res, ResMut}, }; -use bevy_image::Image; +use bevy_image::prelude::*; use bevy_math::Vec2; use bevy_reflect::{prelude::ReflectDefault, Reflect}; use bevy_render::sync_world::TemporaryRenderEntity; -use bevy_render::view::Visibility; +use bevy_render::view::{self, Visibility, VisibilityClass}; use bevy_render::{ primitives::Aabb, view::{NoFrustumCulling, ViewVisibility}, Extract, }; -use bevy_sprite::{Anchor, ExtractedSprite, ExtractedSprites, SpriteSource, TextureAtlasLayout}; +use bevy_sprite::{ + Anchor, ExtractedSlice, ExtractedSlices, ExtractedSprite, ExtractedSprites, Sprite, +}; use bevy_transform::components::Transform; use bevy_transform::prelude::GlobalTransform; -use bevy_utils::HashSet; use bevy_window::{PrimaryWindow, Window}; -/// [`Text2dBundle`] was removed in favor of required components. -/// The core component is now [`Text2d`] which can contain a single text segment. -/// Indexed access to segments can be done with the new [`Text2dReader`] and [`Text2dWriter`] system params. -/// Additional segments can be added through children with [`TextSpan`](crate::text::TextSpan). -/// Text configuration can be done with [`TextLayout`], [`TextFont`] and [`TextColor`], -/// while sprite-related configuration uses [`TextBounds`] and [`Anchor`] components. -#[deprecated( - since = "0.15.0", - note = "Text2dBundle has been migrated to required components. Follow the documentation for more information." -)] -pub struct Text2dBundle {} - /// The top-level 2D text component. /// /// Adding `Text2d` to an entity will pull in required components for setting up 2d text. 
@@ -61,7 +51,7 @@ pub struct Text2dBundle {} /// # use bevy_color::Color; /// # use bevy_color::palettes::basic::BLUE; /// # use bevy_ecs::world::World; -/// # use bevy_text::{Font, JustifyText, Text2d, TextLayout, TextFont, TextColor}; +/// # use bevy_text::{Font, JustifyText, Text2d, TextLayout, TextFont, TextColor, TextSpan}; /// # /// # let font_handle: Handle = Default::default(); /// # let mut world = World::default(); @@ -85,19 +75,26 @@ pub struct Text2dBundle {} /// Text2d::new("hello world\nand bevy!"), /// TextLayout::new_with_justify(JustifyText::Center) /// )); +/// +/// // With spans +/// world.spawn(Text2d::new("hello ")).with_children(|parent| { +/// parent.spawn(TextSpan::new("world")); +/// parent.spawn((TextSpan::new("!"), TextColor(BLUE.into()))); +/// }); /// ``` #[derive(Component, Clone, Debug, Default, Deref, DerefMut, Reflect)] -#[reflect(Component, Default, Debug)] +#[reflect(Component, Default, Debug, Clone)] #[require( TextLayout, TextFont, TextColor, TextBounds, Anchor, - SpriteSource, Visibility, + VisibilityClass, Transform )] +#[component(on_add = view::add_visibility_class::)] pub struct Text2d(pub String); impl Text2d { @@ -141,6 +138,7 @@ pub type Text2dWriter<'w, 's> = TextWriter<'w, 's, Text2d>; pub fn extract_text2d_sprite( mut commands: Commands, mut extracted_sprites: ResMut, + mut extracted_slices: ResMut, texture_atlases: Extract>>, windows: Extract>>, text2d_query: Extract< @@ -149,24 +147,29 @@ pub fn extract_text2d_sprite( &ViewVisibility, &ComputedTextBlock, &TextLayoutInfo, + &TextBounds, &Anchor, &GlobalTransform, )>, >, - text_styles: Extract>, + text_colors: Extract>, ) { + let mut start = extracted_slices.slices.len(); + let mut end = start + 1; + // TODO: Support window-independent scaling: https://github.com/bevyengine/bevy/issues/5621 let scale_factor = windows - .get_single() + .single() .map(|window| window.resolution.scale_factor()) .unwrap_or(1.0); let scaling = 
GlobalTransform::from_scale(Vec2::splat(scale_factor.recip()).extend(1.)); for ( - original_entity, + main_entity, view_visibility, computed_block, text_layout_info, + text_bounds, anchor, global_transform, ) in text2d_query.iter() @@ -175,22 +178,29 @@ pub fn extract_text2d_sprite( continue; } - let text_anchor = -(anchor.as_vec() + 0.5); - let alignment_translation = text_layout_info.size * text_anchor; - let transform = *global_transform - * GlobalTransform::from_translation(alignment_translation.extend(0.)) - * scaling; + let size = Vec2::new( + text_bounds.width.unwrap_or(text_layout_info.size.x), + text_bounds.height.unwrap_or(text_layout_info.size.y), + ); + let bottom_left = + -(anchor.as_vec() + 0.5) * size + (size.y - text_layout_info.size.y) * Vec2::Y; + let transform = + *global_transform * GlobalTransform::from_translation(bottom_left.extend(0.)) * scaling; let mut color = LinearRgba::WHITE; let mut current_span = usize::MAX; - for PositionedGlyph { - position, - atlas_info, - span_index, - .. - } in &text_layout_info.glyphs + + for ( + i, + PositionedGlyph { + position, + atlas_info, + span_index, + .. 
+ }, + ) in text_layout_info.glyphs.iter().enumerate() { if *span_index != current_span { - color = text_styles + color = text_colors .get( computed_block .entities() @@ -198,29 +208,41 @@ pub fn extract_text2d_sprite( .map(|t| t.entity) .unwrap_or(Entity::PLACEHOLDER), ) - .map(|(_, text_color)| LinearRgba::from(text_color.0)) + .map(|text_color| LinearRgba::from(text_color.0)) .unwrap_or_default(); current_span = *span_index; } - let atlas = texture_atlases.get(&atlas_info.texture_atlas).unwrap(); + let rect = texture_atlases + .get(&atlas_info.texture_atlas) + .unwrap() + .textures[atlas_info.location.glyph_index] + .as_rect(); + extracted_slices.slices.push(ExtractedSlice { + offset: *position, + rect, + size: rect.size(), + }); - extracted_sprites.sprites.insert( - ( - commands.spawn(TemporaryRenderEntity).id(), - original_entity.into(), - ), - ExtractedSprite { - transform: transform * GlobalTransform::from_translation(position.extend(0.)), + if text_layout_info.glyphs.get(i + 1).is_none_or(|info| { + info.span_index != current_span || info.atlas_info.texture != atlas_info.texture + }) { + let render_entity = commands.spawn(TemporaryRenderEntity).id(); + extracted_sprites.sprites.push(ExtractedSprite { + main_entity, + render_entity, + transform, color, - rect: Some(atlas.textures[atlas_info.location.glyph_index].as_rect()), - custom_size: None, image_handle_id: atlas_info.texture.id(), flip_x: false, flip_y: false, - anchor: Anchor::Center.as_vec(), - original_entity: Some(original_entity), - }, - ); + kind: bevy_sprite::ExtractedSpriteKind::Slices { + indices: start..end, + }, + }); + start = end; + } + + end += 1; } } } @@ -232,11 +254,10 @@ pub fn extract_text2d_sprite( /// /// [`ResMut>`](Assets) -- This system only adds new [`Image`] assets. /// It does not modify or observe existing ones. 
-#[allow(clippy::too_many_arguments)] pub fn update_text2d_layout( - mut last_scale_factor: Local, + mut last_scale_factor: Local>, // Text items which should be reprocessed again, generally when the font hasn't loaded yet. - mut queue: Local>, + mut queue: Local, mut textures: ResMut>, fonts: Res>, windows: Query<&Window, With>, @@ -256,20 +277,22 @@ pub fn update_text2d_layout( ) { // TODO: Support window-independent scaling: https://github.com/bevyengine/bevy/issues/5621 let scale_factor = windows - .get_single() + .single() + .ok() .map(|window| window.resolution.scale_factor()) - .unwrap_or(1.0); + .or(*last_scale_factor) + .unwrap_or(1.); let inverse_scale_factor = scale_factor.recip(); - let factor_changed = *last_scale_factor != scale_factor; - *last_scale_factor = scale_factor; + let factor_changed = *last_scale_factor != Some(scale_factor); + *last_scale_factor = Some(scale_factor); for (entity, block, bounds, text_layout_info, mut computed) in &mut text_query { if factor_changed || computed.needs_rerender() || bounds.is_changed() - || queue.remove(&entity) + || (!queue.is_empty() && queue.remove(&entity)) { let text_bounds = TextBounds { width: if block.linebreak == LineBreak::NoWrap { @@ -329,16 +352,27 @@ pub fn scale_value(value: f32, factor: f32) -> f32 { pub fn calculate_bounds_text2d( mut commands: Commands, mut text_to_update_aabb: Query< - (Entity, &TextLayoutInfo, &Anchor, Option<&mut Aabb>), + ( + Entity, + &TextLayoutInfo, + &Anchor, + &TextBounds, + Option<&mut Aabb>, + ), (Changed, Without), >, ) { - for (entity, layout_info, anchor, aabb) in &mut text_to_update_aabb { - // `Anchor::as_vec` gives us an offset relative to the text2d bounds, by negating it and scaling - // by the logical size we compensate the transform offset in local space to get the center. - let center = (-anchor.as_vec() * layout_info.size).extend(0.0).into(); - // Distance in local space from the center to the x and y limits of the text2d bounds. 
- let half_extents = (layout_info.size / 2.0).extend(0.0).into(); + for (entity, layout_info, anchor, text_bounds, aabb) in &mut text_to_update_aabb { + let size = Vec2::new( + text_bounds.width.unwrap_or(layout_info.size.x), + text_bounds.height.unwrap_or(layout_info.size.y), + ); + let center = (-anchor.as_vec() * size + (size.y - layout_info.size.y) * Vec2::Y) + .extend(0.) + .into(); + + let half_extents = (0.5 * layout_info.size).extend(0.0).into(); + if let Some(mut aabb) = aabb { *aabb = Aabb { center, @@ -358,7 +392,7 @@ mod tests { use bevy_app::{App, Update}; use bevy_asset::{load_internal_binary_asset, Handle}; - use bevy_ecs::schedule::IntoSystemConfigs; + use bevy_ecs::schedule::IntoScheduleConfigs; use crate::{detect_text_needs_rerender, TextIterScratch}; diff --git a/crates/bevy_text/src/text_access.rs b/crates/bevy_text/src/text_access.rs index 84943ea5667fa..7aafa28ef63e2 100644 --- a/crates/bevy_text/src/text_access.rs +++ b/crates/bevy_text/src/text_access.rs @@ -4,7 +4,6 @@ use bevy_ecs::{ prelude::*, system::{Query, SystemParam}, }; -use bevy_hierarchy::Children; use crate::{TextColor, TextFont, TextSpan}; diff --git a/crates/bevy_time/Cargo.toml b/crates/bevy_time/Cargo.toml index e12b04423c463..520782b51990c 100644 --- a/crates/bevy_time/Cargo.toml +++ b/crates/bevy_time/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "bevy_time" -version = "0.15.0-dev" -edition = "2021" +version = "0.16.0-dev" +edition = "2024" description = "Provides time functionality for Bevy Engine" homepage = "https://bevyengine.org" repository = "https://github.com/bevyengine/bevy" @@ -9,23 +9,58 @@ license = "MIT OR Apache-2.0" keywords = ["bevy"] [features] -default = ["bevy_reflect"] -serialize = ["serde"] +default = ["std", "bevy_reflect", "bevy_app/default"] + +# Functionality + +## Adds runtime reflection support using `bevy_reflect`. 
+bevy_reflect = [ + "dep:bevy_reflect", + "bevy_ecs/bevy_reflect", + "bevy_app/bevy_reflect", +] + +## Adds serialization support through `serde`. +serialize = ["dep:serde", "bevy_ecs/serialize", "bevy_platform/serialize"] + +# Platform Compatibility + +## Allows access to the `std` crate. Enabling this feature will prevent compilation +## on `no_std` targets, but provides access to certain additional features on +## supported platforms. +std = [ + "serde?/std", + "bevy_reflect?/std", + "bevy_ecs/std", + "bevy_app/std", + "bevy_platform/std", + "dep:crossbeam-channel", +] + +## `critical-section` provides the building blocks for synchronization primitives +## on all platforms, including `no_std`. +critical-section = [ + "bevy_ecs/critical-section", + "bevy_platform/critical-section", + "bevy_reflect?/critical-section", + "bevy_app/critical-section", +] [dependencies] # bevy -bevy_app = { path = "../bevy_app", version = "0.15.0-dev" } -bevy_ecs = { path = "../bevy_ecs", version = "0.15.0-dev", features = [ - "bevy_reflect", -] } -bevy_reflect = { path = "../bevy_reflect", version = "0.15.0-dev", features = [ - "bevy", -], optional = true } -bevy_utils = { path = "../bevy_utils", version = "0.15.0-dev" } +bevy_app = { path = "../bevy_app", version = "0.16.0-dev", default-features = false } +bevy_ecs = { path = "../bevy_ecs", version = "0.16.0-dev", default-features = false } +bevy_reflect = { path = "../bevy_reflect", version = "0.16.0-dev", default-features = false, optional = true } +bevy_platform = { path = "../bevy_platform", version = "0.16.0-dev", default-features = false } # other -crossbeam-channel = "0.5.0" -serde = { version = "1", features = ["derive"], optional = true } +crossbeam-channel = { version = "0.5.0", default-features = false, features = [ + "std", +], optional = true } +serde = { version = "1", features = [ + "derive", +], default-features = false, optional = true } +log = { version = "0.4", default-features = false } [lints] workspace = true 
diff --git a/crates/bevy_time/LICENSE-APACHE b/crates/bevy_time/LICENSE-APACHE new file mode 100644 index 0000000000000..d9a10c0d8e868 --- /dev/null +++ b/crates/bevy_time/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/crates/bevy_time/LICENSE-MIT b/crates/bevy_time/LICENSE-MIT new file mode 100644 index 0000000000000..9cf106272ac3b --- /dev/null +++ b/crates/bevy_time/LICENSE-MIT @@ -0,0 +1,19 @@ +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/crates/bevy_time/src/common_conditions.rs b/crates/bevy_time/src/common_conditions.rs index e00acf8cffb8b..d944303439e02 100644 --- a/crates/bevy_time/src/common_conditions.rs +++ b/crates/bevy_time/src/common_conditions.rs @@ -1,14 +1,14 @@ use crate::{Real, Time, Timer, TimerMode, Virtual}; use bevy_ecs::system::Res; -use bevy_utils::Duration; +use core::time::Duration; /// Run condition that is active on a regular time interval, using [`Time`] to advance /// the timer. The timer ticks at the rate of [`Time::relative_speed`]. 
/// /// ```no_run /// # use bevy_app::{App, NoopPluginGroup as DefaultPlugins, PluginGroup, Update}; -/// # use bevy_ecs::schedule::IntoSystemConfigs; -/// # use bevy_utils::Duration; +/// # use bevy_ecs::schedule::IntoScheduleConfigs; +/// # use core::time::Duration; /// # use bevy_time::common_conditions::on_timer; /// fn main() { /// App::new() @@ -47,8 +47,8 @@ pub fn on_timer(duration: Duration) -> impl FnMut(Res