From f4cd3b8c7ad55debf24b48865623e95c3f716cc4 Mon Sep 17 00:00:00 2001 From: Jan Date: Wed, 3 Jan 2024 23:24:31 +0100 Subject: [PATCH 01/20] Clone wgpu-py 0.13.2 --- .github/CODEOWNERS | 1 + .github/workflows/ci.yml | 350 +++ .github/workflows/screenshots.yml | 39 + .gitignore | 123 + .readthedocs.yaml | 26 + CHANGELOG.md | 660 +++++ LICENSE | 25 + README.md | 161 ++ codegen/README.md | 153 ++ codegen/__init__.py | 65 + codegen/__main__.py | 19 + codegen/apipatcher.py | 529 ++++ codegen/apiwriter.py | 146 ++ codegen/files.py | 97 + codegen/hparser.py | 231 ++ codegen/idlparser.py | 432 ++++ codegen/tests/test_codegen_apipatcher.py | 118 + codegen/tests/test_codegen_rspatcher.py | 94 + codegen/tests/test_codegen_utils.py | 256 ++ codegen/tests/test_codegen_z.py | 83 + codegen/utils.py | 312 +++ codegen/wgpu_native_patcher.py | 359 +++ conftest.py | 39 + dev-requirements.txt | 19 + docs/Makefile | 20 + docs/_static/style.css | 0 docs/_templates/wgpu_class_layout.rst | 7 + docs/backends.rst | 83 + docs/conf.py | 166 ++ docs/gui.rst | 172 ++ docs/guide.rst | 254 ++ docs/index.rst | 24 + docs/make.bat | 35 + docs/start.rst | 103 + docs/utils.rst | 71 + docs/wgpu.rst | 224 ++ docs/wgpu_enums.rst | 7 + docs/wgpu_flags.rst | 7 + docs/wgpu_structs.rst | 7 + download-wgpu-native.py | 178 ++ examples/compute_noop.py | 136 + examples/compute_timestamps.py | 166 ++ examples/cube.py | 388 +++ examples/events.py | 21 + examples/screenshots/cube.png | Bin 0 -> 4265 bytes examples/screenshots/triangle_auto.png | Bin 0 -> 22181 bytes examples/shadertoy_blink.py | 42 + examples/shadertoy_circuits.py | 68 + examples/shadertoy_flyby.py | 190 ++ examples/shadertoy_gen_art.py | 86 + examples/shadertoy_glsl_clock.py | 94 + examples/shadertoy_glsl_flame.py | 81 + examples/shadertoy_glsl_fuji.py | 169 ++ examples/shadertoy_glsl_inercia.py | 490 ++++ examples/shadertoy_glsl_mouse_event.py | 50 + examples/shadertoy_glsl_sdf.py | 657 +++++ examples/shadertoy_glsl_sea.py | 174 ++ examples/shadertoy_glsl_stone.py | 288 +++ examples/shadertoy_glsl_textures.py | 26 + examples/shadertoy_liberation.py | 144 ++ examples/shadertoy_matrix.py | 155 ++ examples/shadertoy_riders.py | 45 + examples/shadertoy_sea.py | 210 ++ examples/shadertoy_star.py | 99 + examples/shadertoy_textures.py | 24 + examples/tests/test_examples.py | 153 ++ examples/triangle.py | 155 ++ examples/triangle_auto.py | 21 + examples/triangle_glfw.py | 22 + examples/triangle_glsl.py | 143 ++ examples/triangle_qt.py | 41 + examples/triangle_qt_embed.py | 53 + examples/triangle_subprocess.py | 84 + examples/triangle_wx.py | 16 + examples/triangle_wx_embed.py | 40 + examples/wgpu-examples.ipynb | 117 + pyproject.toml | 34 + setup.cfg | 36 + setup.py | 63 + tests/renderutils.py | 334 +++ tests/test_api.py | 220 ++ tests/test_diagnostics.py | 382 +++ tests/test_gui_auto_offscreen.py | 66 + tests/test_gui_base.py | 246 ++ tests/test_gui_glfw.py | 297 +++ tests/test_util_compute.py | 536 ++++ tests/test_util_core.py | 47 + tests/test_util_shadertoy.py | 154 ++ tests/test_wgpu_native_basics.py | 222 ++ tests/test_wgpu_native_buffer.py | 530 ++++ tests/test_wgpu_native_compute_tex.py | 592 +++++ tests/test_wgpu_native_errors.py | 268 ++ tests/test_wgpu_native_query_set.py | 151 ++ tests/test_wgpu_native_render.py | 629 +++++ tests/test_wgpu_native_render_tex.py | 566 +++++ tests/test_wgpu_native_texture.py | 285 +++ tests/testutils.py | 138 + tests_mem/test_gui_glfw.py | 64 + tests_mem/test_gui_offscreen.py | 90 + tests_mem/test_gui_qt.py | 58 + tests_mem/test_meta.py 
| 81 + tests_mem/test_objects.py | 377 +++ tests_mem/testutils.py | 230 ++ wgpu/__init__.py | 29 + wgpu/__pyinstaller/__init__.py | 12 + wgpu/__pyinstaller/conftest.py | 1 + wgpu/__pyinstaller/hook-wgpu.py | 28 + wgpu/__pyinstaller/test_wgpu.py | 30 + wgpu/_classes.py | 2100 ++++++++++++++++ wgpu/_coreutils.py | 157 ++ wgpu/_diagnostics.py | 520 ++++ wgpu/backends/__init__.py | 37 + wgpu/backends/auto.py | 27 + wgpu/backends/js_webgpu/__init__.py | 31 + wgpu/backends/rs.py | 12 + wgpu/backends/wgpu_native/__init__.py | 21 + wgpu/backends/wgpu_native/_api.py | 2937 ++++++++++++++++++++++ wgpu/backends/wgpu_native/_ffi.py | 205 ++ wgpu/backends/wgpu_native/_helpers.py | 445 ++++ wgpu/backends/wgpu_native/_mappings.py | 454 ++++ wgpu/backends/wgpu_native/extras.py | 50 + wgpu/classes.py | 8 + wgpu/enums.py | 686 +++++ wgpu/flags.py | 111 + wgpu/gui/__init__.py | 13 + wgpu/gui/auto.py | 106 + wgpu/gui/base.py | 417 +++ wgpu/gui/glfw.py | 553 ++++ wgpu/gui/jupyter.py | 137 + wgpu/gui/offscreen.py | 244 ++ wgpu/gui/qt.py | 430 ++++ wgpu/gui/wx.py | 176 ++ wgpu/resources/__init__.py | 2 + wgpu/resources/codegen_report.md | 34 + wgpu/resources/webgpu.h | 1803 +++++++++++++ wgpu/resources/webgpu.idl | 1314 ++++++++++ wgpu/resources/wgpu.h | 256 ++ wgpu/structs.py | 748 ++++++ wgpu/utils/__init__.py | 42 + wgpu/utils/compute.py | 198 ++ wgpu/utils/device.py | 17 + wgpu/utils/shadertoy.py | 704 ++++++ 142 files changed, 32134 insertions(+) create mode 100644 .github/CODEOWNERS create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/screenshots.yml create mode 100644 .gitignore create mode 100644 .readthedocs.yaml create mode 100644 CHANGELOG.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 codegen/README.md create mode 100644 codegen/__init__.py create mode 100644 codegen/__main__.py create mode 100644 codegen/apipatcher.py create mode 100644 codegen/apiwriter.py create mode 100644 codegen/files.py create mode 100644 codegen/hparser.py create mode 100644 codegen/idlparser.py create mode 100644 codegen/tests/test_codegen_apipatcher.py create mode 100644 codegen/tests/test_codegen_rspatcher.py create mode 100644 codegen/tests/test_codegen_utils.py create mode 100644 codegen/tests/test_codegen_z.py create mode 100644 codegen/utils.py create mode 100644 codegen/wgpu_native_patcher.py create mode 100644 conftest.py create mode 100644 dev-requirements.txt create mode 100644 docs/Makefile create mode 100644 docs/_static/style.css create mode 100644 docs/_templates/wgpu_class_layout.rst create mode 100644 docs/backends.rst create mode 100644 docs/conf.py create mode 100644 docs/gui.rst create mode 100644 docs/guide.rst create mode 100644 docs/index.rst create mode 100644 docs/make.bat create mode 100644 docs/start.rst create mode 100644 docs/utils.rst create mode 100644 docs/wgpu.rst create mode 100644 docs/wgpu_enums.rst create mode 100644 docs/wgpu_flags.rst create mode 100644 docs/wgpu_structs.rst create mode 100644 download-wgpu-native.py create mode 100644 examples/compute_noop.py create mode 100644 examples/compute_timestamps.py create mode 100644 examples/cube.py create mode 100644 examples/events.py create mode 100644 examples/screenshots/cube.png create mode 100644 examples/screenshots/triangle_auto.png create mode 100644 examples/shadertoy_blink.py create mode 100644 examples/shadertoy_circuits.py create mode 100644 examples/shadertoy_flyby.py create mode 100644 examples/shadertoy_gen_art.py create mode 100644 examples/shadertoy_glsl_clock.py 
create mode 100644 examples/shadertoy_glsl_flame.py create mode 100644 examples/shadertoy_glsl_fuji.py create mode 100644 examples/shadertoy_glsl_inercia.py create mode 100644 examples/shadertoy_glsl_mouse_event.py create mode 100644 examples/shadertoy_glsl_sdf.py create mode 100644 examples/shadertoy_glsl_sea.py create mode 100644 examples/shadertoy_glsl_stone.py create mode 100644 examples/shadertoy_glsl_textures.py create mode 100644 examples/shadertoy_liberation.py create mode 100644 examples/shadertoy_matrix.py create mode 100644 examples/shadertoy_riders.py create mode 100644 examples/shadertoy_sea.py create mode 100644 examples/shadertoy_star.py create mode 100644 examples/shadertoy_textures.py create mode 100644 examples/tests/test_examples.py create mode 100644 examples/triangle.py create mode 100644 examples/triangle_auto.py create mode 100644 examples/triangle_glfw.py create mode 100644 examples/triangle_glsl.py create mode 100644 examples/triangle_qt.py create mode 100644 examples/triangle_qt_embed.py create mode 100644 examples/triangle_subprocess.py create mode 100644 examples/triangle_wx.py create mode 100644 examples/triangle_wx_embed.py create mode 100644 examples/wgpu-examples.ipynb create mode 100644 pyproject.toml create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tests/renderutils.py create mode 100644 tests/test_api.py create mode 100644 tests/test_diagnostics.py create mode 100644 tests/test_gui_auto_offscreen.py create mode 100644 tests/test_gui_base.py create mode 100644 tests/test_gui_glfw.py create mode 100644 tests/test_util_compute.py create mode 100644 tests/test_util_core.py create mode 100644 tests/test_util_shadertoy.py create mode 100644 tests/test_wgpu_native_basics.py create mode 100644 tests/test_wgpu_native_buffer.py create mode 100644 tests/test_wgpu_native_compute_tex.py create mode 100644 tests/test_wgpu_native_errors.py create mode 100644 tests/test_wgpu_native_query_set.py create mode 100644 tests/test_wgpu_native_render.py create mode 100644 tests/test_wgpu_native_render_tex.py create mode 100644 tests/test_wgpu_native_texture.py create mode 100644 tests/testutils.py create mode 100644 tests_mem/test_gui_glfw.py create mode 100644 tests_mem/test_gui_offscreen.py create mode 100644 tests_mem/test_gui_qt.py create mode 100644 tests_mem/test_meta.py create mode 100644 tests_mem/test_objects.py create mode 100644 tests_mem/testutils.py create mode 100644 wgpu/__init__.py create mode 100644 wgpu/__pyinstaller/__init__.py create mode 100644 wgpu/__pyinstaller/conftest.py create mode 100644 wgpu/__pyinstaller/hook-wgpu.py create mode 100644 wgpu/__pyinstaller/test_wgpu.py create mode 100644 wgpu/_classes.py create mode 100644 wgpu/_coreutils.py create mode 100644 wgpu/_diagnostics.py create mode 100644 wgpu/backends/__init__.py create mode 100644 wgpu/backends/auto.py create mode 100644 wgpu/backends/js_webgpu/__init__.py create mode 100644 wgpu/backends/rs.py create mode 100644 wgpu/backends/wgpu_native/__init__.py create mode 100644 wgpu/backends/wgpu_native/_api.py create mode 100644 wgpu/backends/wgpu_native/_ffi.py create mode 100644 wgpu/backends/wgpu_native/_helpers.py create mode 100644 wgpu/backends/wgpu_native/_mappings.py create mode 100644 wgpu/backends/wgpu_native/extras.py create mode 100644 wgpu/classes.py create mode 100644 wgpu/enums.py create mode 100644 wgpu/flags.py create mode 100644 wgpu/gui/__init__.py create mode 100644 wgpu/gui/auto.py create mode 100644 wgpu/gui/base.py create mode 100644 
wgpu/gui/glfw.py create mode 100644 wgpu/gui/jupyter.py create mode 100644 wgpu/gui/offscreen.py create mode 100644 wgpu/gui/qt.py create mode 100644 wgpu/gui/wx.py create mode 100644 wgpu/resources/__init__.py create mode 100644 wgpu/resources/codegen_report.md create mode 100644 wgpu/resources/webgpu.h create mode 100644 wgpu/resources/webgpu.idl create mode 100644 wgpu/resources/wgpu.h create mode 100644 wgpu/structs.py create mode 100644 wgpu/utils/__init__.py create mode 100644 wgpu/utils/compute.py create mode 100644 wgpu/utils/device.py create mode 100644 wgpu/utils/shadertoy.py diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..df51a9a --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @Korijn diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..5292b1b --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,350 @@ +name: CI + +on: + push: + branches: + - main + tags: + - 'v*' + pull_request: + branches: + - main + +jobs: + + lint-build: + name: Test Linting + timeout-minutes: 5 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Install dev dependencies + run: | + python -m pip install --upgrade pip + pip install -U black flake8 flake8-black pep8-naming + - name: Flake8 + run: | + flake8 . + + test-codegen-build: + name: Test Codegen + timeout-minutes: 5 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -U pytest numpy black cffi + - name: Test codegen + run: | + pytest -v codegen + + test-minimal-import-build: + name: Test Imports + timeout-minutes: 5 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install requests + python download-wgpu-native.py + pip uninstall -q -y requests + pip install -e . 
+ - name: Test imports + env: + WGPU_FORCE_OFFSCREEN: true + run: | + python -c "print('wgpu'); import wgpu; print(wgpu)" + python -c "print('wgpu.backends.wgpu_native'); import wgpu.backends.wgpu_native" + python -c "print('wgpu.gui.offscreen'); import wgpu.gui.offscreen" + python -c "print('wgpu.utils'); import wgpu.utils" + python -c "print('wgpu.utils.shadertoy'); import wgpu.utils.shadertoy" + + docs-build: + name: Test Docs + timeout-minutes: 5 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Install dev dependencies + run: | + python -m pip install --upgrade pip + pip install -U -r dev-requirements.txt + - name: Build docs + run: | + cd docs + make html SPHINXOPTS="-W --keep-going" + + test-examples-build: + name: Test Examples + timeout-minutes: 10 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Install llvmpipe and lavapipe for offscreen canvas + run: | + sudo apt-get update -y -qq + sudo apt install -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers + - name: Install dev dependencies + run: | + python -m pip install --upgrade pip + pip install -U -r dev-requirements.txt + python download-wgpu-native.py + pip install -e . + - name: Test examples + env: + EXPECT_LAVAPIPE: true + run: | + pytest -v examples + + test-pyinstaller-build: + name: Test PyInstaller + timeout-minutes: 5 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.11' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -U requests numpy pytest + python download-wgpu-native.py + pip install -e . + pip install psutil glfw pyinstaller>=4.9 + - name: Test PyInstaller + run: | + pyinstaller --version + pytest -v wgpu/__pyinstaller + + test-builds: + name: ${{ matrix.name }} + timeout-minutes: 5 + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - name: Test Linux py38 + os: ubuntu-latest + pyversion: '3.8' + - name: Test Linux py39 + os: ubuntu-latest + pyversion: '3.9' + - name: Test Linux py310 + os: ubuntu-latest + pyversion: '3.10' + - name: Test Linux py311 + os: ubuntu-latest + pyversion: '3.11' + - name: Test Linux py312 + os: ubuntu-latest + pyversion: '3.12' + - name: Test Linux pypy3 + os: ubuntu-latest + pyversion: 'pypy3.9' + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.pyversion }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.pyversion }} + - name: Install llvmpipe and lavapipe for offscreen canvas + if: matrix.os == 'ubuntu-latest' + run: | + sudo apt-get update -y -qq + sudo apt install -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers + - name: Install dev dependencies + run: | + python -m pip install --upgrade pip + pip install -U -r dev-requirements.txt + python download-wgpu-native.py + pip install -e . + - name: Unit tests + run: | + pytest -v tests + - name: Memory tests + run: | + pytest -v tests_mem + + # The release builds are done for the platforms that we want to build wheels for. + # We build wheels, test them, and then upload the wheel as an artifact. 
+ release-builds: + name: Build wheels on ${{ matrix.os }} + timeout-minutes: 10 + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Install dev dependencies + run: | + python -m pip install --upgrade pip wheel setuptools twine + - name: Build wheels + uses: pypa/cibuildwheel@v2.16.2 + env: + CIBW_MANYLINUX_X86_64_IMAGE: quay.io/pypa/manylinux_2_28_x86_64 + CIBW_ARCHS_LINUX: x86_64 + CIBW_SKIP: cp39-musllinux_x86_64 + with: + output-dir: dist + - name: Twine check + run: | + twine check dist/* + - name: Upload distributions + uses: actions/upload-artifact@v2 + with: + path: dist + name: dist + + # These release builds use QEMU so that we can build wheels for arm64. + # We build wheels and upload the wheel as an artifact, but we don't test them here. + qemu-release-builds: + name: Build wheels on ubuntu-latest with QEMU + timeout-minutes: 10 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: arm64 + - name: Build wheels + uses: pypa/cibuildwheel@v2.16.2 + env: + CIBW_MANYLINUX_AARCH64_IMAGE: quay.io/pypa/manylinux_2_28_aarch64 + CIBW_ARCHS_LINUX: aarch64 + CIBW_SKIP: cp39-musllinux_aarch64 + with: + output-dir: dist + - name: Upload distributions + uses: actions/upload-artifact@v2 + with: + path: dist + name: dist + + sdist-build: + name: Build sdist + timeout-minutes: 5 + runs-on: ubuntu-latest + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Install dev dependencies + run: | + python -m pip install --upgrade pip + pip install -U -r dev-requirements.txt + - name: Create source distribution + run: | + python setup.py sdist + - name: Test sdist + shell: bash + run: | + rm -rf ./wgpu + pushd $HOME + pip install $GITHUB_WORKSPACE/dist/*.tar.gz + popd + # don't run tests, we just want to know if the sdist can be installed + pip uninstall -y wgpu + git reset --hard HEAD + - name: Twine check + run: | + twine check dist/* + - name: Upload distributions + uses: actions/upload-artifact@v2 + with: + path: dist + name: dist + + publish: + name: Publish to GitHub and PyPI + runs-on: ubuntu-latest + needs: [test-builds, release-builds, qemu-release-builds, sdist-build] + if: success() && startsWith(github.ref, 'refs/tags/v') + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Download assets + uses: actions/download-artifact@v1.0.0 + with: + name: dist + - name: Get version from git ref + id: get_version + run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//} + - name: Create GH release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.get_version.outputs.VERSION }} + release_name: Release ${{ steps.get_version.outputs.VERSION }} + body: | + Autogenerated binary wheels that include wgpu-native. + See [the changelog](https://github.com/pygfx/wgpu-py/blob/main/CHANGELOG.md) for details.
+ draft: false + prerelease: false + - name: Upload release assets + # Move back to official action after fix https://github.com/actions/upload-release-asset/issues/4 + uses: AButler/upload-release-assets@v2.0 + with: + release-tag: ${{ steps.get_version.outputs.VERSION }} + files: 'dist/*.tar.gz;dist/*.whl' + repo-token: ${{ secrets.GITHUB_TOKEN }} + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@master + with: + user: __token__ + password: ${{ secrets.PYPI_PASSWORD }} diff --git a/.github/workflows/screenshots.yml b/.github/workflows/screenshots.yml new file mode 100644 index 0000000..276d53b --- /dev/null +++ b/.github/workflows/screenshots.yml @@ -0,0 +1,39 @@ +name: Screenshots + +on: + workflow_dispatch: + pull_request: + branches: + - main + +jobs: + screenshots: + name: Regenerate + timeout-minutes: 10 + runs-on: 'ubuntu-latest' + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v4 + with: + python-version: '3.9' + - name: Install llvmpipe and lavapipe for offscreen canvas + run: | + sudo apt-get update -y -qq + sudo add-apt-repository ppa:oibaf/graphics-drivers -y + sudo apt-get update -y -qq + sudo apt install -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers + - name: Install dev dependencies + run: | + python -m pip install --upgrade pip + pip install -U -r dev-requirements.txt + python download-wgpu-native.py + pip install -e . + - name: Regenerate screenshots + run: | + pytest -v --regenerate-screenshots -k test_examples_screenshots examples + - uses: actions/upload-artifact@v2 + if: always() + with: + name: screenshots + path: examples/screenshots diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..526929d --- /dev/null +++ b/.gitignore @@ -0,0 +1,123 @@ +# Special for this repo +nogit/ +wgpu/resources/*.dll +wgpu/resources/*.so +wgpu/resources/*.dylib +wgpu/resources/commit-sha +examples/screenshots/diffs + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Renderdoc captures +*.cap +*.rdc + +# C extensions +*.so + +# Distribution / packaging +.DS_Store +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +wheelhouse/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ +docs/generated + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# VSCode project settings +.vscode + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..7c3e63c --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,26 @@ +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.11" +# If we ever want to run wgpu stuff in the doc build +# apt_packages: +# - libegl1-mesa +# - libgl1-mesa-dri +# - libxcb-xfixes0-dev +# - mesa-vulkan-drivers + +sphinx: + configuration: docs/conf.py + fail_on_warning: true + +python: + install: + - method: pip + path: . + extra_requirements: + - docs diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..215facf --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,660 @@ +# Changelog / release notes + +WebGPU and wgpu-native are still changing fast, and so do we. We do +not yet attempt to make things backwards compatible. Instead we try to +be precise about tracking changes to the public API. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +Possible sections in each release: + +* Added: for new features. +* Changed: for changes in existing functionality. +* Deprecated: for soon-to-be removed features. +* Removed: for now removed features. +* Fixed: for any bug fixes. +* Security: in case of vulnerabilities. + + +### [v0.13.2] - 21-12-2023 + +Added: + +* Implement support for timestamp QuerySet. +* Add texture input and iFrameRate builtin to Shadertoy util https://github.com/pygfx/wgpu-py/pull/453 + + +### [v0.13.1] - 08-12-2023 + +Fixed: + +* Prevent access violation errors with GLFW on Windows. +* Prevent a segfault when deleting a `GPUPipelineLayout` (observed in a very specific use-case on LavaPipe). +* Fix `triangle_glsl.py` example. +* Fix that when logger is set to debug, errors are produced when Python exits. + +Added: + +* Support for linux-aarch64 (binary wheels available)! This includes Raspberry Pi's with a 64-bit OS, and adds support for building linux docker images on Apple Silicon devices without having to emulate x86 (no need for `--platform linux/amd64`). + + +### [v0.13.0] - 24-11-2023 + +Added: + +* Add `iDate` builtin to Shadertoy utility. +* Allow "auto" layout args for `create_compute_pipeline()`. +* Official support for Python 3.12 and pypy. + +Changed: + +* Update to wgpu-native 0.18.1.2. +* `CanvasContext.get_current_texture()` now returns a `GPUTexture` instead of a `GPUTextureView`.
+* `OffscreenCanvasBase.present()` now receives a `GPUTexture` instead of a `GPUTextureView`, + and this is a new texture on each draw (no re-use). +* Renamed `wgpu.gui.WgpuOffscreenCanvas` to `WgpuOffscreenCanvasBase`. +* The `wgpu.base` submodule that defined the GPU classes is renamed to be a private + module. The new `wgpu.classes` namespace contains all GPU classes (and nothing else). +* The `__repr__` of the GPU classes shows a shorter canonical class name. +* Flags and Enums have a more useful `__repr__`. + +Fixed: + +* Dragging a window between windows with different scale factor (with Qt on Windows) + no longer puts the window in an invalid state. A warning is still produced though. +* `GPUCommandEncoder.begin_render_pass()` binds the lifetime of passed texture views to + the returned render pass object to prevent premature destruction when no reference to + a texture view is kept. + + +### [v0.12.0] - 15-11-2023 + +This is a big release that contains many improvements, but also multiple API changes. + +Most backward incompatible changes are due to two things: the backend +system has been refactored, making it simpler and future-proof. And we +have revised the buffer mapping API, making it more similar to the +WebGPU spec, and providing more flexible and performant ways to set +buffer data. + +A summary to help you update your code: +```py +# X import wgpu.backends.rs +import wgpu + + +# X wgpu.request_adapter(canvas=None, power_preference="high-performance") +wgpu.gpu.request_adapter(power_preference="high-performance") + +# X buffer.map_read() +buffer.map("READ") +buffer.read_mapped(...) +buffer.read_mapped(...) +buffer.unmap() + +# X buffer.map_write() +buffer.map("WRITE") +buffer.write_mapped(data1, ...) +buffer.write_mapped(data2, ...) +buffer.unmap() +``` + +Added: + +* The `wgpu.gpu` object, which represents the API entrypoint. This makes the API more clear and more similar to the WebGPU API. +* A convenience `auto` backend, and a stub `js_webgpu` backend. +* New function `enumerate_adapters()` in the `wgpu_native` backend. +* Warning about pip when wgpu-native binary is missing on Linux. +* The `GPUBuffer` has new methods `map()`, `map_async()`, `unmap()`. These have been + part of the WebGPU spec for a long time, but we had an alternative API, until now. +* The `GPUBuffer` has new methods `read_mapped()` and `write_mapped()`. These are not + present in the WebGPU spec; they are the Pythonic alternative to `getMappedRange()`. +* Flags can now be passed as strings, and can even be combined using "MAP_READ|COPY_DST". +* GUI events have an extra "timestamp" field, and wheel events an additional "buttons" field. +* A diagnostics subsystem that amongst other things counts GPU objects. Try e.g. `wgpu.diagnostics.print_report()`. +* Several improvements to the shadertoy util: offscreen support and a snapshot method. + +Changed: + +* Can create a buffer that is initially mapped: `device.create_buffer(..., mapped_at_creation=True)` is enabled again. +* The `wgpu.request_adapter()` function is moved to `wgpu.gpu.request_adapter()`. Same for the async version. +* The `canvas` argument of the `request_adapter()` function is now optional. +* The `rs` backend is renamed to `wgpu_native`. +* It is no longer necessary to explicitly import the backend. +* The `GPUDevice.request_device_tracing()` method is now a function in the `wgpu_native` backend. +* We no longer force using Vulkan on Windows. For now wgpu-native still prefers Vulkan over D3D12.
+* The `wgpu.utils` subpackage is imported by default, but most submodules are not. This means that `compute_with_buffers` must be explicitly imported from `wgpu.utils.compute`. + +Deprecated: + +* `wgpu.request_adapter()` and its async version. Use `wgpu.gpu.request_adapter()` instead. +* The `GPUBuffer` methods `map_read()` and `map_write()` are deprecated, in favor of `map()`, `unmap()`, `read_mapped()` and `write_mapped()`. + +To be clear, these are not changed: + +* The convenient `device.create_buffer_with_data()` (not part of the WebGPU spec) is still available. +* The `GPUQueue.read_buffer()` and `GPUQueue.write_buffer()` methods are unchanged. + +Fixed: + +* The shadertoy util now re-uses the default device, avoiding memory leaks when running multiple of them consecutively. +* The GUI backend selection takes into account whether a backend module is already imported. +* The offscreen GUI backend no longer uses asyncio (it does not need an event loop). +* Prevent a few classes of memory leaks. Mind that creating many `GPUDevice` objects still leaks. + + +### [v0.11.0] - 11-10-2023 + +Changed: + +* Update to wgpu-native 0.17.2.1. No changes are needed in downstream code. + + +### [v0.10.0] - 09-10-2023 + +In this release the API is aligned with the latest webgpu.idl, and +we updated to wgpu-native (v0.17.0.2). + +Added: + +* New `wgpu.wgsl_language_features` property, which for now always returns an empty set. +* The `GPUShaderModule.compilation_info` property (and its async version) are replaced with a `get_compilation_info()` method. +* The WebGPU features "bgra8unorm-storage" and "float32-filterable" are now available. + +Changed: + +* The binary wheels are now based on manylinux 2.28, and the 32bit Linux wheels are no longer built. +* In WGSL: toplevel constants must be defined using `const`, using `let` will now fail. +* In WGSL: it is no longer possible to re-declare an existing variable name. +* Error messages may look a bit different, since wgpu-native now produces nice messages replacing our custom ones. +* Errors produced by a call into a wgpu-native function now produce a Python exception (no more async logging of errors). + + +### [v0.9.5] - 02-10-2023 + +Fixed: + +* Fixed setting the dpi awareness in the Qt backend, by correctly looking up the Qt version. + +Changed: + +* Links to readthedocs now point to *stable* instead of *latest*, so that people + reading the docs see the docs that reflect the latest release. +* Don't enable any features by default (previously WGPUNativeFeature_TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES was enabled). + + +### [v0.9.4] - 23-02-2023 + +Fixed: + +* Fixed issue related to winid (native widgets) on embedded Qt widgets on Windows (#348). +* Fixed our example screenshot tests. + + +### [v0.9.3] - 20-02-2023 + +Changed: + +* The offscreen `WgpuCanvas.draw()` method now returns a `memoryview` instead of a numpy array. +* The shadertoy util changed internally from using numpy to using a memoryview. + + +### [v0.9.2] - 17-02-2023 + +Fixed: + +* Fixed that `get_preferred_format()` could crash (in `wgpuSurfaceGetSupportedFormats`) due to an upstream bug in wgpu-native (#342) + +Added: + +* The shadertoy util now supports GLSL, so code from the shadertoy website can be directly copied and run with wgpu (#343) + + +### [v0.9.1] - 13-02-2023 + +Changed: + +* Improved documentation. + +Added: + +* Added `print_report()` to get a report on the internals of wgpu. +* Added `command_encoder.clear_buffer()`. +* Added support for GLSL.
+ + +### [v0.9.0] - 25-01-2023 + +In this release the API is aligned with the latest webgpu.idl, and +we updated to the latest release of wgpu-native (v0.14.2.3). + +Changed: + +* To use the default `min_binding_size` in `create_bind_group_layout`, it should be `None` instead of zero. +* If the depth-stencil texture has no room for stencil data, the `stencil_read_mask` and `stencil_write_mask` fields in the `DepthStencilState` struct passed to `create_render_pipeline()` must be set to 0. +* In WGSL, `@stage(compute)` must now be `@compute`. Same for `vertex` and `fragment`. +* In WGSL, the list of reserved words has been extended, including e.g. `mod`, `matrix` and `ref`. +* In WGSL, `smoothStep` is now `smoothstep`. + +Added: + +* New IDL: texture has new props `width`, `height`, `depth_or_array_layers`. +* New IDL: Buffer has new prop `map_state`. + + +### [v0.8.4] - 10-01-2023 + +Fixed: + +* The offscreen canvas's mainloop prevents leaking callbacks better (#322) +* Prevent error messages when Qt examples/apps are closed (#326) + + +### [v0.8.3] - 06-01-2023 + +Fixed: + +* Prevent Qt warning about setting dpi awareness (#320) +* Make canvases close when they get deleted (#319) +* Fix qt canvas in ipython (#315) +* Make offscreen canvas closable (#309) +* Fix that the offscreen canvas had its size hardcoded, ignoring the given size (#317) +* Fixed renaming of `queue` in docs (#308) +* Fix using `.draw_frame` on qt canvas (#304) +* Add missing dev dependencies (#295) + +Added: + +* A shadertoy utility, plus examples (#312) + +Changed: + +* Improve the error prompt when wgsl code has a multi-line error (#311, #316) +* Tests: execute examples in the test process (#310) +* Package only the release binary (not the debug build) (#299) +* Codegen: uses in-memory file system during code generation (#303) +* Improve readme (#290, #302, #314) + + +### [v0.8.2] - 06-10-2022 + +Fixed: + +* Fixed imports for PyQt6. +* Keyboard events work again for Qt 6.3. +* Fixed that overloading ``handle_event()`` did not work for a canvas based on a Qt or wx main widget/window. + +Added: + +* Can now add a wildcard ("*") to ``add_event_handler`` to handle all events. +* Shader error messages show more context, making shader debugging much easier. +* VSync can now be turned off to raise the frame rate when needed. Note that FPS measurements are still a poor performance benchmark! + +Changed: + +* GLFW canvas does not draw when minimized anymore. +* The offscreen and Jupyter canvas now use the srgb format for consistency with normal canvases. +* The examples have been adjusted for srgb colors. + + +### [v0.8.1] - 29-04-2022 + +Fixed: + +* Fixed regression that `canvas.handle_event()` could no longer be overloaded to handle move and wheel events. + +Changed: + +* Added a note in the docs to explain that the version of the examples must match the runtime version. + + +### [v0.8.0] - 20-04-2022 + +Changed: + +* Now targeting wgpu-native 0.12.0.1. +* Updated API to the latest WebGPU spec. +* Better error logging using the new callbacks in wgpu-native. +* All destructors (drop methods) are now working as they should. + +To update, you need to adjust to the following API changes: + +* The encoders' `end_pass()` methods are renamed to `end()`. +* The compute encoder's `dispatch()` is renamed to `dispatch_workgroups`, and `dispatch_indirect` to `dispatch_workgroups_indirect`. +* The `load_value` is replaced with `clear_value` and `load_op`. +* Same for `depth_load_value` and `stencil_load_value`.
+* The `device.create_sampler()` method for mipmap filtering now uses the `MipmapFilterMode` enum instead of the `FilterMode` enum. Since the fields of these enums are the same, you probably don't need to change anything. + + +To update, your shaders need the following changes: + +* The decorators have changed from `[[...]]` to `@...`. + * E.g. change `[[location(0)]]` to `@location(0)`. + * E.g. change `[[group(0), binding(0)]]` to `@group(0) @binding(0)`. +* Structs now use `,` to separate fields instead of `;`. +* The `elseif` keyword is now `else if`. +* Buffers bound as arrays don't need to be defined via a struct anymore. + + +### [v0.7.7] - 12-04-2022 + +Fixed: + +* Fixed that event handlers could not be added while in an event handler. +* Prevent swap chain errors when minimizing a window. + +Added: + +* The `QWgpuWidget` now also supports the autogui events. +* Our CI now tests the examples (including image comparisons). + + +### [v0.7.6] - 28-03-2022 + +Changed: + +* Pointer move and wheel events are now rate-limited, leading to better performance if e.g. picking is done at each event. + +Added: + +* Added `wgpu.gui.base.log_exception`, a context-manager to catch and log exceptions, e.g. in event callbacks. + + +### [v0.7.5] - 17-03-2022 + +Fixed: + +* Mouse down events were not emitted during double clicks in the Qt canvas. +* Mouse move events were not emitted when no button is pressed in the Qt canvas. + + +### [v0.7.4] - 04-02-2022 + +Fixed: + +* Position of glfw pointer events on MacOS. + + +### [v0.7.3] - 25-01-2022 + +Added: + +* Expanded the `auto` gui backend, which can now also select the qt framework if available. +* The qt gui backend (like the glfw gui backend) supports user events in the same manner as + the jupyter gui backend. +* Expanded the `auto` gui backend to also support an offscreen canvas intended for automated tests. + +Fixed: + +* Size of glfw windows on MacOS. + + +### [v0.7.2] - 24-12-2021 + +Fixed: + +* Exceptions in user-interaction callbacks don't break the glfw loop anymore. +* Pointer events in glfw have the correct key modifiers now. + + +### [v0.7.1] - 22-12-2021 + +Added: + +* #224 - Added `add_event_handler` and `remove_event_handler` to GLFW and Jupyter GUI canvases. + + +### [v0.7.0] - 21-12-2021 + +Changed: + +* Now targeting wgpu-native v0.11.0.1, containing many upstream fixes and improvements. +* The `[[block]]` syntax in shaders has been dropped. +* Renamed `ProgrammableStage.clamp_depth` -> `unclipped_depth`. + + +### [v0.6.0] - 16-12-2021 + +Added: + +* Official support for Python 3.10. +* The `max_fps` argument can be provided to a canvas. +* The glfw gui backend supports user events in the same manner as the jupyter gui backend, + using the [jupyter_rfb event specification](https://jupyter-rfb.readthedocs.io/en/stable/events.html). +* Introduce the `auto` gui backend, which selects either glfw or jupyter. + +Fixed: + +* The wx gui backend is now fully functional. + +Changed: + +* The qt and wx gui backend now contain `WgpuCanvas` for a toplevel window, + and `WgpuWidget` for an embeddable widget. +* All gui backends (can) now limit the FPS. +* No changes to the wgpu API. + + +### [v0.5.9] - 11-10-2021 + +Fixed: + +* Include the correct binaries in macOS arm64 wheels +* Options for arch argument of download-wgpu-native.py script + + +### [v0.5.8] - 09-10-2021 + +Added: + +* Experimental support for macos_arm64 (M1). + +Changed: + +* The Qt examples use PySide6 instead of PyQt5.
+ + +### [v0.5.7] - 07-10-2021 + +Changed: + +* Update to the latest wgpu-native (including latest Naga). +* The power-preference is actually taken into account. +* The adapter actually reports its limits. +* The limits in `request_device` are actually used. +* The `Adapter.is_software` property is renamed to `Adapter.is_fallback_adapter`. + + +### [v0.5.6] - 30-08-2021 + +Added: + +* An offscreen canvas to take snapshots without needing a window. + +Changed: + +* On Windows, the Vulkan backend is now forced unless `WGPU_BACKEND_TYPE` is set. + +Fixed: + +* Better support for multiple canvases by fixing a specific Qt issue. +* Fixed that the canvas was not passed to the low-level function of `request_adapter`. +* Support calling `get_current_texture()` multiple times during a draw. + + +### [v0.5.5] - 09-08-2021 + +Added: + +* The wgpu backend can be forced using the `WGPU_BACKEND_TYPE` env variable. + Values can be e.g. "D3D12", "Metal", "Vulkan". +* Initial support for off-screen canvases. +* Adds `adapter.is_software` property. + +Changed: + +* The `GPUPresentationContext` class has been renamed to `GPUCanvasContext`. +* The functionality of the swap-chain has moved to the `GPUCanvasContext`. +* The now removed `GPUSwapChain` was used as a context manager. Instead, + the frame is presented (ala GL swapbuffers) automatically at the end of a draw. +* The `canvas.configure_swap_chain()` method has been removed. Instead, + `canvas.get_context()` should be used, to obtain a present/canvas context. +* The `adapter.request_device()` method has its arguments `non_guaranteed_features` + and `non_guaranteed_limits` replaced with `required_features` and `required_limits`. +* The enum field `StoreOp.clear` is now `StoreOp.discard`. +* The flag field `TextureUsage.SAMPLED` is now `TextureUsage.TEXTURE_BINDING`. +* The flag field `TextureUsage.STORAGE` is now `TextureUsage.STORAGE_BINDING`. +* The enum `InputStepMode` is now `VertexStepMode`. +* WGSL: `arrays` must be declared as `var` (not `let`) in order to allow dynamic indexing. +* WGSL: storage classes are written differently. + + +### [v0.5.4] - 11-06-2021 + +Changed: + +* The backend selection is automatic by default. To force a backend, the `WGPU_BACKEND_TYPE` environment variable can be set to e.g. "Vulkan". It could be good to do this on Windows to prevent selection of DX12 for now. + + +### [v0.5.3] - 04-06-2021 + +Added: + +* `adapter.properties` now has actual values, allowing inspection of the selected + GPU and backend. +* Added back support for filtering float32 textures by enabling a certain wgpu feature + by default. + +Fixed: + +* An error in the docs of `create_render_pipeline`. +* Vulkan backend is now forced to prevent DX12 being selected and causing problems + because it's less mature. + + +### [v0.5.2] - 23-05-2021 + +This release uses a new version of wgpu-native which has changed quite a bit internally. There +is more validation (thus more restrictions). There are only a few changes to the API. +However, one big change is that shaders can now be provided as both SpirV and WGSL. Due to +the strict validation, most shaders compiled by PyShader are not usable anymore. We +recommend using WGSL instead. + +Added: + +* Added `GPUAdapter.properties` (the amount of information it contains will increase in the future). +* Added proper support for WGSL. + +Changed: + +* Renamed `renderpass.set_blend_color` -> `set_blend_constant`. +* Stricter validation of SpirV shaders.
+* Float32 texture formats must now use a non-filtering sampler and texture-sample-type. +* Integer texture formats can no longer use a texture (use `textureLoad` instead). +* ... and other, tighter restrictions. + +Removed: + +* The API concerning debug markers and groups is temporarily removed. +* Adapter and device features are temporarily removed. +* Adapter and device limits are temporarily removed. + + +### [v0.4] - 21-05-2021 + +This release represents about half a year of progress on the WebGPU API, so the API +has changed quite a bit. The wgpu-py API more closely reflects the webgpu API - wgpu-native does +not affect the API except for a few additional features. + +Added: + +* Added `GPUQueue.read_buffer` as extra API (next to `write_buffer` which is original WebGPU API). +* Added `GPUQueue.read_texture` as extra API. + +Removed: + +* Removed `GPUBuffer.read_data()`. Use `device.queue.read_buffer()` instead. Note that `usage` `MAP_READ` should be replaced with `COPY_SRC`. +* Removed `GPUBuffer.write_data()`. Use `device.queue.write_buffer()` instead. Note that `usage` `MAP_WRITE` should be replaced with `COPY_DST`. + +Changed: + +* `GPUCanvasContext.get_swap_chain_preferred_format()`: now takes an `adapter` instead of a `device`. +* `GPUAdapter.extensions`: is now called `features`. +* `GPUAdapter.request_device()`: the `extensions` and `limit` args are now `non_guaranteed_features` and `non_guaranteed_limits`. +* `GPUDevice.default_queue`: is now called `queue`. +* `GPUDevice.create_compute_pipeline()`: the `compute_stage` arg is now called `compute`. +* `GPUDevice.create_bind_group_layout()` has changed the required structure of the layout entry dicts. +* `GPUDevice.create_render_pipeline()` has changed *a lot* in terms of shape of input dicts. See new docs. +* `GPUTexture.create_view()`: args `mip_level_count` and `array_layer_count` are default `None` instead of `0`. +* `GPUCommandEncoder.begin_render_pass()`: the `color_attachments` and `depth_stencil_attachment` arguments have their `attachment` field renamed to `view`. +* `GPURenderEncoderBase.set_index_buffer()` has an extra argument (after the buffer) to specify the format. The index format is no longer specified in `device.create_render_pipeline()`. +* Flag `TextureUsage` has field OUTPUT_ATTACHMENT renamed to RENDER_ATTACHMENT. +* Enum `BindingType` is split up in different enums for buffer, sampler, sampled texture and storage texture. +* Enum `BlendFactor` has some of its field names changed. +* Enum `VertexFormat` has its field names changed, e.g. ushort2 -> uint16x2. +* The API is more restrictive in the use of buffer/texture usage combinations. +* The API is more restrictive in formats for storage buffers/textures. +* When copying from/to textures, the `bytes_per_row` must now be a multiple of 256. + + +### [v0.3.0] - 2020-07-05 + +With this update we're using a later release of wgpu-native, and follow changes +in the WebGPU spec. Further, we've removed the need for ctypes to communicate +data arrays. Instead, wgpu-py can consume any object that supports the buffer +protocol, and it returns `memoryview` objects. + +Added: + +* The texture object has more properties to query the parameters that it was created with. +* The texture view object has a `texture` property. +* The render and compute pipeline objects have a property `layout` and a method `get_bind_group_layout()`. +* The shader object got a `compilation_info` method, but this does not do anything yet.
+* The `create_shader_module()` has a `source_map` attribute, but this is as yet unused. +* Log messages from wgpu-native (Rust) are now injected into Python's logger. +* The `queue` object got two new methods `write_buffer` and `write_texture`. +* The buffer has `read_data()` and `write_data()` methods. Note: the latter may be removed later. +* The device `create_buffer_with_data` is added as a convenience function. This will likely stay. + +Changed: + +* Targets wgpu-native v0.5.2. The first release build from the wgpu-native repo itself. +* The `array_layer` in copy operations involving a texture is removed. +* The `utils.compute_with_buffers` function now accepts *any* data that supports + the buffer protocol (not just ctypes arrays). The outputs are `memoryview` objects, + whose shape and format can be specified. When a ctypes array type is specified, + the output will be an instance of that type. This means that these changes are + fully backwards compatible. + +Removed: + +* The buffer (for now) no longer exposes a data mapping API. Instead use `read_data()` and `write_data()`. +* The device `create_buffer_mapped` method is similarly removed. Use `create_buffer_with_data` instead. + + +### [v0.2.0] - 2020-04-16 + +Added: + +* The canvas now has a `request_draw` method. +* More and better docs. +* The canvas can be passed to `request_adapter` so that the created surface + can be selected on it. +* Support for debug markers. + +Changed: + +* Targets wgpu-native v0.5.1. This is the last release in which wgpu-native was still part of wgpu-core. +* The `bindings` in bind groups and bind group layouts are now called `entries`. +* There is no more generic storage texture, only a readonly and a writeonly one. +* The `set_index_buffer` and `set_vertex_buffer` methods got a `size` argument. +* The `row_pitch` and `image_height` args in copy operations involving a texture + are renamed to `bytes_per_row` and `rows_per_image`. +* Rendering is now done under the swap_chain's context: `with swap_chain as current_texture_view`. + + +### [v0.1.6] - 2020-04-01 + +This release is the first moderately mature version of wgpu-py. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..eef4b5a --- /dev/null +++ b/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2019-2023, Almar Klein, Korijn van Golen +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..2b51e38 --- /dev/null +++ b/README.md @@ -0,0 +1,161 @@ +[![CI](https://github.com/pygfx/wgpu-py/workflows/CI/badge.svg)](https://github.com/pygfx/wgpu-py/actions) +[![Documentation Status](https://readthedocs.org/projects/wgpu-py/badge/?version=stable)](https://wgpu-py.readthedocs.io) +[![PyPI version](https://badge.fury.io/py/wgpu.svg)](https://badge.fury.io/py/wgpu) + +# wgpu-py + +A Python implementation of WebGPU - the next generation GPU API. + + + + + +## Introduction + +In short, this is a Python lib wrapping +[wgpu-native](https://github.com/gfx-rs/wgpu) and exposing it with a Pythonic +API similar to the [WebGPU spec](https://gpuweb.github.io/gpuweb/). + +The OpenGL API is old and showing its cracks. New APIs like Vulkan, Metal and +DX12 provide a modern way to control the GPU, but these APIs are too low-level +for general use. The WebGPU API follows the same concepts, but with a simpler +(higher level) spelling. The Python `wgpu` library brings the WebGPU API to +Python. + +To get an idea of what this API looks like have a look at +[triangle.py](https://github.com/pygfx/wgpu-py/blob/main/examples/triangle.py) +and the other [examples](https://github.com/pygfx/wgpu-py/blob/main/examples/). + + +## Status + +> **Note** +> +> The wgpu API has not settled yet, use with care! + +* Coverage of the WebGPU spec is complete enough to build e.g. + [pygfx](https://github.com/pygfx/pygfx). +* Test coverage of the API is close to 100%. +* Support for Windows, Linux, and MacOS (Intel and M1). +* Until WebGPU settles as a standard, its specification may change, and with + that our API will probably change too. Check the [changelog](CHANGELOG.md) when you + upgrade! + + +## Installation + + +``` +pip install wgpu glfw +``` + +Linux users should make sure to use **pip >= 20.3**. That should do the +trick on most systems. See [getting started](https://wgpu-py.readthedocs.io/en/stable/start.html) +for details. + + +## Usage + +Also see the [online documentation](https://wgpu-py.readthedocs.io) and the [examples](https://github.com/pygfx/wgpu-py/tree/main/examples). + +The full API is accessible via the main namespace: +```py +import wgpu +``` + +To render to the screen you can use a variety of GUI toolkits: + +```py +# The auto backend selects either the glfw, qt or jupyter backend +from wgpu.gui.auto import WgpuCanvas, run, call_later + +# Visualizations can be embedded as a widget in a Qt application. +# Import PySide6, PyQt6, PySide2 or PyQt5 before running the line below. +# The code will detect and use the library that is imported. +from wgpu.gui.qt import WgpuCanvas + +# Visualizations can be embedded as a widget in a wx application. +from wgpu.gui.wx import WgpuCanvas +``` + +Some functions in the original `wgpu-native` API are async. In the Python API, +the default functions are all sync (blocking), making things easy for general use.
+Async versions of these functions are available, so wgpu can also work +well with Asyncio or Trio. + + +## License + +This code is distributed under the 2-clause BSD license. + + +## Developers + +* Clone the repo. +* Install devtools using `pip install -r dev-requirements.txt` (you can replace + `pip` with `pipenv` to install to a virtualenv). +* Install wgpu-py in editable mode by running `pip install -e .`, this will also + install runtime dependencies as needed. +* Run `python download-wgpu-native.py` to download the upstream wgpu-native + binaries. + * Or alternatively point the `WGPU_LIB_PATH` environment variable to a custom + build. +* Use `black .` to apply autoformatting. +* Use `flake8 .` to check for flake errors. +* Use `pytest .` to run the tests. +* Use `pip wheel --no-deps .` to build a wheel. + + +### Updating to a later version of WebGPU or wgpu-native + +To update to upstream changes, we use a combination of automatic code +generation and manual updating. See [the codegen utility](codegen/README.md) +for more information. + + +## Testing + +The test suite is divided into multiple parts: + +* `pytest -v tests` runs the core unit tests. +* `pytest -v examples` tests the examples. +* `pytest -v wgpu/__pyinstaller` tests if wgpu is properly supported by + pyinstaller. +* `pytest -v codegen` lints the generated binding code. + +There are two types of tests for examples included: + +### Type 1: Checking if examples can run + +When running the test suite, pytest will run every example in a subprocess, to +see if it can run and exit cleanly. You can opt out of this mechanism by +including the comment `# run_example = false` in the module. + +### Type 2: Checking if examples output an image + +You can also (independently) opt-in to output testing for examples, by including +the comment `# test_example = true` in the module. Output testing means the test +suite will attempt to import the `canvas` instance global from your example, and +call it to see if an image is produced. + +To support this type of testing, ensure the following requirements are met: + +* The `WgpuCanvas` class is imported from the `wgpu.gui.auto` module. +* The `canvas` instance is exposed as a global in the module. +* A rendering callback has been registered with `canvas.request_draw(fn)`. + +Reference screenshots are stored in the `examples/screenshots` folder, the test +suite will compare the rendered image with the reference. + +Note: this step will be skipped when not running on CI. Since images will have +subtle differences depending on the system on which they are rendered, that +would make the tests unreliable. + +For every test that fails on screenshot verification, diffs will be generated +for the rgb and alpha channels and made available in the +`examples/screenshots/diffs` folder. On CI, the `examples/screenshots` folder +will be published as a build artifact so you can download and inspect the +differences. + +If you want to update the reference screenshot for a given example, you can grab +those from the build artifacts as well and commit them to your branch. diff --git a/codegen/README.md b/codegen/README.md new file mode 100644 index 0000000..fdeebb6 --- /dev/null +++ b/codegen/README.md @@ -0,0 +1,153 @@ +# wgpu-py codegen + + + +## Introduction + +### How wgpu-py is maintained + +The wgpu-py library provides a Pythonic interpretation of the [WebGPU API](https://www.w3.org/TR/webgpu/). 
It closely follows the official spec (in the form of an [IDL file](https://gpuweb.github.io/gpuweb/webgpu.idl)). Further below is a section on how we deviate from the spec. + +The actual implementation lives in the backends. At the moment there is only one backend, based on [wgpu-native](https://github.com/gfx-rs/wgpu-native). We make API calls into this dynamic library, as specified by [two header files](https://github.com/gfx-rs/wgpu-native/tree/trunk/ffi). + +The API (based on the IDL) and the backend (based on the header files) can be updated independently. In both cases, however, we are dealing with a relatively large API, which is (currently) changing quite a bit, and we need the implementation to be precise. Therefore, doing the maintenance completely by hand would be a big burden and prone to errors. + +On the other hand, applying fully automated code generation is also not feasible, because of the many edge-cases that have to be taken into account. Plus the code-generation code must also be maintained. + +Therefore we aim for a hybrid approach in which the aforementioned specs are used to *check* the implementations and introduce code and comments to make updates easier. + +### The purpose of `codegen` + +* Make maintaining wgpu-py as easy as possible; +* In particular the process of updating to new versions of WebGPU and wgpu-native; +* To validate that our API matches the WebGPU spec, and know where it differs. +* To validate that our calls into wgpu-native are correct. + +During an update, it should *not* be necessary to check the diffs of `webgpu.idl` or `webgpu.h`. Instead, by running the +codegen, any relevant differences in these specs should result in changes (of code or annotations) in the respective `.py` files. That said, during development it can be helpful to use the WebGPU spec and the header files as a reference. + +This package is *not* part of the wgpu library - it is a tool to help maintain it. It has its own tests, which try to cover the utils well, +but the parsers and generators are less important to fully cover by tests, because we are the only users. If it breaks, we fix it. + +### General tips + +* It's probably easier to update relatively often, so that each increment is small. +* Sometimes certain features or changes are present in WebGPU, but not in wgpu-native. This may result in some manual mappings etc. which make the code less elegant. These hacks are generally temporary though. +* It's generally recommended to update `webgpu.idl` and `webgpu.h` separately. Though it could also be advantageous to combine them, to avoid the hacky stuff mentioned in the previous point. + + + +## What the codegen does in general + +* Help update the front API. + * Make changes to `_classes.py`. + * Generate `flags.py`, `enums.py`, and `structs.py`. +* Help update the wgpu-native backend: + * Make changes to `backends/wgpu_native/_api.py`. + * Generate `backends/wgpu_native/_mappings.py`. +* Write `resources/codegen_report.md` providing a summary of the codegen process. + + + +## Updating the front API + +### Introduction + +The WebGPU API is specified by `webgpu.idl` (in the resources directory). We parse this file with a custom parser (`idlparser.py`) to obtain a description of the interfaces, enums, and flags. + +Note that while `wgpu/_classes.py` defines the API (and corresponding docstrings), the implementation of the majority of methods occurs in the backends, so most methods simply `raise NotImplementedError()`.
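+
+As a rough illustration of that split, here is a minimal, self-contained sketch of the pattern. The class and method names below are made up for illustration and are not the actual wgpu-py code:
+
+```py
+# The spec-facing class (as in _classes.py): signature and docstring only.
+class GPUBufferBase:
+    def map(self, mode, offset=0, size=None):
+        """Map the buffer so its data can be accessed."""
+        raise NotImplementedError()
+
+
+# A backend subclass (as in a backend's _api.py): the real implementation.
+class GPUBufferNative(GPUBufferBase):
+    def map(self, mode, offset=0, size=None):
+        # A real backend would call into the wgpu-native library here.
+        print(f"mapping buffer: mode={mode}, offset={offset}, size={size}")
+
+
+GPUBufferNative().map("READ")  # resolves to the backend implementation
+```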
+
+### Changes with respect to JS
+
+In some cases we may want to deviate from the WebGPU API, because well ... Python is not JavaScript. There is a simple system in place to mark any such differences, which also makes sure that these changes are listed in the docs. To mark how the Python API deviates from the WebGPU spec:
+
+* Decorate a method with `@apidiff.hide` to mark it as not supported by our API.
+* Decorate a method with `@apidiff.add` to mark it as intended, even though it does not match the WebGPU spec.
+* Decorate a method with `@apidiff.change` to mark that our method has a different signature.
+
+Other changes include:
+
+* Where in JS the input args are provided via a dict, we use kwargs directly. Note that some input args still have subdicts (and sub-sub-dicts).
+* For methods that are async in IDL, we also provide sync versions; the async method names have an `_async` suffix.
+
+### Codegen summary
+
+* Generate `flags.py`, `enums.py`, and `structs.py`.
+* Make changes to `_classes.py`:
+    * Add missing classes, methods and properties, along with a FIXME comment.
+    * Modify changed signatures, along with a FIXME comment.
+    * Mark unknown classes, methods and properties with a FIXME comment.
+    * Put a comment with the corresponding IDL line above each method and attribute.
+
+### The update process
+
+* Download the latest [webgpu.idl](https://gpuweb.github.io/gpuweb/webgpu.idl) and place it in the resources folder.
+* Run `python codegen` to apply the automatic patches to the code.
+* It may be necessary to tweak `idlparser.py` to adjust to new formatting.
+* Check the diff of `flags.py`, `enums.py`, `structs.py` for any changes that might need manual work.
+* Go through all FIXME comments that were added in `_classes.py`:
+    * Apply any necessary changes.
+    * Remove the FIXME comment if no further action is needed, or turn it into a TODO for later.
+    * Note that all new classes/methods/properties (except those marked as hidden) need a docstring.
+* Run `python codegen` again to validate that all is well. Repeat the steps above if necessary.
+* Make sure that the tests run and provide full coverage.
+* Make sure that the examples all work.
+* Update downstream code, like our own tests and examples, but also e.g. pygfx.
+* Make a summary of the API changes to put in the release notes.
+
+
+## Updating the wgpu-native backend
+
+### Introduction
+
+The backends are almost a copy of `_classes.py`: all methods in `_classes.py` that `raise NotImplementedError()` must be implemented.
+
+The wgpu-native backend calls into a dynamic library, whose interface is specified by `webgpu.h` and `wgpu.h` (in the resources directory). We parse these files with a custom parser (`hparser.py`) to obtain a description of the interfaces, enums, flags, and structs.
+
+The majority of the work in the wgpu-native backend is converting Python dicts to C structs, and then using them to call into the dynamic library. The codegen helps by validating the structs and API calls.
+
+### Tips
+
+* In the code, use `new_struct()` and `new_struct_p()` to create a C structure with minimal boilerplate. These functions also convert string enum values to their corresponding integers.
+* Since the codegen adds comments for missing fields, you can instantiate a struct without any fields, run the codegen to fill them in, and then further implement the logic.
+* The API of the backends should not deviate from the base API - only `@apidiff.add` is allowed (and should be used sparingly).
+* Use `webgpu.h` and `wgpu.h` as a reference to check available functions and structs.
+* No docstrings are needed in this module.
+* This process typically does not introduce changes to the API, but wgpu-native may have become more strict about specific usage, or require changes to the shaders.
+
+### Codegen summary
+
+* Generate `backends/wgpu_native/_mappings.py`:
+    * Generate mappings for enum field names to ints.
+    * Detect and report missing flags and enum fields.
+* Make changes to `wgpu_native/_api.py`:
+    * Validate and annotate function calls into the lib.
+    * Validate and annotate struct creations (missing struct fields are filled in).
+    * Ensure that each incoming struct is checked, to catch invalid input.
+
+### The update process
+
+* Download the latest `webgpu.h` and DLL using `python download-wgpu-native.py --version xx`.
+* Run `python codegen` to apply the automatic patches to the code.
+* It may be necessary to tweak `hparser.py` to adjust to new formatting.
+* Diff the report for new differences to take into account.
+* Diff `wgpu_native/_api.py` to get an idea of what structs and functions have changed.
+* Go through all FIXME comments that were added in `_api.py`:
+    * Apply any necessary changes.
+    * Remove the FIXME comment if no further action is needed, or turn it into a TODO for later.
+* Run `python codegen` again to validate that all is well. Repeat the steps above if necessary.
+* Make sure that the tests run and provide full coverage.
+* Make sure that the examples all work.
+* Update downstream code, like our own tests and examples, but also e.g. pygfx.
+* Make a summary of the API changes to put in the release notes.
diff --git a/codegen/__init__.py b/codegen/__init__.py
new file mode 100644
index 0000000..b2b7767
--- /dev/null
+++ b/codegen/__init__.py
@@ -0,0 +1,65 @@
+import io
+
+from .utils import print, PrintToFile
+from . import apiwriter, apipatcher, wgpu_native_patcher, idlparser, hparser
+from .files import file_cache
+
+
+def main():
+    """Codegen entry point.
This will populate the file cache with the + new code, but not write it to disk.""" + + log = io.StringIO() + with PrintToFile(log): + print("# Code generatation report") + prepare() + update_api() + update_wgpu_native() + file_cache.write("resources/codegen_report.md", log.getvalue()) + + +def prepare(): + """Force parsing (and caching) the IDL and C header.""" + print("## Preparing") + file_cache.reset() + idlparser.get_idl_parser(allow_cache=False) + hparser.get_h_parser(allow_cache=False) + + +def update_api(): + """Update the public API and patch the public-facing API of the backends.""" + + print("## Updating API") + + # Write the simple stuff + apiwriter.write_flags() + apiwriter.write_enums() + apiwriter.write_structs() + + # Patch base API: IDL -> API + code1 = file_cache.read("_classes.py") + print("### Patching API for _classes.py") + code2 = apipatcher.patch_base_api(code1) + file_cache.write("_classes.py", code2) + + # Patch backend APIs: _classes.py -> API + for fname in ["backends/wgpu_native/_api.py"]: + code1 = file_cache.read(fname) + print(f"### Patching API for {fname}") + code2 = apipatcher.patch_backend_api(code1) + file_cache.write(fname, code2) + + +def update_wgpu_native(): + """Update and check the wgpu-native backend.""" + + print("## Validating backends/wgpu_native/_api.py") + + # Write the simple stuff + wgpu_native_patcher.compare_flags() + wgpu_native_patcher.write_mappings() + + # Patch wgpu_native api + code1 = file_cache.read("backends/wgpu_native/_api.py") + code2 = wgpu_native_patcher.patch_wgpu_native_backend(code1) + file_cache.write("backends/wgpu_native/_api.py", code2) diff --git a/codegen/__main__.py b/codegen/__main__.py new file mode 100644 index 0000000..9c8a261 --- /dev/null +++ b/codegen/__main__.py @@ -0,0 +1,19 @@ +""" +The entrypoint / script to apply automatic patches to the code. +See README.md for more information. +""" + +import os +import sys + + +# Little trick to allow running this file as a script +sys.path.insert(0, os.path.abspath(os.path.join(__file__, "..", ".."))) + + +from codegen import main, file_cache # noqa: E402 + + +if __name__ == "__main__": + main() + file_cache.write_changed_files_to_disk() diff --git a/codegen/apipatcher.py b/codegen/apipatcher.py new file mode 100644 index 0000000..a4cfdcc --- /dev/null +++ b/codegen/apipatcher.py @@ -0,0 +1,529 @@ +""" +The logic to generate/patch the base API from the WebGPU +spec (IDL), and the backend implementations from the base API. +""" + +from codegen.utils import print, blacken, to_snake_case, to_camel_case, Patcher +from codegen.idlparser import get_idl_parser +from codegen.files import file_cache + + +def patch_base_api(code): + """Given the Python code, applies patches to make the code conform + to the IDL. + """ + idl = get_idl_parser() + + # Write __all__ + part1, found_all, part2 = code.partition("\n__all__ =") + if found_all: + part2 = part2.split("]", 1)[-1] + line = "\n__all__ = [" + line += ", ".join(f'"{name}"' for name in idl.classes.keys()) + line += "]" + code = part1 + line + part2 + + # Patch! + for patcher in [CommentRemover(), BaseApiPatcher(), IdlCommentInjector()]: + patcher.apply(code) + code = patcher.dumps() + return code + + +def patch_backend_api(code): + """Given the Python code, applies patches to make the code conform + to the base API. + """ + + # Obtain the base API definition + base_api_code = file_cache.read("_classes.py") + + # Patch! 
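+    # The code is passed through three patchers in order: CommentRemover strips
+    # previously injected IDL/FIXME annotations (so they don't accumulate),
+    # BackendApiPatcher aligns the backend classes with the base API, and
+    # StructValidationChecker verifies that incoming structs are checked.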
+ for patcher in [ + CommentRemover(), + BackendApiPatcher(base_api_code), + StructValidationChecker(), + ]: + patcher.apply(code) + code = patcher.dumps() + return code + + +class CommentRemover(Patcher): + """A patcher that removes comments that we add in other parsers, + to prevent accumulating comments. + """ + + triggers = "# IDL:", "# FIXME: unknown api", "# FIXME: missing check_struct" + + def apply(self, code): + self._init(code) + for line, i in self.iter_lines(): + if line.lstrip().startswith(self.triggers): + self.remove_line(i) + + +class AbstractCommentInjector(Patcher): + """A base patcher that can insert helpful comments in front of + properties, methods, and classes. It does not mark any as new or unknown, + since that is the task of the API patchers. + + Also moves decorators just above the def. Doing this here in a + post-processing step means we dont have to worry about decorators + in the other patchers, keeping them simpler. + """ + + # Note that in terms of structure, this class is basically a simplified + # version of the AbstractApiPatcher + + def apply(self, code): + self._init(code) + self.patch_classes() + + def patch_classes(self): + for classname, i1, i2 in self.iter_classes(): + if self.class_is_known(classname): + comment = self.get_class_comment(classname) + if comment: + self.insert_line(i1, comment) + self.patch_properties(classname, i1 + 1, i2) + self.patch_methods(classname, i1 + 1, i2) + + def patch_properties(self, classname, i1, i2): + for propname, j1, j2 in self.iter_properties(i1): + comment = self.get_prop_comment(classname, propname) + if comment: + self.insert_line(j1, comment) + self._move_decorator_below_comments(j1) + + def patch_methods(self, classname, i1, i2): + for methodname, j1, j2 in self.iter_methods(i1): + comment = self.get_method_comment(classname, methodname) + if comment: + self.insert_line(j1, comment) + self._move_decorator_below_comments(j1) + + def _move_decorator_below_comments(self, i_def): + for i in range(i_def - 3, i_def): + line = self.lines[i] + if line.lstrip().startswith("@"): + self.remove_line(i) + self.insert_line(i_def, line) + + +class AbstractApiPatcher(Patcher): + """The base patcher to update a wgpu API. + + This code is generalized, so it can be used both to generate the base API + as well as the backends (implementations). + + The idea is to walk over all classes, patch it if necessary, then + walk over each of its properties and methods to patch these too. 
+ """ + + def apply(self, code): + self._init(code) + self._counts = {"classes": 0, "methods": 0, "properties": 0} + self.patch_classes() + stats = ", ".join(f"{self._counts[key]} {key}" for key in self._counts) + print("Validated " + stats) + + def patch_classes(self): + seen_classes = set() + + # Update existing classes in the Python code + for classname, i1, i2 in self.iter_classes(): + seen_classes.add(classname) + self._apidiffs = set() + if self.class_is_known(classname): + old_line = self.lines[i1] + new_line = self.get_class_def(classname) + if old_line != new_line: + fixme_line = "# FIXME: was " + old_line.split("class ", 1)[-1] + self.replace_line(i1, f"{fixme_line}\n{new_line}") + self.patch_properties(classname, i1 + 1, i2) + self.patch_methods(classname, i1 + 1, i2) + else: + msg = f"unknown api: class {classname}" + self.insert_line(i1, "# FIXME: " + msg) + print("Warning: " + msg) + if self._apidiffs: + print(f"Diffs for {classname}:", ", ".join(sorted(self._apidiffs))) + + # Add missing classes + lines = [] + for classname in self.get_class_names(): + if classname not in seen_classes: + lines.append("# FIXME: new class to implement") + lines.append(self.get_class_def(classname)) + more_lines = [] + more_lines += self.get_missing_properties(classname, set()) + more_lines += self.get_missing_methods(classname, set()) + lines.extend(more_lines or [" pass"]) + if lines: + self.insert_line(i2 + 1, "\n".join(lines)) + + self._counts["classes"] += len(seen_classes) + + def patch_properties(self, classname, i1, i2): + seen_props = set() + + # Update existing properties in Python code + for propname, j1, j2 in self.iter_properties(i1): + seen_props.add(propname) + pre_lines = "\n".join(self.lines[j1 - 3 : j1]) + self._apidiffs_from_lines(pre_lines, propname) + if self.prop_is_known(classname, propname): + if "@apidiff.add" in pre_lines: + print(f"ERROR: apidiff.add for known {classname}.{propname}") + elif "@apidiff.hide" in pre_lines: + pass # continue as normal + old_line = self.lines[j1] + new_line = f" def {propname}(self):" + if old_line != new_line: + fixme_line = " # FIXME: was " + old_line.split("def ", 1)[-1] + lines = [fixme_line, new_line] + self.replace_line(j1, "\n".join(lines)) + elif "@apidiff.add" in pre_lines: + pass + else: + msg = f"unknown api: prop {classname}.{propname}" + self.insert_line(j1, " # FIXME: " + msg) + print("Warning: " + msg) + + # Add missing properties for this class + lines = self.get_missing_properties(classname, seen_props) + if lines: + self.insert_line(i2 + 1, "\n".join(lines)) + + self._counts["properties"] += len(seen_props) + + def patch_methods(self, classname, i1, i2): + seen_funcs = set() + + # Update existing methods in Python code + for methodname, j1, j2 in self.iter_methods(i1): + seen_funcs.add(methodname) + pre_lines = "\n".join(self.lines[j1 - 3 : j1]) + self._apidiffs_from_lines(pre_lines, methodname) + if self.method_is_known(classname, methodname): + if "@apidiff.add" in pre_lines: + print(f"ERROR: apidiff.add for known {classname}.{methodname}") + elif "@apidiff.hide" in pre_lines: + pass # continue as normal + elif "@apidiff.change" in pre_lines: + continue + old_line = self.lines[j1] + new_line = self.get_method_def(classname, methodname) + if old_line != new_line: + fixme_line = " # FIXME: was " + old_line.split("def ", 1)[-1] + lines = [fixme_line, new_line] + self.replace_line(j1, "\n".join(lines)) + elif "@apidiff.add" in pre_lines: + pass + elif methodname.startswith("_"): + pass + else: + msg = f"unknown api: method 
{classname}.{methodname}" + self.insert_line(j1, " # FIXME: " + msg) + print("Warning: " + msg) + + # Add missing methods for this class + lines = self.get_missing_methods(classname, seen_funcs) + if lines: + self.insert_line(i2 + 1, "\n".join(lines)) + + self._counts["methods"] += len(seen_funcs) + + def get_missing_properties(self, classname, seen_props): + lines = [] + for propname in self.get_required_prop_names(classname): + if propname not in seen_props: + lines.append(" # FIXME: new prop to implement") + lines.append(" @property") + lines.append(f" def {propname}(self):") + lines.append(" raise NotImplementedError()") + lines.append("") + return lines + + def get_missing_methods(self, classname, seen_funcs): + lines = [] + for methodname in self.get_required_method_names(classname): + if methodname not in seen_funcs: + lines.append(" # FIXME: new method to implement") + lines.append(self.get_method_def(classname, methodname)) + lines.append(" raise NotImplementedError()\n") + return lines + + def _apidiffs_from_lines(self, text, what): + diffs = [x.replace("(", " ").split()[0] for x in text.split("@apidiff.")[1:]] + if diffs: + self._apidiffs.add(f"{'/'.join(diffs)} {what}") + + +class IdlPatcherMixin: + def __init__(self): + super().__init__() + self.idl = get_idl_parser() + + def name2idl(self, name): + m = {"__init__": "constructor"} + name = m.get(name, name) + return to_camel_case(name) + + def name2py(self, name): + m = {"constructor": "__init__"} + name = m.get(name, name) + return to_snake_case(name) + + def class_is_known(self, classname): + return classname in self.idl.classes + + def get_class_def(self, classname): + cls = self.idl.classes[classname] + # Make sure that GPUObjectBase comes last, for MRO + ignore = "Event", "EventTarget", "DOMException" + bases = sorted(cls.bases or [], key=lambda n: n.count("GPUObjectBase")) + bases = [b for b in bases if b not in ignore] + # Cover some special cases + if classname.lower().endswith("error"): + if "memory" in classname.lower(): + bases.append("MemoryError") + elif not bases: + bases.append("Exception") + + bases = "" if not bases else f"({', '.join(bases)})" + return f"class {classname}{bases}:" + + def get_method_def(self, classname, methodname): + # Get the corresponding IDL line + functions = self.idl.classes[classname].functions + name_idl = self.name2idl(methodname) + if methodname.endswith("_async") and name_idl not in functions: + name_idl = self.name2idl(methodname.replace("_async", "")) + elif name_idl not in functions and name_idl + "Async" in functions: + name_idl += "Async" + idl_line = functions[name_idl] + + # Construct preamble + preamble = "def " + to_snake_case(methodname) + "(" + if "async" in methodname: + preamble = "async " + preamble + + # Get arg names and types + args = idl_line.split("(", 1)[1].split(")", 1)[0].split(",") + args = [arg.strip() for arg in args if arg.strip()] + raw_defaults = [arg.partition("=")[2].strip() for arg in args] + place_holder_default = False + defaults = [] + for default, arg in zip(raw_defaults, args): + if default: + place_holder_default = "None" # any next args must have a default + elif arg.startswith("optional "): + default = "None" + else: + default = place_holder_default + defaults.append(default) + + argnames = [arg.split("=")[0].split()[-1] for arg in args] + argnames = [to_snake_case(argname) for argname in argnames] + argnames = [(f"{n}={v}" if v else n) for n, v in zip(argnames, defaults)] + argtypes = [arg.split("=")[0].split()[-2] for arg in args] + + # If 
one arg that is a dict, flatten dict to kwargs + if len(argtypes) == 1 and argtypes[0].endswith( + ("Options", "Descriptor", "Configuration") + ): + assert argtypes[0].startswith("GPU") + fields = self.idl.structs[argtypes[0][3:]].values() # struct fields + py_args = [self._arg_from_struct_field(field) for field in fields] + if py_args[0].startswith("label: str"): + py_args[0] = 'label=""' + py_args = ["self", "*"] + py_args + else: + py_args = ["self"] + argnames + + # Construct final def + line = preamble + ", ".join(py_args) + "): pass\n" + line = blacken(line, True).split("):")[0] + "):" + return " " + line + + def _arg_from_struct_field(self, field): + name = to_snake_case(field.name) + d = field.default + t = self.idl.resolve_type(field.typename) + result = name + if t: + result += f": {t}" + if d: + d = {"false": "False", "true": "True"}.get(d, d) + result += f"={d}" + return result + + def prop_is_known(self, classname, propname): + propname_idl = self.name2idl(propname) + return propname_idl in self.idl.classes[classname].attributes + + def method_is_known(self, classname, methodname): + functions = self.idl.classes[classname].functions + name_idl = self.name2idl(methodname) + if "_async" in methodname and name_idl not in functions: + name_idl = self.name2idl(methodname.replace("_async", "")) + elif name_idl not in functions and name_idl + "Async" in functions: + name_idl += "Async" + return name_idl if name_idl in functions else None + + def get_class_names(self): + return list(self.idl.classes.keys()) + + def get_required_prop_names(self, classname): + propnames_idl = self.idl.classes[classname].attributes.keys() + return [self.name2py(x) for x in propnames_idl] + + def get_required_method_names(self, classname): + methodnames_idl = self.idl.classes[classname].functions.keys() + return [self.name2py(x) for x in methodnames_idl] + + +class BaseApiPatcher(IdlPatcherMixin, AbstractApiPatcher): + """A patcher to patch the base API (in _classes.py), using IDL as input.""" + + +class IdlCommentInjector(IdlPatcherMixin, AbstractCommentInjector): + """A patcher that injects signatures as defined in IDL, which can be useful + to determine the types of arguments, etc. 
+ """ + + def get_class_comment(self, classname): + return None + + def get_prop_comment(self, classname, propname): + if self.prop_is_known(classname, propname): + propname_idl = self.name2idl(propname) + return " # IDL: " + self.idl.classes[classname].attributes[propname_idl] + + def get_method_comment(self, classname, methodname): + name_idl = self.method_is_known(classname, methodname) + if name_idl: + return " # IDL: " + self.idl.classes[classname].functions[name_idl] + + +class BackendApiPatcher(AbstractApiPatcher): + """A patcher to patch a backend API, using the base API as input.""" + + def __init__(self, base_api_code): + super().__init__() + + p1 = Patcher(base_api_code) + + # Collect what's needed + self.classes = classes = {} + for classname, i1, i2 in p1.iter_classes(): + methods = {} + for methodname, j1, j2 in p1.iter_methods(i1 + 1): + pre_lines = "\n".join(p1.lines[j1 - 3 : j1]) + if "@apidiff.hide" in pre_lines: + continue # method (currently) not part of our API + body = "\n".join(p1.lines[j1 + 1 : j2 + 1]) + must_overload = "raise NotImplementedError()" in body + methods[methodname] = p1.lines[j1], must_overload + classes[classname] = p1.lines[i1], methods + # We assume that all properties can be implemented on the base class + + def class_is_known(self, classname): + return classname in self.classes + + def get_class_def(self, classname): + line, _ = self.classes[classname] + + if "):" not in line: + return line.replace(":", f"(classes.{classname}):") + else: + i = line.find("(") + bases = line[i:].strip("():").replace(",", " ").split() + bases = [b for b in bases if b.startswith("GPU")] + bases.insert(0, f"classes.{classname}") + return line[:i] + "(" + ", ".join(bases) + "):" + + def get_method_def(self, classname, methodname): + _, methods = self.classes[classname] + line, _ = methods[methodname] + return line + + def prop_is_known(self, classname, propname): + return False + + def method_is_known(self, classname, methodname): + _, methods = self.classes[classname] + return methodname in methods + + def get_class_names(self): + return list(self.classes.keys()) + + def get_required_prop_names(self, classname): + return [] + + def get_required_method_names(self, classname): + _, methods = self.classes[classname] + return list(name for name in methods.keys() if methods[name][1]) + + +class StructValidationChecker(Patcher): + """Checks that all structs are vaildated in the methods that have incoming structs.""" + + def apply(self, code): + self._init(code) + + idl = get_idl_parser() + all_structs = set() + ignore_structs = {"Extent3D"} + + for classname, i1, i2 in self.iter_classes(): + if classname not in idl.classes: + continue + + # For each method ... 
+ for methodname, j1, j2 in self.iter_methods(i1 + 1): + code = "\n".join(self.lines[j1 : j2 + 1]) + # Get signature and cut it up in words + sig_words = code.partition("(")[2].split("):")[0] + for c in "][(),\"'": + sig_words = sig_words.replace(c, " ") + # Collect incoming structs from signature + method_structs = set() + for word in sig_words.split(): + if word.startswith("structs."): + structname = word.partition(".")[2] + method_structs.update(self._get_sub_structs(idl, structname)) + all_structs.update(method_structs) + # Collect structs being checked + checked = set() + for line in code.splitlines(): + line = line.lstrip() + if line.startswith("check_struct("): + name = line.split("(")[1].split(",")[0].strip('"') + checked.add(name) + # Test that a matching check is done + unchecked = method_structs.difference(checked) + unchecked = list(sorted(unchecked.difference(ignore_structs))) + if ( + methodname.endswith("_async") + and f"return self.{methodname[:-7]}" in code + ): + pass + elif unchecked: + msg = f"missing check_struct in {methodname}: {unchecked}" + self.insert_line(j1, f"# FIXME: {msg}") + print(f"ERROR: {msg}") + + # Test that we did find structs. In case our detection fails for + # some reason, this would probably catch that. + assert len(all_structs) > 10 + + def _get_sub_structs(self, idl, structname): + structnames = {structname} + for structfield in idl.structs[structname].values(): + structname2 = structfield.typename[3:] # remove "GPU" + if structname2 in idl.structs: + structnames.update(self._get_sub_structs(idl, structname2)) + return structnames diff --git a/codegen/apiwriter.py b/codegen/apiwriter.py new file mode 100644 index 0000000..488ca1c --- /dev/null +++ b/codegen/apiwriter.py @@ -0,0 +1,146 @@ +""" +Writes the parts of the API that are simple: flags, enums, structs. +""" + +import re + +from codegen.utils import print, blacken, to_snake_case +from codegen.idlparser import get_idl_parser +from codegen.files import file_cache + + +ref_pattern = re.compile(r"\W((GPU|flags\.|enums\.|structs\.)\w+?)\W", re.MULTILINE) + + +def resolve_crossrefs(text): + # Similar code as in docs/conf.py + text += " " + i2 = 0 + while True: + m = ref_pattern.search(text, i2) + if not m: + break + i1, i2 = m.start(1), m.end(1) + prefix = m.group(2) + ref_indicator = ":obj:" if prefix.lower() == prefix else ":class:" + name = m.group(1) + if name.startswith("structs."): + link = name.split(".")[1] + else: + link = "wgpu." 
+ name + insertion = f"{ref_indicator}`{name} <{link}>`" + text = text[:i1] + insertion + text[i2:] + i2 += len(insertion) - len(name) + return text.rstrip() + + +def write_flags(): + # Get preamble + pylines = [] + for line in file_cache.read("flags.py").splitlines(): + pylines.append(line) + if "AUTOGENERATED" in line: + pylines += ["", ""] + break + # Prepare + idl = get_idl_parser() + n = len(idl.flags) + # List'm + pylines.append(f"# There are {n} flags\n") + pylines.append("__all__ = [") + for name in idl.flags.keys(): + pylines.append(f' "{name}",') + pylines.append("]\n\n") + # The flags definitions + for name, d in idl.flags.items(): + # Object-docstring as a comment + for key, val in d.items(): + pylines.append(f'#: * "{key}" ({val})') + # Generate Code + pylines.append(f'{name} = Flags(\n "{name}",') + for key, val in d.items(): + pylines.append(f" {key}={val!r},") + pylines.append(")\n") + # Write + code = blacken("\n".join(pylines)) + file_cache.write("flags.py", code) + print(f"Wrote {n} flags to flags.py") + + +def write_enums(): + # Get preamble + pylines = [] + for line in file_cache.read("enums.py").splitlines(): + pylines.append(line) + if "AUTOGENERATED" in line: + pylines += ["", ""] + break + # Prepare + idl = get_idl_parser() + n = len(idl.enums) + # List'm + pylines.append(f"# There are {n} enums\n") + pylines.append("__all__ = [") + for name in idl.enums.keys(): + pylines.append(f' "{name}",') + pylines.append("]\n\n") + for name, d in idl.enums.items(): + # Object-docstring as a comment + for key, val in d.items(): + pylines.append(f'#: * "{key}"') + # Generate Code + pylines.append(f'{name} = Enum(\n "{name}",') + for key, val in d.items(): + pylines.append(f' {key}="{val}",') + pylines.append(")\n") + # Write + code = blacken("\n".join(pylines)) + file_cache.write("enums.py", code) + print(f"Wrote {n} enums to enums.py") + + +def write_structs(): + # Get preamble + pylines = [] + for line in file_cache.read("structs.py").splitlines(): + pylines.append(line) + if "AUTOGENERATED" in line: + pylines += ["", ""] + break + # Prepare + idl = get_idl_parser() + n = len(idl.structs) + ignore = ["ImageCopyTextureTagged"] + pylines.append(f"# There are {n} structs\n") + # List'm + pylines.append("__all__ = [") + for name in idl.structs.keys(): + if name not in ignore: + pylines.append(f' "{name}",') + pylines.append("]\n\n") + for name, d in idl.structs.items(): + if name in ignore: + continue + # Object-docstring as a comment + for field in d.values(): + tp = idl.resolve_type(field.typename).strip("'") + if field.default is not None: + pylines.append( + resolve_crossrefs(f"#: * {field.name} :: {tp} = {field.default}") + ) + else: + pylines.append(resolve_crossrefs(f"#: * {field.name} :: {tp}")) + # Generate Code + pylines.append(f'{name} = Struct(\n "{name}",') + for field in d.values(): + key = to_snake_case(field.name) + val = idl.resolve_type(field.typename) + if not val.startswith(("'", '"')): + val = f"'{val}'" + pylines.append(f" {key}={val},") + pylines.append(")\n") + + # Write + code = blacken("\n".join(pylines)) + file_cache.write("structs.py", code) + print(f"Wrote {n} structs to structs.py") diff --git a/codegen/files.py b/codegen/files.py new file mode 100644 index 0000000..0b7c4b6 --- /dev/null +++ b/codegen/files.py @@ -0,0 +1,97 @@ +""" +Simple utilities to handle files, including a mini virtual file system. 
+""" + +import os + + +lib_dir = os.path.abspath(os.path.join(__file__, "..", "..", "wgpu")) + + +def read_file(*fname): + """Read a file from disk using the relative filename. Line endings are normalized.""" + filename = os.path.join(lib_dir, *fname) + with open(filename, "rb") as f: + return f.read().decode().replace("\r\n", "\n").replace("\r", "\n") + + +class FileCache: + """An in-memory file cache, to allow performing the codegen + in-memory, providing checks on what is actually changed, enabling + dry runs for tests, and make it easier to write back files with the + correct line endings. + """ + + _filenames_to_change = [ + "_classes.py", + "flags.py", + "enums.py", + "structs.py", + "backends/wgpu_native/_api.py", + "backends/wgpu_native/_mappings.py", + "resources/codegen_report.md", + ] + + def __init__(self): + self._file_contents = {} + self._files_written = set() + + def reset(self): + """Reset the cache, populating the files with a copy from disk.""" + self._file_contents.clear() + for fname in self.filenames_to_change: + self.write(fname, read_file(fname)) + self._files_written.clear() + + @property + def filenames_to_change(self): + """The (relative) filenames that the codegen is allowed to change.""" + return tuple(self._filenames_to_change) + + @property + def filenames_written(self): + """The (relative) filenames that are actually written.""" + return set(self._files_written) + + def write(self, fname, text): + """Write to a (virtual) file. The text is a string with LF newlines.""" + assert fname in self.filenames_to_change + self._files_written.add(fname) + self._file_contents[fname] = text + + def read(self, fname): + """Read from a (virtual) file. Returns text with LF newlines.""" + assert fname in self.filenames_to_change + return self._file_contents[fname] + + def write_changed_files_to_disk(self): + """Write the virtual files to disk, using appropriate newlines.""" + # Get reference line ending chars + with open(os.path.join(lib_dir, "__init__.py"), "rb") as f: + text = f.read().decode() + line_endings = get_line_endings(text) + # Write files + for fname in self.filenames_to_change: + text = self.read(fname) + filename = os.path.join(lib_dir, fname) + with open(filename, "wb") as f: + f.write(text.replace("\n", line_endings).encode()) + + +file_cache = FileCache() + + +def get_line_endings(text): + """Detect whether the line endings in use is CR LF or CRLF.""" + # Count how many line ending chars there are + crlf_count = text.count("\r\n") + lf_count = text.count("\n") - crlf_count + cr_count = text.count("\r") - crlf_count + assert lf_count + cr_count + crlf_count >= 4 + # Check what's used the most, or whether it's a combination. + if lf_count > cr_count and lf_count > crlf_count: + return "\n" + elif cr_count > lf_count and cr_count > crlf_count: + return "\r" + else: + return "\r\n" diff --git a/codegen/hparser.py b/codegen/hparser.py new file mode 100644 index 0000000..740ba14 --- /dev/null +++ b/codegen/hparser.py @@ -0,0 +1,231 @@ +from cffi import FFI + +from codegen.utils import print, remove_c_comments +from codegen.files import read_file + + +_parser = None + + +def _get_wgpu_header(): + """Func written so we can use this in both wgpu_native/_ffi.py and codegen/hparser.py""" + # Read files + lines1 = [] + lines1.extend(read_file("resources", "webgpu.h").splitlines()) + lines1.extend(read_file("resources", "wgpu.h").splitlines()) + # Deal with pre-processor commands, because cffi cannot handle them. 
+ # Just removing them, plus a few extra lines, seems to do the trick. + lines2 = [] + for line in lines1: + if line.startswith("#define ") and len(line.split()) > 2 and "0x" in line: + line = line.replace("(", "").replace(")", "") + elif line.startswith("#"): + continue + elif 'extern "C"' in line: + continue + for define_to_drop in [ + "WGPU_EXPORT ", + "WGPU_NULLABLE ", + " WGPU_OBJECT_ATTRIBUTE", + " WGPU_ENUM_ATTRIBUTE", + " WGPU_FUNCTION_ATTRIBUTE", + " WGPU_STRUCTURE_ATTRIBUTE", + ]: + line = line.replace(define_to_drop, "") + lines2.append(line) + return "\n".join(lines2) + + +def get_h_parser(*, allow_cache=True): + """Get the global HParser object.""" + + # Singleton pattern + global _parser + if _parser and allow_cache: + return _parser + + source = _get_wgpu_header() + + # Create parser + hp = HParser(source) + hp.parse() + _parser = hp + return hp + + +class HParser: + """Object to parse the wgpu.h header file, by letting cffi do the heavy lifting.""" + + def __init__(self, source): + self.source = source + + def parse(self, verbose=True): + self.flags = {} + self.enums = {} + self.structs = {} + self.functions = {} + + self._parse_from_h() + self._parse_from_cffi() + + if verbose: + print(f"The wgpu.h defines {len(self.functions)} functions") + keys = "flags", "enums", "structs" + stats = ", ".join(f"{len(getattr(self, key))} {key}" for key in keys) + print("The wgpu.h defines " + stats) + + def _parse_from_h(self): + code = self.source + + # Collect enums and flags. This is easy. + # Note that flags are first defined as enums and then redefined as flags later. + i1 = i2 = i3 = i4 = 0 + while True: + # Find enum + i1 = code.find("typedef enum", i4) + i2 = code.find("{", i1) + i3 = code.find("}", i2) + i4 = code.find(";", i3) + if i1 < 0: + break + # Decompose "typedef enum XX {...} XX;" + name1 = code[i1 + 13 : i2].strip() + name2 = code[i3 + 1 : i4].strip() + assert name1 == name2 + assert name1.startswith("WGPU") + name = name1[4:] + self.enums[name] = enum = {} + for f in code[i2 + 1 : i3].strip().strip(";").split(","): + f = remove_c_comments(f).strip() + if not f: + continue # happens when last item has a comma + key, _, val = f.partition("=") + # Handle key + key = key.strip() + assert key.startswith("WGPU") and "_" in key + key = key.split("_", 1)[1] + # Turn value into an int + val = val.strip() + if val.startswith("0x"): + enum[key] = int(val, 16) + elif "<<" in val: + val1, _, val2 = val.partition("<<") + enum[key] = int(val1) << int(val2) + elif "|" in val: # field is an OR of the earlier fields :/ + keys = [k.strip().split("_", 1)[1] for k in val.split("|")] + val = 0 + for k in keys: + val |= enum[k] + enum[key] = val + else: + enum[key] = int(val) + + # Turn some enums into flags + for line in code.splitlines(): + if line.startswith("typedef WGPUFlags "): + parts = line.strip().strip(";").split() + assert len(parts) == 3 + name = parts[-1] + if name.endswith("Flags"): + assert name.startswith("WGPU") + name1 = name[4:-1] # xxFlags -> xxFlag + name2 = name[4:-5] # xxFlags -> xx + name = name1 if name1 in self.enums else name2 + self.flags[name] = self.enums.pop(name) + + # Collect structs. This is relatively easy, since we only need the C code. + # But we dont deal with union structs. 
+ i1 = i2 = i3 = i4 = 0 + while True: + # Find struct + i1 = code.find("typedef struct", i4) + i2 = code.find("{", i1) + i3 = code.find("}", i2) + i4 = code.find(";", i3) + if i1 < 0: + break + # Only do simple structs, not Unions + if 0 < code.find("{", i2 + 1) < i3: + continue + # Decompose + name = code[i3 + 1 : i4].strip() + self.structs[name] = struct = {} + for f in code[i2 + 1 : i3].strip().strip(";").split(";"): + f = remove_c_comments(f).strip() + if not f: + continue # probably last item ended with a comma + parts = f.strip().split() + typename = " ".join(parts[:-1]) + typename = typename.replace("const ", "") + key = parts[-1].strip("*") + struct[key] = typename + + # Collect functions. This is not too hard, since we only need the C code. + i1 = i2 = i3 = 0 + while True: + # Find function + i1 = code.find("wgpu", i3) + i2 = code.find("(", i1) + i3 = code.find(");", i2) + if i1 < 0: + break + # Extract name, and check whether we found something real + name = code[i1:i2] + if not (name and name.isidentifier()): + i3 = i1 + 5 + continue + # Decompose further + i1 = code.rfind("\n", 0, i1) + line = code[i1 : i3 + 2] + line = " ".join(line.split()) # effective way to put on one line + self.functions[name] = line + + def _parse_from_cffi(self): + self.ffi = ffi = FFI() + ffi.cdef(self.source) + + # Collect structs. We iterate over all types. Some will resolve + # to C types, the rest are structs. The types for the struct + # fields are reduced to the C primitives, making it less useful + # for annotations. We update the structs that we've found by + # parsing wgpu.h directly. + for names in ffi.list_types(): + for name in names: + # name = ffi.getctype(name) - no, keep original + if name.startswith("WGPU") and not name.endswith("Impl"): + t = ffi.typeof(name) + if not hasattr(t, "fields"): + continue # probably an enum + elif not t.fields: + continue # base struct / alias + s = ffi.new(f"{name} *") + # Construct struct + struct = {} + for key, field in t.fields: + typename = field.type.cname + # typename = ffi.getctype(typename) + if typename.startswith("WGPU"): + val = typename # Enum or struct + else: + val = type(getattr(s, key)).__name__ + struct[key] = val + # Update + if name not in self.structs: + self.structs[name] = struct + else: + ori_struct = self.structs[name] + assert set(struct) == set(ori_struct) + for key, val in struct.items(): + if ori_struct[key] != val: + if val.startswith("_"): # _CDataBase + pass + elif ori_struct[key].startswith("WGPU"): + if "/" not in ori_struct[key]: + ori_struct[key] += "/" + val + else: + ori_struct[key] = val + # Make copies + alt_name = name + while alt_name != ffi.getctype(alt_name): + alt_name = ffi.getctype(alt_name) + self.structs[alt_name] = self.structs[name] diff --git a/codegen/idlparser.py b/codegen/idlparser.py new file mode 100644 index 0000000..5063c91 --- /dev/null +++ b/codegen/idlparser.py @@ -0,0 +1,432 @@ +""" +The logic to parse the IDL file, from this we generate the base API. + +This module may need tweaks as the used IDL syntax/constructs changes. + +It would be good to occasionally check the coverage of this module to +identify and remove code paths that are no longer used. 
+""" + +from codegen.utils import print +from codegen.files import read_file + + +_parser = None + + +def get_idl_parser(*, allow_cache=True): + """Get the global IdlParser object.""" + + # Singleton pattern + global _parser + if _parser and allow_cache: + return _parser + + # Get source + source = read_file("resources", "webgpu.idl") + + # Create parser + idl = IdlParser(source) + idl.parse() + _parser = idl + return idl + + +class StructField: + """A little object to specify the field of a struct.""" + + def __init__(self, line, name, typename, default=None): + self.line = line + self.name = name + self.typename = typename + self.default = default + + def __repr__(self): + return f"" + + def to_str(self): + return self.line + + +class Interface: + """A class definition, or flags.""" + + def __init__(self, name, bases): + self.bases = bases + self.constants = {} + self.attributes = {} # name -> line + self.functions = {} + + +class IdlParser: + """An object that can be used to walk over a str in an easy way. + + This parser has the following attributes: + + * flags: a dict mapping the (neutral) flag name to a dict of field-value pairs. + * enums: a dict mapping the (Pythonic) enum name to a dict of field-value pairs. + * structs: a dict mapping the (Pythonic) struct name to a dict of StructField + objects. + * functions: a dict mapping the (normalized) func name to the line defining the + function. + + """ + + def __init__(self, source): + self.source = self._pre_process(source) + self._length = len(self.source) + self._pos = 0 + + def _reset(self): + self._pos = 0 + + def end_reached(self): + return self._pos >= self._length + + def read_until(self, char): + start = self._pos + while self._pos < self._length: + c = self.source[self._pos] + self._pos += 1 + if c == char: + return self.source[start : self._pos] + return "" + + def read_line(self): + return self.read_until("\n") + + def peek_line(self): + char = "\n" + start = pos = self._pos + while pos < self._length: + c = self.source[pos] + pos += 1 + if c == char: + return self.source[start:pos] + return "" + + def parse(self, verbose=True): + self._interfaces = {} + self.classes = {} + self.structs = {} + self.flags = {} + self.enums = {} + + self.typedefs = {} + + self._reset() + self._parse() + self._post_process() + + if verbose: + f_count = sum(len(cls.functions) for cls in self.classes.values()) + print( + f"The webgpu.idl defines {len(self.classes)} classes with {f_count} functions" + ) + keys = "flags", "enums", "structs" + stats = ", ".join(f"{len(getattr(self, key))} {key}" for key in keys) + print("The webgpu.idl defines " + stats) + + def _pre_process(self, text): + """Pre-process the text to make it a bit easier to parse. 
+ Beware to keep line numbers the same + """ + text = text.replace("\n[\n", "\n\n[").replace("\n]\n", "]\n\n") + text = text.replace("[ ", "[") + text = self._remove_comments(text) + return text + + def _remove_comments(self, text): + lines = [] + in_multiline_comment = False + for line in text.splitlines(): + if in_multiline_comment: + if "*/" in line: + _, _, line = line.partition("//") + if "//" in line: + line, _, _ = line.partition("//") + lines.append(line if line.strip() else "") + in_multiline_comment = False + else: + lines.append("") + else: + if "//" in line: + line, _, _ = line.partition("//") + lines.append(line if line.strip() else "") + elif "/*" in line: + line, _, _ = line.partition("/*") + lines.append(line if line.strip() else "") + in_multiline_comment = True + else: + lines.append(line) + return "\n".join(lines) + + def resolve_type(self, typename): + """Resolve a type to a suitable name that is also valid so that flake8 + wont complain when this is used as a type annotation. + """ + + name = typename.strip().strip("?") + + # We want the flag, not the type that is an alias for int + name = name[:-5] if name.endswith("Flags") else name + + # First resolve using typedefs that we found in the IDL + while name in self.typedefs: + new_name = self.typedefs[name] + if new_name == name: + break + name = new_name + + # Resolve to a Python type (maybe) + pythonmap = { + "DOMString": "str", + "DOMString?": "str", + "USVString": "str", + "long": "int", + "unsigned long": "int", + "unsigned long long": "int", + "[Clamp] unsigned short": "int", + "unsigned short": "int", + "GPUIntegerCoordinate": "int", + "GPUSampleMask": "int", + "GPUFenceValue": "int", + "GPUSize64": "int", + "GPUSize32": "int", + "GPUIndex32": "int", + "double": "float", + "boolean": "bool", + "object": "dict", + "ImageBitmap": "memoryview", + "ImageData": "memoryview", + "VideoFrame": "memoryview", + "GPUPipelineConstantValue": "float", + "GPUExternalTexture": "object", + } + name = pythonmap.get(name, name) + + # Is this a case for which we need to recurse? 
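+        # For example: "sequence<GPUBuffer>" resolves to "'List[GPUBuffer]'",
+        # "record<DOMString, GPUSize64>" to "'Dict[str, int]'", and union types
+        # ("x or y") to a "'Union[...]'" annotation.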
+ if name.startswith("sequence<") and name.endswith(">"): + name = name.split("<")[-1].rstrip(">") + name = self.resolve_type(name).strip("'") + return f"'List[{name}]'" + elif name.startswith("record<") and name.endswith(">"): + name = name.split("<")[-1].rstrip(">") + names = [self.resolve_type(t).strip("'") for t in name.split(",")] + return f"'Dict[{', '.join(names)}]'" + elif " or " in name: + name = name.strip("()") + names = [self.resolve_type(t).strip("'") for t in name.split(" or ")] + names = sorted(set(names)) + return f"'Union[{', '.join(names)}]'" + + # Triage + if name in __builtins__: + return name # ok + elif name in self.classes: + return f"'{name}'" # ok, but wrap in string because can be declared later + elif name.startswith("HTML"): + return "object" # anything, we ignore this stuff anyway + elif name in ["OffscreenCanvas"]: + return "object" + elif name in ["PredefinedColorSpace"]: + return "str" + else: + assert name.startswith("GPU") + name = name[3:] + name = name[:-4] if name.endswith("Dict") else name + if name in self.flags: + return f"'flags.{name}'" + elif name in self.enums: + return f"'enums.{name}'" + elif name in self.structs: + return f"'structs.{name}'" + else: + # When this happens, update the code above or the pythonmap + raise RuntimeError("Encountered unknown IDL type: ", name) + + def _parse(self): + while not self.end_reached(): + line = self.read_line() + + if not line.strip(): + pass + elif line.startswith("typedef "): + # Get the important bit + value = line.split(" ", 1)[-1] + if value.startswith("["): + value = value.split("]")[-1] + # Parse + if value.startswith("("): # Union type + while ")" not in value: + value = value.rstrip() + " " + self.read_line().lstrip() + assert value.count("(") == 1 and value.count(")") == 1 + value = value.split("(")[1] + val, _, key = value.partition(")") + else: # Singleton type + val, _, key = value.rpartition(" ") + key = key.strip().strip(";").strip() + self.typedefs[key] = val.strip() + elif line.startswith(("namespace ", "interface ", "partial interface ")): + # A class or a set of flags + # Collect lines that define this interface + while "{" not in line: + line = line.rstrip() + " " + self.read_line().lstrip() + lines = [line] + while not line.startswith("};"): + line = self.read_line() + lines.append(line) + classname_raw, _, base_raw = lines[0].split("{")[0].partition(":") + classname = classname_raw.split()[-1] + # Collect base classes + based_on = list(base_raw.split()) + while self.peek_line().startswith(classname + " includes "): + line = self.read_line() + based_on.append(line.split()[-1].rstrip(";")) + # Create / get interface object + if classname not in self._interfaces: + self._interfaces[classname] = Interface(classname, based_on) + interface = self._interfaces[classname] + # Parse members + line_index = 0 + while line_index < len(lines) - 1: + line_index += 1 + line = lines[line_index].strip() + if not line: + continue + elif line.startswith("[Exposed="): + continue # WTF? 
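+                # Constants defined on an interface/namespace; _post_process()
+                # later interprets interfaces that have constants as flags.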
+ elif line.startswith("const "): + parts = line.strip(";").split() + assert len(parts) == 5 + assert parts[-2] == "=" + name = parts[2] + val = int(parts[-1], 16) + interface.constants[name] = val + elif "attribute " in line: + name = line.partition("attribute")[2].split()[-1].strip(";") + interface.attributes[name] = line + elif "(" in line: + line = lines[line_index] + while line.count("(") > line.count(")"): + line_index += 1 + line += lines[line_index] + assert line.count("(") == line.count(")") + line = line.strip() + line.replace("\n", " ") + for c in (" ", " ", " "): + line = line.replace(c, " ") + assert line.endswith(";") + funcname = line.split("(")[0].split()[-1] + line = ( + line.replace("\n", " ") + .replace(" ", " ") + .replace(" ", " ") + ) + interface.functions[funcname] = line + elif " includes " in line: + parts = line.strip(";").split() + assert len(parts) == 3 and parts[1] == "includes" + classname, _, base = parts + if classname not in self._interfaces: + self._interfaces[classname] = Interface(classname, []) + self._interfaces[classname].bases.append(parts[2]) + elif line.startswith("enum "): + line += self.read_until("}") + self.read_line() + lines = line.strip().split("\n") + name = lines[0].split(" ", 1)[1].strip("{ \t\r\n") + d = {} + for i, line in enumerate(lines[1:-1]): + line = line.strip() + if not line or line.startswith("//"): + continue + key = val = line.strip('", \t') + for i1, i2 in [ + ("-", "_"), + ("1d", "d1"), + ("2d", "d2"), + ("3d", "d3"), + ]: + key = key.replace(i1, i2) + d[key] = val + self.enums[name] = d + elif line.startswith("dictionary "): + while "{" not in line: + line = line.rstrip() + self.read_line() + assert line.count("{") == 1 and line.count("}") == 0 + lines = [line] + while not line.startswith("};"): + line = self.read_line() + lines.append(line) + name = lines[0].split(" ", 1)[1].strip("{ \t\r\n") + if ":" in name: + name, _, base = name.partition(":") + name, base = name.strip(), base.strip() + if base not in self.structs: + # print(f"dict {name} has unknown base dict {base}") + d = {} + else: + d = self.structs[base].copy() + else: + d = {} + for line in lines[1:-1]: + line = line.split("//")[0].strip() + if not line: + continue + assert line.endswith(";") + arg = line.strip().strip(",;").strip() + default = None + if "=" in arg: + arg, default = arg.rsplit("=", 1) + arg, default = arg.strip(), default.strip() + arg_type, arg_name = arg.strip().rsplit(" ", 1) + if arg_type.startswith("required "): + arg_type = arg_type[9:] + # required args should not have a default + assert default is None + else: + default = default or "None" + d[arg_name] = StructField(line, arg_name, arg_type, default) + self.structs[name] = d + elif line.startswith(("[Exposed=", "[Serializable]")): + pass + else: + raise RuntimeError("Unknown line:", line.rstrip()) + + def _post_process(self): + """We don't do any name format normalization in the parser code itself; + we do that here. 
+ """ + + # Drop some toplevel names + for name in [ + "NavigatorGPU", + "GPUSupportedLimits", + "GPUSupportedFeatures", + "WGSLLanguageFeatures", + "GPUUncapturedErrorEvent", + "GPUExternalTexture", + ]: + self._interfaces.pop(name, None) + + # Divide flags and actual class definitions + for name, interface in self._interfaces.items(): + if interface.constants: + self.flags[name] = interface.constants + elif name not in ("Navigator", "WorkerNavigator"): + delattr(interface, "constants") + self.classes[name] = interface + + # Remove GPU prefix + for d in (self.structs, self.flags, self.enums): + for name in list(d.keys()): + assert name.startswith("GPU") + new_name = name[3:] + if new_name.endswith("Dict"): + new_name = new_name[:-4] + d[new_name] = d.pop(name) + + # Remove (abstract) base structs + for name in list(self.structs): + if name.endswith("Base"): + self.structs.pop(name) diff --git a/codegen/tests/test_codegen_apipatcher.py b/codegen/tests/test_codegen_apipatcher.py new file mode 100644 index 0000000..6ef5bb1 --- /dev/null +++ b/codegen/tests/test_codegen_apipatcher.py @@ -0,0 +1,118 @@ +""" Test some parts of apipatcher.py, and Implicitly tests idlparser.py. +""" + +from codegen.utils import blacken +from codegen.apipatcher import CommentRemover, AbstractCommentInjector + + +def dedent(code): + return code.replace("\n ", "\n") + + +def test_comment_remover(): + code = """ + # + # a comment + # IDL: some idl spec + # FIXME: unknown api method + # FIXME: unknown api property + # FIXME: unknown api class + # FIXME: new method - only user should remove + # FIXME: was changed - only user should remove + """ + + p = CommentRemover() + p.apply(dedent(code)) + code = p.dumps() + + assert code.count("#") == 4 + + assert "IDL" not in code # IDL is auto-added by the codegen + assert "unknown" not in code # these are also auto-added + + assert "new" in code # user should remove these + assert "was changed" in code # user should remove these + + +class MyCommentInjector(AbstractCommentInjector): + def class_is_known(self, classname): + return True + + def prop_is_known(self, classname, propname): + return True + + def method_is_known(self, classname, methodname): + return True + + def get_class_comment(self, classname): + return "# this is a class" + + def get_prop_comment(self, classname, propname): + return "# this is a property" + + def get_method_comment(self, classname, methodname): + return "# this is a method" + + +def test_comment_injector(): + code1 = """ + class X: + 'x' + + def foo(self): + pass + + @whatever + def bar(self): + pass + + @property + def spam(self): + pass + + @property + # valid Python, but we want comments above decorators + def eggs(self): + pass + """ + + code3 = """ + # this is a class + class X: + 'x' + + # this is a method + def foo(self): + pass + + # this is a method + @whatever + def bar(self): + pass + + # this is a property + @property + def spam(self): + pass + + # valid Python, but we want comments above decorators + # this is a property + @property + def eggs(self): + pass + """ + code3 = blacken(dedent(code3)).strip() + + p = MyCommentInjector() + p.apply(dedent(code1)) + code2 = p.dumps().strip() + + assert code2 == code3 + + +if __name__ == "__main__": + for func in list(globals().values()): + if callable(func) and func.__name__.startswith("test_"): + print(f"Running {func.__name__} ...") + func() + print("Done") diff --git a/codegen/tests/test_codegen_rspatcher.py b/codegen/tests/test_codegen_rspatcher.py new file mode 100644 index 
0000000..6b6e80f --- /dev/null +++ b/codegen/tests/test_codegen_rspatcher.py @@ -0,0 +1,94 @@ +""" Test some parts of rsbackend.py, and implicitly tests hparser.py. +""" + +from codegen.wgpu_native_patcher import patch_wgpu_native_backend + + +def dedent(code): + return code.replace("\n ", "\n") + + +def test_patch_functions(): + code1 = """ + libf.wgpuAdapterRequestDevice(1, 2, 3) + libf.wgpuFooBar(1, 2, 3) + """ + + code2 = patch_wgpu_native_backend(dedent(code1)) + + # All original lines are there + assert all(line[4:] in code2 for line in code1 if line.strip()) + + # But also an annotation + assert "WGPUAdapter adapter, WGPUDeviceDescriptor" in code2 + # And a notification that foo_bar is unknown + assert code2.count("# FIXME:") == 1 + assert code2.count("FooBar") == 2 + + +def test_patch_structs(): + # Check simple struct + code1 = """ + struct = new_struct_p( + "WGPUBufferDescriptor *", + label=c_label, + size=size, + usage=usage, + ) + """ + code2 = patch_wgpu_native_backend(dedent(code1)) + assert all(line[4:] in code2 for line in code1 if line.strip()) + assert "usage: WGPUBufferUsageFlags/int" in code2 + assert "size: int" in code2 + assert "# FIXME:" not in code2 + assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments + + # Check, but now using not-pointer + code1 = """ + struct = new_struct( + "WGPUBufferDescriptor", + label=c_label, + size=size, + usage=usage, + ) + """ + code2 = patch_wgpu_native_backend(dedent(code1)) + assert all(line[4:] in code2 for line in code1 if line.strip()) + assert "usage: WGPUBufferUsageFlags/int" in code2 + assert "size: int" in code2 + assert "# FIXME:" not in code2 + + # Fail + code1 = 'struct = new_struct("WGPUBufferDescriptor *",label=c_label,size=size,usage=usage,)' + code2 = patch_wgpu_native_backend(dedent(code1)) + assert "# FIXME:" in code2 + assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments + + # Fail + code1 = 'struct = new_struct_p("WGPUBufferDescriptor",label=c_label,size=size,usage=usage,)' + code2 = patch_wgpu_native_backend(dedent(code1)) + assert "# FIXME:" in code2 + assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments + + # Missing values + code1 = 'struct = new_struct_p("WGPUBufferDescriptor *",label=c_label,size=size,)' + code2 = patch_wgpu_native_backend(dedent(code1)) + assert "usage: WGPUBufferUsageFlags/int" in code2 + assert "# FIXME:" not in code2 + assert "usage" in code2 # comment added + assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments + + # Too many values + code1 = 'struct = new_struct_p("WGPUBufferDescriptor *",label=c_label,foo=size,)' + code2 = patch_wgpu_native_backend(dedent(code1)) + assert "usage: WGPUBufferUsageFlags/int" in code2 + assert "# FIXME: unknown" in code2 + assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments + + +if __name__ == "__main__": + for func in list(globals().values()): + if callable(func) and func.__name__.startswith("test_"): + print(f"Running {func.__name__} ...") + func() + print("Done") diff --git a/codegen/tests/test_codegen_utils.py b/codegen/tests/test_codegen_utils.py new file mode 100644 index 0000000..f92741d --- /dev/null +++ b/codegen/tests/test_codegen_utils.py @@ -0,0 +1,256 @@ +""" +Strive for full coverage of the codegen utils module. 
+""" + +from codegen.utils import ( + remove_c_comments, + blacken, + Patcher, + to_snake_case, + to_camel_case, +) + +from pytest import raises + + +def dedent(code): + return code.replace("\n ", "\n") + + +def test_to_snake_case(): + assert to_snake_case("foo_bar_spam") == "foo_bar_spam" + assert to_snake_case("_foo_bar_spam") == "_foo_bar_spam" + assert to_snake_case("fooBarSpam") == "foo_bar_spam" + assert to_snake_case("_fooBarSpam") == "_foo_bar_spam" + assert to_snake_case("maxTextureDimension1D") == "max_texture_dimension1d" + + +def test_to_camel_case(): + assert to_camel_case("foo_bar_spam") == "fooBarSpam" + assert to_camel_case("_foo_bar_spam") == "_fooBarSpam" + assert to_camel_case("fooBarSpam") == "fooBarSpam" + assert to_camel_case("_fooBarSpam") == "_fooBarSpam" + assert to_camel_case("max_texture_dimension1d") == "maxTextureDimension1D" + + +def test_remove_c_comments(): + code1 = """ + x1 hello// comment + // comment + x2 hello/* comment */ + x3/* comment */ hello + x4 /* comment + comment + */hello + """ + + code3 = """ + x1 hello + + x2 hello + x3 hello + x4 hello + """ + + code1, code3 = dedent(code1), dedent(code3) + + code2 = remove_c_comments(code1) + + assert code2 == code3 + + +def test_blacken_singleline(): + code1 = """ + def foo(): + pass + def foo( + ): + pass + def foo( + a1, a2, a3 + ): + pass + def foo( + a1, a2, a3, + ): + pass + def foo( + a1, + a2, + a3, + ): + pass + """ + + code2 = """ + def foo(): + pass + def foo(): + pass + def foo(a1, a2, a3): + pass + def foo(a1, a2, a3): + pass + def foo(a1, a2, a3): + pass + """ + + code1 = dedent(code1).strip() + code2 = dedent(code2).strip() + + code3 = blacken(code1, True) + code3 = code3.replace("\n\n", "\n").replace("\n\n", "\n").strip() + + assert code3 == code2 + + # Also test simply long lines + code = "foo = 1" + " + 1" * 100 + assert len(code) > 300 + assert code.count("\n") == 0 + assert blacken(code, False).strip().count("\n") > 3 + assert blacken(code, True).strip().count("\n") == 0 + + +def test_blacken_comments(): + code1 = """ + def foo(): # hi + pass + def foo( + a1, # hi + a2, # ha + a3, + ): # ho + pass + """ + + code2 = """ + def foo(): # hi + pass + def foo(a1, a2, a3): # hi ha ho + pass + """ + + code1 = dedent(code1).strip() + code2 = dedent(code2).strip() + + code3 = blacken(code1, True) + code3 = code3.replace("\n\n", "\n").replace("\n\n", "\n").strip() + + assert code3 == code2 + + +def test_patcher(): + code = """ + class Foo1: + def bar1(self): + pass + def bar2(self): + pass + @property + def bar3(self): + pass + + class Foo2: + def bar1(self): + pass + @property + def bar2(self): + pass + def bar3(self): + pass + """ + + code = blacken(dedent(code)) + p = Patcher(code) + + # Dump before doing anything, should yield original + assert p.dumps() == code + + # Check iter_lines + lines = [] + for line, i in p.iter_lines(): + assert isinstance(line, str) + assert isinstance(i, int) + lines.append(line) + assert "\n".join(lines).strip() == code.strip() + + # Check iter_properties + names = [] + for classname, i1, i2 in p.iter_classes(): + for funcname, j1, j2 in p.iter_properties(i1 + 1): + names.append(classname + "." + funcname) + assert names == ["Foo1.bar3", "Foo2.bar2"] + + # Check iter_methods + names = [] + for classname, i1, i2 in p.iter_classes(): + for funcname, j1, j2 in p.iter_methods(i1 + 1): + names.append(classname + "." 
+ funcname) + assert names == ["Foo1.bar1", "Foo1.bar2", "Foo2.bar1", "Foo2.bar3"] + + # Check insert_line (can insert into same line multiple times + p = Patcher(code) + for classname, i1, i2 in p.iter_classes(): + p.insert_line(i1, "# a class") + p.insert_line(i1, "# a class") + code2 = p.dumps() + assert code2.count("# a class") == 4 + + # Check replace_line (can only replace one time per line) + p = Patcher(code2) + for line, i in p.iter_lines(): + if line.lstrip().startswith("#"): + p.replace_line(i, "# comment") + with raises(Exception): + p.replace_line(i, "# comment") + code2 = p.dumps() + assert code2.count("#") == 4 + assert code2.count("# comment") == 4 + + # Remove comments + p = Patcher(code2) + for line, i in p.iter_lines(): + if line.lstrip().startswith("#"): + p.remove_line(i) + code2 = p.dumps() + assert code2.count("#") == 0 + + # We should be back to where we started + assert code2 == code + + +def test_patcher2(): + code = """ + class Foo1: + def bar1(self): + pass + @property + def bar2(self): + pass + """ + + p = Patcher(dedent(code)) + + # Check property line indices + for classname, i1, i2 in p.iter_classes(): + for funcname, j1, j2 in p.iter_properties(i1 + 1): + line = p.lines[j1].lstrip() + assert line.startswith("def") + assert funcname in line + assert "pass" in p.lines[j2] + + # Check method line indices + for classname, i1, i2 in p.iter_classes(): + for funcname, j1, j2 in p.iter_methods(i1 + 1): + line = p.lines[j1].lstrip() + assert line.startswith("def") + assert funcname in line + assert "pass" in p.lines[j2] + + +if __name__ == "__main__": + for func in list(globals().values()): + if callable(func) and func.__name__.startswith("test_"): + print(f"Running {func.__name__} ...") + func() + print("Done") diff --git a/codegen/tests/test_codegen_z.py b/codegen/tests/test_codegen_z.py new file mode 100644 index 0000000..c5b381c --- /dev/null +++ b/codegen/tests/test_codegen_z.py @@ -0,0 +1,83 @@ +""" +Applying the codegen should not introduce changes. +""" + +import os +import time + +from codegen import main +from codegen.files import file_cache, get_line_endings, lib_dir + + +def test_line_endings(): + # All LF, but also ok if fraction is CR + assert get_line_endings("foo\nbar\nspam\neggs\n") == "\n" + assert get_line_endings("foo\nbar\nspam\reggs\n") == "\n" + + # All CR, but also ok if fraction is LF. 
+ # I know that CR line endings are an archaic Mac thing, but some dev + # might just have his git setup in a weird way :) + assert get_line_endings("foo\rbar\rspam\reggs\r") == "\r" + assert get_line_endings("foo\rbar\rspam\neggs\r") == "\r" + + # If most are equal, + assert get_line_endings("foo\r\nbar\r\nspam\r\neggs\r\n") == "\r\n" + assert get_line_endings("foo\r\nbar\r\nspam\neggs\r\n") == "\r\n" + assert get_line_endings("foo\r\nbar\r\nspam\reggs\r\n") == "\r\n" + + +def test_that_code_is_up_to_date(): + """Test that running the codegen updates what we expect, but does not introduce changes.""" + + # Obtain mtime of all file that can change + mtimes = {} + for fname in file_cache.filenames_to_change: + filename = os.path.join(lib_dir, fname) + mtimes[filename] = os.path.getmtime(filename) + + time.sleep(0.2) + + # Reset the file cache + file_cache.reset() + assert file_cache.filenames_written == set() + + # Collect original version of the files + originals = {} + for fname in file_cache.filenames_to_change: + originals[fname] = file_cache.read(fname) + + # Perform the codegen + main() + + # Confirm that all files that are allowed to change, are actually written + assert file_cache.filenames_written == set(file_cache.filenames_to_change) + + # Double-check that mtimes have not changed - i.e. the real file system is not touched + all(t == os.path.getmtime(filename) for filename, t in mtimes.items()) + + # Check that the files have not actually changed. This is to ensure that: + # * an update to wgpu-native is actually completed. + # * the autogenerated code is not manually changed. + # * The codegen report is correct. + for fname in file_cache.filenames_to_change: + content1 = originals[fname] + content2 = file_cache.read(fname) + assert content1 == content2 + + print("Codegen check ok!") + + +def test_that_codegen_report_has_no_errors(): + filename = os.path.join(lib_dir, "resources", "codegen_report.md") + with open(filename, "rb") as f: + text = f.read().decode() + + # The codegen uses a prefix "ERROR:" for unacceptable things. + # All caps, some function names may contain the name "error". + assert "ERROR" not in text + + +if __name__ == "__main__": + test_line_endings() + test_that_code_is_up_to_date() + test_that_codegen_report_has_no_errors() diff --git a/codegen/utils.py b/codegen/utils.py new file mode 100644 index 0000000..e4f68c8 --- /dev/null +++ b/codegen/utils.py @@ -0,0 +1,312 @@ +""" +Codegen utils. +""" + +import os +import sys +import tempfile + +import black + + +def to_snake_case(name): + """Convert a name from camelCase to snake_case. Names that already are + snake_case remain the same. + """ + name2 = "" + for c in name: + c2 = c.lower() + if c2 != c and len(name2) > 0 and name2[-1] not in "_123": + name2 += "_" + name2 += c2 + return name2 + + +def to_camel_case(name): + """Convert a name from snake_case to camelCase. Names that already are + camelCase remain the same. 
+ """ + is_capital = False + name2 = "" + for c in name: + if c == "_" and name2: + is_capital = True + elif is_capital: + name2 += c.upper() + is_capital = False + else: + name2 += c + if name2.endswith(("1d", "2d", "3d")): + name2 = name2[:-1] + "D" + return name2 + + +_file_objects_to_print_to = [sys.stdout] + + +def print(*args, **kwargs): + """Report something (will be printed and added to a file.""" + # __builtins__.print(*args, **kwargs) + if args and not args[0].lstrip().startswith("#"): + args = ("*",) + args + for f in _file_objects_to_print_to: + __builtins__["print"](*args, file=f, flush=True, **kwargs) + + +class PrintToFile: + """Context manager to print to file.""" + + def __init__(self, f): + assert hasattr(f, "write") + self.f = f + + def __enter__(self): + _file_objects_to_print_to.append(self.f) + + def __exit__(self, type, value, tb): + while self.f in _file_objects_to_print_to: + _file_objects_to_print_to.remove(self.f) + self.f.close() + + +def remove_c_comments(code): + """Remove C-style comments from the given code.""" + pos = 0 + new_code = "" + + while True: + # Find start of comment + lookfor = None + i1 = code.find("//", pos) + i2 = code.find("/*", pos) + if i1 >= 0: + lookfor = "\n" + comment_start = i1 + if i2 >= 0: + if not (i1 >= 0 and i1 < i2): + lookfor = "*/" + comment_start = i2 + # Found a start? + if not lookfor: + new_code += code[pos:] + break + else: + new_code += code[pos:comment_start] + # Find the end + comment_end = code.find(lookfor, comment_start + 2) + if comment_end < 0: + break + if lookfor == "\n": + pos = comment_end + else: + pos = comment_end + len(lookfor) + return new_code + + +def blacken(src, singleline=False): + """Format the given src string using black. If singleline is True, + all function signatures become single-line, so they can be parsed + and updated. + """ + # Normal black + mode = black.FileMode(line_length=999 if singleline else 88) + result = black.format_str(src, mode=mode) + + # Make defs single-line. You'd think that setting the line length + # to a very high number would do the trick, but it does not. + if singleline: + lines1 = result.splitlines() + lines2 = [] + in_sig = False + comment = "" + for line in lines1: + if in_sig: + # Handle comment + line, _, c = line.partition("#") + line = line.rstrip() + c = c.strip() + if c: + comment += " " + c.strip() + # Detect end + if line.endswith("):"): + in_sig = False + # Compose line + current_line = lines2[-1] + if not current_line.endswith("("): + current_line += " " + current_line += line.lstrip() + # Finalize + if not in_sig: + # Remove trailing spaces and commas + current_line = current_line.replace(" ):", "):") + current_line = current_line.replace(",):", "):") + # Add comment + if comment: + current_line += " #" + comment + comment = "" + lines2[-1] = current_line + else: + lines2.append(line) + line_nc = line.split("#")[0].strip() + if ( + line_nc.startswith(("def ", "async def", "class ")) + and "(" in line_nc + ): + if not line_nc.endswith("):"): + in_sig = True + lines2.append("") + result = "\n".join(lines2) + + return result + + +class Patcher: + """Class to help patch a Python module. Supports iterating (over + lines, classes, properties, methods), and applying diffs (replace, + remove, insert). 
+ """ + + def __init__(self, code=None): + self._init(code) + + def _init(self, code): + """Subclasses can call this to reset the patcher.""" + self.lines = [] + self._diffs = {} + self._classes = {} + if code: + self.lines = blacken(code, True).splitlines() # inf line length + + def remove_line(self, i): + """Remove the line at the given position. There must not have been + an action on line i. + """ + assert i not in self._diffs, f"Line {i} already has a diff" + self._diffs[i] = i, "remove" + + def insert_line(self, i, line): + """Insert a new line at the given position. It's ok if there + has already been an insertion an line i, but there must not have been + any other actions. + """ + if i in self._diffs and self._diffs[i][1] == "insert": + cur_line = self._diffs[i][2] + self._diffs[i] = i, "insert", cur_line + "\n" + line + else: + assert i not in self._diffs, f"Line {i} already has a diff" + self._diffs[i] = i, "insert", line + + def replace_line(self, i, line): + """Replace the line at the given position with another line. + There must not have been an action on line i. + """ + assert i not in self._diffs, f"Line {i} already has a diff" + self._diffs[i] = i, "replace", line + + def dumps(self, format=True): + """Return the patched result as a string.""" + lines = self.lines.copy() + # Apply diff + diffs = sorted(self._diffs.values()) + for diff in reversed(diffs): + if diff[1] == "remove": + lines.pop(diff[0]) + elif diff[1] == "insert": + lines.insert(diff[0], diff[2]) + elif diff[1] == "replace": + lines[diff[0]] = diff[2] + else: # pragma: no cover + raise ValueError(f"Unknown diff: {diff}") + # Format + text = "\n".join(lines) + if format: + try: + text = blacken(text) + except black.InvalidInput as err: # pragma: no cover + # If you get this error, it really helps to load the code + # in an IDE to see where the error is. Let's help with that ... + filename = os.path.join(tempfile.gettempdir(), "wgpu_patcher_fail.py") + with open(filename, "wb") as f: + f.write(text.encode()) + err = str(err) + err = err if len(err) < 78 else err[:77] + "…" + raise RuntimeError( + f"It appears that the patcher has generated invalid Python:" + f"\n\n {err}\n\n" + f'Wrote the generated (but unblackened) code to:\n\n "{filename}"' + ) + + return text + + def iter_lines(self, start_line=0): + """Generator to iterate over the lines. + Each iteration yields (line, linenr) + """ + for i in range(start_line, len(self.lines)): + line = self.lines[i] + yield line, i + + def iter_classes(self, start_line=0): + """Generator to iterate over the classes. + Each iteration yields (classname, linenr_start, linenr_end), + where linenr_end is the last line of code. + """ + current_class = None + for i in range(start_line, len(self.lines)): + line = self.lines[i] + sline = line.rstrip() + if current_class and sline: + if sline.startswith(" "): + current_class[2] = i + else: # code has less indentation -> something new + yield current_class + current_class = None + if line.startswith("class "): + name = line.split(":")[0].split("(")[0].split()[-1] + current_class = [name, i, i] + if current_class: + yield current_class + + def iter_properties(self, start_line=0): + """Generator to iterate over the properties. + Each iteration yields (classname, linenr_first, linenr_last), + where linenr_first is the line that startswith `def`, + and linenr_last is the last line of code. + """ + return self._iter_props_and_methods(start_line, True) + + def iter_methods(self, start_line=0): + """Generator to iterate over the methods. 
+ Each iteration yields (classname, linenr_first, linenr_last) + where linenr_first is the line that startswith `def`, + and linenr_last is the last line of code. + """ + return self._iter_props_and_methods(start_line, False) + + def _iter_props_and_methods(self, start_line, find_props): + prop_mark = None + current_def = None + for i in range(start_line, len(self.lines)): + line = self.lines[i] + sline = line.rstrip() + if current_def and sline: + if sline.startswith(" "): + current_def[2] = i + else: + yield current_def + current_def = None + if sline and not sline.startswith(" "): + break # exit class + if line.startswith((" def ", " async def ")): + name = line.split("(")[0].split()[-1] + if prop_mark and find_props: + current_def = [name, i, i] + elif not prop_mark and not find_props: + current_def = [name, i, i] + if line.startswith(" @property"): + prop_mark = i + elif sline and not sline.lstrip().startswith("#"): + prop_mark = None + + if current_def: + yield current_def diff --git a/codegen/wgpu_native_patcher.py b/codegen/wgpu_native_patcher.py new file mode 100644 index 0000000..bc8110d --- /dev/null +++ b/codegen/wgpu_native_patcher.py @@ -0,0 +1,359 @@ +""" +Apply codegen to wgpu-native backend. + +The idea is that when there are any changes in wgpu.h that affect how +wgpu_native/_api.py should be written, this module will: + +* For enums: automatically update the mappings. +* For flags: report discrepancies. +* For structs and functions: update the code, so a diff of _api.py quickly + shows if manual changes are needed. + +Note that the apipatcher will also patch wgpu_native/_api.py, but where that codegen +focuses on the API, here we focus on the C library usage. +""" + +from codegen.utils import print, blacken, Patcher +from codegen.hparser import get_h_parser +from codegen.idlparser import get_idl_parser +from codegen.files import file_cache + + +mappings_preamble = ''' +""" Mappings for the wgpu-native backend. """ + +# THIS CODE IS AUTOGENERATED - DO NOT EDIT + +# flake8: noqa +'''.lstrip() + + +def compare_flags(): + """For each flag in WebGPU: + + * Verify that there is a corresponding flag in wgpu.h + * Verify that all fields are present too. + * Verify that the (integer) value is equal. + + """ + + idl = get_idl_parser() + hp = get_h_parser() + + name_map = { + "ColorWrite": "ColorWriteMask", + } + + for name, flag in idl.flags.items(): + name = name_map.get(name, name) + if name not in hp.flags: + print(f"Flag {name} missing in wgpu.h") + else: + for key, val in flag.items(): + key = key.title().replace("_", "") # MAP_READ -> MapRead + key = name_map.get(f"{name}.{key}") or key + if key not in hp.flags[name]: + print(f"Flag field {name}.{key} missing in wgpu.h") + elif val != hp.flags[name][key]: + print(f"Warning: Flag field {name}.{key} have different values.") + + +def write_mappings(): + """Generate the file with dicts to map enums strings to ints. This + also compares the enums in wgpu-native with WebGPU, and reports any + missing ones. + """ + + idl = get_idl_parser() + hp = get_h_parser() + + name_map = {} + name_map_i = {v: k for k, v in name_map.items()} + + # Init generated code + pylines = [mappings_preamble] + + # Create enummap, which allows the wgpu-native backend to resolve enum field names + # to the corresponding integer value. 
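+ # The entries below map "EnumName.fieldname" strings to the integer value
+ # defined in wgpu.h. For example (the value here is illustrative only; the
+ # real ints are read from wgpu.h at codegen time):
+ #     enummap["TextureFormat.rgba8unorm"] = 18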
+ enummap = {} + for name in idl.enums: + hname = name_map.get(name, name) + if hname not in hp.enums: + print(f"Enum {hname} missing in wgpu.h") + continue + hp_enum = {key.lower(): val for key, val in hp.enums[hname].items()} + for ikey in idl.enums[name].values(): + hkey = ikey.lower().replace("-", "") + hkey = name_map.get(f"{name}.{hkey}") or hkey + if hkey in hp_enum: + enummap[name + "." + ikey] = hp_enum[hkey] + else: + print(f"Enum field {name}.{ikey} missing in wgpu.h") + + # Write enummap + pylines.append(f"# There are {len(enummap)} enum mappings\n") + pylines.append("enummap = {") + for key in sorted(enummap.keys()): + pylines.append(f' "{key}": {enummap[key]!r},') + pylines.append("}\n") + + # Some structs have fields that are enum values. The wgpu-native backend + # must be able to resolve these too. + cstructfield2enum = {} + for structname, struct in hp.structs.items(): + for key, val in struct.items(): + if isinstance(val, str) and val.startswith("WGPU"): + henumname = val[4:].split("/")[0] + enumname = name_map_i.get(henumname, henumname) + if enumname in idl.enums: + cstructfield2enum[f"{structname[4:]}.{key}"] = enumname + else: + pass # a struct + + # Write cstructfield2enum + pylines.append(f"# There are {len(cstructfield2enum)} struct-field enum mappings\n") + pylines.append("cstructfield2enum = {") + for key in sorted(cstructfield2enum.keys()): + pylines.append(f' "{key}": {cstructfield2enum[key]!r},') + pylines.append("}\n") + + # Write a few native-only mappings: key => int + pylines.append("enum_str2int = {") + for name in ["BackendType"]: + pylines.append(f' "{name}":' + " {") + for key, val in hp.enums[name].items(): + if key == "Force32": + continue + pylines.append(f' "{key}": {val},') + pylines.append(" }") + pylines.append("}") + + # Write a few native-only mappings: int => key + # If possible, resolve to WebGPU names, otherwise use the native name. + pylines.append("enum_int2str = {") + for name in [ + "BackendType", + "AdapterType", + "ErrorType", + "DeviceLostReason", + "TextureFormat", + "TextureDimension", + "PresentMode", + "CompositeAlphaMode", + ]: + webgpu_names = {} + if name in idl.enums: + webgpu_names = { + val.replace("-", ""): val for val in idl.enums[name].values() + } + if "unknown" in webgpu_names: + webgpu_names["undefined"] = "unknown" + pylines.append(f' "{name}":' + " {") + for key, val in hp.enums[name].items(): + if key == "Force32": + continue + enum_val = webgpu_names.get(key.lower(), key) + pylines.append(f' {val}: "{enum_val}",') + pylines.append(" },") + pylines.append("}") + + # Wrap up + code = blacken("\n".join(pylines)) # just in case; code is already black + file_cache.write("backends/wgpu_native/_mappings.py", code) + print( + f"Wrote {len(enummap)} enum mappings and {len(cstructfield2enum)} struct-field mappings to wgpu_native/_mappings.py" + ) + + +def patch_wgpu_native_backend(code): + """Given the Python code, applies patches to annotate functions + calls and struct instantiations. + + For functions: + + * Verify that the function exists in wgpu.h. If not, add a fixme comment. + * Add a comment showing correspinding signature from wgpu.h. + + For structs: + + * Verify that the struct name exists. + * Verify that the correct form (pointer or not) is used. + * Verify that all used fields exists. + * Annotate any missing fields. + * Add a comment that shows all fields and their type. 
+ + """ + + for patcher in [CommentRemover(), FunctionPatcher(), StructPatcher()]: + patcher.apply(code) + code = patcher.dumps() + return code + + +class CommentRemover(Patcher): + triggers = "# FIXME: unknown C", "# FIXME: invalid C", "# H:" + + def apply(self, code): + self._init(code) + for line, i in self.iter_lines(): + if line.lstrip().startswith(self.triggers): + self.remove_line(i) + + +class FunctionPatcher(Patcher): + def apply(self, code): + self._init(code) + hp = get_h_parser() + count = 0 + detected = set() + + for line, i in self.iter_lines(): + if "lib.wgpu" in line or "libf.wgpu" in line: + start = line.index(".wgpu") + 1 + end = line.index("(", start) + name = line[start:end] + indent = " " * (len(line) - len(line.lstrip())) + if "lib.wgpu" in line: + self.insert_line( + i, f"{indent}# FIXME: wgpu func calls must be done from libf" + ) + if name not in hp.functions: + msg = f"unknown C function {name}" + self.insert_line(i, f"{indent}# FIXME: {msg}") + print(f"ERROR: {msg}") + else: + detected.add(name) + anno = hp.functions[name].replace(name, "f").strip(";") + self.insert_line(i, indent + f"# H: " + anno) + count += 1 + + print(f"Validated {count} C function calls") + + # Determine what functions were not detected + # There are still quite a few, so we don't list them yet + ignore = ( + "wgpu_create_surface_from", + "wgpu_set_log_level", + "wgpu_get_version", + "wgpu_set_log_callback", + ) + unused = set(name for name in hp.functions if not name.startswith(ignore)) + unused.difference_update(detected) + print(f"Not using {len(unused)} C functions") + + +class StructPatcher(Patcher): + def apply(self, code): + self._init(code) + hp = get_h_parser() + + count = 0 + line_index = -1 + brace_depth = 0 + + for line, i in self.iter_lines(): + if "new_struct_p(" in line or "new_struct(" in line: + if line.lstrip().startswith("def "): + continue # Implementation + if "_new_struct" in line: + continue # Implementation + if "new_struct_p()" in line or "new_struct()" in line: + continue # Comments or docs + line_index = i + j = line.index("new_struct") + line = line[j:] # start brace searching from right pos + brace_depth = 0 + + if line_index >= 0: + for c in line: + if c == "#": + break + elif c == "(": + brace_depth += 1 + elif c == ")": + brace_depth -= 1 + assert brace_depth >= 0 + if brace_depth == 0: + self._validate_struct(hp, line_index, i) + count += 1 + line_index = -1 + break + + print(f"Validated {count} C structs") + + def _validate_struct(self, hp, i1, i2): + """Validate a specific struct usage.""" + + lines = self.lines[ + i1 : i2 + 1 + ] # note: i2 is the line index where the closing brace is + indent = " " * (len(lines[-1]) - len(lines[-1].lstrip())) + + if len(lines) == 1: + # Single line - add a comma before the closing brace + print( + "Notice: made a struct multiline. Rerun codegen to validate the struct." + ) + line = lines[0] + i = line.rindex(")") + line = line[:i] + "," + line[i:] + self.replace_line(i1, line) + return + elif len(lines) == 3 and lines[1].count("="): + # Triplet - add a comma after the last element + print( + "Notice: made a struct multiline. Rerun codegen to validate the struct." + ) + self.replace_line(i1 + 1, self.lines[i1 + 1] + ",") + return + + # We can assume that the struct is multi-line and formatted by Black! 
+ assert len(lines) >= 3 + + # Get struct name, and verify + name = lines[1].strip().strip(',"') + struct_name = name.strip(" *") + if name.endswith("*"): + if "new_struct_p" not in lines[0]: + self.insert_line( + i1, indent + f"# FIXME: invalid C struct, use new_struct_p()" + ) + else: + if "new_struct_p" in lines[0]: + self.insert_line( + i1, indent + f"# FIXME: invalid C struct, use new_struct()" + ) + + # Get struct object and create annotation line + if struct_name not in hp.structs: + msg = f"unknown C struct {struct_name}" + self.insert_line(i1, f"{indent}# FIXME: {msg}") + print(f"ERROR: {msg}") + return + else: + struct = hp.structs[struct_name] + fields = ", ".join(f"{key}: {val}" for key, val in struct.items()) + self.insert_line(i1, indent + f"# H: " + fields) + + # Check keys + keys_found = [] + for j in range(2, len(lines) - 1): + line = lines[j] + key = line.split("=")[0].strip() + if key.startswith("# not used:"): + key = key.split(":")[1].split("=")[0].strip() + elif key.startswith("#"): + continue + keys_found.append(key) + if key not in struct: + msg = f"unknown C struct field {struct_name}.{key}" + self.insert_line(i1 + j, f"{indent}# FIXME: {msg}") + print(f"ERROR: {msg}") + + # Insert comments for unused keys + more_lines = [] + for key in struct: + if key not in keys_found: + more_lines.append(indent + f" # not used: {key}") + if more_lines: + self.insert_line(i2, "\n".join(more_lines)) diff --git a/conftest.py b/conftest.py new file mode 100644 index 0000000..2425c0c --- /dev/null +++ b/conftest.py @@ -0,0 +1,39 @@ +"""Global configuration for pytest""" + +import os +import sys + +import numpy as np +import pytest + + +# Enable importing testutils from all test scripts +sys.path.insert(0, os.path.abspath(os.path.join(__file__, "..", "tests"))) + + +def pytest_addoption(parser): + parser.addoption( + "--regenerate-screenshots", + action="store_true", + dest="regenerate_screenshots", + default=False, + ) + + +@pytest.fixture(autouse=True) +def predictable_random_numbers(): + """ + Called at start of each test, guarantees that calls to random produce the same output over subsequent tests runs, + see http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.random.seed.html + """ + np.random.seed(0) + + +@pytest.fixture(autouse=True, scope="session") +def numerical_exceptions(): + """ + Ensure any numerical errors raise a warning in our test suite + The point is that we enforce such cases to be handled explicitly in our code + Preferably using local `with np.errstate(...)` constructs + """ + np.seterr(all="raise") diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 0000000..2b1492c --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1,19 @@ +# For unit tests, linting, etc. +requests +numpy +pytest +black +flake8 +flake8-black +pep8-naming +sphinx +imageio +pyinstaller +psutil + +# Building wheels +wheel +setuptools +twine +auditwheel; sys_platform == 'linux' +cibuildwheel diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". 
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/_static/style.css b/docs/_static/style.css
new file mode 100644
index 0000000..e69de29
diff --git a/docs/_templates/wgpu_class_layout.rst b/docs/_templates/wgpu_class_layout.rst
new file mode 100644
index 0000000..24f62af
--- /dev/null
+++ b/docs/_templates/wgpu_class_layout.rst
@@ -0,0 +1,7 @@
+{{ objname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+.. autoclass:: {{ objname }}
+ :members:
+ :show-inheritance:
diff --git a/docs/backends.rst b/docs/backends.rst
new file mode 100644
index 0000000..7f91098
--- /dev/null
+++ b/docs/backends.rst
@@ -0,0 +1,83 @@
+The wgpu backends
+=================
+
+What do backends do?
+--------------------
+
+The heavy lifting (i.e. communication with the hardware) in wgpu is performed by
+one of its backends.
+
+Backends can be selected explicitly by importing them:
+
+.. code-block:: py
+
+ import wgpu.backends.wgpu_native
+
+There is also an `auto` backend to help keep code portable:
+
+.. code-block:: py
+
+ import wgpu.backends.auto
+
+In most cases, however, you don't need any of the above imports, because
+a backend is automatically selected in the first call to :func:`wgpu.GPU.request_adapter`.
+
+Each backend can also provide additional (backend-specific)
+functionality. To keep the main API clean and portable, this extra
+functionality is provided as a functional API that has to be imported
+from the specific backend.
+
+
+The wgpu_native backend
+-----------------------
+
+.. code-block:: py
+
+ import wgpu.backends.wgpu_native
+
+
+This backend wraps `wgpu-native `__,
+which is a C API for `wgpu `__, a Rust library
+that wraps Vulkan, Metal, DirectX12 and more.
+This is the main backend for wgpu-py. The only working backend, right now, to be precise.
+It also works out of the box, because the wgpu-native DLL is shipped with wgpu-py.
+
+The wgpu_native backend provides a few extra functionalities:
+
+
+.. py:function:: wgpu.backends.wgpu_native.enumerate_adapters()
+
+ Return a list of all available adapters on this system.
+
+ :return: Adapters
+ :rtype: list
+
+
+.. py:function:: wgpu.backends.wgpu_native.request_device_tracing(adapter, trace_path, *, label="", required_features, required_limits, default_queue)
+
+ An alternative to :func:`wgpu.GPUAdapter.request_device`, that streams a trace
+ of all low level calls to disk, so the visualization can be replayed (also on other systems),
+ investigated, and debugged.
+
+ :param adapter: The adapter to create a device for.
+ :param trace_path: The path to an (empty) directory. Is created if it does not exist.
+ :param label: A human readable label. Optional.
+ :param required_features: The features (extensions) that you need. Default [].
+ :param required_limits: The various limits that you need. Default {}.
+ :param default_queue: Descriptor for the default queue. Optional.
+ :return: Device
+ :rtype: wgpu.GPUDevice
+
+
+The js_webgpu backend
+---------------------
+
+.. code-block:: py
+
+ import wgpu.backends.js_webgpu
+
+
+This backend calls into the JavaScript WebGPU API. For this, the Python code would need
+access to JavaScript - this backend is intended for use-cases like `PScript `__, `PyScript `__, and `RustPython `__.
+ +This backend is still a stub, see `issue #407 `__ for details. diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..1e40388 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,166 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import re +import os +import sys +import shutil + + +ROOT_DIR = os.path.abspath(os.path.join(__file__, "..", "..")) +sys.path.insert(0, ROOT_DIR) + +os.environ["WGPU_FORCE_OFFSCREEN"] = "true" + +import wgpu # noqa: E402 + + +# -- Tests ------------------------------------------------------------------- + +# Ensure that all classes are referenced in the alphabetic list, +# and referenced at least one other time as part of the explanatory text. +with open(os.path.join(ROOT_DIR, "docs", "wgpu.rst"), "rb") as f: + wgpu_text = f.read().decode() + wgpu_lines = [line.strip() for line in wgpu_text.splitlines()] +for cls_name in wgpu.classes.__all__: + assert ( + f"~{cls_name}" in wgpu_lines + ), f"Class '{cls_name}' not listed in class list in wgpu.rst" + assert ( + f":class:`{cls_name}`" in wgpu_text + ), f"Class '{cls_name}' not referenced in the text in wgpu.rst" + + +# -- Hacks to tweak docstrings ----------------------------------------------- + +# Make flags and enums appear better in docs +wgpu.enums._use_sphinx_repr = True +wgpu.flags._use_sphinx_repr = True +wgpu.structs._use_sphinx_repr = True + +# Build regular expressions to resolve crossrefs +func_ref_pattern = re.compile(r"\ (`\w+?\(\)`)", re.MULTILINE) +ob_ref_pattern = re.compile( + r"\ (`(GPU|gui\.Wgpu|flags\.|enums\.|structs\.)\w+?`)", re.MULTILINE +) +argtype_ref_pattern = re.compile( + r"\(((GPU|gui\.Wgpu|flags\.|enums\.|structs\.)\w+?)\)", re.MULTILINE +) + + +def resolve_crossrefs(text): + text = (text or "").lstrip() + + # Turn references to functions into a crossref. + # E.g. `Foo.bar()` + i2 = 0 + while True: + m = func_ref_pattern.search(text, i2) + if not m: + break + i1, i2 = m.start(1), m.end(1) + ref_indicator = ":func:" + text = text[:i1] + ref_indicator + text[i1:] + + # Turn references to objects (classes, flags, enums, and structs) into a crossref. + # E.g. `GPUDevice` or `flags.BufferUsage` + i2 = 0 + while True: + m = ob_ref_pattern.search(text, i2) + if not m: + break + i1, i2 = m.start(1), m.end(1) + prefix = m.group(2) # e.g. GPU or flags. + ref_indicator = ":obj:" if prefix.lower() == prefix else ":class:" + text = text[:i1] + ref_indicator + text[i1:] + + # Turn function arg types into a crossref. + # E.g. 
(GPUDevice) or (flags.BufferUsage) + i2 = 0 + while True: + m = argtype_ref_pattern.search(text) + if not m: + break + i1, i2 = m.start(1), m.end(1) + ref_indicator = ":obj:" + text = text[:i1] + ref_indicator + "`" + text[i1:i2] + "`" + text[i2:] + + return text + + +# Tweak docstrings of classes and their methods +for module, hide_class_signature in [(wgpu.classes, True), (wgpu.gui, False)]: + for cls_name in module.__all__: + cls = getattr(module, cls_name) + # Class docstring + docs = resolve_crossrefs(cls.__doc__) + if hide_class_signature: + docs = cls.__name__ + "()\n\n " + docs + cls.__doc__ = docs or None + # Docstring of methods + for method in cls.__dict__.values(): + if callable(method) and hasattr(method, "__code__"): + docs = resolve_crossrefs(method.__doc__) + if ( + method.__code__.co_argcount == 1 + and method.__code__.co_kwonlyargcount > 0 + ): + sig = method.__name__ + "(**parameters)" + docs = sig + "\n\n " + docs + method.__doc__ = docs or None + + +# -- Project information ----------------------------------------------------- + +project = "wgpu-py" +copyright = "2020-2023, Almar Klein, Korijn van Golen" +author = "Almar Klein, Korijn van Golen" +release = wgpu.__version__ + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.napoleon", + "sphinx.ext.autosummary", +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Just let autosummary produce a new version each time +shutil.rmtree(os.path.join(os.path.dirname(__file__), "generated"), True) + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + +master_doc = "index" + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. + +if not (os.getenv("READTHEDOCS") or os.getenv("CI")): + html_theme = "sphinx_rtd_theme" + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] diff --git a/docs/gui.rst b/docs/gui.rst new file mode 100644 index 0000000..c052205 --- /dev/null +++ b/docs/gui.rst @@ -0,0 +1,172 @@ +gui API +======= + +.. currentmodule:: wgpu.gui + +You can use vanilla wgpu for compute tasks and to render offscreen. To +render to a window on screen we need a *canvas*. Since the Python +ecosystem provides many different GUI toolkits, wgpu implements a base +canvas class, and has builtin support for a few GUI toolkits. At the +moment these include GLFW, Jupyter, Qt, and wx. + + +The Canvas base classes +----------------------- + +.. autosummary:: + :nosignatures: + :toctree: generated + :template: wgpu_class_layout.rst + + ~WgpuCanvasInterface + ~WgpuCanvasBase + ~WgpuAutoGui + ~WgpuOffscreenCanvasBase + + +For each supported GUI toolkit there is a module that implements a ``WgpuCanvas`` class, +which inherits from :class:`WgpuCanvasBase`, providing a common API. 
+The GLFW, Qt, and Jupyter backends also inherit from :class:`WgpuAutoGui` to include +support for events (interactivity). In the next sections we demonstrates the different +canvas classes that you can use. + + +The auto GUI backend +-------------------- + +The default approach for examples and small applications is to use +the automatically selected GUI backend. At the moment this selects +either the GLFW, Qt, or Jupyter backend, depending on the environment. + +To implement interaction, the ``canvas`` has a :func:`WgpuAutoGui.handle_event()` method +that can be overloaded. Alternatively you can use it's :func:`WgpuAutoGui.add_event_handler()` +method. See the `event spec `_ +for details about the event objects. + + +Also see the `triangle auto example `_ +and `cube example `_. + +.. code-block:: py + + from wgpu.gui.auto import WgpuCanvas, run, call_later + + canvas = WgpuCanvas(title="Example") + canvas.request_draw(your_draw_function) + + run() + + +Support for GLFW +---------------- + +`GLFW `_ is a lightweight windowing toolkit. +Install it with ``pip install glfw``. The preferred approach is to use the auto backend, +but you can replace ``from wgpu.gui.auto`` with ``from wgpu.gui.glfw`` to force using GLFW. + +.. code-block:: py + + from wgpu.gui.glfw import WgpuCanvas, run, call_later + + canvas = WgpuCanvas(title="Example") + canvas.request_draw(your_draw_function) + + run() + + +Support for Qt +-------------- + +There is support for PyQt5, PyQt6, PySide2 and PySide6. The wgpu library detects what +library you are using by looking what module has been imported. +For a toplevel widget, the ``gui.qt.WgpuCanvas`` class can be imported. If you want to +embed the canvas as a subwidget, use ``gui.qt.WgpuWidget`` instead. + +Also see the `Qt triangle example `_ +and `Qt triangle embed example `_. + +.. code-block:: py + + # Import any of the Qt libraries before importing the WgpuCanvas. + # This way wgpu knows which Qt library to use. + from PySide6 import QtWidgets + from wgpu.gui.qt import WgpuCanvas + + app = QtWidgets.QApplication([]) + + # Instantiate the canvas + canvas = WgpuCanvas(title="Example") + + # Tell the canvas what drawing function to call + canvas.request_draw(your_draw_function) + + app.exec_() + + +Support for wx +-------------- + +There is support for embedding a wgpu visualization in wxPython. +For a toplevel widget, the ``gui.wx.WgpuCanvas`` class can be imported. If you want to +embed the canvas as a subwidget, use ``gui.wx.WgpuWidget`` instead. + +Also see the `wx triangle example `_ +and `wx triangle embed example `_. + +.. code-block:: py + + import wx + from wgpu.gui.wx import WgpuCanvas + + app = wx.App() + + # Instantiate the canvas + canvas = WgpuCanvas(title="Example") + + # Tell the canvas what drawing function to call + canvas.request_draw(your_draw_function) + + app.MainLoop() + + + +Support for offscreen +--------------------- + +You can also use a "fake" canvas to draw offscreen and get the result as a numpy array. +Note that you can render to a texture without using any canvas +object, but in some cases it's convenient to do so with a canvas-like API. + +.. code-block:: py + + from wgpu.gui.offscreen import WgpuCanvas + + # Instantiate the canvas + canvas = WgpuCanvas(size=(500, 400), pixel_ratio=1) + + # ... 
+ + # Tell the canvas what drawing function to call + canvas.request_draw(your_draw_function) + + # Perform a draw + array = canvas.draw() # numpy array with shape (400, 500, 4) + + +Support for Jupyter lab and notebook +------------------------------------ + +WGPU can be used in Jupyter lab and the Jupyter notebook. This canvas +is based on `jupyter_rfb `_, an ipywidget +subclass implementing a remote frame-buffer. There are also some `wgpu examples `_. + +.. code-block:: py + + # from wgpu.gui.jupyter import WgpuCanvas # Direct approach + from wgpu.gui.auto import WgpuCanvas # Approach compatible with desktop usage + + canvas = WgpuCanvas() + + # ... wgpu code + + canvas # Use as cell output diff --git a/docs/guide.rst b/docs/guide.rst new file mode 100644 index 0000000..22c1a2c --- /dev/null +++ b/docs/guide.rst @@ -0,0 +1,254 @@ +Guide +===== + + +This library (``wgpu``) presents a Pythonic API for the `WebGPU spec +`_. It is an API to control graphics +hardware. Like OpenGL but modern. Or like Vulkan but higher level. +GPU programming is a craft that requires knowledge of how GPU's work. + + +Getting started +--------------- + +Creating a canvas ++++++++++++++++++ + +If you want to render to the screen, you need a canvas. Multiple +GUI toolkits are supported, see the :doc:`gui`. In general, it's easiest to let ``wgpu`` select a GUI automatically: + +.. code-block:: py + + from wgpu.gui.auto import WgpuCanvas, run + + canvas = WgpuCanvas(title="a wgpu example") + + +Next, we can setup the render context, which we will need later on. + +.. code-block:: py + + present_context = canvas.get_context() + render_texture_format = present_context.get_preferred_format(device.adapter) + present_context.configure(device=device, format=render_texture_format) + + +Obtaining a device +++++++++++++++++++ + +The next step is to obtain an adapter, which represents an abstract render device. +You can pass it the ``canvas`` that you just created, or pass ``None`` for the canvas +if you have none (e.g. for compute or offscreen rendering). From the adapter, +you can obtain a device. + +.. code-block:: py + + adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + device = adapter.request_device() + +The ``wgpu.gpu`` object is the API entrypoint (:class:`wgpu.GPU`). It contains just a handful of functions, +including ``request_adapter()``. The device is used to create most other GPU objects. + + +Creating buffers, textures shaders, etc. +++++++++++++++++++++++++++++++++++++++++ + +Using the device, you can create buffers, textures, write shader code, and put +these together into pipeline objects. How to do this depends a lot on what you +want to achieve, and is therefore out of scope for this guide. Have a look at the examples +or some of the tutorials that we link to below. + +Setting up a draw function +++++++++++++++++++++++++++ + +Let's now define a function that will actually draw the stuff we put together in +the previous step. + +.. code-block:: py + + def draw_frame(): + + # We'll record commands that we do on a render pass object + command_encoder = device.create_command_encoder() + current_texture_view = present_context.get_current_texture() + render_pass = command_encoder.begin_render_pass( + color_attachments=[ + { + "view": current_texture_view, + "resolve_target": None, + "clear_value": (1, 1, 1, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ], + ) + + # Perform commands, something like ... + render_pass.set_pipeline(...) 
+ render_pass.set_index_buffer(...)
+ render_pass.set_vertex_buffer(...)
+ render_pass.set_bind_group(...)
+ render_pass.draw_indexed(...)
+
+ # When done, submit the commands to the device queue.
+ render_pass.end()
+ device.queue.submit([command_encoder.finish()])
+
+ # If you want to draw continuously, request a new draw right now
+ canvas.request_draw()
+
+
+Starting the event loop
++++++++++++++++++++++++
+
+
+We can now pass the above render function to the canvas. The canvas will then
+call the function whenever it (re)draws the window. And finally, we call ``run()`` to enter the mainloop.
+
+.. code-block:: py
+
+ canvas.request_draw(draw_frame)
+ run()
+
+
+Offscreen
++++++++++
+
+If you render offscreen, or only do compute, you do not need a canvas. You also won't need a GUI toolkit or a draw function, and there is no event loop to enter.
+Instead, you will obtain a command encoder and submit its records to the queue directly.
+
+
+Examples and external resources
+-------------------------------
+
+Examples that show wgpu-py in action:
+
+* https://github.com/pygfx/wgpu-py/tree/main/examples
+
+.. note:: The examples in the main branch of the repository may not match the pip installable version. Be sure to refer to the examples from the git tag that matches the version of wgpu you have installed.
+
+
+External resources:
+
+* https://webgpu.rocks/
+* https://sotrh.github.io/learn-wgpu/
+* https://rust-tutorials.github.io/learn-wgpu/
+
+
+A brief history of WebGPU
+-------------------------
+
+For years, OpenGL has been the only cross-platform API to talk to the GPU.
+But over time OpenGL has grown into an inconsistent and complex API ...
+
+ *OpenGL is dying*
+ --- Dzmitry Malyshau at `Fosdem 2020 `_
+
+In recent years, modern APIs have emerged that solve many of OpenGL's
+problems. You may have heard of Vulkan, Metal, and DX12. These
+APIs are much closer to the hardware, which makes the drivers more
+consistent and reliable. Unfortunately, the huge amount of "knobs to
+turn" also makes them quite hard to work with for developers.
+
+Therefore, higher level APIs are needed, which use the same concepts, but are much easier to work with.
+The most notable one is the `WebGPU specification `_. This is what future devs
+will be using to write GPU code for the browser. And for desktop and mobile as well.
+
+As the WebGPU spec is being developed, a reference implementation is
+also built. It's written in Rust and powers the WebGPU implementation in Firefox.
+This reference implementation, called `wgpu `__,
+also exposes a C API (via `wgpu-native `__),
+so that it can be wrapped in Python. And this is precisely what wgpu-py does.
+
+So in short, wgpu-py is a Python wrapper of wgpu, which is a desktop
+implementation of WebGPU, an API that wraps Vulkan, Metal and DX12,
+which talk to the GPU hardware.
+
+
+
+Coordinate system
+-----------------
+
+In wgpu, the Y-axis is up in normalized device coordinates (NDC): point (-1.0, -1.0)
+in NDC is located at the bottom-left corner of NDC. In addition, x and
+y in NDC should be between -1.0 and 1.0 inclusive, while z in NDC should
+be between 0.0 and 1.0 inclusive. Vertices out of this range in NDC
+will not introduce any errors, but they will be clipped.
+
+
+Array data
+----------
+
+The wgpu library makes no assumptions about how you store your data.
+In places where you provide data to the API, it can consume any data
+that supports the buffer protocol, which includes ``bytes``,
+``bytearray``, ``memoryview``, ctypes arrays, and numpy arrays.
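+
+For instance, uploading a numpy array through the queue could look like this.
+This is a minimal sketch; it assumes you already have a ``device`` and a
+``buffer`` that was created with ``COPY_DST`` usage:
+
+.. code-block:: py
+
+    import numpy as np
+
+    data = np.arange(16, dtype=np.float32)
+    device.queue.write_buffer(buffer, 0, data)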
+ +In places where data is returned, the API returns a ``memoryview`` +object. These objects provide a quite versatile view on ndarray data: + +.. code-block:: py + + # One could, for instance read the content of a buffer + m = device.queue.read_buffer(buffer) + # Cast it to float32 + m = m.cast("f") + # Index it + m[0] + # Show the content + print(m.tolist()) + +Chances are that you prefer Numpy. Converting the ``memoryview`` to a +numpy array (without copying the data) is easy: + +.. code-block:: py + + array = np.frombuffer(m, np.float32) + + +Debugging +--------- + +If the default wgpu-backend causes issues, or if you want to run on a +different backend for another reason, you can set the +`WGPU_BACKEND_TYPE` environment variable to "Vulkan", "Metal", "D3D12", +"D3D11", or "OpenGL". + +The log messages produced (by Rust) in wgpu-native are captured and +injected into Python's "wgpu" logger. One can set the log level to +"INFO" or even "DEBUG" to get detailed logging information. + +Many GPU objects can be given a string label. This label will be used +in Rust validation errors, and are also used in e.g. RenderDoc to +identify objects. Additionally, you can insert debug markers at the +render/compute pass object, which will then show up in RenderDoc. + +Eventually, wgpu-native will fully validate API input. Until then, it +may be worthwhile to enable the Vulkan validation layers. To do so, run +a debug build of wgpu-native and make sure that the Lunar Vulkan SDK +is installed. + +You can run your application via RenderDoc, which is able to capture a +frame, including all API calls, objects and the complete pipeline state, +and display all of that information within a nice UI. + +You can use ``adapter.request_device_tracing()`` to provide a directory path +where a trace of all API calls will be written. This trace can then be used +to re-play your use-case elsewhere (it's cross-platform). + +Also see wgpu-core's section on debugging: +https://github.com/gfx-rs/wgpu/wiki/Debugging-wgpu-Applications + + +Freezing apps +------------- + +In wgpu a PyInstaller-hook is provided to help simplify the freezing process +(it e.g. ensures that the wgpu-native DLL is included). This hook requires +PyInstaller version 4+. + +Our hook also includes ``glfw`` when it is available, so code using ``wgpu.gui.auto`` +should Just Work. + +Note that PyInstaller needs ``wgpu`` to be installed in `site-packages` for +the hook to work (i.e. it seems not to work with a ``pip -e .`` dev install). diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..d522b56 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,24 @@ +Welcome to the wgpu-py docs! +============================== + +.. automodule:: wgpu + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + start + guide + wgpu + backends + gui + utils + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..2119f51 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. 
Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/start.rst b/docs/start.rst new file mode 100644 index 0000000..6c27905 --- /dev/null +++ b/docs/start.rst @@ -0,0 +1,103 @@ +Installation +============ + +.. note:: Since the API changes with each release,you may want to check the `CHANGELOG.md `_ when you upgrade to a newer version of wgpu. + +Install with pip +---------------- + +You can install ``wgpu-py`` via pip. +Python 3.8 or higher is required. Pypy is supported. Only depends on ``cffi`` (installed automatically by pip). + +.. code-block:: bash + + pip install wgpu + + +Since most users will want to render something to screen, we recommend installing GLFW as well: + +.. code-block:: bash + + pip install wgpu glfw + + +GUI libraries +------------- + +Multiple GUI backends are supported, see :doc:`the GUI API ` for details: + +* `glfw `_: a lightweight GUI for the desktop +* `jupyter_rfb `_: only needed if you plan on using wgpu in Jupyter +* qt (PySide6, PyQt6, PySide2, PyQt5) +* wx + + +The wgpu-native library +----------------------- + +The wheels that pip installs include the prebuilt binaries of `wgpu-native `_, so on most systems everything Just Works. + +On Linux you need at least **pip >= 20.3**, and a recent Linux distribution, otherwise the binaries will not be available. See below for details. + +If you need/want, you can also `build wgpu-native yourself `_. +You will then need to set the environment variable ``WGPU_LIB_PATH`` to let wgpu-py know where the DLL is located. + + +Platform requirements +--------------------- + +Under the hood, wgpu runs on Vulkan, Metal, or DX12. The wgpu-backend +is selected automatically, but can be overridden by setting the +``WGPU_BACKEND_TYPE`` environment variable to "Vulkan", "Metal", "D3D12", +"D3D11", or "OpenGL". + +Windows ++++++++ + +On Windows 10+, things should just work. If your machine has a dedicated GPU, +you may want to update to the latest (Nvidia or AMD) drivers. + +MacOS ++++++ + +On MacOS you need at least 10.13 (High Sierra) to have Metal/Vulkan support. + +Linux ++++++ + +On Linux, it's advisable to install the proprietary drivers of your GPU +(if you have a dedicated GPU). You may need to ``apt install +mesa-vulkan-drivers``. Wayland support is currently broken (we could use +a hand to fix this). + +Binary wheels for Linux are only available for **manylinux_2_24**. +This means that the installation requires ``pip >= 20.3``, and you need +a recent Linux distribution, listed `here `_. + +If you wish to work with an older distribution, you will have to build +wgpu-native yourself, see "dependencies" above. Note that wgpu-native +still needs Vulkan support and may not compile / work on older +distributions. + +Installing LavaPipe on Linux +++++++++++++++++++++++++++++ + +To run wgpu on systems that do not have a GPU (e.g. CI) you need a software renderer. +On Windows this (probably) just works via DX12. On Linux you can use LavaPipe: + +.. code-block:: bash + + sudo apt update -y -qq + sudo apt install --no-install-recommends -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers + +The distribution's version of Lavapipe may be a bit outdated. To get a more recent version, you can use this PPA: + +.. 
code-block:: bash + + sudo add-apt-repository ppa:oibaf/graphics-drivers -y + +.. note:: + + The precise visual output may differ between differen implementations of Vulkan/Metal/DX12. + Therefore you should probably avoid per-pixel comparisons when multiple different systems are + involved. In wgpu-py and pygfx we have solved this by generating all reference images on CI (with Lavapipe). diff --git a/docs/utils.rst b/docs/utils.rst new file mode 100644 index 0000000..6ed4557 --- /dev/null +++ b/docs/utils.rst @@ -0,0 +1,71 @@ +Utils +===== + +The wgpu library provides a few utilities. Note that most functions below need to be explictly imported. + +Logger +------ + +Errors, warnings, and info messages (including messages generated by +wgpu-native) are logged using Python's default logging mechanics. The +wgpu logger instance is in ``wgpu.logger``, but can also be obtained +via: + +.. code-block:: py + + import logging + logger = logging.getLogger("wgpu") + + +Diagnostics +----------- + +To print a full diagnostic report: + +.. code-block:: py + + wgpu.diagnostics.print_report() + +To inspect (for example) the total buffer usage: + +.. code-block:: py + + >>> counts = wgpu.diagnostics.object_counts.get_dict() + >>> print(counts["Buffer"]) + {'count': 3, 'resource_mem': 784} + + +.. autoclass:: wgpu._diagnostics.DiagnosticsRoot + :members: + + +.. autoclass:: wgpu._diagnostics.Diagnostics + :members: + + +Get default device +------------------ + +.. autofunction:: wgpu.utils.get_default_device + + +Compute with buffers +-------------------- + +.. code-block:: py + + from wgpu.utils.compute import compute_with_buffers + +.. autofunction:: wgpu.utils.compute_with_buffers + + + +Shadertoy +--------- + +.. code-block:: py + + from wgpu.utils.shadertoy import Shadertoy + +.. autoclass:: wgpu.utils.shadertoy.Shadertoy + :members: diff --git a/docs/wgpu.rst b/docs/wgpu.rst new file mode 100644 index 0000000..372f15e --- /dev/null +++ b/docs/wgpu.rst @@ -0,0 +1,224 @@ +wgpu API +======== + +.. currentmodule:: wgpu + + +This document describes the wgpu API, which essentially is a Pythonic version of the +`WebGPU API `_. It exposes an API +for performing operations, such as rendering and computation, on a +Graphics Processing Unit. + +.. note:: + The WebGPU API is still being developed and occasionally there are backwards + incompatible changes. Since we mostly follow the WebGPU API, there may be + backwards incompatible changes to wgpu-py too. This will be so until + the WebGPU API settles as a standard. In the mean time, keep an eye on the + `CHANGELOG.md `_. + + +How to read this API +-------------------- + +The classes in this API all have a name staring with "GPU", this helps +discern them from flags and enums. These classes are never instantiated +directly; new objects are returned by special methods (mostly from the device). + +Most methods in this API have no positional arguments; each argument +must be referenced by name. Some argument values must be a :doc:`dict `, these +can be thought of as "nested" arguments. Many arguments (and dict fields) must be a +:doc:`flag ` or :doc:`enum `. +Some arguments have a default value. Most do not. + + +Differences from WebGPU +----------------------- + +This API is derived from the WebGPU spec, but differs in a few ways. +For example, methods that in WebGPU accept a descriptor/struct/dict, +here accept the fields in that struct as keyword arguments. + + +.. 
+.. autodata:: wgpu._classes.apidiff
+    :annotation: Differences of base API:
+
+
+Each backend may implement extra functionality on top of the base API.
+This is listed in :doc:`backends `.
+
+
+Overview
+--------
+
+This overview attempts to describe how all classes fit together. Scroll down for a list of all flags, enums, structs, and GPU classes.
+
+
+Adapter, device and canvas
+++++++++++++++++++++++++++
+
+The :class:`GPU` class represents the API root/entrypoint. An instance is available at ``wgpu.gpu``. This instance is loaded from one of the :doc:`backends `.
+
+The :class:`GPUAdapter` represents a hardware or software device, with specific
+features, limits and properties. To actually start using that hardware for computations or rendering, a :class:`GPUDevice` object must be requested from the adapter. This is a logical unit
+to control your hardware (or software).
+The device is the central object; most other GPU objects are created from it.
+Also see the convenience function :func:`wgpu.utils.get_default_device`.
+Information on the adapter can be obtained using :func:`wgpu.GPUAdapter.request_adapter_info` in the form of a :class:`GPUAdapterInfo`.
+
+A device is controlled with a specific backend API. By default one is selected automatically.
+This can be overridden by setting the
+``WGPU_BACKEND_TYPE`` environment variable to "Vulkan", "Metal", "D3D12", "D3D11", or "OpenGL".
+
+The device and all objects created from it inherit from :class:`GPUObjectBase` - they represent something on the GPU.
+
+In most render use-cases you want the result to be presented to a canvas on the screen.
+The :class:`GPUCanvasContext` is the bridge between wgpu and the underlying GUI backend.
+
+Buffers and textures
+++++++++++++++++++++
+
+A :class:`GPUBuffer` can be created from a device. It is used to hold data that can
+be uploaded using its API. From the shader's point of view, the buffer can be accessed
+as a typed array.
+
+A :class:`GPUTexture` is similar to a buffer, but has some image-specific features.
+A texture can be 1D, 2D or 3D, and can have multiple levels of detail (i.e. lods or mipmaps).
+The texture itself represents the raw data; you can create one or more :class:`GPUTextureView` objects
+for it, which can be attached to a shader.
+
+To let a shader sample from a texture, you also need a :class:`GPUSampler` that
+defines the filtering and sampling behavior beyond the edges.
+
+Bind groups
++++++++++++
+
+Shaders need access to resources like buffers, texture views, and samplers.
+The access to these resources occurs via so-called bindings. These are
+integer slots, which must be specified both via the API and in the shader.
+
+Bindings are organized into :class:`GPUBindGroup` s, which are essentially a list
+of :class:`GPUBinding` s.
+
+Further, in wgpu you need to specify a :class:`GPUBindGroupLayout`, providing
+meta-information about the binding (type, texture dimension, etc.).
+
+Multiple bind group layouts are collected in a :class:`GPUPipelineLayout`,
+which represents a complete layout description for a pipeline.
+
+Shaders and pipelines
++++++++++++++++++++++
+
+The wgpu API knows three kinds of shaders: compute, vertex and fragment.
+Pipelines define how the shader is run, and with what resources.
+
+Shaders are represented by a :class:`GPUShaderModule`.
+
+Compute shaders are combined with a pipeline layout into a :class:`GPUComputePipeline`.
+Similarly, a vertex and (optional) fragment shader are combined with a pipeline layout
+into a :class:`GPURenderPipeline`.
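+
+As a minimal sketch (assuming a ``device``, a ``pipeline_layout`` and a WGSL
+``shader_source``, as in the examples in this repo), a compute pipeline is
+created like this:
+
+.. code-block:: py
+
+    cshader = device.create_shader_module(code=shader_source)
+    compute_pipeline = device.create_compute_pipeline(
+        layout=pipeline_layout,
+        compute={"module": cshader, "entry_point": "main"},
+    )
+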
+Both of these inherit from :class:`GPUPipelineBase`.
+
+Command buffers and encoders
+++++++++++++++++++++++++++++
+
+The actual rendering occurs by recording a series of commands and then submitting these commands.
+
+The root object to generate commands with is the :class:`GPUCommandEncoder`.
+This class inherits from :class:`GPUCommandsMixin` (because it generates commands),
+and :class:`GPUDebugCommandsMixin` (because it supports debugging).
+
+Commands specific to compute and rendering are generated with a :class:`GPUComputePassEncoder` and :class:`GPURenderPassEncoder` respectively. You get these from the command encoder by the
+corresponding ``begin_x_pass()`` method. These pass encoders inherit from
+:class:`GPUBindingCommandsMixin` (because you bind resources to them),
+and the latter also from :class:`GPURenderCommandsMixin`.
+
+When you're done generating commands, you call ``finish()`` and get the list of
+commands as an opaque object: the :class:`GPUCommandBuffer`. You don't really use this object
+except for submitting it to the :class:`GPUQueue`.
+
+The command buffers are one-time use. The :class:`GPURenderBundle` and :class:`GPURenderBundleEncoder` can
+be used to record commands to be used multiple times, but this is not yet
+implemented in wgpu-py.
+
+Error handling
+++++++++++++++
+
+Errors in wgpu-native are raised as Python errors where possible. Uncaught errors
+and warnings are logged using the ``wgpu`` logger.
+
+There are specific exceptions that can be raised:
+
+* :class:`GPUError` is the generic (base) error class.
+* :class:`GPUValidationError` is for wgpu validation errors. Shader errors also fall into this category.
+* :class:`GPUOutOfMemoryError` is a wgpu `MemoryError`.
+* :class:`GPUInternalError` is raised when wgpu reaches an internal error state.
+* :class:`GPUPipelineError` is for errors related to the pipeline.
+* :class:`GPUDeviceLostInfo` is provided when the device is lost.
+
+TODO
+++++
+
+These classes are not supported and/or documented yet:
+:class:`GPUCompilationMessage`
+:class:`GPUCompilationInfo`
+:class:`GPUQuerySet`
+
+
+List of flags, enums, and structs
+---------------------------------
+
+.. toctree::
+    :maxdepth: 2
+
+    wgpu_flags
+    wgpu_enums
+    wgpu_structs
+
+
+List of GPU classes
+-------------------
+
+.. automodule:: wgpu.classes
+
+.. currentmodule:: wgpu
+
+.. autosummary::
+    :nosignatures:
+    :toctree: generated
+    :template: wgpu_class_layout.rst
+
+    ~GPU
+    ~GPUAdapterInfo
+    ~GPUAdapter
+    ~GPUBindGroup
+    ~GPUBindGroupLayout
+    ~GPUBindingCommandsMixin
+    ~GPUBuffer
+    ~GPUCanvasContext
+    ~GPUCommandBuffer
+    ~GPUCommandEncoder
+    ~GPUCommandsMixin
+    ~GPUCompilationInfo
+    ~GPUCompilationMessage
+    ~GPUComputePassEncoder
+    ~GPUComputePipeline
+    ~GPUDebugCommandsMixin
+    ~GPUDevice
+    ~GPUDeviceLostInfo
+    ~GPUError
+    ~GPUInternalError
+    ~GPUObjectBase
+    ~GPUOutOfMemoryError
+    ~GPUPipelineBase
+    ~GPUPipelineError
+    ~GPUPipelineLayout
+    ~GPUQuerySet
+    ~GPUQueue
+    ~GPURenderBundle
+    ~GPURenderBundleEncoder
+    ~GPURenderCommandsMixin
+    ~GPURenderPassEncoder
+    ~GPURenderPipeline
+    ~GPUSampler
+    ~GPUShaderModule
+    ~GPUTexture
+    ~GPUTextureView
+    ~GPUValidationError
diff --git a/docs/wgpu_enums.rst b/docs/wgpu_enums.rst
new file mode 100644
index 0000000..b4532ff
--- /dev/null
+++ b/docs/wgpu_enums.rst
@@ -0,0 +1,7 @@
+Enums
+=====
+
+..
automodule:: wgpu.enums + :members: + :undoc-members: + :exclude-members: Enum diff --git a/docs/wgpu_flags.rst b/docs/wgpu_flags.rst new file mode 100644 index 0000000..8c41c6c --- /dev/null +++ b/docs/wgpu_flags.rst @@ -0,0 +1,7 @@ +Flags +===== + +.. automodule:: wgpu.flags + :members: + :undoc-members: + :exclude-members: Flags diff --git a/docs/wgpu_structs.rst b/docs/wgpu_structs.rst new file mode 100644 index 0000000..3feb606 --- /dev/null +++ b/docs/wgpu_structs.rst @@ -0,0 +1,7 @@ +Structs +======= + +.. automodule:: wgpu.structs + :members: + :undoc-members: + :exclude-members: Struct diff --git a/download-wgpu-native.py b/download-wgpu-native.py new file mode 100644 index 0000000..659240a --- /dev/null +++ b/download-wgpu-native.py @@ -0,0 +1,178 @@ +import os +import re +import sys +import argparse +import tempfile +import platform +from zipfile import ZipFile + +import requests + + +# The directory containing non-python resources that are included in packaging +RESOURCE_DIR = os.path.join("wgpu", "resources") +# The version installed through this script is tracked in the backend module +VERSION_FILE = os.path.join("wgpu", "backends", "wgpu_native", "__init__.py") + +# Whether to ensure we export \n instead of \r\n +FORCE_SIMPLE_NEWLINES = False +if sys.platform.startswith("win"): + sample = open(os.path.join(RESOURCE_DIR, "codegen_report.md"), "rb").read() + if sample.count(b"\r\n") == 0: + FORCE_SIMPLE_NEWLINES = True + + +def get_current_version(): + with open(VERSION_FILE) as fh: + return re.search(r"__version__ = \"(.*?)\"", fh.read()).group(1) + + +def write_current_version(version, commit_sha): + with open(VERSION_FILE, "rb") as fh: + file_content = fh.read().decode() + file_content = re.sub( + r"__version__ = \".*?\"", + f'__version__ = "{version}"', + file_content, + ) + file_content = re.sub( + r"__commit_sha__ = \".*?\"", + f'__commit_sha__ = "{commit_sha}"', + file_content, + ) + with open(VERSION_FILE, mode="wb") as fh: + fh.write(file_content.encode()) + + +def download_file(url, filename): + resp = requests.get(url, stream=True) + with open(filename, mode="wb") as fh: + for chunk in resp.iter_content(chunk_size=1024 * 128): + fh.write(chunk) + + +def extract_file(zip_filename, member, path): + z = ZipFile(zip_filename) + os.makedirs(path, exist_ok=True) + z.extract(member, path=path) + if member.endswith(".h") and FORCE_SIMPLE_NEWLINES: + filename = os.path.join(path, member) + bb = open(filename, "rb").read() + with open(filename, "wb") as f: + f.write(bb.replace(b"\r\n", b"\n")) + + +def get_os_string(): + if sys.platform.startswith("win"): + return "windows" + elif sys.platform.startswith("darwin"): + return "macos" + elif sys.platform.startswith("linux"): + return "linux" + else: + # We do not provide binaries for this platform. Note that we can + # have false positives, e.g. on ARM Linux. We assume that users on + # such platforms are aware and arrange for the wgpu lib themselves. + raise RuntimeError(f"Platform '{sys.platform}' not supported") + + +def get_arch(): + # See e.g.: https://stackoverflow.com/questions/45124888 + is_64_bit = sys.maxsize > 2**32 + machine = platform.machine() + + # See if this is run by cibuildwheel and check to see if ARCHFLAGS is + # specified (only done on macOS). This allows to select the proper binaries. 
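+    # (On macOS, cibuildwheel sets ARCHFLAGS to e.g. "-arch arm64", so checking
+    # for "arm64" tells us whether to fetch the aarch64 or x86_64 binary.)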
+ # For specifics of CIBUILDWHEEL and macOS build envs, see: + # https://github.com/pypa/cibuildwheel/blob/4307b52ff28b631519d38bfa0dd09d6a9b39a81e/cibuildwheel/macos.py#L277 + if os.environ.get("CIBUILDWHEEL") == "1" and "ARCHFLAGS" in os.environ: + archflags = os.environ["ARCHFLAGS"] + return "aarch64" if "arm64" in archflags else "x86_64" + + if machine == "armv7l": + # Raspberry pi + return "armv7" + elif is_64_bit and machine.startswith(("arm", "aarch64")): + # Includes MacOS M1, arm linux, ... + return "aarch64" + elif is_64_bit: + return "x86_64" + else: + return "i686" + + +def main(version, os_string, arch, upstream): + for build in ["release"]: # ["release", "debug"] + filename = f"wgpu-{os_string}-{arch}-{build}.zip" + url = f"https://github.com/{upstream}/releases/download/v{version}/{filename}" + tmp = tempfile.gettempdir() + zip_filename = os.path.join(tmp, filename) + print(f"Downloading {url} to {zip_filename}") + download_file(url, zip_filename) + headerfile1 = "webgpu.h" + headerfile2 = "wgpu.h" + binaryfile = None + if os_string == "linux": + binaryfile = "libwgpu_native.so" + elif os_string == "macos": + binaryfile = "libwgpu_native.dylib" + elif os_string == "windows": + binaryfile = "wgpu_native.dll" + else: + raise RuntimeError(f"Platform '{os_string}' not supported") + root, ext = os.path.splitext(binaryfile) + binaryfile_name = root + "-" + build + ext + print(f"Extracting {headerfile1} to {RESOURCE_DIR}") + extract_file(zip_filename, headerfile1, RESOURCE_DIR) + print(f"Extracting {headerfile2} to {RESOURCE_DIR}") + extract_file(zip_filename, headerfile2, RESOURCE_DIR) + print(f"Extracting {binaryfile} to {RESOURCE_DIR}") + extract_file(zip_filename, binaryfile, RESOURCE_DIR) + os.replace( + os.path.join(RESOURCE_DIR, binaryfile), + os.path.join(RESOURCE_DIR, binaryfile_name), + ) + current_version = get_current_version() + if version != current_version: + print(f"Version changed, updating {VERSION_FILE}") + filename = "commit-sha" + url = f"https://github.com/{upstream}/releases/download/v{version}/{filename}" + commit_sha_filename = os.path.join(tmp, filename) + print(f"Downloading {url} to {commit_sha_filename}") + download_file(url, commit_sha_filename) + with open(commit_sha_filename) as fh: + commit_sha = fh.read().strip() + write_current_version(version, commit_sha) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Download wgpu-native binaries and headers from github releases" + ) + version = get_current_version() + parser.add_argument( + "--version", help=f"Version to download (default: {version})", default=version + ) + os_string = get_os_string() + parser.add_argument( + "--os", + help=f"Platform to download for (default: {os_string})", + default=os_string, + choices=("linux", "macos", "windows"), + ) + arch_string = get_arch() + parser.add_argument( + "--arch", + help=f"Architecture to download for (default: {arch_string})", + default=arch_string, + choices=("x86_64", "i686", "aarch64"), + ) + upstream = "gfx-rs/wgpu-native" + parser.add_argument( + "--upstream", + help=f"Upstream repository to download release from (default: {upstream})", + default=upstream, + ) + args = parser.parse_args() + + main(args.version, args.os, args.arch, args.upstream) diff --git a/examples/compute_noop.py b/examples/compute_noop.py new file mode 100644 index 0000000..2c19e2a --- /dev/null +++ b/examples/compute_noop.py @@ -0,0 +1,136 @@ +""" +Example compute shader that does ... nothing but copy a value from one +buffer into another. 
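+
+Run this script directly (python compute_noop.py); it prints the copied
+values and asserts that they match the input (the numbers 0..19).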
+""" + +import wgpu +from wgpu.utils.compute import compute_with_buffers # Convenience function + + +# %% Shader and data + +shader_source = """ + +@group(0) @binding(0) +var data1: array; + +@group(0) @binding(1) +var data2: array; + +@compute +@workgroup_size(1) +fn main(@builtin(global_invocation_id) index: vec3) { + let i: u32 = index.x; + data2[i] = data1[i]; +} +""" + +# Create input data as a memoryview +n = 20 +data = memoryview(bytearray(n * 4)).cast("i") +for i in range(n): + data[i] = i + + +# %% The short version, using memoryview + +# The first arg is the input data, per binding +# The second arg are the ouput types, per binding +out = compute_with_buffers({0: data}, {1: (n, "i")}, shader_source, n=n) + +# The result is a dict matching the output types +# Select data from buffer at binding 1 +result = out[1].tolist() +print(result) +assert result == list(range(20)) + + +# %% The short version, using numpy + +# import numpy as np +# +# numpy_data = np.frombuffer(data, np.int32) +# out = compute_with_buffers({0: numpy_data}, {1: numpy_data.nbytes}, shader_source, n=n) +# result = np.frombuffer(out[1], dtype=np.int32) +# print(result.tolist()) + + +# %% The long version using the wgpu API + +# %% Create device +# Create device and shader object +device = wgpu.utils.get_default_device() + +# Or, you can select GPU by requesting all available adapters +# adapters = wgpu.backends.wgpu_native.enumerate_adapters() +# adapter = None +# for adap in adapters: +# adapter_info = adap.request_adapter_info() +# print(adapter_info) +# if "NVIDIA" in adapter_info["device"]: +# adapter = adap +# break +# assert adapter is not None +# device = adapter.request_device() + +# %% +cshader = device.create_shader_module(code=shader_source) + +# Create buffer objects, input buffer is mapped. 
+buffer1 = device.create_buffer_with_data(data=data, usage=wgpu.BufferUsage.STORAGE) +buffer2 = device.create_buffer( + size=data.nbytes, usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC +) + +# Setup layout and bindings +binding_layouts = [ + { + "binding": 0, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.read_only_storage, + }, + }, + { + "binding": 1, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.storage, + }, + }, +] +bindings = [ + { + "binding": 0, + "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, + }, + { + "binding": 1, + "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, + }, +] + +# Put everything together +bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) +pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[bind_group_layout]) +bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + +# Create and run the pipeline +compute_pipeline = device.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": cshader, "entry_point": "main"}, +) +command_encoder = device.create_command_encoder() +compute_pass = command_encoder.begin_compute_pass() +compute_pass.set_pipeline(compute_pipeline) +compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 elements not used +compute_pass.dispatch_workgroups(n, 1, 1) # x y z +compute_pass.end() +device.queue.submit([command_encoder.finish()]) + +# Read result +# result = buffer2.read_data().cast("i") +out = device.queue.read_buffer(buffer2).cast("i") +result = out.tolist() +print(result) +assert result == list(range(20)) diff --git a/examples/compute_timestamps.py b/examples/compute_timestamps.py new file mode 100644 index 0000000..051e7d9 --- /dev/null +++ b/examples/compute_timestamps.py @@ -0,0 +1,166 @@ +""" +A simple example to profile a compute pass using ComputePassTimestampWrites. +""" + +import wgpu + +""" +Define the number of elements, global and local sizes. +Change these and see how it affects performance. +""" +n = 512 * 512 +local_size = [32, 1, 1] +global_size = [n // local_size[0], 1, 1] + +shader_source = f""" +@group(0) @binding(0) +var data1: array; + +@group(0) @binding(1) +var data2: array; + +@group(0) @binding(2) +var data3: array; + +@compute +@workgroup_size({','.join(map(str, local_size))}) +fn main(@builtin(global_invocation_id) index: vec3) {{ + let i: u32 = index.x; + data3[i] = data1[i] + data2[i]; +}} +""" + +# Define two arrays +data1 = memoryview(bytearray(n * 4)).cast("i") +data2 = memoryview(bytearray(n * 4)).cast("i") + +# Initialize the arrays +for i in range(n): + data1[i] = i + +for i in range(n): + data2[i] = i * 2 + +adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + +# Request a device with the timestamp_query feature, so we can profile our computation +device = adapter.request_device(required_features=[wgpu.FeatureName.timestamp_query]) +cshader = device.create_shader_module(code=shader_source) + +# Create buffer objects, input buffer is mapped. 
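+# buffer1 and buffer2 hold the two input arrays (read-only storage in the
+# shader). buffer3 receives the sums; it also gets COPY_SRC usage so the
+# result can be read back with queue.read_buffer() at the end.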
+buffer1 = device.create_buffer_with_data(data=data1, usage=wgpu.BufferUsage.STORAGE) +buffer2 = device.create_buffer_with_data(data=data2, usage=wgpu.BufferUsage.STORAGE) +buffer3 = device.create_buffer( + size=data1.nbytes, usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC +) + +# Setup layout and bindings +binding_layouts = [ + { + "binding": 0, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.read_only_storage, + }, + }, + { + "binding": 1, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.read_only_storage, + }, + }, + { + "binding": 2, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.storage, + }, + }, +] +bindings = [ + { + "binding": 0, + "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, + }, + { + "binding": 1, + "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, + }, + { + "binding": 2, + "resource": {"buffer": buffer3, "offset": 0, "size": buffer3.size}, + }, +] + +# Put everything together +bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) +pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[bind_group_layout]) +bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + +# Create and run the pipeline +compute_pipeline = device.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": cshader, "entry_point": "main"}, +) + +""" +Create a QuerySet to store the 'beginning_of_pass' and 'end_of_pass' timestamps. +Set the 'count' parameter to 2, as this set will contain 2 timestamps. +""" +query_set = device.create_query_set(type=wgpu.QueryType.timestamp, count=2) +command_encoder = device.create_command_encoder() + +# Pass our QuerySet and the indices into it, where the timestamps will be written. +compute_pass = command_encoder.begin_compute_pass( + timestamp_writes={ + "query_set": query_set, + "beginning_of_pass_write_index": 0, + "end_of_pass_write_index": 1, + } +) + +""" +Create the buffer to store our query results. +Each timestamp is 8 bytes. We mark the buffer usage to be QUERY_RESOLVE, +as we will use this buffer in a resolve_query_set call later. +""" +query_buf = device.create_buffer( + size=8 * query_set.count, + usage=wgpu.BufferUsage.QUERY_RESOLVE + | wgpu.BufferUsage.STORAGE + | wgpu.BufferUsage.COPY_SRC + | wgpu.BufferUsage.COPY_DST, +) +compute_pass.set_pipeline(compute_pipeline) +compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 elements not used +compute_pass.dispatch_workgroups(*global_size) # x y z +compute_pass.end() + +# Resolve our queries, and store the results in the destination buffer we created above. +command_encoder.resolve_query_set( + query_set=query_set, + first_query=0, + query_count=2, + destination=query_buf, + destination_offset=0, +) +device.queue.submit([command_encoder.finish()]) + +""" +Read the query buffer to get the timestamps. 
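+The raw values are expressed in ticks (typically nanoseconds), which is why
+the difference is divided by 1000 below to report microseconds.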
+Index 0: beginning timestamp +Index 1: end timestamp +""" +timestamps = device.queue.read_buffer(query_buf).cast("Q").tolist() +print(f"Adding two {n} sized arrays took {(timestamps[1]-timestamps[0])/1000} us") + +# Read result +out = device.queue.read_buffer(buffer3).cast("i") +result = out.tolist() + +# Calculate the result on the CPU for comparison +result_cpu = [a + b for a, b in zip(data1, data2)] + +# Ensure results are the same +assert result == result_cpu diff --git a/examples/cube.py b/examples/cube.py new file mode 100644 index 0000000..18dc4e1 --- /dev/null +++ b/examples/cube.py @@ -0,0 +1,388 @@ +""" +This example renders a simple textured rotating cube. +""" +# test_example = true + +import time + +from wgpu.gui.auto import WgpuCanvas, run +import wgpu +import numpy as np + + +# %% Create canvas and device + +# Create a canvas to render to +canvas = WgpuCanvas(title="wgpu cube") + +# Create a wgpu device +adapter = wgpu.gpu.request_adapter(power_preference="high-performance") +device = adapter.request_device() + +# Prepare present context +present_context = canvas.get_context() +render_texture_format = present_context.get_preferred_format(device.adapter) +present_context.configure(device=device, format=render_texture_format) + + +# %% Generate data + +# pos texcoord +# x, y, z, w, u, v +vertex_data = np.array( + [ + # top (0, 0, 1) + [-1, -1, 1, 1, 0, 0], + [1, -1, 1, 1, 1, 0], + [1, 1, 1, 1, 1, 1], + [-1, 1, 1, 1, 0, 1], + # bottom (0, 0, -1) + [-1, 1, -1, 1, 1, 0], + [1, 1, -1, 1, 0, 0], + [1, -1, -1, 1, 0, 1], + [-1, -1, -1, 1, 1, 1], + # right (1, 0, 0) + [1, -1, -1, 1, 0, 0], + [1, 1, -1, 1, 1, 0], + [1, 1, 1, 1, 1, 1], + [1, -1, 1, 1, 0, 1], + # left (-1, 0, 0) + [-1, -1, 1, 1, 1, 0], + [-1, 1, 1, 1, 0, 0], + [-1, 1, -1, 1, 0, 1], + [-1, -1, -1, 1, 1, 1], + # front (0, 1, 0) + [1, 1, -1, 1, 1, 0], + [-1, 1, -1, 1, 0, 0], + [-1, 1, 1, 1, 0, 1], + [1, 1, 1, 1, 1, 1], + # back (0, -1, 0) + [1, -1, 1, 1, 0, 0], + [-1, -1, 1, 1, 1, 0], + [-1, -1, -1, 1, 1, 1], + [1, -1, -1, 1, 0, 1], + ], + dtype=np.float32, +) + +index_data = np.array( + [ + [0, 1, 2, 2, 3, 0], # top + [4, 5, 6, 6, 7, 4], # bottom + [8, 9, 10, 10, 11, 8], # right + [12, 13, 14, 14, 15, 12], # left + [16, 17, 18, 18, 19, 16], # front + [20, 21, 22, 22, 23, 20], # back + ], + dtype=np.uint32, +).flatten() + + +# Create texture data (srgb gray values) +texture_data = np.array( + [ + [50, 100, 150, 200], + [100, 150, 200, 50], + [150, 200, 50, 100], + [200, 50, 100, 150], + ], + dtype=np.uint8, +) +texture_data = np.repeat(texture_data, 64, 0) +texture_data = np.repeat(texture_data, 64, 1) +texture_size = texture_data.shape[1], texture_data.shape[0], 1 + +# Use numpy to create a struct for the uniform +uniform_dtype = [("transform", "float32", (4, 4))] +uniform_data = np.zeros((), dtype=uniform_dtype) + + +# %% Create resource objects (buffers, textures, samplers) + +# Create vertex buffer, and upload data +vertex_buffer = device.create_buffer_with_data( + data=vertex_data, usage=wgpu.BufferUsage.VERTEX +) + +# Create index buffer, and upload data +index_buffer = device.create_buffer_with_data( + data=index_data, usage=wgpu.BufferUsage.INDEX +) + +# Create uniform buffer - data is uploaded each frame +uniform_buffer = device.create_buffer( + size=uniform_data.nbytes, usage=wgpu.BufferUsage.UNIFORM | wgpu.BufferUsage.COPY_DST +) + +# Create texture, and upload data +texture = device.create_texture( + size=texture_size, + usage=wgpu.TextureUsage.COPY_DST | wgpu.TextureUsage.TEXTURE_BINDING, + 
dimension=wgpu.TextureDimension.d2, + format=wgpu.TextureFormat.r8unorm, + mip_level_count=1, + sample_count=1, +) +texture_view = texture.create_view() + +device.queue.write_texture( + { + "texture": texture, + "mip_level": 0, + "origin": (0, 0, 0), + }, + texture_data, + { + "offset": 0, + "bytes_per_row": texture_data.strides[0], + }, + texture_size, +) + +# Create a sampler +sampler = device.create_sampler() + + +# %% The shaders + + +shader_source = """ +struct Locals { + transform: mat4x4, +}; +@group(0) @binding(0) +var r_locals: Locals; + +struct VertexInput { + @location(0) pos : vec4, + @location(1) texcoord: vec2, +}; +struct VertexOutput { + @location(0) texcoord: vec2, + @builtin(position) pos: vec4, +}; +struct FragmentOutput { + @location(0) color : vec4, +}; + + +@vertex +fn vs_main(in: VertexInput) -> VertexOutput { + let ndc: vec4 = r_locals.transform * in.pos; + var out: VertexOutput; + out.pos = vec4(ndc.x, ndc.y, 0.0, 1.0); + out.texcoord = in.texcoord; + return out; +} + +@group(0) @binding(1) +var r_tex: texture_2d; + +@group(0) @binding(2) +var r_sampler: sampler; + +@fragment +fn fs_main(in: VertexOutput) -> FragmentOutput { + let value = textureSample(r_tex, r_sampler, in.texcoord).r; + let physical_color = vec3(pow(value, 2.2)); // gamma correct + var out: FragmentOutput; + out.color = vec4(physical_color.rgb, 1.0); + return out; +} +""" + +shader = device.create_shader_module(code=shader_source) + + +# %% The bind groups + +# We always have two bind groups, so we can play distributing our +# resources over these two groups in different configurations. +bind_groups_entries = [[]] +bind_groups_layout_entries = [[]] + +bind_groups_entries[0].append( + { + "binding": 0, + "resource": { + "buffer": uniform_buffer, + "offset": 0, + "size": uniform_buffer.size, + }, + } +) +bind_groups_layout_entries[0].append( + { + "binding": 0, + "visibility": wgpu.ShaderStage.VERTEX | wgpu.ShaderStage.FRAGMENT, + "buffer": {"type": wgpu.BufferBindingType.uniform}, + } +) + +bind_groups_entries[0].append({"binding": 1, "resource": texture_view}) +bind_groups_layout_entries[0].append( + { + "binding": 1, + "visibility": wgpu.ShaderStage.FRAGMENT, + "texture": { + "sample_type": wgpu.TextureSampleType.float, + "view_dimension": wgpu.TextureViewDimension.d2, + }, + } +) + +bind_groups_entries[0].append({"binding": 2, "resource": sampler}) +bind_groups_layout_entries[0].append( + { + "binding": 2, + "visibility": wgpu.ShaderStage.FRAGMENT, + "sampler": {"type": wgpu.SamplerBindingType.filtering}, + } +) + + +# Create the wgou binding objects +bind_group_layouts = [] +bind_groups = [] + +for entries, layout_entries in zip(bind_groups_entries, bind_groups_layout_entries): + bind_group_layout = device.create_bind_group_layout(entries=layout_entries) + bind_group_layouts.append(bind_group_layout) + bind_groups.append( + device.create_bind_group(layout=bind_group_layout, entries=entries) + ) + +pipeline_layout = device.create_pipeline_layout(bind_group_layouts=bind_group_layouts) + + +# %% The render pipeline + +render_pipeline = device.create_render_pipeline( + layout=pipeline_layout, + vertex={ + "module": shader, + "entry_point": "vs_main", + "buffers": [ + { + "array_stride": 4 * 6, + "step_mode": wgpu.VertexStepMode.vertex, + "attributes": [ + { + "format": wgpu.VertexFormat.float32x4, + "offset": 0, + "shader_location": 0, + }, + { + "format": wgpu.VertexFormat.float32x2, + "offset": 4 * 4, + "shader_location": 1, + }, + ], + }, + ], + }, + primitive={ + "topology": 
wgpu.PrimitiveTopology.triangle_list, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.back, + }, + depth_stencil=None, + multisample=None, + fragment={ + "module": shader, + "entry_point": "fs_main", + "targets": [ + { + "format": render_texture_format, + "blend": { + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + } + ], + }, +) + + +# %% Setup the render function + + +def draw_frame(): + # Update uniform transform + a1 = -0.3 + a2 = time.time() + s = 0.6 + ortho = np.array( + [ + [s, 0, 0, 0], + [0, s, 0, 0], + [0, 0, s, 0], + [0, 0, 0, 1], + ], + ) + rot1 = np.array( + [ + [1, 0, 0, 0], + [0, np.cos(a1), -np.sin(a1), 0], + [0, np.sin(a1), +np.cos(a1), 0], + [0, 0, 0, 1], + ], + ) + rot2 = np.array( + [ + [np.cos(a2), 0, np.sin(a2), 0], + [0, 1, 0, 0], + [-np.sin(a2), 0, np.cos(a2), 0], + [0, 0, 0, 1], + ], + ) + uniform_data["transform"] = rot2 @ rot1 @ ortho + + # Upload the uniform struct + tmp_buffer = device.create_buffer_with_data( + data=uniform_data, usage=wgpu.BufferUsage.COPY_SRC + ) + + command_encoder = device.create_command_encoder() + command_encoder.copy_buffer_to_buffer( + tmp_buffer, 0, uniform_buffer, 0, uniform_data.nbytes + ) + + current_texture_view = present_context.get_current_texture().create_view() + render_pass = command_encoder.begin_render_pass( + color_attachments=[ + { + "view": current_texture_view, + "resolve_target": None, + "clear_value": (1, 1, 1, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ], + ) + + render_pass.set_pipeline(render_pipeline) + render_pass.set_index_buffer(index_buffer, wgpu.IndexFormat.uint32) + render_pass.set_vertex_buffer(0, vertex_buffer) + for bind_group_id, bind_group in enumerate(bind_groups): + render_pass.set_bind_group(bind_group_id, bind_group, [], 0, 99) + render_pass.draw_indexed(index_data.size, 1, 0, 0, 0) + render_pass.end() + + device.queue.submit([command_encoder.finish()]) + + canvas.request_draw() + + +canvas.request_draw(draw_frame) + +if __name__ == "__main__": + run() diff --git a/examples/events.py b/examples/events.py new file mode 100644 index 0000000..17ba923 --- /dev/null +++ b/examples/events.py @@ -0,0 +1,21 @@ +""" +A simple example to demonstrate events. +""" +from wgpu.gui.auto import WgpuCanvas, run, call_later + + +class MyCanvas(WgpuCanvas): + def handle_event(self, event): + if event["event_type"] != "pointer_move": + print(event) + + +if __name__ == "__main__": + canvas = MyCanvas(size=(640, 480), title="wgpu events") + + def send_message(message): + print(f"Message: {message}") + + call_later(2, send_message, "hello") + + run() diff --git a/examples/screenshots/cube.png b/examples/screenshots/cube.png new file mode 100644 index 0000000000000000000000000000000000000000..9003daefcd8541e50f5024bc1cb4260be703adab GIT binary patch literal 4265 zcmcgveNJHG5( zRyQ}8Y0)z>8B>_RWO(K(W0Ha>GNN#%F^Q>6yxH^+um9((hDzbWiU}fH)qDB!<kMsv2bY)WI&*H_m}+7g0LPkq+Y)6;rx@thDFtbe(YvXhicmfgj2q|@!) zj-_U1$d5B19aHV^n}X6LWnmWLX4olhNpZqJEVAdcd^@u|!buibWtC079gS!03CPX! 
zT7<*xZGZUv`ILy@hWo9SCHf4r@eN*Y)y^oulFiJb|CAiW0U{Vk2Qw+oz|8UpImAO>;*G2ryz3o^1DBhr%vt7!?M!FgN#JEPwP zGT3tPB$He2PCeg^f-rw%32JCHv);bF}$>Iflkf5oxy z?8M(L`4h$-M6GUlU-Y0zTD(%A2zk{gXcaQcFJ1wi&nofdC!O_Z(O{0`(DeH-Vw&7 z2IJDnpY?taZ}5!T{rKv`0%KftDB>9$ zBJFw7u(0~!1VZf-^ZoqpT0YVE_|+>tsd4kuiyx95<(J-zmXwx7P22yq(`H|7c;KJ0 zi8$uYs%yo2Uy>iY11Ix&DvPFc6<+x~!f_$P{%o$jlR+q6$hc`F)rq`P|)XNb+99xn#Z{x^pzAMJb zM0v#u{=s}HI1Pe5Y@-S$3IcZ(D@%(N*vH4h95Ys#3BL6Z8R4SrWZZiZ+O+>FkA#Do zqq1~jy@DV07}rdOl4-1aoFq^{KZFsPNdLI^Wm!TKaR4WQ3DL1m#>Au~m@|c?r^{$0 z0hu0{Nc2zErkOY<1{@?MV~u3DBTv{B7b|EzA!Se%xNVV*hmR?Y300XAVkITt#HBq) z&%jF=YYhKj_p>2g>VDNt*tD!kdQQSrav)ad4ipv?N@;{g1005vNOq(2eFWV({l4kP z7s<$cTF8wyTPamlBserMmwXW%lA^-_Uw}hnqjWg9Y6Tnvp*yOdTl_kJ|9C`dobX_g zeb$FbA;p6C3CnsmN|7?CO;I8!Hd`1bOkISt$!IP~d}FSkaKH#HzHc>x1e{AB@ ze+m^axm8u6PkB&>41@|;UjC_8!yWR3M&bLfG-TAEI>cSouWd^ z1xp}Tmx1^DTVH)N999o4AX-knze(U~Vk_Dcz`b?HwQb8nZS-oUsV%KZKnghv+$l^r za(7YDYqFDmCXSIWw$(<>3{+T#|Ne%aK2iTMuSxjsvq)Kern1UUWfzlgx7joKh7{no zyd-$nM=`AX;4=nyFp6=x{ARpDQ8acBNCU#@)si);1o0|w&hT}(%-A$STpgEIBy;vS3Txu(sxt4ka3^!8kaM-k#7d|;<7zTcT%AKP z;0MhGm6499;KitM;8#oO;yF^hC|`_mG$w&CD_e*#$;De1{HM5 z*or+|BC3cwoemfW==7h{S3cVOF*wydj@bDw3Mp`_Y#|b4^uFJHi~g%9FOb&Sit?tk zfJ5{oF4>BAI<7TCzx{rN)ok<{j52IV=>FDE~W9zzHpXauPknjYUL z6_txE95V5;!2usZ<)m74+MzQ)UAk(vFBxfngRfb;c6me{+noXi_g zU9N4ne$m<4sIaeI>gjuoixmwS_E|w`dIz%%?rzSZyIaTDKg=ClS!!u{ySaHrI^S`7 zscFr+mDQmSy#m;H!BPK;v3nXCg3@$R8-Jarpo7u{@uj9I=j-hkbm;F`+?~?#pUbh) zg;S*7zvtRd)W4G+K`qdr5ee~Wf~Tx0`WR2~dB5Ns67|CD_ol()I6x>*T@S7X! lQ8VHHvP!-AXTMhXbzpD4I>CK({Fe-W(=xJbZ&~fm{{mw?X)*u+ literal 0 HcmV?d00001 diff --git a/examples/screenshots/triangle_auto.png b/examples/screenshots/triangle_auto.png new file mode 100644 index 0000000000000000000000000000000000000000..5c9bb7ee723e992879622eced3d6b43da9c9494a GIT binary patch literal 22181 zcmb@tWmKC{(>59?PO+jz3dJc9C~k!or$I^ucPWKHaMw~SXo2Edq=vgoaS8-?flThreEFDA>%{{tU14D^ zVkN>SZeu-DQ%M{7!jbp8#S-2orL~6f@qH6kCof2E;QaC_O}V=@;@j76FBl^R{I0Dl z+GetmfskVp@X8E2pzrecaRb80ePn0>96Wmwn0s8vK7of>2H@zvoe3`X7d$rXl?{M{ z{eeS-y%IggUY`U0^LJcI>@@`NpYQ&EeC7Y-y#W9B$Nz7x>wme|e@*%SZR7vL zz5W*@|6lQ1t#_dS;+XK4LB*u+5Q1O!F}Sm&wzRMGmx||MZG4$s5 zBFBBqO({cMV(?b8-h(>Wt(t>Hr~+hAP1<2!{{$&GH%b;@or*&wc`-7#TO0 zhAFTBF<6xOnQKrkw72ZjhufQVyAC%6tZ|s|iRT9_9(%L=27MQDK&O++isP+W85)Fc zt$U%t6Su<#5Ony3G6N<&aVag@h?ZKv#snKS`%Z6DxbN!E2oejK*DUrc=A#epQHK9# zX}l+Nhh%*d$4x^#BZgPB{l)wBTRB~DG?zJ~p1H?1us|YF{~3`kCipQSW{d|xBf8>Z zvK0IzVU*SpyfodrSrXYFe4FM=9-ByoTjh-=bg*_|5X3;--=uck7EbyhhW{zM*^QIy z_sV&i3GwVHyc~Tc-f%8N%w15g64ifJ5hmMb3<>h)Z!axgS>B3C3+mmaISGs|gGmg2uBWOtMki6V5N9`V2qi`lor5 zAb3$LAOWb7Xz} zN1&8=2qBMSNrWYW`m59j^@^q`sDPZX&Okx>;*Vwf2lkYAi5ps4|(DIF*#A z!gwC@X(?Ri29z`8k%cs;Q6INE^YYEpTyX>9_Eg~zBW6Nj7s?^fP#qf>L=$|Q{69wM z;>-n=RQ}8ICQ190T|D#F3V8dnbyH@$eD`_Is=;zT^8>}4Tstj;ptCps15!xC%FVYH zIVQa|7I>w43dEY|k(RK1Cn{}Q-%J z;^}Xbyqi8;IFkuac(}8TRwcH=Gjkk+V>1C#2GCtH-5dLzx$+ID&$_x=gFhZk6fD_clW~{V>tps zc3g?7(*~$?Il(8K;VLosZ(MgTUPWs6rCH4i3i@}692=Ph3s~1dIYRyUO!Ce(G30{% z-Zy~b9hd7cL1;D?rN{f6MfC8!c5z-H(Ct@4W*@>Di_h#dYZE-yHi!?b;Eq9T@M zNd3^-l;+P1&_Eti;J<4wHvphtF4P&5xz+cmr`qbftw#^lm#9?^e#99Sky&?wyFvEe z6U>5(&a0LvYu*#jKWj&MqGBtsL4ZrS`q~$9&fo4L`vb*sq+KSaQkpU3?~5~f5A?&W zw0NsKzW9r?ffuQeJ@NdZL9Ti7_W)YVg3I&#U5HmbJ|NgFuM>9A#xU(4rUA(D!3~?! 
zNVCizu#U%vJzWP)h=jJwtJ;;c=ntZ1lXDGCD_c+J6Ed7<;lZW;?KQWGE?VszM-6!$ z@4d+j3X(~mwGq_JGRn>5L@hmv@lN=Dy}+{JWj<;BB(LL#I`|gMaD8AN?54%7aFeOC zn68LQ?jXmcq*Mr?#3$`M?@+E&#rj3vcKNznxOVmw6aTu*Z)<4}wfKaG21eTZhn$85 zWQ?{&0ov}~^|b;4W7b@dDB@IfF*IPp9Yg7uT;+Am8xcYjE|u4!te<-fiWAC#UT-v5 z*`q%Db-O@ie+I_(oa&~2R?9SLuJVTRdc34Af(G13a**Jxx)|U_P~R9$)~{DQ9P(N$ zHb{h^W{h&{QU&ZlgEa`J3wX8dJOBCff)cOhFKy=Q9;w-Bp!|~h>sd}Ct1>}^-_YK& z+tH1|Dc1rzD7~LEtujoYPb8>ebPx<&k&LkxZ>Z~)SKc&#W@$acEsypkQUY}1gvr8M zAVGuKE|!EqS@(chiQs{`>O)#NqSLx1wHD-AV8GJ|$ff#u$NbdUUrn-;?5+_aK@ zu(w)CKD0I$)*?B?P(60R}#^?5Nxk|lXw_^E5J2q6&MsGh0! ztvp%#k=&FCyZ>)n*$=r{O~I|ZsYbJ}qqVq=)f(OB?-Py%jPzO;ZXj9I`?r!4Ba)ys zzeeUHf?4J7s+}sU6 z*`Em|Opc1sJ-VS}&91)86og?WWW6cso_Kt(@1JG)N4KUJ`0aIQpXv zA10p6Xe*>8EUj}R%Q+k)Q~ssg@6^{Xz^pc}wxYQ|??yQ0w%F>|jjHmw=q&B$mkN1* zxV_mPJ}#NOF}qq-H)}rSa|9c7VeTz)T{`NHF3mXV{uOs0hG=5Y4oAD%8i%9O-Q24 zH6^7t4PuXM{O_#GxBek0?t(D1LC)5xbr(@MLMB?T7_{Nnt^$9T%;vK|L}?~vp(k9o zACOX?Am#B1XSCTTV0AcUpUQjIWxe1Xb7yazgy|^!M1Z+L!qt*qJpNj+Y}qh7+YTXB z|6bPZeVVI_z#8(~?1+nd%Y{Sb-OTaEwStc|$q+@r!!Ak;p}Httas^VLM6>R`^fd}( z;a?T6G`de^t!j@*-pcgA| z(2o9|3>yz3pI>WdtB$b3s`r0wCu~h@=`y!IXDG%Q3v_=;4}EQ5H2rd{)<%o_Hjww2 zr%B=`uLn%jFYqCa$##_hY@EhnAqIRw+L;-sK2uulGYSbt|GAh?_l!EeeS+a#3~>$& zvm^xy=JC-HWXn9HiUFBh49z>M7s^Rj9>y8`cC0_5B$YiqQ*} z_TKb#T(eyvY{*cF9(hs{wxehwp*FUge(wXx)YM?jr-CwnTN0U#_OQ@$T2`JH?$P75 z$jOUim9^X8NB;qn!2>5vvC{#qp2e%Wx0`C^y8BN#r^~SLQ&t_lxXEUnXWgxAyk4qd zBc<5P#ILrVLuqel2#MXTwwb#Hu^}#@aPu2*_T4=zIF|s20Xb zFZCs5--&2HuWh9-cA;qnj1e*zmHa7hnsDHDQAZY3i|mlA`hjfWn!HjR%#_9T{wY3L z{6sRC)2cMMeU1I0XVBMs=^g@E#jm_~gw_^7#lM0A&6))SHOO)Z zfaX&pB`E|HcDvqg&jer0%7)Y{@j>(72p$+a9IXqXcja@p+dCq7@i3dVsDY9*oKu8E zg&Rwe(KGYcLJe~jZU6RqY6S4dt2T_|8v}zNW^cj-Us{jz27W8Z(2|-_wiL=D01cF- zm`b)hER?q|^6yYlY}jlS46m=Ehzm2Kw0%R>@s4VC2HIw84V@3;V6X5^)1to$%v({$ z`gBdZ+P#iW{8fsdd`B*%_KlQF=7K9r^VrkZ3eTnXY~oF;P2R>!C;E<^2Pvp>bQR2V zyRj6rSH^N}!nT;<-}eE2bpdU>^CkKrsa+WK$>=bG#nuyOcOCxT~`_5`1jo z{kUUtpE3fgM(u-CS$QeuZX(}lF}gWc?Z||IH%wJGtou@HWG%;Ho9i5ZsdEkZUC31N zPK|G*SOA@?!%BZYa1*O4a{rOJcB*w`e6SfO9rD$}jl&Gr+lZjYnQf;aerJj7pYqp@ zeC|->`bzMIS@J^H2+IC;@D9q0hhc!hrV!AH=PP97_@EJ840X)D$hx7}FDGVr`^ztk z?4|E=TE-*D_xh>&{&t_@OLtTdjy%t>dc&?}!OhnxKM0S$fh`NFpH>PgPP?`ltP zG}MJ3+VvYDX6ypYIbLbETXAu#kYp#p!H^r+H$`6kQ6mcKMy+y?`jDj|qf({_Wi5r~ z+EJ9~`-~zlZV(XgO@@$WOLi0X}S{jXP`rTL#b zulu3@GK=&Y9ddaET4F@jf#hOkAAoch7r|~zPfo8I%%ZvuH;z#6!yaQrAe77?FVWa- zh~3g_z|kzOcj)uIbjaw2HTy{tbYehdKSy=6DQl{rm^@$7u1REHKZ$(HzyQ2Dmco1c>q0L8>1{SPPmP1c?F{;=l7*=KmVMA%K*2CiQhCk5yT-gvT|5E_%(S92NM zN|DEMfGY?E))gCJb~-^SPB;fCJhg*qzYnb?sxwUZd)P3Tmxt#~ zb+8@R_!b+FHr!dWcogcjcu^Q;j$|s3JoNPq(A*At7az!`FI3tTW6nb}lQd$uEYywQ zKpb${OylUL#8&N(dRzEKH3T2~H9vh?CB^FYsCXg8WPdv@r!BGAyxTCv$fw?a$>~Ji zvECp%KrQRwIF`Z!!xknDq>b8BBl{@To`OTlN5Wb9fgg1#eCliNAI{d>HI-Axy%>RK zqPk1YCuiUHk>_(MKU>?7@|cEluAy0G-PTRQhcef|TxVY6Kcv-6G@g{}y(ftl^oT69 z&%S{8ILi34{z@dbH~^t0NihiTBl^#7H-&2ThtZTf>d#gdmf9U9oSRVAoU8MTM=iSc zVp=XoO7K2>MEP6l4AYXL^QSTu1~f7SN=`30%8KEWH5d7k85~ANaeIeiF6%E%o8zOj zma2Vxy_6f{dhgki@w}$%go)3fSEm1DItFaxVhY@LdPt1IeGZ7RmH;EwldbLeuBUQY2o06^y`W zgDGn=V#&g$0`IK7NmYT_KAHlNcFujtRG*`7k8uf9N8V*Zo^&)MxKnmzbaa2!c{%STzX=*Ar=dK+0f-J@!0}OC#M-QN8%2o zKYAu$4r}eVmJrKlc|h(c&G1Ea$jS$~SUp+SUayGS<#0JKN2I&Cr~`~X&PzVayk=+g zY5fIF4uPBc$3}ckh9T!wKJq#GhfQCZhBeB7(ZG|4d;KPrWa3@BDdJ- zDb5IuZ=Coe0)dm2!ffl??=MtX2fWB8`kle+7#v1nhz0s>&|B6g#~rAPMT%P~cWv_M zU_Oc9Fkmu|a*XK!>^!4TvemJb(lmvSS^VN;g{;5FBxgj^y)HZHxi+Iyd}7kx_{K+$XWU;au5V-aU*NYE4;)>pK;+`-8VM$n zvtW=6G5ar4M#%oX`PNV5c};Mpn0GEbJh)0Fp_8<6g+omob^fq0nudB<#+Nl11yGYh zw;zYUvHZ-aClO*+q1$D={)kCj-|I2Qn#3z)+Tr4Zm?Y19{dNH#c5}q+Bdo$D^=S$U 
zx#=CDXwen$JJ?Pr#8U>X8_nD+8mcg*5~g*XCOLW|9PD!&)I%H2$lO3s@JdP7z+)$q z!)(g6xeKX7gOHopEI%`Gu2K(QM)7u+^z;Q`&$IJQ)t2v(m z^Q9mrY`g{A35h1{ujOKEdL+c$XDMHgl%FwDyg#=xZnb}86`$!|kypnmC?O;8-RZ4T z4!?NP$2q2)N9^wiUD*FU`8d6s-ncrX#9K~HI2M%6u~a)|kVK;4ygNH;^p8=ymi;vC zLS5ybmUDk3AGXv`3r$82c1zsBXoMgm@LwkFrN&;EDyKqU%~$kbcK8R9=|Q%Y4N4H) zMgq(EVLm3rQ6QZ2Z%r!REH0D3Pp*I9fxj6?=wzcVNdSzwzDQc_lx=@+qQQpT!g!v~ zBuM*IN-(o}=?nf~k87p0neB_BsKq>PCQU-p1XWb8k6mC>3u zziRY7gQD66zh<|;Pd02TZa*;#2*GB3eM`s!K`KP_M6A|>sfEm)@Tooe6(5m^P;^h; zlj`^LK0udNTT4(Q3?Ck(5csUfhP`)C^1jMfp5Y0d|WU^2RUkwxigFR`6$qs29%JM8Zi-#PAwuW&U^7rEiDMf1SeKD96sv-(^_$HmKVZGttU4j@+R^{f#$l6Ec^{R1evmW9*@)1T7Bcc zc$$6sIP@v*alH!*l|A~wg+W=q=MpqsP$~r7#LcnAnK1&6EgdSfRb>LL7oR7wfjM%z zKFaReBDF5ys@Z)9xo7>jSiMS1{mQ5f)$M4;3xqpthOlrv_fv?JaLZmlve93}W=R@{~5Q??pzaqmyVFGX4UhfiH+z^HzNTJZ-^Uk^UJ)bhpi z)tY@(`cps}!dg7oZ`6IG!n)AjfN4w6>8m39qvzY|cmB3~*uOr4 z54!<@|B00u$J4I~M%95LMo|~LZ=;6sXEVUWapRoIm)%Ivf z?YbA$;{i43-JZ~O&n_)*+?{;C{EQXEd@UhW0Fu<_v)kuoL+n%$_KLCuFW0`ZrBr`IyG9_%sI{2sy} zUb-&z1;2_nCD<8*BTRhUi2n3+;U0!4w9}8L)dn~gE=s8((~K_~FoNOv0#$()DZ7M} z1Q`7@zR_VqFtVhaQtPWQJVq?V#Kd9S=N+T&FZQ>weQ6(ObDOodx!ViL*!_6g27 zwD8@M_iiI0o?URjJ(gz3Cnyg|o6+xkr`NF?z!Co3CG!$9_k%j*d~1+oMIhOMwRd)7 zs+jJFD!K~j{%O;7>W^(_lLIIXqqdPyJ`Tnh@2KM$inNYaU#MwkaIH^z-Q$rMg-bzH zp<{HuMYOW;x|Z^vNx3KD@kd}yhuJ7p+-7Vhzj>K*xj&jN7+}60w%JZlS zMTB1pmi2-^3ae*VP4)6?GFPEp%ColD8oz%{W*}nVS8}f0uH+{9#He=67_Cd{05gPT zY6LV0P>2EJp~<}KwiZw+4?uD4u{e4DtxT?g2X0<4Qa*qHOA)}+2Z3MY{E+J|#2&m2 zqhu^zVSk<@BgI{<1OL5Tt`%8kcpc?rpP??A$`=X7Zv#6?X%b@EX6omfay$iG&{5yI6oMtolFX?4|v}}muy+Y<$%erbe zeR`W3RRcDrggC_XR!Mi&&5D6p&9A?s4(?&{$#lE-it3aqi+*~;oKAcCit)E%$Ul#j z`Rqls!;t`<#~h#I0vtF!^y>U!9(rs9kG=!>V~ z_0Q&*mkD1%s%Ys*=p^bFb);E?F9$V#3Y#Keev`KI%y=m0a0RAMcBEQiu(@6t%LxTLL;;J(O z^n}-`R{`c2dAC#1#>DD_Vbzc(QUhv8CZT%5Fv986314D~`0EzlKA)<0f*h7ppXx%{?-h*Y{drCIhEWpo#SWQSzQ!e<8aDgPe$0i@N<>nKCgrus)$O!G zHz5lqbS+>V;|WRqwe(jt!M1F(6}G^1E}Lv4xwD#mxkLxSHCt4~n+u2C)0Fba*yh-H z^%5eT$wXPzvV5NOSa(*c@WmKglERngU)@^`i9CJcXKu?RQX8%0a({!DgC8dNP(AFh z6S&31E&$i)fLwg^)?|AYf$?LT;I1y$RCCy5HFpv;JC)GCVW4 zLZ5%fyrG)c#H>(LGlN|wJ+Gu}r-#<0JDSa%JN3u-Wm2=pxQW%n!z&h=(+X5gKr7bG zj%GiC-^n+(u~Fj6NGX;}T{kf2vEQDq4!GMb<}j9jJP>2ZIW z1Qsv-gH|lQ#P4Qa`SaE5YWXpGzhuQDaQ1{cWLY6tXT<7d znkCAH-KzE_-C zNoEaIRR@g2R5JpcMnzw67~*N0-M zAK1G>2`KiLOsfns_~a=4tWBup1-qg9sAPZDR;5IUEo0I5ny%;@ZArbdvoF8}oU?p13g zkyk$a7n~HW@`eVzl!s8dkEtk?@$j8moR75898SGPtqTV$RS2L-(yLe8!JS!cVn*_k z`?JB+H+4m8`XAOoxk#Y{>^XN1mN z2TJ`#*|By;(qmKb*~+(I(?=b?%lY>hVr6Q+aev@Xup-TAhWTn%FA$`KBY(1~xxgbZ zs`=0_m*nSHqZc|i=~mA9P3*gu_3d(B`g)+c5^P8z*3_uv)SgO#4k;SEf~d^`Z$9NR zU5HcboAhFHtT%N3s`bp{Us~4$7wv_>wp#piIE4E{Nq5)jI)P$Si4+5r zi`2?5R({q7)Xcqa?sGN4BZvrb2=sDbZzHoxKvIO3GwM0t^uw3(I=%wR3*hw6Vxc@0 zwEtLm`D5o=g=Wy4nZ(RGc!q~S8F(w#$28T}IRsF}Qii%9$AX12hW_qO^p2j@0PA#) zf=?$x^ImZrM%D8jGpwtQ=gu0d0&1Rh65FLIY2ldkc<`dTE_CGQ8{^66>?+>wo31k+ z48);X4-er=Jb}h3~vo#y0F%c>`jCvgIt16?-Q_Ns{)B z86hPOXSk9)rt#&1wxFHTtt!d=v~W_KUa$i`JpxQTb$VJg#T}~d${xi<@?F83Nz#bz z%aoKnAD(>6&>!uz+PUM=s`3>Jx@t-8qj$$6gtXYFJQ%ANLHB+L<<&knmxeZPOqhofi}8D#r*8WD}6_ z&Ft1)Et#K{7hYCy6cpUyoDgDK@{OFiB|4uIoL3G=vVp?Vdm>uOqmKUD1-3&uzxS_( zM*3>0-YBb(7ft%g)CxE$BMb`0_6nZm>s5BppS{AMD9WRJt6szn}-M$r~Oi;45S$U=@Uj`kqv{kPG_CfjQQ{g zgV^SDmXlu!C+z{kST+ZyIZx<()^S|foE_G>V|zaojG^2@UB{pX$Rk5);&CdI=tum_UJnm?q!Ua{o2Y5e9wr*ZvyEYJLG-+=FX&f7v&J>DF@FF$7 zj#9Cl*|8$ZJuNl&S2nW`&S}&(c^ZeC*S7VJ$GAdzTuhY*SxQ%-!8g+jZ@AwKoph=Q z2$qkgEPRd?w&HLSeYFCCQ@;PgP6v-K=9x8nlrMG|k#e*=YUur?1LsWW98*=ew~D|X|D!*Vm?BQO z>Nj^g?!m-z>e#yUlL;Dpk|wLhsHCi?Tw^oTH7}N+U5km@6h0PyFg_RM=*;VS|K*r# z76P$`kCdA)8R~gvieJU(lR&+cO+GK$!6h#|jmog}ggthwJ0-i~^gZ2X>PvGj& 
zc#o`MwtIwIAg1lM+C_sVllleo_yTls07vs%+;I7s;+Xg~!Wz^AtwDG^uK7obVY|kr zbdL4)f%xeW)(n1H!8Y=(GmQxbE3p^%g-GdBsTZ%FZ%3=kaAvunjgUy~1PvQUc6I4R z=V0@m2fE!KM+e<|vN)Dz%VA+ZIL*lgexs@>)@ul`h`3)G-P)j-vMS=zp63|VZN-}1 zH3f-2pL;7ptEF)>^!3e)8(T{ub7$o*M=YhFpR}dX<-D_10h+Kg)(8H50rrCh`aL-k zuY+Nyo~NJf8uVPB&`lrH&{2&CU!+~;<6xVXa&NOlKXSn8+)cj}iUl)tfqB8NF!d}> zVN=bRu3mZV%IF2i!0S3)>euwv25KYJ;@Bp*2Fxhd*!W`j<0_3(7}VzDC@c!onl+2F zX(P?}Z=95^g8w-Wypb?Z1|Fu?AlcW%D_1<4d`}hh`oEDQ@b&s@d_gnejmct)-1PNa zmb)hV=(_usZ=}D?Z)_J{Re#>ON^e9ntT(2GcRy@}W!l;zSHvOcG1ePOd5ix{_AoeY z{bU2JO+$IkSc(KjRK??~6Eh>9fW z*4AbCKf#;NN`L!?xH=6s3T@^Il1BE^B~-QTqO(J6ie|k=}0B7rBr2(~{Ny4Via8c40kdJ`bW&*DUM}@t7(o-=_3Y z?o-_St#;uLdyqB4^+HK&hKz+M*FaMvFyi?0oM@_Pq#f>}DWX`{^V29aEd{yd0usm^ z5m>pf3`hrjL2nOiCf^5?$LTnm`=a~(t`lmVw9K<^65&phC~Mo3xxsq(hQaxY0W0Ic z4g`4u!=lGsKYZ@^$W_F*v?^ht>g+l_XTypWz8`*FS&`^FoOHm^5O?d(l4+V*O1>jGev^^aAug60HY^@oM7pi3iiL zoBVn6Lbc}H#9Jil319zEtN>}klwu#k$WycbJ+ARlmME3-VX<*y*U2GlHlu7G##A2! z(fmkt-0Vjthc2Q7+h=1U3Nw~sJY-jISK;1Tw9U)1E|7VY$Qd7pr~PJIThF9sIPi~& zvK?#tF_W29Z~a=WnT+Y$y8>ec;v(1$G%^L zyL!jP%60Iv_xa^LL`7WP-O|bn0QB7YFzp6kIgkRF9|ChChojxmgq2RR5f?LKsOpn| z7R|htMegfhp`(i~w|S>Uv}b)o9ZJH~F}b(?ppm|RhNS^mKWLRz6|8mfxPLy<^KN(} z<*$6ptP@5X?gPuu!h7wOlaJb?-4luf7`s_ z;)Q8#I-J(UMwUeVpF|nANck=0ByYwN@#mUPJ1rg;>gi<27EJST-IgLUm>f*acRAcj zpn4h}Z>|>xam9m2%XPda zx+Im)q`=3fTgw{m0wnuQ7H!kp;`sUY)N(QQ!)PR~=alb|1HP#~WWDm?^^Z8{B%hIz zH)4mSw8Zsf9>ec*Du?1<)g=4OmpxIv_&KW~0CNS6Z7to9Pbjq{@?TfNxO@cTnq9e? zzF9_A7bVm*f40pBH~PoVjgFtXD;EI4@90Xf(S5{x4`MPX$ghdNoBdUV-VNLR0#$B0 zd;=E1@Dz`hq3|4;SLS@%@^t-c^5wy0!;foNzf+GZ)OS>p8XhclA_?N&ANZq}qriMT zumTxKfhYOOiIhA6Af8AZbC44Dy-oN?qsF>$zFw^inQTLT^x~R# z>KCouk=d-~svD2H3>9atgh)r<>rCR-)^od+^QQp-tmBtV>0OSZ&XV~88iq4kEv3a| zgvATXmL%MBvmut>VLHD&gS6tnsRtZ?WIZaJzAgi^%Gae`(}bxFFwlom>M$ZP0rG|1QLN)PybiN2^H| z#0;x5fllB$z)B^`dVkq)s7s^Km8Lr2G5VVdnuBAzppoe=`+&>T|1QRMO<1v)bl@m0 z75Sdb5VQY^O4uJA^E$J9e-QJ7^F#TiTI(ZXSQ!uXdBv8Ww)gcJdeEn^jU#b4g!f2^-^<|XDvra!opb0#3ZmmzFvzqZb-E3no(CuDZq+-G0 zY~ASB6s3Z9aTh9d542WAF=-8mT?ldFjrBcR8t#wI0M1Nl8Asg60P`5BIE@}Dq_u#p z8sD4#brW0GHafDPTWWc2YQ`1871Jf%b=gq$c_$o%h==Y61Nqm@fH3F!!z}s46{nDr zOKSEhOgI{~yCB1_70?=LM=oTUrNorL8Iowc7k&H9;Yis0M12yuQ8B8#oQcgd4Cu+s+a&X#u*Y#$}qchR@hRS2&M;4iQP1i-i$Dp z4{(V##CeeFJYs4NSiB=z((=C(zkqqF=X`b&SFG6GkJ8kEgK#gAI#(qNWMSFYjDMs+an~}NgIl(ql+EA_N2%|v4NTWrF^P1=UQ!` z+uGY`@rcNd`u(@XNAUIZuGrSY+s3e`m=o)HknERYZ=Q_>c{oCsd)7vOWo}yVXkX=w z3GEz|h*Qqc9D)Op&Z`&|ZqZAFXJ0!#aNL(Qgp*H~`@ics6qyk>?GxM;N&N_XPkiC* zwaeIF(I;QJU#OJRl;k~sRIjm6$3FyFI&yvBb)MQBQ|9hj4n12o@-n&CIsFN<*ax-E zLpF`}<2|2G>x&m@nq2}f>ww~PsZhYT)JN7G|i); z4`Ub3!cH~>(jaNHA3Pr$BN$n}pXIjBo!8|JR|s{AKPO}Bg#H~q|F<5y_}iZPbyl0~ z677>D?NlOJ^e)^_gqsWzK=Ue)e*EgP#!;C4*GX?jZkP257E5jy+Zh)r9aM=zin#%n z%9vw>=H*7>HVsI~^#h<^(w;@)hcMR*F!x0dyaVn&qRe7H3)wXAp*=#bH?ua|n8sZ} z%WvoPgBUx00A}xlIsrTvb>*nX-Frgj=Zo1VHiGs9>S^y0IAcgBEj&eAM7E`!RIpTv$Obb(nMkc|h2RaCP znpFa<7>r&y0HS3Q8a<1h0y|w1wZ(CdN&8>=&w}f^)A5D$aZ?gxxPc5gGkqIIrTr}Y zoPOv=n=lKHwlaS&SKui6evmW{gb44&y=*p}X`yHIZ--u)iT*GMb%G%|iK%8(qC6Nh zbMQ5V5QKbX->+G`P@I{+D+n7{r4EnaCr;>M$GB z=?LPeH-d?*rkY!)PsSnA0iWxqU~-Y(tDfTt(OdKA2)NlSAL#F5ckcF1^~pC6IEJH_n^(jy`IZEp{w7 zTDS$%M>cXEdQCgy=>NC z)z)4|OJ5uPo^O(wWpe|Z4aujO%Z4!B$7m_ zx%b^s{dGq}^P)i?VmF#uc;9=gh=OnUu|&L9=AZdAxfFA$5GHbr*3J1!i#+yd8UEl@ z;yvXxTGJeEnx>;-lqCy-ARHU*3f49LFxwmK(gnyBZ&Dc;c^1BN^+m#FY@ zQ;0KC12m$ritKSNT)t)%V3hOYBNO*)kEGJl&Yrpl#K^6;wX z5pz1Yq-Z)Qp~iMGz|bIVsH=D!KCUYwxOUTza zA^C{$0(1dzi9yp5VFB7GkqCAdE^TTT$R-fP%xP&b%P<2~WJW9E1&7prZ4Grzg1vUN zSdt=TF0?J>mtk%4y3(EWx6gvA01zsy&rh=QWH@@*d44sHxHWZc$3j2R^wuzQ<)cIj zj3X2?4*Q75yQ0L!VS!J#M`a!vQav#B9~s=}*x9{#jk6>|6iU$eNlvB1;hM0WLN*uw 
zwj4f#FAZXTB!Hycb=awbq$Dx+bD0j*aIQ^HQrZxAXCeA|j_MMv#%cp^Iq!Q(n182v zAc*Wd6xKB2B!%W~Q{i?V;$Y-%c|nVBZSDn|M_MO-$*4*(3C^c(a4Dnl!P zI#1Ikk;6Y^reBNry=(P=d#TfdQlf**^??ZR@<?o)Dc;t+?v~0Y;o`Jj{yN%-R-yn>O`26%V&3LY5t0s=6{pP8aj9>+B7j zUX%j&#I*~jGnnLIzYa;u?RSRJZ9?m__^WvN1phg&H?QT97WZA(yx^Ug(io%SU-vG0 zB<$WOoR#BOP(0;Gh7b0|~m7G8*XvhG@fJ zb`3%oi8~Y${kQ|$T>?nGLvL+EyVvbMpr)u9epmn1EQ(@g20D?H2^eupRmh1Tq)sG| zvp=C5Vl!N%Z4h?5yV112F*q((m3a37Nat|+mpou12K&EJjJza;OH13)<%Y%Pbh&La zjw{HqeaK2RM03e8SjI5vOfe(Jc5PvQI4%6vy-g~eE<)irZNXDS;UPf#w8jimn=F1y zI?F#ck@#o&?QE#4=JT3WvZ9m5MKvSn)Vgu_8~L3f*eU=sZ#~U@2{DdUG*7{s6Hu`Bo?)ZuI&06UjHx9?@6q)5PK1O#fod%9o1Vgh~ECJJ#T;O)hv(DUvjF0wvg zj=xLLwfpDOeUmWPtcppNgvQ6?c=GqMB`{8~C=~oLyEqdSr`&pf>U0Dyn4>u{U~NZE z{JDMjsVi7@`dxR^5MnQT^gx=vA1Q4PI|g*}1^19*3eD-8qsnMJ7 zt-Cl!*iGu^04+!BoSMW%iS<)jFzVl21^7U=3`v^ZB;Ie4lgE51lTzh$G+CxnN2lXkxd;C+Lg;_+{lQOY825)GP_$h#d2T zSNW#DhU}b@$C_Rz&dlf>4wzI)gX^3$?{-3A)@_u+fz5o6A-d|^)=ee)z z`rW_ly081XuaGbV2T6L&1tP~X`8BN)$n!%`Xuv26RaBS1o*nZfPJZB_Y@G#VRDO`N zQd0MB$Wp8-qcM2n1BI6=2OZ9UMB*!6k+q$1gIJ{VXjk`k9y2vNv+G>I%R^s~7(yX- zTtmi%t8|UDc+l&Ox*8|#^^U_Jg%mJokP@AAo?zgMZgycymrhVky+0tSthCWsW1Nm* z_I)SO(me0&(&gyU(C1SpyHO0ytwSmr$AId19GM=myyJP|43$OIw2e2ycRX_`2~cxc zzj0$X76>N7W>$pzi8#$;8{-si>Nacw~Z;NW0TYb%&9+T~A4HMsI>c#~x8Y(1d2`7WcUe6JSj%yP(up7bVbf;H}et zyWI50<%Z+_QYvfI>jCi^%|pV_J!M&`K}`Rhd0d;V8B$65vqalTudh6D06EjE-=hZB z@9Rofva9h6e!HJaS~9OK1TP93U3tqZXB$#I=$9-L92<*|WLD|hCYT$6M3_`G=66=2 zT{5aPwHmQKxw9c-(=<$;rSufPkemO-fD~=fgmHW^HXr&t!davMadmMPs&9qzd#GWX z2$WfxfSUAtN~b*KtkSGgUD9&74+ppw#AJwCSW6g%S?g2_*~Cq4@39j?u%OhGv~&=(|1PTvq$jpHS#CtaThz1Zr1<@0Au@DVX#IU>6iTGG4XknmhZC>F5Wpp~~tL zOOjmcGg5d~6MTji+3cB=Y z+W1`1rCg$+F1mTsSgvoo4|G?c`EIli{*35H^!Z7vryaJ_9^u|1vC^>el3J8A+B&#o*P>J(Vlb%= zQ6o47DY4%ZbqCIxMmvE!8cFM1CcE6-LA z_J(;VXevSc@@}fUzs9ZA$ld$SotL{%F3Iz(y;i$h^bL+|h#Z?Nx(vA{PTM-0^#+P~ z0(W1!SbyWLbu5=VDI?E8$X|bl*jDa5{IQbsh1-;8t_6x=ozK$U{S74za!%xhYPTs# zp80vY_{Qvkr!}XRx$a_b=27f!B<$11|}!v9Uh7Z2dsX1`nF$120` zO@}V^pe7TmZ<5MBocO!Hub46_w13RH{3bmbF=GizV;o?ggLPi#@O}%sRaRDG*hfo3 zSLKAZpHgmapyg5J5;8_+Q+<%hAa8_N0i@_uPaaQ9SXni7IrJch6tQN!C5fPjWn3=op;oe@Hg6 zgMu_fInMR#di|v?1=2)dSXYl&RreISp7I_`QOzFf63m+pC1(2_92g{!sr68AjuWlz z2%sUTa)U8*huFd+(I)ywGyQg$yVc66>1^V0Jgrk>pd7p1w{^Eg;_9`nY;$>_AqNpu zZ9F9(vlP>l=@+Kd(u-r>x@U?7`;*eo^23V-VH$BZE<5%Xy{kyN^C=;E@62vsVF2ZA z825!&H;nb$>J%Kzj_2=}zK)cz%(?X-@9)o_n_BG)&3QruNRxw=r!a7<^xo?SwQUnh zG50)pb5CEAnmB>&JEf4P- zI2u^>7zd#=W-=i>3ouHc>=Wo)lDoiu1$8GuUhj{$p{=>r-(-eHL+3yBK^0T#@Y-?N zSl z&LkEAS*-VdnDe046gd<%LQ<(zUEA|Q=|e(e%91z)&JctsRDv?RYM_V?p04y?43Ct zbI)7^cuK?m=y+3V;P`6=wzbd2%`5DF9B?X%qAJ9>Zv4#YHun_cc#PusTzk z$8h=TNLl*KY3v&W=P#H()R%i+PAH;O?EU9LdjmFA)h2cwi4URFeO&rt%F6ge^=N_$ zNtQO8r%i#UEDX$4?&hixuSjj!=9wR7V111##Hdf+dT_&A%<}Y}8aCB<5fgVY=*!j5 zTKg{FZ&CRt)5|$g$X3MZkc>v+$dAvP3&t!fXV&lcQ{T~(L#m^u&73uw?GG=EZ!NAx z7Zi-?i82;QZ)IRp9bc(q(c*40uNaL~7S)_%5`NO|?7HxN&%8k(5_m$bC;;|1dLh-B zHAFpcE`^`gsJW@-i$gbIROZ_EdxmH`{n5kby9S6NK=}suCm4$oLLqt0)hc+ZKjJcu z+dTO}&MLx%7<*P_PIODxd?zS}1E<>lk#p58{ZUSzm2G2QteDN1_Vr4Cj%<9SE(6R( zpd$g!iQuCoR&iCBcZg_EitA|V$u7CUP{paj^KN`}n?Z74PlK$xK84wZ0)zYD&%O=T z&OhO4YfTQCoD)nfF0u!r-HD5Q^CLkW~-cHeNKh`GnOoBz4&9-|^(*wl)` za{*Ks1F%w*{stj3e*c}Aepz$fuXRevEfZsplW$z}Qa+_*8M)*&7LQ=0LM0KP7Cmyg zx?j6Pr)0uCfcz^5xIko#cLq76YwdL9P4K607XWnl9yM*r5+OAsv+fawC06@ssFu3Hf&1k&>xA6>p5M2PJ9m4onilZgUedXV@- zE-z_c9m&w?mC=>+BMa4o+d!=;ETqwv8gykm@EqM-Qz<7*=Ja=fP#yLDfW=S2^wo0u zD-wwH5Jxirwv4Dx8hY+c7_O!Xr1C>$f_P{;TBV}<;PYhHQ4FENAo&2muECL?M@QZ7 zN{Y}evH_wn!HpQpe)59}s4iHoprDRaip&T-1_7LdB11=k4C${P0>Q2Fq1s)47pzyL z->!HZo!iI^3LZspF2eTFpTkgDP5lujQA@M&wZw9Hb0~ 
zWkJ~KNLxy0wP9R4eS9SPI!WG}1gTKVjqvyo2W8Jd9H*G?s{o|0zkS?qD`j;fN8 zI<87uQe?u&ga~APNeJ|=U&}C6POm0k_Xh~i239dXnJHvLEOm4x($nLgIXFz01l{Wk z8urhW{D_ieLcNt>Bw-zg=GNCkm5Z_6aw^H|Sn5eW@L;0@0bIWkyQtd`qyE8qrykp3 zKE3H7Qv&G2S%_SlpfYlvXl)17QH8{BiJ>w0kvf}z^Ae_bYkP9Y{4=E;L!Tg=S|ea| z`G$K}@z(xAPCJtCG_m4k#eu#890vJ@PrVCIC!KHmxfeC20N3DwCkd|N1?@RE5n#IA z_YXK}MMyb1IA6q>H(EI$7E2R_K0nB!G) -> vec3 { + let s = sin(i_time) * sin(i_time) * sin(i_time) + 0.5; + var p = p_; + var d = length(p * 0.8) - pow(2.0 * abs(0.5 - fract(atan2(p.y, p.x) / 3.1416 * 2.5 + i_time * 0.3)), 2.5) * 0.1; + + var col = vec3(0.0); + // star + col += 0.01 / (d * d) * vec3(0.5, 0.7, 1.5) * (1.0 + s); + + // spiral + d = sin(length(p * 2.0) * 10.0 - i_time * 3.0) - sin(atan2(p.y, p.x) * 5.0) * 1.0; + col += 0.1 / (0.2 + d * d) * vec3(1.0, 0.0, 1.0); + + // background + for (var i = 0; i < 6; i+=1) { + p = abs(p) / dot(p, p) - 1.0; + } + col += vec3(0.001 / dot(p, p)); + + return col; +} + +fn shader_main(frag_coord: vec2) -> vec4 { + + let uv = (frag_coord-i_resolution.xy*0.5)/i_resolution.y; + + let col = render(uv); + + return vec4(col,1.0); + +} + +""" + +shader = Shadertoy(shader_code, resolution=(800, 450)) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_circuits.py b/examples/shadertoy_circuits.py new file mode 100644 index 0000000..e8d6be6 --- /dev/null +++ b/examples/shadertoy_circuits.py @@ -0,0 +1,68 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// migrated from https://www.shadertoy.com/view/wlBcDK, By Kali + +fn hsv2rgb(c: vec3) -> vec3 { + let K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0); + let p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www); + return c.z * mix(K.xxx, clamp(p - K.xxx, vec3(0.0), vec3(1.0)), c.y); +} + +fn rot(a: f32) -> mat2x2 { + let s=sin(a); + let c=cos(a); + return mat2x2(c, s, -s, c); +} + +fn fractal(p_: vec2) -> vec3 { + var p = vec2(p_.x/p_.y,1./p_.y); + p.y+=i_time*sign(p.y); + p.x+=sin(i_time*.1)*sign(p.y)*4.; + p.y=fract(p.y*.05); + + var ot1 = 1000.; + var ot2 = ot1; + var it = 0.; + for (var i = 0.0; i < 10.0; i+=1.0) { + p = abs(p); + p = p / clamp(p.x*p.y, 0.15, 5.0) - vec2(1.5, 1.0); + var m = abs(p.x); + if (m < ot1) { + ot1 = m + step(fract(i_time*0.2 + f32(i)*0.05), 0.5*abs(p.y)); + it = i; + } + ot2 = min(ot2, length(p)); + } + + ot1=exp(-30.0*ot1); + ot2=exp(-30.0*ot2); + return hsv2rgb(vec3(it*0.1+0.5,0.7,1.0))*ot1+ot2; +} + +fn shader_main(frag_coord: vec2) -> vec4 { + var uv = frag_coord / i_resolution.xy - 0.5; + uv.x*=i_resolution.x/i_resolution.y; + + var aa = 6.0; + uv *= rot(sin(i_time*0.1)*0.3); + var sc = 1.0 / i_resolution.xy / (aa*2.0); + var c = vec3(0.0); + for (var i = -aa; i < aa; i+=1.0) { + for (var j = -aa; j < aa; j+=1.0) { + var p = uv + vec2(i, j)*sc; + c += fractal(p); + } + } + + return vec4(c/(aa*aa*4.0)*(1.0-exp(-20.0*uv.y*uv.y)),1.0); + +} + +""" + +shader = Shadertoy(shader_code, resolution=(800, 450)) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_flyby.py b/examples/shadertoy_flyby.py new file mode 100644 index 0000000..73a015f --- /dev/null +++ b/examples/shadertoy_flyby.py @@ -0,0 +1,190 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// migrated from: https://www.shadertoy.com/view/csjGDD, By Kali + +var det : f32 = 0.001; +var br : f32 = 0.0; +var tub : f32 = 0.0; +var hit : f32 = 0.0; + +var pos: vec3; +var sphpos: vec3; + + +fn lookat(dir: vec3, up: vec3) -> mat3x3 { + let rt = normalize(cross(dir, up)); + return mat3x3(rt, cross(rt, dir), dir); +} + +fn 
path(t: f32) -> vec3 { + return vec3(sin(t+cos(t)*0.5)*0.5, cos(t*0.5), t); +} + +fn rot(a: f32) -> mat2x2 { + let s=sin(a); + let c=cos(a); + return mat2x2(c, s, -s, c); +} + +fn fractal(p_: vec2) -> vec3 { + var p = fract(p_*0.1); + var m = 1000.0; + for (var i = 0; i < 7; i = i + 1) { + p = ( abs(p) / clamp( abs(p.x*p.y), 0.25, 2.0 ) ) - 1.2; + m = min(m,abs(p.y)+fract(p.x*0.3 + i_time*0.5 + f32(i)*0.25)); + } + m=exp(-6.0 * m); + return m*vec3(abs(p.x),m,abs(p.y)); +} + +fn coso(pp_: vec3) -> f32 { + var pp = pp_; + pp*=.7; + + pp = vec3( pp.xy * rot(pp.z*2.0), pp.z); + + let ppxz = pp.xz * rot(i_time*2.0); + pp = vec3(ppxz.x, pp.y, ppxz.y); + + pp = vec3(pp.x, pp.yz * rot(i_time)); + + var sph=length(pp) - 0.04; + sph-=length(sin(pp*40.))*.05; + sph=max(sph,-length(pp)+.11); + var br2=length(pp) - 0.03; + br2=min(br2,length(pp.xy)+.005); + br2=min(br2,length(pp.xz)+.005); + br2=min(br2,length(pp.yz)+.005); + br2=max(br2,length(pp) - 1.0); + br=min(br2,br); + let d=min(br,sph); + return d; +} + +fn de(p_: vec3) -> f32 { + hit=0.; + br=1000.; + let pp = p_ - sphpos; + var p = p_; + let pxy = p.xy - path(p.z).xy; + p.x = pxy.x; + p.y = pxy.y; + let rxy = p.xy * rot(p.z + i_time* 0.5); + p.x = rxy.x; + p.y = rxy.y; + + let s = sin(p.z*0.5 + i_time * 0.5); + p.x *= ( 1.3 - s*s*0.7 ); + p.y *= ( 1.3 - s*s*0.7 ); + + for(var i=0; i<6; i+=1) { + p = abs(p) - 0.4; + } + + pos = p; + + tub = -length(p.xy) + 0.45 + sin(p.z*10.0) * 0.1 * smoothstep(0.4,0.5,abs(0.5-fract(p.z*0.05))*2.0); + var co = coso(pp); + co=min(co, coso(pp + 0.7) ); + co=min(co, coso(pp - 0.7) ); + + let d = min(tub,co); + if (d==tub) { + hit = step(fract(0.1 * length(sin(p*10.0))), 0.05); + } + return d * 0.3; +} + + +fn march(fro: vec3, dir_: vec3) -> vec3 { + var dir = dir_; + var uv: vec2 = vec2( atan2( dir.x , dir.y ) + i_time * 0.5, length(dir.xy) + sin(i_time * 0.2)); + var col: vec3 = fractal(uv); + var d: f32 = 0.0; + var td: f32 = 0.0; + var g: f32 = 0.0; + var reff: f32 = 0.0; + var ltd: f32 = 0.0; + var li: f32 = 0.0; + var p: vec3 = fro; + for(var i: i32 = 0; i < 200; i += 1) { + p += dir * d; + d = de(p); + if (d < det && reff == 0.0 && hit == 1.0) { + var e: vec2 = vec2(0.0, 0.1); + var n: vec3 = normalize(vec3(de(p + e.yxx), de(p + e.xyx), de(p + e.xxy)) - de(p)); + p -= dir * d * 2.0; + dir = reflect(dir, n); + reff = 1.0; + td = 0.0; + ltd = td; + continue; + } + if (d < det || td > 5.0) { + break; + } + td += d; + g += 0.1 / (0.1 + br * 13.0); + li += 0.1 / (0.1 + tub * 5.0); + } + g = max(g, li * 0.15); + var f: f32 = 1.0 - td / 3.0; + if (reff == 1.0) { + f = 1.0 - ltd / 3.0; + } + if (d < 0.01) { + col = vec3(1.0); + var e: vec2 = vec2(0.0, det); + var n: vec3 = normalize(vec3( de(p + e.yxx), de(p + e.xyx), de(p + e.xxy)) - de(p)); + col = vec3(n.x) * 0.7; + col += fract(pos.z * 5.0) * vec3(0.2, 0.1, 0.5); + col += fractal(pos.xz * 2.0); + if (tub > 0.01) { + col = vec3(0.0); + } + } + col *= f; + let so = fract(sin(i_time)*123.456); + var glo: vec3 = g * 0.1 * vec3(2.0, 1.0, 2.0) * (0.5 + so * 1.5) * 0.5; + + let glo_rb = glo.rb * rot(dir.y * 1.5); + glo = vec3(glo_rb.x, glo.y, glo_rb.y); + col += glo; + col *= vec3(0.8, 0.7, 0.7); + col = mix(col, vec3(1.0), reff * 0.3); + return col; +} + +fn mod1( x : f32, y : f32 ) -> f32 { + return x - y * floor( x / y ); +} + +fn shader_main(frag_coord : vec2) -> vec4 { + var uv = frag_coord / i_resolution.xy; + uv = uv - 0.5; + uv /= vec2(i_resolution.y / i_resolution.x, 1.0); + + var t = i_time; + + var fro = path(t); + if (mod1(t, 10.0) > 5.0) { + fro = 
path(floor(t / 4.0 + 0.5) * 4.0); + } + sphpos = path(t + 0.5); + fro.x += 0.2; + var fw = normalize(path(t + 0.5) - fro); + var dir = normalize(vec3(uv, 0.5)); + dir = lookat(fw, vec3(fw.x * 2.0, 1.0, 0.0)) * dir; + dir = vec3(dir.x+sin(t) * 0.3, dir.y, dir.z+sin(t) * 0.3); + var col = march(fro, dir); + col = mix(vec3(0.5) * length(col), col, 0.8); + return vec4(col, 1.0); +} + +""" +shader = Shadertoy(shader_code, resolution=(800, 450)) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_gen_art.py b/examples/shadertoy_gen_art.py new file mode 100644 index 0000000..4e858f5 --- /dev/null +++ b/examples/shadertoy_gen_art.py @@ -0,0 +1,86 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// migrated from: https://www.shadertoy.com/view/mds3DX + +const SHAPE_SIZE : f32 = .618; +const CHROMATIC_ABBERATION : f32 = .01; +const ITERATIONS : f32 = 10.; +const INITIAL_LUMA : f32= .5; + +const PI : f32 = 3.14159265359; +const TWO_PI : f32 = 6.28318530718; + +fn rotate2d(_angle : f32) -> mat2x2 { + return mat2x2(cos(_angle),-sin(_angle),sin(_angle),cos(_angle)); +} + +fn mod2( v: vec2, y : f32 ) -> vec2 { + return vec2(v.x - y * floor( v.x / y ), v.y - y * floor( v.y / y )); +} + +fn sdPolygon(angle : f32, distance : f32) -> f32 { + let segment = TWO_PI / 4.0; + return cos(floor(0.5 + angle / segment) * segment - angle) * distance; +} + +fn getColorComponent( st: vec2, modScale : f32, blur : f32 ) -> f32 { + let modSt = mod2(st, 1. / modScale) * modScale * 2. - 1.; + let dist = length(modSt); + let angle = atan2(modSt.x, modSt.y) + sin(i_time * .08) * 9.0; + let shapeMap = smoothstep(SHAPE_SIZE + blur, SHAPE_SIZE - blur, sin(dist * 3.0) * .5 + .5); + return shapeMap; +} + +fn shader_main(frag_coord: vec2) -> vec4 { + var blur = .4 + sin(i_time * .52) * .2; + var st = (2.* frag_coord - i_resolution.xy) / min(i_resolution.x, i_resolution.y); + let origSt = st; + st *= rotate2d(sin(i_time * 0.14) * .3); + st *= (sin( i_time * 0.15) + 2.0) * .3; + st *= log(length(st * .428)) * 1.1; + + let modScale = 1.0; + var color = vec3(0.0); + + var luma = INITIAL_LUMA; + + for (var i:f32 = 0.0; i < ITERATIONS; i+=1.0) { + let center = st + vec2(sin(i_time * .12), cos(i_time * .13)); + let shapeColor = vec3( + getColorComponent(center - st * CHROMATIC_ABBERATION, modScale, blur), + getColorComponent(center, modScale, blur), + getColorComponent(center + st * CHROMATIC_ABBERATION, modScale, blur) + ) * luma; + st *= 1.1 + getColorComponent(center, modScale, .04) * 1.2; + st *= rotate2d(sin(i_time * .05) * 1.33); + color = color + shapeColor; + color = vec3(clamp( color.r, 0.0, 1.0 ), clamp( color.g, 0.0, 1.0 ), clamp( color.b, 0.0, 1.0 )); + luma *= .6; + blur *= .63; + } + + let GRADING_INTENSITY = .4; + let topGrading = vec3( + 1. + sin(i_time * 1.13 * .3) * GRADING_INTENSITY, + 1. + sin(i_time * 1.23 * .3) * GRADING_INTENSITY, + 1. - sin(i_time * 1.33 * .3) * GRADING_INTENSITY + ); + let bottomGrading = vec3( + 1. - sin(i_time * 1.43 * .3) * GRADING_INTENSITY, + 1. - sin(i_time * 1.53 * .3) * GRADING_INTENSITY, + 1. 
+ sin(i_time * 1.63 * .3) * GRADING_INTENSITY + ); + let origDist = length(origSt); + let colorGrading = mix(topGrading, bottomGrading, origDist - .5); + var fragColor = vec4(pow(color.rgb, colorGrading), 1.); + // fragColor *= smoothstep(2.1, .7, origDist); + return fragColor; +} + +""" +shader = Shadertoy(shader_code, resolution=(800, 450)) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_clock.py b/examples/shadertoy_glsl_clock.py new file mode 100644 index 0000000..8ebb5c2 --- /dev/null +++ b/examples/shadertoy_glsl_clock.py @@ -0,0 +1,94 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ +// source: https://www.shadertoy.com/view/MdVcRd + +// See https://www.shadertoy.com/view/ldKGRR + +// Computes a smooth-edged diamond pixel value (Manhattan distance) +#define P(i, j, b) \ + vec2(.1, b).xyxy * smoothstep(0., 9. / R.y, .1 - abs(i) - abs(j)) + +// Computes a segment value (length = 0.5) +#define S(i, j, b) \ + P(i - clamp(i, 0., .5), j, b & 1) + +// Colon render +#define C \ + x += .5; O += P(x, y + .3, i.w / 50) + P(x, y - .3, i.w / 50); t /= 60 + +// Hyphen render +#define H(b) \ + ++x; O += S(x, y, b) + +// Computes the horizontal and vertical segments based on a denary digit +#define X(i, j, b) \ + S(x - i, y - j, b) +#define Y(i, j, b) \ + S(y - j, x - i, b) +#define D(n) \ + H(892>>n) \ + + X(0., .7, 1005>>n) \ + + X(0., -.7, 877>>n) \ + + Y(-.1, .1, 881>>n) \ + + Y(.6, .1, 927>>n) \ + + Y(-.1, -.6, 325>>n) \ + + Y(.6, -.6, 1019>>n); + +// Two-digit render +#define Z(n) ; D(n % 10) D(n / 10) + +void mainImage(out vec4 O, vec2 U) +{ + vec2 R = iResolution.xy; + U += U - R; + U /= R.y / 3.; // Global scaling with aspect ratio correction + O-=O; // Zero the pixel + + float x = U.x - U.y * .2 - 2.8, // Slight skew to slant the digits + y = --U.y; + ivec4 i = ivec4(iDate); // Convert everything to integers + int t = i.w; + i.w = int(iDate.w * 100.) % 100 // Replace with centiseconds + + // Seconds (preceded by a colon) + Z(t % 60) + C + + // Minutes (preceded by a colon) + Z(t % 60) + C + + // Hours + Z(t) + + // Smaller digits + x /= .6; + y /= .6; + R *= .6; + + // Centiseconds + x -= 14.; + y += .53 + Z(i.w) + + // Day (preceded by a hyphen) + x -= .8; + y += 3. + Z(i.z) + H(1) + + // Month (preceded by a hyphen) + Z((i.y + 1)) // Is it a bug in shadertoy that we have to add one? + H(1) + + // Year + Z(i.x % 100) + Z(i.x / 100) +} + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_flame.py b/examples/shadertoy_glsl_flame.py new file mode 100644 index 0000000..a81172e --- /dev/null +++ b/examples/shadertoy_glsl_flame.py @@ -0,0 +1,81 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// https://www.shadertoy.com/view/MdX3zr + +// Created by anatole duprat - XT95/2013 +// License Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License. 
+ +float noise(vec3 p) //Thx to Las^Mercury +{ + vec3 i = floor(p); + vec4 a = dot(i, vec3(1., 57., 21.)) + vec4(0., 57., 21., 78.); + vec3 f = cos((p-i)*acos(-1.))*(-.5)+.5; + a = mix(sin(cos(a)*a),sin(cos(1.+a)*(1.+a)), f.x); + a.xy = mix(a.xz, a.yw, f.y); + return mix(a.x, a.y, f.z); +} + +float sphere(vec3 p, vec4 spr) +{ + return length(spr.xyz-p) - spr.w; +} + +float flame(vec3 p) +{ + float d = sphere(p*vec3(1.,.5,1.), vec4(.0,-1.,.0,1.)); + return d + (noise(p+vec3(.0,iTime*2.,.0)) + noise(p*3.)*.5)*.25*(p.y) ; +} + +float scene(vec3 p) +{ + return min(100.-length(p) , abs(flame(p)) ); +} + +vec4 raymarch(vec3 org, vec3 dir) +{ + float d = 0.0, glow = 0.0, eps = 0.02; + vec3 p = org; + bool glowed = false; + + for(int i=0; i<64; i++) + { + d = scene(p) + eps; + p += d * dir; + if( d>eps ) + { + if(flame(p) < .0) + glowed=true; + if(glowed) + glow = float(i)/64.; + } + } + return vec4(p,glow); +} + +void mainImage( out vec4 fragColor, in vec2 fragCoord ) +{ + vec2 v = -1.0 + 2.0 * fragCoord.xy / iResolution.xy; + v.x *= iResolution.x/iResolution.y; + + vec3 org = vec3(0., -2., 4.); + vec3 dir = normalize(vec3(v.x*1.6, -v.y, -1.5)); + + vec4 p = raymarch(org, dir); + float glow = p.w; + + vec4 col = mix(vec4(1.,.5,.1,1.), vec4(0.1,.5,1.,1.), p.y*.02+.4); + + fragColor = mix(vec4(0.), col, pow(glow*2.,4.)); + //fragColor = mix(vec4(1.), mix(vec4(1.,.5,.1,1.),vec4(0.1,.5,1.,1.),p.y*.02+.4), pow(glow*2.,4.)); + +} + + + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_fuji.py b/examples/shadertoy_glsl_fuji.py new file mode 100644 index 0000000..8cf3c62 --- /dev/null +++ b/examples/shadertoy_glsl_fuji.py @@ -0,0 +1,169 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// https://www.shadertoy.com/view/Wt33Wf + +float sun(vec2 uv, float battery) +{ + float val = smoothstep(0.3, 0.29, length(uv)); + float bloom = smoothstep(0.7, 0.0, length(uv)); + float cut = 3.0 * sin((uv.y + iTime * 0.2 * (battery + 0.02)) * 100.0) + + clamp(uv.y * 14.0 + 1.0, -6.0, 6.0); + cut = clamp(cut, 0.0, 1.0); + return clamp(val * cut, 0.0, 1.0) + bloom * 0.6; +} + +float grid(vec2 uv, float battery) +{ + vec2 size = vec2(uv.y, uv.y * uv.y * 0.2) * 0.01; + uv += vec2(0.0, iTime * 4.0 * (battery + 0.05)); + uv = abs(fract(uv) - 0.5); + vec2 lines = smoothstep(size, vec2(0.0), uv); + lines += smoothstep(size * 5.0, vec2(0.0), uv) * 0.4 * battery; + return clamp(lines.x + lines.y, 0.0, 3.0); +} + +float dot2(in vec2 v ) { return dot(v,v); } + +float sdTrapezoid( in vec2 p, in float r1, float r2, float he ) +{ + vec2 k1 = vec2(r2,he); + vec2 k2 = vec2(r2-r1,2.0*he); + p.x = abs(p.x); + vec2 ca = vec2(p.x-min(p.x,(p.y<0.0)?r1:r2), abs(p.y)-he); + vec2 cb = p - k1 + k2*clamp( dot(k1-p,k2)/dot2(k2), 0.0, 1.0 ); + float s = (cb.x<0.0 && ca.y<0.0) ? 
-1.0 : 1.0; + return s*sqrt( min(dot2(ca),dot2(cb)) ); +} + +float sdLine( in vec2 p, in vec2 a, in vec2 b ) +{ + vec2 pa = p-a, ba = b-a; + float h = clamp( dot(pa,ba)/dot(ba,ba), 0.0, 1.0 ); + return length( pa - ba*h ); +} + +float sdBox( in vec2 p, in vec2 b ) +{ + vec2 d = abs(p)-b; + return length(max(d,vec2(0))) + min(max(d.x,d.y),0.0); +} + +float opSmoothUnion(float d1, float d2, float k){ + float h = clamp(0.5 + 0.5 * (d2 - d1) /k,0.0,1.0); + return mix(d2, d1 , h) - k * h * ( 1.0 - h); +} + +float sdCloud(in vec2 p, in vec2 a1, in vec2 b1, in vec2 a2, in vec2 b2, float w) +{ + //float lineVal1 = smoothstep(w - 0.0001, w, sdLine(p, a1, b1)); + float lineVal1 = sdLine(p, a1, b1); + float lineVal2 = sdLine(p, a2, b2); + vec2 ww = vec2(w*1.5, 0.0); + vec2 left = max(a1 + ww, a2 + ww); + vec2 right = min(b1 - ww, b2 - ww); + vec2 boxCenter = (left + right) * 0.5; + //float boxW = right.x - left.x; + float boxH = abs(a2.y - a1.y) * 0.5; + //float boxVal = sdBox(p - boxCenter, vec2(boxW, boxH)) + w; + float boxVal = sdBox(p - boxCenter, vec2(0.04, boxH)) + w; + + float uniVal1 = opSmoothUnion(lineVal1, boxVal, 0.05); + float uniVal2 = opSmoothUnion(lineVal2, boxVal, 0.05); + + return min(uniVal1, uniVal2); +} + +void mainImage( out vec4 fragColor, in vec2 fragCoord ) +{ + vec2 uv = (2.0 * fragCoord.xy - iResolution.xy)/iResolution.y; + float battery = 1.0; + //if (iMouse.x > 1.0 && iMouse.y > 1.0) battery = iMouse.y / iResolution.y; + //else battery = 0.8; + + //if (abs(uv.x) < (9.0 / 16.0)) + { + // Grid + float fog = smoothstep(0.1, -0.02, abs(uv.y + 0.2)); + vec3 col = vec3(0.0, 0.1, 0.2); + if (uv.y < -0.2) + { + uv.y = 3.0 / (abs(uv.y + 0.2) + 0.05); + uv.x *= uv.y * 1.0; + float gridVal = grid(uv, battery); + col = mix(col, vec3(1.0, 0.5, 1.0), gridVal); + } + else + { + float fujiD = min(uv.y * 4.5 - 0.5, 1.0); + uv.y -= battery * 1.1 - 0.51; + + vec2 sunUV = uv; + vec2 fujiUV = uv; + + // Sun + sunUV += vec2(0.75, 0.2); + //uv.y -= 1.1 - 0.51; + col = vec3(1.0, 0.2, 1.0); + float sunVal = sun(sunUV, battery); + + col = mix(col, vec3(1.0, 0.4, 0.1), sunUV.y * 2.0 + 0.2); + col = mix(vec3(0.0, 0.0, 0.0), col, sunVal); + + // fuji + float fujiVal = sdTrapezoid( uv + vec2(-0.75+sunUV.y * 0.0, 0.5), 1.75 + pow(uv.y * uv.y, 2.1), 0.2, 0.5); + float waveVal = uv.y + sin(uv.x * 20.0 + iTime * 2.0) * 0.05 + 0.2; + float wave_width = smoothstep(0.0,0.01,(waveVal)); + + // fuji color + col = mix( col, mix(vec3(0.0, 0.0, 0.25), vec3(1.0, 0.0, 0.5), fujiD), step(fujiVal, 0.0)); + // fuji top snow + col = mix( col, vec3(1.0, 0.5, 1.0), wave_width * step(fujiVal, 0.0)); + // fuji outline + col = mix( col, vec3(1.0, 0.5, 1.0), 1.0-smoothstep(0.0,0.01,abs(fujiVal)) ); + //col = mix( col, vec3(1.0, 1.0, 1.0), 1.0-smoothstep(0.03,0.04,abs(fujiVal)) ); + //col = vec3(1.0, 1.0, 1.0) *(1.0-smoothstep(0.03,0.04,abs(fujiVal))); + + // horizon color + col += mix( col, mix(vec3(1.0, 0.12, 0.8), vec3(0.0, 0.0, 0.2), clamp(uv.y * 3.5 + 3.0, 0.0, 1.0)), step(0.0, fujiVal) ); + + // cloud + vec2 cloudUV = uv; + cloudUV.x = mod(cloudUV.x + iTime * 0.1, 4.0) - 2.0; + float cloudTime = iTime * 0.5; + float cloudY = -0.5; + float cloudVal1 = sdCloud(cloudUV, + vec2(0.1 + sin(cloudTime + 140.5)*0.1,cloudY), + vec2(1.05 + cos(cloudTime * 0.9 - 36.56) * 0.1, cloudY), + vec2(0.2 + cos(cloudTime * 0.867 + 387.165) * 0.1,0.25+cloudY), + vec2(0.5 + cos(cloudTime * 0.9675 - 15.162) * 0.09, 0.25+cloudY), 0.075); + cloudY = -0.6; + float cloudVal2 = sdCloud(cloudUV, + vec2(-0.9 + cos(cloudTime * 1.02 + 541.75) * 
0.1,cloudY), + vec2(-0.5 + sin(cloudTime * 0.9 - 316.56) * 0.1, cloudY), + vec2(-1.5 + cos(cloudTime * 0.867 + 37.165) * 0.1,0.25+cloudY), + vec2(-0.6 + sin(cloudTime * 0.9675 + 665.162) * 0.09, 0.25+cloudY), 0.075); + + float cloudVal = min(cloudVal1, cloudVal2); + + //col = mix(col, vec3(1.0,1.0,0.0), smoothstep(0.0751, 0.075, cloudVal)); + col = mix(col, vec3(0.0, 0.0, 0.2), 1.0 - smoothstep(0.075 - 0.0001, 0.075, cloudVal)); + col += vec3(1.0, 1.0, 1.0)*(1.0 - smoothstep(0.0,0.01,abs(cloudVal - 0.075))); + } + + col += fog * fog * fog; + col = mix(vec3(col.r, col.r, col.r) * 0.5, col, battery * 0.7); + + fragColor = vec4(col,1.0); + } + //else fragColor = vec4(0.0); + + +} + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_inercia.py b/examples/shadertoy_glsl_inercia.py new file mode 100644 index 0000000..0ecef49 --- /dev/null +++ b/examples/shadertoy_glsl_inercia.py @@ -0,0 +1,490 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// https://www.shadertoy.com/view/cs2GWD + +#define lofi(i,j) (floor((i)/(j))*(j)) +#define lofir(i,j) (round((i)/(j))*(j)) + +const float PI=3.1415926; +const float TAU=PI*2.; + +mat2 r2d(float t){ + float c=cos(t),s=sin(t); + return mat2(c,s,-s,c); +} + +mat3 orthbas(vec3 z){ + z=normalize(z); + vec3 up=abs(z.y)>.999?vec3(0,0,1):vec3(0,1,0); + vec3 x=normalize(cross(up,z)); + return mat3(x,cross(z,x),z); +} + +uvec3 pcg3d(uvec3 s){ + s=s*1145141919u+1919810u; + s+=s.yzx*s.zxy; + s^=s>>16; + s+=s.yzx*s.zxy; + return s; +} + +vec3 pcg3df(vec3 s){ + uvec3 r=pcg3d(floatBitsToUint(s)); + return vec3(r)/float(0xffffffffu); +} + +struct Grid{ + vec3 s; + vec3 c; + vec3 h; + int i; + float d; +}; + +Grid dogrid(vec3 ro,vec3 rd){ + Grid r; + r.s=vec3(2,2,100); + for(int i=0;i<3;i++){ + r.c=(floor(ro/r.s)+.5)*r.s; + r.h=pcg3df(r.c); + r.i=i; + + if(r.h.x<.4){ + break; + }else if(i==0){ + r.s=vec3(2,1,100); + }else if(i==1){ + r.s=vec3(1,1,100); + } + } + + vec3 src=-(ro-r.c)/rd; + vec3 dst=abs(.501*r.s/rd); + vec3 bv=src+dst; + float b=min(min(bv.x,bv.y),bv.z); + r.d=b; + + return r; +} + +float sdbox(vec3 p,vec3 s){ + vec3 d=abs(p)-s; + return length(max(d,0.))+min(0.,max(max(d.x,d.y),d.z)); +} + +float sdbox(vec2 p,vec2 s){ + vec2 d=abs(p)-s; + return length(max(d,0.))+min(0.,max(d.x,d.y)); +} + +vec4 map(vec3 p,Grid grid){ + p-=grid.c; + p.z+=.4*sin(2.*iTime+1.*fract(grid.h.z*28.)+.3*(grid.c.x+grid.c.y)); + + vec3 psize=grid.s/2.; + psize.z=1.; + psize-=.02; + float d=sdbox(p+vec3(0,0,1),psize)-.02; + + float pcol=1.; + + vec3 pt=p; + + if(grid.i==0){//2x2 + if(grid.h.y<.3){//speaker + vec3 c=vec3(0); + pt.xy*=r2d(PI/4.); + c.xy=lofir(pt.xy,.1); + pt=pt-c; + pt.xy*=r2d(-PI/4.); + + float r=.02*smoothstep(.9,.7,abs(p.x))*smoothstep(.9,.7,abs(p.y)); + float hole=length(pt.xy)-r; + d=max(d,-hole); + }else if(grid.h.y<.5){//eq + vec3 c=vec3(0); + c.x=clamp(lofir(pt.x,.2),-.6,.6); + pt-=c; + float hole=sdbox(pt.xy,vec2(0.,.7))-.03; + d=max(d,-hole); + + pt.y-=.5-smoothstep(-.5,.5,sin(iTime+c.x+grid.h.z*100.)); + float d2=sdbox(pt,vec3(.02,.07,.07))-.03; + + if(d250.){break;} + } + + March r; + r.isect=isect; + r.rp=rp; + r.rl=rl; + r.grid=grid; + + return r; +} + +void mainImage( out vec4 fragColor, in vec2 fragCoord ) +{ + vec2 uv = vec2(fragCoord.x / iResolution.x, fragCoord.y / iResolution.y); + vec2 p=uv*2.-1.; + p.x*=iResolution.x/iResolution.y; + + vec3 col=vec3(0); + + float canim=smoothstep(-.2,.2,sin(iTime)); + vec3 co=mix(vec3(-6,-8,-40),vec3(0,-2,-40),canim); 
+ vec3 ct=vec3(0,0,-50); + float cr=mix(.5,.0,canim); + co.xy+=iTime; + ct.xy+=iTime; + mat3 cb=orthbas(co-ct); + vec3 ro=co+cb*vec3(4.*p*r2d(cr),0); + vec3 rd=cb*normalize(vec3(0,0,-2)); + + March march=domarch(ro,rd,100); + + if(march.isect.x<1E-2){ + vec3 basecol=vec3(.5); + vec3 speccol=vec3(.2); + float specpow=30.; + float ndelta=1E-4; + + float mtl=march.isect.y; + float mtlp=march.isect.z; + if(mtl==0.){ + mtlp=mix(mtlp,1.-mtlp,step(fract(march.grid.h.z*66.),.1)); + vec3 c=.9+.0*sin(.1*(march.grid.c.x+march.grid.c.y)+march.grid.h.z+vec3(0,2,3)); + basecol=mix(vec3(.04),c,mtlp); + }else if(mtl==1.){ + basecol=vec3(0); + speccol=vec3(.5); + specpow=60.; + + vec2 size=vec2(.05,.2); + vec2 pp=(march.rp-march.grid.c).xy; + vec2 c=lofi(pp.xy,size)+size/2.; + vec2 cc=pp-c; + vec3 led=vec3(1); + led*=exp(-60.*sdbox(cc,vec2(0.,.08))); + led*=c.x>.5?vec3(5,1,2):vec3(1,5,2); + // float lv=texture(iChannel0,vec2(march.grid.h.z,0)).x*1.; + col+=led*step(c.x,-.8); + basecol=.04*led; + }else if(mtl==2.){//led + basecol=vec3(0); + speccol=vec3(1.); + specpow=100.; + + col+=mtlp*vec3(2,.5,.5); + }else if(mtl==3.){//metal + basecol=vec3(.2); + speccol=vec3(1.8); + specpow=100.; + ndelta=3E-2; + }else if(mtl==4.){//kaoss + basecol=vec3(0); + speccol=vec3(.5); + specpow=60.; + + vec2 size=vec2(.1); + vec2 pp=(march.rp-march.grid.c).xy; + vec2 c=lofi(pp.xy,size)+size/2.; + vec2 cc=pp-c; + vec3 led=vec3(1); + led*=exp(-60.*sdbox(cc,vec2(0.,.0))); + led*=vec3(2,1,2); + float plasma=sin(length(c)*10.-10.*iTime+march.grid.h.z*.7); + plasma+=sin(c.y*10.-7.*iTime); + led*=.5+.5*sin(plasma); + col+=2.*led; + basecol=.04*led; + }else if(mtl==5.){//808 + basecol=vec3(.9,mtlp,.02); + } + + vec3 n=nmap(march.rp,march.grid,ndelta); + vec3 v=-rd; + + { + vec3 l=normalize(vec3(1,3,5)); + vec3 h=normalize(l+v); + float dotnl=max(0.,dot(n,l)); + float dotnh=max(0.,dot(n,h)); + float shadow=step(1E-1,domarch(march.rp,l,30).isect.x); + vec3 diff=basecol/PI; + vec3 spec=speccol*pow(dotnh,specpow); + col+=vec3(.5,.6,.7)*shadow*dotnl*(diff+spec); + } + { + vec3 l=normalize(vec3(-1,-1,5)); + vec3 h=normalize(l+v); + float dotnl=max(0.,dot(n,l)); + float dotnh=max(0.,dot(n,h)); + float shadow=step(1E-1,domarch(march.rp,l,30).isect.x); + vec3 diff=basecol/PI; + vec3 spec=speccol*pow(dotnh,specpow); + col+=shadow*dotnl*(diff+spec); + } + } + + col=pow(col,vec3(.4545)); + col=smoothstep(vec3(0,-.1,-.2),vec3(1,1.1,1.2),col); + fragColor = vec4(col,0); +} + + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_mouse_event.py b/examples/shadertoy_glsl_mouse_event.py new file mode 100644 index 0000000..7fb748f --- /dev/null +++ b/examples/shadertoy_glsl_mouse_event.py @@ -0,0 +1,50 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// https://www.shadertoy.com/view/Mss3zH + +// Shows how to use the mouse input (only left button supported): +// +// mouse.xy = mouse position during last button down +// abs(mouse.zw) = mouse position during last button click +// sign(mouze.z) = button is down +// sign(mouze.w) = button is clicked + + +float distanceToSegment( vec2 a, vec2 b, vec2 p ) +{ + vec2 pa = p - a, ba = b - a; + float h = clamp( dot(pa,ba)/dot(ba,ba), 0.0, 1.0 ); + return length( pa - ba*h ); +} + +void mainImage( out vec4 fragColor, in vec2 fragCoord ) +{ + vec2 p = fragCoord / iResolution.x; + vec2 cen = 0.5*iResolution.xy/iResolution.x; + vec4 m = iMouse / iResolution.x; + + vec3 col = vec3(0.0); + + if( m.z>0.0 ) // button is 
down + { + float d = distanceToSegment( m.xy, abs(m.zw), p ); + col = mix( col, vec3(1.0,1.0,0.0), 1.0-smoothstep(.004,0.008, d) ); + } + if( m.w>0.0 ) // button click + { + col = mix( col, vec3(1.0,1.0,1.0), 1.0-smoothstep(0.1,0.105, length(p-cen)) ); + } + + col = mix( col, vec3(1.0,0.0,0.0), 1.0-smoothstep(0.03,0.035, length(p- m.xy )) ); + col = mix( col, vec3(0.0,0.0,1.0), 1.0-smoothstep(0.03,0.035, length(p-abs(m.zw))) ); + + fragColor = vec4( col, 1.0 ); +} + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_sdf.py b/examples/shadertoy_glsl_sdf.py new file mode 100644 index 0000000..10b5b18 --- /dev/null +++ b/examples/shadertoy_glsl_sdf.py @@ -0,0 +1,657 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// https://www.shadertoy.com/view/Xds3zN + +#if HW_PERFORMANCE==0 +#define AA 1 +#else +#define AA 2 // make this 2 or 3 for antialiasing +#endif + +//------------------------------------------------------------------ +float dot2( in vec2 v ) { return dot(v,v); } +float dot2( in vec3 v ) { return dot(v,v); } +float ndot( in vec2 a, in vec2 b ) { return a.x*b.x - a.y*b.y; } + +float sdPlane( vec3 p ) +{ + return p.y; +} + +float sdSphere( vec3 p, float s ) +{ + return length(p)-s; +} + +float sdBox( vec3 p, vec3 b ) +{ + vec3 d = abs(p) - b; + return min(max(d.x,max(d.y,d.z)),0.0) + length(max(d,0.0)); +} + +float sdBoxFrame( vec3 p, vec3 b, float e ) +{ + p = abs(p )-b; + vec3 q = abs(p+e)-e; + + return min(min( + length(max(vec3(p.x,q.y,q.z),0.0))+min(max(p.x,max(q.y,q.z)),0.0), + length(max(vec3(q.x,p.y,q.z),0.0))+min(max(q.x,max(p.y,q.z)),0.0)), + length(max(vec3(q.x,q.y,p.z),0.0))+min(max(q.x,max(q.y,p.z)),0.0)); +} +float sdEllipsoid( in vec3 p, in vec3 r ) // approximated +{ + float k0 = length(p/r); + float k1 = length(p/(r*r)); + return k0*(k0-1.0)/k1; +} + +float sdTorus( vec3 p, vec2 t ) +{ + return length( vec2(length(p.xz)-t.x,p.y) )-t.y; +} + +float sdCappedTorus(in vec3 p, in vec2 sc, in float ra, in float rb) +{ + p.x = abs(p.x); + float k = (sc.y*p.x>sc.x*p.y) ? 
dot(p.xy,sc) : length(p.xy); + return sqrt( dot(p,p) + ra*ra - 2.0*ra*k ) - rb; +} + +float sdHexPrism( vec3 p, vec2 h ) +{ + vec3 q = abs(p); + + const vec3 k = vec3(-0.8660254, 0.5, 0.57735); + p = abs(p); + p.xy -= 2.0*min(dot(k.xy, p.xy), 0.0)*k.xy; + vec2 d = vec2( + length(p.xy - vec2(clamp(p.x, -k.z*h.x, k.z*h.x), h.x))*sign(p.y - h.x), + p.z-h.y ); + return min(max(d.x,d.y),0.0) + length(max(d,0.0)); +} + +float sdOctogonPrism( in vec3 p, in float r, float h ) +{ + const vec3 k = vec3(-0.9238795325, // sqrt(2+sqrt(2))/2 + 0.3826834323, // sqrt(2-sqrt(2))/2 + 0.4142135623 ); // sqrt(2)-1 + // reflections + p = abs(p); + p.xy -= 2.0*min(dot(vec2( k.x,k.y),p.xy),0.0)*vec2( k.x,k.y); + p.xy -= 2.0*min(dot(vec2(-k.x,k.y),p.xy),0.0)*vec2(-k.x,k.y); + // polygon side + p.xy -= vec2(clamp(p.x, -k.z*r, k.z*r), r); + vec2 d = vec2( length(p.xy)*sign(p.y), p.z-h ); + return min(max(d.x,d.y),0.0) + length(max(d,0.0)); +} + +float sdCapsule( vec3 p, vec3 a, vec3 b, float r ) +{ + vec3 pa = p-a, ba = b-a; + float h = clamp( dot(pa,ba)/dot(ba,ba), 0.0, 1.0 ); + return length( pa - ba*h ) - r; +} + +float sdRoundCone( in vec3 p, in float r1, float r2, float h ) +{ + vec2 q = vec2( length(p.xz), p.y ); + + float b = (r1-r2)/h; + float a = sqrt(1.0-b*b); + float k = dot(q,vec2(-b,a)); + + if( k < 0.0 ) return length(q) - r1; + if( k > a*h ) return length(q-vec2(0.0,h)) - r2; + + return dot(q, vec2(a,b) ) - r1; +} + +float sdRoundCone(vec3 p, vec3 a, vec3 b, float r1, float r2) +{ + // sampling independent computations (only depend on shape) + vec3 ba = b - a; + float l2 = dot(ba,ba); + float rr = r1 - r2; + float a2 = l2 - rr*rr; + float il2 = 1.0/l2; + + // sampling dependant computations + vec3 pa = p - a; + float y = dot(pa,ba); + float z = y - l2; + float x2 = dot2( pa*l2 - ba*y ); + float y2 = y*y*l2; + float z2 = z*z*l2; + + // single square root! 
+ float k = sign(rr)*rr*rr*x2; + if( sign(z)*a2*z2 > k ) return sqrt(x2 + z2) *il2 - r2; + if( sign(y)*a2*y2 < k ) return sqrt(x2 + y2) *il2 - r1; + return (sqrt(x2*a2*il2)+y*rr)*il2 - r1; +} + +float sdTriPrism( vec3 p, vec2 h ) +{ + const float k = sqrt(3.0); + h.x *= 0.5*k; + p.xy /= h.x; + p.x = abs(p.x) - 1.0; + p.y = p.y + 1.0/k; + if( p.x+k*p.y>0.0 ) p.xy=vec2(p.x-k*p.y,-k*p.x-p.y)/2.0; + p.x -= clamp( p.x, -2.0, 0.0 ); + float d1 = length(p.xy)*sign(-p.y)*h.x; + float d2 = abs(p.z)-h.y; + return length(max(vec2(d1,d2),0.0)) + min(max(d1,d2), 0.); +} + +// vertical +float sdCylinder( vec3 p, vec2 h ) +{ + vec2 d = abs(vec2(length(p.xz),p.y)) - h; + return min(max(d.x,d.y),0.0) + length(max(d,0.0)); +} + +// arbitrary orientation +float sdCylinder(vec3 p, vec3 a, vec3 b, float r) +{ + vec3 pa = p - a; + vec3 ba = b - a; + float baba = dot(ba,ba); + float paba = dot(pa,ba); + + float x = length(pa*baba-ba*paba) - r*baba; + float y = abs(paba-baba*0.5)-baba*0.5; + float x2 = x*x; + float y2 = y*y*baba; + float d = (max(x,y)<0.0)?-min(x2,y2):(((x>0.0)?x2:0.0)+((y>0.0)?y2:0.0)); + return sign(d)*sqrt(abs(d))/baba; +} + +// vertical +float sdCone( in vec3 p, in vec2 c, float h ) +{ + vec2 q = h*vec2(c.x,-c.y)/c.y; + vec2 w = vec2( length(p.xz), p.y ); + + vec2 a = w - q*clamp( dot(w,q)/dot(q,q), 0.0, 1.0 ); + vec2 b = w - q*vec2( clamp( w.x/q.x, 0.0, 1.0 ), 1.0 ); + float k = sign( q.y ); + float d = min(dot( a, a ),dot(b, b)); + float s = max( k*(w.x*q.y-w.y*q.x),k*(w.y-q.y) ); + return sqrt(d)*sign(s); +} + +float sdCappedCone( in vec3 p, in float h, in float r1, in float r2 ) +{ + vec2 q = vec2( length(p.xz), p.y ); + + vec2 k1 = vec2(r2,h); + vec2 k2 = vec2(r2-r1,2.0*h); + vec2 ca = vec2(q.x-min(q.x,(q.y < 0.0)?r1:r2), abs(q.y)-h); + vec2 cb = q - k1 + k2*clamp( dot(k1-q,k2)/dot2(k2), 0.0, 1.0 ); + float s = (cb.x < 0.0 && ca.y < 0.0) ? -1.0 : 1.0; + return s*sqrt( min(dot2(ca),dot2(cb)) ); +} + +float sdCappedCone(vec3 p, vec3 a, vec3 b, float ra, float rb) +{ + float rba = rb-ra; + float baba = dot(b-a,b-a); + float papa = dot(p-a,p-a); + float paba = dot(p-a,b-a)/baba; + + float x = sqrt( papa - paba*paba*baba ); + + float cax = max(0.0,x-((paba<0.5)?ra:rb)); + float cay = abs(paba-0.5)-0.5; + + float k = rba*rba + baba; + float f = clamp( (rba*(x-ra)+paba*baba)/k, 0.0, 1.0 ); + + float cbx = x-ra - f*rba; + float cby = paba - f; + + float s = (cbx < 0.0 && cay < 0.0) ? -1.0 : 1.0; + + return s*sqrt( min(cax*cax + cay*cay*baba, + cbx*cbx + cby*cby*baba) ); +} + +// c is the sin/cos of the desired cone angle +float sdSolidAngle(vec3 pos, vec2 c, float ra) +{ + vec2 p = vec2( length(pos.xz), pos.y ); + float l = length(p) - ra; + float m = length(p - c*clamp(dot(p,c),0.0,ra) ); + return max(l,m*sign(c.y*p.x-c.x*p.y)); +} + +float sdOctahedron(vec3 p, float s) +{ + p = abs(p); + float m = p.x + p.y + p.z - s; + + // exact distance + #if 0 + vec3 o = min(3.0*p - m, 0.0); + o = max(6.0*p - m*2.0 - o*3.0 + (o.x+o.y+o.z), 0.0); + return length(p - s*o/(o.x+o.y+o.z)); + #endif + + // exact distance + #if 1 + vec3 q; + if( 3.0*p.x < m ) q = p.xyz; + else if( 3.0*p.y < m ) q = p.yzx; + else if( 3.0*p.z < m ) q = p.zxy; + else return m*0.57735027; + float k = clamp(0.5*(q.z-q.y+s),0.0,s); + return length(vec3(q.x,q.y-s+k,q.z-k)); + #endif + + // bound, not exact + #if 0 + return m*0.57735027; + #endif +} + +float sdPyramid( in vec3 p, in float h ) +{ + float m2 = h*h + 0.25; + + // symmetry + p.xz = abs(p.xz); + p.xz = (p.z>p.x) ? 
p.zx : p.xz; + p.xz -= 0.5; + + // project into face plane (2D) + vec3 q = vec3( p.z, h*p.y - 0.5*p.x, h*p.x + 0.5*p.y); + + float s = max(-q.x,0.0); + float t = clamp( (q.y-0.5*p.z)/(m2+0.25), 0.0, 1.0 ); + + float a = m2*(q.x+s)*(q.x+s) + q.y*q.y; + float b = m2*(q.x+0.5*t)*(q.x+0.5*t) + (q.y-m2*t)*(q.y-m2*t); + + float d2 = min(q.y,-q.x*m2-q.y*0.5) > 0.0 ? 0.0 : min(a,b); + + // recover 3D and scale, and add sign + return sqrt( (d2+q.z*q.z)/m2 ) * sign(max(q.z,-p.y));; +} + +// la,lb=semi axis, h=height, ra=corner +float sdRhombus(vec3 p, float la, float lb, float h, float ra) +{ + p = abs(p); + vec2 b = vec2(la,lb); + float f = clamp( (ndot(b,b-2.0*p.xz))/dot(b,b), -1.0, 1.0 ); + vec2 q = vec2(length(p.xz-0.5*b*vec2(1.0-f,1.0+f))*sign(p.x*b.y+p.z*b.x-b.x*b.y)-ra, p.y-h); + return min(max(q.x,q.y),0.0) + length(max(q,0.0)); +} + +float sdHorseshoe( in vec3 p, in vec2 c, in float r, in float le, vec2 w ) +{ + p.x = abs(p.x); + float l = length(p.xy); + p.xy = mat2(-c.x, c.y, + c.y, c.x)*p.xy; + p.xy = vec2((p.y>0.0 || p.x>0.0)?p.x:l*sign(-c.x), + (p.x>0.0)?p.y:l ); + p.xy = vec2(p.x,abs(p.y-r))-vec2(le,0.0); + + vec2 q = vec2(length(max(p.xy,0.0)) + min(0.0,max(p.x,p.y)),p.z); + vec2 d = abs(q) - w; + return min(max(d.x,d.y),0.0) + length(max(d,0.0)); +} + +float sdU( in vec3 p, in float r, in float le, vec2 w ) +{ + p.x = (p.y>0.0) ? abs(p.x) : length(p.xy); + p.x = abs(p.x-r); + p.y = p.y - le; + float k = max(p.x,p.y); + vec2 q = vec2( (k<0.0) ? -k : length(max(p.xy,0.0)), abs(p.z) ) - w; + return length(max(q,0.0)) + min(max(q.x,q.y),0.0); +} + +//------------------------------------------------------------------ + +vec2 opU( vec2 d1, vec2 d2 ) +{ + return (d1.x0.0 ) + { + tmax = min( tmax, tp1 ); + res = vec2( tp1, 1.0 ); + } + //else return res; + + // raymarch primitives + vec2 tb = iBox( ro-vec3(0.0,0.4,-0.5), rd, vec3(2.5,0.41,3.0) ); + if( tb.x0.0 && tb.x0.0 ) tmax = min( tmax, tp ); + + float res = 1.0; + float t = mint; + for( int i=ZERO; i<24; i++ ) + { + float h = map( ro + rd*t ).x; + float s = clamp(8.0*h/t,0.0,1.0); + res = min( res, s ); + t += clamp( h, 0.01, 0.2 ); + if( res<0.004 || t>tmax ) break; + } + res = clamp( res, 0.0, 1.0 ); + return res*res*(3.0-2.0*res); +} + +// https://iquilezles.org/articles/normalsSDF +vec3 calcNormal( in vec3 pos ) +{ +#if 0 + vec2 e = vec2(1.0,-1.0)*0.5773*0.0005; + return normalize( e.xyy*map( pos + e.xyy ).x + + e.yyx*map( pos + e.yyx ).x + + e.yxy*map( pos + e.yxy ).x + + e.xxx*map( pos + e.xxx ).x ); +#else + // inspired by tdhooper and klems - a way to prevent the compiler from inlining map() 4 times + vec3 n = vec3(0.0); + for( int i=ZERO; i<4; i++ ) + { + vec3 e = 0.5773*(2.0*vec3((((i+3)>>1)&1),((i>>1)&1),(i&1))-1.0); + n += e*map(pos+0.0005*e).x; + //if( n.x+n.y+n.z>100.0 ) break; + } + return normalize(n); +#endif +} + +// https://iquilezles.org/articles/nvscene2008/rwwtt.pdf +float calcAO( in vec3 pos, in vec3 nor ) +{ + float occ = 0.0; + float sca = 1.0; + for( int i=ZERO; i<5; i++ ) + { + float h = 0.01 + 0.12*float(i)/4.0; + float d = map( pos + h*nor ).x; + occ += (h-d)*sca; + sca *= 0.95; + if( occ>0.35 ) break; + } + return clamp( 1.0 - 3.0*occ, 0.0, 1.0 ) * (0.5+0.5*nor.y); +} + +// https://iquilezles.org/articles/checkerfiltering +float checkersGradBox( in vec2 p, in vec2 dpdx, in vec2 dpdy ) +{ + // filter kernel + vec2 w = abs(dpdx)+abs(dpdy) + 0.001; + // analytical integral (box filter) + vec2 i = 2.0*(abs(fract((p-0.5*w)*0.5)-0.5)-abs(fract((p+0.5*w)*0.5)-0.5))/w; + // xor pattern + return 0.5 - 0.5*i.x*i.y; 
+} + +vec3 render( in vec3 ro, in vec3 rd, in vec3 rdx, in vec3 rdy ) +{ + // background + vec3 col = vec3(0.7, 0.7, 0.9) - max(rd.y,0.0)*0.3; + + // raycast scene + vec2 res = raycast(ro,rd); + float t = res.x; + float m = res.y; + if( m>-0.5 ) + { + vec3 pos = ro + t*rd; + vec3 nor = (m<1.5) ? vec3(0.0,1.0,0.0) : calcNormal( pos ); + vec3 ref = reflect( rd, nor ); + + // material + col = 0.2 + 0.2*sin( m*2.0 + vec3(0.0,1.0,2.0) ); + float ks = 1.0; + + if( m<1.5 ) + { + // project pixel footprint into the plane + vec3 dpdx = ro.y*(rd/rd.y-rdx/rdx.y); + vec3 dpdy = ro.y*(rd/rd.y-rdy/rdy.y); + + float f = checkersGradBox( 3.0*pos.xz, 3.0*dpdx.xz, 3.0*dpdy.xz ); + col = 0.15 + f*vec3(0.05); + ks = 0.4; + } + + // lighting + float occ = calcAO( pos, nor ); + + vec3 lin = vec3(0.0); + + // sun + { + vec3 lig = normalize( vec3(-0.5, 0.4, -0.6) ); + vec3 hal = normalize( lig-rd ); + float dif = clamp( dot( nor, lig ), 0.0, 1.0 ); + //if( dif>0.0001 ) + dif *= calcSoftshadow( pos, lig, 0.02, 2.5 ); + float spe = pow( clamp( dot( nor, hal ), 0.0, 1.0 ),16.0); + spe *= dif; + spe *= 0.04+0.96*pow(clamp(1.0-dot(hal,lig),0.0,1.0),5.0); + //spe *= 0.04+0.96*pow(clamp(1.0-sqrt(0.5*(1.0-dot(rd,lig))),0.0,1.0),5.0); + lin += col*2.20*dif*vec3(1.30,1.00,0.70); + lin += 5.00*spe*vec3(1.30,1.00,0.70)*ks; + } + // sky + { + float dif = sqrt(clamp( 0.5+0.5*nor.y, 0.0, 1.0 )); + dif *= occ; + float spe = smoothstep( -0.2, 0.2, ref.y ); + spe *= dif; + spe *= 0.04+0.96*pow(clamp(1.0+dot(nor,rd),0.0,1.0), 5.0 ); + //if( spe>0.001 ) + spe *= calcSoftshadow( pos, ref, 0.02, 2.5 ); + lin += col*0.60*dif*vec3(0.40,0.60,1.15); + lin += 2.00*spe*vec3(0.40,0.60,1.30)*ks; + } + // back + { + float dif = clamp( dot( nor, normalize(vec3(0.5,0.0,0.6))), 0.0, 1.0 )*clamp( 1.0-pos.y,0.0,1.0); + dif *= occ; + lin += col*0.55*dif*vec3(0.25,0.25,0.25); + } + // sss + { + float dif = pow(clamp(1.0+dot(nor,rd),0.0,1.0),2.0); + dif *= occ; + lin += col*0.25*dif*vec3(1.00,1.00,1.00); + } + + col = lin; + + col = mix( col, vec3(0.7,0.7,0.9), 1.0-exp( -0.0001*t*t*t ) ); + } + + return vec3( clamp(col,0.0,1.0) ); +} + +mat3 setCamera( in vec3 ro, in vec3 ta, float cr ) +{ + vec3 cw = normalize(ta-ro); + vec3 cp = vec3(sin(cr), cos(cr),0.0); + vec3 cu = normalize( cross(cw,cp) ); + vec3 cv = ( cross(cu,cw) ); + return mat3( cu, cv, cw ); +} + +void mainImage( out vec4 fragColor, in vec2 fragCoord ) +{ + vec2 mo = iMouse.xy/iResolution.xy; + float time = 32.0 + iTime*1.5; + + // camera + vec3 ta = vec3( 0.25, -0.75, -0.75 ); + vec3 ro = ta + vec3( 4.5*cos(0.1*time + 7.0*mo.x), 2.2, 4.5*sin(0.1*time + 7.0*mo.x) ); + // camera-to-world transformation + mat3 ca = setCamera( ro, ta, 0.0 ); + + vec3 tot = vec3(0.0); +#if AA>1 + for( int m=ZERO; m1 + } + tot /= float(AA*AA); +#endif + + fragColor = vec4( tot, 1.0 ); +} + + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_sea.py b/examples/shadertoy_glsl_sea.py new file mode 100644 index 0000000..8276797 --- /dev/null +++ b/examples/shadertoy_glsl_sea.py @@ -0,0 +1,174 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// https://www.shadertoy.com/view/mt2XR3 + +// License Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License. 
+// Created by S.Guillitte + +#define time iTime + +mat2 m2 = mat2(0.8, 0.6, -0.6, 0.8); + +float noise(in vec2 p){ + + float res=0.; + float f=1.; + for( int i=0; i< 3; i++ ) + { + p=m2*p*f+.6; + f*=1.2; + res+=sin(p.x+sin(2.*p.y)); + } + return res/3.; +} + +vec3 noised(in vec2 p){//noise with derivatives + float res=0.; + vec2 dres=vec2(0.); + float f=1.; + mat2 j=m2; + for( int i=0; i< 3; i++ ) + { + p=m2*p*f+.6; + f*=1.2; + float a=p.x+sin(2.*p.y); + res+=sin(a); + dres+=cos(a)*vec2(1.,2.*cos(2.*p.y))*j; + j*=m2*f; + + } + return vec3(res,dres)/3.; +} + + +float fbmabs( vec2 p ) { + + float f=.7; + float r = 0.0; + for(int i = 0;i<12;i++){ + vec3 n = noised(p); + r += abs(noise( p*f +n.xz)+.5)/f; + f *=1.45; + p=m2*p; + } + return r; +} + +float sea( vec2 p ) +{ + float f=.7; + float r = 0.0; + for(int i = 0;i<6;i++){ + r += (1.-abs(noise( p*f -.6*time)))/f; + f *=1.4; + p-=vec2(-.01,.04)*(r+.2*time/(.1-f)); + } + return r/4.+.8; +} + + + +float rocks(vec2 p){ + return 1.-fbmabs(p)*.15; +} + +vec3 map( vec3 p) +{ + float d1 =p.y+ cos(.2*p.x-sin(.5*p.z))*cos(.2*p.z+sin(.3*p.x))+.5-rocks(p.xz); + float d2 =p.y-.4*sea(p.xz); + //dh = d2-d1; + float d = min(d1,d2); + return vec3(d,d1,d2); + +} + +vec3 normalRocks(in vec2 p) +{ + const vec2 e = vec2(0.004, 0.0); + return normalize(vec3( + rocks(p + e.xy) - rocks(p - e.xy), + .008, + rocks(p + e.yx) - rocks(p - e.yx) + )); +} + +vec3 normalSea(in vec2 p) +{ + const vec2 e = vec2(0.002, 0.0); + return normalize(vec3( + sea(p + e.xy) - sea(p - e.xy), + .004, + sea(p + e.yx) - sea(p - e.yx) + )); +} + +vec3 sky(in vec2 p) +{ + return sin(vec3(1.7,1.5,1)+ 2.-fbmabs(p*12.)*.25)+.3; +} + +vec3 march(in vec3 ro, in vec3 rd) +{ + const float maxd = 35.0; + const float precis = 0.001; + float h = precis * 2.0; + float t = 0.0; + float res = -1.0; + for(int i = 0; i < 128; i++) + { + if(h < precis*t || t > maxd) break; + h = map(ro + rd * t).x; + t += h*.5; + } + if(t < maxd) res = t; + return vec3(res,map(ro + rd * t).yz); +} + + +void mainImage( out vec4 fragColor, in vec2 fragCoord ) +{ + + vec2 p = (2.0 * fragCoord.xy - iResolution.xy) / iResolution.y; + vec3 col = vec3(0.); + vec3 rd = normalize(vec3(p, -2.)); + vec3 ro = vec3(0.0, 2.0, -2.+.2*time); + vec3 li = normalize(vec3(2., 2., -4.)); + + vec3 v = march(ro, rd); + float t = v.x; + float dh = v.z-v.y; + if(t > 0.) + { + + vec3 pos = ro + t * rd; + float k=rocks(pos.xz/2.)*2.; + vec3 nor = normalRocks(pos.xz/2.); + float r = max(dot(nor, li),0.05)/2.; + + r+=.4*exp(-500.*dh*dh); + + col =vec3(r*k*k, r*k, r*.8); + if(dh<0.03){ + vec3 nor = normalSea(pos.xz); + nor = reflect(rd, nor); + col +=vec3(0.9,.2,.05)*dh*1.; + col += pow(max(dot(li, nor), 0.0), 5.0)*vec3(.5); + col +=.2* sky(nor.xz/(.5+nor.y)); + + } + col = .1+col; + + } + else //sky + col = sky(rd.xz*(.1+rd.y)); + + fragColor = vec4(col, 1.0); +} + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_stone.py b/examples/shadertoy_glsl_stone.py new file mode 100644 index 0000000..57181f5 --- /dev/null +++ b/examples/shadertoy_glsl_stone.py @@ -0,0 +1,288 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// https://www.shadertoy.com/view/ldSSzV + +/* +"Wet stone" by Alexander Alekseev aka TDM - 2014 +License Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License. 
+Contact: tdmaav@gmail.com +*/ + +#define SMOOTH +#define AA + +const int NUM_STEPS = 32; +const int AO_SAMPLES = 4; +const vec2 AO_PARAM = vec2(1.2, 3.5); +const vec2 CORNER_PARAM = vec2(0.25, 40.0); +const float INV_AO_SAMPLES = 1.0 / float(AO_SAMPLES); +const float TRESHOLD = 0.1; +const float EPSILON = 1e-3; +const float LIGHT_INTENSITY = 0.25; +const vec3 RED = vec3(1.0,0.7,0.7) * LIGHT_INTENSITY; +const vec3 ORANGE = vec3(1.0,0.67,0.43) * LIGHT_INTENSITY; +const vec3 BLUE = vec3(0.54,0.77,1.0) * LIGHT_INTENSITY; +const vec3 WHITE = vec3(1.2,1.07,0.98) * LIGHT_INTENSITY; + +const float DISPLACEMENT = 0.1; + +// math +mat3 fromEuler(vec3 ang) { + vec2 a1 = vec2(sin(ang.x),cos(ang.x)); + vec2 a2 = vec2(sin(ang.y),cos(ang.y)); + vec2 a3 = vec2(sin(ang.z),cos(ang.z)); + mat3 m; + m[0] = vec3(a1.y*a3.y+a1.x*a2.x*a3.x,a1.y*a2.x*a3.x+a3.y*a1.x,-a2.y*a3.x); + m[1] = vec3(-a2.y*a1.x,a1.y*a2.y,a2.x); + m[2] = vec3(a3.y*a1.x*a2.x+a1.y*a3.x,a1.x*a3.x-a1.y*a3.y*a2.x,a2.y*a3.y); + return m; +} +vec3 saturation(vec3 c, float t) { + return mix(vec3(dot(c,vec3(0.2126,0.7152,0.0722))),c,t); +} +float hash11(float p) { + return fract(sin(p * 727.1)*435.545); +} +float hash12(vec2 p) { + float h = dot(p,vec2(127.1,311.7)); + return fract(sin(h)*437.545); +} +vec3 hash31(float p) { + vec3 h = vec3(127.231,491.7,718.423) * p; + return fract(sin(h)*435.543); +} + +// 3d noise +float noise_3(in vec3 p) { + vec3 i = floor(p); + vec3 f = fract(p); + vec3 u = f*f*(3.0-2.0*f); + + vec2 ii = i.xy + i.z * vec2(5.0); + float a = hash12( ii + vec2(0.0,0.0) ); + float b = hash12( ii + vec2(1.0,0.0) ); + float c = hash12( ii + vec2(0.0,1.0) ); + float d = hash12( ii + vec2(1.0,1.0) ); + float v1 = mix(mix(a,b,u.x), mix(c,d,u.x), u.y); + + ii += vec2(5.0); + a = hash12( ii + vec2(0.0,0.0) ); + b = hash12( ii + vec2(1.0,0.0) ); + c = hash12( ii + vec2(0.0,1.0) ); + d = hash12( ii + vec2(1.0,1.0) ); + float v2 = mix(mix(a,b,u.x), mix(c,d,u.x), u.y); + + return max(mix(v1,v2,u.z),0.0); +} + +// fBm +float fbm3(vec3 p, float a, float f) { + return noise_3(p); +} + +float fbm3_high(vec3 p, float a, float f) { + float ret = 0.0; + float amp = 1.0; + float frq = 1.0; + for(int i = 0; i < 5; i++) { + float n = pow(noise_3(p * frq),2.0); + ret += n * amp; + frq *= f; + amp *= a * (pow(n,0.2)); + } + return ret; +} + +// lighting +float diffuse(vec3 n,vec3 l,float p) { return pow(max(dot(n,l),0.0),p); } +float specular(vec3 n,vec3 l,vec3 e,float s) { + float nrm = (s + 8.0) / (3.1415 * 8.0); + return pow(max(dot(reflect(e,n),l),0.0),s) * nrm; +} + +// distance functions +float plane(vec3 gp, vec4 p) { + return dot(p.xyz,gp+p.xyz*p.w); +} +float sphere(vec3 p,float r) { + return length(p)-r; +} +float capsule(vec3 p,float r,float h) { + p.y -= clamp(p.y,-h,h); + return length(p)-r; +} +float cylinder(vec3 p,float r,float h) { + return max(abs(p.y/h),capsule(p,r,h)); +} +float box(vec3 p,vec3 s) { + p = abs(p)-s; + return max(max(p.x,p.y),p.z); +} +float rbox(vec3 p,vec3 s) { + p = abs(p)-s; + return length(p-min(p,0.0)); +} +float quad(vec3 p,vec2 s) { + p = abs(p) - vec3(s.x,0.0,s.y); + return max(max(p.x,p.y),p.z); +} + +// boolean operations +float boolUnion(float a,float b) { return min(a,b); } +float boolIntersect(float a,float b) { return max(a,b); } +float boolSub(float a,float b) { return max(a,-b); } + +// smooth operations. 
thanks to iq +float boolSmoothIntersect(float a, float b, float k ) { + float h = clamp(0.5+0.5*(b-a)/k, 0.0, 1.0); + return mix(a,b,h) + k*h*(1.0-h); +} +float boolSmoothSub(float a, float b, float k ) { + return boolSmoothIntersect(a,-b,k); +} + +// world +float rock(vec3 p) { + float d = sphere(p,1.0); + for(int i = 0; i < 9; i++) { + float ii = float(i); + float r = 2.5 + hash11(ii); + vec3 v = normalize(hash31(ii) * 2.0 - 1.0); + #ifdef SMOOTH + d = boolSmoothSub(d,sphere(p+v*r,r * 0.8), 0.03); + #else + d = boolSub(d,sphere(p+v*r,r * 0.8)); + #endif + } + return d; +} + +float map(vec3 p) { + float d = rock(p) + fbm3(p*4.0,0.4,2.96) * DISPLACEMENT; + d = boolUnion(d,plane(p,vec4(0.0,1.0,0.0,1.0))); + return d; +} + +float map_detailed(vec3 p) { + float d = rock(p) + fbm3_high(p*4.0,0.4,2.96) * DISPLACEMENT; + d = boolUnion(d,plane(p,vec4(0.0,1.0,0.0,1.0))); + return d; +} + +// tracing +vec3 getNormal(vec3 p, float dens) { + vec3 n; + n.x = map_detailed(vec3(p.x+EPSILON,p.y,p.z)); + n.y = map_detailed(vec3(p.x,p.y+EPSILON,p.z)); + n.z = map_detailed(vec3(p.x,p.y,p.z+EPSILON)); + return normalize(n-map_detailed(p)); +} +vec2 getOcclusion(vec3 p, vec3 n) { + vec2 r = vec2(0.0); + for(int i = 0; i < AO_SAMPLES; i++) { + float f = float(i)*INV_AO_SAMPLES; + float hao = 0.01+f*AO_PARAM.x; + float hc = 0.01+f*CORNER_PARAM.x; + float dao = map(p + n * hao) - TRESHOLD; + float dc = map(p - n * hc) - TRESHOLD; + r.x += clamp(hao-dao,0.0,1.0) * (1.0-f); + r.y += clamp(hc+dc,0.0,1.0) * (1.0-f); + } + r.x = clamp(1.0-r.x*INV_AO_SAMPLES*AO_PARAM.y,0.0,1.0); + r.y = clamp(r.y*INV_AO_SAMPLES*CORNER_PARAM.y,0.0,1.0); + return r; +} +vec2 spheretracing(vec3 ori, vec3 dir, out vec3 p) { + vec2 td = vec2(0.0); + for(int i = 0; i < NUM_STEPS; i++) { + p = ori + dir * td.x; + td.y = map(p); + if(td.y < TRESHOLD) break; + td.x += (td.y-TRESHOLD) * 0.9; + } + return td; +} + +// stone +vec3 getStoneColor(vec3 p, float c, vec3 l, vec3 n, vec3 e) { + c = min(c + pow(noise_3(vec3(p.x*20.0,0.0,p.z*20.0)),70.0) * 8.0, 1.0); + float ic = pow(1.0-c,0.5); + vec3 base = vec3(0.42,0.3,0.2) * 0.35; + vec3 sand = vec3(0.51,0.41,0.32)*0.9; + vec3 color = mix(base,sand,c); + + float f = pow(1.0 - max(dot(n,-e),0.0), 5.0) * 0.75 * ic; + color += vec3(diffuse(n,l,0.5) * WHITE); + color += vec3(specular(n,l,e,8.0) * WHITE * 1.5 * ic); + n = normalize(n - normalize(p) * 0.4); + color += vec3(specular(n,l,e,80.0) * WHITE * 1.5 * ic); + color = mix(color,vec3(1.0),f); + + color *= sqrt(abs(p.y*0.5+0.5)) * 0.4 + 0.6; + color *= (n.y * 0.5 + 0.5) * 0.4 + 0.6; + + return color; +} + +vec3 getPixel(in vec2 coord, float time) { + vec2 iuv = coord / iResolution.xy * 2.0 - 1.0; + vec2 uv = iuv; + uv.x *= iResolution.x / iResolution.y; + + // ray + vec3 ang = vec3(0.0,0.2,time); + if(iMouse.z > 0.0) ang = vec3(0.0,clamp(2.0-iMouse.y*0.01,0.0,3.1415),iMouse.x*0.01); + mat3 rot = fromEuler(ang); + + vec3 ori = vec3(0.0,0.0,2.8); + vec3 dir = normalize(vec3(uv.xy,-2.0)); + ori = ori * rot; + dir = dir * rot; + + // tracing + vec3 p; + vec2 td = spheretracing(ori,dir,p); + vec3 n = getNormal(p,td.y); + vec2 occ = getOcclusion(p,n); + vec3 light = normalize(vec3(0.0,1.0,0.0)); + + // color + vec3 color = vec3(1.0); + if(td.x < 3.5 && p.y > -0.89) color = getStoneColor(p,occ.y,light,n,dir); + color *= occ.x; + return color; +} + +// main +void mainImage( out vec4 fragColor, in vec2 fragCoord ) { + float time = iTime * 0.3; + +#ifdef AA + vec3 color = vec3(0.0); + for(int i = -1; i <= 1; i++) + for(int j = -1; j <= 1; j++) { + vec2 uv = 
fragCoord+vec2(i,j)/3.0; + color += getPixel(uv, time); + } + color /= 9.0; +#else + vec3 color = getPixel(fragCoord, time); +#endif + color = sqrt(color); + color = saturation(color,1.7); + + // vignette + vec2 iuv = fragCoord / iResolution.xy * 2.0 - 1.0; + float vgn = smoothstep(1.2,0.7,abs(iuv.y)) * smoothstep(1.1,0.8,abs(iuv.x)); + color *= 1.0 - (1.0 - vgn) * 0.15; + + fragColor = vec4(color,1.0); +} + +""" # noqa +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_glsl_textures.py b/examples/shadertoy_glsl_textures.py new file mode 100644 index 0000000..8d9e6bf --- /dev/null +++ b/examples/shadertoy_glsl_textures.py @@ -0,0 +1,26 @@ +from wgpu.utils.shadertoy import Shadertoy, ShadertoyChannel + +shader_code = """ +void mainImage( out vec4 fragColor, in vec2 fragCoord ) +{ + vec2 uv = fragCoord/iResolution.xy; + vec4 c0 = texture(iChannel0, 2.0*uv); + vec4 c1 = texture(iChannel1, 3.0*uv); + fragColor = mix(c0,c1,abs(sin(i_time))); +} + +""" +test_pattern = memoryview( + bytearray((int(i != k) * 255 for i in range(8) for k in range(8))) * 4 +).cast("B", shape=[8, 8, 4]) +gradient = memoryview( + bytearray((i for i in range(0, 255, 8) for _ in range(4))) * 32 +).cast("B", shape=[32, 32, 4]) + +channel0 = ShadertoyChannel(test_pattern, wrap="repeat") +channel1 = ShadertoyChannel(gradient) + +shader = Shadertoy(shader_code, resolution=(640, 480), inputs=[channel0, channel1]) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_liberation.py b/examples/shadertoy_liberation.py new file mode 100644 index 0000000..0914057 --- /dev/null +++ b/examples/shadertoy_liberation.py @@ -0,0 +1,144 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// migrated from https://www.shadertoy.com/view/tlGfzd, By Kali + +var objcol: vec3; + +fn hash12(p: vec2) -> f32 { + var p3 = fract(vec3(p.xyx) * 0.1031); + p3 += vec3( dot(p3, p3.yzx + vec3(33.33)) ); + return fract((p3.x + p3.y) * p3.z); +} + +fn rot(a: f32) -> mat2x2 { + let s=sin(a); + let c=cos(a); + return mat2x2(c, s, -s, c); +} + +fn mod1( x : f32, y : f32 ) -> f32 { + return x - y * floor( x / y ); +} + +fn de(pos_: vec3) -> f32 { + var t = mod1(i_time, 17.0); + var a = smoothstep(13.0, 15.0, t) * 8.0 - smoothstep(4.0, 0.0, t) * 4.0; + var f = sin(i_time * 5.0 + sin(i_time * 20.0) * 0.2); + + var pos = pos_; + + let pxz = pos.xz * rot(i_time + 0.5); + pos.x = pxz.x; + pos.z = pxz.y; + + let pyz = pos.yz * rot(i_time); + pos.y = pyz.x; + pos.z = pyz.y; + + var p = pos; + var s = 1.0; + + for (var i = 0; i < 4; i+=1) { + p = abs(p) * 1.3 - 0.5 - f * 0.1 - a; + + let pxy = p.xy * rot(radians(45.0)); + p.x = pxy.x; + p.y = pxy.y; + + let pxz = p.xz * rot(radians(45.0)); + p.x = pxz.x; + p.z = pxz.y; + + s *= 1.3; + } + + var fra = length(p) / s - 0.5; + + let pxy = pos.xy * rot(i_time); + pos.x = pxy.x; + pos.y = pxy.y; + + p = abs(pos) - 2.0 - a; + var d = length(p) - 0.7; + + d = min(d, max(length(p.xz) - 0.1, p.y)); + d = min(d, max(length(p.yz) - 0.1, p.x)); + d = min(d, max(length(p.xy) - 0.1, p.z)); + + p = abs(pos); + p.x -= 4.0 + a + f * 0.5; + d = min(d, length(p) - 0.7); + d = min(d, length(p.yz - abs(sin(p.x * 0.5 - i_time * 10.0) * 0.3))); + + p = abs(pos); + p.y -= 4.0 + a + f * 0.5; + + d = min(d, length(p) - 0.7); + d = min(d, max(length(p.xz) - 0.1, p.y)); + d = min(d, fra); + + objcol = abs(p); + + if (d == fra) { + objcol = vec3(2.0, 0.0, 0.0); + } + + return d; +} + +fn normal(p: vec3) -> vec3 { + var d = vec2(0.0, 0.01); + 
return normalize( vec3( de(p + d.yxx), de(p + d.xyx), de(p + d.xxy) ) - de(p) ); +} + +fn march(fro: vec3, dir_: vec3, frag_coord: vec2) -> vec3 { + var d = 0.0; + var td = 0.0; + var maxdist = 30.0; + + var p = fro; + var col = vec3(0.0); + var dir = dir_; + + for (var i = 0; i < 100; i+=1) { + var d2 = de(p) * (1.0 - hash12(frag_coord.xy + i_time) * 0.2); + if (d2 < 0.0) { + var n = normal(p); + dir = reflect(dir, n); + d2 = 0.1; + } + + d = max(0.01, abs(d2)); + p += d * dir; + td += d; + + if (td > maxdist) { + break; + } + + col += 0.01 * objcol; + } + + return pow(col, vec3(2.0)); +} + +fn shader_main(frag_coord: vec2) -> vec4 { + var uv = frag_coord / i_resolution.xy - 0.5; + uv.x *= i_resolution.x / i_resolution.y; + + var fro = vec3(0.0, 0.0, -10.0); + var dir = normalize(vec3(uv, 1.0)); + + var col = march(fro, dir, frag_coord); + + return vec4(col, 1.0); +} + +""" + +shader = Shadertoy(shader_code, resolution=(800, 450)) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_matrix.py b/examples/shadertoy_matrix.py new file mode 100644 index 0000000..1687df1 --- /dev/null +++ b/examples/shadertoy_matrix.py @@ -0,0 +1,155 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// migrated from https://www.shadertoy.com/view/NlsXDH, By Kali + +const det = 0.001; + +var t: f32; +var boxhit: f32; + +var adv: vec3; +var boxp: vec3; + +fn hash(p: vec2) -> f32 { + var p3 = fract(vec3(p.xyx) * 0.1031); + p3 += vec3( dot(p3, p3.yzx + vec3(33.33)) ); + return fract((p3.x + p3.y) * p3.z); +} + +fn rot(a: f32) -> mat2x2 { + let s=sin(a); + let c=cos(a); + return mat2x2(c, s, -s, c); +} + +fn mod1( x : f32, y : f32 ) -> f32 { + return x - y * floor( x / y ); +} + +fn mod_2( v: vec2, y : f32 ) -> vec2 { + return vec2(v.x - y * floor( v.x / y ), v.y - y * floor( v.y / y )); +} + +fn path(t: f32) -> vec3 { + var p = vec3(sin(t*.1)*10., cos(t*.05)*10., t); + p.x += smoothstep(.0,.5,abs(.5-fract(t*.02)))*10.; + return p; +} + +fn fractal(p_: vec2) -> f32 { + var p = abs( 5.0 - mod_2( p_*0.2, 10.0 ) ) - 5.0; + var ot = 1000.; + for (var i = 0; i < 7; i+=1) { + p = abs(p) / clamp(p.x*p.y, 0.25, 2.0) - 1.0; + if (i > 0) { + ot = min(ot, abs(p.x)+0.7*fract(abs(p.y)*0.05+t*0.05 + f32(i)*0.3)); + } + } + ot = exp(-10.*ot); + return ot; +} + +fn box(p: vec3, l: vec3) -> f32 { + let c = abs(p)-l; + return length(max(vec3(0.),c))+min(0.,max(c.x,max(c.y,c.z))); +} + +fn de(p_: vec3) -> f32 { + var p = p_; + boxhit = 0.0; + var p2 = p-adv; + + let p2_xz = p2.xz*rot(t*0.2); + p2.x = p2_xz.x; + p2.z = p2_xz.y; + + let p2_xy = p2.xy*rot(t*0.1); + p2.x = p2_xy.x; + p2.y = p2_xy.y; + + let p2_yz = p2.yz*rot(t*0.15); + p2.y = p2_yz.x; + p2.z = p2_yz.y; + + let b = box(p2, vec3(1.0)); + + let p_xy = p.xy - path(p.z).xy; + p.x = p_xy.x; + p.y = p_xy.y; + + let s = sign(p.y); + p.y = -abs(p.y) - 3.0; + p.z = mod1(p.z, 20.0) - 10.0; + + for (var i = 0; i < 5; i+=1) { + p = abs(p) - 1.0; + + let p_xz = p.xz*rot(radians(s*-45.0)); + p.x = p_xz.x; + p.z = p_xz.y; + + let p_yz = p.yz*rot(radians(90.0)); + p.y = p_yz.x; + p.z = p_yz.y; + + } + + let f = -box(p, vec3(5.0, 5.0, 10.0)); + let d = min(f,b); + if (d == b) { + boxp = p2; + boxhit = 1.0; + } + return d*0.7; +} + +fn march(fro: vec3, dir: vec3, frag_coord: vec2) -> vec3 { + var p = vec3(0.); + var n = vec3(0.); + var g = vec3(0.); + + var d = 0.0; + var td = 0.0; + + for (var i = 0; i < 80; i+=1) { + p = fro + td*dir; + d = de(p) * (1.0- hash( frag_coord.xy + vec2(t) )*0.3); + if (d < det && boxhit < 0.5) { + break; + } + td 
+= max(det, abs(d));
+ let f = fractal(p.xy)+fractal(p.xz)+fractal(p.yz);
+ let b = fractal(boxp.xy)+fractal(boxp.xz)+fractal(boxp.yz);
+ let colf = vec3(f*f,f,f*f*f);
+ let colb = vec3(b+.1,b*b+.05,0.);
+ g += colf / (3.0 + d*d*2.0) * exp(-0.0015*td*td) * step(5.0,td) / 2.0 * (1.0-boxhit);
+ g += colb / (10.0 + d*d*20.0) * boxhit*0.5;
+ }
+ return g;
+}
+
+fn lookat(dir_: vec3, up: vec3) -> mat3x3 {
+ let dir = normalize(dir_);
+ let rt = normalize(cross(dir, normalize(up)));
+ return mat3x3(rt, cross(rt, dir), dir);
+}
+
+fn shader_main(frag_coord: vec2) -> vec4 {
+ let uv = (frag_coord-i_resolution.xy*.5)/i_resolution.y;
+ t=i_time*7.0;
+ let fro=path(t);
+ adv=path(t+6.+sin(t*.1)*3.);
+ var dir=normalize(vec3(uv, 0.7));
+ dir=lookat(adv-fro, vec3(0.0, 1.0, 0.0)) * dir;
+ let col=march(fro, dir, frag_coord);
+ return vec4(col,1.0);
+}
+
+"""
+
+shader = Shadertoy(shader_code, resolution=(800, 450))
+
+if __name__ == "__main__":
+ shader.show()
diff --git a/examples/shadertoy_riders.py b/examples/shadertoy_riders.py
new file mode 100644
index 0000000..ce040b6
--- /dev/null
+++ b/examples/shadertoy_riders.py
@@ -0,0 +1,45 @@
+from wgpu.utils.shadertoy import Shadertoy
+
+shader_code = """
+
+// migrated from https://www.shadertoy.com/view/3sGfD3, By Kali
+
+fn rot(a: f32) -> mat2x2 {
+ let s=sin(a);
+ let c=cos(a);
+ return mat2x2(c, s, -s, c);
+}
+
+fn render(p_: vec2) -> vec3 {
+ var p = p_;
+ p*=rot(i_time*.1)*(.0002+.7*pow(smoothstep(0.0,0.5,abs(0.5-fract(i_time*.01))),3.));
+ p.y-=.2266;
+ p.x+=.2082;
+ var ot = vec2(100.0);
+ var m = 100.0;
+ for (var i = 0; i < 150; i+=1) {
+ var cp = vec2(p.x,-p.y);
+ p=p+cp/dot(p,p)-vec2(0.0,0.25);
+ p*=.1;
+ p*=rot(1.5);
+ ot=min(ot,abs(p)+.15*fract(max(abs(p.x),abs(p.y))*.25+i_time*0.1+f32(i)*0.15));
+ m=min(m,abs(p.y));
+ }
+ ot=exp(-200.*ot)*2.;
+ m=exp(-200.*m);
+ return vec3(ot.x,ot.y*.5+ot.x*.3,ot.y)+m*.2;
+}
+
+fn shader_main(frag_coord: vec2) -> vec4 {
+ let uv = (frag_coord - i_resolution.xy / 2.0) / i_resolution.y;
+ let d=vec2(0.0,0.5)/i_resolution.xy;
+ let col = render(uv)+render(uv+d.xy)+render(uv-d.xy)+render(uv+d.yx)+render(uv-d.yx);
+ return vec4(col/5.0, 1.0);
+}
+
+
+"""
+shader = Shadertoy(shader_code, resolution=(800, 450))
+
+if __name__ == "__main__":
+ shader.show()
diff --git a/examples/shadertoy_sea.py b/examples/shadertoy_sea.py
new file mode 100644
index 0000000..fa02caa
--- /dev/null
+++ b/examples/shadertoy_sea.py
@@ -0,0 +1,210 @@
+from wgpu.utils.shadertoy import Shadertoy
+
+shader_code = """
+
+// migrated from https://www.shadertoy.com/view/Ms2SD1, "Seascape" by Alexander Alekseev aka TDM - 2014
+
+const NUM_STEPS = 8;
+const PI = 3.141592;
+const EPSILON = 0.001;
+
+const ITER_GEOMETRY = 3;
+const ITER_FRAGMENT = 5;
+
+const SEA_HEIGHT = 0.6;
+const SEA_CHOPPY = 4.0;
+const SEA_SPEED = 0.8;
+const SEA_FREQ = 0.16;
+const SEA_BASE = vec3(0.0,0.09,0.18);
+const SEA_WATER_COLOR = vec3(0.48, 0.54, 0.36);
+
+// const octave_m = mat2x2(1.6, 1.2, -1.2, 1.6);
+
+fn hash( p : vec2 ) -> f32 {
+ // let h = dot(p,vec2(127.1,311.7)); // precision issue?
+ let h = dot(p,vec2(1.0,113.0));
+ return fract(sin(h)*43758.5453123);
+ // return fract(sin(h)); // Using the magic number 43758.5453123 seems to cause some precision issue?
+} + +// another hash function +// fn hash(p: vec2) -> f32 { +// var p3 = fract(vec3(p.xyx) * 0.1031); +// p3 += vec3( dot(p3, p3.yzx + vec3(33.33)) ); +// return fract((p3.x + p3.y) * p3.z); +// } + +// Perlin noise, TODO: try simplex noise +fn noise( p : vec2 ) -> f32 { + let i = floor(p); + let f = fract(p); + let u = f * f * (3.0 - 2.0 * f); + + let mix1 = mix( hash( i + vec2(0.0,0.0) ), hash( i + vec2(1.0,0.0) ), u.x); + let mix2 = mix( hash( i + vec2(0.0,1.0) ), hash( i + vec2(1.0,1.0) ), u.x); + + let mix3 = mix(mix1, mix2, u.y); + + return -1.0 + 2.0 * mix3; +} + + +// lighting +fn diffuse( n : vec3, l : vec3, p : f32 ) -> f32 { + return pow(dot(n,l) * 0.4 + 0.6, p); +} + +fn specular( n : vec3, l : vec3, e : vec3, s : f32 ) -> f32 { + let nrm = (s + 8.0) / (PI * 8.0); + return pow(max(dot(reflect(e,n),l),0.0),s) * nrm; +} + +// sky +fn getSkyColor( e_ : vec3 ) -> vec3 { + var e = e_; + e.y = (max(e.y,0.0) * 0.8 + 0.2) * 0.8; + return vec3(pow(1.0-e.y, 2.0), 1.0-e.y, 0.6+(1.0-e.y)*0.4) * 1.1; +} + +// sea +fn sea_octave( uv_ : vec2, choppy : f32 ) -> f32 { + let uv = uv_ + noise(uv_); + var wv = 1.0-abs(sin(uv)); + let swv = abs(cos(uv)); + wv = mix(wv,swv,wv); + return pow(1.0-pow(wv.x * wv.y,0.65),choppy); +} + +fn _map( p : vec3, iter: i32 ) -> f32 { + var freq = SEA_FREQ; + var amp = SEA_HEIGHT; + var choppy = SEA_CHOPPY; + + let sea_time = 1.0 + i_time * SEA_SPEED; + + var uv = p.xz; + uv.x *= 0.75; + + var d = 0.0; + var h = 0.0; + for (var i = 0; i < iter; i+=1) { + d = sea_octave((uv+sea_time)*freq, choppy); + d += sea_octave((uv-sea_time)*freq, choppy); + h += d * amp; + uv *= mat2x2(1.6, 1.2, -1.2, 1.6); + freq *= 1.9; + amp *= 0.22; + choppy = mix(choppy,1.0,0.2); + } + return p.y - h; +} + +fn map( p : vec3) -> f32 { + return _map(p, ITER_GEOMETRY); +} + +fn map_detailed( p : vec3 ) -> f32 { + return _map(p, ITER_FRAGMENT); +} + +fn getSeaColor( p : vec3, n : vec3, l : vec3, eye : vec3, dist : vec3 ) -> vec3 { + var fresnel = clamp(1.0 - dot(n,-eye), 0.0, 1.0); + fresnel = pow(fresnel,3.0) * 0.5; + + let reflected = getSkyColor(reflect(eye,n)); + let refracted = SEA_BASE + diffuse(n,l,80.0) * SEA_WATER_COLOR * 0.12; + + var color = mix(refracted,reflected,fresnel); + + let atten = max(1.0 - dot(dist,dist) * 0.001, 0.0); + color += SEA_WATER_COLOR * (p.y - SEA_HEIGHT) * 0.18 * atten; + + color += vec3(specular(n, l, eye, 60.0)); + + return color; + +} + +// tracing +fn getNormal( p : vec3, eps : f32 ) -> vec3 { + var n : vec3; + n.y = map_detailed(p); + n.x = map_detailed(vec3(p.x+eps,p.y,p.z)) - n.y; + n.z = map_detailed(vec3(p.x,p.y,p.z+eps)) - n.y; + n.y = eps; + return normalize(n); +} + +fn heightMapTracing( ori : vec3, dir : vec3 ) -> vec3 { + var tm = 0.0; + var tx = 1000.0; + var hx = map(ori + dir * tx); + var p : vec3; + if (hx > 0.0){ + p = ori + dir * tx; + return p; + } + var hm = map(ori + dir * tm); + var tmid = 0.0; + for (var i = 0; i < NUM_STEPS; i+=1) { + tmid = mix(tm,tx, hm/(hm-hx)); + p = ori + dir * tmid; + let hmid = map(p); + if (hmid < 0.0) { + tx = tmid; + hx = hmid; + } else { + tm = tmid; + hm = hmid; + } + } + return p; +} + +fn getPixel( coord: vec2, time: f32 ) -> vec3 { + var uv = coord / i_resolution.xy; + uv = uv * 2.0 - 1.0; + uv.x *= i_resolution.x / i_resolution.y; + + // ray + let ori = vec3(0.0,3.5,time*5.0); + let dir = normalize(vec3(uv.xy,-2.0)); + + // tracing + var p = heightMapTracing(ori, dir); + let dist = p - ori; + let n = getNormal(p, dot(dist,dist) * (0.1/i_resolution.x)); + let light = normalize(vec3(0.0,1.0,0.8)); + + 
// color + return mix( + getSkyColor(dir), + getSeaColor(p,n,light,dir,dist), + pow(smoothstep(0.0,-0.02,dir.y),0.2) + ); +} + +fn shader_main(frag_coord: vec2) -> vec4 { + + let time = i_time * 0.3 + i_mouse.x * 0.01; + + var color: vec3; + for (var i = -1; i <=1; i+=1) { + for (var j =-1; j <=1; j+=1) { + let uv = frag_coord + vec2(f32(i),f32(j)) / 3.0; + color += getPixel(uv, time); + } + } + color = color / 9.0; + + color = getPixel(frag_coord, time); + + // post + return vec4(pow(color, vec3(0.65)), 1.0); +} + +""" +shader = Shadertoy(shader_code) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_star.py b/examples/shadertoy_star.py new file mode 100644 index 0000000..48e95ab --- /dev/null +++ b/examples/shadertoy_star.py @@ -0,0 +1,99 @@ +from wgpu.utils.shadertoy import Shadertoy + +shader_code = """ + +// migrated from: https://www.shadertoy.com/view/XlfGRj, By Kali + +const iterations = 17; +const formuparam = 0.53; + +const volsteps = 20; +const stepsize = 0.1; + +const zoom = 0.800; +const tile = 0.850; +const speed = 0.010; + +const brightness = 0.0015; +const darkmatter = 0.300; +const distfading = 0.730; +const saturation = 0.850; + +fn mod3( p1 : vec3, p2 : vec3 ) -> vec3 { + let mx = p1.x - p2.x * floor( p1.x / p2.x ); + let my = p1.y - p2.y * floor( p1.y / p2.y ); + let mz = p1.z - p2.z * floor( p1.z / p2.z ); + return vec3(mx, my, mz); +} + +fn shader_main(frag_coord: vec2) -> vec4 { + + var uv = frag_coord.xy / i_resolution.xy - vec2(0.5, 0.5); + uv.y *= i_resolution.y / i_resolution.x; + var dir = vec3(uv * zoom, 1.0); + let time = i_time * speed + 0.25; + + //mouse rotation + let a1 = 0.5 + i_mouse.x / i_resolution.x * 2.0; + let a2 = 0.8 + i_mouse.y / i_resolution.y * 2.0; + let rot1 = mat2x2(cos(a1), sin(a1), -sin(a1), cos(a1)); + let rot2 = mat2x2(cos(a2), sin(a2), -sin(a2), cos(a2)); + + let dir_xz = dir.xz * rot1; + dir.x = dir_xz.x; + dir.z = dir_xz.y; + + let dir_xy = dir.xy * rot2; + dir.x = dir_xy.x; + dir.y = dir_xy.y; + + var fro = vec3(1.0, 0.5, 0.5); + fro += vec3(time * 2.0, time, -2.0); + + let fro_xz = fro.xz * rot1; + fro.x = fro_xz.x; + fro.z = fro_xz.y; + + let fro_xy = fro.xy * rot2; + fro.x = fro_xy.x; + fro.y = fro_xy.y; + + //volumetric rendering + + var s = 0.1; + var fade = 1.0; + var v = vec3(0.0); + + for (var r: i32 = 0; r < volsteps; r = r + 1) { + var p = fro + s * dir * 0.5; + p = abs(vec3(tile) - mod3(p, vec3(tile * 2.0))); // tiling fold + var pa = 0.0; + var a = 0.0; + for (var i : i32 = 0; i < iterations; i = i + 1) { + p = abs(p) / dot(p, p) - formuparam; // the magic formula + a += abs(length(p) - pa); // absolute sum of average change + pa = length(p); + } + let dm = max(0.0, darkmatter - a * a * 0.001); //dark matter + a = a * a * a; // add contrast + if (r > 6) { + fade = fade * (1.0 - dm); // dark matter, don't render near + } + + v += vec3(fade); + v += vec3(s, s * s, s * s * s * s) * a * brightness * fade; // coloring based on distance + fade = fade * distfading; // distance fading + s = s + stepsize; + } + + v = mix(vec3(length(v)), v, saturation); //color adjust + return vec4(v * 0.01, 1.0); + +} + +""" + +shader = Shadertoy(shader_code, resolution=(800, 450)) + +if __name__ == "__main__": + shader.show() diff --git a/examples/shadertoy_textures.py b/examples/shadertoy_textures.py new file mode 100644 index 0000000..a8b1a49 --- /dev/null +++ b/examples/shadertoy_textures.py @@ -0,0 +1,24 @@ +from wgpu.utils.shadertoy import Shadertoy, ShadertoyChannel + +shader_code_wgsl = """ +fn 
shader_main(frag_coord: vec2) -> vec4{ + let uv = frag_coord / i_resolution.xy; + let c0 = textureSample(i_channel0, sampler0, 2.0*uv); + let c1 = textureSample(i_channel1, sampler1, 3.0*uv); + return mix(c0,c1,abs(sin(i_time))); +} +""" +test_pattern = memoryview( + bytearray((int(i != k) * 255 for i in range(8) for k in range(8))) * 4 +).cast("B", shape=[8, 8, 4]) +gradient = memoryview( + bytearray((i for i in range(0, 255, 8) for _ in range(4))) * 32 +).cast("B", shape=[32, 32, 4]) + +channel0 = ShadertoyChannel(test_pattern, wrap="repeat") +channel1 = ShadertoyChannel(gradient) + +shader = Shadertoy(shader_code_wgsl, resolution=(640, 480), inputs=[channel0, channel1]) + +if __name__ == "__main__": + shader.show() diff --git a/examples/tests/test_examples.py b/examples/tests/test_examples.py new file mode 100644 index 0000000..5e80e04 --- /dev/null +++ b/examples/tests/test_examples.py @@ -0,0 +1,153 @@ +""" +Test that the examples run without error. +""" + +import os +import importlib +import runpy +import sys +from unittest.mock import patch + +import imageio.v2 as imageio +import numpy as np +import pytest + + +from testutils import ( + can_use_wgpu_lib, + wgpu_backend, + is_lavapipe, + find_examples, + ROOT, + screenshots_dir, + diffs_dir, +) + + +if not can_use_wgpu_lib: + pytest.skip("Skipping tests that need the wgpu lib", allow_module_level=True) + + +# run all tests unless they opt-out +examples_to_run = find_examples( + negative_query="# run_example = false", return_stems=True +) + +# only test output of examples that opt-in +examples_to_test = find_examples(query="# test_example = true", return_stems=True) + + +@pytest.mark.parametrize("module", examples_to_run) +def test_examples_run(module, force_offscreen): + """Run every example marked to see if they can run without error.""" + # use runpy so the module is not actually imported (and can be gc'd) + # but also to be able to run the code in the __main__ block + runpy.run_module(f"examples.{module}", run_name="__main__") + + +@pytest.fixture +def force_offscreen(): + """Force the offscreen canvas to be selected by the auto gui module.""" + os.environ["WGPU_FORCE_OFFSCREEN"] = "true" + try: + yield + finally: + del os.environ["WGPU_FORCE_OFFSCREEN"] + + +@pytest.fixture +def mock_time(): + """Some examples use time to animate. 
Fix the return value + for repeatable output.""" + with patch("time.time") as time_mock: + time_mock.return_value = 1.23456 + yield + + +def test_that_we_are_on_lavapipe(): + print(wgpu_backend) + if os.getenv("EXPECT_LAVAPIPE"): + assert is_lavapipe + + +@pytest.mark.parametrize("module", examples_to_test) +def test_examples_screenshots( + module, pytestconfig, force_offscreen, mock_time, request +): + """Run every example marked for testing.""" + + # import the example module + module_name = f"examples.{module}" + example = importlib.import_module(module_name) + + # ensure it is unloaded after the test + def unload_module(): + del sys.modules[module_name] + + request.addfinalizer(unload_module) + + # render a frame + img = np.asarray(example.canvas.draw()) + + # check if _something_ was rendered + assert img is not None and img.size > 0 + + # we skip the rest of the test if you are not using lavapipe + # images come out subtly differently when using different wgpu adapters + # so for now we only compare screenshots generated with the same adapter (lavapipe) + # a benefit of using pytest.skip is that you are still running + # the first part of the test everywhere else; ensuring that examples + # can at least import, run and render something + if not is_lavapipe: + pytest.skip("screenshot comparisons are only done when using lavapipe") + + # regenerate screenshot if requested + screenshots_dir.mkdir(exist_ok=True) + screenshot_path = screenshots_dir / f"{module}.png" + if pytestconfig.getoption("regenerate_screenshots"): + imageio.imwrite(screenshot_path, img) + + # if a reference screenshot exists, assert it is equal + assert ( + screenshot_path.exists() + ), "found # test_example = true but no reference screenshot available" + stored_img = imageio.imread(screenshot_path) + # assert similarity + is_similar = np.allclose(img, stored_img, atol=1) + update_diffs(module, is_similar, img, stored_img) + assert is_similar, ( + f"rendered image for example {module} changed, see " + f"the {diffs_dir.relative_to(ROOT).as_posix()} folder" + " for visual diffs (you can download this folder from" + " CI build artifacts as well)" + ) + + +def update_diffs(module, is_similar, img, stored_img): + diffs_dir.mkdir(exist_ok=True) + # cast to float32 to avoid overflow + # compute absolute per-pixel difference + diffs_rgba = np.abs(stored_img.astype("f4") - img) + # magnify small values, making it easier to spot small errors + diffs_rgba = ((diffs_rgba / 255) ** 0.25) * 255 + # cast back to uint8 + diffs_rgba = diffs_rgba.astype("u1") + # split into an rgb and an alpha diff + diffs = { + diffs_dir / f"{module}-rgb.png": diffs_rgba[..., :3], + diffs_dir / f"{module}-alpha.png": diffs_rgba[..., 3], + } + + for path, diff in diffs.items(): + if not is_similar: + imageio.imwrite(path, diff) + elif path.exists(): + path.unlink() + + +if __name__ == "__main__": + # Enable tweaking in an IDE by running in an interactive session. + os.environ["WGPU_FORCE_OFFSCREEN"] = "true" + pytest.getoption = lambda x: False + is_lavapipe = True # noqa: F811 + test_examples_screenshots("validate_volume", pytest, None, None) diff --git a/examples/triangle.py b/examples/triangle.py new file mode 100644 index 0000000..80a14de --- /dev/null +++ b/examples/triangle.py @@ -0,0 +1,155 @@ +""" +Example use of the wgpu API to draw a triangle. This example is set up +so it can be run on canvases provided by any backend. Running this file +as a script will use the auto-backend (using either glfw or jupyter). 
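+
+A minimal usage sketch (the same pattern as the __main__ block at the bottom of
+this file; any canvas backend works as long as its event loop is also run):
+
+    from wgpu.gui.auto import WgpuCanvas, run
+
+    canvas = WgpuCanvas(size=(640, 480), title="wgpu triangle")
+    main(canvas)  # sets up the pipeline and registers the draw callback
+    run()         # enter the event loop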
+
+
+Similar example in other languages / API's:
+
+* Rust wgpu:
+  https://github.com/gfx-rs/wgpu-rs/blob/master/examples/hello-triangle/main.rs
+* C wgpu:
+  https://github.com/gfx-rs/wgpu/blob/master/examples/triangle/main.c
+* Python Vulkan:
+  https://github.com/realitix/vulkan/blob/master/example/contribs/example_glfw.py
+
+"""
+
+import wgpu
+
+
+# %% Shaders
+
+
+shader_source = """
+struct VertexInput {
+    @builtin(vertex_index) vertex_index : u32,
+};
+struct VertexOutput {
+    @location(0) color : vec4<f32>,
+    @builtin(position) pos: vec4<f32>,
+};
+
+@vertex
+fn vs_main(in: VertexInput) -> VertexOutput {
+    var positions = array<vec2<f32>, 3>(
+        vec2<f32>(0.0, -0.5),
+        vec2<f32>(0.5, 0.5),
+        vec2<f32>(-0.5, 0.75),
+    );
+    var colors = array<vec3<f32>, 3>(  // srgb colors
+        vec3<f32>(1.0, 1.0, 0.0),
+        vec3<f32>(1.0, 0.0, 1.0),
+        vec3<f32>(0.0, 1.0, 1.0),
+    );
+    let index = i32(in.vertex_index);
+    var out: VertexOutput;
+    out.pos = vec4<f32>(positions[index], 0.0, 1.0);
+    out.color = vec4<f32>(colors[index], 1.0);
+    return out;
+}
+
+@fragment
+fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
+    let physical_color = pow(in.color.rgb, vec3<f32>(2.2));  // gamma correct
+    return vec4<f32>(physical_color, in.color.a);
+}
+"""
+
+
+# %% The wgpu calls
+
+
+def main(canvas, power_preference="high-performance", limits=None):
+    """Regular function to setup a viz on the given canvas."""
+    adapter = wgpu.gpu.request_adapter(power_preference=power_preference)
+    device = adapter.request_device(required_limits=limits)
+    return _main(canvas, device)
+
+
+async def main_async(canvas):
+    """Async function to setup a viz on the given canvas."""
+    adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance")
+    device = await adapter.request_device_async(required_limits={})
+    return _main(canvas, device)
+
+
+def _main(canvas, device):
+    shader = device.create_shader_module(code=shader_source)
+
+    # No bind group and layout, we should not create empty ones. 
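+    # The pipeline layout itself is still required; it just references no
+    # bind group layouts. As a hypothetical sketch (not used in this example),
+    # a single uniform-buffer binding would look like:
+    #
+    #     bind_group_layout = device.create_bind_group_layout(entries=[{
+    #         "binding": 0,
+    #         "visibility": wgpu.ShaderStage.VERTEX,
+    #         "buffer": {"type": wgpu.BufferBindingType.uniform},
+    #     }])
+    #     pipeline_layout = device.create_pipeline_layout(
+    #         bind_group_layouts=[bind_group_layout]
+    #     )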
+ pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + present_context = canvas.get_context() + render_texture_format = present_context.get_preferred_format(device.adapter) + present_context.configure(device=device, format=render_texture_format) + + render_pipeline = device.create_render_pipeline( + layout=pipeline_layout, + vertex={ + "module": shader, + "entry_point": "vs_main", + "buffers": [], + }, + primitive={ + "topology": wgpu.PrimitiveTopology.triangle_list, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.none, + }, + depth_stencil=None, + multisample=None, + fragment={ + "module": shader, + "entry_point": "fs_main", + "targets": [ + { + "format": render_texture_format, + "blend": { + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + }, + ], + }, + ) + + def draw_frame(): + current_texture = present_context.get_current_texture() + command_encoder = device.create_command_encoder() + + render_pass = command_encoder.begin_render_pass( + color_attachments=[ + { + "view": current_texture.create_view(), + "resolve_target": None, + "clear_value": (0, 0, 0, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ], + ) + + render_pass.set_pipeline(render_pipeline) + # render_pass.set_bind_group(0, no_bind_group, [], 0, 1) + render_pass.draw(3, 1, 0, 0) + render_pass.end() + device.queue.submit([command_encoder.finish()]) + + canvas.request_draw(draw_frame) + return device + + +if __name__ == "__main__": + from wgpu.gui.auto import WgpuCanvas, run + + canvas = WgpuCanvas(size=(640, 480), title="wgpu triangle") + main(canvas) + run() diff --git a/examples/triangle_auto.py b/examples/triangle_auto.py new file mode 100644 index 0000000..542e7ca --- /dev/null +++ b/examples/triangle_auto.py @@ -0,0 +1,21 @@ +""" +Import the viz from triangle.py and run it using the auto-gui. +""" +# test_example = true + +import sys +from pathlib import Path + +from wgpu.gui.auto import WgpuCanvas, run + +sys.path.insert(0, str(Path(__file__).parent)) + +from triangle import main # noqa: E402, The function to call to run the visualization + + +canvas = WgpuCanvas() +device = main(canvas) + + +if __name__ == "__main__": + run() diff --git a/examples/triangle_glfw.py b/examples/triangle_glfw.py new file mode 100644 index 0000000..b2b34b7 --- /dev/null +++ b/examples/triangle_glfw.py @@ -0,0 +1,22 @@ +""" +Import the viz from triangle.py and run it using glfw (which uses asyncio for the event loop). + +# run_example = false +""" + +import sys +from pathlib import Path + +from wgpu.gui.glfw import WgpuCanvas, run + +sys.path.insert(0, str(Path(__file__).parent)) + +from triangle import main # noqa: E402, The function to call to run the visualization + + +canvas = WgpuCanvas() +device = main(canvas) + + +if __name__ == "__main__": + run() diff --git a/examples/triangle_glsl.py b/examples/triangle_glsl.py new file mode 100644 index 0000000..702d3a8 --- /dev/null +++ b/examples/triangle_glsl.py @@ -0,0 +1,143 @@ +""" +The triangle example, using GLSL shaders. 
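+
+Compared to triangle.py, the vertex and fragment stages are two separate GLSL
+sources, each compiled with its own create_shader_module() call; the rest of
+the pipeline setup is the same.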
+ +""" + +import wgpu + + +# %% Shaders + + +vertex_shader = """ +#version 450 core +layout(location = 0) out vec4 color; +void main() +{ + vec2 positions[3] = vec2[3]( + vec2(0.0, -0.5), + vec2(0.5, 0.5), + vec2(-0.5, 0.75) + ); + vec3 colors[3] = vec3[3]( // srgb colors + vec3(1.0, 1.0, 0.0), + vec3(1.0, 0.0, 1.0), + vec3(0.0, 1.0, 1.0) + ); + int index = int(gl_VertexID); + gl_Position = vec4(positions[index], 0.0, 1.0); + color = vec4(colors[index], 1.0); +} +""" + +fragment_shader = """ +#version 450 core +out vec4 FragColor; +layout(location = 0) in vec4 color; +void main() +{ + vec3 physical_color = pow(color.rgb, vec3(2.2)); // gamma correct + FragColor = vec4(physical_color, color.a); +} +""" + + +# %% The wgpu calls + + +def main(canvas, power_preference="high-performance", limits=None): + """Regular function to setup a viz on the given canvas.""" + adapter = wgpu.gpu.request_adapter(power_preference=power_preference) + device = adapter.request_device(required_limits=limits) + return _main(canvas, device) + + +async def main_async(canvas): + """Async function to setup a viz on the given canvas.""" + adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance") + device = await adapter.request_device_async(required_limits={}) + return _main(canvas, device) + + +def _main(canvas, device): + vert_shader = device.create_shader_module(label="triangle_vert", code=vertex_shader) + frag_shader = device.create_shader_module( + label="triangle_frag", code=fragment_shader + ) + + # No bind group and layout, we should not create empty ones. + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + present_context = canvas.get_context() + render_texture_format = present_context.get_preferred_format(device.adapter) + present_context.configure(device=device, format=render_texture_format) + + render_pipeline = device.create_render_pipeline( + layout=pipeline_layout, + vertex={ + "module": vert_shader, + "entry_point": "main", + "buffers": [], + }, + primitive={ + "topology": wgpu.PrimitiveTopology.triangle_list, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.none, + }, + depth_stencil=None, + multisample=None, + fragment={ + "module": frag_shader, + "entry_point": "main", + "targets": [ + { + "format": render_texture_format, + "blend": { + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + }, + ], + }, + ) + + def draw_frame(): + current_texture = present_context.get_current_texture() + command_encoder = device.create_command_encoder() + + render_pass = command_encoder.begin_render_pass( + color_attachments=[ + { + "view": current_texture.create_view(), + "resolve_target": None, + "clear_value": (0, 0, 0, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ], + ) + + render_pass.set_pipeline(render_pipeline) + # render_pass.set_bind_group(0, no_bind_group, [], 0, 1) + render_pass.draw(3, 1, 0, 0) + render_pass.end() + device.queue.submit([command_encoder.finish()]) + + canvas.request_draw(draw_frame) + return device + + +if __name__ == "__main__": + from wgpu.gui.auto import WgpuCanvas, run + + canvas = WgpuCanvas(size=(640, 480), title="wgpu triangle") + main(canvas) + run() diff --git a/examples/triangle_qt.py b/examples/triangle_qt.py new file mode 100644 index 0000000..033d7b9 --- /dev/null +++ b/examples/triangle_qt.py @@ -0,0 +1,41 @@ +""" +Import the viz from 
triangle.py and run it in a Qt window. +Works with either PySide6, PyQt6, PyQt5 or PySide2. + +# run_example = false +""" +import importlib + +# For the sake of making this example Just Work, we try multiple QT libs +for lib in ("PySide6", "PyQt6", "PySide2", "PyQt5"): + try: + QtWidgets = importlib.import_module(".QtWidgets", lib) + break + except ModuleNotFoundError: + pass + + +from wgpu.gui.qt import WgpuCanvas # WgpuCanvas is a QWidget subclass + +from triangle import main # The function to call to run the visualization + + +app = QtWidgets.QApplication([]) +canvas = WgpuCanvas() + +device = main(canvas) + +# Enter Qt event loop (compatible with qt5/qt6) +app.exec() if hasattr(app, "exec") else app.exec_() + + +# For those interested, this is a simple way to integrate Qt's event +# loop with asyncio, but for real apps you probably want to use +# something like the qasync library. +# async def mainloop(): +# await main_async(canvas) +# while not canvas.is_closed(): +# await asyncio.sleep(0.001) +# app.flush() +# app.processEvents() +# loop.stop() diff --git a/examples/triangle_qt_embed.py b/examples/triangle_qt_embed.py new file mode 100644 index 0000000..42c9864 --- /dev/null +++ b/examples/triangle_qt_embed.py @@ -0,0 +1,53 @@ +""" +An example demonstrating a qt app with a wgpu viz inside. +If needed, change the PySide6 import to e.g. PyQt6, PyQt5, or PySide2. + +# run_example = false +""" +import importlib + +# For the sake of making this example Just Work, we try multiple QT libs +for lib in ("PySide6", "PyQt6", "PySide2", "PyQt5"): + try: + QtWidgets = importlib.import_module(".QtWidgets", lib) + break + except ModuleNotFoundError: + pass + + +from wgpu.gui.qt import WgpuWidget + +from triangle import main + + +class ExampleWidget(QtWidgets.QWidget): + def __init__(self): + super().__init__() + self.resize(640, 480) + self.setWindowTitle("wgpu triangle embedded in a qt app") + + splitter = QtWidgets.QSplitter() + + self.button = QtWidgets.QPushButton("Hello world", self) + self.canvas1 = WgpuWidget(splitter) + self.canvas2 = WgpuWidget(splitter) + + splitter.addWidget(self.canvas1) + splitter.addWidget(self.canvas2) + + layout = QtWidgets.QHBoxLayout() + layout.addWidget(self.button, 0) + layout.addWidget(splitter, 1) + self.setLayout(layout) + + self.show() + + +app = QtWidgets.QApplication([]) +example = ExampleWidget() + +main(example.canvas1) +main(example.canvas2) + +# Enter Qt event loop (compatible with qt5/qt6) +app.exec() if hasattr(app, "exec") else app.exec_() diff --git a/examples/triangle_subprocess.py b/examples/triangle_subprocess.py new file mode 100644 index 0000000..e1c2e64 --- /dev/null +++ b/examples/triangle_subprocess.py @@ -0,0 +1,84 @@ +""" +An example showing that with WGPU you can draw to the window of another +process. Just a proof of concept, this is far from perfect yet: + +* It works if I run it in Pyzo, but not if I run it from the terminal. +* I only tried it on Windows. +* You'll want to let the proxy know about size changes. +* The request_draw should invoke a draw (in asyncio?), not draw directly. +* Properly handling closing the figure (from both ends). 
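+
+How it works: the child process prints its native window id and physical size
+to stdout, and the ProxyCanvas below hands those to wgpu, so that the parent
+process can create a surface for, and render into, the child's window.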
+ +# run_example = false +""" + +import sys +import time +import subprocess + +from wgpu.gui import WgpuCanvasBase + +# Import the (async) function that we must call to run the visualization +from triangle import main + + +code = """ +import sys +from PySide6 import QtWidgets # Use either PySide6 or PyQt6 +from wgpu.gui.qt import WgpuCanvas + +app = QtWidgets.QApplication([]) +canvas = WgpuCanvas(title="wgpu triangle in Qt subprocess") + +print(canvas.get_window_id()) +#print(canvas.get_display_id()) +print(canvas.get_physical_size()) +sys.stdout.flush() + +app.exec_() +""" + + +class ProxyCanvas(WgpuCanvasBase): + def __init__(self): + super().__init__() + self._window_id = int(p.stdout.readline().decode()) + self._psize = tuple( + int(x) for x in p.stdout.readline().decode().strip().strip("()").split(",") + ) + print(self._psize) + time.sleep(0.2) + + def get_window_id(self): + return self._window_id + + def get_physical_size(self): + return self._psize + + def get_pixel_ratio(self): + return 1 + + def get_logical_size(self): + return self._psize + + def set_logical_size(self, width, height): + pass + + def close(self): + p.kill() + + def is_closed(self): + raise NotImplementedError() + + def _request_draw(self): + self.draw_frame() + + +# Create subprocess +p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE) + +# Create a canvas that maps to the window of that subprocess +canvas = ProxyCanvas() + +# Go! +main(canvas) +time.sleep(3) diff --git a/examples/triangle_wx.py b/examples/triangle_wx.py new file mode 100644 index 0000000..22c9002 --- /dev/null +++ b/examples/triangle_wx.py @@ -0,0 +1,16 @@ +""" +Import the viz from triangle.py and run it in a wxPython window. +""" +# run_example = false + +import wx +from wgpu.gui.wx import WgpuCanvas + +from examples.triangle import main # The function to call to run the visualization + + +app = wx.App() +canvas = WgpuCanvas() + +main(canvas) +app.MainLoop() diff --git a/examples/triangle_wx_embed.py b/examples/triangle_wx_embed.py new file mode 100644 index 0000000..e45c13d --- /dev/null +++ b/examples/triangle_wx_embed.py @@ -0,0 +1,40 @@ +""" +An example demonstrating a wx app with a wgpu viz inside. +""" +# run_example = false + +import wx +from wgpu.gui.wx import WgpuWidget + +from examples.triangle import main + + +class Example(wx.Frame): + def __init__(self): + super().__init__(None, title="wgpu triangle embedded in a wx app") + self.SetSize(640, 480) + + splitter = wx.SplitterWindow(self) + + self.button = wx.Button(self, -1, "Hello world") + self.canvas1 = WgpuWidget(splitter) + self.canvas2 = WgpuWidget(splitter) + + splitter.SplitVertically(self.canvas1, self.canvas2) + splitter.SetSashGravity(0.5) + + sizer = wx.BoxSizer(wx.HORIZONTAL) + sizer.Add(self.button, 0, wx.EXPAND) + sizer.Add(splitter, 1, wx.EXPAND) + self.SetSizer(sizer) + + self.Show() + + +app = wx.App() +example = Example() + +main(example.canvas1) +main(example.canvas2) + +app.MainLoop() diff --git a/examples/wgpu-examples.ipynb b/examples/wgpu-examples.ipynb new file mode 100644 index 0000000..3e67105 --- /dev/null +++ b/examples/wgpu-examples.ipynb @@ -0,0 +1,117 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "afd9b3fd", + "metadata": {}, + "source": [ + "# WGPU notebook examples" + ] + }, + { + "cell_type": "markdown", + "id": "2e610ab9", + "metadata": {}, + "source": [ + "## Triangle example\n", + "\n", + "We import the triangle example and show it in the notebook." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6e4ffe0", + "metadata": {}, + "outputs": [], + "source": [ + "from wgpu.gui.auto import WgpuCanvas, run\n", + "import triangle\n", + "\n", + "canvas = WgpuCanvas(size=(640, 480), title=\"wgpu triangle with GLFW\")\n", + "\n", + "triangle.main(canvas)\n", + "canvas" + ] + }, + { + "cell_type": "markdown", + "id": "e120b752", + "metadata": {}, + "source": [ + "## Cube example\n", + "\n", + "An interactive example this time." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e4f9f67d", + "metadata": {}, + "outputs": [], + "source": [ + "from cube import canvas\n", + "\n", + "canvas" + ] + }, + { + "cell_type": "markdown", + "id": "749ffb40", + "metadata": {}, + "source": [ + "## Event example\n", + "\n", + "The code below is a copy from `show_events.py`. It is just to show how events are handled. These events are the same accross all auto-backends." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c858215a", + "metadata": {}, + "outputs": [], + "source": [ + "from wgpu.gui.auto import WgpuCanvas, run\n", + "\n", + "class MyCanvas(WgpuCanvas):\n", + " def handle_event(self, event):\n", + " if event[\"event_type\"] != \"pointer_move\":\n", + " print(event)\n", + "\n", + "canvas = MyCanvas(size=(640, 480), title=\"wgpu triangle with GLFW\")\n", + "canvas" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b92d13b", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..7041990 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,34 @@ +[build-system] +requires = [ + "setuptools>=42", + "wheel", +] +build-backend = "setuptools.build_meta" + +[tool.cibuildwheel] +# we only build on one python version since the wheels are not bound to it +build = "cp39-*" + +# we can't list requests under build-system.requires because +# that step happens _after_ the before-build command +before-build = "pip install requests && python download-wgpu-native.py" + +# this is sufficient to trigger an install of the built wheel +test-command = "echo Wheel installed" + +# this is the minimum supported manylinux version +manylinux-x86_64-image = "manylinux_2_24" +manylinux-i686-image = "manylinux_2_24" +manylinux-aarch64-image = "manylinux_2_24" +manylinux-ppc64le-image = "manylinux_2_24" +manylinux-s390x-image = "manylinux_2_24" +manylinux-pypy_x86_64-image = "manylinux_2_24" +manylinux-pypy_i686-image = "manylinux_2_24" +manylinux-pypy_aarch64-image = "manylinux_2_24" + +[tool.cibuildwheel.macos] +# also create apple silicon wheels +archs = ["x86_64", "arm64"] + +# the upstream binaries are not universal yet +# archs = ["x86_64", "universal2", "arm64"] diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..4cbd3ac --- /dev/null +++ b/setup.cfg @@ -0,0 +1,36 @@ +[flake8] + +max_line_length = 88 + +exclude = build,dist,*.egg-info,.venv + +# E501 line too long +# E203 whitespace before ':' +# F722 syntax error in forward annotation +# F821 undefined name -> we must get rid of 
this! +# B006 Do not use mutable data structures for argument defaults. +# B007 Loop control variable 'j2' not used within the loop body. +# D docstring checks +extend-ignore = E501, E203, B006, B007, D + +per-file-ignores = + tests/test_compute.py: F821,F722 + tests/test_gui_glfw.py: F821,F722 + tests/test_wgpu_native_basics.py: F821,F722 + tests/test_wgpu_native_render.py: F821,F722 + tests/test_wgpu_native_render_tex.py: F821,F722 + tests/test_wgpu_native_compute_tex.py : F821,F722 + examples/*.py: F821,F722 + examples/triangle_qt*.py: E402 + + +[coverage:report] + +exclude_lines = + # Remember that these are reg exp + + # Have to re-enable the standard pragma, plus a less-ugly flavor + pragma: no cover + no-cover + + raise NotImplementedError diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..b50647a --- /dev/null +++ b/setup.py @@ -0,0 +1,63 @@ +import re +import platform + +from setuptools import find_packages, setup +from wheel.bdist_wheel import get_platform, bdist_wheel as _bdist_wheel + + +NAME = "wgpu" +SUMMARY = "Next generation GPU API for Python" + +with open(f"{NAME}/__init__.py") as fh: + VERSION = re.search(r"__version__ = \"(.*?)\"", fh.read()).group(1) + + +class bdist_wheel(_bdist_wheel): # noqa: N801 + def finalize_options(self): + self.plat_name = get_platform(None) # force a platform tag + _bdist_wheel.finalize_options(self) + + +resources_globs = ["*.h", "*.idl"] +if platform.system() == "Linux": + resources_globs.append("*-release.so") +elif platform.system() == "Darwin": + resources_globs.append("*-release.dylib") +elif platform.system() == "Windows": + resources_globs.append("*-release.dll") +else: + pass # don't include binaries; user will have to arrange for the lib + +runtime_deps = ["cffi>=1.15.0", "rubicon-objc>=0.4.1; sys_platform == 'darwin'"] +extra_deps = { + "jupyter": ["jupyter_rfb>=0.4.2"], + "glfw": ["glfw>=1.9"], + "docs": ["sphinx>7.2", "sphinx_rtd_theme"], +} + +setup( + name=NAME, + version=VERSION, + packages=find_packages( + exclude=["codegen", "codegen.*", "tests", "tests.*", "examples", "examples.*"] + ), + package_data={f"{NAME}.resources": resources_globs}, + python_requires=">=3.8.0", + install_requires=runtime_deps, + extras_require=extra_deps, + license="BSD 2-Clause", + description=SUMMARY, + long_description=open("README.md").read(), + long_description_content_type="text/markdown", + author="Almar Klein", + author_email="almar.klein@gmail.com", + url="https://github.com/pygfx/wgpu-py", + cmdclass={"bdist_wheel": bdist_wheel}, + data_files=[("", ["LICENSE"])], + entry_points={ + "pyinstaller40": [ + "hook-dirs = wgpu.__pyinstaller:get_hook_dirs", + "tests = wgpu.__pyinstaller:get_test_dirs", + ], + }, +) diff --git a/tests/renderutils.py b/tests/renderutils.py new file mode 100644 index 0000000..370eadf --- /dev/null +++ b/tests/renderutils.py @@ -0,0 +1,334 @@ +""" Utils to render to a texture or screen. Tuned to the tests, so quite some +assumptions here. 
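+
+A rough usage sketch of the main helper (assuming a device, shader source,
+pipeline layout and bind group have been set up as in the tests):
+
+    pixels = render_to_texture(
+        device, shader_source, pipeline_layout, bind_group, size=(64, 64)
+    )
+    # pixels is a numpy uint8 array of shape (64, 64, 4)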
+"""
+
+
+import ctypes
+import numpy as np
+
+import wgpu.backends.wgpu_native  # noqa
+
+
+def upload_to_texture(device, texture, data, nx, ny, nz):
+    nbytes = ctypes.sizeof(data)
+    bpp = nbytes // (nx * ny * nz)
+
+    # Create a buffer to get the data into the GPU
+    buffer = device.create_buffer_with_data(data=data, usage=wgpu.BufferUsage.COPY_SRC)
+
+    # Copy to texture
+    command_encoder = device.create_command_encoder()
+    command_encoder.copy_buffer_to_texture(
+        {
+            "buffer": buffer,
+            "offset": 0,
+            "bytes_per_row": bpp * nx,
+            "rows_per_image": ny,
+        },
+        {"texture": texture, "mip_level": 0, "origin": (0, 0, 0)},
+        (nx, ny, nz),
+    )
+    device.queue.submit([command_encoder.finish()])
+
+
+def download_from_texture(device, texture, data_type, nx, ny, nz):
+    nbytes = ctypes.sizeof(data_type)
+    bpp = nbytes // (nx * ny * nz)
+
+    # Create a buffer to read the data back from the GPU
+    buffer = device.create_buffer(size=nbytes, usage=wgpu.BufferUsage.COPY_DST)
+
+    # Copy to buffer
+    command_encoder = device.create_command_encoder()
+    command_encoder.copy_texture_to_buffer(
+        {"texture": texture, "mip_level": 0, "origin": (0, 0, 0)},
+        {
+            "buffer": buffer,
+            "offset": 0,
+            "bytes_per_row": bpp * nx,
+            "rows_per_image": ny,
+        },
+        (nx, ny, nz),
+    )
+    device.queue.submit([command_encoder.finish()])
+
+    # Download
+    return data_type.from_buffer(device.queue.read_buffer(buffer))
+
+
+def render_to_texture(
+    device,
+    shader_source,
+    pipeline_layout,
+    bind_group,
+    *,
+    size,
+    topology=wgpu.PrimitiveTopology.triangle_strip,
+    ibo=None,
+    vbos=None,
+    vbo_views=None,
+    indirect_buffer=None,
+    color_attachment=None,
+    depth_stencil_state=None,
+    depth_stencil_attachment=None,
+    renderpass_callback=lambda *args: None,
+):
+    # https://github.com/gfx-rs/wgpu-rs/blob/master/examples/capture/main.rs
+
+    vbos = vbos or []
+    vbo_views = vbo_views or []
+
+    # Select texture format. 
The srgb norm maps to the srgb colorspace which + # appears to be the default for render pipelines https://en.wikipedia.org/wiki/SRGB + texture_format = wgpu.TextureFormat.rgba8unorm # rgba8unorm or bgra8unorm_srgb + + # Create texture to render to + nx, ny, bpp = size[0], size[1], 4 + nbytes = nx * ny * bpp + texture = device.create_texture( + size=(nx, ny, 1), + dimension=wgpu.TextureDimension.d2, + format=texture_format, + usage=wgpu.TextureUsage.RENDER_ATTACHMENT | wgpu.TextureUsage.COPY_SRC, + ) + current_texture_view = texture.create_view() + + # Also a buffer to read the data to CPU + buffer = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST + ) + + shader = device.create_shader_module(code=shader_source) + + render_pipeline = device.create_render_pipeline( + layout=pipeline_layout, + vertex={ + "module": shader, + "entry_point": "vs_main", + "buffers": vbo_views, + }, + primitive={ + "topology": topology, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.none, + }, + depth_stencil=depth_stencil_state, + multisample={ + "count": 1, + "mask": 0xFFFFFFFF, + "alpha_to_coverage_enabled": False, + }, + fragment={ + "module": shader, + "entry_point": "fs_main", + "targets": [ + { + "format": texture_format, + "blend": { + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + "write_mask": wgpu.ColorWrite.ALL, + }, + ], + }, + ) + + if bind_group: + # if the bind_group is provided, we can at least retrieve + # the first bind group layout from the pipeline + _ = render_pipeline.get_bind_group_layout(0) + + command_encoder = device.create_command_encoder() + + color_attachment = color_attachment or { + "resolve_target": None, + "clear_value": (0, 0, 0, 0), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + color_attachment["view"] = current_texture_view + render_pass = command_encoder.begin_render_pass( + color_attachments=[color_attachment], + depth_stencil_attachment=depth_stencil_attachment, + occlusion_query_set=None, + ) + render_pass.push_debug_group("foo") + + render_pass.insert_debug_marker("setting pipeline") + render_pass.set_pipeline(render_pipeline) + render_pass.insert_debug_marker("setting bind group") + if bind_group: + render_pass.set_bind_group( + 0, bind_group, [], 0, 999999 + ) # last 2 elements not used + for slot, vbo in enumerate(vbos): + render_pass.insert_debug_marker(f"setting vbo {slot}") + render_pass.set_vertex_buffer(slot, vbo, 0, 0) + render_pass.insert_debug_marker("invoking callback") + renderpass_callback(render_pass) + render_pass.insert_debug_marker("draw!") + if ibo is None: + if indirect_buffer is None: + render_pass.draw(4, 1, 0, 0) + else: + render_pass.draw_indirect(indirect_buffer, 0) + else: + render_pass.set_index_buffer(ibo, wgpu.IndexFormat.uint32, 0, 0) + if indirect_buffer is None: + render_pass.draw_indexed(6, 1, 0, 0, 0) + else: + render_pass.draw_indexed_indirect(indirect_buffer, 0) + render_pass.pop_debug_group() + render_pass.end() + command_encoder.copy_texture_to_buffer( + {"texture": texture, "mip_level": 0, "origin": (0, 0, 0)}, + { + "buffer": buffer, + "offset": 0, + "bytes_per_row": bpp * nx, + "rows_per_image": ny, + }, + (nx, ny, 1), + ) + device.queue.submit([command_encoder.finish()]) + + # Read the current data of the output buffer - numpy is much easier to work with + mem = device.queue.read_buffer(buffer) + 
data = (ctypes.c_uint8 * 4 * nx * ny).from_buffer(mem) + return np.frombuffer(data, dtype=np.uint8).reshape(size[0], size[1], 4) + + +def render_to_screen( + device, + shader_source, + pipeline_layout, + bind_group, + *, + topology=wgpu.PrimitiveTopology.triangle_strip, + ibo=None, + vbos=None, + vbo_views=None, + indirect_buffer=None, + color_attachment=None, + depth_stencil_state=None, + depth_stencil_attachment=None, + renderpass_callback=lambda *args: None, +): + """Render to a window on screen, for debugging purposes.""" + import glfw + from wgpu.gui.glfw import WgpuCanvas, update_glfw_canvasses + + vbos = vbos or [] + vbo_views = vbo_views or [] + + # Setup canvas + glfw.init() + canvas = WgpuCanvas(title="wgpu test render with GLFW") + + shader = device.create_shader_module(code=shader_source) + + render_pipeline = device.create_render_pipeline( + layout=pipeline_layout, + vertex={ + "module": shader, + "entry_point": "vs_main", + "buffers": vbo_views, + }, + primitive={ + "topology": topology, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.none, + }, + depth_stencil=depth_stencil_state, + multisample={ + "count": 1, + "mask": 0xFFFFFFFF, + "alpha_to_coverage_enabled": False, + }, + fragment={ + "module": shader, + "entry_point": "fs_main", + "targets": [ + { + "format": wgpu.TextureFormat.bgra8unorm_srgb, + "blend": { + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + "write_mask": wgpu.ColorWrite.ALL, + }, + ], + }, + ) + + present_context = canvas.get_context() + present_context.configure(device=device, format=None) + + def draw_frame(): + current_texture_view = present_context.get_current_texture().create_view() + command_encoder = device.create_command_encoder() + + ca = color_attachment or { + "resolve_target": None, + "clear_value": (0, 0, 0, 0), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ca["view"] = current_texture_view + render_pass = command_encoder.begin_render_pass( + color_attachments=[ca], + depth_stencil_attachment=depth_stencil_attachment, + occlusion_query_set=None, + ) + render_pass.push_debug_group("foo") + + render_pass.insert_debug_marker("setting pipeline") + render_pass.set_pipeline(render_pipeline) + render_pass.insert_debug_marker("setting bind group") + render_pass.set_bind_group( + 0, bind_group, [], 0, 999999 + ) # last 2 elements not used + for slot, vbo in enumerate(vbos): + render_pass.insert_debug_marker(f"setting vbo {slot}") + render_pass.set_vertex_buffer(slot, vbo, 0, vbo.size) + render_pass.insert_debug_marker("invoking callback") + renderpass_callback(render_pass) + render_pass.insert_debug_marker("draw!") + if ibo is None: + if indirect_buffer is None: + render_pass.draw(4, 1, 0, 0) + else: + render_pass.draw_indirect(indirect_buffer, 0) + else: + render_pass.set_index_buffer(ibo, wgpu.IndexFormat.uint32, 0, ibo.size) + if indirect_buffer is None: + render_pass.draw_indexed(6, 1, 0, 0, 0) + else: + render_pass.draw_indexed_indirect(indirect_buffer, 0) + render_pass.pop_debug_group() + render_pass.end() + device.queue.submit([command_encoder.finish()]) + + canvas.request_draw(draw_frame) + + # Enter main loop + while update_glfw_canvasses(): + glfw.poll_events() + glfw.terminate() diff --git a/tests/test_api.py b/tests/test_api.py new file mode 100644 index 0000000..313ce5b --- /dev/null +++ b/tests/test_api.py @@ -0,0 +1,220 @@ +import sys +import logging 
+import subprocess + +import wgpu + +from pytest import raises, mark +from testutils import run_tests, can_use_wgpu_lib + + +def test_basic_api(): + import wgpu # noqa: F401 + + assert isinstance(wgpu.__version__, str) + assert isinstance(wgpu.version_info, tuple) + assert isinstance(wgpu.gpu, wgpu.GPU) + + # Entrypoint funcs + assert wgpu.gpu.request_adapter + assert wgpu.gpu.request_adapter_async + + code1 = wgpu.GPU.request_adapter.__code__ + code2 = wgpu.GPU.request_adapter_async.__code__ + nargs1 = code1.co_argcount + code1.co_kwonlyargcount + assert code1.co_varnames[:nargs1] == code2.co_varnames + + assert repr(wgpu.classes.GPU()).startswith( + " 2 mentions + assert text.count("foo_method") == 2 + assert text.count("call-failed-but-test-passed") == 4 + assert text.count("(4)") == 1 + assert text.count("(5)") == 0 + + assert text.count("spam_method") == 0 + assert text.count("division by zero") == 0 + + canvas._draw_frame_and_present() # prints traceback + canvas._draw_frame_and_present() # prints short logs ... + canvas._draw_frame_and_present() + canvas._draw_frame_and_present() + + text = caplog.text + assert text.count("bar_method") == 2 # one traceback => 2 mentions + assert text.count("foo_method") == 2 + assert text.count("call-failed-but-test-passed") == 4 + + assert text.count("spam_method") == 2 + assert text.count("division by zero") == 4 + + +class MyOffscreenCanvas(wgpu.gui.WgpuOffscreenCanvasBase): + def __init__(self): + super().__init__() + self.textures = [] + self.physical_size = 100, 100 + + def get_pixel_ratio(self): + return 1 + + def get_logical_size(self): + return self.get_physical_size() + + def get_physical_size(self): + return self.physical_size + + def _request_draw(self): + # Note: this would normaly schedule a call in a later event loop iteration + self._draw_frame_and_present() + + def present(self, texture): + self.textures.append(texture) + device = texture._device + size = texture.size + bytes_per_pixel = 4 + data = device.queue.read_texture( + { + "texture": texture, + "mip_level": 0, + "origin": (0, 0, 0), + }, + { + "offset": 0, + "bytes_per_row": bytes_per_pixel * size[0], + "rows_per_image": size[1], + }, + size, + ) + self.array = np.frombuffer(data, np.uint8).reshape(size[1], size[0], 4) + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_offscreen_canvas(): + canvas = MyOffscreenCanvas() + device = wgpu.utils.get_default_device() + present_context = canvas.get_context() + present_context.configure(device=device, format=None) + + def draw_frame(): + current_texture_view = present_context.get_current_texture().create_view() + command_encoder = device.create_command_encoder() + render_pass = command_encoder.begin_render_pass( + color_attachments=[ + { + "view": current_texture_view, + "resolve_target": None, + "clear_value": (0, 1, 0, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ], + ) + render_pass.end() + device.queue.submit([command_encoder.finish()]) + + assert len(canvas.textures) == 0 + + # Draw 1 + canvas.request_draw(draw_frame) + assert canvas.array.shape == (100, 100, 4) + assert np.all(canvas.array[:, :, 0] == 0) + assert np.all(canvas.array[:, :, 1] == 255) + + # Draw 2 + canvas.request_draw(draw_frame) + assert canvas.array.shape == (100, 100, 4) + assert np.all(canvas.array[:, :, 0] == 0) + assert np.all(canvas.array[:, :, 1] == 255) + + # Change resolution + canvas.physical_size = 120, 100 + + # Draw 3 + canvas.request_draw(draw_frame) + assert canvas.array.shape == (100, 120, 
4) + assert np.all(canvas.array[:, :, 0] == 0) + assert np.all(canvas.array[:, :, 1] == 255) + + # Change resolution + canvas.physical_size = 120, 140 + + # Draw 4 + canvas.request_draw(draw_frame) + assert canvas.array.shape == (140, 120, 4) + assert np.all(canvas.array[:, :, 0] == 0) + assert np.all(canvas.array[:, :, 1] == 255) + + # We now have four unique texture objects + assert len(canvas.textures) == 4 + assert len(set(canvas.textures)) == 4 + + +def test_autogui_mixin(): + c = wgpu.gui.WgpuAutoGui() + + # It's a mixin + assert not isinstance(c, wgpu.gui.WgpuCanvasBase) + + # It's event handling mechanism should be fully functional + + events = [] + + def handler(event): + events.append(event["value"]) + + c.add_event_handler(handler, "foo", "bar") + c.handle_event({"event_type": "foo", "value": 1}) + c.handle_event({"event_type": "bar", "value": 2}) + c.handle_event({"event_type": "spam", "value": 3}) + c.remove_event_handler(handler, "foo") + c.handle_event({"event_type": "foo", "value": 4}) + c.handle_event({"event_type": "bar", "value": 5}) + c.handle_event({"event_type": "spam", "value": 6}) + c.remove_event_handler(handler, "bar") + c.handle_event({"event_type": "foo", "value": 7}) + c.handle_event({"event_type": "bar", "value": 8}) + c.handle_event({"event_type": "spam", "value": 9}) + + assert events == [1, 2, 5] + + +def test_weakbind(): + weakbind = wgpu.gui.base.weakbind + + xx = [] + + class Foo: + def bar(self): + xx.append(1) + + f1 = Foo() + f2 = Foo() + + b1 = f1.bar + b2 = weakbind(f2.bar) + + assert len(xx) == 0 + b1() + assert len(xx) == 1 + b2() + assert len(xx) == 2 + + del f1 + del f2 + + if is_pypy: + gc.collect() + + assert len(xx) == 2 + b1() + assert len(xx) == 3 # f1 still exists + b2() + assert len(xx) == 3 # f2 is gone! + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_gui_glfw.py b/tests/test_gui_glfw.py new file mode 100644 index 0000000..2ee8af0 --- /dev/null +++ b/tests/test_gui_glfw.py @@ -0,0 +1,297 @@ +""" +Test the canvas, and parts of the rendering that involves a canvas, +like the canvas context and surface texture. +""" + +import os +import sys +import time +import weakref +import asyncio + +import wgpu +from pytest import skip +from testutils import run_tests, can_use_glfw, can_use_wgpu_lib +from renderutils import render_to_texture, render_to_screen # noqa + + +if not can_use_glfw or not can_use_wgpu_lib: + skip("Skipping tests that need a window or the wgpu lib", allow_module_level=True) + + +def setup_module(): + import glfw + + glfw.init() + + +def teardown_module(): + pass # Do not glfw.terminate() because other tests may still need glfw + + +def test_is_autogui(): + from wgpu.gui.glfw import WgpuCanvas + + assert issubclass(WgpuCanvas, wgpu.gui.WgpuCanvasBase) + assert issubclass(WgpuCanvas, wgpu.gui.WgpuAutoGui) + + +def test_glfw_canvas_basics(): + """Create a window and check some of its behavior. 
No wgpu calls here.""" + + import glfw + from wgpu.gui.glfw import WgpuCanvas + + canvas = WgpuCanvas() + + canvas.set_logical_size(300, 200) + etime = time.time() + 0.1 + while time.time() < etime: + glfw.poll_events() + lsize = canvas.get_logical_size() + assert isinstance(lsize, tuple) and len(lsize) == 2 + assert isinstance(lsize[0], float) and isinstance(lsize[1], float) + assert lsize == (300.0, 200.0) + + assert len(canvas.get_physical_size()) == 2 + assert isinstance(canvas.get_pixel_ratio(), float) + + # Close + assert not canvas.is_closed() + if sys.platform.startswith("win"): # On Linux we cant do this multiple times + canvas.close() + glfw.poll_events() + assert canvas.is_closed() + + +def test_glfw_canvas_del(): + from wgpu.gui.glfw import WgpuCanvas, update_glfw_canvasses + import glfw + + loop = asyncio.get_event_loop() + + async def miniloop(): + for i in range(10): + glfw.poll_events() + update_glfw_canvasses() + await asyncio.sleep(0.01) + + canvas = WgpuCanvas() + ref = weakref.ref(canvas) + + assert ref() is not None + loop.run_until_complete(miniloop()) + assert ref() is not None + del canvas + loop.run_until_complete(miniloop()) + assert ref() is None + + +shader_source = """ +@vertex +fn vs_main(@builtin(vertex_index) vertex_index : u32) -> @builtin(position) vec4 { + var positions: array, 3> = array, 3>(vec2(0.0, -0.5), vec2(0.5, 0.5), vec2(-0.5, 0.7)); + let p: vec2 = positions[vertex_index]; + return vec4(p, 0.0, 1.0); +} + +@fragment +fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.5, 0.0, 1.0); +} +""" + + +def test_glfw_canvas_render(): + """Render an orange square ... in a glfw window.""" + + import glfw + from wgpu.gui.glfw import update_glfw_canvasses, WgpuCanvas + + loop = asyncio.get_event_loop() + + canvas = WgpuCanvas(max_fps=9999) + + device = wgpu.utils.get_default_device() + draw_frame1 = _get_draw_function(device, canvas) + + frame_counter = 0 + + def draw_frame2(): + nonlocal frame_counter + frame_counter += 1 + draw_frame1() + + canvas.request_draw(draw_frame2) + + # Give it a few rounds to start up + async def miniloop(): + for i in range(10): + glfw.poll_events() + update_glfw_canvasses() + await asyncio.sleep(0.01) + + loop.run_until_complete(miniloop()) + # There should have been exactly one draw now + assert frame_counter == 1 + + # Ask for a lot of draws + for i in range(5): + canvas.request_draw() + # Process evens for a while + loop.run_until_complete(miniloop()) + # We should have had just one draw + assert frame_counter == 2 + + # Change the canvase size + canvas.set_logical_size(300, 200) + canvas.set_logical_size(400, 300) + # We should have had just one draw + loop.run_until_complete(miniloop()) + assert frame_counter == 3 + + # canvas.close() + glfw.poll_events() + + +def test_glfw_canvas_render_custom_canvas(): + """Render an orange square ... in a glfw window. But not using WgpuCanvas. + This helps make sure that WgpuCanvasInterface is indeed the minimal + required canvas API. 
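+
+    The CustomCanvas below only provides get_window_id(), get_display_id(),
+    get_physical_size() and get_context(), which appears to be the minimum
+    needed to configure a context and present to the window.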
+ """ + + import glfw + + class CustomCanvas: # implements wgpu.WgpuCanvasInterface + def __init__(self): + glfw.window_hint(glfw.CLIENT_API, glfw.NO_API) + glfw.window_hint(glfw.RESIZABLE, True) + self.window = glfw.create_window(300, 200, "canvas", None, None) + self._present_context = None + + def get_window_id(self): + if sys.platform.startswith("win"): + return int(glfw.get_win32_window(self.window)) + elif sys.platform.startswith("darwin"): + return int(glfw.get_cocoa_window(self.window)) + elif sys.platform.startswith("linux"): + is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() + if is_wayland: + return int(glfw.get_wayland_window(self.window)) + else: + return int(glfw.get_x11_window(self.window)) + else: + raise RuntimeError(f"Cannot get GLFW window id on {sys.platform}.") + + def get_display_id(self): + return wgpu.WgpuCanvasInterface.get_display_id(self) + + def get_physical_size(self): + psize = glfw.get_framebuffer_size(self.window) + return int(psize[0]), int(psize[1]) + + def get_context(self): + if self._present_context is None: + backend_module = sys.modules["wgpu"].gpu.__module__ + PC = sys.modules[backend_module].GPUCanvasContext # noqa N806 + self._present_context = PC(self) + return self._present_context + + canvas = CustomCanvas() + + # Also pass canvas here, to touch that code somewhere + adapter = wgpu.gpu.request_adapter( + canvas=canvas, power_preference="high-performance" + ) + device = adapter.request_device() + draw_frame = _get_draw_function(device, canvas) + + for i in range(5): + time.sleep(0.01) + glfw.poll_events() + draw_frame() + canvas.get_context().present() # WgpuCanvasBase normally automates this + + glfw.hide_window(canvas.window) + + +def _get_draw_function(device, canvas): + # Bindings and layout + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + shader = device.create_shader_module(code=shader_source) + + present_context = canvas.get_context() + render_texture_format = present_context.get_preferred_format(device.adapter) + present_context.configure(device=device, format=render_texture_format) + + render_pipeline = device.create_render_pipeline( + label="my-debug-pipeline", + layout=pipeline_layout, + vertex={ + "module": shader, + "entry_point": "vs_main", + "buffers": [], + }, + primitive={ + "topology": wgpu.PrimitiveTopology.triangle_strip, + "strip_index_format": wgpu.IndexFormat.uint32, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.none, + }, + depth_stencil=None, + multisample={ + "count": 1, + "mask": 0xFFFFFFFF, + "alpha_to_coverage_enabled": False, + }, + fragment={ + "module": shader, + "entry_point": "fs_main", + "targets": [ + { + "format": render_texture_format, + "blend": { + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + }, + ], + }, + ) + + def draw_frame(): + current_texture_view = present_context.get_current_texture().create_view() + command_encoder = device.create_command_encoder() + assert current_texture_view.size + ca = { + "view": current_texture_view, + "resolve_target": None, + "clear_value": (0, 0, 0, 0), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + render_pass = command_encoder.begin_render_pass( + color_attachments=[ca], + ) + + render_pass.set_pipeline(render_pipeline) + render_pass.draw(4, 1, 0, 0) + render_pass.end() + device.queue.submit([command_encoder.finish()]) + + return 
draw_frame + + +if __name__ == "__main__": + setup_module() + run_tests(globals()) + teardown_module() diff --git a/tests/test_util_compute.py b/tests/test_util_compute.py new file mode 100644 index 0000000..8bff1c4 --- /dev/null +++ b/tests/test_util_compute.py @@ -0,0 +1,536 @@ +import random +import ctypes +import base64 +from ctypes import c_int32, c_ubyte +import sys + +import wgpu +from wgpu.utils.compute import compute_with_buffers +from pytest import skip, mark, raises +from testutils import run_tests, can_use_wgpu_lib, is_ci, iters_equal + + +if not can_use_wgpu_lib: + skip("Skipping tests that need the wgpu lib", allow_module_level=True) + + +simple_compute_shader = """ + @group(0) + @binding(0) + var data2: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i: u32 = index.x; + data2[i] = i32(i); + } +""" + +# To generate compute_shader_spirv from a Python function +# +# from pyshader import python2shader, Array, i32, ivec3 +# +# def simple_compute_shader_py( +# index: ("input", "GlobalInvocationId", ivec3), +# out: ("buffer", 0, Array(i32)), +# ): +# out[index.x] = index.x +# +# print(base64.encodebytes(python2shader(simple_compute_shader_py).to_spirv()).decode()) + +simple_compute_shader_spirv = base64.decodebytes( + """ +AwIjBwADAQAAAAAAFgAAAAAAAAARAAIAAQAAAA4AAwAAAAAAAAAAAA8ABgAFAAAAAQAAAG1haW4A +AAAACAAAABAABgABAAAAEQAAAAEAAAABAAAAAQAAAAUABAABAAAAbWFpbgAAAAAFAAQACAAAAGlu +ZGV4AAAABQADAAwAAABvdXQABQADAA0AAAAwAAAARwAEAAgAAAALAAAAHAAAAEcABAAJAAAABgAA +AAQAAABIAAUACgAAAAAAAAAjAAAAAAAAAEcAAwAKAAAAAwAAAEcABAAMAAAAIgAAAAAAAABHAAQA +DAAAACEAAAAAAAAAEwACAAIAAAAhAAMAAwAAAAIAAAAVAAQABQAAACAAAAABAAAAFwAEAAYAAAAF +AAAAAwAAACAABAAHAAAAAQAAAAYAAAA7AAQABwAAAAgAAAABAAAAHQADAAkAAAAFAAAAHgADAAoA +AAAJAAAAIAAEAAsAAAACAAAACgAAADsABAALAAAADAAAAAIAAAArAAQABQAAAA0AAAAAAAAAIAAE +AA4AAAACAAAABQAAACAABAAQAAAAAQAAAAUAAAAgAAQAEwAAAAEAAAAFAAAANgAFAAIAAAABAAAA +AAAAAAMAAAD4AAIABAAAAEEABQAQAAAAEQAAAAgAAAANAAAAPQAEAAUAAAASAAAAEQAAAEEABgAO +AAAADwAAAAwAAAANAAAAEgAAAEEABQATAAAAFAAAAAgAAAANAAAAPQAEAAUAAAAVAAAAFAAAAD4A +AwAPAAAAFQAAAP0AAQA4AAEA +""".encode() +) + + +def test_compute_0_1_ctype(): + compute_shader = simple_compute_shader + assert isinstance(compute_shader, str) + + # Create some ints! 
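+    # compute_with_buffers maps binding indices to buffers: the first dict
+    # supplies input data (none here), the second describes the outputs to
+    # allocate (binding 0 as 100 c_int32 values), and the return value is a
+    # dict with one entry per output binding.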
+ out = compute_with_buffers({}, {0: c_int32 * 100}, compute_shader) + assert isinstance(out, dict) and len(out) == 1 + assert isinstance(out[0], ctypes.Array) + assert iters_equal(out[0], range(100)) + + # Same, but specify in bytes + out = compute_with_buffers({}, {0: c_ubyte * 80}, compute_shader, n=20) + assert isinstance(out, dict) and len(out) == 1 + assert isinstance(out[0], ctypes.Array) + out0 = (c_int32 * 20).from_buffer(out[0]) # cast (a view in np) + assert iters_equal(out0, range(20)) + + +def test_compute_0_1_tuple(): + compute_shader = simple_compute_shader + + out = compute_with_buffers({}, {0: (100, "i")}, compute_shader) + assert isinstance(out, dict) and len(out) == 1 + assert isinstance(out[0], memoryview) + assert out[0].tolist() == list(range(100)) + + +def test_compute_0_1_str(): + compute_shader = simple_compute_shader + + out = compute_with_buffers({}, {0: "100xi"}, compute_shader) + assert isinstance(out, dict) and len(out) == 1 + assert isinstance(out[0], memoryview) + assert out[0].tolist() == list(range(100)) + + +def test_compute_0_1_int(): + compute_shader = simple_compute_shader + + out = compute_with_buffers({}, {0: 400}, compute_shader, n=100) + assert isinstance(out, dict) and len(out) == 1 + assert isinstance(out[0], memoryview) + assert out[0].cast("i").tolist() == list(range(100)) + + +@mark.skipif( + is_ci and sys.platform == "win32", reason="Cannot use SpirV shader on dx12" +) +def test_compute_0_1_spirv(): + compute_shader = simple_compute_shader_spirv + assert isinstance(compute_shader, bytes) + + out = compute_with_buffers({}, {0: c_int32 * 100}, compute_shader) + assert isinstance(out, dict) and len(out) == 1 + assert isinstance(out[0], ctypes.Array) + assert iters_equal(out[0], range(100)) + + +def test_compute_1_3(): + compute_shader = """ + + @group(0) + @binding(0) + var data0: array; + + @group(0) + @binding(1) + var data1: array; + + @group(0) + @binding(2) + var data2: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = i32(index.x); + data1[i] = data0[i]; + data2[i] = i; + } + """ + + # Create an array of 100 random int32 + in1 = [int(random.uniform(0, 100)) for i in range(100)] + in1 = (c_int32 * 100)(*in1) + + outspecs = {1: 100 * c_int32, 2: 100 * c_int32} + out = compute_with_buffers({0: in1}, outspecs, compute_shader) + assert isinstance(out, dict) and len(out) == 2 + assert isinstance(out[1], ctypes.Array) + assert isinstance(out[2], ctypes.Array) + assert iters_equal(out[1], in1) # because the shader copied the data + assert iters_equal(out[2], range(100)) # because this is the index + + +def test_compute_in_is_out(): + compute_shader = """ + + @group(0) + @binding(0) + var data0: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = i32(index.x); + data0[i] = data0[i] * 2; + } + """ + + # Create an array of 100 random int32 + in1 = [int(random.uniform(0, 100)) for i in range(100)] + expected_out = [i * 2 for i in in1] + buf = (c_int32 * 100)(*in1) + + out = compute_with_buffers({0: buf}, {0: 100 * c_int32}, compute_shader) + assert isinstance(out, dict) and len(out) == 1 + assert isinstance(out[0], ctypes.Array) + assert out[0] is not buf # a copy was made + assert iters_equal(out[0], expected_out) + + +def test_compute_indirect(): + compute_shader = """ + @group(0) + @binding(0) + var data1: array; + + @group(0) + @binding(1) + var data2: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) 
index: vec3) { + let i = i32(index.x); + data2[i] = data1[i] + 1; + } + """ + + # Create an array of 100 random int32 + n = 100 + in1 = [int(random.uniform(0, 100)) for i in range(n)] + in1 = (c_int32 * n)(*in1) + + # Create device and shader object + device = wgpu.utils.get_default_device() + cshader = device.create_shader_module(code=compute_shader) + + # Create input buffer and upload data to in + buffer1 = device.create_buffer_with_data(data=in1, usage=wgpu.BufferUsage.STORAGE) + + # Create output buffer + buffer2 = device.create_buffer( + size=ctypes.sizeof(in1), + usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC, + ) + + # Create buffer to hold the dispatch parameters for the indirect call + params = (ctypes.c_int32 * 3)(n - 2, 1, 1) # note the minus 2! + buffer3 = device.create_buffer_with_data( + data=params, + usage=wgpu.BufferUsage.INDIRECT, + ) + + # Setup layout and bindings + binding_layouts = [ + { + "binding": 0, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.read_only_storage, + }, + }, + { + "binding": 1, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.storage, + }, + }, + ] + bindings = [ + { + "binding": 0, + "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, + }, + { + "binding": 1, + "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, + }, + ] + + # Put everything together + bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) + pipeline_layout = device.create_pipeline_layout( + bind_group_layouts=[bind_group_layout] + ) + bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + + # Create and run the pipeline, fail - test check_struct + with raises(ValueError): + compute_pipeline = device.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": cshader, "entry_point": "main", "foo": 42}, + ) + + # Create and run the pipeline + compute_pipeline = device.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": cshader, "entry_point": "main"}, + ) + command_encoder = device.create_command_encoder() + compute_pass = command_encoder.begin_compute_pass() + compute_pass.set_pipeline(compute_pipeline) + compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 args not used + compute_pass.dispatch_workgroups_indirect(buffer3, 0) + compute_pass.end() + device.queue.submit([command_encoder.finish()]) + + # Read result + out1 = in1.__class__.from_buffer(device.queue.read_buffer(buffer2)) + in2 = list(in1)[:] + out2 = [i - 1 for i in out1] + # The shader was applied to all but the last two elements + assert in2[:-2] == out2[:-2] + assert out2[-2:] == [-1, -1] + + +def test_compute_default_layout1(): + compute_shader = """ + @group(0) + @binding(0) + var data1: array; + + @group(0) + @binding(1) + var data2: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = i32(index.x); + data2[i] = data1[i] + 1; + } + """ + + # Create an array of 100 random int32 + n = 100 + in1 = [int(random.uniform(0, 100)) for i in range(n)] + in1 = (c_int32 * n)(*in1) + + # Create device and shader object + device = wgpu.utils.get_default_device() + cshader = device.create_shader_module(code=compute_shader) + + # Create input buffer and upload data to in + buffer1 = device.create_buffer_with_data(data=in1, usage=wgpu.BufferUsage.STORAGE) + + # Create output buffer + buffer2 = device.create_buffer( + size=ctypes.sizeof(in1), + 
usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC, + ) + + # Create buffer to hold the dispatch parameters for the indirect call + params = (ctypes.c_int32 * 3)(n - 2, 1, 1) # note the minus 2! + buffer3 = device.create_buffer_with_data( + data=params, + usage=wgpu.BufferUsage.INDIRECT, + ) + + # Setup bindings info + bindings = [ + { + "binding": 0, + "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, + }, + { + "binding": 1, + "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, + }, + ] + + # Create a pipeline using "auto" layout mode + compute_pipeline = device.create_compute_pipeline( + layout=wgpu.enums.AutoLayoutMode.auto, + compute={"module": cshader, "entry_point": "main"}, + ) + bind_group_layout = compute_pipeline.get_bind_group_layout(0) + bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + + # Run the pipeline + command_encoder = device.create_command_encoder() + compute_pass = command_encoder.begin_compute_pass() + compute_pass.set_pipeline(compute_pipeline) + compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 args not used + compute_pass.dispatch_workgroups_indirect(buffer3, 0) + compute_pass.end() + device.queue.submit([command_encoder.finish()]) + + # Read result + out1 = in1.__class__.from_buffer(device.queue.read_buffer(buffer2)) + in2 = list(in1)[:] + out2 = [i - 1 for i in out1] + # The shader was applied to all but the last two elements + assert in2[:-2] == out2[:-2] + assert out2[-2:] == [-1, -1] + + +def test_compute_default_layout2(): + # Default layout with multiple bind groups + + compute_shader = """ + @group(0) + @binding(0) + var data1: array; + + @group(1) + @binding(0) + var data2: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = i32(index.x); + data2[i] = data1[i] + 1; + } + """ + + # Create an array of 100 random int32 + n = 100 + in1 = [int(random.uniform(0, 100)) for i in range(n)] + in1 = (c_int32 * n)(*in1) + + # Create device and shader object + device = wgpu.utils.get_default_device() + cshader = device.create_shader_module(code=compute_shader) + + # Create input buffer and upload data to in + buffer1 = device.create_buffer_with_data(data=in1, usage=wgpu.BufferUsage.STORAGE) + + # Create output buffer + buffer2 = device.create_buffer( + size=ctypes.sizeof(in1), + usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC, + ) + + # Create buffer to hold the dispatch parameters for the indirect call + params = (ctypes.c_int32 * 3)(n - 2, 1, 1) # note the minus 2! 
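+ # The three values are the (x, y, z) workgroup counts that dispatch_workgroups_indirect reads from this buffer, + # so with n - 2 = 98 only the first 98 elements are processed; the last two outputs stay 0 (checked at the end).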
+ buffer3 = device.create_buffer_with_data( + data=params, + usage=wgpu.BufferUsage.INDIRECT, + ) + + # Setup bindings info + bindings0 = [ + { + "binding": 0, + "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, + }, + ] + bindings1 = [ + { + "binding": 0, + "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, + }, + ] + + # Create a pipeline using "auto" layout mode + compute_pipeline = device.create_compute_pipeline( + layout=wgpu.enums.AutoLayoutMode.auto, + compute={"module": cshader, "entry_point": "main"}, + ) + bind_group_layout0 = compute_pipeline.get_bind_group_layout(0) + bind_group0 = device.create_bind_group(layout=bind_group_layout0, entries=bindings0) + + bind_group_layout1 = compute_pipeline.get_bind_group_layout(1) + bind_group1 = device.create_bind_group(layout=bind_group_layout1, entries=bindings1) + + # Run the pipeline + command_encoder = device.create_command_encoder() + compute_pass = command_encoder.begin_compute_pass() + compute_pass.set_pipeline(compute_pipeline) + compute_pass.set_bind_group(0, bind_group0, [], 0, 999999) + compute_pass.set_bind_group(1, bind_group1, [], 0, 999999) + compute_pass.dispatch_workgroups_indirect(buffer3, 0) + compute_pass.end() + device.queue.submit([command_encoder.finish()]) + + # Read result + out1 = in1.__class__.from_buffer(device.queue.read_buffer(buffer2)) + in2 = list(in1)[:] + out2 = [i - 1 for i in out1] + # The shader was applied to all but the last two elements + assert in2[:-2] == out2[:-2] + assert out2[-2:] == [-1, -1] + + +def test_compute_fails(): + compute_shader = """ + @group(0) + @binding(0) + var data1: array; + + @group(0) + @binding(1) + var data2: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = i32(index.x); + data2[i] = data1[i]; + } + """ + + in1 = [int(random.uniform(0, 100)) for i in range(100)] + in1 = (c_int32 * 100)(*in1) + + # Baseline; this works + out = compute_with_buffers( + {0: in1}, {1: c_int32 * 100}, compute_shader, n=(100, 1, 1) + ) + assert iters_equal(out[1], in1) + + with raises(TypeError): # input_arrays is not a dict + compute_with_buffers([in1], {1: c_int32 * 100}, compute_shader) + with raises(TypeError): # input_arrays key not int + compute_with_buffers({"0": in1}, {1: c_int32 * 100}, compute_shader) + with raises(TypeError): # input_arrays value not ctypes array + compute_with_buffers({0: list(in1)}, {1: c_int32 * 100}, compute_shader) + + with raises(TypeError): # output_arrays is not a dict + compute_with_buffers({0: in1}, [c_int32 * 100], compute_shader) + with raises(TypeError): # output_arrays key not int + compute_with_buffers({0: in1}, {"1": c_int32 * 100}, compute_shader) + with raises(TypeError): # output_arrays value not a ctypes Array type + compute_with_buffers({0: in1}, {1: "foobar"}, compute_shader) + + with raises(ValueError): # output_arrays format invalid + compute_with_buffers({0: in1}, {1: "10xfoo"}, compute_shader) + with raises(ValueError): # output_arrays shape invalid + compute_with_buffers({0: in1}, {1: ("i",)}, compute_shader) + with raises(ValueError): # output_arrays shape invalid + compute_with_buffers( + {0: in1}, + { + 1: ( + 0, + "i", + ) + }, + compute_shader, + ) + with raises(ValueError): # output_arrays shape invalid + compute_with_buffers( + {0: in1}, + { + 1: ( + -1, + "i", + ) + }, + compute_shader, + ) + + with raises(TypeError): # invalid n + compute_with_buffers({0: in1}, {1: c_int32 * 100}, compute_shader, n="100") + with raises(ValueError): # invalid n 
+ compute_with_buffers({0: in1}, {1: c_int32 * 100}, compute_shader, n=-1) + + with raises(TypeError): # invalid shader + compute_with_buffers({0: in1}, {1: c_int32 * 100}, {"not", "a", "shader"}) + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_util_core.py b/tests/test_util_core.py new file mode 100644 index 0000000..1ff3fb6 --- /dev/null +++ b/tests/test_util_core.py @@ -0,0 +1,47 @@ +import wgpu +from wgpu._coreutils import error_message_hash, str_flag_to_int, _flag_cache +from testutils import run_tests + + +def test_error_message_hash(): + text1 = """In wgpuRenderPassEncoderEnd + In a pass parameter + note: command buffer = `` + The color attachment at index 0's texture view is not renderable: + """ + + text2 = """In wgpuRenderPassEncoderEnd + In a pass parameter + note: command buffer = `` + The color attachment at index 0's texture view is not renderable: + """ + + text3 = """In wgpuRenderPassEncoderEnd + In a pass parameter BLABLA + note: command buffer = `` + The color attachment at index 0's texture view is not renderable: + """ + + assert error_message_hash(text1) == error_message_hash(text2) + assert error_message_hash(text1) != error_message_hash(text3) + + +def test_str_flag_to_int(): + versions = [ + "UNIFORM|VERTEX", + "UNIFORM | VERTEX", + "VERTEX | UNIFORM", + "VERTEX| UNIFORM", + ] + + flags = [str_flag_to_int(wgpu.BufferUsage, v) for v in versions] + + for flag in flags: + assert flag == flags[0] + + for v in versions: + assert f"BufferUsage.{v}" in _flag_cache + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_util_shadertoy.py b/tests/test_util_shadertoy.py new file mode 100644 index 0000000..5223243 --- /dev/null +++ b/tests/test_util_shadertoy.py @@ -0,0 +1,154 @@ +import os + +from pytest import fixture, skip +from testutils import can_use_wgpu_lib + + +if not can_use_wgpu_lib: + skip("Skipping tests that need the wgpu lib", allow_module_level=True) + + +@fixture(autouse=True, scope="module") +def force_offscreen(): + os.environ["WGPU_FORCE_OFFSCREEN"] = "true" + try: + yield + finally: + del os.environ["WGPU_FORCE_OFFSCREEN"] + + +def test_shadertoy_wgsl(): + # Import here, because it imports the wgpu.gui.auto + from wgpu.utils.shadertoy import Shadertoy # noqa + + shader_code = """ + fn shader_main(frag_coord: vec2) -> vec4 { + let uv = frag_coord / i_resolution.xy; + + if ( length(frag_coord - i_mouse.xy) < 20.0 ) { + return vec4(0.0, 0.0, 0.0, 1.0); + }else{ + return vec4( 0.5 + 0.5 * sin(i_time * vec3(uv, 1.0) ), 1.0); + } + + } + """ + + shader = Shadertoy(shader_code, resolution=(800, 450)) + assert shader.resolution == (800, 450) + assert shader.shader_code == shader_code + assert shader.shader_type == "wgsl" + + shader._draw_frame() + + +def test_shadertoy_glsl(): + # Import here, because it imports the wgpu.gui.auto + from wgpu.utils.shadertoy import Shadertoy # noqa + + shader_code = """ + void shader_main(out vec4 fragColor, vec2 frag_coord) { + vec2 uv = frag_coord / i_resolution.xy; + + if ( length(frag_coord - i_mouse.xy) < 20.0 ) { + fragColor = vec4(0.0, 0.0, 0.0, 1.0); + }else{ + fragColor = vec4( 0.5 + 0.5 * sin(i_time * vec3(uv, 1.0) ), 1.0); + } + + } + """ + + shader = Shadertoy(shader_code, resolution=(800, 450)) + assert shader.resolution == (800, 450) + assert shader.shader_code == shader_code + assert shader.shader_type == "glsl" + + shader._draw_frame() + + +def test_shadertoy_offscreen(): + # Import here, because it imports the wgpu.gui.auto + from wgpu.utils.shadertoy import 
Shadertoy # noqa + + shader_code = """ + void shader_main(out vec4 fragColor, vec2 frag_coord) { + vec2 uv = frag_coord / i_resolution.xy; + + if ( length(frag_coord - i_mouse.xy) < 20.0 ) { + fragColor = vec4(0.0, 0.0, 0.0, 1.0); + }else{ + fragColor = vec4( 0.5 + 0.5 * sin(i_time * vec3(uv, 1.0) ), 1.0); + } + + } + """ + + shader = Shadertoy(shader_code, resolution=(800, 450), offscreen=True) + assert shader.resolution == (800, 450) + assert shader.shader_code == shader_code + assert shader.shader_type == "glsl" + assert shader._offscreen is True + + +def test_shadertoy_snapshot(): + # Import here, because it imports the wgpu.gui.auto + from wgpu.utils.shadertoy import Shadertoy # noqa + + shader_code = """ + void shader_main(out vec4 fragColor, vec2 frag_coord) { + vec2 uv = frag_coord / i_resolution.xy; + + if ( length(frag_coord - i_mouse.xy) < 20.0 ) { + fragColor = vec4(0.0, 0.0, 0.0, 1.0); + }else{ + fragColor = vec4( 0.5 + 0.5 * sin(i_time * vec3(uv, 1.0) ), 1.0); + } + + } + """ + + shader = Shadertoy(shader_code, resolution=(800, 450), offscreen=True) + frame1a = shader.snapshot( + time_float=0.0, + mouse_pos=( + 0, + 0, + 0, + 0, + ), + ) + frame2a = shader.snapshot( + time_float=1.2, + mouse_pos=( + 100, + 200, + 0, + 0, + ), + ) + frame1b = shader.snapshot( + time_float=0.0, + mouse_pos=( + 0, + 0, + 0, + 0, + ), + ) + frame2b = shader.snapshot( + time_float=1.2, + mouse_pos=( + 100, + 200, + 0, + 0, + ), + ) + + assert shader.resolution == (800, 450) + assert shader.shader_code == shader_code + assert shader.shader_type == "glsl" + assert shader._offscreen is True + assert frame1a == frame1b + assert frame2a == frame2b diff --git a/tests/test_wgpu_native_basics.py b/tests/test_wgpu_native_basics.py new file mode 100644 index 0000000..d546211 --- /dev/null +++ b/tests/test_wgpu_native_basics.py @@ -0,0 +1,222 @@ +import os +import base64 +import shutil +import ctypes +import sys +import tempfile + +import wgpu.utils +import wgpu.backends.wgpu_native +import numpy as np + +from testutils import run_tests, can_use_wgpu_lib, is_ci +from pytest import mark, raises + + +is_win = sys.platform.startswith("win") + + +def test_get_wgpu_version(): + version = wgpu.backends.wgpu_native.__version__ + commit_sha = wgpu.backends.wgpu_native.__commit_sha__ + version_info = wgpu.backends.wgpu_native.version_info + + assert isinstance(version, str) + assert len(version) > 1 + + assert isinstance(version_info, tuple) + assert all(isinstance(i, int) for i in version_info) + assert len(version_info) == 4 + + assert isinstance(commit_sha, str) + assert len(commit_sha) > 0 + + +def test_override_wgpu_lib_path(): + # Current version + try: + old_path = wgpu.backends.wgpu_native.lib_path + except RuntimeError: + old_path = None + + # Change it + old_env_var = os.environ.get("WGPU_LIB_PATH", None) + os.environ["WGPU_LIB_PATH"] = "foo/bar" + + # Check + assert wgpu.backends.wgpu_native._ffi.get_wgpu_lib_path() == "foo/bar" + + # Change it back + if old_env_var is None: + os.environ.pop("WGPU_LIB_PATH") + else: + os.environ["WGPU_LIB_PATH"] = old_env_var + + # Still the same as before? 
+ try: + path = wgpu.backends.wgpu_native._ffi.get_wgpu_lib_path() + except RuntimeError: + path = None + assert path == old_path + + +def test_tuple_from_tuple_or_dict(): + func = wgpu.backends.wgpu_native._api._tuple_from_tuple_or_dict + + assert func([1, 2, 3], ("x", "y", "z")) == (1, 2, 3) + assert func({"y": 2, "z": 3, "x": 1}, ("x", "y", "z")) == (1, 2, 3) + assert func((10, 20), ("width", "height")) == (10, 20) + assert func({"width": 10, "height": 20}, ("width", "height")) == (10, 20) + + with raises(TypeError): + func("not tuple/dict", ("x", "y")) + with raises(ValueError): + func([1], ("x", "y")) + with raises(ValueError): + func([1, 2, 3], ("x", "y")) + with raises(ValueError): + assert func({"x": 1}, ("x", "y")) + + +compute_shader_wgsl = """ +@group(0) +@binding(0) +var out1: array; + +@compute +@workgroup_size(1) +fn main(@builtin(global_invocation_id) index: vec3) { + let i: u32 = index.x; + out1[i] = i32(i); +} +""" + +compute_shader_spirv = base64.decodebytes( + """ +AwIjBwADAQAAAAAAFgAAAAAAAAARAAIAAQAAAA4AAwAAAAAAAAAAAA8ABgAFAAAAAQAAAG1haW4A +AAAACAAAABAABgABAAAAEQAAAAEAAAABAAAAAQAAAAUABAABAAAAbWFpbgAAAAAFAAQACAAAAGlu +ZGV4AAAABQADAAwAAABvdXQABQADAA0AAAAwAAAARwAEAAgAAAALAAAAHAAAAEcABAAJAAAABgAA +AAQAAABIAAUACgAAAAAAAAAjAAAAAAAAAEcAAwAKAAAAAwAAAEcABAAMAAAAIgAAAAAAAABHAAQA +DAAAACEAAAAAAAAAEwACAAIAAAAhAAMAAwAAAAIAAAAVAAQABQAAACAAAAABAAAAFwAEAAYAAAAF +AAAAAwAAACAABAAHAAAAAQAAAAYAAAA7AAQABwAAAAgAAAABAAAAHQADAAkAAAAFAAAAHgADAAoA +AAAJAAAAIAAEAAsAAAACAAAACgAAADsABAALAAAADAAAAAIAAAArAAQABQAAAA0AAAAAAAAAIAAE +AA4AAAACAAAABQAAACAABAAQAAAAAQAAAAUAAAAgAAQAEwAAAAEAAAAFAAAANgAFAAIAAAABAAAA +AAAAAAMAAAD4AAIABAAAAEEABQAQAAAAEQAAAAgAAAANAAAAPQAEAAUAAAASAAAAEQAAAEEABgAO +AAAADwAAAAwAAAANAAAAEgAAAEEABQATAAAAFAAAAAgAAAANAAAAPQAEAAUAAAAVAAAAFAAAAD4A +AwAPAAAAFQAAAP0AAQA4AAEA +""".encode() +) + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_logging(): + # Do *something* while we set the log level low + device = wgpu.utils.get_default_device() + + wgpu.logger.setLevel("DEBUG") + + device.create_shader_module(code=compute_shader_wgsl) + + wgpu.logger.setLevel("WARNING") + + # yeah, would be nice to be able to capture the logs. But if we don't crash + # and see from the coverage that we touched the logger integration code, + # we're doing pretty good ... + # (capsys does not work because it logs to the raw stderr) + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_wgpu_native_tracer(): + tempdir = os.path.join(tempfile.gettempdir(), "wgpu-tracer-test") + adapter = wgpu.utils.get_default_device().adapter + + # Make empty + shutil.rmtree(tempdir, ignore_errors=True) + assert not os.path.isdir(tempdir) + + # Works! 
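+ # The call below should (re)create tempdir and write its trace output there; the isdir assert checks that.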
+ wgpu.backends.wgpu_native.request_device_tracing(adapter, tempdir) + assert os.path.isdir(tempdir) + + # Make dir not empty + with open(os.path.join(tempdir, "stub.txt"), "wb"): + pass + + # Still works, but produces warning + wgpu.backends.wgpu_native.request_device_tracing(adapter, tempdir) + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_wgpu_native_enumerate_adapters(): + # Get all available adapters + adapters = wgpu.backends.wgpu_native.enumerate_adapters() + assert len(adapters) > 0 + + # Check that we can get a device from each adapter + for adapter in adapters: + d = adapter.request_device() + assert isinstance(d, wgpu.backends.wgpu_native.GPUDevice) + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +@mark.skipif(is_ci and is_win, reason="Cannot use SpirV shader on dx12") +def test_shader_module_creation_spirv(): + device = wgpu.utils.get_default_device() + + code1 = compute_shader_spirv + assert isinstance(code1, bytes) + code4 = type("CodeObject", (object,), {}) + + m1 = device.create_shader_module(code=code1) + assert m1.get_compilation_info() == [] + + with raises(TypeError): + device.create_shader_module(code=code4) + with raises(TypeError): + device.create_shader_module(code={"not", "a", "shader"}) + with raises(ValueError): + device.create_shader_module(code=b"bytes but no SpirV magic number") + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_adapter_destroy(): + adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + assert adapter._internal is not None + adapter.__del__() + assert adapter._internal is None + + +def test_get_memoryview_and_address(): + get_memoryview_and_address = ( + wgpu.backends.wgpu_native._helpers.get_memoryview_and_address + ) + + data = b"bytes are readonly, but we can map it. Don't abuse this :)" + m, address = get_memoryview_and_address(data) + assert m.nbytes == len(data) + assert address > 0 + + data = bytearray(b"A bytearray works too") + m, address = get_memoryview_and_address(data) + assert m.nbytes == len(data) + assert address > 0 + + data = (ctypes.c_float * 100)() + m, address = get_memoryview_and_address(data) + assert m.nbytes == ctypes.sizeof(data) + assert address > 0 + + data = np.array([1, 2, 3, 4]) + m, address = get_memoryview_and_address(data) + assert m.nbytes == data.nbytes + assert address > 0 + + data = np.array([1, 2, 3, 4]) + data.flags.writeable = False + m, address = get_memoryview_and_address(data) + assert m.nbytes == data.nbytes + assert address > 0 + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_wgpu_native_buffer.py b/tests/test_wgpu_native_buffer.py new file mode 100644 index 0000000..a4bd541 --- /dev/null +++ b/tests/test_wgpu_native_buffer.py @@ -0,0 +1,530 @@ +import random +import ctypes +import sys + +import wgpu.utils +import numpy as np + +from testutils import run_tests, can_use_wgpu_lib, iters_equal +from pytest import mark, raises + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_buffer_init1(): + # Initializing a buffer with data + + device = wgpu.utils.get_default_device() + data1 = b"abcdefghijkl" + + assert repr(device).startswith("= (3, 8): # no memoryview.toreadonly on 3.7 and below + with raises(TypeError): + data2[0] = 1 + with raises(TypeError): + data3[0] = 1 + with raises(TypeError): + data4[0] = 1 + + buf.unmap() + + # The memoryview is invalidated when the buffer unmapped. + # Note that this unfortunately does *not* hold for views on these arrays. 
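+ # Reading or writing any of the now-invalid views should raise ValueError: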
+ with raises(ValueError): + data2[0] + with raises(ValueError): + data3[0] + with raises(ValueError): + data4[0] + + with raises(ValueError): + data2[0] = 1 + with raises(ValueError): + data3[0] = 1 + with raises(ValueError): + data4[0] = 1 + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_clear_buffer(): + data0 = b"111111112222222233333333" + data1 = b"111111110000000000003333" + data2 = b"111100000000000000000000" + data3 = b"000000000000000000000000" + + # Prep + device = wgpu.utils.get_default_device() + buf = device.create_buffer( + size=len(data1), usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + device.queue.write_buffer(buf, 0, data0) + + # Download original data + res = device.queue.read_buffer(buf) + assert res == data0 + + # Clear part of the buffer + command_encoder = device.create_command_encoder() + command_encoder.clear_buffer(buf, 8, 12) + device.queue.submit([command_encoder.finish()]) + + res = bytes(device.queue.read_buffer(buf)).replace(b"\x00", b"0") + assert res == data1 + + # Clear everything from index 4 to the end + command_encoder = device.create_command_encoder() + command_encoder.clear_buffer(buf, 4, None) + device.queue.submit([command_encoder.finish()]) + + res = bytes(device.queue.read_buffer(buf)).replace(b"\x00", b"0") + assert res == data2 + + # Clear the whole buffer + command_encoder = device.create_command_encoder() + command_encoder.clear_buffer(buf, 0) + device.queue.submit([command_encoder.finish()]) + + res = bytes(device.queue.read_buffer(buf)).replace(b"\x00", b"0") + assert res == data3 + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_write_buffer1(): + device = wgpu.utils.get_default_device() + + data1 = memoryview(np.random.random(size=100).astype(np.float32)) + + # Create buffer + buf4 = device.create_buffer( + size=data1.nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + + # Upload from CPU to buffer + device.create_command_encoder() # we seem to need to create one + device.queue.write_buffer(buf4, 0, data1) + device.queue.submit([]) + + # Download from buffer to CPU + data2 = device.queue.read_buffer(buf4).cast("f") + assert data1 == data2 + + # Yes, you can compare memoryviews! Check this: + data1[0] += 1 + assert data1 != data2 + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_write_buffer2(): + device = wgpu.utils.get_default_device() + + nx, ny, nz = 100, 1, 1 + data0 = (ctypes.c_float * 100)(*[random.random() for i in range(nx * ny * nz)]) + data1 = (ctypes.c_float * 100)() + nbytes = ctypes.sizeof(data1) + + # Create buffer + buf4 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + + for i in range(len(data1)): + data1[i] = data0[i] + + # Upload from CPU to buffer + device.create_command_encoder() # we seem to need to create one + device.queue.write_buffer(buf4, 0, data1) + + # We wipe the data. You could also think that we passed something into + # write_buffer without holding a reference to it. Anyway, write_buffer + # seems to copy the data at the moment it is called.
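+ # So the overwrite below must not affect what was uploaded; the readback at the end should still equal data0.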
+ for i in range(len(data1)): + data1[i] = 1 + + device.queue.submit([]) + + # Download from buffer to CPU + data2 = data1.__class__.from_buffer(device.queue.read_buffer(buf4)) + assert iters_equal(data0, data2) + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_write_buffer3(): + device = wgpu.utils.get_default_device() + nbytes = 12 + + # Create buffer + buf4 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + + # Upload from CPU to buffer, using bytes + device.create_command_encoder() # we seem to need to create one + device.queue.write_buffer(buf4, 0, b"abcdefghijkl", 0, nbytes) + device.queue.submit([]) + + # Download from buffer to CPU + assert device.queue.read_buffer(buf4).tobytes() == b"abcdefghijkl" + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_buffer_map_read_and_write(): + # Do a mini round-trip using mapped buffers + + device = wgpu.utils.get_default_device() + nbytes = 12 + + # Create buffers + buf1 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.MAP_WRITE + ) + buf2 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ + ) + + # Upload + data1 = b"abcdefghijkl" + buf1.map("write") + buf1.write_mapped(data1) + buf1.unmap() + + # Copy + command_encoder = device.create_command_encoder() + command_encoder.copy_buffer_to_buffer(buf1, 0, buf2, 0, nbytes) + device.queue.submit([command_encoder.finish()]) + + # Download + buf2.map("read") + data2 = buf2.read_mapped() + buf2.unmap() + assert data1 == data2 + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_wgpu_native_compute_tex.py b/tests/test_wgpu_native_compute_tex.py new file mode 100644 index 0000000..a1cf291 --- /dev/null +++ b/tests/test_wgpu_native_compute_tex.py @@ -0,0 +1,592 @@ +import random +import ctypes +import sys + +import numpy as np + +import wgpu +from pytest import skip +from testutils import run_tests, get_default_device +from testutils import can_use_wgpu_lib, is_ci + + +if not can_use_wgpu_lib: + skip("Skipping tests that need the wgpu lib", allow_module_level=True) +elif is_ci and sys.platform == "win32": + skip("These tests fail on dx12 for some reason", allow_module_level=True) + +# %% 1D + + +def test_compute_tex_1d_rgba8uint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_1d; + + @group(0) @binding(1) + var r_tex2: texture_storage_1d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i: i32 = i32(index.x); + let color1 = vec4(textureLoad(r_tex1, i, 0)); + let color2 = vec4(color1.x + i, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, vec4(color2)); + } + """ + + # Generate data + nx, ny, nz, nc = 64, 1, 1, 4 + data1 = (ctypes.c_uint8 * nc * nx)() + for x in range(nx): + for c in range(nc): + data1[x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.rgba8uint, + wgpu.TextureDimension.d1, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_1d_rgba16sint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_1d; + + @group(0) @binding(1) + var r_tex2: texture_storage_1d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i: i32 = i32(index.x); + let color1 : vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + i, color1.y + 1, color1.z * 2, color1.a); + 
textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 128, 1, 1, 4 + data1 = (ctypes.c_int16 * nc * nx)() + for x in range(nx): + for c in range(nc): + data1[x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.rgba16sint, + wgpu.TextureDimension.d1, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_1d_r32sint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_1d; + + @group(0) @binding(1) + var r_tex2: texture_storage_1d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i: i32 = i32(index.x); + let color1 : vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + i, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 256, 1, 1, 1 + data1 = (ctypes.c_int32 * nc * nx)() + for x in range(nx): + for c in range(nc): + data1[x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.r32sint, + wgpu.TextureDimension.d1, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_1d_r32float(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_1d; + + @group(0) @binding(1) + var r_tex2: texture_storage_1d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i: i32 = i32(index.x); + let color1 : vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + f32(i), color1.y + 1.0, color1.z * 2.0, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 256, 1, 1, 1 + data1 = (ctypes.c_float * nc * nx)() + for x in range(nx): + for c in range(nc): + data1[x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.r32float, + wgpu.TextureDimension.d1, + (nx, ny, nz, nc), + data1, + ) + + +# %% 2D + + +def test_compute_tex_2d_rgba8uint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_2d; + + @group(0) @binding(1) + var r_tex2: texture_storage_2d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec2(index.xy); + let color1 = vec4(textureLoad(r_tex1, i, 0)); + let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, vec4(color2)); + } + """ + + # Generate data + nx, ny, nz, nc = 64, 8, 1, 4 + data1 = (ctypes.c_uint8 * nc * nx * ny)() + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.rgba8uint, + wgpu.TextureDimension.d2, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_2d_rgba16sint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_2d; + + @group(0) @binding(1) + var r_tex2: texture_storage_2d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec2(index.xy); + let color1: vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 128, 8, 1, 4 + data1 = (ctypes.c_int16 * nc * nx * ny)() + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.rgba16sint, + 
wgpu.TextureDimension.d2, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_2d_r32sint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_2d; + + @group(0) @binding(1) + var r_tex2: texture_storage_2d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec2(index.xy); + let color1: vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 256, 8, 1, 1 + data1 = (ctypes.c_int32 * nc * nx * ny)() + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.r32sint, + wgpu.TextureDimension.d2, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_2d_r32float(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1:texture_2d; + + @group(0) @binding(1) + var r_tex2: texture_storage_2d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec2(index.xy); + let color1: vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 256, 8, 1, 1 + data1 = (ctypes.c_float * nc * nx * ny)() + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.r32float, + wgpu.TextureDimension.d2, + (nx, ny, nz, nc), + data1, + ) + + +# %% 3D + + +def test_compute_tex_3d_rgba8uint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_3d; + + @group(0) @binding(1) + var r_tex2: texture_storage_3d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec3(index); + let color1 = vec4(textureLoad(r_tex1, i, 0)); + let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, vec4(color2)); + } + """ + + # Generate data + nx, ny, nz, nc = 64, 8, 6, 4 + data1 = (ctypes.c_uint8 * nc * nx * ny * nz)() + for z in range(nz): + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[z][y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.rgba8uint, + wgpu.TextureDimension.d3, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_3d_rgba16sint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_3d; + + @group(0) @binding(1) + var r_tex2: texture_storage_3d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec3(index); + let color1: vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 128, 8, 6, 4 + data1 = (ctypes.c_int16 * nc * nx * ny * nz)() + for z in range(nz): + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[z][y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.rgba16sint, + wgpu.TextureDimension.d3, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_3d_r32sint(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_3d; + + @group(0) @binding(1) + var 
r_tex2: texture_storage_3d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec3(index); + let color1: vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 256, 8, 6, 1 + data1 = (ctypes.c_int32 * nc * nx * ny * nz)() + for z in range(nz): + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[z][y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.r32sint, + wgpu.TextureDimension.d3, + (nx, ny, nz, nc), + data1, + ) + + +def test_compute_tex_3d_r32float(): + compute_shader = """ + @group(0) @binding(0) + var r_tex1: texture_3d; + + @group(0) @binding(1) + var r_tex2: texture_storage_3d; + + @compute @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i = vec3(index); + let color1: vec4 = textureLoad(r_tex1, i, 0); + let color2 = vec4(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a); + textureStore(r_tex2, i, color2); + } + """ + + # Generate data + nx, ny, nz, nc = 64, 8, 6, 1 + data1 = (ctypes.c_float * nc * nx * ny * nz)() + for z in range(nz): + for y in range(ny): + for x in range(nx): + for c in range(nc): + data1[z][y][x][c] = random.randint(0, 20) + + # Compute and validate + _compute_texture( + compute_shader, + wgpu.TextureFormat.r32float, + wgpu.TextureDimension.d3, + (nx, ny, nz, nc), + data1, + ) + + +# %% + + +def _compute_texture(compute_shader, texture_format, texture_dim, texture_size, data1): + """ + Apply a computation on a texture and validate the result. The shader should: + * Add the x-coordinate to the red channel. + * Add 1 to the green channel. + * Multiply the blue channel by 2. + * The alpha channel must remain equal. 
+ """ + + nx, ny, nz, nc = texture_size + nbytes = ctypes.sizeof(data1) + bpp = nbytes // (nx * ny * nz) # bytes per pixel + + device = get_default_device() + cshader = device.create_shader_module(code=compute_shader) + + # Create textures and views + texture1 = device.create_texture( + size=(nx, ny, nz), + dimension=texture_dim, + format=texture_format, + usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST, + ) + texture2 = device.create_texture( + size=(nx, ny, nz), + dimension=texture_dim, + format=texture_format, + usage=wgpu.TextureUsage.STORAGE_BINDING | wgpu.TextureUsage.COPY_SRC, + ) + texture_view1 = texture1.create_view() + texture_view2 = texture2.create_view() + + # Create buffer that we need to upload the data + buffer_usage = wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST + buffer = device.create_buffer_with_data(data=data1, usage=buffer_usage) + assert buffer.usage == buffer_usage + + texture_sample_type = "unfilterable-float" + if "uint" in texture_format: + texture_sample_type = "uint" + elif "sint" in texture_format: + texture_sample_type = "sint" + + # Define bindings + # One can see here why we need 2 textures: one is readonly, one writeonly + bindings = [ + {"binding": 0, "resource": texture_view1}, + {"binding": 1, "resource": texture_view2}, + ] + binding_layouts = [ + { + "binding": 0, + "visibility": wgpu.ShaderStage.COMPUTE, + "texture": { + "sample_type": texture_sample_type, + "view_dimension": texture_dim, + }, + }, + { + "binding": 1, + "visibility": wgpu.ShaderStage.COMPUTE, + "storage_texture": { + "access": wgpu.StorageTextureAccess.write_only, + "format": texture_format, + "view_dimension": texture_dim, + }, + }, + ] + bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) + pipeline_layout = device.create_pipeline_layout( + bind_group_layouts=[bind_group_layout] + ) + bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + + # Create a pipeline and run it + compute_pipeline = device.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": cshader, "entry_point": "main"}, + ) + command_encoder = device.create_command_encoder() + + if False: # Upload via alt route (that does not have 256 alignment constraint) + device.queue.write_texture( + {"texture": texture1}, + data1, + {"bytes_per_row": bpp * nx, "rows_per_image": ny}, + (nx, ny, nz), + ) + else: + command_encoder.copy_buffer_to_texture( + { + "buffer": buffer, + "offset": 0, + "bytes_per_row": bpp * nx, + "rows_per_image": ny, + }, + {"texture": texture1, "mip_level": 0, "origin": (0, 0, 0)}, + (nx, ny, nz), + ) + compute_pass = command_encoder.begin_compute_pass() + compute_pass.push_debug_group("foo") + compute_pass.insert_debug_marker("setting pipeline") + compute_pass.set_pipeline(compute_pipeline) + compute_pass.insert_debug_marker("setting bind group") + compute_pass.set_bind_group( + 0, bind_group, [], 0, 999999 + ) # last 2 elements not used + compute_pass.insert_debug_marker("dispatch!") + compute_pass.dispatch_workgroups(nx, ny, nz) + compute_pass.pop_debug_group() + compute_pass.end() + command_encoder.copy_texture_to_buffer( + {"texture": texture2, "mip_level": 0, "origin": (0, 0, 0)}, + { + "buffer": buffer, + "offset": 0, + "bytes_per_row": bpp * nx, + "rows_per_image": ny, + }, + (nx, ny, nz), + ) + device.queue.submit([command_encoder.finish()]) + + # Read the current data of the output buffer + data2 = data1.__class__.from_buffer(device.queue.read_buffer(buffer)) + + # Numpy arrays are easier to 
work with + a1 = np.ctypeslib.as_array(data1).reshape(nz, ny, nx, nc) + a2 = np.ctypeslib.as_array(data2).reshape(nz, ny, nx, nc) + + # Validate! + for x in range(nx): + assert np.all(a2[:, :, x, 0] == a1[:, :, x, 0] + x) + if nc >= 2: + assert np.all(a2[:, :, :, 1] == a1[:, :, :, 1] + 1) + if nc >= 3: + assert np.all(a2[:, :, :, 2] == a1[:, :, :, 2] * 2) + if nc >= 4: + assert np.all(a2[:, :, :, 3] == a1[:, :, :, 3]) + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_wgpu_native_errors.py b/tests/test_wgpu_native_errors.py new file mode 100644 index 0000000..dfab5df --- /dev/null +++ b/tests/test_wgpu_native_errors.py @@ -0,0 +1,268 @@ +import wgpu.utils + +from testutils import run_tests +from pytest import raises + + +dedent = lambda s: s.replace("\n ", "\n").strip() # noqa + + +def test_parse_shader_error1(caplog): + # test1: invalid attribute access + device = wgpu.utils.get_default_device() + + code = """ + struct VertexOutput { + @location(0) texcoord : vec2, + @builtin(position) position: vec4, + }; + + @vertex + fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { + var out: VertexOutput; + out.invalid_attr = vec4(0.0, 0.0, 1.0); + return out; + } + """ + + expected = """ + Validation Error + + Caused by: + In wgpuDeviceCreateShaderModule + + Shader '' parsing error: invalid field accessor `invalid_attr` + ┌─ wgsl:9:9 + │ + 9 │ out.invalid_attr = vec4(0.0, 0.0, 1.0); + │ ^^^^^^^^^^^^ invalid accessor + + + invalid field accessor `invalid_attr` + """ + + code = dedent(code) + expected = dedent(expected) + with raises(wgpu.GPUError) as err: + device.create_shader_module(code=code) + + error = err.value.message + assert error == expected, f"Expected:\n\n{expected}" + + +def test_parse_shader_error2(caplog): + # test2: grammar error, expected ',', not ';' + device = wgpu.utils.get_default_device() + + code = """ + struct VertexOutput { + @location(0) texcoord : vec2; + @builtin(position) position: vec4, + }; + """ + + expected = """ + Validation Error + + Caused by: + In wgpuDeviceCreateShaderModule + + Shader '' parsing error: expected ',', found ';' + ┌─ wgsl:2:38 + │ + 2 │ @location(0) texcoord : vec2; + │ ^ expected ',' + + + expected ',', found ';' + """ + + code = dedent(code) + expected = dedent(expected) + with raises(wgpu.GPUError) as err: + device.create_shader_module(code=code) + + error = err.value.message + assert error == expected, f"Expected:\n\n{expected}" + + +def test_parse_shader_error3(caplog): + # test3: grammar error, contains '\t' and (tab), unknown scalar type: 'f3' + device = wgpu.utils.get_default_device() + + code = """ + struct VertexOutput { + @location(0) texcoord : vec2, + @builtin(position) position: vec4, + }; + """ + + expected = """ + Validation Error + + Caused by: + In wgpuDeviceCreateShaderModule + + Shader '' parsing error: unknown scalar type: 'f3' + ┌─ wgsl:3:39 + │ + 3 │ @builtin(position) position: vec4, + │ ^^ unknown scalar type + │ + = note: Valid scalar types are f32, f64, i32, u32, bool + + + unknown scalar type: 'f3' + """ + + code = dedent(code) + expected = dedent(expected) + with raises(wgpu.GPUError) as err: + device.create_shader_module(code=code) + + error = err.value.message + assert error == expected, f"Expected:\n\n{expected}" + + +def test_parse_shader_error4(caplog): + # test4: no line info available - hopefully Naga produces better error messages soon? 
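+ # The expected message below therefore spans the whole function body rather than pointing at a single wgsl line.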
+ device = wgpu.utils.get_default_device() + + code = """ + fn foobar() { + let m = mat2x2(0.0, 0.0, 0.0, 0.); + let scales = m[4]; + } + """ + + expected = """ + Validation Error + + Caused by: + In wgpuDeviceCreateShaderModule + + Shader validation error: + ┌─ :1:1 + │ + 1 │ ╭ fn foobar() { + 2 │ │ let m = mat2x2(0.0, 0.0, 0.0, 0.); + 3 │ │ let scales = m[4]; + │ │ ^^^^ naga::Expression [9] + │ ╰──────────────────────^ naga::Function [1] + + + Function [1] 'foobar' is invalid + Expression [9] is invalid + Type resolution failed + Index 4 is out of bounds for expression [7] + """ + + code = dedent(code) + expected = dedent(expected) + with raises(wgpu.GPUError) as err: + device.create_shader_module(code=code) + + error = err.value.message + assert error == expected, f"Expected:\n\n{expected}" + + +def test_validate_shader_error1(caplog): + # test1: Validation error, mat4x4 * vec3 + device = wgpu.utils.get_default_device() + + code = """ + struct VertexOutput { + @location(0) texcoord : vec2, + @builtin(position) position: vec3, + }; + + @vertex + fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { + var out: VertexOutput; + var matrics: mat4x4; + out.position = matrics * out.position; + return out; + } + """ + + expected1 = """Left: Load { pointer: [3] } of type Matrix { columns: Quad, rows: Quad, width: 4 }""" + expected2 = """Right: Load { pointer: [6] } of type Vector { size: Tri, kind: Float, width: 4 }""" + expected3 = """ + Validation Error + + Caused by: + In wgpuDeviceCreateShaderModule + + Shader validation error: + ┌─ :10:20 + │ + 10 │ out.position = matrics * out.position; + │ ^^^^^^^^^^^^^^^^^^^^^^ naga::Expression [8] + + + Entry point vs_main at Vertex is invalid + Expression [8] is invalid + Operation Multiply can't work with [5] and [7] + """ + + code = dedent(code) + expected3 = dedent(expected3) + with raises(wgpu.GPUError) as err: + device.create_shader_module(code=code) + + # skip error info + assert caplog.records[0].msg == expected1 + assert caplog.records[1].msg == expected2 + assert err.value.message.strip() == expected3, f"Expected:\n\n{expected3}" + + +def test_validate_shader_error2(caplog): + # test2: Validation error, multiple line error, return type mismatch + device = wgpu.utils.get_default_device() + + code = """ + struct Varyings { + @builtin(position) position : vec4, + @location(0) uv : vec2, + }; + + @vertex + fn fs_main(in: Varyings) -> @location(0) vec4 { + if (in.uv.x > 0.5) { + return vec3(1.0, 0.0, 1.0); + } else { + return vec3(0.0, 1.0, 1.0); + } + } + """ + + expected1 = """Returning Some(Vector { size: Tri, kind: Float, width: 4 }) where Some(Vector { size: Quad, kind: Float, width: 4 }) is expected""" + expected2 = """ + Validation Error + + Caused by: + In wgpuDeviceCreateShaderModule + + Shader validation error: + ┌─ :9:16 + │ + 9 │ return vec3(1.0, 0.0, 1.0); + │ ^^^^^^^^^^^^^^^^^^^^^^^^ naga::Expression [9] + + + Entry point fs_main at Vertex is invalid + The `return` value Some([9]) does not match the function return value + """ + + code = dedent(code) + expected2 = dedent(expected2) + with raises(wgpu.GPUError) as err: + device.create_shader_module(code=code) + + # skip error info + assert caplog.records[0].msg == expected1 + assert err.value.message.strip() == expected2, f"Expected:\n\n{expected2}" + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_wgpu_native_query_set.py b/tests/test_wgpu_native_query_set.py new file mode 100644 index 0000000..fe8d1f7 --- /dev/null +++ 
b/tests/test_wgpu_native_query_set.py @@ -0,0 +1,151 @@ +import wgpu.utils + +from testutils import run_tests, can_use_wgpu_lib +from pytest import mark + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_query_set(): + shader_source = """ + @group(0) @binding(0) + var data1: array; + + @group(0) @binding(1) + var data2: array; + + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i: u32 = index.x; + data2[i] = data1[i] / 2.0; + } + """ + + n = 1024 + data1 = memoryview(bytearray(n * 4)).cast("f") + + for i in range(n): + data1[i] = float(i) + + adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + device = adapter.request_device( + required_features=[wgpu.FeatureName.timestamp_query] + ) + + assert repr(device).startswith(" 0 and timestamps[1] > 0 and timestamps[1] > timestamps[0] + + out = device.queue.read_buffer(buffer2).cast("f") + result = out.tolist() + + # Perform the same division on the CPU + result_cpu = [a / 2.0 for a in data1] + + # Ensure results are the same + assert result == result_cpu + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_wgpu_native_render.py b/tests/test_wgpu_native_render.py new file mode 100644 index 0000000..71f1b30 --- /dev/null +++ b/tests/test_wgpu_native_render.py @@ -0,0 +1,629 @@ +""" +Test render pipeline, by drawing a whole lot of orange squares ... +""" + +import ctypes +import numpy as np +import sys + +import wgpu +from pytest import skip +from testutils import run_tests, can_use_wgpu_lib, is_ci, get_default_device +from renderutils import render_to_texture, render_to_screen # noqa + + +if not can_use_wgpu_lib: + skip("Skipping tests that need the wgpu lib", allow_module_level=True) +elif is_ci and sys.platform == "win32": + skip("These tests fail on dx12 for some reason", allow_module_level=True) + + +default_vertex_shader = """ +@vertex +fn vs_main(@builtin(vertex_index) vertex_index : u32) -> @builtin(position) vec4 { + var positions: array, 4> = array, 4>( + vec3(-0.5, -0.5, 0.1), + vec3(-0.5, 0.5, 0.1), + vec3( 0.5, -0.5, 0.1), + vec3( 0.5, 0.5, 0.1), + ); + let p: vec3 = positions[vertex_index]; + return vec4(p, 1.0); +} +""" + + +# %% Simple square + + +def test_render_orange_square(): + """Render an orange square and check that there is an orange square.""" + + device = get_default_device() + + # NOTE: the 0.499 instead of 0.5 is to make sure the resulting value is 127. + # With 0.5 some drivers would produce 127 and others 128. 
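+ # (0.499 * 255 = 127.245, which comes out as 127 everywhere, whereas 0.5 * 255 = 127.5 can round either way.)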
+ + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + shader_source = default_vertex_shader + fragment_shader + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args) + a = render_to_texture(*render_args, size=(64, 64)) + + # Check that the background is all zero + bg = a.copy() + bg[16:-16, 16:-16, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:-16, 16:-16, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +# %% Variations + + +def test_render_orange_square_indexed(): + """Render an orange square, using an index buffer.""" + + device = get_default_device() + + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + shader_source = default_vertex_shader + fragment_shader + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Index buffer + indices = (ctypes.c_int32 * 6)(0, 1, 2, 2, 1, 3) + ibo = device.create_buffer_with_data( + data=indices, + usage=wgpu.BufferUsage.INDEX, + ) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, topology=wgpu.PrimitiveTopology.triangle_list, ibo=ibo) + a = render_to_texture( + *render_args, + size=(64, 64), + topology=wgpu.PrimitiveTopology.triangle_list, + ibo=ibo, + ) + + # Check that the background is all zero + bg = a.copy() + bg[16:-16, 16:-16, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:-16, 16:-16, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +def test_render_orange_square_indirect(): + """Render an orange square and check that there is an orange square.""" + + device = get_default_device() + + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + shader_source = default_vertex_shader + fragment_shader + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Buffer with draw parameters for indirect draw call + params = (ctypes.c_int32 * 4)(4, 1, 0, 0) + indirect_buffer = device.create_buffer_with_data( + data=params, + usage=wgpu.BufferUsage.INDIRECT, + ) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, indirect_buffer=indirect_buffer) + a = render_to_texture(*render_args, size=(64, 64), indirect_buffer=indirect_buffer) + + # Check that the background is all zero + bg = a.copy() + bg[16:-16, 16:-16, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:-16, 16:-16, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +def test_render_orange_square_indexed_indirect(): + """Render an orange square, using an index buffer.""" + + device = get_default_device() + + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + 
shader_source = default_vertex_shader + fragment_shader + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Index buffer + indices = (ctypes.c_int32 * 6)(0, 1, 2, 2, 1, 3) + ibo = device.create_buffer_with_data( + data=indices, + usage=wgpu.BufferUsage.INDEX, + ) + + # Buffer with draw parameters for indirect draw call + params = (ctypes.c_int32 * 5)(6, 1, 0, 0, 0) + indirect_buffer = device.create_buffer_with_data( + data=params, + usage=wgpu.BufferUsage.INDIRECT, + ) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, topology=wgpu.PrimitiveTopology.triangle_list, ibo=ibo, indirect_buffer=indirect_buffer) + a = render_to_texture( + *render_args, + size=(64, 64), + topology=wgpu.PrimitiveTopology.triangle_list, + ibo=ibo, + indirect_buffer=indirect_buffer, + ) + + # Check that the background is all zero + bg = a.copy() + bg[16:-16, 16:-16, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:-16, 16:-16, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +def test_render_orange_square_vbo(): + """Render an orange square, using a VBO.""" + + device = get_default_device() + + shader_source = """ + @vertex + fn vs_main(@location(0) pos : vec2) -> @builtin(position) vec4 { + return vec4(pos, 0.0, 1.0); + } + + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Vertex buffer + pos_data = (ctypes.c_float * 8)(-0.5, -0.5, -0.5, +0.5, +0.5, -0.5, +0.5, +0.5) + vbo = device.create_buffer_with_data( + data=pos_data, + usage=wgpu.BufferUsage.VERTEX, + ) + + # Vertex buffer views + vbo_view = { + "array_stride": 4 * 2, + "step_mode": "vertex", + "attributes": [ + { + "format": wgpu.VertexFormat.float32x2, + "offset": 0, + "shader_location": 0, + }, + ], + } + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, vbos=[vbo], vbo_views=[vbo_view]) + a = render_to_texture(*render_args, size=(64, 64), vbos=[vbo], vbo_views=[vbo_view]) + + # Check that the background is all zero + bg = a.copy() + bg[16:-16, 16:-16, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:-16, 16:-16, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +def test_render_orange_square_color_attachment1(): + """Render an orange square on a blue background, testing the load_op.""" + + device = get_default_device() + + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + shader_source = default_vertex_shader + fragment_shader + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + ca = { + "resolve_target": None, + "clear_value": (0, 0, 0.8, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, color_attachment=ca) + a = render_to_texture(*render_args, size=(64, 64), color_attachment=ca) + + # Check the blue background + assert 
np.all(a[:16, :16, 2] == 204) + assert np.all(a[:16, -16:, 2] == 204) + assert np.all(a[-16:, :16, 2] == 204) + assert np.all(a[-16:, -16:, 2] == 204) + + # Check the square + sq = a[16:-16, 16:-16, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +def test_render_orange_square_color_attachment2(): + """Render an orange square on a blue background, testing the LoadOp.load, + though in this case the result is the same as the normal square test. + """ + + device = get_default_device() + + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4<f32> { + return vec4<f32>(1.0, 0.499, 0.0, 1.0); + } + """ + shader_source = default_vertex_shader + fragment_shader + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + ca = { + "resolve_target": None, + "load_op": wgpu.LoadOp.load, + "store_op": wgpu.StoreOp.store, + } + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, color_attachment=ca) + a = render_to_texture(*render_args, size=(64, 64), color_attachment=ca) + + # Check the background + bg = a.copy() + bg[16:-16, 16:-16, :] = 0 + # assert np.all(bg == 0) + # Actually, it seems unpredictable what the bg is if we don't clear it? + + # Check the square + sq = a[16:-16, 16:-16, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +# %% Viewport and stencil + + +def test_render_orange_square_viewport(): + """Render an orange square, in a sub-viewport of the rendered area.""" + + device = get_default_device() + + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4<f32> { + return vec4<f32>(1.0, 0.499, 0.0, 1.0); + } + """ + shader_source = default_vertex_shader + fragment_shader + + def cb(renderpass): + renderpass.set_viewport(10, 20, 32, 32, 0, 1) + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, renderpass_callback=cb) + a = render_to_texture(*render_args, size=(64, 64), renderpass_callback=cb) + + # Check that the background is all zero + bg = a.copy() + bg[20 + 8 : 52 - 8, 10 + 8 : 42 - 8, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[20 + 8 : 52 - 8, 10 + 8 : 42 - 8, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +def test_render_orange_square_scissor(): + """Render an orange square, but scissor half the screen away.""" + + device = get_default_device() + + fragment_shader = """ + @fragment + fn fs_main() -> @location(0) vec4<f32> { + return vec4<f32>(1.0, 0.499, 0.0, 1.0); + } + """ + shader_source = default_vertex_shader + fragment_shader + + def cb(renderpass): + renderpass.set_scissor_rect(0, 0, 32, 32) + # Also set blend color. Does not change output, but covers the call.
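+ # (The blend constant only matters for pipelines that use a 'constant' blend factor, which this one does not.)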
+ renderpass.set_blend_constant((0, 0, 0, 1)) + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, renderpass_callback=cb) + a = render_to_texture(*render_args, size=(64, 64), renderpass_callback=cb) + + # Check that the background is all zero + bg = a.copy() + bg[16:32, 16:32, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:32, 16:32, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +def test_render_orange_square_depth16unorm(): + """Render an orange square, but disable half of it using a depth test using 16 bits.""" + _render_orange_square_depth(wgpu.TextureFormat.depth16unorm) + + +def test_render_orange_square_depth24plus_stencil8(): + """Render an orange square, but disable half of it using a depth test using 24 bits.""" + _render_orange_square_depth(wgpu.TextureFormat.depth24plus_stencil8) + + +def test_render_orange_square_depth32float(): + """Render an orange square, but disable half of it using a depth test using 32 bits.""" + _render_orange_square_depth(wgpu.TextureFormat.depth32float) + + +def _render_orange_square_depth(depth_stencil_tex_format): + device = get_default_device() + + shader_source = """ + @vertex + fn vs_main(@builtin(vertex_index) vertex_index : u32) -> @builtin(position) vec4 { + var positions: array, 4> = array, 4>( + vec3(-0.5, -0.5, 0.0), + vec3(-0.5, 0.5, 0.0), + vec3( 0.5, -0.5, 0.2), + vec3( 0.5, 0.5, 0.2), + ); + let p: vec3 = positions[vertex_index]; + return vec4(p, 1.0); + } + + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + + def cb(renderpass): + renderpass.set_stencil_reference(42) + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Create dept-stencil texture + depth_stencil_texture = device.create_texture( + size=(64, 64, 1), # when rendering to texture + # size=(640, 480, 1), # when rendering to screen + dimension=wgpu.TextureDimension.d2, + format=depth_stencil_tex_format, + usage=wgpu.TextureUsage.RENDER_ATTACHMENT, + ) + + depth_stencil_state = dict( + format=depth_stencil_tex_format, + depth_write_enabled=True, + depth_compare=wgpu.CompareFunction.less_equal, + # stencil_front={ + # "compare": wgpu.CompareFunction.equal, + # "fail_op": wgpu.StencilOperation.keep, + # "depth_fail_op": wgpu.StencilOperation.keep, + # "pass_op": wgpu.StencilOperation.keep, + # }, + # stencil_back={ + # "compare": wgpu.CompareFunction.equal, + # "fail_op": wgpu.StencilOperation.keep, + # "depth_fail_op": wgpu.StencilOperation.keep, + # "pass_op": wgpu.StencilOperation.keep, + # }, + stencil_read_mask=0, + stencil_write_mask=0, + depth_bias=0, + depth_bias_slope_scale=0.0, + depth_bias_clamp=0.0, + ) + + depth_stencil_attachment = dict( + view=depth_stencil_texture.create_view(), + depth_clear_value=0.1, + depth_load_op=wgpu.LoadOp.clear, + depth_store_op=wgpu.StoreOp.store, + stencil_load_op=wgpu.LoadOp.load, + stencil_store_op=wgpu.StoreOp.store, + ) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args, renderpass_callback=cb, depth_stencil_state=depth_stencil_state, depth_stencil_attachment=depth_stencil_attachment) + a = render_to_texture( + *render_args, 
+ size=(64, 64), + renderpass_callback=cb, + depth_stencil_state=depth_stencil_state, + depth_stencil_attachment=depth_stencil_attachment, + ) + + # Check that the background is all zero + bg = a.copy() + bg[16:-16, 16:32, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:-16, 16:32, :] + assert np.all(sq[:, :, 0] == 255) # red + assert np.all(sq[:, :, 1] == 127) # green + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +# %% Not squares + + +def test_render_orange_dots(): + """Render four orange dots and check that there are four orange square dots.""" + + device = get_default_device() + + shader_source = """ + struct VertexOutput { + @builtin(position) position: vec4, + //@builtin(pointSize) point_size: f32, + }; + + @vertex + fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { + var positions: array, 4> = array, 4>( + vec3(-0.5, -0.5, 0.0), + vec3(-0.5, 0.5, 0.0), + vec3( 0.5, -0.5, 0.2), + vec3( 0.5, 0.5, 0.2), + ); + var out: VertexOutput; + out.position = vec4(positions[vertex_index], 1.0); + //out.point_size = 16.0; + return out; + } + + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.499, 0.0, 1.0); + } + """ + + # Bindings and layout + bind_group = None + pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + top = wgpu.PrimitiveTopology.point_list + # render_to_screen(*render_args, topology=top) + a = render_to_texture(*render_args, size=(64, 64), topology=top) + + # Check that the background is all zero + bg = a.copy() + bg[8:24, 8:24, :] = 0 + bg[8:24, 40:56, :] = 0 + bg[40:56, 8:24, :] = 0 + bg[40:56, 40:56, :] = 0 + assert np.all(bg == 0) + + # Check the square + # Ideally we'd want to set the point_size (gl_PointSize) to 16 but + # this is not supported in WGPU, see https://github.com/gpuweb/gpuweb/issues/332 + # So our points are 1px + for dot in ( + a[15:16, 15:16, :], + a[15:16, 47:48, :], + a[47:48, 15:16, :], + a[47:48, 47:48, :], + ): + assert np.all(dot[:, :, 0] == 255) # red + assert np.all(dot[:, :, 1] == 127) # green + assert np.all(dot[:, :, 2] == 0) # blue + assert np.all(dot[:, :, 3] == 255) # alpha + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_wgpu_native_render_tex.py b/tests/test_wgpu_native_render_tex.py new file mode 100644 index 0000000..8096d87 --- /dev/null +++ b/tests/test_wgpu_native_render_tex.py @@ -0,0 +1,566 @@ +""" +Test render pipeline by rendering to a texture. 
+""" + +import ctypes +import numpy as np +import sys + +import wgpu +from pytest import skip +from testutils import run_tests, get_default_device +from testutils import can_use_wgpu_lib, is_ci +from renderutils import upload_to_texture, render_to_texture, render_to_screen # noqa + + +if not can_use_wgpu_lib: + skip("Skipping tests that need the wgpu lib", allow_module_level=True) +elif is_ci and sys.platform == "win32": + skip("These tests fail on dx12 for some reason", allow_module_level=True) + + +default_vertex_shader = """ +struct VertexOutput { + @location(0) texcoord : vec2, + @builtin(position) position: vec4, +}; + +@vertex +fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { + var positions: array, 4> = array, 4>( + vec2(-0.5, -0.5), + vec2(-0.5, 0.5), + vec2( 0.5, -0.5), + vec2( 0.5, 0.5), + ); + let p: vec2 = positions[vertex_index]; + var out: VertexOutput; + out.position = vec4(p, 0.0, 1.0); + out.texcoord = p + 0.5; + return out; +} +""" + + +def _create_data(v1, v2, v3, v4): + assert len(v1) == len(v2) + assert len(v1) == len(v3) + assert len(v1) == len(v4) + data = [] + for y in range(128): + data.extend(list(v1) * 128) + data.extend(list(v2) * 128) + for y in range(128): + data.extend(list(v3) * 128) + data.extend(list(v4) * 128) + return data + + +# %% rgba textures + + +def test_render_textured_square_rgba8unorm(): + """Test a texture with format rgba8unorm.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let sample = textureSample(r_tex, r_sampler, in.texcoord); + return sample; + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data( + (50, 50, 0, 255), (100, 100, 0, 255), (150, 150, 0, 255), (200, 200, 0, 255) + ) + texture_data = (ctypes.c_uint8 * (4 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rgba8unorm, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_rgba8uint(): + """Test a texture with format rgba8uint.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + // let sample = textureSample(r_tex, r_sampler, in.texcoord); + let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); + let sample = textureLoad(r_tex, texcoords_u, 0); + return vec4(sample) / 255.0; + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data( + (50, 50, 0, 255), (100, 100, 0, 255), (150, 150, 0, 255), (200, 200, 0, 255) + ) + texture_data = (ctypes.c_uint8 * (4 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rgba8uint, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_rgba16sint(): + """Test a texture with format rgba16sint.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + // let sample = textureSample(r_tex, r_sampler, in.texcoord); + let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); + let sample = textureLoad(r_tex, texcoords_u, 0); + return vec4(sample) / 255.0; + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data( + (50, 50, 0, 255), (100, 100, 0, 255), (150, 
150, 0, 255), (200, 200, 0, 255) + ) + texture_data = (ctypes.c_int16 * (4 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rgba16sint, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_rgba32float(): + """Test a texture with format rgba32float.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let sample = textureSample(r_tex, r_sampler, in.texcoord); + return sample / 255.0; + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data( + (50, 50, 0, 255), (100, 100, 0, 255), (150, 150, 0, 255), (200, 200, 0, 255) + ) + texture_data = (ctypes.c_float * (4 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rgba32float, (nx, ny, nz), texture_data + ) + + +# %% rg textures + + +def test_render_textured_square_rg8unorm(): + """Test a texture with format rg8unorm. + The GPU considers blue to be 0 and alpha to be 1. + """ + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let sample = textureSample(r_tex, r_sampler, in.texcoord); + return sample; + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) + texture_data = (ctypes.c_ubyte * (2 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rg8unorm, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_rg8uint(): + """Test a texture with format rg8uint. + The GPU considers blue to be 0 and alpha to be 1. + """ + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + // let sample = textureSample(r_tex, r_sampler, in.texcoord); + let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); + let sample = textureLoad(r_tex, texcoords_u, 0); + return vec4(f32(sample.r) / 255.0, f32(sample.g) / 255.0, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) + texture_data = (ctypes.c_ubyte * (2 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rg8uint, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_rg16sint(): + """Test a texture with format rg16sint. + The GPU considers blue to be 0 and alpha to be 1. 
+ """ + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + // let sample = textureSample(r_tex, r_sampler, in.texcoord); + let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); + let sample = textureLoad(r_tex, texcoords_u, 0); + return vec4(f32(sample.r) / 255.0, f32(sample.g) / 255.0, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) + texture_data = (ctypes.c_int16 * (2 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rg16sint, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_rg32float(): + """Test a texture with format rg32float. + The GPU considers blue to be 0 and alpha to be 1. + """ + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let sample = textureSample(r_tex, r_sampler, in.texcoord); + return vec4(sample.rg / 255.0, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) + texture_data = (ctypes.c_float * (2 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.rg32float, (nx, ny, nz), texture_data + ) + + +# %% r textures + + +def test_render_textured_square_r8unorm(): + """Test a texture with format r8unorm.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let sample = textureSample(r_tex, r_sampler, in.texcoord); + let val = sample.r; + return vec4(val, val, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50,), (100,), (150,), (200,)) + texture_data = (ctypes.c_uint8 * (1 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.r8unorm, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_r8uint(): + """Test a texture with format r8uint.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); + let sample = textureLoad(r_tex, texcoords_u, 0); + let val = f32(sample.r) / 255.0; + return vec4(val, val, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50,), (100,), (150,), (200,)) + texture_data = (ctypes.c_uint8 * (1 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.r8uint, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_r16sint(): + """Test a texture with format r16sint. Because e.g. 
CT data.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); + let sample = textureLoad(r_tex, texcoords_u, 0); + let val = f32(sample.r) / 255.0; + return vec4(val, val, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50,), (100,), (150,), (200,)) + texture_data = (ctypes.c_int16 * (1 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.r16sint, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_r32sint(): + """Test a texture with format r32sint. Because e.g. CT data.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); + let sample = textureLoad(r_tex, texcoords_u, 0); + let val = f32(sample.r) / 255.0; + return vec4(val, val, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50,), (100,), (150,), (200,)) + texture_data = (ctypes.c_int32 * (1 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.r32sint, (nx, ny, nz), texture_data + ) + + +def test_render_textured_square_r32float(): + """Test a texture with format r32float.""" + + fragment_shader = """ + @group(0) @binding(0) + var r_tex: texture_2d; + @group(0) @binding(1) + var r_sampler: sampler; + + @fragment + fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { + let sample = textureSample(r_tex, r_sampler, in.texcoord); + let val = sample.r / 255.0; + return vec4(val, val, 0.0, 1.0); + } + """ + + # Create texture data + nx, ny, nz = 256, 256, 1 + x = _create_data((50,), (100,), (150,), (200,)) + texture_data = (ctypes.c_float * (1 * nx * ny))(*x) + + # Render and validate + render_textured_square( + fragment_shader, wgpu.TextureFormat.r32float, (nx, ny, nz), texture_data + ) + + +# %% Utils + + +def render_textured_square(fragment_shader, texture_format, texture_size, texture_data): + """Render, and test the result. The resulting image must be a + gradient on R and B, zeros on G and ones on A. + """ + nx, ny, nz = texture_size + + device = get_default_device() + + shader_source = default_vertex_shader + fragment_shader + + # Create texture + texture = device.create_texture( + size=(nx, ny, nz), + dimension=wgpu.TextureDimension.d2, + format=texture_format, + usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST, + ) + upload_to_texture(device, texture, texture_data, nx, ny, nz) + + texture_view = texture.create_view() + # or: + texture_view = texture.create_view( + format=texture_format, + dimension=wgpu.TextureDimension.d2, + ) + + sampler = device.create_sampler(mag_filter="nearest", min_filter="nearest") + + # Default sampler type. + # Note that integer texture types cannot even use a sampler. 
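+    # Roughly, the mapping applied below, keyed on the format suffix, is:
+    #   *norm  -> float sample type, filtering sampler
+    #   *float -> unfilterable-float sample type, non-filtering sampler
+    #   *uint  -> uint sample type (read with textureLoad, not textureSample)
+    #   *sint  -> sint sample type (idem)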
+ sampler_type = wgpu.SamplerBindingType.filtering + + # Determine texture component type from the format + if texture_format.endswith("norm"): + # We can use a filtering sampler + texture_sample_type = wgpu.TextureSampleType.float + elif texture_format.endswith("float"): + # On vanilla wgpu, float32 textures cannot use a filtering + # (interpolating) sampler (a feature needs to be enabled for that). + # Without it, we need to use a non-filtering sampler. + texture_sample_type = wgpu.TextureSampleType.unfilterable_float + sampler_type = wgpu.SamplerBindingType.non_filtering + elif "uint" in texture_format: + # Cannot even use a sampler (use textureLoad instead of textureSample) + texture_sample_type = wgpu.TextureSampleType.uint + else: + # Ditto + texture_sample_type = wgpu.TextureSampleType.sint + + # Bindings and layout + bindings = [ + {"binding": 0, "resource": texture_view}, + {"binding": 1, "resource": sampler}, + ] + binding_layouts = [ + { + "binding": 0, + "visibility": wgpu.ShaderStage.FRAGMENT, + "texture": { + "sample_type": texture_sample_type, + "view_dimension": wgpu.TextureViewDimension.d2, + }, + }, + { + "binding": 1, + "visibility": wgpu.ShaderStage.FRAGMENT, + "sampler": { + "type": sampler_type, + }, + }, + ] + bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) + pipeline_layout = device.create_pipeline_layout( + bind_group_layouts=[bind_group_layout] + ) + bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + + # Render + render_args = device, shader_source, pipeline_layout, bind_group + # render_to_screen(*render_args) + a = render_to_texture(*render_args, size=(64, 64)) + + # print(a.max(), a[:,:,0].max()) + + # Check that the background is all zero + bg = a.copy() + bg[16:-16, 16:-16, :] = 0 + assert np.all(bg == 0) + + # Check the square + sq = a[16:-16, 16:-16, :] + ref1 = [ + [150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150], + [150, 150, 150, 200, 200, 200], + [200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200], + ] + ref2 = [ + [150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150], + [150, 150, 150, 50, 50, 50], + [50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50], + ] + ref1, ref2 = sum(ref1, []), sum(ref2, []) + + assert np.allclose(sq[0, :, 0], ref1, atol=1) + assert np.allclose(sq[:, 0, 0], ref2, atol=1) + assert np.allclose(sq[0, :, 1], ref1, atol=1) + assert np.allclose(sq[:, 0, 1], ref2, atol=1) + assert np.all(sq[:, :, 2] == 0) # blue + assert np.all(sq[:, :, 3] == 255) # alpha + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/test_wgpu_native_texture.py b/tests/test_wgpu_native_texture.py new file mode 100644 index 0000000..6bd300e --- /dev/null +++ b/tests/test_wgpu_native_texture.py @@ -0,0 +1,285 @@ +import random +import ctypes + +import wgpu.utils +import numpy as np + +from testutils import run_tests, can_use_wgpu_lib, iters_equal +from pytest import mark, raises + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_do_a_copy_roundtrip(): + # Let's take some data, and copy it to buffer to texture to + # texture to buffer to buffer and back to CPU.
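+    # The copy chain exercised below is roughly:
+    #   data1 -> buf1 -> tex2 -> tex3 -> buf4 -> buf5 -> data2
+    # i.e. a buffer-to-texture, a texture-to-texture, a texture-to-buffer and
+    # a buffer-to-buffer copy, after which the data should be unchanged.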
+ + device = wgpu.utils.get_default_device() + + nx, ny, nz = 128, 1, 1 + data1 = np.random.random(size=nx * ny * nz).astype(np.float32) + nbytes = data1.nbytes + bpp = nbytes // (nx * ny * nz) + texture_format = wgpu.TextureFormat.r32float + texture_dim = wgpu.TextureDimension.d1 + + # Create buffers and textures + stubusage = wgpu.TextureUsage.STORAGE_BINDING + buf1 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + tex2 = device.create_texture( + size=(nx, ny, nz), + dimension=texture_dim, + format=texture_format, + usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST | stubusage, + ) + tex3 = device.create_texture( + size=(nx, ny, nz), + dimension=texture_dim, + format=texture_format, + usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST | stubusage, + ) + buf4 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST + ) + buf5 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + + # Check texture stats + assert tex2.size == (nx, ny, nz) + assert tex2.mip_level_count == 1 + assert tex2.sample_count == 1 + assert tex2.dimension == wgpu.TextureDimension.d1 + assert tex2.format == texture_format + assert tex2.usage & wgpu.TextureUsage.COPY_SRC + assert tex2.usage & wgpu.TextureUsage.COPY_DST + assert tex2.create_view().texture is tex2 + + # Upload from CPU to buffer + # assert buf1.state == "unmapped" + # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # assert buf1.state == "mapped" + # mapped_data.cast("f")[:] = data1 + # buf1.unmap() + # assert buf1.state == "unmapped" + device.queue.write_buffer(buf1, 0, data1) + + # Copy from buffer to texture + command_encoder = device.create_command_encoder() + command_encoder.copy_buffer_to_texture( + {"buffer": buf1, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, + {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, + (nx, ny, nz), + ) + device.queue.submit([command_encoder.finish()]) + # Copy from texture to texture + command_encoder = device.create_command_encoder() + command_encoder.copy_texture_to_texture( + {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, + {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, + (nx, ny, nz), + ) + device.queue.submit([command_encoder.finish()]) + # Copy from texture to buffer + command_encoder = device.create_command_encoder() + command_encoder.copy_texture_to_buffer( + {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, + {"buffer": buf4, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, + (nx, ny, nz), + ) + device.queue.submit([command_encoder.finish()]) + # Copy from buffer to buffer + command_encoder = device.create_command_encoder() + command_encoder.copy_buffer_to_buffer(buf4, 0, buf5, 0, nbytes) + device.queue.submit([command_encoder.finish()]) + + # Download from buffer to CPU + # assert buf5.state == "unmapped" + # assert buf5.map_mode == 0 + # result_data = buf5.map(wgpu.MapMode.READ) # a memoryview + # assert buf5.state == "mapped" + # assert buf5.map_mode == wgpu.MapMode.READ + # buf5.unmap() + # assert buf5.state == "unmapped" + result_data = device.queue.read_buffer(buf5) + + # CHECK! 
+ data2 = np.frombuffer(result_data, dtype=np.float32) + assert np.all(data1 == data2) + + # Do another round-trip, but now using a single pass + data3 = data1 + 1 + assert np.all(data1 != data3) + + # Upload from CPU to buffer + # assert buf1.state == "unmapped" + # assert buf1.map_mode == 0 + # mapped_data = buf1.map(wgpu.MapMode.WRITE) + # assert buf1.state == "mapped" + # assert buf1.map_mode == wgpu.MapMode.WRITE + # mapped_data.cast("f")[:] = data3 + # buf1.unmap() + # assert buf1.state == "unmapped" + # assert buf1.map_mode == 0 + device.queue.write_buffer(buf1, 0, data3) + + # Copy from buffer to texture + command_encoder = device.create_command_encoder() + command_encoder.copy_buffer_to_texture( + {"buffer": buf1, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, + {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, + (nx, ny, nz), + ) + # Copy from texture to texture + command_encoder.copy_texture_to_texture( + {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, + {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, + (nx, ny, nz), + ) + # Copy from texture to buffer + command_encoder.copy_texture_to_buffer( + {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, + {"buffer": buf4, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, + (nx, ny, nz), + ) + + # Copy from buffer to buffer + command_encoder.copy_buffer_to_buffer(buf4, 0, buf5, 0, nbytes) + device.queue.submit([command_encoder.finish()]) + + # Download from buffer to CPU + # assert buf5.state == "unmapped" + # result_data = buf5.map(wgpu.MapMode.READ) # always an uint8 array + # assert buf5.state == "mapped" + # buf5.unmap() + # assert buf5.state == "unmapped" + result_data = device.queue.read_buffer(buf5) + + # CHECK! + data4 = np.frombuffer(result_data, dtype=np.float32) + assert np.all(data3 == data4) + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_write_texture1(): + device = wgpu.utils.get_default_device() + + nx, ny, nz = 128, 1, 1 + data1 = memoryview(np.random.random(size=nx).astype(np.float32)) + bpp = data1.nbytes // (nx * ny * nz) + texture_format = wgpu.TextureFormat.r32float + texture_dim = wgpu.TextureDimension.d1 + + # Create buffers and textures + tex3 = device.create_texture( + size=(nx, ny, nz), + dimension=texture_dim, + format=texture_format, + usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST, + ) + buf4 = device.create_buffer( + size=data1.nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + + # Upload from CPU to texture + command_encoder = device.create_command_encoder() + device.queue.write_texture( + {"texture": tex3}, + data1, + {"bytes_per_row": bpp * nx, "rows_per_image": ny}, + (nx, ny, nz), + ) + # device.queue.submit([]) -> call further down + + # Copy from texture to buffer + command_encoder.copy_texture_to_buffer( + {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, + {"buffer": buf4, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, + (nx, ny, nz), + ) + device.queue.submit([command_encoder.finish()]) + + # Download from buffer to CPU + data2 = device.queue.read_buffer(buf4).cast("f") + assert data1 == data2 + + # That last step can also be done easier + data3 = device.queue.read_texture( + { + "texture": tex3, + }, + {"bytes_per_row": bpp * nx}, + (nx, ny, nz), + ).cast("f") + assert data1 == data3 + + +@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") +def test_write_texture2(): + device = wgpu.utils.get_default_device() + + nx, ny, nz = 100, 1, 1 + data0 = 
(ctypes.c_float * nx)(*[random.random() for i in range(nx * ny * nz)]) + data1 = (ctypes.c_float * nx)() + nbytes = ctypes.sizeof(data1) + bpp = nbytes // (nx * ny * nz) + texture_format = wgpu.TextureFormat.r32float + texture_dim = wgpu.TextureDimension.d1 + + # Create buffers and textures + tex3 = device.create_texture( + size=(nx, ny, nz), + dimension=texture_dim, + format=texture_format, + usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST, + ) + buf4 = device.create_buffer( + size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC + ) + + for i in range(len(data1)): + data1[i] = data0[i] + + # Upload from CPU to texture + command_encoder = device.create_command_encoder() + device.queue.write_texture( + {"texture": tex3}, + data1, + {"bytes_per_row": bpp * nx, "rows_per_image": ny}, + (nx, ny, nz), + ) + # device.queue.submit([]) -> call further down + + # Invalidate the data now, to show that write_texture has made a copy + for i in range(len(data1)): + data1[i] = 1 + + # Copy from texture to buffer - + # FAIL! because bytes_per_row is not multiple of 256! + with raises(ValueError): + command_encoder.copy_texture_to_buffer( + {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, + { + "buffer": buf4, + "offset": 0, + "bytes_per_row": bpp * nx, + "rows_per_image": ny, + }, + (nx, ny, nz), + ) + + # Download from texture to CPU (via a temp buffer) + # No requirent on bytes_per_row! + data2 = device.queue.read_texture( + {"texture": tex3}, + {"bytes_per_row": bpp * nx}, + (nx, ny, nz), + ) + data2 = data1.__class__.from_buffer(data2) + + assert iters_equal(data0, data2) + + +if __name__ == "__main__": + run_tests(globals()) diff --git a/tests/testutils.py b/tests/testutils.py new file mode 100644 index 0000000..567dade --- /dev/null +++ b/tests/testutils.py @@ -0,0 +1,138 @@ +import os +import re +import sys +import logging +import subprocess +from io import StringIO +from pathlib import Path + +from wgpu.utils import get_default_device # noqa + + +ROOT = Path(__file__).parent.parent # repo root +examples_dir = ROOT / "examples" +screenshots_dir = examples_dir / "screenshots" +diffs_dir = screenshots_dir / "diffs" + + +class LogCaptureHandler(logging.StreamHandler): + _ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") + + def __init__(self): + super().__init__(StringIO()) + self.records = [] + + def emit(self, record): + record.msg = self._ANSI_ESCAPE_SEQ.sub("", record.msg) + self.records.append(record) + super().emit(record) + + def reset(self): + self.records = [] + self.stream = StringIO() + + @property + def text(self): + f = logging.Formatter() + return "\n".join(f.format(r) for r in self.records) + + +def run_tests(scope): + """Run all test functions in the given scope.""" + caplog = LogCaptureHandler() + for func in list(scope.values()): + if callable(func) and func.__name__.startswith("test_"): + nargs = func.__code__.co_argcount + argnames = [func.__code__.co_varnames[i] for i in range(nargs)] + if not argnames: + print(f"Running {func.__name__} ...") + func() + elif argnames == ["caplog"]: + print(f"Running {func.__name__} ...") + logging.root.addHandler(caplog) + caplog.reset() + func(caplog) + logging.root.removeHandler(caplog) + else: + print(f"SKIPPING {func.__name__} because it needs args") + print("Done") + + +def iters_equal(iter1, iter2): + iter1, iter2 = list(iter1), list(iter2) + if len(iter1) == len(iter2): + if all(iter1[i] == iter2[i] for i in range(len(iter1))): + return True + return False + + +def _determine_can_use_wgpu_lib(): + # 
For some reason, since wgpu-native 5c304b5ea1b933574edb52d5de2d49ea04a053db + # the process' exit code is not zero, so we test more pragmatically. + code = "import wgpu.utils; wgpu.utils.get_default_device(); print('ok')" + result = subprocess.run( + [sys.executable, "-c", code], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + print("_determine_can_use_wgpu_lib() status code:", result.returncode) + return ( + result.stdout.strip().endswith("ok") + and "traceback" not in result.stderr.lower() + ) + + +def _determine_can_use_glfw(): + code = "import glfw;exit(0) if glfw.init() else exit(1)" + try: + subprocess.check_output([sys.executable, "-c", code]) + except Exception: + return False + else: + return True + + +def get_wgpu_backend(): + """ + Query the configured wgpu backend driver. + """ + code = "import wgpu.utils; info = wgpu.utils.get_default_device().adapter.request_adapter_info(); print(info['adapter_type'], info['backend_type'])" + result = subprocess.run( + [ + sys.executable, + "-c", + code, + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + cwd=ROOT, + ) + out = result.stdout.strip() + err = result.stderr.strip() + return err if "traceback" in err.lower() else out + + +def find_examples(query=None, negative_query=None, return_stems=False): + result = [] + for example_path in examples_dir.glob("*.py"): + example_code = example_path.read_text() + query_match = query is None or query in example_code + negative_query_match = ( + negative_query is None or negative_query not in example_code + ) + if query_match and negative_query_match: + result.append(example_path) + result = list(sorted(result)) + if return_stems: + result = [r.stem for r in result] + return result + + +can_use_wgpu_lib = _determine_can_use_wgpu_lib() +can_use_glfw = _determine_can_use_glfw() +is_ci = bool(os.getenv("CI", None)) +is_pypy = sys.implementation.name == "pypy" +wgpu_backend = get_wgpu_backend() +is_lavapipe = wgpu_backend.lower() == "cpu vulkan" diff --git a/tests_mem/test_gui_glfw.py b/tests_mem/test_gui_glfw.py new file mode 100644 index 0000000..5f4f9b6 --- /dev/null +++ b/tests_mem/test_gui_glfw.py @@ -0,0 +1,64 @@ +""" +Test creation of GLFW canvas windows. +""" + +import gc +import weakref +import asyncio + +import pytest +import testutils # noqa +from testutils import create_and_release, can_use_glfw, can_use_wgpu_lib +from test_gui_offscreen import make_draw_func_for_canvas + + +if not can_use_wgpu_lib: + pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) +if not can_use_glfw: + pytest.skip("Need glfw for this test", allow_module_level=True) + +loop = asyncio.get_event_loop_policy().get_event_loop() +if loop.is_running(): + pytest.skip("Asyncio loop is running", allow_module_level=True) + + +async def stub_event_loop(): + pass + + +@create_and_release +def test_release_canvas_context(n): + # Test with GLFW canvases. + + # Note: in a draw, the textureview is obtained (thus creating a + # Texture and a TextureView, but these are released in present(), + # so we don't see them in the counts. + + from wgpu.gui.glfw import WgpuCanvas # noqa + + yield {} + + canvases = weakref.WeakSet() + + for i in range(n): + c = WgpuCanvas() + canvases.add(c) + c.request_draw(make_draw_func_for_canvas(c)) + loop.run_until_complete(stub_event_loop()) + yield c.get_context() + + # Need some shakes to get all canvas refs gone. 
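+    # (Presumably pending event-loop callbacks can still hold a reference to
+    # the canvas, hence the alternating event-loop runs and gc passes below.)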
+ del c + loop.run_until_complete(stub_event_loop()) + gc.collect() + loop.run_until_complete(stub_event_loop()) + gc.collect() + + # Check that the canvas objects are really deleted + assert not canvases, f"Still {len(canvases)} canvases" + + +if __name__ == "__main__": + # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run + + test_release_canvas_context() diff --git a/tests_mem/test_gui_offscreen.py b/tests_mem/test_gui_offscreen.py new file mode 100644 index 0000000..0dd2e38 --- /dev/null +++ b/tests_mem/test_gui_offscreen.py @@ -0,0 +1,90 @@ +""" +Test creation of offscreen canvas windows. +""" + +import gc +import weakref + +import wgpu +import pytest +import testutils # noqa +from testutils import can_use_wgpu_lib, create_and_release, is_pypy + + +if not can_use_wgpu_lib: + pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) + + +DEVICE = wgpu.utils.get_default_device() + + +def make_draw_func_for_canvas(canvas): + """Create a draw function for the given canvas, + so that we can really present something to a canvas being tested. + """ + ctx = canvas.get_context() + ctx.configure(device=DEVICE, format="bgra8unorm-srgb") + + def draw(): + ctx = canvas.get_context() + command_encoder = DEVICE.create_command_encoder() + current_texture_view = ctx.get_current_texture().create_view() + render_pass = command_encoder.begin_render_pass( + color_attachments=[ + { + "view": current_texture_view, + "resolve_target": None, + "clear_value": (1, 1, 1, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ], + ) + render_pass.end() + DEVICE.queue.submit([command_encoder.finish()]) + + return draw + + +@create_and_release +def test_release_canvas_context(n): + # Test with offscreen canvases. A context is created, but not a wgpu-native surface. + + # Note: the offscreen canvas keeps the render-texture alive, since it + # is used to e.g. download the resulting image, and who knows how the + # user want to use the result. The context does drop its ref to the + # textures, which is why we don't see textures in the measurements. + + from wgpu.gui.offscreen import WgpuCanvas + + yield { + "expected_counts_after_create": { + "CanvasContext": (n, 0), + }, + } + + canvases = weakref.WeakSet() + for i in range(n): + c = WgpuCanvas() + canvases.add(c) + c.request_draw(make_draw_func_for_canvas(c)) + c.draw() + yield c.get_context() + + del c + gc.collect() + if is_pypy: + gc.collect() # Need a bit more on pypy :) + gc.collect() + + # Check that the canvas objects are really deleted + assert not canvases + + +TEST_FUNCS = [test_release_canvas_context] + + +if __name__ == "__main__": + # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run + + test_release_canvas_context() diff --git a/tests_mem/test_gui_qt.py b/tests_mem/test_gui_qt.py new file mode 100644 index 0000000..2804b4c --- /dev/null +++ b/tests_mem/test_gui_qt.py @@ -0,0 +1,58 @@ +""" +Test creation of Qt canvas windows. +""" + +import gc +import weakref + +import pytest +import testutils # noqa +from testutils import create_and_release, can_use_pyside6, can_use_wgpu_lib +from test_gui_offscreen import make_draw_func_for_canvas + + +if not can_use_wgpu_lib: + pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) +if not can_use_pyside6: + pytest.skip("Need pyside6 for this test", allow_module_level=True) + + +@create_and_release +def test_release_canvas_context(n): + # Test with PySide canvases. 
+ + # Note: in a draw, the textureview is obtained (thus creating a + # Texture and a TextureView, but these are released in present(), + # so we don't see them in the counts. + + import PySide6 # noqa + from wgpu.gui.qt import WgpuCanvas # noqa + + app = PySide6.QtWidgets.QApplication.instance() + if app is None: + app = PySide6.QtWidgets.QApplication([""]) + + yield {} + + canvases = weakref.WeakSet() + + for i in range(n): + c = WgpuCanvas() + canvases.add(c) + c.request_draw(make_draw_func_for_canvas(c)) + app.processEvents() + yield c.get_context() + + # Need some shakes to get all canvas refs gone. + del c + gc.collect() + app.processEvents() + + # Check that the canvas objects are really deleted + assert not canvases + + +if __name__ == "__main__": + # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run + + test_release_canvas_context() diff --git a/tests_mem/test_meta.py b/tests_mem/test_meta.py new file mode 100644 index 0000000..9271397 --- /dev/null +++ b/tests_mem/test_meta.py @@ -0,0 +1,81 @@ +""" +Some tests to confirm that the test mechanism is sound, and that tests +indeed fail under the right circumstances. +""" + +import wgpu + +import pytest +from testutils import can_use_wgpu_lib, create_and_release +from testutils import get_counts, ob_name_from_test_func +from test_objects import TEST_FUNCS as OBJECT_TEST_FUNCS +from test_gui_offscreen import TEST_FUNCS as GUI_TEST_FUNCS + + +ALL_TEST_FUNCS = OBJECT_TEST_FUNCS + GUI_TEST_FUNCS + + +if not can_use_wgpu_lib: + pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) + + +DEVICE = wgpu.utils.get_default_device() + + +def test_meta_all_objects_covered(): + """Test that we have a test_release test function for each known object.""" + + ref_obnames = set(key for key in get_counts().keys()) + func_obnames = set(ob_name_from_test_func(func) for func in ALL_TEST_FUNCS) + + missing = ref_obnames - func_obnames + extra = func_obnames - ref_obnames + assert not missing + assert not extra + + +def test_meta_all_functions_solid(): + """Test that all funcs starting with "test_release_" are decorated appropriately.""" + for func in ALL_TEST_FUNCS: + is_decorated = func.__code__.co_name == "core_test_func" + assert is_decorated, func.__name__ + " not decorated" + + +def test_meta_buffers_1(): + """Making sure that the test indeed fails, when holding onto the objects.""" + + lock = [] + + @create_and_release + def test_release_buffer(n): + yield {} + for i in range(n): + b = DEVICE.create_buffer(size=128, usage=wgpu.BufferUsage.COPY_DST) + lock.append(b) + yield b + + with pytest.raises(AssertionError): + test_release_buffer() + + +def test_meta_buffers_2(): + """Making sure that the test indeed fails, by disabling the release call.""" + + ori = wgpu.backends.wgpu_native.GPUBuffer._destroy + wgpu.backends.wgpu_native.GPUBuffer._destroy = lambda self: None + + from test_objects import test_release_buffer # noqa + + try: + with pytest.raises(AssertionError): + test_release_buffer() + + finally: + wgpu.backends.wgpu_native.GPUBuffer._destroy = ori + + +if __name__ == "__main__": + test_meta_all_objects_covered() + test_meta_all_functions_solid() + test_meta_buffers_1() + test_meta_buffers_2() diff --git a/tests_mem/test_objects.py b/tests_mem/test_objects.py new file mode 100644 index 0000000..f044e3f --- /dev/null +++ b/tests_mem/test_objects.py @@ -0,0 +1,377 @@ +""" +Test all the wgpu objects. 
+""" + +import pytest +import testutils # noqa +from testutils import can_use_wgpu_lib, create_and_release + + +if not can_use_wgpu_lib: + pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) + + +import wgpu + +DEVICE = wgpu.utils.get_default_device() + + +@create_and_release +def test_release_adapter(n): + yield {} + for i in range(n): + yield wgpu.gpu.request_adapter(power_preference="high-performance") + + +@create_and_release +def test_release_device(n): + pytest.skip("XFAIL") + # todo: XFAIL: Device object seem not to be cleaned up at wgpu-native. + + # Note: the WebGPU spec says: + # [request_device()] is a one-time action: if a device is returned successfully, the adapter becomes invalid. + + yield { + "expected_counts_after_create": {"Device": (n, n), "Queue": (n, 0)}, + } + adapter = DEVICE.adapter + for i in range(n): + d = adapter.request_device() + # d.queue._destroy() + # d._queue = None + yield d + + +@create_and_release +def test_release_bind_group(n): + buffer1 = DEVICE.create_buffer(size=128, usage=wgpu.BufferUsage.STORAGE) + + binding_layouts = [ + { + "binding": 0, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.read_only_storage, + }, + }, + ] + + bindings = [ + { + "binding": 0, + "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, + }, + ] + + bind_group_layout = DEVICE.create_bind_group_layout(entries=binding_layouts) + + yield {} + + for i in range(n): + yield DEVICE.create_bind_group(layout=bind_group_layout, entries=bindings) + + +_bind_group_layout_binding = 10 + + +@create_and_release +def test_release_bind_group_layout(n): + # Note: when we use the same binding layout descriptor, wgpu-native + # re-uses the BindGroupLayout object. + + global _bind_group_layout_binding + _bind_group_layout_binding += 1 + + yield { + "expected_counts_after_create": {"BindGroupLayout": (n, 1)}, + } + + binding_layouts = [ + { + "binding": _bind_group_layout_binding, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.read_only_storage, + }, + }, + ] + + for i in range(n): + # binding_layouts[0]["binding"] = i # force unique objects + yield DEVICE.create_bind_group_layout(entries=binding_layouts) + + +@create_and_release +def test_release_buffer(n): + yield {} + for i in range(n): + yield DEVICE.create_buffer(size=128, usage=wgpu.BufferUsage.COPY_DST) + + +@create_and_release +def test_release_command_buffer(n): + # Note: a command encoder can only be used once (it gets destroyed on finish()) + yield { + "expected_counts_after_create": { + "CommandEncoder": (n, 0), + "CommandBuffer": (n, n), + }, + } + + for i in range(n): + command_encoder = DEVICE.create_command_encoder() + yield command_encoder.finish() + + +@create_and_release +def test_release_command_encoder(n): + # Note: a CommandEncoder does not exist in wgpu-core, but we do + # observe its internal CommandBuffer. + yield { + "expected_counts_after_create": { + "CommandEncoder": (n, 0), + "CommandBuffer": (0, n), + }, + } + + for i in range(n): + yield DEVICE.create_command_encoder() + + +@create_and_release +def test_release_compute_pass_encoder(n): + # Note: ComputePassEncoder does not really exist in wgpu-core + # -> Check gpu.diagnostics.wgpu_native_counts.print_report(), nothing there that ends with "Encoder". 
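+    # A rough way to verify this manually (not part of the test):
+    #
+    #     import wgpu
+    #     wgpu.diagnostics.wgpu_native_counts.print_report()
+    #
+    # lists wgpu-core objects such as Buffer and Texture, but no "*Encoder"
+    # entries, which is why the expected native count below is 0.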
+ command_encoder = DEVICE.create_command_encoder() + + yield { + "expected_counts_after_create": { + "ComputePassEncoder": (n, 0), + }, + } + + for i in range(n): + yield command_encoder.begin_compute_pass() + + +@create_and_release +def test_release_compute_pipeline(n): + code = """ + @compute + @workgroup_size(1) + fn main(@builtin(global_invocation_id) index: vec3) { + let i: u32 = index.x; + } + """ + shader = DEVICE.create_shader_module(code=code) + + binding_layouts = [] + pipeline_layout = DEVICE.create_pipeline_layout(bind_group_layouts=binding_layouts) + + yield {} + + for i in range(n): + yield DEVICE.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": shader, "entry_point": "main"}, + ) + + +@create_and_release +def test_release_pipeline_layout(n): + yield {} + for i in range(n): + yield DEVICE.create_pipeline_layout(bind_group_layouts=[]) + + +@create_and_release +def test_release_query_set(n): + yield {} + for i in range(n): + yield DEVICE.create_query_set(type=wgpu.QueryType.occlusion, count=2) + + +@create_and_release +def test_release_queue(n): + pytest.skip("XFAIL") + # todo: XFAIL: the device and queue are kinda one, and the former won't release at wgpu-native. + yield {} + adapter = DEVICE.adapter + for i in range(n): + d = adapter.request_device() + q = d.queue + d._queue = None # detach + yield q + + +@create_and_release +def test_release_render_bundle(n): + # todo: implement this when we do support them + pytest.skip("Render bundle not implemented") + + +@create_and_release +def test_release_render_bundle_encoder(n): + pytest.skip("Render bundle not implemented") + + +@create_and_release +def test_release_render_pass_encoder(n): + # Note: RenderPassEncoder does not really exist in wgpu-core + # -> Check gpu.diagnostics.wgpu_native_counts.print_report(), nothing there that ends with "Encoder". 
+ command_encoder = DEVICE.create_command_encoder() + + yield { + "expected_counts_after_create": { + "RenderPassEncoder": (n, 0), + }, + } + + for i in range(n): + yield command_encoder.begin_render_pass(color_attachments=[]) + + +@create_and_release +def test_release_render_pipeline(n): + code = """ + struct VertexInput { + @builtin(vertex_index) vertex_index : u32, + }; + struct VertexOutput { + @location(0) color : vec4, + @builtin(position) pos: vec4, + }; + + @vertex + fn vs_main(in: VertexInput) -> VertexOutput { + var positions = array, 3>( + vec2(0.0, -0.5), + vec2(0.5, 0.5), + vec2(-0.5, 0.75), + ); + var colors = array, 3>( // srgb colors + vec3(1.0, 1.0, 0.0), + vec3(1.0, 0.0, 1.0), + vec3(0.0, 1.0, 1.0), + ); + let index = i32(in.vertex_index); + var out: VertexOutput; + out.pos = vec4(positions[index], 0.0, 1.0); + out.color = vec4(colors[index], 1.0); + return out; + } + + @fragment + fn fs_main(in: VertexOutput) -> @location(0) vec4 { + let physical_color = pow(in.color.rgb, vec3(2.2)); // gamma correct + return vec4(physical_color, in.color.a); + } + """ + shader = DEVICE.create_shader_module(code=code) + + binding_layouts = [] + pipeline_layout = DEVICE.create_pipeline_layout(bind_group_layouts=binding_layouts) + + yield {} + + for i in range(n): + yield DEVICE.create_render_pipeline( + layout=pipeline_layout, + vertex={ + "module": shader, + "entry_point": "vs_main", + "buffers": [], + }, + primitive={ + "topology": wgpu.PrimitiveTopology.triangle_list, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.none, + }, + depth_stencil=None, + multisample=None, + fragment={ + "module": shader, + "entry_point": "fs_main", + "targets": [ + { + "format": "bgra8unorm-srgb", + "blend": { + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + }, + ], + }, + ) + + +@create_and_release +def test_release_sampler(n): + yield {} + for i in range(n): + yield DEVICE.create_sampler() + + +@create_and_release +def test_release_shader_module(n): + yield {} + + code = """ + @fragment + fn fs_main() -> @location(0) vec4 { + return vec4(1.0, 0.0, 0.0, 1.0); + } + """ + + for i in range(n): + yield DEVICE.create_shader_module(code=code) + + +@create_and_release +def test_release_texture(n): + yield {} + for i in range(n): + yield DEVICE.create_texture( + size=(16, 16, 16), + usage=wgpu.TextureUsage.TEXTURE_BINDING, + format="rgba8unorm", + ) + + +@create_and_release +def test_release_texture_view(n): + texture = DEVICE.create_texture( + size=(16, 16, 16), usage=wgpu.TextureUsage.TEXTURE_BINDING, format="rgba8unorm" + ) + yield {} + for i in range(n): + yield texture.create_view() + + +# %% The end + + +TEST_FUNCS = [ + ob + for name, ob in list(globals().items()) + if name.startswith("test_") and callable(ob) +] + +if __name__ == "__main__": + # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run + + for func in TEST_FUNCS: + print(func.__name__ + " ...") + try: + func() + except pytest.skip.Exception: + print(" skipped") + print("done") diff --git a/tests_mem/testutils.py b/tests_mem/testutils.py new file mode 100644 index 0000000..cd1cce0 --- /dev/null +++ b/tests_mem/testutils.py @@ -0,0 +1,230 @@ +import gc +import os +import sys +import time +import subprocess + +import psutil +import wgpu +from wgpu._diagnostics import int_repr + + +p = psutil.Process() + + +def _determine_can_use_wgpu_lib(): + # For some reason, since 
wgpu-native 5c304b5ea1b933574edb52d5de2d49ea04a053db + # the process' exit code is not zero, so we test more pragmatically. + code = "import wgpu.utils; wgpu.utils.get_default_device(); print('ok')" + result = subprocess.run( + [ + sys.executable, + "-c", + code, + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) + print("_determine_can_use_wgpu_lib() status code:", result.returncode) + return ( + result.stdout.strip().endswith("ok") + and "traceback" not in result.stderr.lower() + ) + + +def _determine_can_use_glfw(): + code = "import glfw;exit(0) if glfw.init() else exit(1)" + try: + subprocess.check_output([sys.executable, "-c", code]) + except Exception: + return False + else: + return True + + +def _determine_can_use_pyside6(): + code = "import PySide6.QtGui" + try: + subprocess.check_output([sys.executable, "-c", code]) + except Exception: + return False + else: + return True + + +can_use_wgpu_lib = _determine_can_use_wgpu_lib() +can_use_glfw = _determine_can_use_glfw() +can_use_pyside6 = _determine_can_use_pyside6() +is_ci = bool(os.getenv("CI", None)) +is_pypy = sys.implementation.name == "pypy" + +TEST_ITERS = None + + +def get_memory_usage(): + """Get how much memory the process consumes right now.""" + # vms: total virtual memory. Seems not suitable, because it shows fewer but bigger differences. + # rss: the part of the virtual memory that is not in swap, i.e. consumes RAM. + # uss: memory that would become available when the process is killed (excludes shared). + # return p.memory_info().rss + return p.memory_full_info().uss + + +def clear_mem(): + time.sleep(0.001) + gc.collect() + + time.sleep(0.001) + gc.collect() + + if is_pypy: + gc.collect() + + device = wgpu.utils.get_default_device() + device._poll() + + +def get_counts(): + """Get a dict that maps object names to a 2-tuple representing + the counts in py and wgpu-native. + """ + counts_py = wgpu.diagnostics.object_counts.get_dict() + counts_native = wgpu.diagnostics.wgpu_native_counts.get_dict() + + all_keys = set(counts_py) | set(counts_native) + + default = {"count": -1} + + counts = {} + for key in sorted(all_keys): + counts[key] = ( + counts_py.get(key, default)["count"], + counts_native.get(key, default)["count"], + ) + counts.pop("total") + + return counts + + +def get_excess_counts(counts1, counts2): + """Compare two counts dicts, and return a new dict with the fields + that have increased counts.
+ """ + more = {} + for name in counts1: + c1 = counts1[name][0] + c2 = counts2[name][0] + more_py = 0 + if c2 > c1: + more_py = c2 - c1 + c1 = counts1[name][1] + c2 = counts2[name][1] + more_native = 0 + if c2 > c1: + more_native = c2 - c1 + if more_py or more_native: + more[name] = more_py, more_native + return more + + +def ob_name_from_test_func(func): + """Translate test_release_bind_group() to "BindGroup".""" + func_name = func.__name__ + prefix = "test_release_" + assert func_name.startswith(prefix) + words = func_name[len(prefix) :].split("_") + if words[-1].isnumeric(): + words.pop(-1) + return "".join(word.capitalize() for word in words) + + +def create_and_release(create_objects_func): + """Decorator.""" + + def core_test_func(): + """The core function that does the testing.""" + + if TEST_ITERS: + n_objects_list = [8 for i in range(TEST_ITERS)] + else: + n_objects_list = [32, 17] + + # Init mem usage measurements + clear_mem() + mem3 = get_memory_usage() + + for iter, n_objects in enumerate(n_objects_list): + generator = create_objects_func(n_objects) + ob_name = ob_name_from_test_func(create_objects_func) + + # ----- Collect options + + options = { + "expected_counts_after_create": {ob_name: (n_objects, n_objects)}, + "expected_counts_after_release": {}, + } + + func_options = next(generator) + assert isinstance(func_options, dict), "First yield must be an options dict" + options.update(func_options) + + # Measure baseline object counts + clear_mem() + counts1 = get_counts() + + # ----- Create + + # Create objects + objects = list(generator) + + # Test the count + assert len(objects) == n_objects + + # Test that all objects are of the same class. + # (this for-loop is a bit weird, but its to avoid leaking refs to objects) + cls = objects[0].__class__ + assert all(isinstance(objects[i], cls) for i in range(len(objects))) + + # Test that class matches function name (should prevent a group of copy-paste errors) + assert ob_name == cls.__name__[3:] + + # Give wgpu some slack to clean up temporary resources + wgpu.utils.get_default_device()._poll() + + # Measure peak object counts + counts2 = get_counts() + more2 = get_excess_counts(counts1, counts2) + if not TEST_ITERS: + print(" more after create:", more2) + + # Make sure the actual object has increased + assert more2 # not empty + assert more2 == options["expected_counts_after_create"] + + # It's ok if other objects are created too ... + + # ----- Release + + # Delete objects + del objects + clear_mem() + + # Measure after-release object counts + counts3 = get_counts() + more3 = get_excess_counts(counts1, counts3) + if not TEST_ITERS: + print(" more after release:", more3) + + # Check! + assert more3 == options["expected_counts_after_release"] + + # Print mem usage info + if TEST_ITERS: + mem1 = mem3 # initial mem is end-mem of last iter + mem3 = get_memory_usage() + mem_info = (int_repr(mem3 - mem1) + "B").rjust(7) + print(mem_info, end=(" " if (iter + 1) % 10 else "\n")) + + core_test_func.__name__ = create_objects_func.__name__ + return core_test_func diff --git a/wgpu/__init__.py b/wgpu/__init__.py new file mode 100644 index 0000000..f13d909 --- /dev/null +++ b/wgpu/__init__.py @@ -0,0 +1,29 @@ +""" +The wgpu library is a Python implementation of WebGPU. 
+""" + +from ._coreutils import logger # noqa: F401,F403 +from ._diagnostics import diagnostics # noqa: F401,F403 +from .flags import * # noqa: F401,F403 +from .enums import * # noqa: F401,F403 +from .classes import * # noqa: F401,F403 +from .gui import WgpuCanvasInterface # noqa: F401,F403 +from . import utils # noqa: F401,F403 +from . import backends # noqa: F401,F403 +from . import resources # noqa: F401,F403 + + +__version__ = "0.13.2" +version_info = tuple(map(int, __version__.split("."))) + + +# The API entrypoint, from wgpu.classes - gets replaced when a backend loads. +gpu = GPU() # noqa: F405 + + +# Temporary stub to help transitioning +def request_adapter(*args, **kwargs): + """Deprecated!""" + raise DeprecationWarning( + "wgpu.request_adapter() is deprecated! Use wgpu.gpu.request_adapter() instead." + ) diff --git a/wgpu/__pyinstaller/__init__.py b/wgpu/__pyinstaller/__init__.py new file mode 100644 index 0000000..c27432f --- /dev/null +++ b/wgpu/__pyinstaller/__init__.py @@ -0,0 +1,12 @@ +from os.path import dirname + + +HERE = dirname(__file__) + + +def get_hook_dirs(): + return [HERE] + + +def get_test_dirs(): + return [HERE] diff --git a/wgpu/__pyinstaller/conftest.py b/wgpu/__pyinstaller/conftest.py new file mode 100644 index 0000000..7f8b737 --- /dev/null +++ b/wgpu/__pyinstaller/conftest.py @@ -0,0 +1 @@ +from PyInstaller.utils.conftest import * # noqa diff --git a/wgpu/__pyinstaller/hook-wgpu.py b/wgpu/__pyinstaller/hook-wgpu.py new file mode 100644 index 0000000..70ba783 --- /dev/null +++ b/wgpu/__pyinstaller/hook-wgpu.py @@ -0,0 +1,28 @@ +from PyInstaller.utils.hooks import collect_data_files, collect_dynamic_libs + +# Init variables that PyInstaller will pick up. +hiddenimports = [] +datas = [] +binaries = [] + +# Include our resource data and binaries. +datas += collect_data_files("wgpu", subdir="resources") +binaries += collect_dynamic_libs("wgpu") + +# Always include the wgpu-native backend. Since an import is not needed to +# load this (default) backend, PyInstaller does not see it by itself. +hiddenimports += ["wgpu.backends.auto", "wgpu.backends.wgpu_native"] + +# For the GUI backends, there always is an import. The auto backend is +# problematic because PyInstaller cannot follow it to a specific +# backend. Also, glfw does not have a hook like this, so it does not +# include the binary when freezing. We can solve both problems with the +# code below. Makes the binaray a bit larger, but only marginally (less +# than 300kb). 
+try: + import glfw # noqa +except ImportError: + pass +else: + hiddenimports += ["wgpu.gui.glfw"] + binaries += collect_dynamic_libs("glfw") diff --git a/wgpu/__pyinstaller/test_wgpu.py b/wgpu/__pyinstaller/test_wgpu.py new file mode 100644 index 0000000..284c8df --- /dev/null +++ b/wgpu/__pyinstaller/test_wgpu.py @@ -0,0 +1,30 @@ +script = """ +# The script part +import sys +import wgpu +import importlib + +# The test part +if "is_test" in sys.argv: + included_modules = [ + "wgpu.backends.auto", + "wgpu.backends.wgpu_native", + "wgpu.gui.glfw", + ] + excluded_modules = [ + "PySide6", + "PyQt6", + ] + for module_name in included_modules: + importlib.import_module(module_name) + for module_name in excluded_modules: + try: + importlib.import_module(module_name) + except ModuleNotFoundError: + continue + raise RuntimeError(module_name + " is not supposed to be importable.") +""" + + +def test_pyi_wgpu(pyi_builder): + pyi_builder.test_source(script, app_args=["is_test"]) diff --git a/wgpu/_classes.py b/wgpu/_classes.py new file mode 100644 index 0000000..d5a5685 --- /dev/null +++ b/wgpu/_classes.py @@ -0,0 +1,2100 @@ +""" +The classes representing the wgpu API. This module defines the classes, +properties, methods and documentation. The majority of methods are +implemented in backend modules. + +This module is maintained using a combination of manual code and +automatically inserted code. Read the codegen/readme.md for more +information. +""" + +import weakref +import logging +from typing import List, Dict, Union + +from ._coreutils import ApiDiff +from ._diagnostics import diagnostics, texture_format_to_bpp +from . import flags, enums, structs + + +__all__ = [ + "GPUObjectBase", + "GPUAdapterInfo", + "GPU", + "GPUAdapter", + "GPUDevice", + "GPUBuffer", + "GPUTexture", + "GPUTextureView", + "GPUSampler", + "GPUBindGroupLayout", + "GPUBindGroup", + "GPUPipelineLayout", + "GPUShaderModule", + "GPUCompilationMessage", + "GPUCompilationInfo", + "GPUPipelineError", + "GPUPipelineBase", + "GPUComputePipeline", + "GPURenderPipeline", + "GPUCommandBuffer", + "GPUCommandsMixin", + "GPUCommandEncoder", + "GPUBindingCommandsMixin", + "GPUDebugCommandsMixin", + "GPUComputePassEncoder", + "GPURenderPassEncoder", + "GPURenderCommandsMixin", + "GPURenderBundle", + "GPURenderBundleEncoder", + "GPUQueue", + "GPUQuerySet", + "GPUCanvasContext", + "GPUDeviceLostInfo", + "GPUError", + "GPUValidationError", + "GPUOutOfMemoryError", + "GPUInternalError", +] + +logger = logging.getLogger("wgpu") + + +apidiff = ApiDiff() + + +# Obtain the object tracker. Note that we store a ref of +# the latter on all classes that refer to it. Otherwise, on a sys exit, +# the module attributes are None-ified, and the destructors would +# therefore fail and produce warnings. +object_tracker = diagnostics.object_counts.tracker + + +class GPU: + """The entrypoint to the wgpu API. + + The starting point of your wgpu-adventure is always to obtain an + adapter. This is the equivalent to browser's ``navigator.gpu``. + When a backend is loaded, the ``wgpu.gpu`` object is replaced with + a backend-specific implementation. + """ + + # IDL: Promise requestAdapter(optional GPURequestAdapterOptions options = {}); + @apidiff.change("arguments include a canvas object") + def request_adapter( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): + """Create a `GPUAdapter`, the object that represents an abstract wgpu + implementation, from which one can request a `GPUDevice`. 
+ + Arguments: + power_preference (PowerPreference): "high-performance" or "low-power". + force_fallback_adapter (bool): whether to use a (probably CPU-based) + fallback adapter. + canvas (WgpuCanvasInterface): The canvas that the adapter should + be able to render to. This can typically be left to None. + """ + # If this method gets called, no backend has been loaded yet, let's do that now! + from .backends.auto import gpu # noqa + + return gpu.request_adapter( + power_preference=power_preference, + force_fallback_adapter=force_fallback_adapter, + canvas=canvas, + ) + + # IDL: Promise requestAdapter(optional GPURequestAdapterOptions options = {}); + @apidiff.change("arguments include a canvas object") + async def request_adapter_async( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): + """Async version of `request_adapter()`.""" + return self.request_adapter( + power_preference=power_preference, + force_fallback_adapter=force_fallback_adapter, + canvas=canvas, + ) + + # IDL: GPUTextureFormat getPreferredCanvasFormat(); + @apidiff.change("Disabled because we put it on the canvas context") + def get_preferred_canvas_format(self): + """Not implemented in wgpu-py; use `GPUCanvasContext.get_preferred_format()` instead. + The WebGPU spec defines this function, but in wgpu there are different + kinds of canvases which may each prefer/support a different format. + """ + raise RuntimeError("Use canvas.get_preferred_format() instead.") + + # IDL: [SameObject] readonly attribute WGSLLanguageFeatures wgslLanguageFeatures; + @property + def wgsl_language_features(self): + """A set of strings representing the WGSL language extensions supported by all adapters. + Returns an empty set for now.""" + # Looks like at the time of writing there are no definitions for extensions yet + return set() + + +# Instantiate API entrypoint +gpu = GPU() + + +class GPUCanvasContext: + """Represents a context to configure a canvas. + + Is also used to obtain the texture to render to. + + Can be obtained via `gui.WgpuCanvasInterface.get_context()`. + """ + + _ot = object_tracker + + def __init__(self, canvas): + self._ot.increase(self.__class__.__name__) + self._canvas_ref = weakref.ref(canvas) + + def _get_canvas(self): + """Getter method for internal use.""" + return self._canvas_ref() + + # IDL: readonly attribute (HTMLCanvasElement or OffscreenCanvas) canvas; + @property + def canvas(self): + """The associated canvas object.""" + return self._canvas_ref() + + # IDL: undefined configure(GPUCanvasConfiguration configuration); + def configure( + self, + *, + device: "GPUDevice", + format: "enums.TextureFormat", + usage: "flags.TextureUsage" = 0x10, + view_formats: "List[enums.TextureFormat]" = [], + color_space: str = "srgb", + alpha_mode: "enums.CanvasAlphaMode" = "opaque", + ): + """Configures the presentation context for the associated canvas. + Destroys any textures produced with a previous configuration. + This clears the drawing buffer to transparent black. + + Arguments: + device (WgpuDevice): The GPU device object to create compatible textures for. + format (enums.TextureFormat): The format that textures returned by + ``get_current_texture()`` will have. Must be one of the supported context + formats. An often used format is "bgra8unorm-srgb". + usage (flags.TextureUsage): Default ``TextureUsage.OUTPUT_ATTACHMENT``. + view_formats (List[enums.TextureFormat]): The formats that views created + from textures returned by ``get_current_texture()`` may use. 
+ color_space (PredefinedColorSpace): The color space that values written + into textures returned by ``get_current_texture()`` should be displayed with. + Default "srgb". + alpha_mode (enums.CanvasAlphaMode): Determines the effect that alpha values + will have on the content of textures returned by ``get_current_texture()`` + when read, displayed, or used as an image source. Default "opaque". + """ + raise NotImplementedError() + + # IDL: undefined unconfigure(); + def unconfigure(self): + """Removes the presentation context configuration. + Destroys any textures produced while configured.""" + raise NotImplementedError() + + # IDL: GPUTexture getCurrentTexture(); + def get_current_texture(self): + """Get the `GPUTexture` that will be composited to the canvas next. + This method should be called exactly once during each draw event. + """ + raise NotImplementedError() + + @apidiff.add("Present method is exposed") + def present(self): + """Present what has been drawn to the current texture, by compositing it + to the canvas. Note that a canvas based on `gui.WgpuCanvasBase` will call this + method automatically at the end of each draw event. + """ + raise NotImplementedError() + + @apidiff.add("Better place to define the preferred format") + def get_preferred_format(self, adapter): + """Get the preferred surface texture format.""" + return "bgra8unorm-srgb" # seems to be a good default + + def __del__(self): + self._ot.decrease(self.__class__.__name__) + self._destroy() + + def _destroy(self): + pass + + +class GPUAdapterInfo: + """Represents information about an adapter.""" + + def __init__(self, info): + self._info = info + + # IDL: readonly attribute DOMString vendor; + @property + def vendor(self): + """The vendor that built this adapter.""" + return self._info["vendor"] + + # IDL: readonly attribute DOMString architecture; + @property + def architecture(self): + """The adapter's architecture.""" + return self._info["architecture"] + + # IDL: readonly attribute DOMString device; + @property + def device(self): + """The kind of device that this adapter represents.""" + return self._info["device"] + + # IDL: readonly attribute DOMString description; + @property + def description(self): + """A textual description of the adapter.""" + return self._info["description"] + + +class GPUAdapter: + """Represents an abstract wgpu implementation. + + An adapter represents both an instance of a hardware accelerator + (e.g. GPU or CPU) and an implementation of WGPU on top of that + accelerator. + + The adapter is used to request a device object. The adapter object + enumerates its capabilities (features) and limits. + + If an adapter becomes unavailable, it becomes invalid. + Once invalid, it never becomes valid again.
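+
+    A short sketch of inspecting an adapter (assuming ``adapter`` was obtained
+    via ``wgpu.gpu.request_adapter()``):
+
+    .. code-block:: py
+
+        info = adapter.request_adapter_info()  # dict with e.g. vendor and device
+        print(adapter.features)                # set of supported feature names
+        print(adapter.limits)                  # dict of limits
+        device = adapter.request_device()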
+ """ + + _ot = object_tracker + + def __init__(self, internal, features, limits, adapter_info): + self._ot.increase(self.__class__.__name__) + self._internal = internal + + assert isinstance(features, set) + assert isinstance(limits, dict) + assert isinstance(adapter_info, dict) + + self._features = features + self._limits = limits + self._adapter_info = adapter_info + + # IDL: [SameObject] readonly attribute GPUSupportedFeatures features; + @property + def features(self): + """A set of feature names supported by the adapter.""" + return self._features + + # IDL: [SameObject] readonly attribute GPUSupportedLimits limits; + @property + def limits(self): + """A dict with limits for the adapter.""" + return self._limits + + # IDL: Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); + def request_device( + self, + *, + label="", + required_features: "List[enums.FeatureName]" = [], + required_limits: "Dict[str, int]" = {}, + default_queue: "structs.QueueDescriptor" = {}, + ): + """Request a `GPUDevice` from the adapter. + + Arguments: + label (str): A human readable label. Optional. + required_features (list of str): the features (extensions) that you need. Default []. + required_limits (dict): the various limits that you need. Default {}. + default_queue (structs.QueueDescriptor): Descriptor for the default queue. Optional. + """ + raise NotImplementedError() + + # IDL: Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); + async def request_device_async( + self, + *, + label="", + required_features: "List[enums.FeatureName]" = [], + required_limits: "Dict[str, int]" = {}, + default_queue: "structs.QueueDescriptor" = {}, + ): + """Async version of `request_device()`.""" + raise NotImplementedError() + + def _destroy(self): + pass + + def __del__(self): + self._ot.decrease(self.__class__.__name__) + self._destroy() + + # IDL: readonly attribute boolean isFallbackAdapter; + @property + def is_fallback_adapter(self): + """Whether this adapter runs on software (rather than dedicated hardware).""" + return self._adapter_info.get("adapter_type", "").lower() in ("software", "cpu") + + # IDL: Promise requestAdapterInfo(); + def request_adapter_info(self): + """Get a dict with information about this adapter, such as the vendor and devicen name.""" + return self._adapter_info + + # IDL: Promise requestAdapterInfo(); + async def request_adapter_info_async(self): + """Async get information about this adapter.""" + return self._adapter_info + + +class GPUObjectBase: + """The base class for all GPU objects. + + A GPU object is an object that can be thought of having a representation on + the GPU; the device and all objects belonging to a device. + """ + + _ot = object_tracker + _nbytes = 0 + + def __init__(self, label, internal, device): + self._ot.increase(self.__class__.__name__, self._nbytes) + self._label = label + self._internal = internal # The native/raw/real GPU object + self._device = device + logger.info(f"Creating {self.__class__.__name__} {label}") + + # IDL: attribute USVString label; + @property + def label(self): + """A human-readable name identifying the GPU object.""" + return self._label + + def _destroy(self): + """Subclasses can implement this to clean up.""" + pass + + def __del__(self): + self._ot.decrease(self.__class__.__name__, self._nbytes) + self._destroy() + + # Public destroy() methods are implemented on classes as the WebGPU spec specifies. + + +class GPUDevice(GPUObjectBase): + """The top-level interface through which GPU objects are created. 
+ + A device is the logical instantiation of an adapter, through which + internal objects are created. It can be shared across threads. + A device is the exclusive owner of all internal objects created + from it: when the device is lost, all objects created from it become + invalid. + + Create a device using `GPUAdapter.request_device()` or + `GPUAdapter.request_device_async()`. + """ + + def __init__(self, label, internal, adapter, features, limits, queue): + super().__init__(label, internal, None) + + assert isinstance(adapter, GPUAdapter) + assert isinstance(features, set) + assert isinstance(limits, dict) + + self._adapter = adapter + self._features = features + self._limits = limits + self._queue = queue + queue._device = self # because it could not be set earlier + + # IDL: [SameObject] readonly attribute GPUSupportedFeatures features; + @property + def features(self): + """A set of feature names supported by this device.""" + return self._features + + # IDL: [SameObject] readonly attribute GPUSupportedLimits limits; + @property + def limits(self): + """A dict with limits for this device.""" + return self._limits + + # IDL: [SameObject] readonly attribute GPUQueue queue; + @property + def queue(self): + """The default `GPUQueue` for this device.""" + return self._queue + + @apidiff.add("Too useful to not-have") + @property + def adapter(self): + """The adapter object corresponding to this device.""" + return self._adapter + + # IDL: readonly attribute Promise lost; + @apidiff.hide("Not a Pythonic API") + @property + def lost(self): + """Provides information about why the device is lost.""" + # In JS you can device.lost.then ... to handle lost devices. + # We may want to eventually support something similar async-like? + # at some point + raise NotImplementedError() + + # IDL: attribute EventHandler onuncapturederror; + @apidiff.hide("Specific to browsers") + @property + def onuncapturederror(self): + """Method called when an error is capured?""" + raise NotImplementedError() + + # IDL: undefined destroy(); + def destroy(self): + """Destroy this device.""" + return self._destroy() + + # IDL: GPUBuffer createBuffer(GPUBufferDescriptor descriptor); + def create_buffer( + self, + *, + label="", + size: int, + usage: "flags.BufferUsage", + mapped_at_creation: bool = False, + ): + """Create a `GPUBuffer` object. + + Arguments: + label (str): A human readable label. Optional. + size (int): The size of the buffer in bytes. + usage (flags.BufferUsage): The ways in which this buffer will be used. + mapped_at_creation (bool): Whether the buffer is initially mapped. + """ + raise NotImplementedError() + + @apidiff.add("Convenience function") + def create_buffer_with_data(self, *, label="", data, usage: "flags.BufferUsage"): + """Create a `GPUBuffer` object initialized with the given data. + + This is a convenience function that creates a mapped buffer, + writes the given data to it, and then unmaps the buffer. + + Arguments: + label (str): A human readable label. Optional. + data: Any object supporting the Python buffer protocol (this + includes bytes, bytearray, ctypes arrays, numpy arrays, etc.). + usage (flags.BufferUsage): The ways in which this buffer will be used. + + Also see `GPUBuffer.write_mapped()` and `GPUQueue.write_buffer()`. + """ + # This function was originally created to support the workflow + # of initializing a buffer with data when we did not support + # buffer mapping. 
Now that we do have buffer mapping it is not + # strictly necessary, but it's still quite useful and feels + # more Pythonic than having to write the boilerplate code below. + + # Create a view of known type + data = memoryview(data).cast("B") + size = data.nbytes + + # Create the buffer and write data + buf = self.create_buffer( + label=label, size=size, usage=usage, mapped_at_creation=True + ) + buf.write_mapped(data) + buf.unmap() + return buf + + # IDL: GPUTexture createTexture(GPUTextureDescriptor descriptor); + def create_texture( + self, + *, + label="", + size: "Union[List[int], structs.Extent3D]", + mip_level_count: int = 1, + sample_count: int = 1, + dimension: "enums.TextureDimension" = "2d", + format: "enums.TextureFormat", + usage: "flags.TextureUsage", + view_formats: "List[enums.TextureFormat]" = [], + ): + """Create a `GPUTexture` object. + + Arguments: + label (str): A human readable label. Optional. + size (tuple or dict): The texture size as a 3-tuple or a `structs.Extent3D`. + mip_level_count (int): The number of mip leveles. Default 1. + sample_count (int): The number of samples. Default 1. + dimension (enums.TextureDimension): The dimensionality of the texture. Default 2d. + format (TextureFormat): What channels it stores and how. + usage (flags.TextureUsage): The ways in which the texture will be used. + view_formats (optional): A list of formats that views are allowed to have + in addition to the texture's own view. Using these formats may have + a performance penalty. + + See https://gpuweb.github.io/gpuweb/#texture-format-caps for a + list of available texture formats. Note that less formats are + available for storage usage. + """ + raise NotImplementedError() + + # IDL: GPUSampler createSampler(optional GPUSamplerDescriptor descriptor = {}); + def create_sampler( + self, + *, + label="", + address_mode_u: "enums.AddressMode" = "clamp-to-edge", + address_mode_v: "enums.AddressMode" = "clamp-to-edge", + address_mode_w: "enums.AddressMode" = "clamp-to-edge", + mag_filter: "enums.FilterMode" = "nearest", + min_filter: "enums.FilterMode" = "nearest", + mipmap_filter: "enums.MipmapFilterMode" = "nearest", + lod_min_clamp: float = 0, + lod_max_clamp: float = 32, + compare: "enums.CompareFunction" = None, + max_anisotropy: int = 1, + ): + """Create a `GPUSampler` object. Samplers specify how a texture is sampled. + + Arguments: + label (str): A human readable label. Optional. + address_mode_u (enums.AddressMode): What happens when sampling beyond the x edge. + Default "clamp-to-edge". + address_mode_v (enums.AddressMode): What happens when sampling beyond the y edge. + Default "clamp-to-edge". + address_mode_w (enums.AddressMode): What happens when sampling beyond the z edge. + Default "clamp-to-edge". + mag_filter (enums.FilterMode): Interpolation when zoomed in. Default 'nearest'. + min_filter (enums.FilterMode): Interpolation when zoomed out. Default 'nearest'. + mipmap_filter: (enums.MipmapFilterMode): Interpolation between mip levels. Default 'nearest'. + lod_min_clamp (float): The minimum level of detail. Default 0. + lod_max_clamp (float): The maxium level of detail. Default 32. + compare (enums.CompareFunction): The sample compare operation for depth textures. + Only specify this for depth textures. Default None. + max_anisotropy (int): The maximum anisotropy value clamp used by the sample, + betweet 1 and 16, default 1. 
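+
+        Example (a sketch of a typical linear-filtering sampler; ``device``
+        being a `GPUDevice`):
+
+        .. code-block:: py
+
+            sampler = device.create_sampler(
+                mag_filter=wgpu.FilterMode.linear,
+                min_filter=wgpu.FilterMode.linear,
+                mipmap_filter=wgpu.MipmapFilterMode.linear,
+                address_mode_u=wgpu.AddressMode.repeat,
+                address_mode_v=wgpu.AddressMode.repeat,
+            )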
+ """ + raise NotImplementedError() + + # IDL: GPUBindGroupLayout createBindGroupLayout(GPUBindGroupLayoutDescriptor descriptor); + def create_bind_group_layout( + self, *, label="", entries: "List[structs.BindGroupLayoutEntry]" + ): + """Create a `GPUBindGroupLayout` object. One or more + such objects are passed to `create_pipeline_layout()` to + specify the (abstract) pipeline layout for resources. See the + docs on bind groups for details. + + Arguments: + label (str): A human readable label. Optional. + entries (list): A list of `structs.BindGroupLayoutEntry` dicts. + Each contains either a `structs.BufferBindingLayout`, + `structs.SamplerBindingLayout`, `structs.TextureBindingLayout`, + or `structs.StorageTextureBindingLayout`. + + Example with `structs.BufferBindingLayout`: + + .. code-block:: py + + { + "binding": 0, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": wgpu.BufferBindingType.storage_buffer, + "has_dynamic_offset": False, # optional + "min_binding_size": 0 # optional + } + }, + + Note on ``has_dynamic_offset``: For uniform-buffer, storage-buffer, and + readonly-storage-buffer bindings, it indicates whether the binding has a + dynamic offset. One offset must be passed to `pass.set_bind_group()` + for each dynamic binding in increasing order of binding number. + """ + raise NotImplementedError() + + # IDL: GPUBindGroup createBindGroup(GPUBindGroupDescriptor descriptor); + def create_bind_group( + self, + *, + label="", + layout: "GPUBindGroupLayout", + entries: "List[structs.BindGroupEntry]", + ): + """Create a `GPUBindGroup` object, which can be used in + `pass.set_bind_group()` to attach a group of resources. + + Arguments: + label (str): A human readable label. Optional. + layout (GPUBindGroupLayout): The layout (abstract representation) + for this bind group. + entries (list): A list of `structs.BindGroupEntry` dicts. The ``resource`` field + is either `GPUSampler`, `GPUTextureView` or `structs.BufferBinding`. + + Example entry dicts: + + .. code-block:: py + + # For a sampler + { + "binding" : 0, # slot + "resource": a_sampler, + } + # For a texture view + { + "binding" : 0, # slot + "resource": a_texture_view, + } + # For a buffer + { + "binding" : 0, # slot + "resource": { + "buffer": a_buffer, + "offset": 0, + "size": 812, + } + } + """ + raise NotImplementedError() + + # IDL: GPUPipelineLayout createPipelineLayout(GPUPipelineLayoutDescriptor descriptor); + def create_pipeline_layout( + self, *, label="", bind_group_layouts: "List[GPUBindGroupLayout]" + ): + """Create a `GPUPipelineLayout` object, which can be + used in `create_render_pipeline()` or `create_compute_pipeline()`. + + Arguments: + label (str): A human readable label. Optional. + bind_group_layouts (list): A list of `GPUBindGroupLayout` objects. + """ + raise NotImplementedError() + + # IDL: GPUShaderModule createShaderModule(GPUShaderModuleDescriptor descriptor); + def create_shader_module( + self, + *, + label="", + code: str, + source_map: dict = None, + compilation_hints: "List[structs.ShaderModuleCompilationHint]" = [], + ): + """Create a `GPUShaderModule` object from shader source. + + The primary shader language is WGSL, though SpirV is also supported, + as well as GLSL (experimental). + + Arguments: + label (str): A human readable label. Optional. + code (str | bytes): The shader code, as WGSL, GLSL or SpirV. + For GLSL code, the label must be given and contain the word + 'comp', 'vert' or 'frag'. For SpirV the code must be bytes. + compilation_hints: currently unused. 
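+
+        Example (a sketch with a trivial WGSL compute shader):
+
+        .. code-block:: py
+
+            shader_source = '''
+                @compute @workgroup_size(1)
+                fn main() {
+                    // An empty entry point, just to illustrate the call.
+                }
+            '''
+            shader_module = device.create_shader_module(code=shader_source)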
+ """ + raise NotImplementedError() + + # IDL: GPUComputePipeline createComputePipeline(GPUComputePipelineDescriptor descriptor); + def create_compute_pipeline( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + compute: "structs.ProgrammableStage", + ): + """Create a `GPUComputePipeline` object. + + Arguments: + label (str): A human readable label. Optional. + layout (GPUPipelineLayout): object created with `create_pipeline_layout()`. + compute (structs.ProgrammableStage): Binds shader module and entrypoint. + """ + raise NotImplementedError() + + # IDL: Promise createComputePipelineAsync(GPUComputePipelineDescriptor descriptor); + async def create_compute_pipeline_async( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + compute: "structs.ProgrammableStage", + ): + """Async version of create_compute_pipeline().""" + raise NotImplementedError() + + # IDL: GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); + def create_render_pipeline( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + vertex: "structs.VertexState", + primitive: "structs.PrimitiveState" = {}, + depth_stencil: "structs.DepthStencilState" = None, + multisample: "structs.MultisampleState" = {}, + fragment: "structs.FragmentState" = None, + ): + """Create a `GPURenderPipeline` object. + + Arguments: + label (str): A human readable label. Optional. + layout (GPUPipelineLayout): The layout for the new pipeline. + vertex (structs.VertexState): Describes the vertex shader entry point of the + pipeline and its input buffer layouts. + primitive (structs.PrimitiveState): Describes the the primitive-related properties + of the pipeline. If `strip_index_format` is present (which means the + primitive topology is a strip), and the drawCall is indexed, the + vertex index list is split into sub-lists using the maximum value of this + index format as a separator. Example: a list with values + `[1, 2, 65535, 4, 5, 6]` of type "uint16" will be split in sub-lists + `[1, 2]` and `[4, 5, 6]`. + depth_stencil (structs.DepthStencilState): Describes the optional depth-stencil + properties, including the testing, operations, and bias. Optional. + multisample (structs.MultisampleState): Describes the multi-sampling properties of the pipeline. + fragment (structs.FragmentState): Describes the fragment shader + entry point of the pipeline and its output colors. If it’s + None, the No-Color-Output mode is enabled: the pipeline + does not produce any color attachment outputs. It still + performs rasterization and produces depth values based on + the vertex position output. The depth testing and stencil + operations can still be used. + + In the example dicts below, the values that are marked as optional, + the shown value is the default. + + Example vertex (structs.VertexState) dict: + + .. code-block:: py + + { + "module": shader_module, + "entry_point": "main", + "buffers": [ + { + "array_stride": 8, + "step_mode": wgpu.VertexStepMode.vertex, # optional + "attributes": [ + { + "format": wgpu.VertexFormat.float2, + "offset": 0, + "shader_location": 0, + }, + ... + ], + }, + ... + ] + } + + Example primitive (structs.PrimitiveState) dict: + + .. 
code-block:: py + + { + "topology": wgpu.PrimitiveTopology.triangle_list, + "strip_index_format": wgpu.IndexFormat.uint32, # see note + "front_face": wgpu.FrontFace.ccw, # optional + "cull_mode": wgpu.CullMode.none, # optional + } + + Example depth_stencil (structs.DepthStencilState) dict: + + .. code-block:: py + + { + "format": wgpu.TextureFormat.depth24plus_stencil8, + "depth_write_enabled": False, # optional + "depth_compare": wgpu.CompareFunction.always, # optional + "stencil_front": { # optional + "compare": wgpu.CompareFunction.equal, + "fail_op": wgpu.StencilOperation.keep, + "depth_fail_op": wgpu.StencilOperation.keep, + "pass_op": wgpu.StencilOperation.keep, + }, + "stencil_back": { # optional + "compare": wgpu.CompareFunction.equal, + "fail_op": wgpu.StencilOperation.keep, + "depth_fail_op": wgpu.StencilOperation.keep, + "pass_op": wgpu.StencilOperation.keep, + }, + "stencil_read_mask": 0xFFFFFFFF, # optional + "stencil_write_mask": 0xFFFFFFFF, # optional + "depth_bias": 0, # optional + "depth_bias_slope_scale": 0.0, # optional + "depth_bias_clamp": 0.0, # optional + } + + Example multisample (structs.MultisampleState) dict: + + .. code-block:: py + + { + "count": 1, # optional + "mask": 0xFFFFFFFF, # optional + "alpha_to_coverage_enabled": False # optional + } + + Example fragment (structs.FragmentState) dict. The `blend` parameter can be None + to disable blending (not all texture formats support blending). + + .. code-block:: py + + { + "module": shader_module, + "entry_point": "main", + "targets": [ + { + "format": wgpu.TextureFormat.bgra8unorm_srgb, + "blend": { + "color": ( + wgpu.BlendFactor.One, + wgpu.BlendFactor.zero, + gpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.One, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + } + "write_mask": wgpu.ColorWrite.ALL # optional + }, + ... + ] + } + + """ + raise NotImplementedError() + + # IDL: Promise createRenderPipelineAsync(GPURenderPipelineDescriptor descriptor); + async def create_render_pipeline_async( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + vertex: "structs.VertexState", + primitive: "structs.PrimitiveState" = {}, + depth_stencil: "structs.DepthStencilState" = None, + multisample: "structs.MultisampleState" = {}, + fragment: "structs.FragmentState" = None, + ): + """Async version of create_render_pipeline().""" + raise NotImplementedError() + + # IDL: GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); + def create_command_encoder(self, *, label=""): + """Create a `GPUCommandEncoder` object. A command + encoder is used to record commands, which can then be submitted + at once to the GPU. + + Arguments: + label (str): A human readable label. Optional. + """ + raise NotImplementedError() + + # IDL: GPURenderBundleEncoder createRenderBundleEncoder(GPURenderBundleEncoderDescriptor descriptor); + def create_render_bundle_encoder( + self, + *, + label="", + color_formats: "List[enums.TextureFormat]", + depth_stencil_format: "enums.TextureFormat" = None, + sample_count: int = 1, + depth_read_only: bool = False, + stencil_read_only: bool = False, + ): + """Create a `GPURenderBundle` object. 
+ + TODO: not yet available in wgpu-native + """ + raise NotImplementedError() + + # IDL: GPUQuerySet createQuerySet(GPUQuerySetDescriptor descriptor); + def create_query_set(self, *, label="", type: "enums.QueryType", count: int): + """Create a `GPUQuerySet` object.""" + raise NotImplementedError() + + # IDL: undefined pushErrorScope(GPUErrorFilter filter); + @apidiff.hide + def push_error_scope(self, filter): + """Pushes a new GPU error scope onto the stack.""" + raise NotImplementedError() + + # IDL: Promise popErrorScope(); + @apidiff.hide + def pop_error_scope(self): + """Pops a GPU error scope from the stack.""" + raise NotImplementedError() + + # IDL: GPUExternalTexture importExternalTexture(GPUExternalTextureDescriptor descriptor); + @apidiff.hide("Specific to browsers") + def import_external_texture( + self, + *, + label="", + source: "Union[memoryview, object]", + color_space: str = "srgb", + ): + """For browsers only.""" + raise NotImplementedError() + + +class GPUBuffer(GPUObjectBase): + """Represents a block of memory that can be used in GPU operations. + + Data is stored in linear layout, meaning that each byte + of the allocation can be addressed by its offset from the start of + the buffer, subject to alignment restrictions depending on the + operation. + + Create a buffer using `GPUDevice.create_buffer()`. + + One can sync data in a buffer by mapping it and then getting and setting data. + Alternatively, one can tell the GPU (via the command encoder) to + copy data between buffers and textures. + """ + + def __init__(self, label, internal, device, size, usage, map_state): + self._nbytes = size + super().__init__(label, internal, device) + self._size = size + self._usage = usage + self._map_state = map_state + + # IDL: readonly attribute GPUSize64Out size; + @property + def size(self): + """The length of the GPUBuffer allocation in bytes.""" + return self._size + + # IDL: readonly attribute GPUFlagsConstant usage; + @property + def usage(self): + """The allowed usages (int bitmap) for this GPUBuffer, specifying + e.g. whether the buffer may be used as a vertex buffer, uniform buffer, + target or source for copying data, etc. + """ + return self._usage + + # IDL: readonly attribute GPUBufferMapState mapState; + @property + def map_state(self): + """The mapping state of the buffer, see `BufferMapState`.""" + return self._map_state + + # WebGPU specifies an API to sync data with the buffer via mapping. + # The idea is to (async) request mapped data, read from / write to + # this memory (using getMappedRange), and then unmap. A buffer + # must be unmapped before it can be used in a pipeline. + # + # This means that the mapped memory is reclaimed (i.e. invalid) + # when unmap is called, and that whatever object we expose the + # memory with to the user, must be set to a state where it can no + # longer be used. There does not seem to be a good way to do this. + # + # In our Python API we do make use of the same map/unmap mechanism, + # but reading and writing data goes via method calls instead of via + # an array-like object that exposes the shared memory. + + # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); + def map(self, mode, offset=0, size=None): + """Maps the given range of the GPUBuffer. + + When this call returns, the buffer content is ready to be + accessed with ``read_mapped`` or ``write_mapped``. Don't forget + to ``unmap()`` when done. 
+ + Arguments: + mode (enum): The mapping mode, either wgpu.MapMode.READ or + wgpu.MapMode.WRITE, can also be a string. + offset (int): the buffer offset in bytes. Default 0. + size (int): the size to read. Default until the end. + """ + raise NotImplementedError() + + # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); + async def map_async(self, mode, offset=0, size=None): + """Alternative version of map().""" + raise NotImplementedError() + + # IDL: undefined unmap(); + def unmap(self): + """Unmaps the buffer. + + Unmaps the mapped range of the GPUBuffer and makes its contents + available for use by the GPU again. + """ + raise NotImplementedError() + + @apidiff.add("Replacement for get_mapped_range") + def read_mapped(self, buffer_offset=None, size=None, *, copy=True): + """Read mapped buffer data. + + This method must only be called when the buffer is in a mapped state. + This is the Python alternative to WebGPU's ``getMappedRange``. + Returns a memoryview that is a copy of the mapped data (it won't + become invalid when the buffer is unmapped). + + Arguments: + buffer_offset (int): the buffer offset in bytes. Must be at + least as large as the offset specified in ``map()``. The default + is the offset of the mapped range. + size (int): the size to read. The resulting range must fit into the range + specified in ``map()``. The default is as large as the mapped range allows. + copy (bool): whether a copy of the data is given. Default True. + If False, the returned memoryview represents the mapped data + directly, and is released when the buffer is unmapped. + WARNING: views of the returned data (e.g. memoryview objects or + numpy arrays) can still be used after the base memory is released, + which can result in corrupted data and segfaults. Therefore, when + setting copy to False, make *very* sure the memory is not accessed + after the buffer is unmapped. + + Also see `GPUBuffer.write_mapped()`, `GPUQueue.read_buffer()` and `GPUQueue.write_buffer()`. + """ + raise NotImplementedError() + + @apidiff.add("Replacement for get_mapped_range") + def write_mapped(self, data, buffer_offset=None, size=None): + """Write mapped buffer data. + + This method must only be called when the buffer is in a mapped state. + This is the Python alternative to WebGPU's ``getMappedRange``. + Since the data can also be a view into a larger array, this method + allows updating the buffer with minimal data copying. + + Arguments: + data (buffer-like): The data to write to the buffer, in the form of + e.g. a bytes object, memoryview, or numpy array. + buffer_offset (int): the buffer offset in bytes. Must be at least + as large as the offset specified in ``map()``. The default + is the offset of the mapped range. + size (int): the size to write. The default is the size of + the data, so this argument can typically be ignored. The + resulting range must fit into the range specified in ``map()``. + + Also see `GPUBuffer.read_mapped()`, `GPUQueue.read_buffer()` and `GPUQueue.write_buffer()`.
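+
+        Example (a sketch of the map / write / unmap cycle; the buffer is
+        assumed to have ``BufferUsage.MAP_WRITE`` in its usage flags):
+
+        .. code-block:: py
+
+            buffer.map(wgpu.MapMode.WRITE)
+            buffer.write_mapped(b"\x00" * buffer.size)
+            buffer.unmap()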
+ """ + raise NotImplementedError() + + # IDL: ArrayBuffer getMappedRange(optional GPUSize64 offset = 0, optional GPUSize64 size); + @apidiff.hide + def get_mapped_range(self, offset=0, size=None): + raise NotImplementedError("The Python API differs from WebGPU here") + + @apidiff.add("Deprecated but still here to raise a warning") + def map_read(self, offset=None, size=None, iter=None): + """Deprecated.""" + raise DeprecationWarning( + "map_read() is deprecated, use map() and read_mapped() instead." + ) + + @apidiff.add("Deprecated but still here to raise a warning") + def map_write(self, data): + """Deprecated.""" + raise DeprecationWarning( + "map_write() is deprecated, use map() and write_mapped() instead." + ) + + # IDL: undefined destroy(); + def destroy(self): + """An application that no longer requires a buffer can choose + to destroy it. Note that this is automatically called when the + Python object is cleaned up by the garbage collector. + """ + raise NotImplementedError() + + +class GPUTexture(GPUObjectBase): + """Represents a 1D, 2D or 3D color image object. + + A texture can also have mipmaps (different levels of varying + detail), and arrays. The texture represents the "raw" data. A + `GPUTextureView` is used to define how the texture data + should be interpreted. + + Create a texture using `GPUDevice.create_texture()`. + """ + + def __init__(self, label, internal, device, tex_info): + self._nbytes = self._estimate_nbytes(tex_info) + super().__init__(label, internal, device) + self._tex_info = tex_info + + def _estimate_nbytes(self, tex_info): + format = tex_info["format"] + size = tex_info["size"] + sample_count = tex_info["sample_count"] or 1 + mip_level_count = tex_info["mip_level_count"] or 1 + + bpp = texture_format_to_bpp.get(format, 0) + npixels = size[0] * size[1] * size[2] + nbytes_at_mip_level = sample_count * npixels * bpp / 8 + + nbytes = 0 + for i in range(mip_level_count): + nbytes += nbytes_at_mip_level + nbytes_at_mip_level /= 2 + + # Return rounded to nearest integer + return int(nbytes + 0.5) + + @apidiff.add("Too useful to not-have") + @property + def size(self): + """The size of the texture in mipmap level 0, as a 3-tuple of ints.""" + return self._tex_info["size"] + + # IDL: readonly attribute GPUIntegerCoordinateOut width; + @property + def width(self): + """The texture's width. Also see ``.size``.""" + return self._tex_info["size"][0] + + # IDL: readonly attribute GPUIntegerCoordinateOut height; + @property + def height(self): + """The texture's height. Also see ``.size``.""" + return self._tex_info["size"][1] + + # IDL: readonly attribute GPUIntegerCoordinateOut depthOrArrayLayers; + @property + def depth_or_array_layers(self): + """The texture's depth or number of layers.
Also see ``.size``.""" + return self._tex_info["size"][2] + + # IDL: readonly attribute GPUIntegerCoordinateOut mipLevelCount; + @property + def mip_level_count(self): + """The total number of the mipmap levels of the texture.""" + return self._tex_info["mip_level_count"] + + # IDL: readonly attribute GPUSize32Out sampleCount; + @property + def sample_count(self): + """The number of samples in each texel of the texture.""" + return self._tex_info["sample_count"] + + # IDL: readonly attribute GPUTextureDimension dimension; + @property + def dimension(self): + """The dimension of the texture.""" + return self._tex_info["dimension"] + + # IDL: readonly attribute GPUTextureFormat format; + @property + def format(self): + """The format of the texture.""" + return self._tex_info["format"] + + # IDL: readonly attribute GPUFlagsConstant usage; + @property + def usage(self): + """The allowed usages for this texture.""" + return self._tex_info["usage"] + + # IDL: GPUTextureView createView(optional GPUTextureViewDescriptor descriptor = {}); + def create_view( + self, + *, + label="", + format: "enums.TextureFormat" = None, + dimension: "enums.TextureViewDimension" = None, + aspect: "enums.TextureAspect" = "all", + base_mip_level: int = 0, + mip_level_count: int = None, + base_array_layer: int = 0, + array_layer_count: int = None, + ): + """Create a `GPUTextureView` object. + + If no aguments are given, a default view is given, with the + same format and dimension as the texture. + + Arguments: + label (str): A human readable label. Optional. + format (enums.TextureFormat): What channels it stores and how. + dimension (enums.TextureViewDimension): The dimensionality of the texture view. + aspect (enums.TextureAspect): Whether this view is used for depth, stencil, or all. + Default all. + base_mip_level (int): The starting mip level. Default 0. + mip_level_count (int): The number of mip levels. Default None. + base_array_layer (int): The starting array layer. Default 0. + array_layer_count (int): The number of array layers. Default None. + """ + raise NotImplementedError() + + # IDL: undefined destroy(); + def destroy(self): + """An application that no longer requires a texture can choose + to destroy it. Note that this is automatically called when the + Python object is cleaned up by the garbadge collector. + """ + raise NotImplementedError() + + +class GPUTextureView(GPUObjectBase): + """Represents a way to represent a `GPUTexture`. + + Create a texture view using `GPUTexture.create_view()`. + """ + + def __init__(self, label, internal, device, texture, size): + super().__init__(label, internal, device) + self._texture = texture + self._size = size + + @apidiff.add("Need to know size e.g. for texture view provided by canvas") + @property + def size(self): + """The texture size (as a 3-tuple).""" + return self._size + + @apidiff.add("Too useful to not-have") + @property + def texture(self): + """The texture object to which this is a view.""" + return self._texture + + +class GPUSampler(GPUObjectBase): + """Defines how a texture (view) must be sampled by the shader. + + It defines the subsampling, sampling between mip levels, and sampling out + of the image boundaries. + + Create a sampler using `GPUDevice.create_sampler()`. + """ + + +class GPUBindGroupLayout(GPUObjectBase): + """Defines the interface between a set of resources bound in a `GPUBindGroup`. + + It also defines their accessibility in shader stages. + + Create a bind group layout using `GPUDevice.create_bind_group_layout()`. 
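+
+    A short sketch of how bind group layouts are combined into a pipeline
+    layout (``entries`` being a list of dicts as described in
+    `GPUDevice.create_bind_group_layout()`):
+
+    .. code-block:: py
+
+        bind_group_layout = device.create_bind_group_layout(entries=entries)
+        pipeline_layout = device.create_pipeline_layout(
+            bind_group_layouts=[bind_group_layout]
+        )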
+ """ + + def __init__(self, label, internal, device, bindings): + super().__init__(label, internal, device) + self._bindings = tuple(bindings) + + +class GPUBindGroup(GPUObjectBase): + """Represents a group of resource bindings (buffer, sampler, texture-view). + + It holds the shader slot and a reference to the resource (sampler, + texture-view, buffer). + + Create a bind group using `GPUDevice.create_bind_group()`. + """ + + def __init__(self, label, internal, device, bindings): + super().__init__(label, internal, device) + self._bindings = bindings + + +class GPUPipelineLayout(GPUObjectBase): + """Describes the layout of a pipeline, as a list of `GPUBindGroupLayout` objects. + + Create a pipeline layout using `GPUDevice.create_pipeline_layout()`. + """ + + def __init__(self, label, internal, device, layouts): + super().__init__(label, internal, device) + self._layouts = tuple(layouts) # GPUBindGroupLayout objects + + +class GPUShaderModule(GPUObjectBase): + """Represents a programmable shader. + + Create a shader module using `GPUDevice.create_shader_module()`. + """ + + # IDL: Promise getCompilationInfo(); + def get_compilation_info(self): + """Get shader compilation info. Always returns empty list at the moment.""" + # How can this return shader errors if one cannot create a + # shader module when the shader source has errors? + raise NotImplementedError() + + +class GPUPipelineBase: + """A mixin class for render and compute pipelines.""" + + def __init__(self, label, internal, device): + super().__init__(label, internal, device) + + # IDL: [NewObject] GPUBindGroupLayout getBindGroupLayout(unsigned long index); + def get_bind_group_layout(self, index): + """Get the bind group layout at the given index.""" + raise NotImplementedError() + + +class GPUComputePipeline(GPUPipelineBase, GPUObjectBase): + """Represents a single pipeline for computations (no rendering). + + Create a compute pipeline using `GPUDevice.create_compute_pipeline()`. + """ + + +class GPURenderPipeline(GPUPipelineBase, GPUObjectBase): + """Represents a single pipeline to draw something. + + The rendering typically involves a vertex and fragment stage, though + the latter is optional. + The render target can come from a window on the screen or from an + in-memory texture (off-screen rendering). + + Create a render pipeline using `GPUDevice.create_render_pipeline()`. + """ + + +class GPUCommandBuffer(GPUObjectBase): + """Stores a series of commands generated by a `GPUCommandEncoder`. + + The buffered commands can subsequently be submitted to a `GPUQueue`. + + Command buffers are single use, you must only submit them once and + submitting them destroys them. Use render bundles to re-use commands. + + Create a command buffer using `GPUCommandEncoder.finish()`. + """ + + +class GPUCommandsMixin: + """Mixin for classes that encode commands.""" + + pass + + +class GPUBindingCommandsMixin: + """Mixin for classes that defines bindings.""" + + # IDL: undefined setBindGroup(GPUIndex32 index, GPUBindGroup? bindGroup, Uint32Array dynamicOffsetsData, GPUSize64 dynamicOffsetsDataStart, GPUSize32 dynamicOffsetsDataLength); + def set_bind_group( + self, + index, + bind_group, + dynamic_offsets_data, + dynamic_offsets_data_start, + dynamic_offsets_data_length, + ): + """Associate the given bind group (i.e. group or resources) with the + given slot/index. + + Arguments: + index (int): The slot to bind at. + bind_group (GPUBindGroup): The bind group to bind. 
+ dynamic_offsets_data (list of int): A list of offsets (one for each bind group). + dynamic_offsets_data_start (int): Not used. + dynamic_offsets_data_length (int): Not used. + """ + raise NotImplementedError() + + +class GPUDebugCommandsMixin: + """Mixin for classes that support debug groups and markers.""" + + # IDL: undefined pushDebugGroup(USVString groupLabel); + def push_debug_group(self, group_label): + """Push a named debug group into the command stream.""" + raise NotImplementedError() + + # IDL: undefined popDebugGroup(); + def pop_debug_group(self): + """Pop the active debug group.""" + raise NotImplementedError() + + # IDL: undefined insertDebugMarker(USVString markerLabel); + def insert_debug_marker(self, marker_label): + """Insert the given message into the debug message queue.""" + raise NotImplementedError() + + +class GPURenderCommandsMixin: + """Mixin for classes that provide rendering commands.""" + + # IDL: undefined setPipeline(GPURenderPipeline pipeline); + def set_pipeline(self, pipeline): + """Set the pipeline for this render pass. + + Arguments: + pipeline (GPURenderPipeline): The pipeline to use. + """ + raise NotImplementedError() + + # IDL: undefined setIndexBuffer(GPUBuffer buffer, GPUIndexFormat indexFormat, optional GPUSize64 offset = 0, optional GPUSize64 size); + def set_index_buffer(self, buffer, index_format, offset=0, size=None): + """Set the index buffer for this render pass. + + Arguments: + buffer (GPUBuffer): The buffer that contains the indices. + index_format (GPUIndexFormat): The format of the index data + contained in buffer. If `strip_index_format` is given in the + call to `GPUDevice.create_render_pipeline()`, it must match. + offset (int): The byte offset in the buffer. Default 0. + size (int): The number of bytes to use. If zero, the remaining size + (after offset) of the buffer is used. Default 0. + """ + raise NotImplementedError() + + # IDL: undefined setVertexBuffer(GPUIndex32 slot, GPUBuffer? buffer, optional GPUSize64 offset = 0, optional GPUSize64 size); + def set_vertex_buffer(self, slot, buffer, offset=0, size=None): + """Associate a vertex buffer with a bind slot. + + Arguments: + slot (int): The binding slot for the vertex buffer. + buffer (GPUBuffer): The buffer that contains the vertex data. + offset (int): The byte offset in the buffer. Default 0. + size (int): The number of bytes to use. If zero, the remaining size + (after offset) of the buffer is used. Default 0. + """ + raise NotImplementedError() + + # IDL: undefined draw(GPUSize32 vertexCount, optional GPUSize32 instanceCount = 1, optional GPUSize32 firstVertex = 0, optional GPUSize32 firstInstance = 0); + def draw(self, vertex_count, instance_count=1, first_vertex=0, first_instance=0): + """Run the render pipeline without an index buffer. + + Arguments: + vertex_count (int): The number of vertices to draw. + instance_count (int): The number of instances to draw. Default 1. + first_vertex (int): The vertex offset. Default 0. + first_instance (int): The instance offset. Default 0. + """ + raise NotImplementedError() + + # IDL: undefined drawIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); + def draw_indirect(self, indirect_buffer, indirect_offset): + """Like `draw()`, but the function arguments are in a buffer. + + Arguments: + indirect_buffer (GPUBuffer): The buffer that contains the arguments. + indirect_offset (int): The byte offset at which the arguments are. 
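+
+        A sketch of creating such a buffer: the draw arguments are stored as
+        four consecutive unsigned 32-bit integers (vertex_count, instance_count,
+        first_vertex, first_instance); ``render_pass`` stands for a
+        `GPURenderPassEncoder`:
+
+        .. code-block:: py
+
+            import struct
+
+            args = struct.pack("<4I", 3, 1, 0, 0)  # 3 vertices, 1 instance
+            indirect_buffer = device.create_buffer_with_data(
+                data=args, usage=wgpu.BufferUsage.INDIRECT
+            )
+            render_pass.draw_indirect(indirect_buffer, 0)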
+ """ + raise NotImplementedError() + + # IDL: undefined drawIndexed(GPUSize32 indexCount, optional GPUSize32 instanceCount = 1, optional GPUSize32 firstIndex = 0, optional GPUSignedOffset32 baseVertex = 0, optional GPUSize32 firstInstance = 0); + def draw_indexed( + self, + index_count, + instance_count=1, + first_index=0, + base_vertex=0, + first_instance=0, + ): + """Run the render pipeline using an index buffer. + + Arguments: + index_count (int): The number of indices to draw. + instance_count (int): The number of instances to draw. Default 1. + first_index (int): The index offset. Default 0. + base_vertex (int): A number added to each index in the index buffer. Default 0. + first_instance (int): The instance offset. Default 0. + """ + raise NotImplementedError() + + # IDL: undefined drawIndexedIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); + def draw_indexed_indirect(self, indirect_buffer, indirect_offset): + """ + Like `draw_indexed()`, but the function arguments are in a buffer. + + Arguments: + indirect_buffer (GPUBuffer): The buffer that contains the arguments. + indirect_offset (int): The byte offset at which the arguments are. + """ + raise NotImplementedError() + + +class GPUCommandEncoder(GPUCommandsMixin, GPUDebugCommandsMixin, GPUObjectBase): + """Object to record a series of commands. + + When done, call `finish()` to obtain a `GPUCommandBuffer` object. + + Create a command encoder using `GPUDevice.create_command_encoder()`. + """ + + # IDL: GPUComputePassEncoder beginComputePass(optional GPUComputePassDescriptor descriptor = {}); + def begin_compute_pass( + self, *, label="", timestamp_writes: "structs.ComputePassTimestampWrites" = None + ): + """Record the beginning of a compute pass. Returns a + `GPUComputePassEncoder` object. + + Arguments: + label (str): A human readable label. Optional. + timestamp_writes: unused + """ + raise NotImplementedError() + + # IDL: GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor descriptor); + def begin_render_pass( + self, + *, + label="", + color_attachments: "List[structs.RenderPassColorAttachment]", + depth_stencil_attachment: "structs.RenderPassDepthStencilAttachment" = None, + occlusion_query_set: "GPUQuerySet" = None, + timestamp_writes: "structs.RenderPassTimestampWrites" = None, + max_draw_count: int = 50000000, + ): + """Record the beginning of a render pass. Returns a + `GPURenderPassEncoder` object. + + Arguments: + label (str): A human readable label. Optional. + color_attachments (list): List of `structs.RenderPassColorAttachment` dicts. + depth_stencil_attachment (structs.RenderPassDepthStencilAttachment): Describes the depth stencil attachment. Default None. + occlusion_query_set (GPUQuerySet): Default None. TODO NOT IMPLEMENTED in wgpu-native. + timestamp_writes: unused + """ + raise NotImplementedError() + + # IDL: undefined clearBuffer( GPUBuffer buffer, optional GPUSize64 offset = 0, optional GPUSize64 size); + def clear_buffer(self, buffer, offset=0, size=None): + """Set (part of) the given buffer to zeros. Both offset and size must be a multiple of 4. If size is None, the whole buffer after offset is cleared.""" + raise NotImplementedError() + + # IDL: undefined copyBufferToBuffer( GPUBuffer source, GPUSize64 sourceOffset, GPUBuffer destination, GPUSize64 destinationOffset, GPUSize64 size); + def copy_buffer_to_buffer( + self, source, source_offset, destination, destination_offset, size + ): + """Copy the contents of a buffer to another buffer. 
+ + Arguments: + source (GPUBuffer): The source buffer. + source_offset (int): The byte offset (a multiple of 4). + destination (GPUBuffer): The target buffer. + destination_offset (int): The byte offset in the destination buffer (a multiple of 4). + size (int): The number of bytes to copy (a multiple of 4). + """ + raise NotImplementedError() + + # IDL: undefined copyBufferToTexture( GPUImageCopyBuffer source, GPUImageCopyTexture destination, GPUExtent3D copySize); + def copy_buffer_to_texture(self, source, destination, copy_size): + """Copy the contents of a buffer to a texture (view). + + Arguments: + source (GPUBuffer): A dict with fields: buffer, offset, bytes_per_row, rows_per_image. + destination (GPUTexture): A dict with fields: texture, mip_level, origin. + copy_size (int): The number of bytes to copy. + + Note that the `bytes_per_row` must be a multiple of 256. + """ + raise NotImplementedError() + + # IDL: undefined copyTextureToBuffer( GPUImageCopyTexture source, GPUImageCopyBuffer destination, GPUExtent3D copySize); + def copy_texture_to_buffer(self, source, destination, copy_size): + """Copy the contents of a texture (view) to a buffer. + + Arguments: + source (GPUTexture): A dict with fields: texture, mip_level, origin. + destination (GPUBuffer): A dict with fields: buffer, offset, bytes_per_row, rows_per_image. + copy_size (int): The number of bytes to copy. + + Note that the `bytes_per_row` must be a multiple of 256. + """ + raise NotImplementedError() + + # IDL: undefined copyTextureToTexture( GPUImageCopyTexture source, GPUImageCopyTexture destination, GPUExtent3D copySize); + def copy_texture_to_texture(self, source, destination, copy_size): + """Copy the contents of a texture (view) to another texture (view). + + Arguments: + source (GPUTexture): A dict with fields: texture, mip_level, origin. + destination (GPUTexture): A dict with fields: texture, mip_level, origin. + copy_size (int): The number of bytes to copy. + """ + raise NotImplementedError() + + # IDL: GPUCommandBuffer finish(optional GPUCommandBufferDescriptor descriptor = {}); + def finish(self, *, label=""): + """Finish recording. Returns a `GPUCommandBuffer` to + submit to a `GPUQueue`. + + Arguments: + label (str): A human readable label. Optional. + """ + raise NotImplementedError() + + # IDL: undefined resolveQuerySet( GPUQuerySet querySet, GPUSize32 firstQuery, GPUSize32 queryCount, GPUBuffer destination, GPUSize64 destinationOffset); + def resolve_query_set( + self, query_set, first_query, query_count, destination, destination_offset + ): + """TODO""" + raise NotImplementedError() + + +class GPUComputePassEncoder( + GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin, GPUObjectBase +): + """Object to records commands for a compute pass. + + Create a compute pass encoder using `GPUCommandEncoder.begin_compute_pass()`. + """ + + # IDL: undefined setPipeline(GPUComputePipeline pipeline); + def set_pipeline(self, pipeline): + """Set the pipeline for this compute pass. + + Arguments: + pipeline (GPUComputePipeline): The pipeline to use. + """ + raise NotImplementedError() + + # IDL: undefined dispatchWorkgroups(GPUSize32 workgroupCountX, optional GPUSize32 workgroupCountY = 1, optional GPUSize32 workgroupCountZ = 1); + def dispatch_workgroups( + self, workgroup_count_x, workgroup_count_y=1, workgroup_count_z=1 + ): + """Run the compute shader. + + Arguments: + x (int): The number of cycles in index x. + y (int): The number of cycles in index y. Default 1. 
+ z (int): The number of cycles in index z. Default 1. + """ + raise NotImplementedError() + + # IDL: undefined dispatchWorkgroupsIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); + def dispatch_workgroups_indirect(self, indirect_buffer, indirect_offset): + """Like `dispatch_workgroups()`, but the function arguments are in a buffer. + + Arguments: + indirect_buffer (GPUBuffer): The buffer that contains the arguments. + indirect_offset (int): The byte offset at which the arguments are. + """ + raise NotImplementedError() + + # IDL: undefined end(); + def end(self): + """Record the end of the compute pass.""" + raise NotImplementedError() + + +class GPURenderPassEncoder( + GPUCommandsMixin, + GPUDebugCommandsMixin, + GPUBindingCommandsMixin, + GPURenderCommandsMixin, + GPUObjectBase, +): + """Object to records commands for a render pass. + + Create a render pass encoder using `GPUCommandEncoder.begin_render_pass`. + """ + + # IDL: undefined setViewport(float x, float y, float width, float height, float minDepth, float maxDepth); + def set_viewport(self, x, y, width, height, min_depth, max_depth): + """Set the viewport for this render pass. The whole scene is rendered + to this sub-rectangle. + + Arguments: + x (int): Horizontal coordinate. + y (int): Vertical coordinate. + width (int): Horizontal size. + height (int): Vertical size. + min_depth (int): Clipping in depth. + max_depth (int): Clipping in depth. + + """ + raise NotImplementedError() + + # IDL: undefined setScissorRect(GPUIntegerCoordinate x, GPUIntegerCoordinate y, GPUIntegerCoordinate width, GPUIntegerCoordinate height); + def set_scissor_rect(self, x, y, width, height): + """Set the scissor rectangle for this render pass. The scene + is rendered as usual, but is only applied to this sub-rectangle. + + Arguments: + x (int): Horizontal coordinate. + y (int): Vertical coordinate. + width (int): Horizontal size. + height (int): Vertical size. + """ + raise NotImplementedError() + + # IDL: undefined setBlendConstant(GPUColor color); + def set_blend_constant(self, color): + """Set the blend color for the render pass. + + Arguments: + color (tuple or dict): A color with fields (r, g, b, a). + """ + raise NotImplementedError() + + # IDL: undefined setStencilReference(GPUStencilValue reference); + def set_stencil_reference(self, reference): + """Set the reference stencil value for this render pass. + + Arguments: + reference (int): The reference value. + """ + raise NotImplementedError() + + # IDL: undefined executeBundles(sequence bundles); + def execute_bundles(self, bundles): + """ + TODO: not yet available in wgpu-native + """ + raise NotImplementedError() + + # IDL: undefined end(); + def end(self): + """Record the end of the render pass.""" + raise NotImplementedError() + + # IDL: undefined beginOcclusionQuery(GPUSize32 queryIndex); + def begin_occlusion_query(self, query_index): + """TODO""" + raise NotImplementedError() + + # IDL: undefined endOcclusionQuery(); + def end_occlusion_query(self): + """TODO""" + raise NotImplementedError() + + +class GPURenderBundle(GPUObjectBase): + """ + TODO: not yet wrapped. + """ + + +class GPURenderBundleEncoder( + GPUCommandsMixin, + GPUDebugCommandsMixin, + GPUBindingCommandsMixin, + GPURenderCommandsMixin, + GPUObjectBase, +): + """ + TODO: not yet wrapped + """ + + # IDL: GPURenderBundle finish(optional GPURenderBundleDescriptor descriptor = {}); + def finish(self, *, label=""): + """Finish recording and return a `GPURenderBundle`. 
+ + Arguments: + label (str): A human readable label. Optional. + """ + raise NotImplementedError() + + +class GPUQueue(GPUObjectBase): + """Object to submit command buffers to. + + You can obtain a queue object via the :attr:`GPUDevice.queue` property. + """ + + # IDL: undefined submit(sequence commandBuffers); + def submit(self, command_buffers): + """Submit a `GPUCommandBuffer` to the queue. + + Arguments: + command_buffers (list): The `GPUCommandBuffer` objects to add. + """ + raise NotImplementedError() + + # IDL: undefined writeBuffer( GPUBuffer buffer, GPUSize64 bufferOffset, AllowSharedBufferSource data, optional GPUSize64 dataOffset = 0, optional GPUSize64 size); + def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None): + """Takes the data contents and schedules a write operation of + these contents to the buffer. A snapshot of the data is taken; + any changes to the data after this function is called do not + affect the buffer contents. + + Arguments: + buffer: The `GPUBuffer` object to write to. + buffer_offset (int): The offset in the buffer to start writing at. + data: The data to write. Must be contiguous. + data_offset: The byte offset in the data. Default 0. + size: The number of bytes to write. Default all minus offset. + + This maps the data to a temporary buffer and then copies that buffer + to the given buffer. The given buffer's usage must include COPY_DST. + + Also see `GPUBuffer.map()`. + + """ + raise NotImplementedError() + + @apidiff.add("For symmetry with queue.write_buffer") + def read_buffer(self, buffer, buffer_offset=0, size=None): + """Takes the data contents of the buffer and return them as a memoryview. + + Arguments: + buffer: The `GPUBuffer` object to read from. + buffer_offset (int): The offset in the buffer to start reading from. + size: The number of bytes to read. Default all minus offset. + + This copies the data in the given buffer to a temporary buffer + and then maps that buffer to read the data. The given buffer's + usage must include COPY_SRC. + + Also see `GPUBuffer.map()`. + """ + raise NotImplementedError() + + # IDL: undefined writeTexture( GPUImageCopyTexture destination, AllowSharedBufferSource data, GPUImageDataLayout dataLayout, GPUExtent3D size); + def write_texture(self, destination, data, data_layout, size): + """Takes the data contents and schedules a write operation of + these contents to the destination texture in the queue. A + snapshot of the data is taken; any changes to the data after + this function is called do not affect the texture contents. + + Arguments: + destination: A dict with fields: "texture" (a texture object), + "origin" (a 3-tuple), "mip_level" (an int, default 0). + data: The data to write. + data_layout: A dict with fields: "offset" (an int, default 0), + "bytes_per_row" (an int), "rows_per_image" (an int, default 0). + size: A 3-tuple of ints specifying the size to write. + + Unlike `GPUCommandEncoder.copyBufferToTexture()`, there is + no alignment requirement on `bytes_per_row`. + """ + raise NotImplementedError() + + @apidiff.add("For symmetry, and to help work around the bytes_per_row constraint") + def read_texture(self, source, data_layout, size): + """Reads the contents of the texture and return them as a memoryview. + + Arguments: + source: A dict with fields: "texture" (a texture object), + "origin" (a 3-tuple), "mip_level" (an int, default 0). + data_layout: A dict with fields: "offset" (an int, default 0), + "bytes_per_row" (an int), "rows_per_image" (an int, default 0). 
+ size: A 3-tuple of ints specifying the size to write. + + Unlike `GPUCommandEncoder.copyBufferToTexture()`, there is + no alignment requirement on `bytes_per_row`, although in the + current implementation there will be a performance penalty if + ``bytes_per_row`` is not a multiple of 256 (because we'll be + copying data row-by-row in Python). + """ + raise NotImplementedError() + + # IDL: Promise onSubmittedWorkDone(); + def on_submitted_work_done(self): + """TODO""" + raise NotImplementedError() + + # IDL: undefined copyExternalImageToTexture( GPUImageCopyExternalImage source, GPUImageCopyTextureTagged destination, GPUExtent3D copySize); + @apidiff.hide("Specific to browsers") + def copy_external_image_to_texture(self, source, destination, copy_size): + raise NotImplementedError() + + +# %% Further non-GPUObject classes + + +class GPUDeviceLostInfo: + """An object that contains information about the device being lost.""" + + # Not used at the moment, see device.lost prop + + def __init__(self, reason, message): + self._reason = reason + self._message = message + + # IDL: readonly attribute DOMString message; + @property + def message(self): + """The error message specifying the reason for the device being lost.""" + return self._message + + # IDL: readonly attribute GPUDeviceLostReason reason; + @property + def reason(self): + """The reason (enums.GPUDeviceLostReason) for the device getting lost. Can be None.""" + return self._reason + + +class GPUError(Exception): + """A generic GPU error.""" + + def __init__(self, message): + super().__init__(message) + + # IDL: readonly attribute DOMString message; + @property + def message(self): + """The error message.""" + return self.args[0] + + +class GPUOutOfMemoryError(GPUError, MemoryError): + """An error raised when the GPU is out of memory.""" + + # IDL: constructor(DOMString message); + def __init__(self, message): + super().__init__(message or "GPU is out of memory.") + + +class GPUValidationError(GPUError): + """An error raised when the pipeline could not be validated.""" + + # IDL: constructor(DOMString message); + def __init__(self, message): + super().__init__(message) + + +class GPUPipelineError(Exception): + """An error raised when a pipeline could not be created.""" + + # IDL: constructor(optional DOMString message = "", GPUPipelineErrorInit options); + def __init__(self, message="", options=None): + super().__init__(message or "") + self._options = options + + # IDL: readonly attribute GPUPipelineErrorReason reason; + @property + def reason(self): + """The reason for the failure.""" + return self.args[0] + + +class GPUInternalError(GPUError): + """An error raised for implementation-specific reasons. + + An operation failed for a system or implementation-specific + reason even when all validation requirements have been satisfied. 
+ """ + + # IDL: constructor(DOMString message); + def __init__(self, message): + super().__init__(message) + + +# %% Not implemented + + +class GPUCompilationMessage: + """An object that contains information about a problem with shader compilation.""" + + # IDL: readonly attribute DOMString message; + @property + def message(self): + """The warning/error message.""" + raise NotImplementedError() + + # IDL: readonly attribute GPUCompilationMessageType type; + @property + def type(self): + """The type of warning/problem.""" + raise NotImplementedError() + + # IDL: readonly attribute unsigned long long lineNum; + @property + def line_num(self): + """The corresponding line number in the shader source.""" + raise NotImplementedError() + + # IDL: readonly attribute unsigned long long linePos; + @property + def line_pos(self): + """The position on the line in the shader source.""" + raise NotImplementedError() + + # IDL: readonly attribute unsigned long long offset; + @property + def offset(self): + """Offset of ...""" + raise NotImplementedError() + + # IDL: readonly attribute unsigned long long length; + @property + def length(self): + """The length of the line?""" + raise NotImplementedError() + + +class GPUCompilationInfo: + """TODO""" + + # IDL: readonly attribute FrozenArray messages; + @property + def messages(self): + """A list of `GPUCompilationMessage` objects.""" + raise NotImplementedError() + + +class GPUQuerySet(GPUObjectBase): + """An object to store the results of queries on passes. + + You can obtain a query set object via :attr:`GPUDevice.create_query_set`. + """ + + def __init__(self, label, internal, device, type, count): + super().__init__(label, internal, device) + self._type = type + self._count = count + + # IDL: readonly attribute GPUQueryType type; + @property + def type(self): + """The type of the queries managed by this queryset.""" + return self._type + + # IDL: readonly attribute GPUSize32Out count; + @property + def count(self): + """The number of the queries managed by this queryset.""" + return self._count + + # IDL: undefined destroy(); + def destroy(self): + """Destroy this QuerySet.""" + raise NotImplementedError() + + +# %%%%% Post processing + +# Note that some toplevel classes are already filtered out by the codegen, +# like GPUExternalTexture and GPUUncapturedErrorEvent, and more. + +apidiff.remove_hidden_methods(globals()) + + +def _seed_object_counts(): + m = globals() + for class_name in __all__: + cls = m[class_name] + if not class_name.endswith(("Base", "Mixin")): + if hasattr(cls, "_ot"): + object_tracker.counts[class_name] = 0 + + +def generic_repr(self): + try: + module_name = self.__module__ + if module_name.startswith("wgpu"): + if module_name == "wgpu._classes": + module_name = "wgpu" + elif "backends." 
in module_name:
+                backend_name = self.__module__.split("backends")[-1].split(".")[1]
+                module_name = f"wgpu.backends.{backend_name}"
+        object_str = "object"
+        if isinstance(self, GPUObjectBase):
+            object_str = f"object '{self.label}'"
+        return (
+            f"<{module_name}.{self.__class__.__name__} {object_str} at {hex(id(self))}>"
+        )
+    except Exception:  # easy fallback
+        return object.__repr__(self)
+
+
+def _set_repr_methods():
+    m = globals()
+    for class_name in __all__:
+        cls = m[class_name]
+        if len(cls.mro()) == 2:  # class itself and object
+            cls.__repr__ = generic_repr
+
+
+_seed_object_counts()
+_set_repr_methods()
diff --git a/wgpu/_coreutils.py b/wgpu/_coreutils.py
new file mode 100644
index 0000000..d126886
--- /dev/null
+++ b/wgpu/_coreutils.py
@@ -0,0 +1,157 @@
+"""
+Core utilities that are loaded into the root namespace or used internally.
+"""
+
+import re
+import sys
+import atexit
+import logging
+import importlib.resources
+from contextlib import ExitStack
+
+
+# Our resources are most probably always on the file system. But in
+# case they are not, we have a nice exit handler to remove temporary files.
+_resource_files = ExitStack()
+atexit.register(_resource_files.close)
+
+
+def get_resource_filename(name):
+    """Get the filename to a wgpu resource."""
+    if sys.version_info < (3, 9):
+        context = importlib.resources.path("wgpu.resources", name)
+    else:
+        ref = importlib.resources.files("wgpu.resources") / name
+        context = importlib.resources.as_file(ref)
+    path = _resource_files.enter_context(context)
+    return str(path)
+
+
+class WGPULogger(logging.getLoggerClass()):
+    """A custom logger for which we can detect changes in its level."""
+
+    def setLevel(self, level):  # noqa: N802
+        super().setLevel(level)
+        for cb in logger_set_level_callbacks:
+            cb(self.level)  # use arg that is always an int
+
+
+logger_set_level_callbacks = []
+_original_logger_cls = logging.getLoggerClass()
+logging.setLoggerClass(WGPULogger)
+logger = logging.getLogger("wgpu")
+logging.setLoggerClass(_original_logger_cls)
+assert isinstance(logger, WGPULogger)
+logger.setLevel(logging.WARNING)
+
+
+_re_wgpu_ob = re.compile(r"`<[a-z|A-Z]+-\([0-9]+, [0-9]+, [a-z|A-Z]+\)>`")
+
+
+def error_message_hash(message):
+    # Remove wgpu object representations, because they contain id's that may change at each draw.
+    # E.g. ``
+    message = _re_wgpu_ob.sub("WGPU_OBJECT", message)
+    return hash(message)
+
+
+_flag_cache = {}  # str -> int
+
+
+def str_flag_to_int(flag, s):
+    """Allow using strings for flags, i.e. 'READ' instead of wgpu.MapMode.READ.
+    No worries about repeated overhead, because the results are cached.
+    """
+    cache_key = (
+        f"{flag._name}.{s}"  # using private attribute, let's call this a friend func
+    )
+    value = _flag_cache.get(cache_key, None)
+
+    if value is None:
+        parts = [p.strip() for p in s.split("|")]
+        parts = [p for p in parts if p]
+        invalid_parts = [p for p in parts if p.startswith("_")]
+        if not parts or invalid_parts:
+            raise ValueError(f"Invalid flag value: {s}")
+
+        value = 0
+        for p in parts:
+            try:
+                v = flag.__dict__[p.upper()]
+                value += v
+            except KeyError:
+                raise ValueError(f"Invalid flag value for {flag}: '{p}'")
+        _flag_cache[cache_key] = value
+
+    return value
+
+
+class ApiDiff:
+    """Helper class to define differences in the API by annotating
+    methods. This way, these differences are made explicit, plus they're
+    logged so we can automatically include these changes in the docs.
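+
+    A rough usage sketch, mirroring how the decorators are applied in
+    ``_classes.py`` (the decorator text is free-form)::
+
+        apidiff = ApiDiff()
+
+        class GPUQueue(GPUObjectBase):
+            @apidiff.add("For symmetry with queue.write_buffer")
+            def read_buffer(self, buffer, buffer_offset=0, size=None):
+                ...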
+ """ + + def __init__(self): + self.hidden = {} + self.added = {} + self.changed = {} + + def hide(self, func_or_text): + """Decorator to discard certain methods from the "reference" API. + Intended only for the base API where we deviate from WebGPU. + """ + return self._diff("hidden", func_or_text) + + def add(self, func_or_text): + """Decorator to add certain methods that are not part of the "reference" spec. + Intended for the base API where we implement additional/alternative API, + and in the backend implementations where additional methods are provided. + """ + return self._diff("added", func_or_text) + + def change(self, func_or_text): + """Decorator to mark certain methods as having a different signature + as the "reference" spec. Intended only for the base API where we deviate + from WebGPU. + """ + return self._diff("changed", func_or_text) + + def _diff(self, method, func_or_text): + def wrapper(f): + d = getattr(self, method) + name = f.__qualname__ if hasattr(f, "__qualname__") else f.fget.__qualname__ + d[name] = text + return f + + if callable(func_or_text): + text = None + return wrapper(func_or_text) + else: + text = func_or_text + return wrapper + + def remove_hidden_methods(self, scope): + """Call this to remove methods from the API that were decorated as hidden.""" + for name in self.hidden: + classname, _, methodname = name.partition(".") + cls = scope[classname] + delattr(cls, methodname) + + @property + def __doc__(self): + """Generate a docstring for this instance. This way we can + automatically document API differences. + """ + lines = [""] + for name, msg in self.hidden.items(): + line = f" * Hides ``{name}()``" + lines.append(f"{line} - {msg}" if msg else line) + for name, msg in self.added.items(): + line = f" * Adds ``{name}()``" + lines.append(f"{line} - {msg}" if msg else line) + for name, msg in self.changed.items(): + line = f" * Changes ``{name}()``" + lines.append(f"{line} - {msg}" if msg else line) + lines.append("") + return "\n".join(sorted(lines)) diff --git a/wgpu/_diagnostics.py b/wgpu/_diagnostics.py new file mode 100644 index 0000000..e0e0c83 --- /dev/null +++ b/wgpu/_diagnostics.py @@ -0,0 +1,520 @@ +""" +Logic related to providing diagnostic info on wgpu. +""" + +import os +import sys +import platform + + +class DiagnosticsRoot: + """Root object to access wgpu diagnostics (i.e. ``wgpu.diagnostics``). + + Per-topic diagnostics can be accessed as attributes on this object. + These include ``system``, ``wgpu_native_info``, ``versions``, + ``object_counts``, ``wgpu_natrive_counts``. + """ + + def __init__(self): + self._diagnostics_instances = {} + + def __repr__(self): + topics = ", ".join(self._diagnostics_instances.keys()) + return f"" + + def _register_diagnostics(self, name, ob): + self._diagnostics_instances[name] = ob + setattr(self, name, ob) + + def get_dict(self): + """Get a dict that represents the full diagnostics info. + + The keys are the diagnostic topics, and the values are dicts + of dicts. See e.g. ``wgpu.diagnostics.counts.get_dict()`` for + a topic-specific dict. 
+ """ + result = {} + for name, ob in self._diagnostics_instances.items(): + result[name] = ob.get_dict() + return result + + def get_report(self): + """Get the full textual diagnostic report (as a str).""" + text = "" + for name, ob in self._diagnostics_instances.items(): + text += ob.get_report() + return text + + def print_report(self): + """Convenience method to print the full diagnostics report.""" + print(self.get_report(), end="") + + +class Diagnostics: + """Object that represents diagnostics on a specific topic. + + This is a base class that must be subclassed to provide diagnostics + on a certain topic. Instantiating the class registers it with the + root diagnostics object. + """ + + def __init__(self, name): + diagnostics._register_diagnostics(name, self) + self.name = name + self.object_counts = {} + + def __repr__(self): + return f"" + + def get_dict(self): + """Get the diagnostics for this topic, in the form of a Python dict. + + Subclasses must implement this method. The dict can be a simple + map of keys to values (str, int, float):: + + foo: 1 + bar: 2 + + If the values are dicts, the data has a table-like layout, with + the keys representing the table header:: + + count mem + + Adapter: 1 264 + Buffer: 4 704 + + Subdicts are also supported, which results in multi-row entries. + In the report, the keys of the subdicts have colons behind them:: + + count mem backend o v e el_size + + Adapter: 1 264 vulkan: 1 0 0 264 + d3d12: 1 0 0 220 + Buffer: 4 704 vulkan: 4 0 0 176 + d3d12: 0 0 0 154 + + """ + raise NotImplementedError() + + def get_subscript(self): + """Get informative text that helps interpret the report. + + Subclasses can implement this method. The text will show below the table + in the report. + """ + return "" # Optional + + def get_report(self): + """Get the textual diagnostics report for this topic.""" + text = f"\n██ {self.name}:\n\n" + text += dict_to_text(self.get_dict()) + subscript = self.get_subscript() + if subscript: + text += "\n" + subscript.rstrip() + "\n" + return text + + def print_report(self): + """Print the diagnostics report for this topic.""" + print(self.get_report(), end="") + + +class ObjectTracker: + """Little object to help track object counts.""" + + def __init__(self): + self.counts = {} + self.amounts = {} + + def increase(self, name, amount=0): + """Bump the counter.""" + self.counts[name] = self.counts.get(name, 0) + 1 + if amount: + self.amounts[name] = self.amounts.get(name, 0) + amount + + def decrease(self, name, amount=0): + """Bump the counter back.""" + self.counts[name] -= 1 + if amount: + self.amounts[name] -= amount + + +def derive_header(dct): + """Derive a table-header from the given dict.""" + + if not isinstance(dct, dict): # no-cover + raise TypeError(f"Not a dict: {dct}") + + header = [] + sub_dicts = {} + + for key, val in dct.items(): + if not isinstance(val, dict): # no-cover + raise TypeError(f"Element not a dict: {val}") + for k, v in val.items(): + if k not in header: + header.append(k) + if isinstance(v, dict): + sub_dicts[k] = v + + for k, d in sub_dicts.items(): + while k in header: + header.remove(k) + header.append(k) + sub_header = derive_header(d) + for k in sub_header[1:]: + if k not in header: + header.append(k) + + # Add header item for first column, i.e. 
the key / row title
+    header.insert(0, "")
+
+    return header
+
+
+def dict_to_text(d, header=None):
+    """Convert a dict data structure to a textual table representation."""
+
+    if not d:
+        return "No data\n"
+
+    # Copy the dict, with simple key-value dicts being transformed into table-like dicts.
+    # That way the code in derive_header() and dict_to_table() can assume the table-like
+    # data structure, keeping it simpler.
+    d2 = {}
+    for key, val in d.items():
+        if not isinstance(val, dict):
+            val = {"": val}
+        d2[key] = val
+    d = d2
+
+    if not header:
+        header = derive_header(d)
+
+    # We have a table-like-layout if any of the values in the header is non-empty
+    table_layout = any(header)
+
+    # Get the table
+    rows = dict_to_table(d, header)
+    ncols = len(header)
+
+    # Sanity check (guard assumptions about dict_to_table)
+    for row in rows:
+        assert len(row) == ncols, "dict_to_table failed"
+        for i in range(ncols):
+            assert isinstance(row[i], str), "dict_to_table failed"
+
+    # Insert heading
+    if table_layout:
+        rows.insert(0, header.copy())
+        rows.insert(1, [""] * ncols)
+
+    # Determine which columns have values with a colon at the end
+    column_has_colon = [False for _ in range(ncols)]
+    for row in rows:
+        for i in range(ncols):
+            column_has_colon[i] |= row[i].endswith(":")
+
+    # Align the values that don't have a colon at the end
+    for row in rows:
+        for i in range(ncols):
+            word = row[i]
+            if column_has_colon[i] and not word.endswith(":"):
+                row[i] = word + " "
+
+    # Establish max lengths
+    max_lens = [0 for _ in range(ncols)]
+    for row in rows:
+        for i in range(ncols):
+            max_lens[i] = max(max_lens[i], len(row[i]))
+
+    # Justify first column (always rjust)
+    for row in rows:
+        row[0] = row[0].rjust(max_lens[0])
+
+    # For the table layout we also rjust the other columns
+    if table_layout:
+        for row in rows:
+            for i in range(1, ncols):
+                row[i] = row[i].rjust(max_lens[i])
+
+    # Join into a consistent text
+    lines = [" ".join(row).rstrip() for row in rows]
+    text = "\n".join(lines)
+    return text.rstrip() + "\n"
+
+
+def dict_to_table(d, header, header_offest=0):
+    """Convert a dict data structure to a table (a list of lists of strings).
+    The keys form the first entry of the row. Values that are dicts recurse.
+    """
+
+    ncols = len(header)
+    rows = []
+
+    for row_title, values in d.items():
+        if row_title == "total" and row_title == list(d.keys())[-1]:
+            rows.append([""] * ncols)
+        row = [row_title + ":" if row_title else ""]
+        rows.append(row)
+        for i in range(header_offest + 1, len(header)):
+            key = header[i]
+            val = values.get(key, None)
+            if val is None:
+                row.append("")
+            elif isinstance(val, str):
+                row.append(val)
+            elif isinstance(val, int):
+                row.append(int_repr(val))
+            elif isinstance(val, float):
+                row.append(f"{val:.6g}")
+            elif isinstance(val, dict):
+                subrows = dict_to_table(val, header, i)
+                if len(subrows) == 0:
+                    row += [""] * (ncols - i)
+                else:
+                    row += subrows[0]
+                    extrarows = [[""] * i + subrow for subrow in subrows[1:]]
+                    rows.extend(extrarows)
+                break  # header items are consumed by the sub
+            else:  # no-cover
+                raise TypeError(f"Unexpected table value: {val}")
+
+    return rows
+
+
+def int_repr(val):
+    """Represent an integer using K and M suffixes."""
+    prefix = "-" if val < 0 else ""
+    val = abs(val)
+    if val >= 1_000_000_000:  # >= 1G
+        s = str(val / 1_000_000_000)
+        suffix = "G"
+    elif val >= 1_000_000:  # >= 1M
+        s = str(val / 1_000_000)
+        suffix = "M"
+    elif val >= 1_000:  # >= 1K
+        s = str(val / 1_000)
+        suffix = "K"
+    else:
+        s = str(val)
+        suffix = ""
+    if "."
in s: + s1, _, s2 = s.partition(".") + n_decimals = max(0, 3 - len(s1)) + s = s1 + if n_decimals: + s2 += "000" + s = s1 + "." + s2[:n_decimals] + return prefix + s + suffix + + +# Map that we need to calculate texture resource consumption. +# We need to keep this up-to-date as formats change, we have a unit test for this. +# Also see https://wgpu.rs/doc/wgpu/enum.TextureFormat.html + +texture_format_to_bpp = { + # 8 bit + "r8unorm": 8, + "r8snorm": 8, + "r8uint": 8, + "r8sint": 8, + # 16 bit + "r16uint": 16, + "r16sint": 16, + "r16float": 16, + "rg8unorm": 16, + "rg8snorm": 16, + "rg8uint": 16, + "rg8sint": 16, + # 32 bit + "r32uint": 32, + "r32sint": 32, + "r32float": 32, + "rg16uint": 32, + "rg16sint": 32, + "rg16float": 32, + "rgba8unorm": 32, + "rgba8unorm-srgb": 32, + "rgba8snorm": 32, + "rgba8uint": 32, + "rgba8sint": 32, + "bgra8unorm": 32, + "bgra8unorm-srgb": 32, + # special fits + "rgb9e5ufloat": 32, # 3*9 + 5 + "rgb10a2uint": 32, # 3*10 + 2 + "rgb10a2unorm": 32, # 3*10 + 2 + "rg11b10ufloat": 32, # 2*11 + 10 + # 64 bit + "rg32uint": 64, + "rg32sint": 64, + "rg32float": 64, + "rgba16uint": 64, + "rgba16sint": 64, + "rgba16float": 64, + # 128 bit + "rgba32uint": 128, + "rgba32sint": 128, + "rgba32float": 128, + # depth and stencil + "stencil8": 8, + "depth16unorm": 16, + "depth24plus": 24, # "... at least 24 bit integer depth" ? + "depth24plus-stencil8": 32, + "depth32float": 32, + "depth32float-stencil8": 40, + # Compressed + "bc1-rgba-unorm": 4, # 4x4 blocks, 8 bytes per block + "bc1-rgba-unorm-srgb": 4, + "bc2-rgba-unorm": 8, # 4x4 blocks, 16 bytes per block + "bc2-rgba-unorm-srgb": 8, + "bc3-rgba-unorm": 8, # 4x4 blocks, 16 bytes per block + "bc3-rgba-unorm-srgb": 8, + "bc4-r-unorm": 4, + "bc4-r-snorm": 4, + "bc5-rg-unorm": 8, + "bc5-rg-snorm": 8, + "bc6h-rgb-ufloat": 8, + "bc6h-rgb-float": 8, + "bc7-rgba-unorm": 8, + "bc7-rgba-unorm-srgb": 8, + "etc2-rgb8unorm": 4, + "etc2-rgb8unorm-srgb": 4, + "etc2-rgb8a1unorm": 4, + "etc2-rgb8a1unorm-srgb": 4, + "etc2-rgba8unorm": 8, + "etc2-rgba8unorm-srgb": 8, + "eac-r11unorm": 4, + "eac-r11snorm": 4, + "eac-rg11unorm": 8, + "eac-rg11snorm": 8, + # astc always uses 16 bytes (128 bits) per block + "astc-4x4-unorm": 8.0, + "astc-4x4-unorm-srgb": 8.0, + "astc-5x4-unorm": 6.4, + "astc-5x4-unorm-srgb": 6.4, + "astc-5x5-unorm": 5.12, + "astc-5x5-unorm-srgb": 5.12, + "astc-6x5-unorm": 4.267, + "astc-6x5-unorm-srgb": 4.267, + "astc-6x6-unorm": 3.556, + "astc-6x6-unorm-srgb": 3.556, + "astc-8x5-unorm": 3.2, + "astc-8x5-unorm-srgb": 3.2, + "astc-8x6-unorm": 2.667, + "astc-8x6-unorm-srgb": 2.667, + "astc-8x8-unorm": 2.0, + "astc-8x8-unorm-srgb": 2.0, + "astc-10x5-unorm": 2.56, + "astc-10x5-unorm-srgb": 2.56, + "astc-10x6-unorm": 2.133, + "astc-10x6-unorm-srgb": 2.133, + "astc-10x8-unorm": 1.6, + "astc-10x8-unorm-srgb": 1.6, + "astc-10x10-unorm": 1.28, + "astc-10x10-unorm-srgb": 1.28, + "astc-12x10-unorm": 1.067, + "astc-12x10-unorm-srgb": 1.067, + "astc-12x12-unorm": 0.8889, + "astc-12x12-unorm-srgb": 0.8889, +} + + +# %% global diagnostics object, and builtin diagnostics + + +# The global root object +diagnostics = DiagnosticsRoot() + + +class SystemDiagnostics(Diagnostics): + """Provides basic system info.""" + + def get_dict(self): + return { + "platform": platform.platform(), + # "platform_version": platform.version(), # can be quite long + "python_implementation": platform.python_implementation(), + "python": platform.python_version(), + } + + +class WgpuNativeInfoDiagnostics(Diagnostics): + """Provides metadata about the wgpu-native 
backend.""" + + def get_dict(self): + # Get modules, or skip + try: + wgpu = sys.modules["wgpu"] + wgpu_native = wgpu.backends.wgpu_native + except (KeyError, AttributeError): # no-cover + return {} + + # Process lib path + lib_path = wgpu_native.lib_path + wgpu_path = os.path.dirname(wgpu.__file__) + if lib_path.startswith(wgpu_path): + lib_path = "." + os.path.sep + lib_path[len(wgpu_path) :].lstrip("/\\") + + return { + "expected_version": wgpu_native.__version__, + "lib_version": ".".join(str(i) for i in wgpu_native.lib_version_info), + "lib_path": lib_path, + } + + +class VersionDiagnostics(Diagnostics): + """Provides version numbers from relevant libraries.""" + + def get_dict(self): + core_libs = ["wgpu", "cffi"] + qt_libs = ["PySide6", "PyQt6", "PySide2", "PyQt5"] + gui_libs = qt_libs + ["glfw", "jupyter_rfb", "wx"] + extra_libs = ["numpy", "pygfx", "pylinalg", "fastplotlib"] + + info = {} + + for libname in core_libs + gui_libs + extra_libs: + try: + ver = sys.modules[libname].__version__ + except (KeyError, AttributeError): + pass + else: + info[libname] = str(ver) + + return info + + +class ObjectCountDiagnostics(Diagnostics): + """Provides object counts and resource consumption, used in _classes.py.""" + + def __init__(self, name): + super().__init__(name) + self.tracker = ObjectTracker() + + def get_dict(self): + """Get diagnostics as a dict.""" + object_counts = self.tracker.counts + resource_mem = self.tracker.amounts + + # Collect counts + result = {} + for name in sorted(object_counts.keys()): + d = {"count": object_counts[name]} + if name in resource_mem: + d["resource_mem"] = resource_mem[name] + result[name[3:]] = d # drop the 'GPU' from the name + + # Add totals + totals = {} + for key in ("count", "resource_mem"): + totals[key] = sum(v.get(key, 0) for v in result.values()) + result["total"] = totals + + return result + + +SystemDiagnostics("system") +VersionDiagnostics("versions") +WgpuNativeInfoDiagnostics("wgpu_native_info") +ObjectCountDiagnostics("object_counts") diff --git a/wgpu/backends/__init__.py b/wgpu/backends/__init__.py new file mode 100644 index 0000000..3e78dc0 --- /dev/null +++ b/wgpu/backends/__init__.py @@ -0,0 +1,37 @@ +""" +The backend implementations of the wgpu API. +""" + +import sys + +from ..classes import GPU as _base_GPU # noqa + + +def _register_backend(gpu): + """Backends call this to activate themselves. + It replaces ``wgpu.gpu`` with the ``gpu`` object from the backend. + """ + + root_namespace = sys.modules["wgpu"].__dict__ + needed_attributes = ( + "request_adapter", + "request_adapter_async", + "wgsl_language_features", + ) + + # Check + for attr in needed_attributes: + if not (hasattr(gpu, attr)): + raise RuntimeError( + "The registered WGPU backend object must have attributes " + + ", ".join(f"'{a}'" for a in needed_attributes) + + f". The '{attr}' is missing." + ) + + # Only allow registering a backend once + if not isinstance(root_namespace["gpu"], _base_GPU): + raise RuntimeError("WGPU backend can only be set once.") + + # Apply + root_namespace["gpu"] = gpu + return gpu diff --git a/wgpu/backends/auto.py b/wgpu/backends/auto.py new file mode 100644 index 0000000..f2c87bf --- /dev/null +++ b/wgpu/backends/auto.py @@ -0,0 +1,27 @@ +# The auto/default/only backend is wgpu-native, but this may change in the future. +import sys + + +def _load_backend(backend_name): + """Load a wgpu backend by name.""" + + if backend_name == "wgpu_native": + from . 
import wgpu_native as module # noqa: F401,F403 + elif backend_name == "js_webgpu": + from . import js_webgpu as module # noqa: F401,F403 + else: # no-cover + raise ImportError(f"Unknown wgpu backend: '{backend_name}'") + + return module.gpu + + +def _auto_load_backend(): + """Decide on the backend automatically.""" + + if sys.platform == "emscripten": + return _load_backend("js_webgpu") + else: + return _load_backend("wgpu_native") + + +gpu = _auto_load_backend() diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py new file mode 100644 index 0000000..d19d6c2 --- /dev/null +++ b/wgpu/backends/js_webgpu/__init__.py @@ -0,0 +1,31 @@ +""" +WGPU backend implementation based on the JS WebGPU API. + +Since the exposed Python API is the same as the JS API, except that +descriptors are arguments, this API can probably be fully automatically +generated. +""" + +# NOTE: this is just a stub for now!! + +from .. import _register_backend + + +class GPU: + def request_adapter(self, **parameters): + raise NotImplementedError("Cannot use sync API functions in JS.") + + async def request_adapter_async(self, **parameters): + gpu = window.navigator.gpu # noqa + return await gpu.request_adapter(**parameters) + + def get_preferred_canvas_format(self): + raise NotImplementedError() + + @property + def wgsl_language_features(self): + return set() + + +gpu = GPU() +_register_backend(gpu) diff --git a/wgpu/backends/rs.py b/wgpu/backends/rs.py new file mode 100644 index 0000000..a2e4a18 --- /dev/null +++ b/wgpu/backends/rs.py @@ -0,0 +1,12 @@ +# Termporaty alias for backwards compatibility. + +from .wgpu_native import gpu # noqa + +_deprecation_msg = """ +WARNING: wgpu.backends.rs is deprecated. Instead you can use: +- import wgpu.backends.wgpu_native to use the backend by its new name. +- import wgpu.backends.auto to do the same, but simpler and more future proof. +- simply use wgpu.gpu.request_adapter() to auto-load the backend. +""".strip() + +print(_deprecation_msg) diff --git a/wgpu/backends/wgpu_native/__init__.py b/wgpu/backends/wgpu_native/__init__.py new file mode 100644 index 0000000..ce02d5f --- /dev/null +++ b/wgpu/backends/wgpu_native/__init__.py @@ -0,0 +1,21 @@ +""" +The wgpu-native backend. +""" + +from ._api import * # noqa: F401, F403 +from ._ffi import ffi, lib, lib_path, lib_version_info # noqa: F401 +from ._ffi import _check_expected_version +from .. import _register_backend + + +# The wgpu-native version that we target/expect +__version__ = "0.18.1.3" +__commit_sha__ = "8561b0d8c0b5af7dfb8631d6f924e5418c92f2ce" +version_info = tuple(map(int, __version__.split("."))) +_check_expected_version(version_info) # produces a warning on mismatch + +# Instantiate and register this backend +gpu = GPU() # noqa: F405 +_register_backend(gpu) # noqa: F405 + +from .extras import enumerate_adapters, request_device_tracing # noqa: F401, E402 diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py new file mode 100644 index 0000000..808d33d --- /dev/null +++ b/wgpu/backends/wgpu_native/_api.py @@ -0,0 +1,2937 @@ +""" +WGPU backend implementation based on wgpu-native. + +The wgpu-native project (https://github.com/gfx-rs/wgpu-native) is a Rust +library based on wgpu-core, which wraps Metal, Vulkan, DX12, and more. +It compiles to a dynamic library exposing a C-API, accompanied by a C +header file. We wrap this using cffi, which uses the header file to do +most type conversions for us. 
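+
+A rough usage sketch (normally one goes through the public wgpu API rather
+than importing this module directly)::
+
+    import wgpu
+    import wgpu.backends.auto  # registers this backend (wgpu-native) as wgpu.gpu
+
+    adapter = wgpu.gpu.request_adapter(power_preference="high-performance")
+    device = adapter.request_device()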
+ +This module is maintained using a combination of manual code and +automatically inserted code. In short, the codegen utility inserts +new methods and checks plus annotates all structs and C api calls. + +Read the codegen/readme.md for more information. +""" + + +import os +import ctypes +import logging +import ctypes.util +from weakref import WeakKeyDictionary +from typing import List, Dict, Union + +from ... import classes, flags, enums, structs +from ..._coreutils import str_flag_to_int + +from ._ffi import ffi, lib +from ._mappings import cstructfield2enum, enummap, enum_str2int, enum_int2str +from ._helpers import ( + get_wgpu_instance, + get_surface_id_from_canvas, + get_memoryview_from_address, + get_memoryview_and_address, + to_snake_case, + to_camel_case, + ErrorHandler, + SafeLibCalls, +) + + +logger = logging.getLogger("wgpu") # noqa + + +# The API is prettu well defined +__all__ = classes.__all__.copy() + + +# %% Helper functions and objects + + +# Features that wgpu-native supports that are not part of WebGPU +NATIVE_FEATURES = ( + "PushConstants", + "TextureAdapterSpecificFormatFeatures", + "MultiDrawIndirect", + "MultiDrawIndirectCount", + "VertexWritableStorage", +) + +# Object to be able to bind the lifetime of objects to other objects +_refs_per_struct = WeakKeyDictionary() + +# Some enum keys need a shortcut +_cstructfield2enum_alt = { + "load_op": "LoadOp", + "store_op": "StoreOp", + "depth_store_op": "StoreOp", + "stencil_store_op": "StoreOp", +} + + +def new_struct_p(ctype, **kwargs): + """Create a pointer to an ffi struct. Provides a flatter syntax + and converts our string enums to int enums needed in C. The passed + kwargs are also bound to the lifetime of the new struct. + """ + assert ctype.endswith(" *") + struct_p = _new_struct_p(ctype, **kwargs) + _refs_per_struct[struct_p] = kwargs + return struct_p + # Some kwargs may be other ffi objects, and some may represent + # pointers. These need special care because them "being in" the + # current struct does not prevent them from being cleaned up by + # Python's garbage collector. Keeping hold of these objects in the + # calling code is painful and prone to missing cases, so we solve + # the issue here. We cannot attach an attribute to the struct directly, + # so we use a global WeakKeyDictionary. Also see issue #52. + + +def new_struct(ctype, **kwargs): + """Create an ffi value struct. The passed kwargs are also bound + to the lifetime of the new struct. + """ + assert not ctype.endswith("*") + struct_p = _new_struct_p(ctype + " *", **kwargs) + struct = struct_p[0] + _refs_per_struct[struct] = kwargs + return struct + + +def _new_struct_p(ctype, **kwargs): + struct_p = ffi.new(ctype) + for key, val in kwargs.items(): + if isinstance(val, str) and isinstance(getattr(struct_p, key), int): + # An enum - these are ints in C, but str in our public API + if key in _cstructfield2enum_alt: + structname = _cstructfield2enum_alt[key] + else: + structname = cstructfield2enum[ctype.strip(" *")[4:] + "." + key] + ival = enummap[structname + "." + val] + setattr(struct_p, key, ival) + else: + setattr(struct_p, key, val) + return struct_p + + +def _tuple_from_tuple_or_dict(ob, fields): + """Given a tuple/list/dict, return a tuple. Also checks tuple size. + + >> # E.g. 
+ >> _tuple_from_tuple_or_dict({"x": 1, "y": 2}, ("x", "y")) + (1, 2) + >> _tuple_from_tuple_or_dict([1, 2], ("x", "y")) + (1, 2) + """ + error_msg = "Expected tuple/key/dict with fields: {}" + if isinstance(ob, (list, tuple)): + if len(ob) != len(fields): + raise ValueError(error_msg.format(", ".join(fields))) + return tuple(ob) + elif isinstance(ob, dict): + try: + return tuple(ob[key] for key in fields) + except KeyError: + raise ValueError(error_msg.format(", ".join(fields))) + else: + raise TypeError(error_msg.format(", ".join(fields))) + + +_empty_label = ffi.new("char []", b"") + + +def to_c_label(label): + """Get the C representation of a label.""" + if not label: + return _empty_label + else: + return ffi.new("char []", label.encode()) + + +def feature_flag_to_feature_names(flag): + """Convert a feature flags into a tuple of names.""" + feature_names = {} # import this from mappings? + features = [] + for i in range(32): + val = int(2**i) + if flag & val: + features.append(feature_names.get(val, val)) + return tuple(sorted(features)) + + +def check_struct(struct_name, d): + """Check that all keys in the given dict exist in the corresponding struct.""" + valid_keys = set(getattr(structs, struct_name)) + invalid_keys = set(d.keys()).difference(valid_keys) + if invalid_keys: + raise ValueError(f"Invalid keys in {struct_name}: {invalid_keys}") + + +error_handler = ErrorHandler(logger) +libf = SafeLibCalls(lib, error_handler) + + +# %% The API + + +class GPU(classes.GPU): + def request_adapter( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): + """Create a `GPUAdapter`, the object that represents an abstract wgpu + implementation, from which one can request a `GPUDevice`. + + This is the implementation based on wgpu-native. + + Arguments: + power_preference (PowerPreference): "high-performance" or "low-power". + force_fallback_adapter (bool): whether to use a (probably CPU-based) + fallback adapter. + canvas (WgpuCanvasInterface): The canvas that the adapter should + be able to render to. This can typically be left to None. + """ + + # ----- Surface ID + + # Get surface id that the adapter must be compatible with. If we + # don't pass a valid surface id, there is no guarantee we'll be + # able to create a surface texture for it (from this adapter). + surface_id = ffi.NULL + if canvas is not None: + window_id = canvas.get_window_id() + if window_id: # e.g. could be an off-screen canvas + surface_id = canvas.get_context()._get_surface_id() + + # ----- Select backend + + # Try to read the WGPU_BACKEND_TYPE environment variable to see + # if a backend should be forced. 
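+        # For example (a sketch; valid names are the keys of
+        # enum_str2int["BackendType"], e.g. "Vulkan", "Metal" or "D3D12"):
+        #
+        #     WGPU_BACKEND_TYPE=Vulkan python my_script.py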
+ force_backend = os.getenv("WGPU_BACKEND_TYPE", None) + backend = enum_str2int["BackendType"]["Undefined"] + if force_backend: + try: + backend = enum_str2int["BackendType"][force_backend] + except KeyError: + logger.warning( + f"Invalid value for WGPU_BACKEND_TYPE: '{force_backend}'.\n" + f"Valid values are: {list(enum_str2int['BackendType'].keys())}" + ) + else: + logger.warning(f"Forcing backend: {force_backend} ({backend})") + + # ----- Request adapter + + # H: nextInChain: WGPUChainedStruct *, compatibleSurface: WGPUSurface, powerPreference: WGPUPowerPreference, backendType: WGPUBackendType, forceFallbackAdapter: WGPUBool/int + struct = new_struct_p( + "WGPURequestAdapterOptions *", + compatibleSurface=surface_id, + powerPreference=power_preference or "high-performance", + forceFallbackAdapter=bool(force_fallback_adapter), + backendType=backend, + # not used: nextInChain + ) + + adapter_id = None + error_msg = None + + @ffi.callback("void(WGPURequestAdapterStatus, WGPUAdapter, char *, void *)") + def callback(status, result, message, userdata): + if status != 0: + nonlocal error_msg + msg = "-" if message == ffi.NULL else ffi.string(message).decode() + error_msg = f"Request adapter failed ({status}): {msg}" + else: + nonlocal adapter_id + adapter_id = result + + # H: void f(WGPUInstance instance, WGPURequestAdapterOptions const * options, WGPURequestAdapterCallback callback, void * userdata) + libf.wgpuInstanceRequestAdapter(get_wgpu_instance(), struct, callback, ffi.NULL) + + # For now, Rust will call the callback immediately + # todo: when wgpu gets an event loop -> while run wgpu event loop or something + if adapter_id is None: # pragma: no cover + error_msg = error_msg or "Could not obtain new adapter id." + raise RuntimeError(error_msg) + + return self._create_adapter(adapter_id) + + def _create_adapter(self, adapter_id): + # ----- Get adapter info + + # H: nextInChain: WGPUChainedStructOut *, vendorID: int, vendorName: char *, architecture: char *, deviceID: int, name: char *, driverDescription: char *, adapterType: WGPUAdapterType, backendType: WGPUBackendType + c_properties = new_struct_p( + "WGPUAdapterProperties *", + # not used: nextInChain + # not used: deviceID + # not used: vendorID + # not used: name + # not used: driverDescription + # not used: adapterType + # not used: backendType + # not used: vendorName + # not used: architecture + ) + + # H: void f(WGPUAdapter adapter, WGPUAdapterProperties * properties) + libf.wgpuAdapterGetProperties(adapter_id, c_properties) + + def to_py_str(key): + char_p = getattr(c_properties, key) + if char_p: + return ffi.string(char_p).decode(errors="ignore") + return "" + + adapter_info = { + "vendor": to_py_str("vendorName"), + "architecture": to_py_str("architecture"), + "device": to_py_str("name"), + "description": to_py_str("driverDescription"), + "adapter_type": enum_int2str["AdapterType"].get( + c_properties.adapterType, "unknown" + ), + "backend_type": enum_int2str["BackendType"].get( + c_properties.backendType, "unknown" + ), + # "vendor_id": c_properties.vendorID, + # "device_id": c_properties.deviceID, + } + + # ----- Get adapter limits + + # H: nextInChain: WGPUChainedStructOut *, limits: WGPULimits + c_supported_limits = new_struct_p( + "WGPUSupportedLimits *", + # not used: nextInChain + # not used: limits + ) + c_limits = c_supported_limits.limits + # H: WGPUBool f(WGPUAdapter adapter, WGPUSupportedLimits * limits) + libf.wgpuAdapterGetLimits(adapter_id, c_supported_limits) + limits = {to_snake_case(k): getattr(c_limits, 
k) for k in sorted(dir(c_limits))} + + # ----- Get adapter features + + # WebGPU features + features = set() + for f in sorted(enums.FeatureName): + key = f"FeatureName.{f}" + i = enummap[key] + # H: WGPUBool f(WGPUAdapter adapter, WGPUFeatureName feature) + if libf.wgpuAdapterHasFeature(adapter_id, i): + features.add(f) + + # Native features + for f in NATIVE_FEATURES: + i = getattr(lib, f"WGPUNativeFeature_{f}") + # H: WGPUBool f(WGPUAdapter adapter, WGPUFeatureName feature) + if libf.wgpuAdapterHasFeature(adapter_id, i): + features.add(f) + + # ----- Done + + return GPUAdapter(adapter_id, features, limits, adapter_info) + + async def request_adapter_async( + self, *, power_preference=None, force_fallback_adapter=False, canvas=None + ): + """Async version of ``request_adapter()``. + This is the implementation based on wgpu-native. + """ + return self.request_adapter( + power_preference=power_preference, + force_fallback_adapter=force_fallback_adapter, + canvas=canvas, + ) # no-cover + + +# Instantiate API entrypoint +gpu = GPU() + + +class GPUCanvasContext(classes.GPUCanvasContext): + # The way this works, is that the context must first be configured. + # Then a texture can be obtained, which can be written to, and then it + # can be presented. The lifetime of the texture is between + # get_current_texture() and present(). We keep track of the texture so + # we can give meaningful errors/warnings on invalid use, rather than + # the more cryptic Rust panics. + + def __init__(self, canvas): + super().__init__(canvas) + self._device = None # set in configure() + self._surface_id = None + self._config = None + self._texture = None + + def _get_surface_id(self): + if self._surface_id is None: + # get_surface_id_from_canvas calls wgpuInstanceCreateSurface + self._surface_id = get_surface_id_from_canvas(self._get_canvas()) + return self._surface_id + + def configure( + self, + *, + device: "GPUDevice", + format: "enums.TextureFormat", + usage: "flags.TextureUsage" = 0x10, + view_formats: "List[enums.TextureFormat]" = [], + color_space: str = "srgb", + alpha_mode: "enums.CanvasAlphaMode" = "opaque", + ): + # Handle inputs + + # Store for later + self._device = device + # Handle usage + if isinstance(usage, str): + usage = str_flag_to_int(flags.TextureUsage, usage) + # View formats + c_view_formats = ffi.NULL + if view_formats: + view_formats_list = [enummap["TextureFormat." 
+ x] for x in view_formats] + c_view_formats = ffi.new("WGPUTextureFormat []", view_formats_list) + # Lookup alpha mode, needs explicit conversion because enum names mismatch + c_alpha_mode = getattr(lib, f"WGPUCompositeAlphaMode_{alpha_mode.capitalize()}") + # The format is used as-is + if format is None: + format = self.get_preferred_format(device.adapter) + # The color_space is not used for now + color_space + + # Get what's supported + + # H: nextInChain: WGPUChainedStructOut *, formatCount: int, formats: WGPUTextureFormat *, presentModeCount: int, presentModes: WGPUPresentMode *, alphaModeCount: int, alphaModes: WGPUCompositeAlphaMode * + capabilities = new_struct_p( + "WGPUSurfaceCapabilities *", + # not used: formatCount + # not used: formats + # not used: presentModeCount + # not used: presentModes + # not used: alphaModeCount + # not used: alphaModes + # not used: nextInChain + ) + # H: void f(WGPUSurface surface, WGPUAdapter adapter, WGPUSurfaceCapabilities * capabilities) + libf.wgpuSurfaceGetCapabilities( + self._get_surface_id(), self._device.adapter._internal, capabilities + ) + + capable_formats = [] + for i in range(capabilities.formatCount): + int_val = capabilities.formats[i] + capable_formats.append(enum_int2str["TextureFormat"][int_val]) + + capable_present_modes = [] + for i in range(capabilities.presentModeCount): + int_val = capabilities.presentModes[i] + str_val = enum_int2str["PresentMode"][int_val] + capable_present_modes.append(str_val.lower()) + + capable_alpha_modes = [] + for i in range(capabilities.alphaModeCount): + int_val = capabilities.alphaModes[i] + str_val = enum_int2str["CompositeAlphaMode"][int_val] + capable_alpha_modes.append(str_val.lower()) + + # H: void f(WGPUSurfaceCapabilities capabilities) + libf.wgpuSurfaceCapabilitiesFreeMembers(capabilities[0]) + + # Check if input is supported + + if format not in capable_formats: + raise ValueError( + f"Given format '{format}' is not in supported formats {capable_formats}" + ) + if alpha_mode not in capable_alpha_modes: + raise ValueError( + f"Given format '{alpha_mode}' is not in supported formats {capable_alpha_modes}" + ) + + # Select the present mode to determine vsync behavior. + # * https://docs.rs/wgpu/latest/wgpu/enum.PresentMode.html + # * https://github.com/pygfx/wgpu-py/issues/256 + # + # Fifo: Wait for vsync, with a queue of ± 3 frames. + # FifoRelaxed: Like fifo but less lag and more tearing? aka adaptive vsync. + # Mailbox: submit without queue, but present on vsync. Not always available. + # Immediate: no queue, no waiting, with risk of tearing, vsync off. + # + # In general Fifo gives the best result, but sometimes people want to + # benchmark something and get the highest FPS possible. Note + # that we've observed rate limiting regardless of setting this + # to Immediate, depending on OS or being on battery power. 
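+        # For example (a sketch, assuming the canvas was created with vsync
+        # disabled so that its ``_vsync`` attribute is False): the preference
+        # below becomes ["immediate", "mailbox", "fifo"], which is what you
+        # want when benchmarking raw FPS.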
+        if getattr(self._get_canvas(), "_vsync", True):
+            present_mode_pref = ["fifo", "mailbox"]
+        else:
+            present_mode_pref = ["immediate", "mailbox", "fifo"]
+        present_modes = [p for p in present_mode_pref if p in capable_present_modes]
+        present_mode = (present_modes or capable_present_modes)[0]
+        c_present_mode = getattr(lib, f"WGPUPresentMode_{present_mode.capitalize()}")
+
+        # Prepare config object
+
+        # H: nextInChain: WGPUChainedStruct *, device: WGPUDevice, format: WGPUTextureFormat, usage: WGPUTextureUsageFlags/int, viewFormatCount: int, viewFormats: WGPUTextureFormat *, alphaMode: WGPUCompositeAlphaMode, width: int, height: int, presentMode: WGPUPresentMode
+        config = new_struct_p(
+            "WGPUSurfaceConfiguration *",
+            device=device._internal,
+            format=format,
+            usage=usage,
+            viewFormatCount=len(view_formats),
+            viewFormats=c_view_formats,
+            alphaMode=c_alpha_mode,
+            width=0,
+            height=0,
+            presentMode=c_present_mode,
+            # not used: nextInChain
+        )
+
+        # Configure
+        self._configure(config)
+
+    def _configure(self, config):
+        # If a texture is still active, better destroy it first
+        self._destroy_texture()
+        # Set the size
+        width, height = self._get_canvas().get_physical_size()
+        config.width = width
+        config.height = height
+        if width <= 0 or height <= 0:
+            raise RuntimeError(
+                f"Cannot configure canvas that has no pixels ({width}x{height})."
+            )
+        # Configure, and store the config if we did not error out
+        # H: void f(WGPUSurface surface, WGPUSurfaceConfiguration const * config)
+        libf.wgpuSurfaceConfigure(self._get_surface_id(), config)
+        self._config = config
+
+    def unconfigure(self):
+        self._destroy_texture()
+        self._config = None
+        # H: void f(WGPUSurface surface)
+        libf.wgpuSurfaceUnconfigure(self._get_surface_id())
+
+    def _destroy_texture(self):
+        if self._texture:
+            self._texture.destroy()
+            self._texture = None
+
+    def get_current_texture(self):
+        # If the canvas has changed since the last configure, we need to re-configure it
+        if not self._config:
+            raise RuntimeError(
+                "Canvas context must be configured before calling get_current_texture()."
+            )
+
+        # When the texture is active right now, we could either:
+        # * return the existing texture
+        # * warn about it, and create a new one
+        # * raise an error
+        # Right now we do the warning, so things still (kinda) keep working
+        if self._texture:
+            self._destroy_texture()
+            logger.warning(
+                "get_current_texture() is called multiple times before present()."
+            )
+
+        # Reconfigure when the canvas has resized.
+        # On some systems (Windows+Qt) this is not necessary, because
+        # the texture status would be Outdated below, resulting in a
+        # reconfigure. But on others (e.g. glfw) the texture size does
+        # not have to match the window size, apparently. The downside
+        # for doing this check on the former systems, is that errors
+        # get logged, which would not be there if we did not
+        # pre-emptively reconfigure. These log entries are harmless but
+        # annoying, and I currently don't know how to prevent them
+        # elegantly. See issue #352
+        old_size = (self._config.width, self._config.height)
+        new_size = tuple(self._get_canvas().get_physical_size())
+        if old_size != new_size:
+            self._configure(self._config)
+
+        # Try to obtain a texture.
+        # If it fails, depending on status, we reconfigure and try again.
+ + # H: texture: WGPUTexture, suboptimal: WGPUBool/int, status: WGPUSurfaceGetCurrentTextureStatus + surface_texture = new_struct_p( + "WGPUSurfaceTexture *", + # not used: texture + # not used: suboptimal + # not used: status + ) + + for attempt in [1, 2]: + # H: void f(WGPUSurface surface, WGPUSurfaceTexture * surfaceTexture) + libf.wgpuSurfaceGetCurrentTexture(self._get_surface_id(), surface_texture) + status = surface_texture.status + texture_id = surface_texture.texture + if status == lib.WGPUSurfaceGetCurrentTextureStatus_Success: + break # success + if texture_id: + # H: void f(WGPUTexture texture) + libf.wgpuTextureRelease(texture_id) + if attempt == 1 and status in [ + lib.WGPUSurfaceGetCurrentTextureStatus_Timeout, + lib.WGPUSurfaceGetCurrentTextureStatus_Outdated, + lib.WGPUSurfaceGetCurrentTextureStatus_Lost, + ]: + # Configure and try again. + # On Window+Qt this happens e.g. when the window has resized + # (status==Outdated), but also when moving the window from one + # monitor to another with different scale-factor. + logger.info(f"Re-configuring canvas context ({status}).") + self._configure(self._config) + else: + # WGPUSurfaceGetCurrentTextureStatus_OutOfMemory + # WGPUSurfaceGetCurrentTextureStatus_DeviceLost + # Or if this is the second attempt. + raise RuntimeError(f"Cannot get surface texture ({status}).") + + # I don't expect this to happen, but lets check just in case. + if not texture_id: + raise RuntimeError("Cannot get surface texture (no texture)") + + # Things look good, but texture may still be suboptimal, whatever that means + if surface_texture.suboptimal: + logger.warning("The surface texture is suboptimal.") + + return self._create_python_texture(texture_id) + + def _create_python_texture(self, texture_id): + # Create the Python wrapper + + # We can derive texture props from the config and common sense: + # width = self._config.width + # height = self._config.height + # depth = 1 + # mip_level_count = 1 + # sample_count = 1 + # dimension = enums.TextureDimension.d2 + # format = enum_int2str["TextureFormat"][self._config.format] + # usage = self._config.usage + + # But we can also read them from the texture + # H: uint32_t f(WGPUTexture texture) + width = libf.wgpuTextureGetWidth(texture_id) + # H: uint32_t f(WGPUTexture texture) + height = libf.wgpuTextureGetHeight(texture_id) + # H: uint32_t f(WGPUTexture texture) + depth = libf.wgpuTextureGetDepthOrArrayLayers(texture_id) + # H: uint32_t f(WGPUTexture texture) + mip_level_count = libf.wgpuTextureGetMipLevelCount(texture_id) + # H: uint32_t f(WGPUTexture texture) + sample_count = libf.wgpuTextureGetSampleCount(texture_id) + # H: WGPUTextureDimension f(WGPUTexture texture) + c_dim = libf.wgpuTextureGetDimension(texture_id) # -> to string + dimension = enum_int2str["TextureDimension"][c_dim] + # H: WGPUTextureFormat f(WGPUTexture texture) + c_format = libf.wgpuTextureGetFormat(texture_id) + format = enum_int2str["TextureFormat"][c_format] + # H: WGPUTextureUsageFlags f(WGPUTexture texture) + usage = libf.wgpuTextureGetUsage(texture_id) + + label = "" + # Cannot yet set label, because it's not implemented in wgpu-native + # label = "surface-texture" + # H: void f(WGPUTexture texture, char const * label) + # libf.wgpuTextureSetLabel(texture_id, to_c_label(label)) + + tex_info = { + "size": (width, height, depth), + "mip_level_count": mip_level_count, + "sample_count": sample_count, + "dimension": dimension, + "format": format, + "usage": usage, + } + + self._texture = GPUTexture(label, texture_id, 
self._device, tex_info)
+ return self._texture
+
+ def present(self):
+ if not self._texture:
+ msg = "present() is called without a preceding call to "
+ msg += "get_current_texture(). Note that present() is usually "
+ msg += "called automatically after the draw function returns."
+ raise RuntimeError(msg)
+ else:
+ # Present the texture, then destroy it
+ # H: void f(WGPUSurface surface)
+ libf.wgpuSurfacePresent(self._get_surface_id())
+ self._destroy_texture()
+
+ def get_preferred_format(self, adapter):
+ # H: WGPUTextureFormat f(WGPUSurface surface, WGPUAdapter adapter)
+ format = libf.wgpuSurfaceGetPreferredFormat(
+ self._get_surface_id(), adapter._internal
+ )
+ return enum_int2str["TextureFormat"][format]
+
+ def _destroy(self):
+ self._destroy_texture()
+ if self._surface_id is not None and libf is not None:
+ self._surface_id, surface_id = None, self._surface_id
+ # H: void f(WGPUSurface surface)
+ libf.wgpuSurfaceRelease(surface_id)
+
+
+class GPUObjectBase(classes.GPUObjectBase):
+ pass
+
+
+class GPUAdapterInfo(classes.GPUAdapterInfo):
+ pass
+
+
+class GPUAdapter(classes.GPUAdapter):
+ def request_device(
+ self,
+ *,
+ label="",
+ required_features: "List[enums.FeatureName]" = [],
+ required_limits: "Dict[str, int]" = {},
+ default_queue: "structs.QueueDescriptor" = {},
+ ):
+ if default_queue:
+ check_struct("QueueDescriptor", default_queue)
+ return self._request_device(
+ label, required_features, required_limits, default_queue, ""
+ )
+
+ def _request_device(
+ self, label, required_features, required_limits, default_queue, trace_path
+ ):
+ # ---- Handle features
+
+ assert isinstance(required_features, (tuple, list, set))
+
+ c_features = set()
+ for f in required_features:
+ if isinstance(f, str):
+ if "_" in f:
+ f = "".join(x.title() for x in f.split("_"))
+ i1 = enummap.get(f"FeatureName.{f}", None)
+ i2 = getattr(lib, f"WGPUNativeFeature_{f}", None)
+ i = i2 if i1 is None else i1
+ if i is None: # pragma: no cover
+ raise KeyError(f"Unknown feature: '{f}'")
+ c_features.add(i)
+ else:
+ raise TypeError("Features must be given as str.")
+
+ c_features = sorted(c_features) # makes it a list
+
+ # ----- Set limits
+
+ # H: nextInChain: WGPUChainedStruct *, limits: WGPULimits
+ c_required_limits = new_struct_p(
+ "WGPURequiredLimits *",
+ # not used: nextInChain
+ # not used: limits
+ )
+ c_limits = c_required_limits.limits
+
+ # Set all limits to the adapter default
+ # This is important, because zero does NOT mean default, and a limit of zero
+ # for a specific limit may break a lot of applications.
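+
+ # Illustrative example of the input this maps (names and values hypothetical):
+ # user code passes snake_case limit names, e.g.
+ #
+ #     adapter.request_device(required_limits={
+ #         "max_bind_groups": 8,
+ #         "max_buffer_size": 268_435_456,
+ #     })
+ #
+ # and the loops below copy them onto the camelCase fields of WGPULimits.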
+ for key, val in self.limits.items(): + setattr(c_limits, to_camel_case(key), val) + + # Overload with any set limits + required_limits = required_limits or {} + for key, val in required_limits.items(): + setattr(c_limits, to_camel_case(key), val) + + # ---- Set queue descriptor + + # Note that the default_queue arg is a descriptor (dict for QueueDescriptor), but is currently empty :) + # H: nextInChain: WGPUChainedStruct *, label: char * + queue_struct = new_struct( + "WGPUQueueDescriptor", + label=to_c_label("default_queue"), + # not used: nextInChain + ) + + # ----- Compose device descriptor extras + + c_trace_path = ffi.NULL + if trace_path: # no-cover + c_trace_path = ffi.new("char []", trace_path.encode()) + + # H: chain: WGPUChainedStruct, tracePath: char * + extras = new_struct_p( + "WGPUDeviceExtras *", + tracePath=c_trace_path, + # not used: chain + ) + extras.chain.sType = lib.WGPUSType_DeviceExtras + + # ----- Device lost + + @ffi.callback("void(WGPUDeviceLostReason, char *, void *)") + def device_lost_callback(c_reason, c_message, userdata): + reason = enum_int2str["DeviceLostReason"].get(c_reason, "Unknown") + message = ffi.string(c_message).decode(errors="ignore") + error_handler.log_error(f"The WGPU device was lost ({reason}):\n{message}") + + # Keep the ref alive + self._device_lost_callback = device_lost_callback + + # ----- Request device + + # H: nextInChain: WGPUChainedStruct *, label: char *, requiredFeatureCount: int, requiredFeatures: WGPUFeatureName *, requiredLimits: WGPURequiredLimits *, defaultQueue: WGPUQueueDescriptor, deviceLostCallback: WGPUDeviceLostCallback, deviceLostUserdata: void * + struct = new_struct_p( + "WGPUDeviceDescriptor *", + label=to_c_label(label), + nextInChain=ffi.cast("WGPUChainedStruct * ", extras), + requiredFeatureCount=len(c_features), + requiredFeatures=ffi.new("WGPUFeatureName []", c_features), + requiredLimits=c_required_limits, + defaultQueue=queue_struct, + deviceLostCallback=device_lost_callback, + # not used: deviceLostUserdata + ) + + device_id = None + error_msg = None + + @ffi.callback("void(WGPURequestDeviceStatus, WGPUDevice, char *, void *)") + def callback(status, result, message, userdata): + if status != 0: + nonlocal error_msg + msg = "-" if message == ffi.NULL else ffi.string(message).decode() + error_msg = f"Request device failed ({status}): {msg}" + else: + nonlocal device_id + device_id = result + + # H: void f(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallback callback, void * userdata) + libf.wgpuAdapterRequestDevice(self._internal, struct, callback, ffi.NULL) + + if device_id is None: # pragma: no cover + error_msg = error_msg or "Could not obtain new device id." 
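+ # (Reaching this point without an error message means the callback was never
+ # invoked; wgpu-native currently invokes it synchronously from
+ # wgpuAdapterRequestDevice, so normally either device_id or error_msg is set.)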
+ raise RuntimeError(error_msg) + + # ----- Get device limits + + # H: nextInChain: WGPUChainedStructOut *, limits: WGPULimits + c_supported_limits = new_struct_p( + "WGPUSupportedLimits *", + # not used: nextInChain + # not used: limits + ) + c_limits = c_supported_limits.limits + # H: WGPUBool f(WGPUDevice device, WGPUSupportedLimits * limits) + libf.wgpuDeviceGetLimits(device_id, c_supported_limits) + limits = {to_snake_case(k): getattr(c_limits, k) for k in dir(c_limits)} + + # ----- Get device features + + # WebGPU features + features = set() + for f in sorted(enums.FeatureName): + key = f"FeatureName.{f}" + i = enummap[key] + # H: WGPUBool f(WGPUDevice device, WGPUFeatureName feature) + if libf.wgpuDeviceHasFeature(device_id, i): + features.add(f) + + # Native features + for f in NATIVE_FEATURES: + i = getattr(lib, f"WGPUNativeFeature_{f}") + # H: WGPUBool f(WGPUDevice device, WGPUFeatureName feature) + if libf.wgpuDeviceHasFeature(device_id, i): + features.add(f) + + # ---- Get queue + + # H: WGPUQueue f(WGPUDevice device) + queue_id = libf.wgpuDeviceGetQueue(device_id) + queue = GPUQueue("", queue_id, None) + + # ----- Done + + return GPUDevice(label, device_id, self, features, limits, queue) + + async def request_device_async( + self, + *, + label="", + required_features: "List[enums.FeatureName]" = [], + required_limits: "Dict[str, int]" = {}, + default_queue: "structs.QueueDescriptor" = {}, + ): + if default_queue: + check_struct("QueueDescriptor", default_queue) + return self._request_device( + label, required_features, required_limits, default_queue, "" + ) # no-cover + + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUAdapter adapter) + libf.wgpuAdapterRelease(internal) + + +class GPUDevice(classes.GPUDevice, GPUObjectBase): + def __init__(self, label, internal, adapter, features, limits, queue): + super().__init__(label, internal, adapter, features, limits, queue) + + @ffi.callback("void(WGPUErrorType, char *, void *)") + def uncaptured_error_callback(c_type, c_message, userdata): + error_type = enum_int2str["ErrorType"].get(c_type, "Unknown") + message = ffi.string(c_message).decode(errors="ignore") + message = "\n".join(line.rstrip() for line in message.splitlines()) + error_handler.handle_error(error_type, message) + + # Keep the ref alive + self._uncaptured_error_callback = uncaptured_error_callback + + # H: void f(WGPUDevice device, WGPUErrorCallback callback, void * userdata) + libf.wgpuDeviceSetUncapturedErrorCallback( + self._internal, uncaptured_error_callback, ffi.NULL + ) + + def _poll(self): + # Internal function + if self._internal: + # H: WGPUBool f(WGPUDevice device, WGPUBool wait, WGPUWrappedSubmissionIndex const * wrappedSubmissionIndex) + libf.wgpuDevicePoll(self._internal, True, ffi.NULL) + + def create_buffer( + self, + *, + label="", + size: int, + usage: "flags.BufferUsage", + mapped_at_creation: bool = False, + ): + return self._create_buffer(label, int(size), usage, bool(mapped_at_creation)) + + def _create_buffer(self, label, size, usage, mapped_at_creation): + # Create a buffer object + if isinstance(usage, str): + usage = str_flag_to_int(flags.BufferUsage, usage) + # H: nextInChain: WGPUChainedStruct *, label: char *, usage: WGPUBufferUsageFlags/int, size: int, mappedAtCreation: WGPUBool/int + struct = new_struct_p( + "WGPUBufferDescriptor *", + label=to_c_label(label), + size=size, + usage=int(usage), + mappedAtCreation=mapped_at_creation, + # not used: 
nextInChain
+ )
+ map_state = (
+ enums.BufferMapState.mapped
+ if mapped_at_creation
+ else enums.BufferMapState.unmapped
+ )
+ # H: WGPUBuffer f(WGPUDevice device, WGPUBufferDescriptor const * descriptor)
+ id = libf.wgpuDeviceCreateBuffer(self._internal, struct)
+ # Note that there are wgpuBufferGetSize and wgpuBufferGetUsage,
+ # but we already know these, so they are kind of useless?
+ # Return wrapped buffer
+ return GPUBuffer(label, id, self, size, usage, map_state)
+
+ def create_texture(
+ self,
+ *,
+ label="",
+ size: "Union[List[int], structs.Extent3D]",
+ mip_level_count: int = 1,
+ sample_count: int = 1,
+ dimension: "enums.TextureDimension" = "2d",
+ format: "enums.TextureFormat",
+ usage: "flags.TextureUsage",
+ view_formats: "List[enums.TextureFormat]" = [],
+ ):
+ if isinstance(usage, str):
+ usage = str_flag_to_int(flags.TextureUsage, usage)
+ usage = int(usage)
+ size = _tuple_from_tuple_or_dict(
+ size, ("width", "height", "depth_or_array_layers")
+ )
+ # H: width: int, height: int, depthOrArrayLayers: int
+ c_size = new_struct(
+ "WGPUExtent3D",
+ width=size[0],
+ height=size[1],
+ depthOrArrayLayers=size[2],
+ )
+
+ if view_formats:
+ raise NotImplementedError(
+ "create_texture(... view_formats) is not yet supported."
+ )
+
+ if not mip_level_count:
+ mip_level_count = 1 # or lib.WGPU_MIP_LEVEL_COUNT_UNDEFINED ?
+ mip_level_count = int(mip_level_count)
+
+ if not sample_count:
+ sample_count = 1
+ sample_count = int(sample_count)
+
+ # H: nextInChain: WGPUChainedStruct *, label: char *, usage: WGPUTextureUsageFlags/int, dimension: WGPUTextureDimension, size: WGPUExtent3D, format: WGPUTextureFormat, mipLevelCount: int, sampleCount: int, viewFormatCount: int, viewFormats: WGPUTextureFormat *
+ struct = new_struct_p(
+ "WGPUTextureDescriptor *",
+ label=to_c_label(label),
+ size=c_size,
+ mipLevelCount=mip_level_count,
+ sampleCount=sample_count,
+ dimension=dimension,
+ format=format,
+ usage=usage,
+ # not used: nextInChain
+ # not used: viewFormatCount
+ # not used: viewFormats
+ )
+ # H: WGPUTexture f(WGPUDevice device, WGPUTextureDescriptor const * descriptor)
+ id = libf.wgpuDeviceCreateTexture(self._internal, struct)
+
+ # Note that there are methods (e.g. wgpuTextureGetHeight) to get
+ # the below props, but we know them now, so why bother?
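+
+ # For orientation, a typical call that ends up here (illustrative values):
+ #
+ #     device.create_texture(
+ #         size=(256, 256, 1),
+ #         format=wgpu.TextureFormat.rgba8unorm,
+ #         usage=wgpu.TextureUsage.RENDER_ATTACHMENT | wgpu.TextureUsage.COPY_SRC,
+ #     )
+ #
+ # The tex_info dict below is stored on the returned GPUTexture (as _tex_info)
+ # and used e.g. by create_view() to resolve defaults.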
+ tex_info = { + "size": size, + "mip_level_count": mip_level_count, + "sample_count": sample_count, + "dimension": dimension, + "format": format, + "usage": usage, + } + return GPUTexture(label, id, self, tex_info) + + def create_sampler( + self, + *, + label="", + address_mode_u: "enums.AddressMode" = "clamp-to-edge", + address_mode_v: "enums.AddressMode" = "clamp-to-edge", + address_mode_w: "enums.AddressMode" = "clamp-to-edge", + mag_filter: "enums.FilterMode" = "nearest", + min_filter: "enums.FilterMode" = "nearest", + mipmap_filter: "enums.MipmapFilterMode" = "nearest", + lod_min_clamp: float = 0, + lod_max_clamp: float = 32, + compare: "enums.CompareFunction" = None, + max_anisotropy: int = 1, + ): + # H: nextInChain: WGPUChainedStruct *, label: char *, addressModeU: WGPUAddressMode, addressModeV: WGPUAddressMode, addressModeW: WGPUAddressMode, magFilter: WGPUFilterMode, minFilter: WGPUFilterMode, mipmapFilter: WGPUMipmapFilterMode, lodMinClamp: float, lodMaxClamp: float, compare: WGPUCompareFunction, maxAnisotropy: int + struct = new_struct_p( + "WGPUSamplerDescriptor *", + label=to_c_label(label), + addressModeU=address_mode_u, + addressModeV=address_mode_v, + addressModeW=address_mode_w, + magFilter=mag_filter, + minFilter=min_filter, + mipmapFilter=mipmap_filter, + lodMinClamp=lod_min_clamp, + lodMaxClamp=lod_max_clamp, + compare=0 if compare is None else compare, + maxAnisotropy=max_anisotropy, + # not used: nextInChain + ) + + # H: WGPUSampler f(WGPUDevice device, WGPUSamplerDescriptor const * descriptor) + id = libf.wgpuDeviceCreateSampler(self._internal, struct) + return GPUSampler(label, id, self) + + def create_bind_group_layout( + self, *, label="", entries: "List[structs.BindGroupLayoutEntry]" + ): + c_entries_list = [] + for entry in entries: + check_struct("BindGroupLayoutEntry", entry) + buffer = {} + sampler = {} + texture = {} + storage_texture = {} + if entry.get("buffer"): + info = entry["buffer"] + check_struct("BufferBindingLayout", info) + min_binding_size = info.get("min_binding_size", None) + if min_binding_size is None: + min_binding_size = 0 # lib.WGPU_LIMIT_U64_UNDEFINED + # H: nextInChain: WGPUChainedStruct *, type: WGPUBufferBindingType, hasDynamicOffset: WGPUBool/int, minBindingSize: int + buffer = new_struct( + "WGPUBufferBindingLayout", + type=info["type"], + hasDynamicOffset=info.get("has_dynamic_offset", False), + minBindingSize=min_binding_size, + # not used: nextInChain + ) + elif entry.get("sampler"): + info = entry["sampler"] + check_struct("SamplerBindingLayout", info) + # H: nextInChain: WGPUChainedStruct *, type: WGPUSamplerBindingType + sampler = new_struct( + "WGPUSamplerBindingLayout", + type=info["type"], + # not used: nextInChain + ) + elif entry.get("texture"): + info = entry["texture"] + check_struct("TextureBindingLayout", info) + # H: nextInChain: WGPUChainedStruct *, sampleType: WGPUTextureSampleType, viewDimension: WGPUTextureViewDimension, multisampled: WGPUBool/int + texture = new_struct( + "WGPUTextureBindingLayout", + sampleType=info.get("sample_type", "float"), + viewDimension=info.get("view_dimension", "2d"), + multisampled=info.get("multisampled", False), + # not used: nextInChain + ) + elif entry.get("storage_texture"): + info = entry["storage_texture"] + check_struct("StorageTextureBindingLayout", info) + # H: nextInChain: WGPUChainedStruct *, access: WGPUStorageTextureAccess, format: WGPUTextureFormat, viewDimension: WGPUTextureViewDimension + storage_texture = new_struct( + "WGPUStorageTextureBindingLayout", + 
access=info["access"], + viewDimension=info.get("view_dimension", "2d"), + format=info["format"], + # not used: nextInChain + ) + else: + raise ValueError( + "Bind group layout entry did not contain field 'buffer', 'sampler', 'texture', nor 'storage_texture'" + ) + # Unreachable - fool the codegen + check_struct("ExternalTextureBindingLayout", info) + visibility = entry["visibility"] + if isinstance(visibility, str): + visibility = str_flag_to_int(flags.ShaderStage, visibility) + # H: nextInChain: WGPUChainedStruct *, binding: int, visibility: WGPUShaderStageFlags/int, buffer: WGPUBufferBindingLayout, sampler: WGPUSamplerBindingLayout, texture: WGPUTextureBindingLayout, storageTexture: WGPUStorageTextureBindingLayout + c_entry = new_struct( + "WGPUBindGroupLayoutEntry", + binding=int(entry["binding"]), + visibility=int(visibility), + buffer=buffer, + sampler=sampler, + texture=texture, + storageTexture=storage_texture, + # not used: nextInChain + ) + c_entries_list.append(c_entry) + + c_entries_array = ffi.NULL + if c_entries_list: + c_entries_array = ffi.new("WGPUBindGroupLayoutEntry []", c_entries_list) + + # H: nextInChain: WGPUChainedStruct *, label: char *, entryCount: int, entries: WGPUBindGroupLayoutEntry * + struct = new_struct_p( + "WGPUBindGroupLayoutDescriptor *", + label=to_c_label(label), + entries=c_entries_array, + entryCount=len(c_entries_list), + # not used: nextInChain + ) + + # Note: wgpu-core re-uses BindGroupLayouts with the same (or similar + # enough) descriptor. You would think that this means that the id is + # the same when you call wgpuDeviceCreateBindGroupLayout with the same + # input, but it's not. So we cannot let wgpu-native/core decide when + # to re-use a BindGroupLayout. I don't feel confident checking here + # whether a BindGroupLayout can be re-used, so we simply don't. Higher + # level code can sometimes make this decision because it knows the app + # logic. 
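+
+ # Illustrative shape of the 'entries' argument handled above (values hypothetical):
+ #
+ #     [
+ #         {"binding": 0, "visibility": wgpu.ShaderStage.COMPUTE,
+ #          "buffer": {"type": wgpu.BufferBindingType.storage}},
+ #         {"binding": 1, "visibility": wgpu.ShaderStage.FRAGMENT,
+ #          "texture": {"sample_type": wgpu.TextureSampleType.float}},
+ #     ]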
+ + # H: WGPUBindGroupLayout f(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor) + id = libf.wgpuDeviceCreateBindGroupLayout(self._internal, struct) + return GPUBindGroupLayout(label, id, self, entries) + + def create_bind_group( + self, + *, + label="", + layout: "GPUBindGroupLayout", + entries: "List[structs.BindGroupEntry]", + ): + c_entries_list = [] + for entry in entries: + check_struct("BindGroupEntry", entry) + # The resource can be a sampler, texture view, or buffer descriptor + resource = entry["resource"] + if isinstance(resource, GPUSampler): + # H: nextInChain: WGPUChainedStruct *, binding: int, buffer: WGPUBuffer, offset: int, size: int, sampler: WGPUSampler, textureView: WGPUTextureView + c_entry = new_struct( + "WGPUBindGroupEntry", + binding=int(entry["binding"]), + buffer=ffi.NULL, + offset=0, + size=0, + sampler=resource._internal, + textureView=ffi.NULL, + # not used: nextInChain + ) + elif isinstance(resource, GPUTextureView): + # H: nextInChain: WGPUChainedStruct *, binding: int, buffer: WGPUBuffer, offset: int, size: int, sampler: WGPUSampler, textureView: WGPUTextureView + c_entry = new_struct( + "WGPUBindGroupEntry", + binding=int(entry["binding"]), + buffer=ffi.NULL, + offset=0, + size=0, + sampler=ffi.NULL, + textureView=resource._internal, + # not used: nextInChain + ) + elif isinstance(resource, dict): # Buffer binding + # H: nextInChain: WGPUChainedStruct *, binding: int, buffer: WGPUBuffer, offset: int, size: int, sampler: WGPUSampler, textureView: WGPUTextureView + c_entry = new_struct( + "WGPUBindGroupEntry", + binding=int(entry["binding"]), + buffer=resource["buffer"]._internal, + offset=resource["offset"], + size=resource["size"], + sampler=ffi.NULL, + textureView=ffi.NULL, + # not used: nextInChain + ) + else: + raise TypeError(f"Unexpected resource type {type(resource)}") + c_entries_list.append(c_entry) + + c_entries_array = ffi.NULL + if c_entries_list: + c_entries_array = ffi.new("WGPUBindGroupEntry []", c_entries_list) + + # H: nextInChain: WGPUChainedStruct *, label: char *, layout: WGPUBindGroupLayout, entryCount: int, entries: WGPUBindGroupEntry * + struct = new_struct_p( + "WGPUBindGroupDescriptor *", + label=to_c_label(label), + layout=layout._internal, + entries=c_entries_array, + entryCount=len(c_entries_list), + # not used: nextInChain + ) + + # H: WGPUBindGroup f(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor) + id = libf.wgpuDeviceCreateBindGroup(self._internal, struct) + return GPUBindGroup(label, id, self, entries) + + def create_pipeline_layout( + self, *, label="", bind_group_layouts: "List[GPUBindGroupLayout]" + ): + bind_group_layouts_ids = [x._internal for x in bind_group_layouts] + + c_layout_array = ffi.new("WGPUBindGroupLayout []", bind_group_layouts_ids) + # H: nextInChain: WGPUChainedStruct *, label: char *, bindGroupLayoutCount: int, bindGroupLayouts: WGPUBindGroupLayout * + struct = new_struct_p( + "WGPUPipelineLayoutDescriptor *", + label=to_c_label(label), + bindGroupLayouts=c_layout_array, + bindGroupLayoutCount=len(bind_group_layouts), + # not used: nextInChain + ) + + # H: WGPUPipelineLayout f(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor) + id = libf.wgpuDeviceCreatePipelineLayout(self._internal, struct) + return GPUPipelineLayout(label, id, self, bind_group_layouts) + + def create_shader_module( + self, + *, + label="", + code: str, + source_map: dict = None, + compilation_hints: "List[structs.ShaderModuleCompilationHint]" = [], + ): + if compilation_hints: + for 
hint in compilation_hints.values(): + check_struct("ShaderModuleCompilationHint", hint) + if isinstance(code, str): + looks_like_wgsl = any( + x in code for x in ("@compute", "@vertex", "@fragment") + ) + looks_like_glsl = code.lstrip().startswith("#version ") + if looks_like_glsl and not looks_like_wgsl: + # === GLSL + if "comp" in label.lower(): + c_stage = flags.ShaderStage.COMPUTE + elif "vert" in label.lower(): + c_stage = flags.ShaderStage.VERTEX + elif "frag" in label.lower(): + c_stage = flags.ShaderStage.FRAGMENT + else: + raise ValueError( + "GLSL shader needs to use the label to specify compute/vertex/fragment stage." + ) + defines = [] + if c_stage == flags.ShaderStage.VERTEX: + defines.append( + # H: name: char *, value: char * + new_struct( + "WGPUShaderDefine", + name=ffi.new("char []", "gl_VertexID".encode()), + value=ffi.new("char []", "gl_VertexIndex".encode()), + ) + ) + c_defines = ffi.new("WGPUShaderDefine []", defines) + # H: chain: WGPUChainedStruct, stage: WGPUShaderStage, code: char *, defineCount: int, defines: WGPUShaderDefine * + source_struct = new_struct_p( + "WGPUShaderModuleGLSLDescriptor *", + code=ffi.new("char []", code.encode()), + stage=c_stage, + defineCount=len(defines), + defines=c_defines, + # not used: chain + ) + source_struct[0].chain.next = ffi.NULL + source_struct[0].chain.sType = lib.WGPUSType_ShaderModuleGLSLDescriptor + else: + # === WGSL + # H: chain: WGPUChainedStruct, code: char * + source_struct = new_struct_p( + "WGPUShaderModuleWGSLDescriptor *", + code=ffi.new("char []", code.encode()), + # not used: chain + ) + source_struct[0].chain.next = ffi.NULL + source_struct[0].chain.sType = lib.WGPUSType_ShaderModuleWGSLDescriptor + elif isinstance(code, bytes): + # === Spirv + data = code + # Validate + magic_nr = b"\x03\x02#\x07" # 0x7230203 + if data[:4] != magic_nr: + raise ValueError("Given shader data does not look like a SpirV module") + # From bytes to WGPUU32Array + data_u8 = ffi.new("uint8_t[]", data) + data_u32 = ffi.cast("uint32_t *", data_u8) + # H: chain: WGPUChainedStruct, codeSize: int, code: uint32_t * + source_struct = new_struct_p( + "WGPUShaderModuleSPIRVDescriptor *", + code=data_u32, + codeSize=len(data) // 4, + # not used: chain + ) + source_struct[0].chain.next = ffi.NULL + source_struct[0].chain.sType = lib.WGPUSType_ShaderModuleSPIRVDescriptor + else: + raise TypeError( + "Shader code must be str for WGSL or GLSL, or bytes for SpirV." 
+ ) + + # Note, we could give hints here that specify entrypoint and pipelinelayout before compiling + # H: nextInChain: WGPUChainedStruct *, label: char *, hintCount: int, hints: WGPUShaderModuleCompilationHint * + struct = new_struct_p( + "WGPUShaderModuleDescriptor *", + label=to_c_label(label), + nextInChain=ffi.cast("WGPUChainedStruct *", source_struct), + hintCount=0, + hints=ffi.NULL, + ) + # H: WGPUShaderModule f(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor) + id = libf.wgpuDeviceCreateShaderModule(self._internal, struct) + if id == ffi.NULL: + raise RuntimeError("Shader module creation failed") + return GPUShaderModule(label, id, self) + + def create_compute_pipeline( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + compute: "structs.ProgrammableStage", + ): + check_struct("ProgrammableStage", compute) + # H: nextInChain: WGPUChainedStruct *, module: WGPUShaderModule, entryPoint: char *, constantCount: int, constants: WGPUConstantEntry * + c_compute_stage = new_struct( + "WGPUProgrammableStageDescriptor", + module=compute["module"]._internal, + entryPoint=ffi.new("char []", compute["entry_point"].encode()), + # not used: nextInChain + # not used: constantCount + # not used: constants + ) + + if isinstance(layout, GPUPipelineLayout): + layout_id = layout._internal + elif layout == enums.AutoLayoutMode.auto: + layout_id = ffi.NULL + else: + raise TypeError( + "create_compute_pipeline() 'layout' arg must be a GPUPipelineLayout or 'auto'" + ) + + # H: nextInChain: WGPUChainedStruct *, label: char *, layout: WGPUPipelineLayout, compute: WGPUProgrammableStageDescriptor + struct = new_struct_p( + "WGPUComputePipelineDescriptor *", + label=to_c_label(label), + layout=layout_id, + compute=c_compute_stage, + # not used: nextInChain + ) + # H: WGPUComputePipeline f(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor) + id = libf.wgpuDeviceCreateComputePipeline(self._internal, struct) + return GPUComputePipeline(label, id, self) + + async def create_compute_pipeline_async( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + compute: "structs.ProgrammableStage", + ): + return self.create_compute_pipeline(label=label, layout=layout, compute=compute) + + def create_render_pipeline( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + vertex: "structs.VertexState", + primitive: "structs.PrimitiveState" = {}, + depth_stencil: "structs.DepthStencilState" = None, + multisample: "structs.MultisampleState" = {}, + fragment: "structs.FragmentState" = None, + ): + depth_stencil = depth_stencil or {} + multisample = multisample or {} + primitive = primitive or {} + + check_struct("VertexState", vertex) + check_struct("DepthStencilState", depth_stencil) + check_struct("MultisampleState", multisample) + check_struct("PrimitiveState", primitive) + + c_vertex_buffer_layout_list = [] + for buffer_des in vertex["buffers"]: + c_attributes_list = [] + for attribute in buffer_des["attributes"]: + # H: format: WGPUVertexFormat, offset: int, shaderLocation: int + c_attribute = new_struct( + "WGPUVertexAttribute", + format=attribute["format"], + offset=attribute["offset"], + shaderLocation=attribute["shader_location"], + ) + c_attributes_list.append(c_attribute) + c_attributes_array = ffi.new("WGPUVertexAttribute []", c_attributes_list) + # H: arrayStride: int, stepMode: WGPUVertexStepMode, attributeCount: int, attributes: WGPUVertexAttribute * + c_vertex_buffer_descriptor = 
new_struct( + "WGPUVertexBufferLayout", + arrayStride=buffer_des["array_stride"], + stepMode=buffer_des.get("step_mode", "vertex"), + attributes=c_attributes_array, + attributeCount=len(c_attributes_list), + ) + c_vertex_buffer_layout_list.append(c_vertex_buffer_descriptor) + c_vertex_buffer_descriptors_array = ffi.new( + "WGPUVertexBufferLayout []", c_vertex_buffer_layout_list + ) + # H: nextInChain: WGPUChainedStruct *, module: WGPUShaderModule, entryPoint: char *, constantCount: int, constants: WGPUConstantEntry *, bufferCount: int, buffers: WGPUVertexBufferLayout * + c_vertex_state = new_struct( + "WGPUVertexState", + module=vertex["module"]._internal, + entryPoint=ffi.new("char []", vertex["entry_point"].encode()), + buffers=c_vertex_buffer_descriptors_array, + bufferCount=len(c_vertex_buffer_layout_list), + # not used: nextInChain + # not used: constantCount + # not used: constants + ) + + # H: nextInChain: WGPUChainedStruct *, topology: WGPUPrimitiveTopology, stripIndexFormat: WGPUIndexFormat, frontFace: WGPUFrontFace, cullMode: WGPUCullMode + c_primitive_state = new_struct( + "WGPUPrimitiveState", + topology=primitive["topology"], + stripIndexFormat=primitive.get("strip_index_format", 0), + frontFace=primitive.get("front_face", "ccw"), + cullMode=primitive.get("cull_mode", "none"), + # not used: nextInChain + ) + + c_depth_stencil_state = ffi.NULL + if depth_stencil: + if depth_stencil.get("format", None) is None: + raise ValueError("depth_stencil needs format") + stencil_front = depth_stencil.get("stencil_front", {}) + check_struct("StencilFaceState", stencil_front) + # H: compare: WGPUCompareFunction, failOp: WGPUStencilOperation, depthFailOp: WGPUStencilOperation, passOp: WGPUStencilOperation + c_stencil_front = new_struct( + "WGPUStencilFaceState", + compare=stencil_front.get("compare", "always"), + failOp=stencil_front.get("fail_op", "keep"), + depthFailOp=stencil_front.get("depth_fail_op", "keep"), + passOp=stencil_front.get("pass_op", "keep"), + ) + stencil_back = depth_stencil.get("stencil_back", {}) + check_struct("StencilFaceState", stencil_back) + # H: compare: WGPUCompareFunction, failOp: WGPUStencilOperation, depthFailOp: WGPUStencilOperation, passOp: WGPUStencilOperation + c_stencil_back = new_struct( + "WGPUStencilFaceState", + compare=stencil_back.get("compare", "always"), + failOp=stencil_back.get("fail_op", "keep"), + depthFailOp=stencil_back.get("depth_fail_op", "keep"), + passOp=stencil_back.get("pass_op", "keep"), + ) + # H: nextInChain: WGPUChainedStruct *, format: WGPUTextureFormat, depthWriteEnabled: WGPUBool/int, depthCompare: WGPUCompareFunction, stencilFront: WGPUStencilFaceState, stencilBack: WGPUStencilFaceState, stencilReadMask: int, stencilWriteMask: int, depthBias: int, depthBiasSlopeScale: float, depthBiasClamp: float + c_depth_stencil_state = new_struct_p( + "WGPUDepthStencilState *", + format=depth_stencil["format"], + depthWriteEnabled=bool(depth_stencil.get("depth_write_enabled", False)), + depthCompare=depth_stencil.get("depth_compare", "always"), + stencilFront=c_stencil_front, + stencilBack=c_stencil_back, + stencilReadMask=depth_stencil.get("stencil_read_mask", 0xFFFFFFFF), + stencilWriteMask=depth_stencil.get("stencil_write_mask", 0xFFFFFFFF), + depthBias=depth_stencil.get("depth_bias", 0), + depthBiasSlopeScale=depth_stencil.get("depth_bias_slope_scale", 0), + depthBiasClamp=depth_stencil.get("depth_bias_clamp", 0), + # not used: nextInChain + ) + + # H: nextInChain: WGPUChainedStruct *, count: int, mask: int, alphaToCoverageEnabled: 
WGPUBool/int + c_multisample_state = new_struct( + "WGPUMultisampleState", + count=multisample.get("count", 1), + mask=multisample.get("mask", 0xFFFFFFFF), + alphaToCoverageEnabled=multisample.get("alpha_to_coverage_enabled", False), + # not used: nextInChain + ) + + c_fragment_state = ffi.NULL + if fragment is not None: + c_color_targets_list = [] + for target in fragment["targets"]: + if not target.get("blend", None): + c_blend = ffi.NULL + else: + alpha_blend = _tuple_from_tuple_or_dict( + target["blend"]["alpha"], + ("src_factor", "dst_factor", "operation"), + ) + # H: operation: WGPUBlendOperation, srcFactor: WGPUBlendFactor, dstFactor: WGPUBlendFactor + c_alpha_blend = new_struct( + "WGPUBlendComponent", + srcFactor=alpha_blend[0], + dstFactor=alpha_blend[1], + operation=alpha_blend[2], + ) + color_blend = _tuple_from_tuple_or_dict( + target["blend"]["color"], + ("src_factor", "dst_factor", "operation"), + ) + # H: operation: WGPUBlendOperation, srcFactor: WGPUBlendFactor, dstFactor: WGPUBlendFactor + c_color_blend = new_struct( + "WGPUBlendComponent", + srcFactor=color_blend[0], + dstFactor=color_blend[1], + operation=color_blend[2], + ) + # H: color: WGPUBlendComponent, alpha: WGPUBlendComponent + c_blend = new_struct_p( + "WGPUBlendState *", + color=c_color_blend, + alpha=c_alpha_blend, + ) + # H: nextInChain: WGPUChainedStruct *, format: WGPUTextureFormat, blend: WGPUBlendState *, writeMask: WGPUColorWriteMaskFlags/int + c_color_state = new_struct( + "WGPUColorTargetState", + format=target["format"], + blend=c_blend, + writeMask=target.get("write_mask", 0xF), + # not used: nextInChain + ) + c_color_targets_list.append(c_color_state) + c_color_targets_array = ffi.new( + "WGPUColorTargetState []", c_color_targets_list + ) + check_struct("FragmentState", fragment) + # H: nextInChain: WGPUChainedStruct *, module: WGPUShaderModule, entryPoint: char *, constantCount: int, constants: WGPUConstantEntry *, targetCount: int, targets: WGPUColorTargetState * + c_fragment_state = new_struct_p( + "WGPUFragmentState *", + module=fragment["module"]._internal, + entryPoint=ffi.new("char []", fragment["entry_point"].encode()), + targets=c_color_targets_array, + targetCount=len(c_color_targets_list), + # not used: nextInChain + # not used: constantCount + # not used: constants + ) + + # H: nextInChain: WGPUChainedStruct *, label: char *, layout: WGPUPipelineLayout, vertex: WGPUVertexState, primitive: WGPUPrimitiveState, depthStencil: WGPUDepthStencilState *, multisample: WGPUMultisampleState, fragment: WGPUFragmentState * + struct = new_struct_p( + "WGPURenderPipelineDescriptor *", + label=to_c_label(label), + layout=layout._internal, + vertex=c_vertex_state, + primitive=c_primitive_state, + depthStencil=c_depth_stencil_state, + multisample=c_multisample_state, + fragment=c_fragment_state, + # not used: nextInChain + ) + + # H: WGPURenderPipeline f(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor) + id = libf.wgpuDeviceCreateRenderPipeline(self._internal, struct) + return GPURenderPipeline(label, id, self) + + async def create_render_pipeline_async( + self, + *, + label="", + layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", + vertex: "structs.VertexState", + primitive: "structs.PrimitiveState" = {}, + depth_stencil: "structs.DepthStencilState" = None, + multisample: "structs.MultisampleState" = {}, + fragment: "structs.FragmentState" = None, + ): + return self.create_render_pipeline( + label=label, + layout=layout, + vertex=vertex, + primitive=primitive, + 
depth_stencil=depth_stencil, + multisample=multisample, + fragment=fragment, + ) + + def create_command_encoder(self, *, label=""): + # H: nextInChain: WGPUChainedStruct *, label: char * + struct = new_struct_p( + "WGPUCommandEncoderDescriptor *", + label=to_c_label(label), + # not used: nextInChain + ) + + # H: WGPUCommandEncoder f(WGPUDevice device, WGPUCommandEncoderDescriptor const * descriptor) + id = libf.wgpuDeviceCreateCommandEncoder(self._internal, struct) + return GPUCommandEncoder(label, id, self) + + def create_render_bundle_encoder( + self, + *, + label="", + color_formats: "List[enums.TextureFormat]", + depth_stencil_format: "enums.TextureFormat" = None, + sample_count: int = 1, + depth_read_only: bool = False, + stencil_read_only: bool = False, + ): + raise NotImplementedError() + # Note: also enable the coresponing memtest when implementing this! + + def create_query_set(self, *, label="", type: "enums.QueryType", count: int): + # H: nextInChain: WGPUChainedStruct *, label: char *, type: WGPUQueryType, count: int + query_set_descriptor = new_struct_p( + "WGPUQuerySetDescriptor *", + label=to_c_label(label), + type=type, + count=count, + # not used: nextInChain + ) + + # H: WGPUQuerySet f(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor) + query_id = libf.wgpuDeviceCreateQuerySet(self._internal, query_set_descriptor) + return GPUQuerySet(label, query_id, self._internal, type, count) + + def _destroy(self): + if self._queue is not None: + self._queue._destroy() + self._queue = None + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUDevice device) + libf.wgpuDeviceRelease(internal) + # wgpuDeviceDestroy(internal) is also an option + + +class GPUBuffer(classes.GPUBuffer, GPUObjectBase): + def __init__(self, label, internal, device, size, usage, map_state): + super().__init__(label, internal, device, size, usage, map_state) + + self._mapped_status = 0, 0, 0 + self._mapped_memoryviews = [] + # If mapped at creation, set to write mode (no point in reading zeros) + if self._map_state == enums.BufferMapState.mapped: + self._mapped_status = 0, self.size, flags.MapMode.WRITE + + def _check_range(self, offset, size): + # Apply defaults + if offset is None: + offset = 0 + if self._mapped_status[2] != 0: + offset = self._mapped_status[0] + else: + offset = int(offset) + if size is None: + size = self.size - offset + if self._mapped_status[2] != 0: + size = self._mapped_status[1] - offset + else: + size = int(size) + # Checks + if offset < 0: + raise ValueError("Mapped offset must not be smaller than zero.") + if offset % 8: + raise ValueError("Mapped offset must be a multiple of 8.") + if size < 1: + raise ValueError("Mapped size must be larger than zero.") + if size % 4: + raise ValueError("Mapped offset must be a multiple of 4.") + if offset + size > self.size: + raise ValueError("Mapped range must not extend beyond total buffer size.") + return offset, size + + def map(self, mode, offset=0, size=None): + sync_on_read = True + + # Check mode + if isinstance(mode, str): + if mode == "READ_NOSYNC": # for internal use + sync_on_read = False + mode = "READ" + mode = str_flag_to_int(flags.MapMode, mode) + map_mode = int(mode) + + # Check offset and size + offset, size = self._check_range(offset, size) + + # Can we even map? 
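+ # (For reference, the read-back pattern that map() enables, with illustrative
+ # names; `buf` would be a GPUBuffer created with MAP_READ | COPY_DST usage:
+ #
+ #     buf.map(wgpu.MapMode.READ)
+ #     data = buf.read_mapped()
+ #     buf.unmap()
+ #
+ # The checks below first make sure the buffer is currently unmapped.)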
+ if self._map_state != enums.BufferMapState.unmapped: + raise RuntimeError("Can only map a buffer if its currently unmapped.") + + # Sync up when reading, otherwise the memory may be all zeros. + # See https://github.com/gfx-rs/wgpu-native/issues/305 + if sync_on_read and map_mode & lib.WGPUMapMode_Read: + if self._mapped_status[2] == 0 and self._usage & flags.BufferUsage.MAP_READ: + encoder = self._device.create_command_encoder() + self._device.queue.submit([encoder.finish()]) + + status = 999 + + @ffi.callback("void(WGPUBufferMapAsyncStatus, void*)") + def callback(status_, user_data_p): + nonlocal status + status = status_ + + # Map it + self._map_state = enums.BufferMapState.pending + # H: void f(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata) + libf.wgpuBufferMapAsync( + self._internal, map_mode, offset, size, callback, ffi.NULL + ) + + # Let it do some cycles + self._device._poll() + + if status != 0: # no-cover + raise RuntimeError(f"Could not map buffer ({status}).") + self._map_state = enums.BufferMapState.mapped + self._mapped_status = offset, offset + size, mode + self._mapped_memoryviews = [] + + async def map_async(self, mode, offset=0, size=None): + return self.map(mode, offset, size) # for now + + def unmap(self): + if self._map_state != enums.BufferMapState.mapped: + raise RuntimeError("Can only unmap a buffer if its currently mapped.") + # H: void f(WGPUBuffer buffer) + libf.wgpuBufferUnmap(self._internal) + self._map_state = enums.BufferMapState.unmapped + self._mapped_status = 0, 0, 0 + self._release_memoryviews() + + def _release_memoryviews(self): + # Release the mapped memoryview objects. These objects + # themselves become unusable, but any views on them do not. + for m in self._mapped_memoryviews: + try: + m.release() + except Exception: # no-cover + pass + self._mapped_memoryviews = [] + + def read_mapped(self, buffer_offset=None, size=None, *, copy=True): + # Can we even read? + if self._map_state != enums.BufferMapState.mapped: + raise RuntimeError("Can only read from a buffer if its mapped.") + elif not (self._mapped_status[2] & flags.MapMode.READ): + raise RuntimeError( + "Can only read from a buffer if its mapped in read mode." + ) + + # Check offset and size + offset, size = self._check_range(buffer_offset, size) + if offset < self._mapped_status[0] or (offset + size) > self._mapped_status[1]: + raise ValueError( + "The range for buffer reading is not contained in the currently mapped range." + ) + + # Get mapped memoryview. + # H: void * f(WGPUBuffer buffer, size_t offset, size_t size) + src_ptr = libf.wgpuBufferGetMappedRange(self._internal, offset, size) + src_address = int(ffi.cast("intptr_t", src_ptr)) + src_m = get_memoryview_from_address(src_address, size) + + if copy: + # Copy the data. The memoryview created above becomes invalid when the buffer + # is unmapped, so we don't want to pass that memory to the user. + data = memoryview((ctypes.c_uint8 * size)()).cast("B") + data[:] = src_m + return data + else: + # Return view on the actually mapped data + data = src_m + if hasattr(data, "toreadonly"): # Py 3.8+ + data = data.toreadonly() + self._mapped_memoryviews.append(data) + return data + + def write_mapped(self, data, buffer_offset=None, size=None): + # Can we even write? 
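+ # (Counterpart of read_mapped() above, illustrative: a buffer created with
+ # MAP_WRITE | COPY_SRC usage is typically filled like this (a buffer created
+ # with mapped_at_creation=True is already mapped for writing):
+ #
+ #     buf.map(wgpu.MapMode.WRITE)
+ #     buf.write_mapped(data)   # anything supporting the buffer protocol
+ #     buf.unmap()
+ #
+ # The checks below enforce that the buffer is mapped in write mode.)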
+ if self._map_state != enums.BufferMapState.mapped:
+ raise RuntimeError("Can only write to a buffer if it's mapped.")
+ elif not (self._mapped_status[2] & flags.MapMode.WRITE):
+ raise RuntimeError(
+ "Can only write to a buffer if it's mapped in write mode."
+ )
+
+ # Cast data to a memoryview. This also works for e.g. numpy arrays,
+ # and the resulting memoryview will be a view on the data.
+ data = memoryview(data).cast("B")
+
+ # Check offset and size
+ if size is None:
+ size = data.nbytes
+ offset, size = self._check_range(buffer_offset, size)
+ if offset < self._mapped_status[0] or (offset + size) > self._mapped_status[1]:
+ raise ValueError(
+ "The range for buffer writing is not contained in the currently mapped range."
+ )
+
+ # Check data size and given size. If the latter was given, it should match!
+ if data.nbytes != size: # no-cover
+ raise ValueError(
+ "Data passed to GPUBuffer.write_mapped() does not match the given size."
+ )
+
+ # Get mapped memoryview
+ # H: void * f(WGPUBuffer buffer, size_t offset, size_t size)
+ src_ptr = libf.wgpuBufferGetMappedRange(self._internal, offset, size)
+ src_address = int(ffi.cast("intptr_t", src_ptr))
+ src_m = get_memoryview_from_address(src_address, size)
+
+ # Copy data
+ src_m[:] = data
+
+ def destroy(self):
+ self._destroy() # no-cover
+
+ def _destroy(self):
+ self._release_memoryviews()
+ if self._internal is not None and libf is not None:
+ self._internal, internal = None, self._internal
+ # H: void f(WGPUBuffer buffer)
+ libf.wgpuBufferRelease(internal)
+
+
+class GPUTexture(classes.GPUTexture, GPUObjectBase):
+ def create_view(
+ self,
+ *,
+ label="",
+ format: "enums.TextureFormat" = None,
+ dimension: "enums.TextureViewDimension" = None,
+ aspect: "enums.TextureAspect" = "all",
+ base_mip_level: int = 0,
+ mip_level_count: int = None,
+ base_array_layer: int = 0,
+ array_layer_count: int = None,
+ ):
+ # Resolve defaults
+ if not format:
+ format = self._tex_info["format"]
+ if not dimension:
+ dimension = self._tex_info["dimension"] # from create_texture
+ if not aspect:
+ aspect = "all"
+ if not mip_level_count:
+ mip_level_count = self._tex_info["mip_level_count"] - base_mip_level
+ if not array_layer_count:
+ if dimension in ("1d", "2d", "3d"):
+ array_layer_count = 1 # or WGPU_ARRAY_LAYER_COUNT_UNDEFINED ?
+ elif dimension == "cube": + array_layer_count = 6 + elif dimension in ("2d-array", "cube-array"): + array_layer_count = self._tex_info["size"][2] - base_array_layer + + # H: nextInChain: WGPUChainedStruct *, label: char *, format: WGPUTextureFormat, dimension: WGPUTextureViewDimension, baseMipLevel: int, mipLevelCount: int, baseArrayLayer: int, arrayLayerCount: int, aspect: WGPUTextureAspect + struct = new_struct_p( + "WGPUTextureViewDescriptor *", + label=to_c_label(label), + format=format, + dimension=dimension, + aspect=aspect, + baseMipLevel=base_mip_level, + mipLevelCount=mip_level_count, + baseArrayLayer=base_array_layer, + arrayLayerCount=array_layer_count, + # not used: nextInChain + ) + # H: WGPUTextureView f(WGPUTexture texture, WGPUTextureViewDescriptor const * descriptor) + id = libf.wgpuTextureCreateView(self._internal, struct) + return GPUTextureView(label, id, self._device, self, self.size) + + def destroy(self): + self._destroy() # no-cover + + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUTexture texture) + libf.wgpuTextureRelease(internal) + + +class GPUTextureView(classes.GPUTextureView, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUTextureView textureView) + libf.wgpuTextureViewRelease(internal) + + +class GPUSampler(classes.GPUSampler, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUSampler sampler) + libf.wgpuSamplerRelease(internal) + + +class GPUBindGroupLayout(classes.GPUBindGroupLayout, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUBindGroupLayout bindGroupLayout) + libf.wgpuBindGroupLayoutRelease(internal) + + +class GPUBindGroup(classes.GPUBindGroup, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUBindGroup bindGroup) + libf.wgpuBindGroupRelease(internal) + + +class GPUPipelineLayout(classes.GPUPipelineLayout, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUPipelineLayout pipelineLayout) + libf.wgpuPipelineLayoutRelease(internal) + + +class GPUShaderModule(classes.GPUShaderModule, GPUObjectBase): + def get_compilation_info(self): + # Here's a little setup to implement this method. Unfortunately, + # this is not yet implemented in wgpu-native. Another problem + # is that if there is an error in the shader source, we raise + # an exception, so the user never gets a GPUShaderModule object + # that can be used to call this method :/ So perhaps we should + # do this stuff in device.create_shader_module() and attach it + # to the exception that we raise? 
+ + # info = None + # + # @ffi.callback("void(WGPUCompilationInfoRequestStatus, WGPUCompilationInfo*, void*)") + # def callback(status_, info_, userdata): + # if status_ == 0: + # nonlocal info + # info = info_ + # else: + # pass + # + # H: void f(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata) + # libf.wgpuShaderModuleGetCompilationInfo(self._internal, callback, ffi.NULL) + # + # self._device._poll() + # + # if info is None: + # raise RuntimeError("Could not obtain shader compilation info.") + # + # ... and then turn these WGPUCompilationInfoRequestStatus objects into Python objects ... + + return [] + + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUShaderModule shaderModule) + libf.wgpuShaderModuleRelease(internal) + + +class GPUPipelineBase(classes.GPUPipelineBase): + def get_bind_group_layout(self, index): + """Get the bind group layout at the given index.""" + if isinstance(self, GPUComputePipeline): + # H: WGPUBindGroupLayout f(WGPUComputePipeline computePipeline, uint32_t groupIndex) + layout_id = libf.wgpuComputePipelineGetBindGroupLayout( + self._internal, index + ) + else: + # H: WGPUBindGroupLayout f(WGPURenderPipeline renderPipeline, uint32_t groupIndex) + layout_id = libf.wgpuRenderPipelineGetBindGroupLayout(self._internal, index) + return GPUBindGroupLayout("", layout_id, self._device, []) + + +class GPUComputePipeline(classes.GPUComputePipeline, GPUPipelineBase, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUComputePipeline computePipeline) + libf.wgpuComputePipelineRelease(internal) + + +class GPURenderPipeline(classes.GPURenderPipeline, GPUPipelineBase, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPURenderPipeline renderPipeline) + libf.wgpuRenderPipelineRelease(internal) + + +class GPUCommandBuffer(classes.GPUCommandBuffer, GPUObjectBase): + def _destroy(self): + # Since command buffers get destroyed when you submit them, we + # must only release them if they've not been submitted, or we get + # 'Cannot remove a vacant resource'. Got this info from the + # wgpu chat. 
Also see + # https://docs.rs/wgpu-core/latest/src/wgpu_core/device/mod.rs.html#4180-4194 + # --> That's why _internal is set to None in Queue.submit() + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUCommandBuffer commandBuffer) + libf.wgpuCommandBufferRelease(internal) + + +class GPUCommandsMixin(classes.GPUCommandsMixin): + pass + + +class GPUBindingCommandsMixin(classes.GPUBindingCommandsMixin): + def set_bind_group( + self, + index, + bind_group, + dynamic_offsets_data, + dynamic_offsets_data_start, + dynamic_offsets_data_length, + ): + offsets = list(dynamic_offsets_data) + c_offsets = ffi.new("uint32_t []", offsets) + bind_group_id = bind_group._internal + if isinstance(self, GPUComputePassEncoder): + # H: void f(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) + libf.wgpuComputePassEncoderSetBindGroup( + self._internal, index, bind_group_id, len(offsets), c_offsets + ) + else: + # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) + libf.wgpuRenderPassEncoderSetBindGroup( + self._internal, + index, + bind_group_id, + len(offsets), + c_offsets, + ) + + +class GPUDebugCommandsMixin(classes.GPUDebugCommandsMixin): + def push_debug_group(self, group_label): + c_group_label = ffi.new("char []", group_label.encode()) + color = 0 + # todo: these functions are temporarily not available in wgpu-native + return # noqa + if isinstance(self, GPUComputePassEncoder): + # H: void f(WGPUComputePassEncoder computePassEncoder, char const * groupLabel) + libf.wgpuComputePassEncoderPushDebugGroup( + self._internal, c_group_label, color + ) + else: + # H: void f(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel) + libf.wgpuRenderPassEncoderPushDebugGroup( + self._internal, c_group_label, color + ) + + def pop_debug_group(self): + # todo: these functions are temporarily not available in wgpu-native + return # noqa + if isinstance(self, GPUComputePassEncoder): + # H: void f(WGPUComputePassEncoder computePassEncoder) + libf.wgpuComputePassEncoderPopDebugGroup(self._internal) + else: + # H: void f(WGPURenderPassEncoder renderPassEncoder) + libf.wgpuRenderPassEncoderPopDebugGroup(self._internal) + + def insert_debug_marker(self, marker_label): + c_marker_label = ffi.new("char []", marker_label.encode()) + color = 0 + # todo: these functions are temporarily not available in wgpu-native + return # noqa + if isinstance(self, GPUComputePassEncoder): + # H: void f(WGPUComputePassEncoder computePassEncoder, char const * markerLabel) + libf.wgpuComputePassEncoderInsertDebugMarker( + self._internal, c_marker_label, color + ) + else: + # H: void f(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel) + libf.wgpuRenderPassEncoderInsertDebugMarker( + self._internal, c_marker_label, color + ) + + +class GPURenderCommandsMixin(classes.GPURenderCommandsMixin): + def set_pipeline(self, pipeline): + pipeline_id = pipeline._internal + # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline) + libf.wgpuRenderPassEncoderSetPipeline(self._internal, pipeline_id) + + def set_index_buffer(self, buffer, index_format, offset=0, size=None): + if not size: + size = buffer.size - offset + c_index_format = enummap[f"IndexFormat.{index_format}"] + # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, 
WGPUIndexFormat format, uint64_t offset, uint64_t size) + libf.wgpuRenderPassEncoderSetIndexBuffer( + self._internal, buffer._internal, c_index_format, int(offset), int(size) + ) + + def set_vertex_buffer(self, slot, buffer, offset=0, size=None): + if not size: + size = buffer.size - offset + # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size) + libf.wgpuRenderPassEncoderSetVertexBuffer( + self._internal, int(slot), buffer._internal, int(offset), int(size) + ) + + def draw(self, vertex_count, instance_count=1, first_vertex=0, first_instance=0): + # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) + libf.wgpuRenderPassEncoderDraw( + self._internal, vertex_count, instance_count, first_vertex, first_instance + ) + + def draw_indirect(self, indirect_buffer, indirect_offset): + buffer_id = indirect_buffer._internal + # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) + libf.wgpuRenderPassEncoderDrawIndirect( + self._internal, buffer_id, int(indirect_offset) + ) + + def draw_indexed( + self, + index_count, + instance_count=1, + first_index=0, + base_vertex=0, + first_instance=0, + ): + # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) + libf.wgpuRenderPassEncoderDrawIndexed( + self._internal, + index_count, + instance_count, + first_index, + base_vertex, + first_instance, + ) + + def draw_indexed_indirect(self, indirect_buffer, indirect_offset): + buffer_id = indirect_buffer._internal + # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) + libf.wgpuRenderPassEncoderDrawIndexedIndirect( + self._internal, buffer_id, int(indirect_offset) + ) + + +class GPUCommandEncoder( + classes.GPUCommandEncoder, GPUCommandsMixin, GPUDebugCommandsMixin, GPUObjectBase +): + def begin_compute_pass( + self, *, label="", timestamp_writes: "structs.ComputePassTimestampWrites" = None + ): + timestamp_writes_struct = ffi.NULL + if timestamp_writes is not None: + check_struct("ComputePassTimestampWrites", timestamp_writes) + # H: querySet: WGPUQuerySet, beginningOfPassWriteIndex: int, endOfPassWriteIndex: int + timestamp_writes_struct = new_struct_p( + "WGPUComputePassTimestampWrites *", + querySet=timestamp_writes["query_set"]._internal, + beginningOfPassWriteIndex=timestamp_writes[ + "beginning_of_pass_write_index" + ], + endOfPassWriteIndex=timestamp_writes["end_of_pass_write_index"], + ) + # H: nextInChain: WGPUChainedStruct *, label: char *, timestampWrites: WGPUComputePassTimestampWrites * + struct = new_struct_p( + "WGPUComputePassDescriptor *", + label=to_c_label(label), + timestampWrites=timestamp_writes_struct + # not used: nextInChain + ) + # H: WGPUComputePassEncoder f(WGPUCommandEncoder commandEncoder, WGPUComputePassDescriptor const * descriptor) + raw_pass = libf.wgpuCommandEncoderBeginComputePass(self._internal, struct) + return GPUComputePassEncoder(label, raw_pass, self) + + def begin_render_pass( + self, + *, + label="", + color_attachments: "List[structs.RenderPassColorAttachment]", + depth_stencil_attachment: "structs.RenderPassDepthStencilAttachment" = None, + occlusion_query_set: "GPUQuerySet" = None, + timestamp_writes: "structs.RenderPassTimestampWrites" = None, + max_draw_count: int = 50000000, + ): + # Note that 
occlusion_query_set is ignored because wgpu-native does not have it.
+ if timestamp_writes is not None:
+ check_struct("RenderPassTimestampWrites", timestamp_writes)
+
+ objects_to_keep_alive = {}
+
+ c_color_attachments_list = []
+ for color_attachment in color_attachments:
+ check_struct("RenderPassColorAttachment", color_attachment)
+ texture_view = color_attachment["view"]
+ if not isinstance(texture_view, GPUTextureView):
+ raise TypeError("Color attachment view must be a GPUTextureView.")
+ texture_view_id = texture_view._internal
+ objects_to_keep_alive[texture_view_id] = texture_view
+ c_resolve_target = (
+ ffi.NULL
+ if color_attachment.get("resolve_target", None) is None
+ else color_attachment["resolve_target"]._internal
+ ) # this is a TextureViewId or null
+ clear_value = color_attachment.get("clear_value", (0, 0, 0, 0))
+ if isinstance(clear_value, dict):
+ check_struct("Color", clear_value)
+ clear_value = _tuple_from_tuple_or_dict(clear_value, "rgba")
+ # H: r: float, g: float, b: float, a: float
+ c_clear_value = new_struct(
+ "WGPUColor",
+ r=clear_value[0],
+ g=clear_value[1],
+ b=clear_value[2],
+ a=clear_value[3],
+ )
+ # H: nextInChain: WGPUChainedStruct *, view: WGPUTextureView, resolveTarget: WGPUTextureView, loadOp: WGPULoadOp, storeOp: WGPUStoreOp, clearValue: WGPUColor
+ c_attachment = new_struct(
+ "WGPURenderPassColorAttachment",
+ view=texture_view_id,
+ resolveTarget=c_resolve_target,
+ loadOp=color_attachment["load_op"],
+ storeOp=color_attachment["store_op"],
+ clearValue=c_clear_value,
+ # not used: resolveTarget
+ # not used: nextInChain
+ )
+ c_color_attachments_list.append(c_attachment)
+ c_color_attachments_array = ffi.new(
+ "WGPURenderPassColorAttachment []", c_color_attachments_list
+ )
+
+ c_depth_stencil_attachment = ffi.NULL
+ if depth_stencil_attachment is not None:
+ check_struct("RenderPassDepthStencilAttachment", depth_stencil_attachment)
+ depth_clear_value = depth_stencil_attachment.get("depth_clear_value", 0)
+ stencil_clear_value = depth_stencil_attachment.get("stencil_clear_value", 0)
+ # H: view: WGPUTextureView, depthLoadOp: WGPULoadOp, depthStoreOp: WGPUStoreOp, depthClearValue: float, depthReadOnly: WGPUBool/int, stencilLoadOp: WGPULoadOp, stencilStoreOp: WGPUStoreOp, stencilClearValue: int, stencilReadOnly: WGPUBool/int
+ c_depth_stencil_attachment = new_struct_p(
+ "WGPURenderPassDepthStencilAttachment *",
+ view=depth_stencil_attachment["view"]._internal,
+ depthLoadOp=depth_stencil_attachment["depth_load_op"],
+ depthStoreOp=depth_stencil_attachment["depth_store_op"],
+ depthClearValue=float(depth_clear_value),
+ depthReadOnly=depth_stencil_attachment.get("depth_read_only", False),
+ stencilLoadOp=depth_stencil_attachment["stencil_load_op"],
+ stencilStoreOp=depth_stencil_attachment["stencil_store_op"],
+ stencilClearValue=int(stencil_clear_value),
+ stencilReadOnly=depth_stencil_attachment.get(
+ "stencil_read_only", False
+ ),
+ )
+
+ # H: nextInChain: WGPUChainedStruct *, label: char *, colorAttachmentCount: int, colorAttachments: WGPURenderPassColorAttachment *, depthStencilAttachment: WGPURenderPassDepthStencilAttachment *, occlusionQuerySet: WGPUQuerySet, timestampWrites: WGPURenderPassTimestampWrites *
+ struct = new_struct_p(
+ "WGPURenderPassDescriptor *",
+ label=to_c_label(label),
+ colorAttachments=c_color_attachments_array,
+ colorAttachmentCount=len(c_color_attachments_list),
+ depthStencilAttachment=c_depth_stencil_attachment,
+ # not used: occlusionQuerySet
+ # not used: timestampWrites
+ # not used: nextInChain
+ ) + + # H: WGPURenderPassEncoder f(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor) + raw_pass = libf.wgpuCommandEncoderBeginRenderPass(self._internal, struct) + encoder = GPURenderPassEncoder(label, raw_pass, self) + encoder._objects_to_keep_alive = objects_to_keep_alive + return encoder + + def clear_buffer(self, buffer, offset=0, size=None): + offset = int(offset) + if offset % 4 != 0: # pragma: no cover + raise ValueError("offset must be a multiple of 4") + if size is None: # pragma: no cover + size = buffer.size - offset + size = int(size) + if size <= 0: # pragma: no cover + raise ValueError("clear_buffer size must be > 0") + if size % 4 != 0: # pragma: no cover + raise ValueError("size must be a multiple of 4") + if offset + size > buffer.size: # pragma: no cover + raise ValueError("buffer size out of range") + # H: void f(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size) + libf.wgpuCommandEncoderClearBuffer( + self._internal, buffer._internal, int(offset), size + ) + + def copy_buffer_to_buffer( + self, source, source_offset, destination, destination_offset, size + ): + if source_offset % 4 != 0: # pragma: no cover + raise ValueError("source_offset must be a multiple of 4") + if destination_offset % 4 != 0: # pragma: no cover + raise ValueError("destination_offset must be a multiple of 4") + if size % 4 != 0: # pragma: no cover + raise ValueError("size must be a multiple of 4") + + if not isinstance(source, GPUBuffer): # pragma: no cover + raise TypeError("copy_buffer_to_buffer() source must be a GPUBuffer.") + if not isinstance(destination, GPUBuffer): # pragma: no cover + raise TypeError("copy_buffer_to_buffer() destination must be a GPUBuffer.") + # H: void f(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size) + libf.wgpuCommandEncoderCopyBufferToBuffer( + self._internal, + source._internal, + int(source_offset), + destination._internal, + int(destination_offset), + int(size), + ) + + def copy_buffer_to_texture(self, source, destination, copy_size): + row_alignment = 256 + bytes_per_row = int(source["bytes_per_row"]) + if (bytes_per_row % row_alignment) != 0: + raise ValueError( + f"bytes_per_row must ({bytes_per_row}) be a multiple of {row_alignment}" + ) + if isinstance(destination["texture"], GPUTextureView): + raise ValueError("copy destination texture must be a texture, not a view") + + size = _tuple_from_tuple_or_dict( + copy_size, ("width", "height", "depth_or_array_layers") + ) + + c_source = new_struct_p( + "WGPUImageCopyBuffer *", + buffer=source["buffer"]._internal, + # H: nextInChain: WGPUChainedStruct *, offset: int, bytesPerRow: int, rowsPerImage: int + layout=new_struct( + "WGPUTextureDataLayout", + offset=int(source.get("offset", 0)), + bytesPerRow=bytes_per_row, + rowsPerImage=int(source.get("rows_per_image", size[1])), + # not used: nextInChain + ), + ) + + ori = _tuple_from_tuple_or_dict(destination.get("origin", (0, 0, 0)), "xyz") + # H: x: int, y: int, z: int + c_origin = new_struct( + "WGPUOrigin3D", + x=ori[0], + y=ori[1], + z=ori[2], + ) + # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect + c_destination = new_struct_p( + "WGPUImageCopyTexture *", + texture=destination["texture"]._internal, + mipLevel=int(destination.get("mip_level", 0)), + origin=c_origin, + aspect=enums.TextureAspect.all, + # not used: nextInChain + ) + + 
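+        # Example call (with hypothetical src_buf / dst_tex objects): the source and
+        # destination dicts as well as copy_size accept both tuple and dict forms, e.g.
+        #
+        #     encoder.copy_buffer_to_texture(
+        #         {"buffer": src_buf, "bytes_per_row": 1024},  # must be a multiple of 256
+        #         {"texture": dst_tex, "mip_level": 0},
+        #         (256, 256, 1),  # or {"width": 256, "height": 256, "depth_or_array_layers": 1}
+        #     )
+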
# H: width: int, height: int, depthOrArrayLayers: int + c_copy_size = new_struct_p( + "WGPUExtent3D *", + width=size[0], + height=size[1], + depthOrArrayLayers=size[2], + ) + + # H: void f(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) + libf.wgpuCommandEncoderCopyBufferToTexture( + self._internal, + c_source, + c_destination, + c_copy_size, + ) + + def copy_texture_to_buffer(self, source, destination, copy_size): + row_alignment = 256 + bytes_per_row = int(destination["bytes_per_row"]) + if (bytes_per_row % row_alignment) != 0: + raise ValueError( + f"bytes_per_row must ({bytes_per_row}) be a multiple of {row_alignment}" + ) + if isinstance(source["texture"], GPUTextureView): + raise ValueError("copy source texture must be a texture, not a view") + + size = _tuple_from_tuple_or_dict( + copy_size, ("width", "height", "depth_or_array_layers") + ) + + ori = _tuple_from_tuple_or_dict(source.get("origin", (0, 0, 0)), "xyz") + # H: x: int, y: int, z: int + c_origin = new_struct( + "WGPUOrigin3D", + x=ori[0], + y=ori[1], + z=ori[2], + ) + # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect + c_source = new_struct_p( + "WGPUImageCopyTexture *", + texture=source["texture"]._internal, + mipLevel=int(source.get("mip_level", 0)), + origin=c_origin, + aspect=0, + # not used: nextInChain + ) + + c_destination = new_struct_p( + "WGPUImageCopyBuffer *", + buffer=destination["buffer"]._internal, + # H: nextInChain: WGPUChainedStruct *, offset: int, bytesPerRow: int, rowsPerImage: int + layout=new_struct( + "WGPUTextureDataLayout", + offset=int(destination.get("offset", 0)), + bytesPerRow=bytes_per_row, + rowsPerImage=int(destination.get("rows_per_image", size[1])), + # not used: nextInChain + ), + ) + + # H: width: int, height: int, depthOrArrayLayers: int + c_copy_size = new_struct_p( + "WGPUExtent3D *", + width=size[0], + height=size[1], + depthOrArrayLayers=size[2], + ) + + # H: void f(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize) + libf.wgpuCommandEncoderCopyTextureToBuffer( + self._internal, + c_source, + c_destination, + c_copy_size, + ) + + def copy_texture_to_texture(self, source, destination, copy_size): + if isinstance(source["texture"], GPUTextureView): + raise ValueError("copy source texture must be a texture, not a view") + if isinstance(destination["texture"], GPUTextureView): + raise ValueError("copy destination texture must be a texture, not a view") + + ori = _tuple_from_tuple_or_dict(source.get("origin", (0, 0, 0)), "xyz") + # H: x: int, y: int, z: int + c_origin1 = new_struct( + "WGPUOrigin3D", + x=ori[0], + y=ori[1], + z=ori[2], + ) + # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect + c_source = new_struct_p( + "WGPUImageCopyTexture *", + texture=source["texture"]._internal, + mipLevel=int(source.get("mip_level", 0)), + origin=c_origin1, + # not used: nextInChain + # not used: aspect + ) + + ori = _tuple_from_tuple_or_dict(destination.get("origin", (0, 0, 0)), "xyz") + # H: x: int, y: int, z: int + c_origin2 = new_struct( + "WGPUOrigin3D", + x=ori[0], + y=ori[1], + z=ori[2], + ) + # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect + c_destination = new_struct_p( + 
"WGPUImageCopyTexture *", + texture=destination["texture"]._internal, + mipLevel=int(destination.get("mip_level", 0)), + origin=c_origin2, + # not used: nextInChain + # not used: aspect + ) + + size = _tuple_from_tuple_or_dict( + copy_size, ("width", "height", "depth_or_array_layers") + ) + # H: width: int, height: int, depthOrArrayLayers: int + c_copy_size = new_struct_p( + "WGPUExtent3D *", + width=size[0], + height=size[1], + depthOrArrayLayers=size[2], + ) + + # H: void f(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) + libf.wgpuCommandEncoderCopyTextureToTexture( + self._internal, + c_source, + c_destination, + c_copy_size, + ) + + def finish(self, *, label=""): + # H: nextInChain: WGPUChainedStruct *, label: char * + struct = new_struct_p( + "WGPUCommandBufferDescriptor *", + label=to_c_label(label), + # not used: nextInChain + ) + # H: WGPUCommandBuffer f(WGPUCommandEncoder commandEncoder, WGPUCommandBufferDescriptor const * descriptor) + id = libf.wgpuCommandEncoderFinish(self._internal, struct) + return GPUCommandBuffer(label, id, self) + + def resolve_query_set( + self, query_set, first_query, query_count, destination, destination_offset + ): + # H: void f(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset) + libf.wgpuCommandEncoderResolveQuerySet( + self._internal, + query_set._internal, + int(first_query), + int(query_count), + destination._internal, + int(destination_offset), + ) + + def _destroy(self): + # Note that the native object gets destroyed on finish. + # Also see GPUCommandBuffer._destroy() + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUCommandEncoder commandEncoder) + libf.wgpuCommandEncoderRelease(internal) + + +class GPUComputePassEncoder( + classes.GPUComputePassEncoder, + GPUCommandsMixin, + GPUDebugCommandsMixin, + GPUBindingCommandsMixin, + GPUObjectBase, +): + """ """ + + def set_pipeline(self, pipeline): + pipeline_id = pipeline._internal + # H: void f(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline) + libf.wgpuComputePassEncoderSetPipeline(self._internal, pipeline_id) + + def dispatch_workgroups( + self, workgroup_count_x, workgroup_count_y=1, workgroup_count_z=1 + ): + # H: void f(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ) + libf.wgpuComputePassEncoderDispatchWorkgroups( + self._internal, workgroup_count_x, workgroup_count_y, workgroup_count_z + ) + + def dispatch_workgroups_indirect(self, indirect_buffer, indirect_offset): + buffer_id = indirect_buffer._internal + # H: void f(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) + libf.wgpuComputePassEncoderDispatchWorkgroupsIndirect( + self._internal, buffer_id, int(indirect_offset) + ) + + def end(self): + # H: void f(WGPUComputePassEncoder computePassEncoder) + libf.wgpuComputePassEncoderEnd(self._internal) + + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUComputePassEncoder computePassEncoder) + libf.wgpuComputePassEncoderRelease(internal) + + +class GPURenderPassEncoder( + classes.GPURenderPassEncoder, + GPUCommandsMixin, + GPUDebugCommandsMixin, + GPUBindingCommandsMixin, + GPURenderCommandsMixin, 
+ GPUObjectBase, +): + def set_viewport(self, x, y, width, height, min_depth, max_depth): + # H: void f(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth) + libf.wgpuRenderPassEncoderSetViewport( + self._internal, + float(x), + float(y), + float(width), + float(height), + float(min_depth), + float(max_depth), + ) + + def set_scissor_rect(self, x, y, width, height): + # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height) + libf.wgpuRenderPassEncoderSetScissorRect( + self._internal, int(x), int(y), int(width), int(height) + ) + + def set_blend_constant(self, color): + color = _tuple_from_tuple_or_dict(color, "rgba") + # H: r: float, g: float, b: float, a: float + c_color = new_struct_p( + "WGPUColor *", + r=color[0], + g=color[1], + b=color[2], + a=color[3], + ) + # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color) + libf.wgpuRenderPassEncoderSetBlendConstant(self._internal, c_color) + + def set_stencil_reference(self, reference): + # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t reference) + libf.wgpuRenderPassEncoderSetStencilReference(self._internal, int(reference)) + + def end(self): + # H: void f(WGPURenderPassEncoder renderPassEncoder) + libf.wgpuRenderPassEncoderEnd(self._internal) + + def execute_bundles(self, bundles): + raise NotImplementedError() + + def begin_occlusion_query(self, query_index): + raise NotImplementedError() + + def end_occlusion_query(self): + raise NotImplementedError() + + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPURenderPassEncoder renderPassEncoder) + libf.wgpuRenderPassEncoderRelease(internal) + + +class GPURenderBundleEncoder( + classes.GPURenderBundleEncoder, + GPUCommandsMixin, + GPUDebugCommandsMixin, + GPUBindingCommandsMixin, + GPURenderCommandsMixin, + GPUObjectBase, +): + def finish(self, *, label=""): + raise NotImplementedError() + + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPURenderBundleEncoder renderBundleEncoder) + libf.wgpuRenderBundleEncoderRelease(internal) + + +class GPUQueue(classes.GPUQueue, GPUObjectBase): + def submit(self, command_buffers): + command_buffer_ids = [cb._internal for cb in command_buffers] + c_command_buffers = ffi.new("WGPUCommandBuffer []", command_buffer_ids) + # H: void f(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands) + libf.wgpuQueueSubmit(self._internal, len(command_buffer_ids), c_command_buffers) + + def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None): + # We support anything that memoryview supports, i.e. anything + # that implements the buffer protocol, including, bytes, + # bytearray, ctypes arrays, numpy arrays, etc. 
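+        # Illustrative sketch (buf and queue are hypothetical objects), any
+        # buffer-protocol object can be passed as data:
+        #
+        #     import numpy as np
+        #     data = np.arange(16, dtype=np.uint32)      # 64 bytes
+        #     queue.write_buffer(buf, 0, data)           # write all 64 bytes at buffer offset 0
+        #     queue.write_buffer(buf, 256, data, 16, 32) # write 32 bytes, starting at byte 16 of data
+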
+ m, address = get_memoryview_and_address(data) + nbytes = m.nbytes + + # Deal with offset and size + buffer_offset = int(buffer_offset) + data_offset = int(data_offset) + if not size: + data_length = nbytes - data_offset + else: + data_length = int(size) + + if not (0 <= buffer_offset < buffer.size): # pragma: no cover + raise ValueError("Invalid buffer_offset") + if not (0 <= data_offset < nbytes): # pragma: no cover + raise ValueError("Invalid data_offset") + if not (0 <= data_length <= (nbytes - data_offset)): # pragma: no cover + raise ValueError("Invalid data_length") + if not (data_length <= buffer.size - buffer_offset): # pragma: no cover + raise ValueError("Invalid data_length") + + # Make the call. Note that this call copies the data - it's ok + # if we lose our reference to the data once we leave this function. + c_data = ffi.cast("uint8_t *", address + data_offset) + # H: void f(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size) + libf.wgpuQueueWriteBuffer( + self._internal, buffer._internal, buffer_offset, c_data, data_length + ) + + def read_buffer(self, buffer, buffer_offset=0, size=None): + # Note that write_buffer probably does a very similar thing + # using a temporary buffer. But write_buffer is official API + # so it's a single call, while here we must create the temporary + # buffer and do the copying ourselves. + + if not size: + data_length = buffer.size - buffer_offset + else: + data_length = int(size) + if not (0 <= buffer_offset < buffer.size): # pragma: no cover + raise ValueError("Invalid buffer_offset") + if not (data_length <= buffer.size - buffer_offset): # pragma: no cover + raise ValueError("Invalid data_length") + + device = buffer._device + + # Create temporary buffer + tmp_usage = flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ + tmp_buffer = device._create_buffer("", data_length, tmp_usage, False) + + # Copy data to temp buffer + encoder = device.create_command_encoder() + encoder.copy_buffer_to_buffer(buffer, buffer_offset, tmp_buffer, 0, data_length) + command_buffer = encoder.finish() + self.submit([command_buffer]) + + # Download from mappable buffer + tmp_buffer.map("READ_NOSYNC") + data = tmp_buffer.read_mapped() + tmp_buffer.destroy() + + return data + + def write_texture(self, destination, data, data_layout, size): + # Note that the bytes_per_row restriction does not apply for + # this function; wgpu-native deals with it. + + if isinstance(destination["texture"], GPUTextureView): + raise ValueError("copy destination texture must be a texture, not a view") + + m, address = get_memoryview_and_address(data) + + c_data = ffi.cast("uint8_t *", address) + data_length = m.nbytes + + # We could allow size=None in this method, and derive the size from the data. + # Or compare size with the data size if it is given. However, the data + # could be a bit raw, being 1D and/or the shape expressed in bytes, so + # this gets a bit muddy. Also methods like copy_buffer_to_texture have the + # same size arg, so let's just leave it like this. 
+ # + # data_size = list(reversed(m.shape)) + [1, 1, 1] + # data_size = data_size[:3] + + size = _tuple_from_tuple_or_dict( + size, ("width", "height", "depth_or_array_layers") + ) + + ori = _tuple_from_tuple_or_dict(destination.get("origin", (0, 0, 0)), "xyz") + # H: x: int, y: int, z: int + c_origin = new_struct( + "WGPUOrigin3D", + x=ori[0], + y=ori[1], + z=ori[2], + ) + # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect + c_destination = new_struct_p( + "WGPUImageCopyTexture *", + texture=destination["texture"]._internal, + mipLevel=destination.get("mip_level", 0), + origin=c_origin, + aspect=enums.TextureAspect.all, + # not used: nextInChain + ) + + # H: nextInChain: WGPUChainedStruct *, offset: int, bytesPerRow: int, rowsPerImage: int + c_data_layout = new_struct_p( + "WGPUTextureDataLayout *", + offset=data_layout.get("offset", 0), + bytesPerRow=data_layout["bytes_per_row"], + rowsPerImage=data_layout.get("rows_per_image", size[1]), + # not used: nextInChain + ) + + # H: width: int, height: int, depthOrArrayLayers: int + c_size = new_struct_p( + "WGPUExtent3D *", + width=size[0], + height=size[1], + depthOrArrayLayers=size[2], + ) + + # H: void f(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize) + libf.wgpuQueueWriteTexture( + self._internal, c_destination, c_data, data_length, c_data_layout, c_size + ) + + def read_texture(self, source, data_layout, size): + # Note that the bytes_per_row restriction does not apply for + # this function; we have to deal with it. + + device = source["texture"]._device + + # Get and calculate striding info + ori_offset = data_layout.get("offset", 0) + ori_stride = data_layout["bytes_per_row"] + extra_stride = (256 - ori_stride % 256) % 256 + full_stride = ori_stride + extra_stride + + size = _tuple_from_tuple_or_dict( + size, ("width", "height", "depth_or_array_layers") + ) + + # Create temporary buffer + data_length = full_stride * size[1] * size[2] + tmp_usage = flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ + tmp_buffer = device._create_buffer("", data_length, tmp_usage, False) + + destination = { + "buffer": tmp_buffer, + "offset": 0, + "bytes_per_row": full_stride, # or WGPU_COPY_STRIDE_UNDEFINED ? + "rows_per_image": data_layout.get("rows_per_image", size[1]), + } + + # Copy data to temp buffer + encoder = device.create_command_encoder() + encoder.copy_texture_to_buffer(source, destination, size) + command_buffer = encoder.finish() + self.submit([command_buffer]) + + # Download from mappable buffer + tmp_buffer.map("READ_NOSYNC") + data = tmp_buffer.read_mapped() + tmp_buffer.destroy() + + # Fix data strides if necessary + # Ugh, cannot do striding with memoryviews (yet: https://bugs.python.org/issue41226) + # and Numpy is not a dependency. 
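+        # Worked example of the numbers involved (illustrative): with
+        # bytes_per_row = 300, extra_stride = (256 - 300 % 256) % 256 = 212,
+        # so full_stride = 512. The loop below then copies the 300 meaningful
+        # bytes out of each 512-byte slot, so the returned data has no padding.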
+ if extra_stride or ori_offset: + data_length2 = ori_stride * size[1] * size[2] + ori_offset + data2 = memoryview((ctypes.c_uint8 * data_length2)()).cast(data.format) + for i in range(size[1] * size[2]): + row = data[i * full_stride : i * full_stride + ori_stride] + data2[ + ori_offset + + i * ori_stride : ori_offset + + i * ori_stride + + ori_stride + ] = row + data = data2 + + return data + + def on_submitted_work_done(self): + raise NotImplementedError() + + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUQueue queue) + libf.wgpuQueueRelease(internal) + + +class GPURenderBundle(classes.GPURenderBundle, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPURenderBundle renderBundle) + libf.wgpuRenderBundleRelease(internal) + + +class GPUQuerySet(classes.GPUQuerySet, GPUObjectBase): + def _destroy(self): + if self._internal is not None and libf is not None: + self._internal, internal = None, self._internal + # H: void f(WGPUQuerySet querySet) + libf.wgpuQuerySetRelease(internal) + + def destroy(self): + self._destroy() + + +# %% Subclasses that don't need anything else + + +class GPUCompilationMessage(classes.GPUCompilationMessage): + pass + + +class GPUCompilationInfo(classes.GPUCompilationInfo): + pass + + +class GPUDeviceLostInfo(classes.GPUDeviceLostInfo): + pass + + +class GPUError(classes.GPUError): + pass + + +class GPUOutOfMemoryError(classes.GPUOutOfMemoryError, GPUError): + pass + + +class GPUValidationError(classes.GPUValidationError, GPUError): + pass + + +class GPUPipelineError(classes.GPUPipelineError): + pass + + +class GPUInternalError(classes.GPUInternalError, GPUError): + pass + + +# %% + + +def _copy_docstrings(): + base_classes = GPUObjectBase, GPUCanvasContext, GPUAdapter + for ob in globals().values(): + if not (isinstance(ob, type) and issubclass(ob, base_classes)): + continue + elif ob.__module__ != __name__: + continue # no-cover + base_cls = ob.mro()[1] + ob.__doc__ = base_cls.__doc__ + for name, attr in ob.__dict__.items(): + if name.startswith("_") or not hasattr(attr, "__doc__"): + continue # no-cover + base_attr = getattr(base_cls, name, None) + if base_attr is not None: + attr.__doc__ = base_attr.__doc__ + + +_copy_docstrings() diff --git a/wgpu/backends/wgpu_native/_ffi.py b/wgpu/backends/wgpu_native/_ffi.py new file mode 100644 index 0000000..641dd5a --- /dev/null +++ b/wgpu/backends/wgpu_native/_ffi.py @@ -0,0 +1,205 @@ +"""Loading the header, the lib, and setting up its logging. +""" + +import os +import sys +import logging + +from ..._coreutils import get_resource_filename, logger_set_level_callbacks + +from cffi import FFI, __version_info__ as cffi_version_info + + +logger = logging.getLogger("wgpu") # noqa + + +if cffi_version_info < (1, 10): # no-cover + raise ImportError(f"{__name__} needs cffi 1.10 or later.") + + +def get_wgpu_header(): + """Read header file and strip some stuff that cffi would stumble on.""" + return _get_wgpu_header( + get_resource_filename("webgpu.h"), + get_resource_filename("wgpu.h"), + ) + + +def _get_wgpu_header(*filenames): + """Func written so we can use this in both wgpu_native/_ffi.py and codegen/hparser.py""" + # Read files + lines1 = [] + for filename in filenames: + with open(filename) as f: + lines1.extend(f.readlines()) + # Deal with pre-processor commands, because cffi cannot handle them. 
+ # Just removing them, plus a few extra lines, seems to do the trick. + lines2 = [] + for line in lines1: + if line.startswith("#define ") and len(line.split()) > 2 and "0x" in line: + line = line.replace("(", "").replace(")", "") + elif line.startswith("#"): + continue + elif 'extern "C"' in line: + continue + for define_to_drop in [ + "WGPU_EXPORT ", + "WGPU_NULLABLE ", + " WGPU_OBJECT_ATTRIBUTE", + " WGPU_ENUM_ATTRIBUTE", + " WGPU_FUNCTION_ATTRIBUTE", + " WGPU_STRUCTURE_ATTRIBUTE", + ]: + line = line.replace(define_to_drop, "") + lines2.append(line) + return "\n".join(lines2) + + +def get_wgpu_lib_path(): + """Get the path to the wgpu library, taking into account the + WGPU_LIB_PATH environment variable. + """ + + # If path is given, use that or fail trying + override_path = os.getenv("WGPU_LIB_PATH", "").strip() + if override_path: + return override_path + + # Load the debug binary if requested + debug_mode = os.getenv("WGPU_DEBUG", "").strip() == "1" + build = "debug" if debug_mode else "release" + + # Get lib filename for supported platforms + if sys.platform.startswith("win"): # no-cover + lib_filename = f"wgpu_native-{build}.dll" + elif sys.platform.startswith("darwin"): # no-cover + lib_filename = f"libwgpu_native-{build}.dylib" + elif sys.platform.startswith("linux"): # no-cover + lib_filename = f"libwgpu_native-{build}.so" + else: # no-cover + raise RuntimeError( + f"No WGPU library shipped for platform {sys.platform}. Set WGPU_LIB_PATH instead." + ) + + # Note that this can be a false positive, e.g. ARM linux. + embedded_path = get_resource_filename(lib_filename) + if not os.path.isfile(embedded_path): # no-cover + download_hint = _maybe_get_hint_on_download_script() + pip_hint = _maybe_get_pip_hint() + raise RuntimeError( + f"Could not find WGPU library in {embedded_path}. {download_hint} {pip_hint}" + ) + else: + return embedded_path + + +def _maybe_get_hint_on_download_script(): + root_dir = os.path.join(get_resource_filename(""), "..", "..") + filename = os.path.abspath(os.path.join(root_dir, "download-wgpu-native.py")) + uses_repo = os.path.isfile(filename) + + uses_custom_lib = os.getenv("WGPU_LIB_PATH", "").strip() + + if uses_repo and not uses_custom_lib: + return "You may need to run download-wgpu-native.py (in the root of the repo)." + return "" + + +def _maybe_get_pip_hint(): + if not sys.platform.startswith("linux"): + return "" + + # Get pip version + pip_version = () + try: + import pip # noqa + + parts = [] + for x in pip.__version__.split("."): + if not x.isnumeric(): + break + parts.append(int(x)) + pip_version = tuple(parts) + except Exception: + pass + + if pip_version < (20, 3): + return "If you install wgpu with pip, pip needs to be at least version 20.3 or the wgpu-native binary may not be included." + return "" + + +def get_lib_version_info(): + # Get lib version + version_int = lib.wgpuGetVersion() + if version_int < 65536: # no-cover - old version encoding with 3 ints + lib_version_info = tuple((version_int >> bits) & 0xFF for bits in (16, 8, 0)) + else: + lib_version_info = tuple( + (version_int >> bits) & 0xFF for bits in (24, 16, 8, 0) + ) + # When the 0.7.0 tag was made, the version was not bumped. 
+ if lib_version_info == (0, 6, 0, 0): + lib_version_info = (0, 7, 0) + return lib_version_info + + +# Configure cffi and load the dynamic library +# NOTE: `import wgpu.backends.wgpu_native` is used in pyinstaller tests to verify +# that we can load the DLL after freezing +ffi = FFI() +ffi.cdef(get_wgpu_header()) +ffi.set_source("wgpu.h", None) +lib_path = get_wgpu_lib_path() # store path on this module so it can be checked +lib = ffi.dlopen(lib_path) +lib_version_info = get_lib_version_info() + + +def _check_expected_version(version_info): + lib_version_info = get_lib_version_info() + # Compare + if lib_version_info != version_info: # no-cover + logger.warning( + f"Expected wgpu-native version {version_info} but got {lib_version_info}. {_maybe_get_hint_on_download_script()}" + ) + + +@ffi.callback("void(WGPULogLevel, char *, void *)") +def _logger_callback(level, c_msg, userdata): + """Called when Rust emits a log message.""" + # Make a copy of the msg. Rust reclaims the memory when this returns + try: + msg = ffi.string(c_msg).decode(errors="ignore") + except Exception: + if sys.is_finalizing(): + return # Python is shutting down + m = { + lib.WGPULogLevel_Error: logger.error, + lib.WGPULogLevel_Warn: logger.warning, + lib.WGPULogLevel_Info: logger.info, + lib.WGPULogLevel_Debug: logger.debug, + lib.WGPULogLevel_Trace: logger.debug, + } + func = m.get(level, logger.warning) + func(msg) + + +def _logger_set_level_callback(level): + """Called when the log level is set from Python.""" + if level >= 40: + lib.wgpuSetLogLevel(lib.WGPULogLevel_Error) + elif level >= 30: + lib.wgpuSetLogLevel(lib.WGPULogLevel_Warn) + elif level >= 20: + lib.wgpuSetLogLevel(lib.WGPULogLevel_Info) + elif level >= 10: + lib.wgpuSetLogLevel(lib.WGPULogLevel_Debug) + elif level >= 5: + lib.wgpuSetLogLevel(lib.WGPULogLevel_Trace) # extra level + else: + lib.wgpuSetLogLevel(lib.WGPULogLevel_Off) + + +# Connect Rust logging with Python logging (userdata set to null) +lib.wgpuSetLogCallback(_logger_callback, ffi.NULL) +logger_set_level_callbacks.append(_logger_set_level_callback) +_logger_set_level_callback(logger.level) diff --git a/wgpu/backends/wgpu_native/_helpers.py b/wgpu/backends/wgpu_native/_helpers.py new file mode 100644 index 0000000..2492e2d --- /dev/null +++ b/wgpu/backends/wgpu_native/_helpers.py @@ -0,0 +1,445 @@ +"""Utilities used in the wgpu-native backend. +""" + +import os +import sys +import ctypes + +from ._ffi import ffi, lib +from ..._diagnostics import Diagnostics +from ...classes import ( + GPUError, + GPUOutOfMemoryError, + GPUValidationError, + GPUPipelineError, + GPUInternalError, +) + + +ERROR_TYPES = { + "": GPUError, + "OutOfMemory": GPUOutOfMemoryError, + "Validation": GPUValidationError, + "Pipeline": GPUPipelineError, + "Internal": GPUInternalError, +} + + +if sys.platform.startswith("darwin"): + from rubicon.objc.api import ObjCInstance, ObjCClass + + +def get_memoryview_and_address(data): + """Get a memoryview for the given data and its memory address. + The data object must support the buffer protocol. + """ + + # To get the address from a memoryview, there are multiple options. + # The most obvious is using ctypes: + # + # c_array = (ctypes.c_uint8 * nbytes).from_buffer(m) + # address = ctypes.addressof(c_array) + # + # Unfortunately, this call fails if the memoryview is readonly, e.g. if + # the data is a bytes object or readonly numpy array. One could then + # use from_buffer_copy(), but that introduces an extra data copy, which + # can hurt performance when the data is large. 
+    #
+    # Another alternative that can be used for objects implementing the array
+    # interface (like numpy arrays) is to directly read the address:
+    #
+    #   address = data.__array_interface__["data"][0]
+    #
+    # But what seems to work best (at the moment) is using cffi.
+
+    # Convert data to a memoryview. That way we have something consistent
+    # to work with, which supports all objects implementing the buffer protocol.
+    m = memoryview(data)
+
+    # Test that the data is contiguous.
+    # Note that pypy does not have the contiguous attribute, so we assume it is.
+    if not getattr(m, "contiguous", True):
+        raise ValueError("The given texture data is not contiguous")
+
+    # Get the address via ffi. In contrast to ctypes, this also
+    # works for readonly data (e.g. bytes)
+    c_data = ffi.from_buffer("uint8_t []", m)
+    address = int(ffi.cast("uintptr_t", c_data))
+
+    return m, address
+
+
+def get_memoryview_from_address(address, nbytes, format="B"):
+    """Get a memoryview from an int memory address and a byte count."""
+    # The default format is "<B", which seems to confuse some memoryview
+    # operations, so we always cast it.
+    c_array = (ctypes.c_uint8 * nbytes).from_address(address)
+    return memoryview(c_array).cast(format, shape=(nbytes,))
+
+
+def to_snake_case(name):
+    """Convert a name from camelCase to snake_case. Names that already are
+    snake_case remain the same.
+    """
+    name2 = ""
+    for c in name:
+        c2 = c.lower()
+        if c2 != c and len(name2) > 0 and name2[-1] not in "_123":
+            name2 += "_"
+        name2 += c2
+    return name2
+
+
+def to_camel_case(name):
+    """Convert a name from snake_case to camelCase. Names that already are
+    camelCase remain the same.
+    """
+    is_capital = False
+    name2 = ""
+    for c in name:
+        if c == "_" and name2:
+            is_capital = True
+        elif is_capital:
+            name2 += c.upper()
+            is_capital = False
+        else:
+            name2 += c
+    if name2.endswith(("1d", "2d", "3d")):
+        name2 = name2[:-1] + "D"
+    return name2
+
+
+class ErrorHandler:
+    """Object that logs errors, with the option to collect incoming
+    errors elsewhere.
+    """
+
+    def __init__(self, logger):
+        self._logger = logger
+        self._proxy_stack = []
+        self._error_message_counts = {}
+
+    def capture(self, func):
+        """Send incoming error messages to the given func instead of logging them."""
+        self._proxy_stack.append(func)
+
+    def release(self, func):
+        """Release the given func."""
+        f = self._proxy_stack.pop(-1)
+        if f is not func:
+            self._proxy_stack.clear()
+            self._logger.warning("ErrorHandler capture/release out of sync")
+
+    def handle_error(self, error_type: str, message: str):
+        """Handle an error message."""
+        if self._proxy_stack:
+            self._proxy_stack[-1](error_type, message)
+        else:
+            self.log_error(message)
+
+    def log_error(self, message):
+        """Handle an error message by logging it, bypassing any capturing."""
+        # Get count for this message. Use a hash that does not use the
+        # digits in the message, because of id's getting renewed on
+        # each draw.
+        h = hash("".join(c for c in message if not c.isdigit()))
+        count = self._error_message_counts.get(h, 0) + 1
+        self._error_message_counts[h] = count
+
+        # Decide what to do
+        if count == 1:
+            self._logger.error(message)
+        elif count < 10:
+            self._logger.error(message.splitlines()[0] + f" ({count})")
+        elif count == 10:
+            self._logger.error(message.splitlines()[0] + " (hiding from now)")
+
+
+class SafeLibCalls:
+    """Object that copies all library functions, but wrapped in such
+    a way that errors occurring in that call are raised as exceptions.
+ """ + + def __init__(self, lib, error_handler): + self._error_handler = error_handler + self._error_message = None + self._make_function_copies(lib) + + def _make_function_copies(self, lib): + for name in dir(lib): + if name.startswith("wgpu"): + ob = getattr(lib, name) + if callable(ob): + setattr(self, name, self._make_proxy_func(name, ob)) + + def _handle_error(self, error_type, message): + # If we already had an error, we log the earlier one now + if self._error_message: + self._error_handler.log_error(self._error_message[1]) + # Store new error + self._error_message = (error_type, message) + + def _make_proxy_func(self, name, ob): + def proxy_func(*args): + # Make the call, with error capturing on + handle_error = self._handle_error + self._error_handler.capture(handle_error) + try: + result = ob(*args) + finally: + self._error_handler.release(handle_error) + + # Handle the error. + if self._error_message: + error_type, message = self._error_message + self._error_message = None + cls = ERROR_TYPES.get(error_type, GPUError) + wgpu_error = cls(message) + # The line below will be the bottom line in the traceback, + # so better make it informative! As far as I know there is + # no way to exclude this frame from the traceback. + raise wgpu_error # the frame above is more interesting ↑↑ + return result + + proxy_func.__name__ = name + return proxy_func + + +def generate_report(): + """Get a report similar to the one produced by wgpuGenerateReport(), + but in the form of a Python dict. + """ + + # H: surfaces: WGPUStorageReport, backendType: WGPUBackendType, vulkan: WGPUHubReport, metal: WGPUHubReport, dx12: WGPUHubReport, dx11: WGPUHubReport, gl: WGPUHubReport + struct = ffi.new("WGPUGlobalReport *") + + # H: void f(WGPUInstance instance, WGPUGlobalReport * report) + lib.wgpuGenerateReport(get_wgpu_instance(), struct) + + report = {} + + report["surfaces"] = { + "occupied": struct.surfaces.numOccupied, + "vacant": struct.surfaces.numVacant, + "error": struct.surfaces.numError, + "element_size": struct.surfaces.elementSize, + } + + for backend in ("vulkan", "metal", "dx12", "dx11", "gl"): + c_hub_report = getattr(struct, backend) + report[backend] = {} + for key in dir(c_hub_report): + c_storage_report = getattr(c_hub_report, key) + storage_report = { + "occupied": c_storage_report.numOccupied, + "vacant": c_storage_report.numVacant, + "error": c_storage_report.numError, + "element_size": c_storage_report.elementSize, + } + # if any(x!=0 for x in storage_report.values()): + report[backend][key] = storage_report + + return report + + +class WgpuNativeCountsDiagnostics(Diagnostics): + def get_subscript(self): + text = "" + text += " * The o, v, e are occupied, vacant and error, respecitively.\n" + text += " * Reported memory does not include buffer/texture data.\n" + return text + + def get_dict(self): + result = {} + native_report = generate_report() + + # Names in the root of the report (backend-less) + root_names = ["surfaces"] + + # Get per-backend names and a list of backends + names = list(native_report["vulkan"].keys()) + backends = [name for name in native_report.keys() if name not in root_names] + + # Get a mapping from native names to wgpu-py names + name_map = {"surfaces": "CanvasContext"} + for name in names: + if name not in name_map: + name_map[name] = name[0].upper() + name[1:-1] + + # Initialize the result dict (sorted) + for report_name in sorted(name_map[name] for name in names + root_names): + result[report_name] = {"count": 0, "mem": 0} + + # Establish what backends are 
active + active_backends = [] + for backend in backends: + total = 0 + for name in names: + d = native_report[backend][name] + total += d["occupied"] + d["vacant"] + d["error"] + if total > 0: + active_backends.append(backend) + + # Process names in the root + for name in root_names: + d = native_report[name] + subtotal_count = d["occupied"] + d["vacant"] + d["error"] + impl = { + "o": d["occupied"], + "v": d["vacant"], + "e": d["error"], + "el_size": d["element_size"], + } + # Store in report + report_name = name_map[name] + result[report_name]["count"] = subtotal_count + result[report_name]["mem"] = subtotal_count * d["element_size"] + result[report_name]["backend"] = {"": impl} + + # Iterate over backends + for name in names: + total_count = 0 + total_mem = 0 + implementations = {} + for backend in active_backends: + d = native_report[backend][name] + subtotal_count = d["occupied"] + d["vacant"] + d["error"] + subtotal_mem = subtotal_count * d["element_size"] + impl = { + "o": d["occupied"], + "v": d["vacant"], + "e": d["error"], + "el_size": d["element_size"], + } + total_count += subtotal_count + total_mem += subtotal_mem + implementations[backend] = impl + # Store in report + report_name = name_map[name] + result[report_name]["count"] = total_count + result[report_name]["mem"] = total_mem + result[report_name]["backend"] = implementations + + # Add totals + totals = {} + for key in ("count", "mem"): + totals[key] = sum(v.get(key, 0) for v in result.values()) + result["total"] = totals + + return result + + +diagnostics = WgpuNativeCountsDiagnostics("wgpu_native_counts") diff --git a/wgpu/backends/wgpu_native/_mappings.py b/wgpu/backends/wgpu_native/_mappings.py new file mode 100644 index 0000000..cde8a6f --- /dev/null +++ b/wgpu/backends/wgpu_native/_mappings.py @@ -0,0 +1,454 @@ +""" Mappings for the wgpu-native backend. 
""" + +# THIS CODE IS AUTOGENERATED - DO NOT EDIT + +# flake8: noqa + +# There are 232 enum mappings + +enummap = { + "AddressMode.clamp-to-edge": 2, + "AddressMode.mirror-repeat": 1, + "AddressMode.repeat": 0, + "BlendFactor.constant": 11, + "BlendFactor.dst": 6, + "BlendFactor.dst-alpha": 8, + "BlendFactor.one": 1, + "BlendFactor.one-minus-constant": 12, + "BlendFactor.one-minus-dst": 7, + "BlendFactor.one-minus-dst-alpha": 9, + "BlendFactor.one-minus-src": 3, + "BlendFactor.one-minus-src-alpha": 5, + "BlendFactor.src": 2, + "BlendFactor.src-alpha": 4, + "BlendFactor.src-alpha-saturated": 10, + "BlendFactor.zero": 0, + "BlendOperation.add": 0, + "BlendOperation.max": 4, + "BlendOperation.min": 3, + "BlendOperation.reverse-subtract": 2, + "BlendOperation.subtract": 1, + "BufferBindingType.read-only-storage": 3, + "BufferBindingType.storage": 2, + "BufferBindingType.uniform": 1, + "BufferMapState.mapped": 2, + "BufferMapState.pending": 1, + "BufferMapState.unmapped": 0, + "CompareFunction.always": 8, + "CompareFunction.equal": 6, + "CompareFunction.greater": 4, + "CompareFunction.greater-equal": 5, + "CompareFunction.less": 2, + "CompareFunction.less-equal": 3, + "CompareFunction.never": 1, + "CompareFunction.not-equal": 7, + "CompilationMessageType.error": 0, + "CompilationMessageType.info": 2, + "CompilationMessageType.warning": 1, + "CullMode.back": 2, + "CullMode.front": 1, + "CullMode.none": 0, + "DeviceLostReason.destroyed": 1, + "ErrorFilter.internal": 2, + "ErrorFilter.out-of-memory": 1, + "ErrorFilter.validation": 0, + "FeatureName.bgra8unorm-storage": 10, + "FeatureName.depth-clip-control": 1, + "FeatureName.depth32float-stencil8": 2, + "FeatureName.float32-filterable": 11, + "FeatureName.indirect-first-instance": 7, + "FeatureName.rg11b10ufloat-renderable": 9, + "FeatureName.shader-f16": 8, + "FeatureName.texture-compression-astc": 6, + "FeatureName.texture-compression-bc": 4, + "FeatureName.texture-compression-etc2": 5, + "FeatureName.timestamp-query": 3, + "FilterMode.linear": 1, + "FilterMode.nearest": 0, + "FrontFace.ccw": 0, + "FrontFace.cw": 1, + "IndexFormat.uint16": 1, + "IndexFormat.uint32": 2, + "LoadOp.clear": 1, + "LoadOp.load": 2, + "MipmapFilterMode.linear": 1, + "MipmapFilterMode.nearest": 0, + "PowerPreference.high-performance": 2, + "PowerPreference.low-power": 1, + "PrimitiveTopology.line-list": 1, + "PrimitiveTopology.line-strip": 2, + "PrimitiveTopology.point-list": 0, + "PrimitiveTopology.triangle-list": 3, + "PrimitiveTopology.triangle-strip": 4, + "QueryType.occlusion": 0, + "QueryType.timestamp": 1, + "SamplerBindingType.comparison": 3, + "SamplerBindingType.filtering": 1, + "SamplerBindingType.non-filtering": 2, + "StencilOperation.decrement-clamp": 5, + "StencilOperation.decrement-wrap": 7, + "StencilOperation.increment-clamp": 4, + "StencilOperation.increment-wrap": 6, + "StencilOperation.invert": 3, + "StencilOperation.keep": 0, + "StencilOperation.replace": 2, + "StencilOperation.zero": 1, + "StorageTextureAccess.write-only": 1, + "StoreOp.discard": 2, + "StoreOp.store": 1, + "TextureAspect.all": 0, + "TextureAspect.depth-only": 2, + "TextureAspect.stencil-only": 1, + "TextureDimension.1d": 0, + "TextureDimension.2d": 1, + "TextureDimension.3d": 2, + "TextureFormat.astc-10x10-unorm": 89, + "TextureFormat.astc-10x10-unorm-srgb": 90, + "TextureFormat.astc-10x5-unorm": 83, + "TextureFormat.astc-10x5-unorm-srgb": 84, + "TextureFormat.astc-10x6-unorm": 85, + "TextureFormat.astc-10x6-unorm-srgb": 86, + "TextureFormat.astc-10x8-unorm": 87, + 
"TextureFormat.astc-10x8-unorm-srgb": 88, + "TextureFormat.astc-12x10-unorm": 91, + "TextureFormat.astc-12x10-unorm-srgb": 92, + "TextureFormat.astc-12x12-unorm": 93, + "TextureFormat.astc-12x12-unorm-srgb": 94, + "TextureFormat.astc-4x4-unorm": 67, + "TextureFormat.astc-4x4-unorm-srgb": 68, + "TextureFormat.astc-5x4-unorm": 69, + "TextureFormat.astc-5x4-unorm-srgb": 70, + "TextureFormat.astc-5x5-unorm": 71, + "TextureFormat.astc-5x5-unorm-srgb": 72, + "TextureFormat.astc-6x5-unorm": 73, + "TextureFormat.astc-6x5-unorm-srgb": 74, + "TextureFormat.astc-6x6-unorm": 75, + "TextureFormat.astc-6x6-unorm-srgb": 76, + "TextureFormat.astc-8x5-unorm": 77, + "TextureFormat.astc-8x5-unorm-srgb": 78, + "TextureFormat.astc-8x6-unorm": 79, + "TextureFormat.astc-8x6-unorm-srgb": 80, + "TextureFormat.astc-8x8-unorm": 81, + "TextureFormat.astc-8x8-unorm-srgb": 82, + "TextureFormat.bc1-rgba-unorm": 43, + "TextureFormat.bc1-rgba-unorm-srgb": 44, + "TextureFormat.bc2-rgba-unorm": 45, + "TextureFormat.bc2-rgba-unorm-srgb": 46, + "TextureFormat.bc3-rgba-unorm": 47, + "TextureFormat.bc3-rgba-unorm-srgb": 48, + "TextureFormat.bc4-r-snorm": 50, + "TextureFormat.bc4-r-unorm": 49, + "TextureFormat.bc5-rg-snorm": 52, + "TextureFormat.bc5-rg-unorm": 51, + "TextureFormat.bc6h-rgb-float": 54, + "TextureFormat.bc6h-rgb-ufloat": 53, + "TextureFormat.bc7-rgba-unorm": 55, + "TextureFormat.bc7-rgba-unorm-srgb": 56, + "TextureFormat.bgra8unorm": 23, + "TextureFormat.bgra8unorm-srgb": 24, + "TextureFormat.depth16unorm": 38, + "TextureFormat.depth24plus": 39, + "TextureFormat.depth24plus-stencil8": 40, + "TextureFormat.depth32float": 41, + "TextureFormat.depth32float-stencil8": 42, + "TextureFormat.eac-r11snorm": 64, + "TextureFormat.eac-r11unorm": 63, + "TextureFormat.eac-rg11snorm": 66, + "TextureFormat.eac-rg11unorm": 65, + "TextureFormat.etc2-rgb8a1unorm": 59, + "TextureFormat.etc2-rgb8a1unorm-srgb": 60, + "TextureFormat.etc2-rgb8unorm": 57, + "TextureFormat.etc2-rgb8unorm-srgb": 58, + "TextureFormat.etc2-rgba8unorm": 61, + "TextureFormat.etc2-rgba8unorm-srgb": 62, + "TextureFormat.r16float": 7, + "TextureFormat.r16sint": 6, + "TextureFormat.r16uint": 5, + "TextureFormat.r32float": 12, + "TextureFormat.r32sint": 14, + "TextureFormat.r32uint": 13, + "TextureFormat.r8sint": 4, + "TextureFormat.r8snorm": 2, + "TextureFormat.r8uint": 3, + "TextureFormat.r8unorm": 1, + "TextureFormat.rg11b10ufloat": 26, + "TextureFormat.rg16float": 17, + "TextureFormat.rg16sint": 16, + "TextureFormat.rg16uint": 15, + "TextureFormat.rg32float": 28, + "TextureFormat.rg32sint": 30, + "TextureFormat.rg32uint": 29, + "TextureFormat.rg8sint": 11, + "TextureFormat.rg8snorm": 9, + "TextureFormat.rg8uint": 10, + "TextureFormat.rg8unorm": 8, + "TextureFormat.rgb10a2unorm": 25, + "TextureFormat.rgb9e5ufloat": 27, + "TextureFormat.rgba16float": 33, + "TextureFormat.rgba16sint": 32, + "TextureFormat.rgba16uint": 31, + "TextureFormat.rgba32float": 34, + "TextureFormat.rgba32sint": 36, + "TextureFormat.rgba32uint": 35, + "TextureFormat.rgba8sint": 22, + "TextureFormat.rgba8snorm": 20, + "TextureFormat.rgba8uint": 21, + "TextureFormat.rgba8unorm": 18, + "TextureFormat.rgba8unorm-srgb": 19, + "TextureFormat.stencil8": 37, + "TextureSampleType.depth": 3, + "TextureSampleType.float": 1, + "TextureSampleType.sint": 4, + "TextureSampleType.uint": 5, + "TextureSampleType.unfilterable-float": 2, + "TextureViewDimension.1d": 1, + "TextureViewDimension.2d": 2, + "TextureViewDimension.2d-array": 3, + "TextureViewDimension.3d": 6, + "TextureViewDimension.cube": 4, + 
"TextureViewDimension.cube-array": 5, + "VertexFormat.float16x2": 17, + "VertexFormat.float16x4": 18, + "VertexFormat.float32": 19, + "VertexFormat.float32x2": 20, + "VertexFormat.float32x3": 21, + "VertexFormat.float32x4": 22, + "VertexFormat.sint16x2": 11, + "VertexFormat.sint16x4": 12, + "VertexFormat.sint32": 27, + "VertexFormat.sint32x2": 28, + "VertexFormat.sint32x3": 29, + "VertexFormat.sint32x4": 30, + "VertexFormat.sint8x2": 3, + "VertexFormat.sint8x4": 4, + "VertexFormat.snorm16x2": 15, + "VertexFormat.snorm16x4": 16, + "VertexFormat.snorm8x2": 7, + "VertexFormat.snorm8x4": 8, + "VertexFormat.uint16x2": 9, + "VertexFormat.uint16x4": 10, + "VertexFormat.uint32": 23, + "VertexFormat.uint32x2": 24, + "VertexFormat.uint32x3": 25, + "VertexFormat.uint32x4": 26, + "VertexFormat.uint8x2": 1, + "VertexFormat.uint8x4": 2, + "VertexFormat.unorm16x2": 13, + "VertexFormat.unorm16x4": 14, + "VertexFormat.unorm8x2": 5, + "VertexFormat.unorm8x4": 6, + "VertexStepMode.instance": 1, + "VertexStepMode.vertex": 0, +} + +# There are 47 struct-field enum mappings + +cstructfield2enum = { + "BlendComponent.dstFactor": "BlendFactor", + "BlendComponent.operation": "BlendOperation", + "BlendComponent.srcFactor": "BlendFactor", + "BufferBindingLayout.type": "BufferBindingType", + "ColorTargetState.format": "TextureFormat", + "CompilationMessage.type": "CompilationMessageType", + "DepthStencilState.depthCompare": "CompareFunction", + "DepthStencilState.format": "TextureFormat", + "ImageCopyTexture.aspect": "TextureAspect", + "PrimitiveState.cullMode": "CullMode", + "PrimitiveState.frontFace": "FrontFace", + "PrimitiveState.stripIndexFormat": "IndexFormat", + "PrimitiveState.topology": "PrimitiveTopology", + "QuerySetDescriptor.type": "QueryType", + "RenderBundleEncoderDescriptor.depthStencilFormat": "TextureFormat", + "RenderPassColorAttachment.loadOp": "LoadOp", + "RenderPassColorAttachment.storeOp": "StoreOp", + "RenderPassDepthStencilAttachment.depthLoadOp": "LoadOp", + "RenderPassDepthStencilAttachment.depthStoreOp": "StoreOp", + "RenderPassDepthStencilAttachment.stencilLoadOp": "LoadOp", + "RenderPassDepthStencilAttachment.stencilStoreOp": "StoreOp", + "RequestAdapterOptions.powerPreference": "PowerPreference", + "SamplerBindingLayout.type": "SamplerBindingType", + "SamplerDescriptor.addressModeU": "AddressMode", + "SamplerDescriptor.addressModeV": "AddressMode", + "SamplerDescriptor.addressModeW": "AddressMode", + "SamplerDescriptor.compare": "CompareFunction", + "SamplerDescriptor.magFilter": "FilterMode", + "SamplerDescriptor.minFilter": "FilterMode", + "SamplerDescriptor.mipmapFilter": "MipmapFilterMode", + "StencilFaceState.compare": "CompareFunction", + "StencilFaceState.depthFailOp": "StencilOperation", + "StencilFaceState.failOp": "StencilOperation", + "StencilFaceState.passOp": "StencilOperation", + "StorageTextureBindingLayout.access": "StorageTextureAccess", + "StorageTextureBindingLayout.format": "TextureFormat", + "StorageTextureBindingLayout.viewDimension": "TextureViewDimension", + "SurfaceConfiguration.format": "TextureFormat", + "TextureBindingLayout.sampleType": "TextureSampleType", + "TextureBindingLayout.viewDimension": "TextureViewDimension", + "TextureDescriptor.dimension": "TextureDimension", + "TextureDescriptor.format": "TextureFormat", + "TextureViewDescriptor.aspect": "TextureAspect", + "TextureViewDescriptor.dimension": "TextureViewDimension", + "TextureViewDescriptor.format": "TextureFormat", + "VertexAttribute.format": "VertexFormat", + "VertexBufferLayout.stepMode": 
"VertexStepMode", +} + +enum_str2int = { + "BackendType": { + "Undefined": 0, + "Null": 1, + "WebGPU": 2, + "D3D11": 3, + "D3D12": 4, + "Metal": 5, + "Vulkan": 6, + "OpenGL": 7, + "OpenGLES": 8, + } +} +enum_int2str = { + "BackendType": { + 0: "Undefined", + 1: "Null", + 2: "WebGPU", + 3: "D3D11", + 4: "D3D12", + 5: "Metal", + 6: "Vulkan", + 7: "OpenGL", + 8: "OpenGLES", + }, + "AdapterType": { + 0: "DiscreteGPU", + 1: "IntegratedGPU", + 2: "CPU", + 3: "Unknown", + }, + "ErrorType": { + 0: "NoError", + 1: "Validation", + 2: "OutOfMemory", + 3: "Internal", + 4: "Unknown", + 5: "DeviceLost", + }, + "DeviceLostReason": { + 0: "unknown", + 1: "destroyed", + }, + "TextureFormat": { + 0: "Undefined", + 1: "r8unorm", + 2: "r8snorm", + 3: "r8uint", + 4: "r8sint", + 5: "r16uint", + 6: "r16sint", + 7: "r16float", + 8: "rg8unorm", + 9: "rg8snorm", + 10: "rg8uint", + 11: "rg8sint", + 12: "r32float", + 13: "r32uint", + 14: "r32sint", + 15: "rg16uint", + 16: "rg16sint", + 17: "rg16float", + 18: "rgba8unorm", + 19: "rgba8unorm-srgb", + 20: "rgba8snorm", + 21: "rgba8uint", + 22: "rgba8sint", + 23: "bgra8unorm", + 24: "bgra8unorm-srgb", + 25: "rgb10a2unorm", + 26: "rg11b10ufloat", + 27: "rgb9e5ufloat", + 28: "rg32float", + 29: "rg32uint", + 30: "rg32sint", + 31: "rgba16uint", + 32: "rgba16sint", + 33: "rgba16float", + 34: "rgba32float", + 35: "rgba32uint", + 36: "rgba32sint", + 37: "stencil8", + 38: "depth16unorm", + 39: "depth24plus", + 40: "depth24plus-stencil8", + 41: "depth32float", + 42: "depth32float-stencil8", + 43: "bc1-rgba-unorm", + 44: "bc1-rgba-unorm-srgb", + 45: "bc2-rgba-unorm", + 46: "bc2-rgba-unorm-srgb", + 47: "bc3-rgba-unorm", + 48: "bc3-rgba-unorm-srgb", + 49: "bc4-r-unorm", + 50: "bc4-r-snorm", + 51: "bc5-rg-unorm", + 52: "bc5-rg-snorm", + 53: "bc6h-rgb-ufloat", + 54: "bc6h-rgb-float", + 55: "bc7-rgba-unorm", + 56: "bc7-rgba-unorm-srgb", + 57: "etc2-rgb8unorm", + 58: "etc2-rgb8unorm-srgb", + 59: "etc2-rgb8a1unorm", + 60: "etc2-rgb8a1unorm-srgb", + 61: "etc2-rgba8unorm", + 62: "etc2-rgba8unorm-srgb", + 63: "eac-r11unorm", + 64: "eac-r11snorm", + 65: "eac-rg11unorm", + 66: "eac-rg11snorm", + 67: "astc-4x4-unorm", + 68: "astc-4x4-unorm-srgb", + 69: "astc-5x4-unorm", + 70: "astc-5x4-unorm-srgb", + 71: "astc-5x5-unorm", + 72: "astc-5x5-unorm-srgb", + 73: "astc-6x5-unorm", + 74: "astc-6x5-unorm-srgb", + 75: "astc-6x6-unorm", + 76: "astc-6x6-unorm-srgb", + 77: "astc-8x5-unorm", + 78: "astc-8x5-unorm-srgb", + 79: "astc-8x6-unorm", + 80: "astc-8x6-unorm-srgb", + 81: "astc-8x8-unorm", + 82: "astc-8x8-unorm-srgb", + 83: "astc-10x5-unorm", + 84: "astc-10x5-unorm-srgb", + 85: "astc-10x6-unorm", + 86: "astc-10x6-unorm-srgb", + 87: "astc-10x8-unorm", + 88: "astc-10x8-unorm-srgb", + 89: "astc-10x10-unorm", + 90: "astc-10x10-unorm-srgb", + 91: "astc-12x10-unorm", + 92: "astc-12x10-unorm-srgb", + 93: "astc-12x12-unorm", + 94: "astc-12x12-unorm-srgb", + }, + "TextureDimension": { + 0: "1d", + 1: "2d", + 2: "3d", + }, + "PresentMode": { + 0: "Fifo", + 1: "FifoRelaxed", + 2: "Immediate", + 3: "Mailbox", + }, + "CompositeAlphaMode": { + 0: "Auto", + 1: "Opaque", + 2: "Premultiplied", + 3: "Unpremultiplied", + 4: "Inherit", + }, +} diff --git a/wgpu/backends/wgpu_native/extras.py b/wgpu/backends/wgpu_native/extras.py new file mode 100644 index 0000000..d61101e --- /dev/null +++ b/wgpu/backends/wgpu_native/extras.py @@ -0,0 +1,50 @@ +import os + +from ._api import ffi, libf, structs, enums, Dict, logger +from ._helpers import get_wgpu_instance + + +# NOTE: these functions represent backend-specific extra API. 
+# NOTE: changes to this module must be reflected in docs/backends.rst.
+# We don't use Sphinx automodule because this way the doc build does not
+# need to be able to load wgpu-native.
+
+
+def enumerate_adapters():
+    """Return a list of all available adapters."""
+    # The first call is to get the number of adapters, and the second
+    # call is to get the actual adapters. Note that the second arg (now
+    # NULL) can be a `WGPUInstanceEnumerateAdapterOptions` to filter
+    # by backend.
+
+    adapter_count = libf.wgpuInstanceEnumerateAdapters(
+        get_wgpu_instance(), ffi.NULL, ffi.NULL
+    )
+
+    adapters = ffi.new("WGPUAdapter[]", adapter_count)
+    libf.wgpuInstanceEnumerateAdapters(get_wgpu_instance(), ffi.NULL, adapters)
+
+    from . import gpu  # noqa
+
+    return [gpu._create_adapter(adapter) for adapter in adapters]
+
+
+def request_device_tracing(
+    adapter,
+    trace_path,
+    *,
+    label="",
+    required_features: "list(enums.FeatureName)" = [],
+    required_limits: "Dict[str, int]" = {},
+    default_queue: "structs.QueueDescriptor" = {},
+):
+    """Write a trace of all commands to a file so it can be reproduced
+    elsewhere. The trace is cross-platform!
+    """
+    if not os.path.isdir(trace_path):
+        os.makedirs(trace_path, exist_ok=True)
+    elif os.listdir(trace_path):
+        logger.warning(f"Trace directory not empty: {trace_path}")
+    return adapter._request_device(
+        label, required_features, required_limits, default_queue, trace_path
+    )
diff --git a/wgpu/classes.py b/wgpu/classes.py
new file mode 100644
index 0000000..2019014
--- /dev/null
+++ b/wgpu/classes.py
@@ -0,0 +1,8 @@
+"""
+The classes that make up the wgpu API.
+These can be accessed via ``wgpu.classes``,
+but are also available in the root wgpu namespace.
+"""
+
+from ._classes import *  # noqa: F401, F403
+from ._classes import __all__  # noqa: F401
diff --git a/wgpu/enums.py b/wgpu/enums.py
new file mode 100644
index 0000000..a745626
--- /dev/null
+++ b/wgpu/enums.py
@@ -0,0 +1,686 @@
+"""
+These enums are defined in ``wgpu.enums``, but are also available from the root wgpu namespace.
+
+Enums are choices; exactly one field must be selected.
+Enum values are strings, so instead of ``wgpu.TextureFormat.rgba8unorm``,
+one can also write ``"rgba8unorm"``.
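+
+For example, both spellings below refer to the same value::
+
+    import wgpu
+
+    assert wgpu.TextureFormat.rgba8unorm == "rgba8unorm"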
+""" + +_use_sphinx_repr = False + + +class Enum: + def __init__(self, name, **kwargs): + self._name = name + for key, val in kwargs.items(): + setattr(self, key, val) + + def __iter__(self): + return iter( + [getattr(self, key) for key in dir(self) if not key.startswith("_")] + ) + + def __repr__(self): + if _use_sphinx_repr: # no-cover + return "" + options = ", ".join(f"'{x}'" for x in self) + return f"" + + +# CODE BELOW THIS POINT IS AUTOGENERATED - DO NOT EDIT + + +# There are 33 enums + +__all__ = [ + "PowerPreference", + "FeatureName", + "BufferMapState", + "TextureDimension", + "TextureViewDimension", + "TextureAspect", + "TextureFormat", + "AddressMode", + "FilterMode", + "MipmapFilterMode", + "CompareFunction", + "BufferBindingType", + "SamplerBindingType", + "TextureSampleType", + "StorageTextureAccess", + "CompilationMessageType", + "PipelineErrorReason", + "AutoLayoutMode", + "PrimitiveTopology", + "FrontFace", + "CullMode", + "BlendFactor", + "BlendOperation", + "StencilOperation", + "IndexFormat", + "VertexFormat", + "VertexStepMode", + "LoadOp", + "StoreOp", + "QueryType", + "CanvasAlphaMode", + "DeviceLostReason", + "ErrorFilter", +] + + +#: * "low_power" +#: * "high_performance" +PowerPreference = Enum( + "PowerPreference", + low_power="low-power", + high_performance="high-performance", +) + +#: * "depth_clip_control" +#: * "depth32float_stencil8" +#: * "texture_compression_bc" +#: * "texture_compression_etc2" +#: * "texture_compression_astc" +#: * "timestamp_query" +#: * "indirect_first_instance" +#: * "shader_f16" +#: * "rg11b10ufloat_renderable" +#: * "bgra8unorm_storage" +#: * "float32_filterable" +FeatureName = Enum( + "FeatureName", + depth_clip_control="depth-clip-control", + depth32float_stencil8="depth32float-stencil8", + texture_compression_bc="texture-compression-bc", + texture_compression_etc2="texture-compression-etc2", + texture_compression_astc="texture-compression-astc", + timestamp_query="timestamp-query", + indirect_first_instance="indirect-first-instance", + shader_f16="shader-f16", + rg11b10ufloat_renderable="rg11b10ufloat-renderable", + bgra8unorm_storage="bgra8unorm-storage", + float32_filterable="float32-filterable", +) + +#: * "unmapped" +#: * "pending" +#: * "mapped" +BufferMapState = Enum( + "BufferMapState", + unmapped="unmapped", + pending="pending", + mapped="mapped", +) + +#: * "d1" +#: * "d2" +#: * "d3" +TextureDimension = Enum( + "TextureDimension", + d1="1d", + d2="2d", + d3="3d", +) + +#: * "d1" +#: * "d2" +#: * "d2_array" +#: * "cube" +#: * "cube_array" +#: * "d3" +TextureViewDimension = Enum( + "TextureViewDimension", + d1="1d", + d2="2d", + d2_array="2d-array", + cube="cube", + cube_array="cube-array", + d3="3d", +) + +#: * "all" +#: * "stencil_only" +#: * "depth_only" +TextureAspect = Enum( + "TextureAspect", + all="all", + stencil_only="stencil-only", + depth_only="depth-only", +) + +#: * "r8unorm" +#: * "r8snorm" +#: * "r8uint" +#: * "r8sint" +#: * "r16uint" +#: * "r16sint" +#: * "r16float" +#: * "rg8unorm" +#: * "rg8snorm" +#: * "rg8uint" +#: * "rg8sint" +#: * "r32uint" +#: * "r32sint" +#: * "r32float" +#: * "rg16uint" +#: * "rg16sint" +#: * "rg16float" +#: * "rgba8unorm" +#: * "rgba8unorm_srgb" +#: * "rgba8snorm" +#: * "rgba8uint" +#: * "rgba8sint" +#: * "bgra8unorm" +#: * "bgra8unorm_srgb" +#: * "rgb9e5ufloat" +#: * "rgb10a2uint" +#: * "rgb10a2unorm" +#: * "rg11b10ufloat" +#: * "rg32uint" +#: * "rg32sint" +#: * "rg32float" +#: * "rgba16uint" +#: * "rgba16sint" +#: * "rgba16float" +#: * "rgba32uint" +#: * "rgba32sint" +#: * 
"rgba32float" +#: * "stencil8" +#: * "depth16unorm" +#: * "depth24plus" +#: * "depth24plus_stencil8" +#: * "depth32float" +#: * "depth32float_stencil8" +#: * "bc1_rgba_unorm" +#: * "bc1_rgba_unorm_srgb" +#: * "bc2_rgba_unorm" +#: * "bc2_rgba_unorm_srgb" +#: * "bc3_rgba_unorm" +#: * "bc3_rgba_unorm_srgb" +#: * "bc4_r_unorm" +#: * "bc4_r_snorm" +#: * "bc5_rg_unorm" +#: * "bc5_rg_snorm" +#: * "bc6h_rgb_ufloat" +#: * "bc6h_rgb_float" +#: * "bc7_rgba_unorm" +#: * "bc7_rgba_unorm_srgb" +#: * "etc2_rgb8unorm" +#: * "etc2_rgb8unorm_srgb" +#: * "etc2_rgb8a1unorm" +#: * "etc2_rgb8a1unorm_srgb" +#: * "etc2_rgba8unorm" +#: * "etc2_rgba8unorm_srgb" +#: * "eac_r11unorm" +#: * "eac_r11snorm" +#: * "eac_rg11unorm" +#: * "eac_rg11snorm" +#: * "astc_4x4_unorm" +#: * "astc_4x4_unorm_srgb" +#: * "astc_5x4_unorm" +#: * "astc_5x4_unorm_srgb" +#: * "astc_5x5_unorm" +#: * "astc_5x5_unorm_srgb" +#: * "astc_6x5_unorm" +#: * "astc_6x5_unorm_srgb" +#: * "astc_6x6_unorm" +#: * "astc_6x6_unorm_srgb" +#: * "astc_8x5_unorm" +#: * "astc_8x5_unorm_srgb" +#: * "astc_8x6_unorm" +#: * "astc_8x6_unorm_srgb" +#: * "astc_8x8_unorm" +#: * "astc_8x8_unorm_srgb" +#: * "astc_10x5_unorm" +#: * "astc_10x5_unorm_srgb" +#: * "astc_10x6_unorm" +#: * "astc_10x6_unorm_srgb" +#: * "astc_10x8_unorm" +#: * "astc_10x8_unorm_srgb" +#: * "astc_10x10_unorm" +#: * "astc_10x10_unorm_srgb" +#: * "astc_12x10_unorm" +#: * "astc_12x10_unorm_srgb" +#: * "astc_12x12_unorm" +#: * "astc_12x12_unorm_srgb" +TextureFormat = Enum( + "TextureFormat", + r8unorm="r8unorm", + r8snorm="r8snorm", + r8uint="r8uint", + r8sint="r8sint", + r16uint="r16uint", + r16sint="r16sint", + r16float="r16float", + rg8unorm="rg8unorm", + rg8snorm="rg8snorm", + rg8uint="rg8uint", + rg8sint="rg8sint", + r32uint="r32uint", + r32sint="r32sint", + r32float="r32float", + rg16uint="rg16uint", + rg16sint="rg16sint", + rg16float="rg16float", + rgba8unorm="rgba8unorm", + rgba8unorm_srgb="rgba8unorm-srgb", + rgba8snorm="rgba8snorm", + rgba8uint="rgba8uint", + rgba8sint="rgba8sint", + bgra8unorm="bgra8unorm", + bgra8unorm_srgb="bgra8unorm-srgb", + rgb9e5ufloat="rgb9e5ufloat", + rgb10a2uint="rgb10a2uint", + rgb10a2unorm="rgb10a2unorm", + rg11b10ufloat="rg11b10ufloat", + rg32uint="rg32uint", + rg32sint="rg32sint", + rg32float="rg32float", + rgba16uint="rgba16uint", + rgba16sint="rgba16sint", + rgba16float="rgba16float", + rgba32uint="rgba32uint", + rgba32sint="rgba32sint", + rgba32float="rgba32float", + stencil8="stencil8", + depth16unorm="depth16unorm", + depth24plus="depth24plus", + depth24plus_stencil8="depth24plus-stencil8", + depth32float="depth32float", + depth32float_stencil8="depth32float-stencil8", + bc1_rgba_unorm="bc1-rgba-unorm", + bc1_rgba_unorm_srgb="bc1-rgba-unorm-srgb", + bc2_rgba_unorm="bc2-rgba-unorm", + bc2_rgba_unorm_srgb="bc2-rgba-unorm-srgb", + bc3_rgba_unorm="bc3-rgba-unorm", + bc3_rgba_unorm_srgb="bc3-rgba-unorm-srgb", + bc4_r_unorm="bc4-r-unorm", + bc4_r_snorm="bc4-r-snorm", + bc5_rg_unorm="bc5-rg-unorm", + bc5_rg_snorm="bc5-rg-snorm", + bc6h_rgb_ufloat="bc6h-rgb-ufloat", + bc6h_rgb_float="bc6h-rgb-float", + bc7_rgba_unorm="bc7-rgba-unorm", + bc7_rgba_unorm_srgb="bc7-rgba-unorm-srgb", + etc2_rgb8unorm="etc2-rgb8unorm", + etc2_rgb8unorm_srgb="etc2-rgb8unorm-srgb", + etc2_rgb8a1unorm="etc2-rgb8a1unorm", + etc2_rgb8a1unorm_srgb="etc2-rgb8a1unorm-srgb", + etc2_rgba8unorm="etc2-rgba8unorm", + etc2_rgba8unorm_srgb="etc2-rgba8unorm-srgb", + eac_r11unorm="eac-r11unorm", + eac_r11snorm="eac-r11snorm", + eac_rg11unorm="eac-rg11unorm", + eac_rg11snorm="eac-rg11snorm", + 
astc_4x4_unorm="astc-4x4-unorm", + astc_4x4_unorm_srgb="astc-4x4-unorm-srgb", + astc_5x4_unorm="astc-5x4-unorm", + astc_5x4_unorm_srgb="astc-5x4-unorm-srgb", + astc_5x5_unorm="astc-5x5-unorm", + astc_5x5_unorm_srgb="astc-5x5-unorm-srgb", + astc_6x5_unorm="astc-6x5-unorm", + astc_6x5_unorm_srgb="astc-6x5-unorm-srgb", + astc_6x6_unorm="astc-6x6-unorm", + astc_6x6_unorm_srgb="astc-6x6-unorm-srgb", + astc_8x5_unorm="astc-8x5-unorm", + astc_8x5_unorm_srgb="astc-8x5-unorm-srgb", + astc_8x6_unorm="astc-8x6-unorm", + astc_8x6_unorm_srgb="astc-8x6-unorm-srgb", + astc_8x8_unorm="astc-8x8-unorm", + astc_8x8_unorm_srgb="astc-8x8-unorm-srgb", + astc_10x5_unorm="astc-10x5-unorm", + astc_10x5_unorm_srgb="astc-10x5-unorm-srgb", + astc_10x6_unorm="astc-10x6-unorm", + astc_10x6_unorm_srgb="astc-10x6-unorm-srgb", + astc_10x8_unorm="astc-10x8-unorm", + astc_10x8_unorm_srgb="astc-10x8-unorm-srgb", + astc_10x10_unorm="astc-10x10-unorm", + astc_10x10_unorm_srgb="astc-10x10-unorm-srgb", + astc_12x10_unorm="astc-12x10-unorm", + astc_12x10_unorm_srgb="astc-12x10-unorm-srgb", + astc_12x12_unorm="astc-12x12-unorm", + astc_12x12_unorm_srgb="astc-12x12-unorm-srgb", +) + +#: * "clamp_to_edge" +#: * "repeat" +#: * "mirror_repeat" +AddressMode = Enum( + "AddressMode", + clamp_to_edge="clamp-to-edge", + repeat="repeat", + mirror_repeat="mirror-repeat", +) + +#: * "nearest" +#: * "linear" +FilterMode = Enum( + "FilterMode", + nearest="nearest", + linear="linear", +) + +#: * "nearest" +#: * "linear" +MipmapFilterMode = Enum( + "MipmapFilterMode", + nearest="nearest", + linear="linear", +) + +#: * "never" +#: * "less" +#: * "equal" +#: * "less_equal" +#: * "greater" +#: * "not_equal" +#: * "greater_equal" +#: * "always" +CompareFunction = Enum( + "CompareFunction", + never="never", + less="less", + equal="equal", + less_equal="less-equal", + greater="greater", + not_equal="not-equal", + greater_equal="greater-equal", + always="always", +) + +#: * "uniform" +#: * "storage" +#: * "read_only_storage" +BufferBindingType = Enum( + "BufferBindingType", + uniform="uniform", + storage="storage", + read_only_storage="read-only-storage", +) + +#: * "filtering" +#: * "non_filtering" +#: * "comparison" +SamplerBindingType = Enum( + "SamplerBindingType", + filtering="filtering", + non_filtering="non-filtering", + comparison="comparison", +) + +#: * "float" +#: * "unfilterable_float" +#: * "depth" +#: * "sint" +#: * "uint" +TextureSampleType = Enum( + "TextureSampleType", + float="float", + unfilterable_float="unfilterable-float", + depth="depth", + sint="sint", + uint="uint", +) + +#: * "write_only" +#: * "read_only" +#: * "read_write" +StorageTextureAccess = Enum( + "StorageTextureAccess", + write_only="write-only", + read_only="read-only", + read_write="read-write", +) + +#: * "error" +#: * "warning" +#: * "info" +CompilationMessageType = Enum( + "CompilationMessageType", + error="error", + warning="warning", + info="info", +) + +#: * "validation" +#: * "internal" +PipelineErrorReason = Enum( + "PipelineErrorReason", + validation="validation", + internal="internal", +) + +#: * "auto" +AutoLayoutMode = Enum( + "AutoLayoutMode", + auto="auto", +) + +#: * "point_list" +#: * "line_list" +#: * "line_strip" +#: * "triangle_list" +#: * "triangle_strip" +PrimitiveTopology = Enum( + "PrimitiveTopology", + point_list="point-list", + line_list="line-list", + line_strip="line-strip", + triangle_list="triangle-list", + triangle_strip="triangle-strip", +) + +#: * "ccw" +#: * "cw" +FrontFace = Enum( + "FrontFace", + ccw="ccw", + cw="cw", +) + +#: * 
"none" +#: * "front" +#: * "back" +CullMode = Enum( + "CullMode", + none="none", + front="front", + back="back", +) + +#: * "zero" +#: * "one" +#: * "src" +#: * "one_minus_src" +#: * "src_alpha" +#: * "one_minus_src_alpha" +#: * "dst" +#: * "one_minus_dst" +#: * "dst_alpha" +#: * "one_minus_dst_alpha" +#: * "src_alpha_saturated" +#: * "constant" +#: * "one_minus_constant" +BlendFactor = Enum( + "BlendFactor", + zero="zero", + one="one", + src="src", + one_minus_src="one-minus-src", + src_alpha="src-alpha", + one_minus_src_alpha="one-minus-src-alpha", + dst="dst", + one_minus_dst="one-minus-dst", + dst_alpha="dst-alpha", + one_minus_dst_alpha="one-minus-dst-alpha", + src_alpha_saturated="src-alpha-saturated", + constant="constant", + one_minus_constant="one-minus-constant", +) + +#: * "add" +#: * "subtract" +#: * "reverse_subtract" +#: * "min" +#: * "max" +BlendOperation = Enum( + "BlendOperation", + add="add", + subtract="subtract", + reverse_subtract="reverse-subtract", + min="min", + max="max", +) + +#: * "keep" +#: * "zero" +#: * "replace" +#: * "invert" +#: * "increment_clamp" +#: * "decrement_clamp" +#: * "increment_wrap" +#: * "decrement_wrap" +StencilOperation = Enum( + "StencilOperation", + keep="keep", + zero="zero", + replace="replace", + invert="invert", + increment_clamp="increment-clamp", + decrement_clamp="decrement-clamp", + increment_wrap="increment-wrap", + decrement_wrap="decrement-wrap", +) + +#: * "uint16" +#: * "uint32" +IndexFormat = Enum( + "IndexFormat", + uint16="uint16", + uint32="uint32", +) + +#: * "uint8x2" +#: * "uint8x4" +#: * "sint8x2" +#: * "sint8x4" +#: * "unorm8x2" +#: * "unorm8x4" +#: * "snorm8x2" +#: * "snorm8x4" +#: * "uint16x2" +#: * "uint16x4" +#: * "sint16x2" +#: * "sint16x4" +#: * "unorm16x2" +#: * "unorm16x4" +#: * "snorm16x2" +#: * "snorm16x4" +#: * "float16x2" +#: * "float16x4" +#: * "float32" +#: * "float32x2" +#: * "float32x3" +#: * "float32x4" +#: * "uint32" +#: * "uint32x2" +#: * "uint32x3" +#: * "uint32x4" +#: * "sint32" +#: * "sint32x2" +#: * "sint32x3" +#: * "sint32x4" +#: * "unorm10_10_10_2" +VertexFormat = Enum( + "VertexFormat", + uint8x2="uint8x2", + uint8x4="uint8x4", + sint8x2="sint8x2", + sint8x4="sint8x4", + unorm8x2="unorm8x2", + unorm8x4="unorm8x4", + snorm8x2="snorm8x2", + snorm8x4="snorm8x4", + uint16x2="uint16x2", + uint16x4="uint16x4", + sint16x2="sint16x2", + sint16x4="sint16x4", + unorm16x2="unorm16x2", + unorm16x4="unorm16x4", + snorm16x2="snorm16x2", + snorm16x4="snorm16x4", + float16x2="float16x2", + float16x4="float16x4", + float32="float32", + float32x2="float32x2", + float32x3="float32x3", + float32x4="float32x4", + uint32="uint32", + uint32x2="uint32x2", + uint32x3="uint32x3", + uint32x4="uint32x4", + sint32="sint32", + sint32x2="sint32x2", + sint32x3="sint32x3", + sint32x4="sint32x4", + unorm10_10_10_2="unorm10-10-10-2", +) + +#: * "vertex" +#: * "instance" +VertexStepMode = Enum( + "VertexStepMode", + vertex="vertex", + instance="instance", +) + +#: * "load" +#: * "clear" +LoadOp = Enum( + "LoadOp", + load="load", + clear="clear", +) + +#: * "store" +#: * "discard" +StoreOp = Enum( + "StoreOp", + store="store", + discard="discard", +) + +#: * "occlusion" +#: * "timestamp" +QueryType = Enum( + "QueryType", + occlusion="occlusion", + timestamp="timestamp", +) + +#: * "opaque" +#: * "premultiplied" +CanvasAlphaMode = Enum( + "CanvasAlphaMode", + opaque="opaque", + premultiplied="premultiplied", +) + +#: * "unknown" +#: * "destroyed" +DeviceLostReason = Enum( + "DeviceLostReason", + unknown="unknown", + 
destroyed="destroyed", +) + +#: * "validation" +#: * "out_of_memory" +#: * "internal" +ErrorFilter = Enum( + "ErrorFilter", + validation="validation", + out_of_memory="out-of-memory", + internal="internal", +) diff --git a/wgpu/flags.py b/wgpu/flags.py new file mode 100644 index 0000000..6b63b00 --- /dev/null +++ b/wgpu/flags.py @@ -0,0 +1,111 @@ +""" +These flags are defined in ``wgpu.flags``, but are also available from the root wgpu namespace. + +Flags are bitmasks; zero or multiple fields can be set at the same time. +Flags are integer bitmasks, but can also be passed as strings, so instead of +``wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST``, +one can also write ``"MAP_READ|COPY_DIST"``. +""" + +_use_sphinx_repr = False + + +class Flags: + def __init__(self, name, **kwargs): + self._name = name + for key, val in kwargs.items(): + setattr(self, key, val) + + def __iter__(self): + return iter([key for key in dir(self) if not key.startswith("_")]) + + def __repr__(self): + if _use_sphinx_repr: # no-cover + return "" + values = ", ".join(self) + return f"" + + +# CODE BELOW THIS POINT IS AUTOGENERATED - DO NOT EDIT + + +# There are 5 flags + +__all__ = [ + "BufferUsage", + "MapMode", + "TextureUsage", + "ShaderStage", + "ColorWrite", +] + + +#: * "MAP_READ" (1) +#: * "MAP_WRITE" (2) +#: * "COPY_SRC" (4) +#: * "COPY_DST" (8) +#: * "INDEX" (16) +#: * "VERTEX" (32) +#: * "UNIFORM" (64) +#: * "STORAGE" (128) +#: * "INDIRECT" (256) +#: * "QUERY_RESOLVE" (512) +BufferUsage = Flags( + "BufferUsage", + MAP_READ=1, + MAP_WRITE=2, + COPY_SRC=4, + COPY_DST=8, + INDEX=16, + VERTEX=32, + UNIFORM=64, + STORAGE=128, + INDIRECT=256, + QUERY_RESOLVE=512, +) + +#: * "READ" (1) +#: * "WRITE" (2) +MapMode = Flags( + "MapMode", + READ=1, + WRITE=2, +) + +#: * "COPY_SRC" (1) +#: * "COPY_DST" (2) +#: * "TEXTURE_BINDING" (4) +#: * "STORAGE_BINDING" (8) +#: * "RENDER_ATTACHMENT" (16) +TextureUsage = Flags( + "TextureUsage", + COPY_SRC=1, + COPY_DST=2, + TEXTURE_BINDING=4, + STORAGE_BINDING=8, + RENDER_ATTACHMENT=16, +) + +#: * "VERTEX" (1) +#: * "FRAGMENT" (2) +#: * "COMPUTE" (4) +ShaderStage = Flags( + "ShaderStage", + VERTEX=1, + FRAGMENT=2, + COMPUTE=4, +) + +#: * "RED" (1) +#: * "GREEN" (2) +#: * "BLUE" (4) +#: * "ALPHA" (8) +#: * "ALL" (15) +ColorWrite = Flags( + "ColorWrite", + RED=1, + GREEN=2, + BLUE=4, + ALPHA=8, + ALL=15, +) diff --git a/wgpu/gui/__init__.py b/wgpu/gui/__init__.py new file mode 100644 index 0000000..c959168 --- /dev/null +++ b/wgpu/gui/__init__.py @@ -0,0 +1,13 @@ +""" +Code to provide a canvas to render to. +""" + +from .base import WgpuCanvasInterface, WgpuCanvasBase, WgpuAutoGui # noqa: F401 +from .offscreen import WgpuOffscreenCanvasBase # noqa: F401 + +__all__ = [ + "WgpuCanvasInterface", + "WgpuCanvasBase", + "WgpuAutoGui", + "WgpuOffscreenCanvasBase", +] diff --git a/wgpu/gui/auto.py b/wgpu/gui/auto.py new file mode 100644 index 0000000..da30440 --- /dev/null +++ b/wgpu/gui/auto.py @@ -0,0 +1,106 @@ +""" +Automatic GUI backend selection. + +Right now we only chose between GLFW, Qt and Jupyter. We might add support +for e.g. wx later. Or we might decide to stick with these three. 
+""" + +__all__ = ["WgpuCanvas", "run", "call_later"] + +import importlib +import os +import sys + + +def is_jupyter(): + """Determine whether the user is executing in a Jupyter Notebook / Lab.""" + try: + ip = get_ipython() + if ip.has_trait("kernel"): + return True + else: + return False + except NameError: + return False + + +def _load_backend(backend_name): + """Load a gui backend by name.""" + if backend_name == "glfw": + from . import glfw as module # noqa + elif backend_name == "qt": + from . import qt as module # noqa + elif backend_name == "jupyter": + from . import jupyter as module # noqa + elif backend_name == "wx": + from . import wx as module # noqa + elif backend_name == "offscreen": + from . import offscreen as module # noqa + else: # no-cover + raise ImportError("Unknown wgpu gui backend: '{backend_name}'") + return module + + +def _auto_load_backend(): + """Decide on the gui backend automatically.""" + + # Backends to auto load, ordered by preference. Maps libname -> backend_name + gui_backends = { + "glfw": "glfw", + "PySide6": "qt", + "PyQt6": "qt", + "PySide2": "qt", + "PyQt5": "qt", + } + + # The module that we try to find + module = None + + # Any errors we come accross as we try to import the gui backends + errors = [] + + # Prefer a backend for which the lib is already imported + imported = [libname for libname in gui_backends if libname in sys.modules] + for libname in imported: + try: + module = _load_backend(gui_backends[libname]) + break + except Exception as err: + errors.append(err) + + # If no module found yet, try importing the lib, then import the backend + if not module: + for libname in gui_backends: + try: + importlib.import_module(libname) + except ModuleNotFoundError: + continue + try: + module = _load_backend(gui_backends[libname]) + break + except Exception as err: + errors.append(err) + + # If still nothing found, raise a useful error + if not module: + msg = "\n".join(str(err) for err in errors) + msg += "\n\n Could not find either glfw or Qt framework." + msg += "\n Install glfw using e.g. ``pip install -U glfw``," + msg += "\n or install a qt framework using e.g. ``pip install -U pyside6``." + if sys.platform.startswith("linux"): + msg += "\n You may also need to run the equivalent of ``apt install libglfw3``." + raise ImportError(msg) from None + + return module + + +# Triage +if os.environ.get("WGPU_FORCE_OFFSCREEN") == "true": + module = _load_backend("offscreen") +elif is_jupyter(): + module = _load_backend("jupyter") +else: + module = _auto_load_backend() + + +WgpuCanvas, run, call_later = module.WgpuCanvas, module.run, module.call_later diff --git a/wgpu/gui/base.py b/wgpu/gui/base.py new file mode 100644 index 0000000..559d0c2 --- /dev/null +++ b/wgpu/gui/base.py @@ -0,0 +1,417 @@ +import os +import sys +import time +import weakref +import logging +from contextlib import contextmanager +import ctypes.util +from collections import defaultdict + +from .._coreutils import error_message_hash + +logger = logging.getLogger("wgpu") + +err_hashes = {} + + +@contextmanager +def log_exception(kind): + """Context manager to log any exceptions, but only log a one-liner + for subsequent occurances of the same error to avoid spamming by + repeating errors in e.g. a draw function or event callback. 
+ """ + try: + yield + except Exception as err: + # Store exc info for postmortem debugging + exc_info = list(sys.exc_info()) + exc_info[2] = exc_info[2].tb_next # skip *this* function + sys.last_type, sys.last_value, sys.last_traceback = exc_info + # Show traceback, or a one-line summary + msg = str(err) + msgh = error_message_hash(msg) + if msgh not in err_hashes: + # Provide the exception, so the default logger prints a stacktrace. + # IDE's can get the exception from the root logger for PM debugging. + err_hashes[msgh] = 1 + logger.error(kind, exc_info=err) + else: + # We've seen this message before, return a one-liner instead. + err_hashes[msgh] = count = err_hashes[msgh] + 1 + msg = kind + ": " + msg.split("\n")[0].strip() + msg = msg if len(msg) <= 70 else msg[:69] + "…" + logger.error(msg + f" ({count})") + + +def weakbind(method): + """Replace a bound method with a callable object that stores the `self` using a weakref.""" + ref = weakref.ref(method.__self__) + class_func = method.__func__ + del method + + def proxy(*args, **kwargs): + self = ref() + if self is not None: + return class_func(self, *args, **kwargs) + + proxy.__name__ = class_func.__name__ + return proxy + + +class WgpuCanvasInterface: + """The minimal interface to be a valid canvas. + + Any object that implements these methods is a canvas that wgpu can work with. + The object does not even have to derive from this class. + + In most cases it's more convenient to subclass :class:`WgpuCanvasBase `. + """ + + def __init__(self, *args, **kwargs): + # The args/kwargs are there because we may be mixed with e.g. a Qt widget + super().__init__(*args, **kwargs) + self._canvas_context = None + + def get_window_id(self): + """Get the native window id. + + This is used to obtain a surface id, so that wgpu can render + to the region of the screen occupied by the canvas. + """ + raise NotImplementedError() + + def get_display_id(self): + """Get the native display id (Linux only). + + On Linux this is needed in addition to the window id to obtain + a surface id. The default implementation calls into the X11 lib + to get the display id. + """ + # Re-use to avoid creating loads of id's + if getattr(self, "_display_id", None) is not None: + return self._display_id + + if sys.platform.startswith("linux"): + is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() + if is_wayland: + raise NotImplementedError( + f"Cannot (yet) get display id on {self.__class__.__name__}." + ) + else: + x11 = ctypes.CDLL(ctypes.util.find_library("X11")) + x11.XOpenDisplay.restype = ctypes.c_void_p + self._display_id = x11.XOpenDisplay(None) + else: + raise RuntimeError(f"Cannot get display id on {sys.platform}.") + + return self._display_id + + def get_physical_size(self): + """Get the physical size of the canvas in integer pixels.""" + raise NotImplementedError() + + def get_context(self, kind="webgpu"): + """Get the ``GPUCanvasContext`` object corresponding to this canvas. + + The context is used to obtain a texture to render to, and to + present that texture to the canvas. This class provides a + default implementation to get the appropriate context. + + The ``kind`` argument is a remnant from the WebGPU spec and + must always be "webgpu". + """ + # Note that this function is analog to HtmlCanvas.getContext(), except + # here the only valid arg is 'webgpu', which is also made the default. 
+ assert kind == "webgpu" + if self._canvas_context is None: + # Get the active wgpu backend module + backend_module = sys.modules["wgpu"].gpu.__module__ + # Instantiate the context + PC = sys.modules[backend_module].GPUCanvasContext # noqa: N806 + self._canvas_context = PC(self) + return self._canvas_context + + +class WgpuCanvasBase(WgpuCanvasInterface): + """A convenient base canvas class. + + This class provides a uniform API and implements common + functionality, to increase consistency and reduce code duplication. + It is convenient (but not strictly necessary) for canvas classes + to inherit from this class (but all builtin canvases do). + + This class provides an API for scheduling draws (``request_draw()``) + and implements a mechanism to call the provided draw function + (``draw_frame()``) and then present the result to the canvas. + + This class also implements draw rate limiting, which can be set + with the ``max_fps`` attribute (default 30). For benchmarks you may + also want to set ``vsync`` to False. + """ + + def __init__(self, *args, max_fps=30, vsync=True, **kwargs): + super().__init__(*args, **kwargs) + self._last_draw_time = 0 + self._max_fps = float(max_fps) + self._vsync = bool(vsync) + + def __del__(self): + # On delete, we call the custom close method. + try: + self.close() + except Exception: + pass + # Since this is sometimes used in a multiple inheritance, the + # superclass may (or may not) have a __del__ method. + try: + super().__del__() + except Exception: + pass + + def draw_frame(self): + """The function that gets called at each draw. + + You can implement this method in a subclass, or set it via a + call to request_draw(). + """ + pass + + def request_draw(self, draw_function=None): + """Schedule a new draw event. + + This function does not perform a draw directly, but schedules + a draw event at a suitable moment in time. In the draw event + the draw function is called, and the resulting rendered image + is presented to screen. + + Arguments: + draw_function (callable or None): The function to set as the new draw + function. If not given or None, the last set draw function is used. + + """ + if draw_function is not None: + self.draw_frame = draw_function + self._request_draw() + + def _draw_frame_and_present(self): + """Draw the frame and present the result. + + Errors are logged to the "wgpu" logger. Should be called by the + subclass at an appropriate time. + """ + self._last_draw_time = time.perf_counter() + # Perform the user-defined drawing code. When this errors, + # we should report the error and then continue, otherwise we crash. + # Returns the result of the context's present() call or None. 
+ with log_exception("Draw error"): + self.draw_frame() + with log_exception("Present error"): + if self._canvas_context: + return self._canvas_context.present() + + def _get_draw_wait_time(self): + """Get time (in seconds) to wait until the next draw in order to honour max_fps.""" + now = time.perf_counter() + target_time = self._last_draw_time + 1.0 / self._max_fps + return max(0, target_time - now) + + # Methods that must be overloaded + + def get_pixel_ratio(self): + """Get the float ratio between logical and physical pixels.""" + raise NotImplementedError() + + def get_logical_size(self): + """Get the logical size in float pixels.""" + raise NotImplementedError() + + def get_physical_size(self): + """Get the physical size in integer pixels.""" + raise NotImplementedError() + + def set_logical_size(self, width, height): + """Set the window size (in logical pixels).""" + raise NotImplementedError() + + def close(self): + """Close the window.""" + pass + + def is_closed(self): + """Get whether the window is closed.""" + raise NotImplementedError() + + def _request_draw(self): + """GUI-specific implementation for ``request_draw()``. + + * This should invoke a new draw at a later time. + * The call itself should return directly. + * Multiple calls should result in a single new draw. + * Preferably the ``max_fps`` and ``vsync`` are honored. + """ + raise NotImplementedError() + + +class WgpuAutoGui: + """Mixin class for canvases implementing autogui. + + This class provides a common API for handling events and registering + event handlers. It adds to :class:`WgpuCanvasBase ` + that interactive examples and applications can be written in a + generic way (no-GUI specific code). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._last_event_time = 0 + self._pending_events = {} + self._event_handlers = defaultdict(set) + + def _get_event_wait_time(self): + """Calculate the time to wait for the next event dispatching. + + Used for rate-limited events. + """ + rate = 75 # events per second + now = time.perf_counter() + target_time = self._last_event_time + 1.0 / rate + return max(0, target_time - now) + + def _handle_event_rate_limited( + self, event, call_later_func, match_keys, accum_keys + ): + """Alternative `to handle_event()` for events that must be rate-limted. + + If any of the ``match_keys`` keys of the new event differ from the currently + pending event, the old event is dispatched now. The ``accum_keys`` keys of + the current and new event are added together (e.g. to accumulate wheel delta). + + The (accumulated) event is handled in the following cases: + * When the timer runs out. + * When a non-rate-limited event is dispatched. + * When a rate-limited event of the same type is scheduled + that has different match_keys (e.g. modifiers changes). + + Subclasses that use this method must use ``_handle_event_and_flush()`` + where they would otherwise call ``handle_event()``, to preserve event order. + """ + event_type = event["event_type"] + event.setdefault("time_stamp", time.perf_counter()) + # We may need to emit the old event. Otherwise, we need to update the new one. 
+ old = self._pending_events.get(event_type, None) + if old: + if any(event[key] != old[key] for key in match_keys): + self.handle_event(old) + else: + for key in accum_keys: + event[key] = old[key] + event[key] + # Make sure that we have scheduled a moment to handle events + if not self._pending_events: + call_later_func(self._get_event_wait_time(), self._handle_pending_events) + # Store the event object + self._pending_events[event_type] = event + + def _handle_event_and_flush(self, event): + """Call handle_event after flushing any pending (rate-limited) events.""" + event.setdefault("time_stamp", time.perf_counter()) + self._handle_pending_events() + self.handle_event(event) + + def _handle_pending_events(self): + """Handle any pending rate-limited events.""" + if self._pending_events: + events = self._pending_events.values() + self._last_event_time = time.perf_counter() + self._pending_events = {} + for ev in events: + self.handle_event(ev) + + def handle_event(self, event): + """Handle an incoming event. + + Subclasses can overload this method. Events include widget + resize, mouse/touch interaction, key events, and more. An event + is a dict with at least the key event_type. For details, see + https://jupyter-rfb.readthedocs.io/en/stable/events.html + + The default implementation dispatches the event to the + registered event handlers. + + Arguments: + event (dict): the event to handle. + """ + # Collect callbacks + event_type = event.get("event_type") + callbacks = self._event_handlers[event_type] | self._event_handlers["*"] + # Dispatch + for callback in callbacks: + with log_exception(f"Error during handling {event['event_type']} event"): + callback(event) + + def add_event_handler(self, *args): + """Register an event handler to receive events. + + Arguments: + callback (callable): The event handler. Must accept a single event argument. + *types (list of strings): A list of event types. + + For the available events, see + https://jupyter-rfb.readthedocs.io/en/stable/events.html. + + The callback is stored, so it can be a lambda or closure. This also + means that if a method is given, a reference to the object is held, + which may cause circular references or prevent the Python GC from + destroying that object. + + Example: + + .. code-block:: py + + def my_handler(event): + print(event) + + canvas.add_event_handler(my_handler, "pointer_up", "pointer_down") + + Can also be used as a decorator: + + .. code-block:: py + + @canvas.add_event_handler("pointer_up", "pointer_down") + def my_handler(event): + print(event) + + Catch 'm all: + + .. code-block:: py + + canvas.add_event_handler(my_handler, "*") + + """ + decorating = not callable(args[0]) + callback = None if decorating else args[0] + types = args if decorating else args[1:] + + if not types: + raise TypeError("No event types are given to add_event_handler.") + for type in types: + if not isinstance(type, str): + raise TypeError(f"Event types must be str, but got {type}") + + def decorator(_callback): + for type in types: + self._event_handlers[type].add(_callback) + return _callback + + if decorating: + return decorator + return decorator(callback) + + def remove_event_handler(self, callback, *types): + """Unregister an event handler. + + Arguments: + callback (callable): The event handler. + *types (list of strings): A list of event types. 
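+
+        Example (assumes ``my_handler`` was registered earlier with
+        ``add_event_handler()``):
+
+        .. code-block:: py
+
+            canvas.remove_event_handler(my_handler, "pointer_up", "pointer_down")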
+ """ + for type in types: + self._event_handlers[type].remove(callback) diff --git a/wgpu/gui/glfw.py b/wgpu/gui/glfw.py new file mode 100644 index 0000000..0679668 --- /dev/null +++ b/wgpu/gui/glfw.py @@ -0,0 +1,553 @@ +""" +Support to render in a glfw window. The advantage of glfw is that it's +very lightweight. + +Install pyGLFW using ``pip install glfw``. On Windows this is enough. +On Linux, install the glfw lib using ``sudo apt install libglfw3``, +or ``sudo apt install libglfw3-wayland`` when using Wayland. +""" + + +import os +import sys +import time +import weakref +import asyncio + +import glfw + +from .base import WgpuCanvasBase, WgpuAutoGui, weakbind + + +# Make sure that glfw is new enough +glfw_version_info = tuple(int(i) for i in glfw.__version__.split(".")[:2]) +if glfw_version_info < (1, 9): + raise ImportError("wgpu-py requires glfw 1.9 or higher.") + +# Do checks to prevent pitfalls on hybrid Xorg/Wayland systems +is_wayland = False +if sys.platform.startswith("linux"): + is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() + if is_wayland and not hasattr(glfw, "get_wayland_window"): + raise RuntimeError( + "We're on Wayland but Wayland functions not available. " + + "Did you apt install libglfw3-wayland?" + ) + +# Some glfw functions are not always available +set_window_content_scale_callback = lambda *args: None # noqa: E731 +set_window_maximize_callback = lambda *args: None # noqa: E731 +get_window_content_scale = lambda *args: (1, 1) # noqa: E731 + +if hasattr(glfw, "set_window_content_scale_callback"): + set_window_content_scale_callback = glfw.set_window_content_scale_callback +if hasattr(glfw, "set_window_maximize_callback"): + set_window_maximize_callback = glfw.set_window_maximize_callback +if hasattr(glfw, "get_window_content_scale"): + get_window_content_scale = glfw.get_window_content_scale + + +# Map keys to JS key definitions +# https://www.glfw.org/docs/3.3/group__keys.html +# https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key/Key_Values +KEY_MAP = { + glfw.KEY_DOWN: "ArrowDown", + glfw.KEY_UP: "ArrowUp", + glfw.KEY_LEFT: "ArrowLeft", + glfw.KEY_RIGHT: "ArrowRight", + glfw.KEY_BACKSPACE: "Backspace", + glfw.KEY_CAPS_LOCK: "CapsLock", + glfw.KEY_DELETE: "Delete", + glfw.KEY_END: "End", + glfw.KEY_ENTER: "Enter", # aka return + glfw.KEY_ESCAPE: "Escape", + glfw.KEY_F1: "F1", + glfw.KEY_F2: "F2", + glfw.KEY_F3: "F3", + glfw.KEY_F4: "F4", + glfw.KEY_F5: "F5", + glfw.KEY_F6: "F6", + glfw.KEY_F7: "F7", + glfw.KEY_F8: "F8", + glfw.KEY_F9: "F9", + glfw.KEY_F10: "F10", + glfw.KEY_F11: "F11", + glfw.KEY_F12: "F12", + glfw.KEY_HOME: "Home", + glfw.KEY_INSERT: "Insert", + glfw.KEY_LEFT_ALT: "Alt", + glfw.KEY_LEFT_CONTROL: "Control", + glfw.KEY_LEFT_SHIFT: "Shift", + glfw.KEY_LEFT_SUPER: "Meta", # in glfw super means Windows or MacOS-command + glfw.KEY_NUM_LOCK: "NumLock", + glfw.KEY_PAGE_DOWN: "PageDown", + glfw.KEY_PAGE_UP: "Pageup", + glfw.KEY_PAUSE: "Pause", + glfw.KEY_PRINT_SCREEN: "PrintScreen", + glfw.KEY_RIGHT_ALT: "Alt", + glfw.KEY_RIGHT_CONTROL: "Control", + glfw.KEY_RIGHT_SHIFT: "Shift", + glfw.KEY_RIGHT_SUPER: "Meta", + glfw.KEY_SCROLL_LOCK: "ScrollLock", + glfw.KEY_TAB: "Tab", +} + +KEY_MAP_MOD = { + glfw.KEY_LEFT_SHIFT: "Shift", + glfw.KEY_RIGHT_SHIFT: "Shift", + glfw.KEY_LEFT_CONTROL: "Control", + glfw.KEY_RIGHT_CONTROL: "Control", + glfw.KEY_LEFT_ALT: "Alt", + glfw.KEY_RIGHT_ALT: "Alt", + glfw.KEY_LEFT_SUPER: "Meta", + glfw.KEY_RIGHT_SUPER: "Meta", +} + + +class GlfwWgpuCanvas(WgpuAutoGui, WgpuCanvasBase): + """A glfw 
window providing a wgpu canvas.""" + + # See https://www.glfw.org/docs/latest/group__window.html + + def __init__(self, *, size=None, title=None, **kwargs): + ensure_app() + super().__init__(**kwargs) + + # Handle inputs + if not size: + size = 640, 480 + title = str(title or "glfw wgpu canvas") + + # Set window hints + glfw.window_hint(glfw.CLIENT_API, glfw.NO_API) + glfw.window_hint(glfw.RESIZABLE, True) + # see https://github.com/FlorianRhiem/pyGLFW/issues/42 + # Alternatively, from pyGLFW 1.10 one can set glfw.ERROR_REPORTING='warn' + if sys.platform.startswith("linux"): + if is_wayland: + glfw.window_hint(glfw.FOCUSED, False) # prevent Wayland focus error + + # Create the window (the initial size may not be in logical pixels) + self._window = glfw.create_window(int(size[0]), int(size[1]), title, None, None) + + # Other internal variables + self._need_draw = False + self._request_draw_timer_running = False + self._changing_pixel_ratio = False + self._is_minimized = False + + # Register ourselves + all_glfw_canvases.add(self) + + # Register callbacks. We may get notified too often, but that's + # ok, they'll result in a single draw. + glfw.set_framebuffer_size_callback(self._window, weakbind(self._on_size_change)) + glfw.set_window_close_callback(self._window, weakbind(self._check_close)) + glfw.set_window_refresh_callback(self._window, weakbind(self._on_window_dirty)) + glfw.set_window_focus_callback(self._window, weakbind(self._on_window_dirty)) + set_window_content_scale_callback( + self._window, weakbind(self._on_pixelratio_change) + ) + set_window_maximize_callback(self._window, weakbind(self._on_window_dirty)) + glfw.set_window_iconify_callback(self._window, weakbind(self._on_iconify)) + + # User input + self._key_modifiers = [] + self._pointer_buttons = [] + self._pointer_pos = 0, 0 + self._double_click_state = {"clicks": 0} + glfw.set_mouse_button_callback(self._window, weakbind(self._on_mouse_button)) + glfw.set_cursor_pos_callback(self._window, weakbind(self._on_cursor_pos)) + glfw.set_scroll_callback(self._window, weakbind(self._on_scroll)) + glfw.set_key_callback(self._window, weakbind(self._on_key)) + + # Initialize the size + self._pixel_ratio = -1 + self._screen_size_is_logical = False + self.set_logical_size(*size) + self._request_draw() + + # Callbacks to provide a minimal working canvas for wgpu + + def _on_pixelratio_change(self, *args): + if self._changing_pixel_ratio: + return + self._changing_pixel_ratio = True # prevent recursion (on Wayland) + try: + self._set_logical_size(self._logical_size) + finally: + self._changing_pixel_ratio = False + self._request_draw() + + def _on_size_change(self, *args): + self._determine_size() + self._request_draw() + + def _check_close(self, *args): + # Follow the close flow that glfw intended. + # This method can be overloaded and the close-flag can be set to False + # using set_window_should_close() if now is not a good time to close. 
+ if self._window is not None and glfw.window_should_close(self._window): + self._on_close() + + def _on_close(self, *args): + all_glfw_canvases.discard(self) + if self._window is not None: + glfw.destroy_window(self._window) # not just glfw.hide_window + self._window = None + self._handle_event_and_flush({"event_type": "close"}) + + def _on_window_dirty(self, *args): + self._request_draw() + + def _on_iconify(self, window, iconified): + self._is_minimized = bool(iconified) + + # helpers + + def _mark_ready_for_draw(self): + self._request_draw_timer_running = False + self._need_draw = True # The event loop looks at this flag + glfw.post_empty_event() # Awake the event loop, if it's in wait-mode + + def _determine_size(self): + if self._window is None: + return + # Because the value of get_window_size is in physical-pixels + # on some systems and in logical-pixels on other, we use the + # framebuffer size and pixel ratio to derive the logical size. + pixel_ratio = get_window_content_scale(self._window)[0] + psize = glfw.get_framebuffer_size(self._window) + psize = int(psize[0]), int(psize[1]) + + self._pixel_ratio = pixel_ratio + self._physical_size = psize + self._logical_size = psize[0] / pixel_ratio, psize[1] / pixel_ratio + + ev = { + "event_type": "resize", + "width": self._logical_size[0], + "height": self._logical_size[1], + "pixel_ratio": self._pixel_ratio, + } + self._handle_event_and_flush(ev) + + def _set_logical_size(self, new_logical_size): + if self._window is None: + return + # There is unclarity about the window size in "screen pixels". + # It appears that on Windows and X11 its the same as the + # framebuffer size, and on macOS it's logical pixels. + # See https://github.com/glfw/glfw/issues/845 + # Here, we simply do a quick test so we can compensate. + + # The current screen size and physical size, and its ratio + pixel_ratio = get_window_content_scale(self._window)[0] + ssize = glfw.get_window_size(self._window) + psize = glfw.get_framebuffer_size(self._window) + + # Apply + if is_wayland: + # Not sure why, but on Wayland things work differently + screen_ratio = ssize[0] / new_logical_size[0] + glfw.set_window_size( + self._window, + int(new_logical_size[0] / screen_ratio), + int(new_logical_size[1] / screen_ratio), + ) + else: + screen_ratio = ssize[0] / psize[0] + glfw.set_window_size( + self._window, + int(new_logical_size[0] * pixel_ratio * screen_ratio), + int(new_logical_size[1] * pixel_ratio * screen_ratio), + ) + self._screen_size_is_logical = screen_ratio != 1 + # If this causes the widget size to change, then _on_size_change will + # be called, but we may want force redetermining the size. 
+ if pixel_ratio != self._pixel_ratio: + self._determine_size() + + # API + + def get_window_id(self): + if sys.platform.startswith("win"): + return int(glfw.get_win32_window(self._window)) + elif sys.platform.startswith("darwin"): + return int(glfw.get_cocoa_window(self._window)) + elif sys.platform.startswith("linux"): + if is_wayland: + return int(glfw.get_wayland_window(self._window)) + else: + return int(glfw.get_x11_window(self._window)) + else: + raise RuntimeError(f"Cannot get GLFW window id on {sys.platform}.") + + def get_display_id(self): + if sys.platform.startswith("linux"): + if is_wayland: + return glfw.get_wayland_display() + else: + return glfw.get_x11_display() + else: + raise RuntimeError(f"Cannot get GLFW display id on {sys.platform}.") + + def get_pixel_ratio(self): + return self._pixel_ratio + + def get_logical_size(self): + return self._logical_size + + def get_physical_size(self): + return self._physical_size + + def set_logical_size(self, width, height): + if width < 0 or height < 0: + raise ValueError("Window width and height must not be negative") + self._set_logical_size((float(width), float(height))) + + def _request_draw(self): + if not self._request_draw_timer_running: + self._request_draw_timer_running = True + call_later(self._get_draw_wait_time(), self._mark_ready_for_draw) + + def close(self): + if self._window is not None: + glfw.set_window_should_close(self._window, True) + self._check_close() + + def is_closed(self): + return self._window is None + + # User events + + def _on_mouse_button(self, window, but, action, mods): + # Map button being changed, which we use to update self._pointer_buttons. + button_map = { + glfw.MOUSE_BUTTON_1: 1, # == MOUSE_BUTTON_LEFT + glfw.MOUSE_BUTTON_2: 2, # == MOUSE_BUTTON_RIGHT + glfw.MOUSE_BUTTON_3: 3, # == MOUSE_BUTTON_MIDDLE + glfw.MOUSE_BUTTON_4: 4, + glfw.MOUSE_BUTTON_5: 5, + glfw.MOUSE_BUTTON_6: 6, + glfw.MOUSE_BUTTON_7: 7, + glfw.MOUSE_BUTTON_8: 8, + } + button = button_map.get(but, 0) + + if action == glfw.PRESS: + event_type = "pointer_down" + buttons = set(self._pointer_buttons) + buttons.add(button) + self._pointer_buttons = list(sorted(buttons)) + elif action == glfw.RELEASE: + event_type = "pointer_up" + buttons = set(self._pointer_buttons) + buttons.discard(button) + self._pointer_buttons = list(sorted(buttons)) + else: + return + + ev = { + "event_type": event_type, + "x": self._pointer_pos[0], + "y": self._pointer_pos[1], + "button": button, + "buttons": list(self._pointer_buttons), + "modifiers": list(self._key_modifiers), + "ntouches": 0, # glfw dows not have touch support + "touches": {}, + } + + # Emit the current event + self._handle_event_and_flush(ev) + + # Maybe emit a double-click event + self._follow_double_click(action, button) + + def _follow_double_click(self, action, button): + # If a sequence of down-up-down-up is made in nearly the same + # spot, and within a short time, we emit the double-click event. + + x, y = self._pointer_pos[0], self._pointer_pos[1] + state = self._double_click_state + + timeout = 0.25 + distance = 5 + + # Clear the state if it does no longer match + if state["clicks"] > 0: + d = ((x - state["x"]) ** 2 + (y - state["y"]) ** 2) ** 0.5 + if ( + d > distance + or time.perf_counter() - state["time"] > timeout + or button != state["button"] + ): + self._double_click_state = {"clicks": 0} + + clicks = self._double_click_state["clicks"] + + # Check and update order. 
Emit event if we make it to the final step + if clicks == 0 and action == glfw.PRESS: + self._double_click_state = { + "clicks": 1, + "button": button, + "time": time.perf_counter(), + "x": x, + "y": y, + } + elif clicks == 1 and action == glfw.RELEASE: + self._double_click_state["clicks"] = 2 + elif clicks == 2 and action == glfw.PRESS: + self._double_click_state["clicks"] = 3 + elif clicks == 3 and action == glfw.RELEASE: + self._double_click_state = {"clicks": 0} + ev = { + "event_type": "double_click", + "x": self._pointer_pos[0], + "y": self._pointer_pos[1], + "button": button, + "buttons": list(self._pointer_buttons), + "modifiers": list(self._key_modifiers), + "ntouches": 0, # glfw dows not have touch support + "touches": {}, + } + self._handle_event_and_flush(ev) + + def _on_cursor_pos(self, window, x, y): + # Store pointer position in logical coordinates + if self._screen_size_is_logical: + self._pointer_pos = x, y + else: + self._pointer_pos = x / self._pixel_ratio, y / self._pixel_ratio + + ev = { + "event_type": "pointer_move", + "x": self._pointer_pos[0], + "y": self._pointer_pos[1], + "button": 0, + "buttons": list(self._pointer_buttons), + "modifiers": list(self._key_modifiers), + "ntouches": 0, # glfw dows not have touch support + "touches": {}, + } + + match_keys = {"buttons", "modifiers", "ntouches"} + accum_keys = {} + self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) + + def _on_scroll(self, window, dx, dy): + # wheel is 1 or -1 in glfw, in jupyter_rfb this is ~100 + ev = { + "event_type": "wheel", + "dx": 100.0 * dx, + "dy": -100.0 * dy, + "x": self._pointer_pos[0], + "y": self._pointer_pos[1], + "buttons": list(self._pointer_buttons), + "modifiers": list(self._key_modifiers), + } + match_keys = {"modifiers"} + accum_keys = {"dx", "dy"} + self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) + + def _on_key(self, window, key, scancode, action, mods): + modifier = KEY_MAP_MOD.get(key, None) + + if action == glfw.PRESS: + event_type = "key_down" + if modifier: + modifiers = set(self._key_modifiers) + modifiers.add(modifier) + self._key_modifiers = list(sorted(modifiers)) + elif action == glfw.RELEASE: + event_type = "key_up" + if modifier: + modifiers = set(self._key_modifiers) + modifiers.discard(modifier) + self._key_modifiers = list(sorted(modifiers)) + else: # glfw.REPEAT + return + + # Note that if the user holds shift while pressing "5", will result in "5", + # and not in the "%" that you'd expect on a US keyboard. Glfw wants us to + # use set_char_callback for text input, but then we'd only get an event for + # key presses (down followed by up). So we accept that GLFW is less complete + # in this respect. + if key in KEY_MAP: + keyname = KEY_MAP[key] + else: + try: + keyname = chr(key) + except ValueError: + return # Probably a special key that we don't have in our KEY_MAP + if "Shift" not in self._key_modifiers: + keyname = keyname.lower() + + ev = { + "event_type": event_type, + "key": keyname, + "modifiers": list(self._key_modifiers), + } + self._handle_event_and_flush(ev) + + +# Make available under a name that is the same for all gui backends +WgpuCanvas = GlfwWgpuCanvas + + +all_glfw_canvases = weakref.WeakSet() +glfw._pygfx_mainloop = None +glfw._pygfx_stop_if_no_more_canvases = False + + +def update_glfw_canvasses(): + """Call this in your glfw event loop to draw each canvas that needs + an update. Returns the number of visible canvases. 
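+
+    A minimal sketch of a hand-rolled event loop (assuming glfw is initialized
+    and at least one canvas has been created; the builtin ``run()`` does
+    something similar, but asynchronously):
+
+    .. code-block:: py
+
+        while update_glfw_canvasses():
+            glfw.poll_events()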
+ """ + # Note that _draw_frame_and_present already catches errors, it can + # only raise errors if the logging system fails. + canvases = tuple(all_glfw_canvases) + for canvas in canvases: + if canvas._need_draw and not canvas._is_minimized: + canvas._need_draw = False + canvas._draw_frame_and_present() + return len(canvases) + + +async def mainloop(): + loop = asyncio.get_event_loop() + while True: + n = update_glfw_canvasses() + if glfw._pygfx_stop_if_no_more_canvases and not n: + break + await asyncio.sleep(0.001) + glfw.poll_events() + loop.stop() + glfw.terminate() + + +def ensure_app(): + # It is safe to call init multiple times: + # "Additional calls to this function after successful initialization + # but before termination will return GLFW_TRUE immediately." + glfw.init() + if glfw._pygfx_mainloop is None: + loop = asyncio.get_event_loop() + glfw._pygfx_mainloop = mainloop() + loop.create_task(glfw._pygfx_mainloop) + + +def call_later(delay, callback, *args): + loop = asyncio.get_event_loop() + loop.call_later(delay, callback, *args) + + +def run(): + ensure_app() + loop = asyncio.get_event_loop() + if not loop.is_running(): + glfw._pygfx_stop_if_no_more_canvases = True + loop.run_forever() + else: + pass # Probably an interactive session diff --git a/wgpu/gui/jupyter.py b/wgpu/gui/jupyter.py new file mode 100644 index 0000000..905146f --- /dev/null +++ b/wgpu/gui/jupyter.py @@ -0,0 +1,137 @@ +""" +Support for rendering in a Jupyter widget. Provides a widget subclass that +can be used as cell output, or embedded in a ipywidgets gui. +""" + +from collections import defaultdict +import weakref +import asyncio + +from .offscreen import WgpuOffscreenCanvasBase +from .base import WgpuAutoGui + +import numpy as np +from jupyter_rfb import RemoteFrameBuffer +from IPython.display import display + + +pending_jupyter_canvases = [] + + +class JupyterWgpuCanvas(WgpuAutoGui, WgpuOffscreenCanvasBase, RemoteFrameBuffer): + """An ipywidgets widget providing a wgpu canvas. Needs the jupyter_rfb library.""" + + def __init__(self, *, size=None, title=None, **kwargs): + super().__init__(**kwargs) + + # Internal variables + self._pixel_ratio = 1 + self._logical_size = 0, 0 + self._is_closed = False + self._request_draw_timer_running = False + self._event_handlers = defaultdict(set) + + # Register so this can be display'ed when run() is called + pending_jupyter_canvases.append(weakref.ref(self)) + + # Initialize size + if size is not None: + self.set_logical_size(*size) + + # Implementation needed for RemoteFrameBuffer + + def handle_event(self, event): + event_type = event.get("event_type") + if event_type == "close": + self._is_closed = True + elif event_type == "resize": + self._pixel_ratio = event["pixel_ratio"] + self._logical_size = event["width"], event["height"] + + # No need to rate-limit the pointer_move and wheel events; + # they're already rate limited by jupyter_rfb in the client. + super().handle_event(event) + + def get_frame(self): + self._request_draw_timer_running = False + # The _draw_frame_and_present() does the drawing and then calls + # present_context.present(), which calls our present() method. + # The resuls is either a numpy array or None, and this matches + # with what this method is expected to return. 
+ return self._draw_frame_and_present() + + # Implementation needed for WgpuCanvasBase + + def get_pixel_ratio(self): + return self._pixel_ratio + + def get_logical_size(self): + return self._logical_size + + def get_physical_size(self): + return int(self._logical_size[0] * self._pixel_ratio), int( + self._logical_size[1] * self._pixel_ratio + ) + + def set_logical_size(self, width, height): + self.css_width = f"{width}px" + self.css_height = f"{height}px" + + def close(self): + RemoteFrameBuffer.close(self) + + def is_closed(self): + return self._is_closed + + def _request_draw(self): + if not self._request_draw_timer_running: + self._request_draw_timer_running = True + call_later(self._get_draw_wait_time(), RemoteFrameBuffer.request_draw, self) + + # Implementation needed for WgpuOffscreenCanvasBase + + def present(self, texture): + # This gets called at the end of a draw pass via offscreen.GPUCanvasContext + device = texture._device + size = texture.size + bytes_per_pixel = 4 + data = device.queue.read_texture( + { + "texture": texture, + "mip_level": 0, + "origin": (0, 0, 0), + }, + { + "offset": 0, + "bytes_per_row": bytes_per_pixel * size[0], + "rows_per_image": size[1], + }, + size, + ) + return np.frombuffer(data, np.uint8).reshape(size[1], size[0], 4) + + def get_preferred_format(self): + # Use a format that maps well to PNG: rgba8norm. Use srgb for + # perseptive color mapping. This is the common colorspace for + # e.g. png and jpg images. Most tools (browsers included) will + # blit the png to screen as-is, and a screen wants colors in srgb. + return "rgba8unorm-srgb" + + +# Make available under a name that is the same for all gui backends +WgpuCanvas = JupyterWgpuCanvas + + +def call_later(delay, callback, *args): + loop = asyncio.get_event_loop() + loop.call_later(delay, callback, *args) + + +def run(): + # Show all widgets that have been created so far. + # No need to actually start an event loop, since Jupyter already runs it. + canvases = [r() for r in pending_jupyter_canvases] + pending_jupyter_canvases.clear() + for w in canvases: + if w and not w.is_closed(): + display(w) diff --git a/wgpu/gui/offscreen.py b/wgpu/gui/offscreen.py new file mode 100644 index 0000000..7ea5b06 --- /dev/null +++ b/wgpu/gui/offscreen.py @@ -0,0 +1,244 @@ +import time + +from .. import classes, flags +from .base import WgpuCanvasBase, WgpuAutoGui + + +class GPUCanvasContext(classes.GPUCanvasContext): + """GPUCanvasContext subclass for rendering to an offscreen texture.""" + + # In this context implementation, we keep a ref to the texture, to keep + # it alive until at least until present() is called, and to be able to + # pass it to the canvas' present() method. Thereafter, the texture + # reference is removed. If there are no more references to it, it will + # be cleaned up. But if the offscreen canvas uses it for something, + # it'll simply stay alive longer. 
+
+    def __init__(self, canvas):
+        super().__init__(canvas)
+        self._config = None
+        self._texture = None
+
+    def configure(
+        self,
+        *,
+        device,
+        format,
+        usage=flags.TextureUsage.RENDER_ATTACHMENT | flags.TextureUsage.COPY_SRC,
+        view_formats=[],
+        color_space="srgb",
+        alpha_mode="opaque"
+    ):
+        if format is None:
+            format = self.get_preferred_format(device.adapter)
+        self._config = {
+            "device": device,
+            "format": format,
+            "usage": usage,
+            "width": 0,
+            "height": 0,
+            # "view_formats": xx,
+            # "color_space": xx,
+            # "alpha_mode": xx,
+        }
+
+    def unconfigure(self):
+        self._texture = None
+        self._config = None
+
+    def get_current_texture(self):
+        if not self._config:
+            raise RuntimeError(
+                "Canvas context must be configured before calling get_current_texture()."
+            )
+
+        width, height = self._get_canvas().get_physical_size()
+        width, height = max(width, 1), max(height, 1)
+
+        self._texture = self._config["device"].create_texture(
+            label="presentation-context",
+            size=(width, height, 1),
+            format=self._config["format"],
+            usage=self._config["usage"],
+        )
+        return self._texture
+
+    def present(self):
+        if not self._texture:
+            msg = "present() is called without a preceding call to "
+            msg += "get_current_texture(). Note that present() is usually "
+            msg += "called automatically after the draw function returns."
+            raise RuntimeError(msg)
+        else:
+            texture = self._texture
+            self._texture = None
+            return self._get_canvas().present(texture)
+
+    def get_preferred_format(self, adapter):
+        canvas = self._get_canvas()
+        if canvas:
+            return canvas.get_preferred_format()
+        else:
+            return "rgba8unorm-srgb"
+
+
+class WgpuOffscreenCanvasBase(WgpuCanvasBase):
+    """Base class for off-screen canvases.
+
+    It provides a custom context that renders to a texture instead of
+    a surface/screen. On each draw the resulting image is passed as a
+    texture to the ``present()`` method. Subclasses should (at least)
+    implement ``present()``.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+    def get_window_id(self):
+        """This canvas does not correspond to an on-screen window."""
+        return None
+
+    def get_context(self, kind="webgpu"):
+        """Get the GPUCanvasContext object to obtain a texture to render to."""
+        # Normally this creates a GPUCanvasContext object provided by
+        # the backend (e.g. wgpu-native), but here we use our own context.
+        assert kind == "webgpu"
+        if self._canvas_context is None:
+            self._canvas_context = GPUCanvasContext(self)
+        return self._canvas_context
+
+    def present(self, texture):
+        """Method that gets called at the end of each draw event.
+
+        The rendered image is represented by the texture argument.
+        Subclasses should overload this method and use the texture to
+        process the rendered image.
+
+        The texture is a new object at each draw, but is not explicitly
+        destroyed, so it can be used e.g. as a texture binding (subject
+        to set TextureUsage).
+        """
+        # Notes: Creating a new texture object for each draw is
+        # consistent with how real canvas contexts work, plus it avoids
+        # confusion of re-using the same texture except when the canvas
+        # changes size. For use-cases where you do want to render to the
+        # same texture one does not need the canvas API. E.g. in pygfx
+        # the renderer can also work with a target that is a (fixed
+        # size) texture.
+        pass
+
+    def get_preferred_format(self):
+        """Get the preferred format for this canvas.
+
+        This method can be overloaded to control the used texture
+        format. The default is "rgba8unorm-srgb".
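+
+        For example, a subclass could override it like so (illustrative only):
+
+        .. code-block:: py
+
+            def get_preferred_format(self):
+                return "rgba8unorm"  # same channels, but without srgb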
+ """ + # Use rgba because that order is more common for processing and storage. + # Use srgb because that's what how colors are usually expected to be. + # Use 8unorm because 8bit is enough (when using srgb). + return "rgba8unorm-srgb" + + +class WgpuManualOffscreenCanvas(WgpuAutoGui, WgpuOffscreenCanvasBase): + """An offscreen canvas intended for manual use. + + Call the ``.draw()`` method to perform a draw and get the result. + """ + + def __init__(self, *args, size=None, pixel_ratio=1, title=None, **kwargs): + super().__init__(*args, **kwargs) + self._logical_size = (float(size[0]), float(size[1])) if size else (640, 480) + self._pixel_ratio = pixel_ratio + self._title = title + self._closed = False + + def get_pixel_ratio(self): + return self._pixel_ratio + + def get_logical_size(self): + return self._logical_size + + def get_physical_size(self): + return int(self._logical_size[0] * self._pixel_ratio), int( + self._logical_size[1] * self._pixel_ratio + ) + + def set_logical_size(self, width, height): + self._logical_size = width, height + + def close(self): + self._closed = True + + def is_closed(self): + return self._closed + + def _request_draw(self): + # Deliberately a no-op, because people use .draw() instead. + pass + + def present(self, texture): + # This gets called at the end of a draw pass via GPUCanvasContext + device = texture._device + size = texture.size + bytes_per_pixel = 4 + data = device.queue.read_texture( + { + "texture": texture, + "mip_level": 0, + "origin": (0, 0, 0), + }, + { + "offset": 0, + "bytes_per_row": bytes_per_pixel * size[0], + "rows_per_image": size[1], + }, + size, + ) + + # Return as memory object to avoid numpy dependency + # Equivalent: np.frombuffer(data, np.uint8).reshape(size[1], size[0], 4) + return data.cast("B", (size[1], size[0], 4)) + + def draw(self): + """Perform a draw and get the resulting image. + + The image array is returned as an NxMx4 memoryview object. + This object can be converted to a numpy array (without copying data) + using ``np.asarray(arr)``. + """ + return self._draw_frame_and_present() + + +WgpuCanvas = WgpuManualOffscreenCanvas + + +# If we consider the use-cases for using this offscreen canvas: +# +# * Using wgpu.gui.auto in test-mode: in this case run() should not hang, +# and call_later should not cause lingering refs. +# * Using the offscreen canvas directly, in a script: in this case you +# do not have/want an event system. +# * Using the offscreen canvas in an evented app. In that case you already +# have an app with a specific event-loop (it might be PySide6 or +# something else entirely). +# +# In summary, we provide a call_later() and run() that behave pretty +# well for the first case. + +_pending_calls = [] + + +def call_later(delay, callback, *args): + # Note that this module never calls call_later() itself; request_draw() is a no-op. + etime = time.time() + delay + _pending_calls.append((etime, callback, args)) + + +def run(): + # Process pending calls + for etime, callback, args in _pending_calls.copy(): + if time.time() >= etime: + callback(*args) + + # Clear any leftover scheduled calls, to avoid lingering refs. + _pending_calls.clear() diff --git a/wgpu/gui/qt.py b/wgpu/gui/qt.py new file mode 100644 index 0000000..90cecd9 --- /dev/null +++ b/wgpu/gui/qt.py @@ -0,0 +1,430 @@ +""" +Support for rendering in a Qt widget. Provides a widget subclass that +can be used as a standalone window or in a larger GUI. 
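+
+A minimal usage sketch (assumes PySide6, but any supported Qt lib works the
+same way; ``draw_frame`` stands for your own draw function):
+
+.. code-block:: py
+
+    from PySide6 import QtWidgets  # import the Qt lib before wgpu.gui.qt
+    from wgpu.gui.qt import WgpuCanvas, run
+
+    app = QtWidgets.QApplication([])
+    canvas = WgpuCanvas(title="Example")
+    canvas.request_draw(draw_frame)
+    run()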
+""" + +import sys +import ctypes +import importlib + +from .base import WgpuCanvasBase, WgpuAutoGui, weakbind + + +# Select GUI toolkit +for libname in ("PySide6", "PyQt6", "PySide2", "PyQt5"): + if libname in sys.modules: + QtCore = importlib.import_module(".QtCore", libname) + QtWidgets = importlib.import_module(".QtWidgets", libname) + try: + WA_PaintOnScreen = QtCore.Qt.WidgetAttribute.WA_PaintOnScreen + WA_DeleteOnClose = QtCore.Qt.WidgetAttribute.WA_DeleteOnClose + PreciseTimer = QtCore.Qt.TimerType.PreciseTimer + KeyboardModifiers = QtCore.Qt.KeyboardModifier + FocusPolicy = QtCore.Qt.FocusPolicy + Keys = QtCore.Qt.Key + except AttributeError: + WA_PaintOnScreen = QtCore.Qt.WA_PaintOnScreen + WA_DeleteOnClose = QtCore.Qt.WA_DeleteOnClose + PreciseTimer = QtCore.Qt.PreciseTimer + KeyboardModifiers = QtCore.Qt + FocusPolicy = QtCore.Qt + Keys = QtCore.Qt + break +else: + raise ImportError( + "Before importing wgpu.gui.qt, import one of PySide6/PySide2/PyQt6/PyQt5 to select a Qt toolkit." + ) + + +# Get version +if libname.startswith("PySide"): + qt_version_info = QtCore.__version_info__ +else: + try: + qt_version_info = tuple(int(i) for i in QtCore.QT_VERSION_STR.split(".")[:3]) + except Exception: # Failsafe + qt_version_info = (0, 0, 0) + + +BUTTON_MAP = { + QtCore.Qt.MouseButton.LeftButton: 1, # == MOUSE_BUTTON_LEFT + QtCore.Qt.MouseButton.RightButton: 2, # == MOUSE_BUTTON_RIGHT + QtCore.Qt.MouseButton.MiddleButton: 3, # == MOUSE_BUTTON_MIDDLE + QtCore.Qt.MouseButton.BackButton: 4, + QtCore.Qt.MouseButton.ForwardButton: 5, + QtCore.Qt.MouseButton.TaskButton: 6, + QtCore.Qt.MouseButton.ExtraButton4: 7, + QtCore.Qt.MouseButton.ExtraButton5: 8, +} + +MODIFIERS_MAP = { + KeyboardModifiers.ShiftModifier: "Shift", + KeyboardModifiers.ControlModifier: "Control", + KeyboardModifiers.AltModifier: "Alt", + KeyboardModifiers.MetaModifier: "Meta", +} + +KEY_MAP = { + int(Keys.Key_Down): "ArrowDown", + int(Keys.Key_Up): "ArrowUp", + int(Keys.Key_Left): "ArrowLeft", + int(Keys.Key_Right): "ArrowRight", + int(Keys.Key_Backspace): "Backspace", + int(Keys.Key_CapsLock): "CapsLock", + int(Keys.Key_Delete): "Delete", + int(Keys.Key_End): "End", + int(Keys.Key_Enter): "Enter", + int(Keys.Key_Escape): "Escape", + int(Keys.Key_F1): "F1", + int(Keys.Key_F2): "F2", + int(Keys.Key_F3): "F3", + int(Keys.Key_F4): "F4", + int(Keys.Key_F5): "F5", + int(Keys.Key_F6): "F6", + int(Keys.Key_F7): "F7", + int(Keys.Key_F8): "F8", + int(Keys.Key_F9): "F9", + int(Keys.Key_F10): "F10", + int(Keys.Key_F11): "F11", + int(Keys.Key_F12): "F12", + int(Keys.Key_Home): "Home", + int(Keys.Key_Insert): "Insert", + int(Keys.Key_Alt): "Alt", + int(Keys.Key_Control): "Control", + int(Keys.Key_Shift): "Shift", + int(Keys.Key_Meta): "Meta", # meta maps to control in QT on macOS, and vice-versa + int(Keys.Key_NumLock): "NumLock", + int(Keys.Key_PageDown): "PageDown", + int(Keys.Key_PageUp): "Pageup", + int(Keys.Key_Pause): "Pause", + int(Keys.Key_ScrollLock): "ScrollLock", + int(Keys.Key_Tab): "Tab", +} + + +# Make Qt not ignore XDG_SESSION_TYPE +# is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() +# if is_wayland: +# os.environ["QT_QPA_PLATFORM"] = "wayland" + + +def enable_hidpi(): + """Enable high-res displays.""" + set_dpi_aware = qt_version_info < (6, 4) # Pyside + if set_dpi_aware: + try: + # See https://github.com/pyzo/pyzo/pull/700 why we seem to need both + # See https://github.com/pygfx/pygfx/issues/368 for high Qt versions + ctypes.windll.shcore.SetProcessDpiAwareness(1) # global dpi aware + 
ctypes.windll.shcore.SetProcessDpiAwareness(2) # per-monitor dpi aware + except Exception: + pass # fail on non-windows + try: + QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True) + except Exception: + pass # fail on older Qt's + + +# If you import this module, you want to use wgpu in a way that does not suck +# on high-res monitors. So we apply the minimal configuration to make this so. +# Most apps probably should also set AA_UseHighDpiPixmaps, but it's not +# needed for wgpu, so not our responsibility (some users may NOT want it set). +enable_hidpi() + + +class QWgpuWidget(WgpuAutoGui, WgpuCanvasBase, QtWidgets.QWidget): + """A QWidget representing a wgpu canvas that can be embedded in a Qt application.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # Configure how Qt renders this widget + self.setAttribute(WA_PaintOnScreen, True) + self.setAttribute(WA_DeleteOnClose, True) + self.setAutoFillBackground(False) + self.setMouseTracking(True) + self.setFocusPolicy(FocusPolicy.StrongFocus) + + # A timer for limiting fps + self._request_draw_timer = QtCore.QTimer() + self._request_draw_timer.setTimerType(PreciseTimer) + self._request_draw_timer.setSingleShot(True) + self._request_draw_timer.timeout.connect(self.update) + + def paintEngine(self): # noqa: N802 - this is a Qt method + # https://doc.qt.io/qt-5/qt.html#WidgetAttribute-enum WA_PaintOnScreen + return None + + def paintEvent(self, event): # noqa: N802 - this is a Qt method + self._draw_frame_and_present() + + # Methods that we add from wgpu (snake_case) + + def get_display_id(self): + # There is qx11info, but it is rarely available. + # https://doc.qt.io/qt-5/qx11info.html#display + return super().get_display_id() # uses X11 lib + + def get_window_id(self): + return int(self.winId()) + + def get_pixel_ratio(self): + # Observations: + # * On Win10 + PyQt5 the ratio is a whole number (175% becomes 2). + # * On Win10 + PyQt6 the ratio is correct (non-integer). + return self.devicePixelRatioF() + + def get_logical_size(self): + # Sizes in Qt are logical + lsize = self.width(), self.height() + return float(lsize[0]), float(lsize[1]) + + def get_physical_size(self): + # https://doc.qt.io/qt-5/qpaintdevice.html + # https://doc.qt.io/qt-5/highdpi.html + lsize = self.width(), self.height() + lsize = float(lsize[0]), float(lsize[1]) + ratio = self.devicePixelRatioF() + # When the ratio is not integer (qt6), we need to somehow round + # it. It turns out that we need to round it, but also add a + # small offset. Tested on Win10 with several different OS + # scales. Would be nice if we could ask Qt for the exact + # physical size! Not an issue on qt5, because ratio is always + # integer then. 
+ return round(lsize[0] * ratio + 0.01), round(lsize[1] * ratio + 0.01) + + def set_logical_size(self, width, height): + if width < 0 or height < 0: + raise ValueError("Window width and height must not be negative") + self.resize(width, height) # See comment on pixel ratio + + def _request_draw(self): + if not self._request_draw_timer.isActive(): + self._request_draw_timer.start(int(self._get_draw_wait_time() * 1000)) + + def close(self): + QtWidgets.QWidget.close(self) + + def is_closed(self): + return not self.isVisible() + + # User events to jupyter_rfb events + + def _key_event(self, event_type, event): + modifiers = [ + MODIFIERS_MAP[mod] + for mod in MODIFIERS_MAP.keys() + if mod & event.modifiers() + ] + + ev = { + "event_type": event_type, + "key": KEY_MAP.get(event.key(), event.text()), + "modifiers": modifiers, + } + self._handle_event_and_flush(ev) + + def keyPressEvent(self, event): # noqa: N802 + self._key_event("key_down", event) + + def keyReleaseEvent(self, event): # noqa: N802 + self._key_event("key_up", event) + + def _mouse_event(self, event_type, event, touches=True): + button = BUTTON_MAP.get(event.button(), 0) + buttons = [ + BUTTON_MAP[button] + for button in BUTTON_MAP.keys() + if button & event.buttons() + ] + + # For Qt on macOS Control and Meta are switched + modifiers = [ + MODIFIERS_MAP[mod] + for mod in MODIFIERS_MAP.keys() + if mod & event.modifiers() + ] + + ev = { + "event_type": event_type, + "x": event.pos().x(), + "y": event.pos().y(), + "button": button, + "buttons": buttons, + "modifiers": modifiers, + } + if touches: + ev.update( + { + "ntouches": 0, + "touches": {}, # TODO: Qt touch events + } + ) + + if event_type == "pointer_move": + match_keys = {"buttons", "modifiers", "ntouches"} + accum_keys = {} + self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) + else: + self._handle_event_and_flush(ev) + + def mousePressEvent(self, event): # noqa: N802 + self._mouse_event("pointer_down", event) + + def mouseMoveEvent(self, event): # noqa: N802 + self._mouse_event("pointer_move", event) + + def mouseReleaseEvent(self, event): # noqa: N802 + self._mouse_event("pointer_up", event) + + def mouseDoubleClickEvent(self, event): # noqa: N802 + super().mouseDoubleClickEvent(event) + self._mouse_event("double_click", event, touches=False) + + def wheelEvent(self, event): # noqa: N802 + # For Qt on macOS Control and Meta are switched + modifiers = [ + MODIFIERS_MAP[mod] + for mod in MODIFIERS_MAP.keys() + if mod & event.modifiers() + ] + buttons = [ + BUTTON_MAP[button] + for button in BUTTON_MAP.keys() + if button & event.buttons() + ] + + ev = { + "event_type": "wheel", + "dx": -event.angleDelta().x(), + "dy": -event.angleDelta().y(), + "x": event.position().x(), + "y": event.position().y(), + "buttons": buttons, + "modifiers": modifiers, + } + match_keys = {"modifiers"} + accum_keys = {"dx", "dy"} + self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) + + def resizeEvent(self, event): # noqa: N802 + ev = { + "event_type": "resize", + "width": float(event.size().width()), + "height": float(event.size().height()), + "pixel_ratio": self.get_pixel_ratio(), + } + self._handle_event_and_flush(ev) + + def closeEvent(self, event): # noqa: N802 + self._handle_event_and_flush({"event_type": "close"}) + + +class QWgpuCanvas(WgpuAutoGui, WgpuCanvasBase, QtWidgets.QWidget): + """A toplevel Qt widget providing a wgpu canvas.""" + + # Most of this is proxying stuff to the inner widget. 
+ # We cannot use a toplevel widget directly, otherwise the window + # size can be set to subpixel (logical) values, without being able to + # detect this. See https://github.com/pygfx/wgpu-py/pull/68 + + def __init__(self, *, size=None, title=None, max_fps=30, **kwargs): + # When using Qt, there needs to be an + # application before any widget is created + get_app() + + super().__init__(**kwargs) + + self.setAttribute(WA_DeleteOnClose, True) + self.set_logical_size(*(size or (640, 480))) + self.setWindowTitle(title or "qt wgpu canvas") + self.setMouseTracking(True) + + self._subwidget = QWgpuWidget(self, max_fps=max_fps) + self._subwidget.add_event_handler(weakbind(self.handle_event), "*") + + # Get the window id one time. For some reason this is needed + # to "activate" the canvas. Otherwise the viz is not shown if + # one does not provide canvas to request_adapter(). + # (AK: Cannot reproduce this now, what qtlib/os/versions was this on?) + self._subwidget.get_window_id() + + layout = QtWidgets.QHBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + self.setLayout(layout) + layout.addWidget(self._subwidget) + + self.show() + + # Qt methods + + def update(self): + super().update() + self._subwidget.update() + + # Methods that we add from wgpu (snake_case) + + @property + def draw_frame(self): + return self._subwidget.draw_frame + + @draw_frame.setter + def draw_frame(self, f): + self._subwidget.draw_frame = f + + def get_display_id(self): + return self._subwidget.get_display_id() + + def get_window_id(self): + return self._subwidget.get_window_id() + + def get_pixel_ratio(self): + return self._subwidget.get_pixel_ratio() + + def get_logical_size(self): + return self._subwidget.get_logical_size() + + def get_physical_size(self): + return self._subwidget.get_physical_size() + + def set_logical_size(self, width, height): + if width < 0 or height < 0: + raise ValueError("Window width and height must not be negative") + self.resize(width, height) # See comment on pixel ratio + + def _request_draw(self): + return self._subwidget._request_draw() + + def close(self): + self._subwidget.close() + QtWidgets.QWidget.close(self) + + def is_closed(self): + return not self.isVisible() + + # Methods that we need to explicitly delegate to the subwidget + + def get_context(self, *args, **kwargs): + return self._subwidget.get_context(*args, **kwargs) + + def request_draw(self, *args, **kwargs): + return self._subwidget.request_draw(*args, **kwargs) + + +# Make available under a name that is the same for all gui backends +WgpuWidget = QWgpuWidget +WgpuCanvas = QWgpuCanvas + + +def get_app(): + """Return global instance of Qt app instance or create one if not created yet.""" + return QtWidgets.QApplication.instance() or QtWidgets.QApplication([]) + + +def run(): + app = get_app() + app.exec() if hasattr(app, "exec") else app.exec_() + + +def call_later(delay, callback, *args): + QtCore.QTimer.singleShot(int(delay * 1000), lambda: callback(*args)) diff --git a/wgpu/gui/wx.py b/wgpu/gui/wx.py new file mode 100644 index 0000000..106d751 --- /dev/null +++ b/wgpu/gui/wx.py @@ -0,0 +1,176 @@ +""" +Support for rendering in a wxPython window. Provides a widget that +can be used as a standalone window or in a larger GUI. 
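+
+A minimal usage sketch (assumes wxPython is installed; this module does not
+define a ``run()`` helper, so the wx main loop is used directly):
+
+    import wx
+    from wgpu.gui.wx import WgpuCanvas
+
+    app = wx.App()
+    canvas = WgpuCanvas(title="wgpu with wx")
+    # ... set up a wgpu device and a draw function, then:
+    # canvas.request_draw(draw_frame)
+    app.MainLoop()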
+""" + +import ctypes + +from .base import WgpuCanvasBase, weakbind + +import wx + + +def enable_hidpi(): + """Enable high-res displays.""" + try: + ctypes.windll.shcore.SetProcessDpiAwareness(1) + ctypes.windll.shcore.SetProcessDpiAwareness(2) + except Exception: + pass # fail on non-windows + + +enable_hidpi() + + +class TimerWithCallback(wx.Timer): + def __init__(self, callback): + super().__init__() + self._callback = callback + + def Notify(self, *args): # noqa: N802 + try: + self._callback() + except RuntimeError: + pass # wrapped C/C++ object of type WxWgpuWindow has been deleted + + +class WxWgpuWindow(WgpuCanvasBase, wx.Window): + """A wx Window representing a wgpu canvas that can be embedded in a wx application.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # A timer for limiting fps + self._request_draw_timer = TimerWithCallback(self.Refresh) + + # We keep a timer to prevent draws during a resize. This prevents + # issues with mismatching present sizes during resizing (on Linux). + self._resize_timer = TimerWithCallback(self._on_resize_done) + self._draw_lock = False + + self.Bind(wx.EVT_PAINT, self.on_paint) + self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None) + self.Bind(wx.EVT_SIZE, self._on_resize) + + def on_paint(self, event): + dc = wx.PaintDC(self) # needed for wx + if not self._draw_lock: + self._draw_frame_and_present() + del dc + event.Skip() + + def _on_resize(self, *args): + self._draw_lock = True + self._resize_timer.Start(100, wx.TIMER_ONE_SHOT) + + def _on_resize_done(self, *args): + self._draw_lock = False + self._request_draw() + + # Methods that we add from wgpu + + def get_window_id(self): + return int(self.GetHandle()) + + def get_pixel_ratio(self): + # todo: this is not hidpi-ready (at least on win10) + # Observations: + # * On Win10 this always returns 1 - so hidpi is effectively broken + return self.GetContentScaleFactor() + + def get_logical_size(self): + lsize = self.Size[0], self.Size[1] + return float(lsize[0]), float(lsize[1]) + + def get_physical_size(self): + lsize = self.Size[0], self.Size[1] + lsize = float(lsize[0]), float(lsize[1]) + ratio = self.GetContentScaleFactor() + return round(lsize[0] * ratio + 0.01), round(lsize[1] * ratio + 0.01) + + def set_logical_size(self, width, height): + if width < 0 or height < 0: + raise ValueError("Window width and height must not be negative") + self.SetSize(width, height) + + def _request_draw(self): + # Despite the FPS limiting the delayed call to refresh solves + # that drawing only happens when the mouse is down, see #209. + if not self._request_draw_timer.IsRunning(): + self._request_draw_timer.Start( + self._get_draw_wait_time() * 1000, wx.TIMER_ONE_SHOT + ) + + def close(self): + self.Hide() + + def is_closed(self): + return not self.IsShown() + + +class WxWgpuCanvas(WgpuCanvasBase, wx.Frame): + """A toplevel wx Frame providing a wgpu canvas.""" + + # Most of this is proxying stuff to the inner widget. 
+
+    def __init__(self, *, parent=None, size=None, title=None, max_fps=30, **kwargs):
+        super().__init__(parent, **kwargs)
+
+        self.set_logical_size(*(size or (640, 480)))
+        self.SetTitle(title or "wx wgpu canvas")
+
+        self._subwidget = WxWgpuWindow(parent=self, max_fps=max_fps)
+        self._subwidget.add_event_handler(weakbind(self.handle_event), "*")
+        self.Bind(wx.EVT_CLOSE, lambda e: self.Destroy())
+
+        self.Show()
+
+    # wx methods
+
+    def Refresh(self):  # noqa: N802
+        super().Refresh()
+        self._subwidget.Refresh()
+
+    # Methods that we add from wgpu
+
+    def get_display_id(self):
+        return self._subwidget.get_display_id()
+
+    def get_window_id(self):
+        return self._subwidget.get_window_id()
+
+    def get_pixel_ratio(self):
+        return self._subwidget.get_pixel_ratio()
+
+    def get_logical_size(self):
+        return self._subwidget.get_logical_size()
+
+    def get_physical_size(self):
+        return self._subwidget.get_physical_size()
+
+    def set_logical_size(self, width, height):
+        if width < 0 or height < 0:
+            raise ValueError("Window width and height must not be negative")
+        self.SetSize(width, height)
+
+    def _request_draw(self):
+        return self._subwidget._request_draw()
+
+    def close(self):
+        # Use the wx method; this triggers the EVT_CLOSE handler bound above.
+        self.Close()
+
+    def is_closed(self):
+        return not self.IsShown()
+
+    # Methods that we need to explicitly delegate to the subwidget
+
+    def get_context(self, *args, **kwargs):
+        return self._subwidget.get_context(*args, **kwargs)
+
+    def request_draw(self, *args, **kwargs):
+        return self._subwidget.request_draw(*args, **kwargs)
+
+
+# Make available under a name that is the same for all gui backends
+WgpuWidget = WxWgpuWindow
+WgpuCanvas = WxWgpuCanvas
diff --git a/wgpu/resources/__init__.py b/wgpu/resources/__init__.py
new file mode 100644
index 0000000..eca5e0f
--- /dev/null
+++ b/wgpu/resources/__init__.py
@@ -0,0 +1,2 @@
+""" This module exists to have importlib.resources and setuptools recognize the folder as a module.
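+
+A minimal sketch of what this enables (assumes Python 3.9+ for ``files()``):
+
+    from importlib.resources import files
+
+    header_path = files("wgpu.resources") / "webgpu.h"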
+""" diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md new file mode 100644 index 0000000..240bb68 --- /dev/null +++ b/wgpu/resources/codegen_report.md @@ -0,0 +1,34 @@ +# Code generatation report +## Preparing +* The webgpu.idl defines 37 classes with 76 functions +* The webgpu.idl defines 5 flags, 33 enums, 59 structs +* The wgpu.h defines 198 functions +* The wgpu.h defines 7 flags, 50 enums, 92 structs +## Updating API +* Wrote 5 flags to flags.py +* Wrote 33 enums to enums.py +* Wrote 59 structs to structs.py +### Patching API for _classes.py +* Diffs for GPU: change get_preferred_canvas_format, change request_adapter, change request_adapter_async +* Diffs for GPUCanvasContext: add get_preferred_format, add present +* Diffs for GPUDevice: add adapter, add create_buffer_with_data, hide import_external_texture, hide lost, hide onuncapturederror, hide pop_error_scope, hide push_error_scope +* Diffs for GPUBuffer: add map_read, add map_write, add read_mapped, add write_mapped, hide get_mapped_range +* Diffs for GPUTexture: add size +* Diffs for GPUTextureView: add size, add texture +* Diffs for GPUQueue: add read_buffer, add read_texture, hide copy_external_image_to_texture +* Validated 37 classes, 112 methods, 43 properties +### Patching API for backends/wgpu_native/_api.py +* Validated 37 classes, 107 methods, 0 properties +## Validating backends/wgpu_native/_api.py +* Enum field TextureFormat.rgb10a2uint missing in wgpu.h +* Enum field StorageTextureAccess.read-only missing in wgpu.h +* Enum field StorageTextureAccess.read-write missing in wgpu.h +* Enum PipelineErrorReason missing in wgpu.h +* Enum AutoLayoutMode missing in wgpu.h +* Enum field VertexFormat.unorm10-10-10-2 missing in wgpu.h +* Enum CanvasAlphaMode missing in wgpu.h +* Enum field DeviceLostReason.unknown missing in wgpu.h +* Wrote 232 enum mappings and 47 struct-field mappings to wgpu_native/_mappings.py +* Validated 105 C function calls +* Not using 97 C functions +* Validated 75 C structs diff --git a/wgpu/resources/webgpu.h b/wgpu/resources/webgpu.h new file mode 100644 index 0000000..79c0bc0 --- /dev/null +++ b/wgpu/resources/webgpu.h @@ -0,0 +1,1803 @@ +// BSD 3-Clause License +// +// Copyright (c) 2019, "WebGPU native" developers +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +#ifndef WEBGPU_H_ +#define WEBGPU_H_ + +#if defined(WGPU_SHARED_LIBRARY) +# if defined(_WIN32) +# if defined(WGPU_IMPLEMENTATION) +# define WGPU_EXPORT __declspec(dllexport) +# else +# define WGPU_EXPORT __declspec(dllimport) +# endif +# else // defined(_WIN32) +# if defined(WGPU_IMPLEMENTATION) +# define WGPU_EXPORT __attribute__((visibility("default"))) +# else +# define WGPU_EXPORT +# endif +# endif // defined(_WIN32) +#else // defined(WGPU_SHARED_LIBRARY) +# define WGPU_EXPORT +#endif // defined(WGPU_SHARED_LIBRARY) + +#if !defined(WGPU_OBJECT_ATTRIBUTE) +#define WGPU_OBJECT_ATTRIBUTE +#endif +#if !defined(WGPU_ENUM_ATTRIBUTE) +#define WGPU_ENUM_ATTRIBUTE +#endif +#if !defined(WGPU_STRUCTURE_ATTRIBUTE) +#define WGPU_STRUCTURE_ATTRIBUTE +#endif +#if !defined(WGPU_FUNCTION_ATTRIBUTE) +#define WGPU_FUNCTION_ATTRIBUTE +#endif +#if !defined(WGPU_NULLABLE) +#define WGPU_NULLABLE +#endif + +#include +#include + +#define WGPU_ARRAY_LAYER_COUNT_UNDEFINED (0xffffffffUL) +#define WGPU_COPY_STRIDE_UNDEFINED (0xffffffffUL) +#define WGPU_LIMIT_U32_UNDEFINED (0xffffffffUL) +#define WGPU_LIMIT_U64_UNDEFINED (0xffffffffffffffffULL) +#define WGPU_MIP_LEVEL_COUNT_UNDEFINED (0xffffffffUL) +#define WGPU_QUERY_SET_INDEX_UNDEFINED (0xffffffffUL) +#define WGPU_WHOLE_MAP_SIZE SIZE_MAX +#define WGPU_WHOLE_SIZE (0xffffffffffffffffULL) + +typedef uint32_t WGPUFlags; +typedef uint32_t WGPUBool; + +typedef struct WGPUAdapterImpl* WGPUAdapter WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUBindGroupImpl* WGPUBindGroup WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUBindGroupLayoutImpl* WGPUBindGroupLayout WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUBufferImpl* WGPUBuffer WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUCommandBufferImpl* WGPUCommandBuffer WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUCommandEncoderImpl* WGPUCommandEncoder WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUComputePassEncoderImpl* WGPUComputePassEncoder WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUComputePipelineImpl* WGPUComputePipeline WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUDeviceImpl* WGPUDevice WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUInstanceImpl* WGPUInstance WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUPipelineLayoutImpl* WGPUPipelineLayout WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUQuerySetImpl* WGPUQuerySet WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUQueueImpl* WGPUQueue WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPURenderBundleImpl* WGPURenderBundle WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPURenderBundleEncoderImpl* WGPURenderBundleEncoder WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPURenderPassEncoderImpl* WGPURenderPassEncoder WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPURenderPipelineImpl* WGPURenderPipeline WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUSamplerImpl* WGPUSampler WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUShaderModuleImpl* WGPUShaderModule WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUSurfaceImpl* WGPUSurface WGPU_OBJECT_ATTRIBUTE; +typedef struct WGPUTextureImpl* WGPUTexture WGPU_OBJECT_ATTRIBUTE; +typedef struct 
WGPUTextureViewImpl* WGPUTextureView WGPU_OBJECT_ATTRIBUTE; + +// Structure forward declarations +struct WGPUAdapterProperties; +struct WGPUBindGroupEntry; +struct WGPUBlendComponent; +struct WGPUBufferBindingLayout; +struct WGPUBufferDescriptor; +struct WGPUColor; +struct WGPUCommandBufferDescriptor; +struct WGPUCommandEncoderDescriptor; +struct WGPUCompilationMessage; +struct WGPUComputePassTimestampWrites; +struct WGPUConstantEntry; +struct WGPUExtent3D; +struct WGPUInstanceDescriptor; +struct WGPULimits; +struct WGPUMultisampleState; +struct WGPUOrigin3D; +struct WGPUPipelineLayoutDescriptor; +struct WGPUPrimitiveDepthClipControl; +struct WGPUPrimitiveState; +struct WGPUQuerySetDescriptor; +struct WGPUQueueDescriptor; +struct WGPURenderBundleDescriptor; +struct WGPURenderBundleEncoderDescriptor; +struct WGPURenderPassDepthStencilAttachment; +struct WGPURenderPassDescriptorMaxDrawCount; +struct WGPURenderPassTimestampWrites; +struct WGPURequestAdapterOptions; +struct WGPUSamplerBindingLayout; +struct WGPUSamplerDescriptor; +struct WGPUShaderModuleCompilationHint; +struct WGPUShaderModuleSPIRVDescriptor; +struct WGPUShaderModuleWGSLDescriptor; +struct WGPUStencilFaceState; +struct WGPUStorageTextureBindingLayout; +struct WGPUSurfaceCapabilities; +struct WGPUSurfaceConfiguration; +struct WGPUSurfaceDescriptor; +struct WGPUSurfaceDescriptorFromAndroidNativeWindow; +struct WGPUSurfaceDescriptorFromCanvasHTMLSelector; +struct WGPUSurfaceDescriptorFromMetalLayer; +struct WGPUSurfaceDescriptorFromWaylandSurface; +struct WGPUSurfaceDescriptorFromWindowsHWND; +struct WGPUSurfaceDescriptorFromXcbWindow; +struct WGPUSurfaceDescriptorFromXlibWindow; +struct WGPUSurfaceTexture; +struct WGPUTextureBindingLayout; +struct WGPUTextureDataLayout; +struct WGPUTextureViewDescriptor; +struct WGPUVertexAttribute; +struct WGPUBindGroupDescriptor; +struct WGPUBindGroupLayoutEntry; +struct WGPUBlendState; +struct WGPUCompilationInfo; +struct WGPUComputePassDescriptor; +struct WGPUDepthStencilState; +struct WGPUImageCopyBuffer; +struct WGPUImageCopyTexture; +struct WGPUProgrammableStageDescriptor; +struct WGPURenderPassColorAttachment; +struct WGPURequiredLimits; +struct WGPUShaderModuleDescriptor; +struct WGPUSupportedLimits; +struct WGPUTextureDescriptor; +struct WGPUVertexBufferLayout; +struct WGPUBindGroupLayoutDescriptor; +struct WGPUColorTargetState; +struct WGPUComputePipelineDescriptor; +struct WGPUDeviceDescriptor; +struct WGPURenderPassDescriptor; +struct WGPUVertexState; +struct WGPUFragmentState; +struct WGPURenderPipelineDescriptor; + +typedef enum WGPUAdapterType { + WGPUAdapterType_DiscreteGPU = 0x00000000, + WGPUAdapterType_IntegratedGPU = 0x00000001, + WGPUAdapterType_CPU = 0x00000002, + WGPUAdapterType_Unknown = 0x00000003, + WGPUAdapterType_Force32 = 0x7FFFFFFF +} WGPUAdapterType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUAddressMode { + WGPUAddressMode_Repeat = 0x00000000, + WGPUAddressMode_MirrorRepeat = 0x00000001, + WGPUAddressMode_ClampToEdge = 0x00000002, + WGPUAddressMode_Force32 = 0x7FFFFFFF +} WGPUAddressMode WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUBackendType { + WGPUBackendType_Undefined = 0x00000000, + WGPUBackendType_Null = 0x00000001, + WGPUBackendType_WebGPU = 0x00000002, + WGPUBackendType_D3D11 = 0x00000003, + WGPUBackendType_D3D12 = 0x00000004, + WGPUBackendType_Metal = 0x00000005, + WGPUBackendType_Vulkan = 0x00000006, + WGPUBackendType_OpenGL = 0x00000007, + WGPUBackendType_OpenGLES = 0x00000008, + WGPUBackendType_Force32 = 0x7FFFFFFF +} WGPUBackendType WGPU_ENUM_ATTRIBUTE; + 
+typedef enum WGPUBlendFactor { + WGPUBlendFactor_Zero = 0x00000000, + WGPUBlendFactor_One = 0x00000001, + WGPUBlendFactor_Src = 0x00000002, + WGPUBlendFactor_OneMinusSrc = 0x00000003, + WGPUBlendFactor_SrcAlpha = 0x00000004, + WGPUBlendFactor_OneMinusSrcAlpha = 0x00000005, + WGPUBlendFactor_Dst = 0x00000006, + WGPUBlendFactor_OneMinusDst = 0x00000007, + WGPUBlendFactor_DstAlpha = 0x00000008, + WGPUBlendFactor_OneMinusDstAlpha = 0x00000009, + WGPUBlendFactor_SrcAlphaSaturated = 0x0000000A, + WGPUBlendFactor_Constant = 0x0000000B, + WGPUBlendFactor_OneMinusConstant = 0x0000000C, + WGPUBlendFactor_Force32 = 0x7FFFFFFF +} WGPUBlendFactor WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUBlendOperation { + WGPUBlendOperation_Add = 0x00000000, + WGPUBlendOperation_Subtract = 0x00000001, + WGPUBlendOperation_ReverseSubtract = 0x00000002, + WGPUBlendOperation_Min = 0x00000003, + WGPUBlendOperation_Max = 0x00000004, + WGPUBlendOperation_Force32 = 0x7FFFFFFF +} WGPUBlendOperation WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUBufferBindingType { + WGPUBufferBindingType_Undefined = 0x00000000, + WGPUBufferBindingType_Uniform = 0x00000001, + WGPUBufferBindingType_Storage = 0x00000002, + WGPUBufferBindingType_ReadOnlyStorage = 0x00000003, + WGPUBufferBindingType_Force32 = 0x7FFFFFFF +} WGPUBufferBindingType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUBufferMapAsyncStatus { + WGPUBufferMapAsyncStatus_Success = 0x00000000, + WGPUBufferMapAsyncStatus_ValidationError = 0x00000001, + WGPUBufferMapAsyncStatus_Unknown = 0x00000002, + WGPUBufferMapAsyncStatus_DeviceLost = 0x00000003, + WGPUBufferMapAsyncStatus_DestroyedBeforeCallback = 0x00000004, + WGPUBufferMapAsyncStatus_UnmappedBeforeCallback = 0x00000005, + WGPUBufferMapAsyncStatus_MappingAlreadyPending = 0x00000006, + WGPUBufferMapAsyncStatus_OffsetOutOfRange = 0x00000007, + WGPUBufferMapAsyncStatus_SizeOutOfRange = 0x00000008, + WGPUBufferMapAsyncStatus_Force32 = 0x7FFFFFFF +} WGPUBufferMapAsyncStatus WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUBufferMapState { + WGPUBufferMapState_Unmapped = 0x00000000, + WGPUBufferMapState_Pending = 0x00000001, + WGPUBufferMapState_Mapped = 0x00000002, + WGPUBufferMapState_Force32 = 0x7FFFFFFF +} WGPUBufferMapState WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUCompareFunction { + WGPUCompareFunction_Undefined = 0x00000000, + WGPUCompareFunction_Never = 0x00000001, + WGPUCompareFunction_Less = 0x00000002, + WGPUCompareFunction_LessEqual = 0x00000003, + WGPUCompareFunction_Greater = 0x00000004, + WGPUCompareFunction_GreaterEqual = 0x00000005, + WGPUCompareFunction_Equal = 0x00000006, + WGPUCompareFunction_NotEqual = 0x00000007, + WGPUCompareFunction_Always = 0x00000008, + WGPUCompareFunction_Force32 = 0x7FFFFFFF +} WGPUCompareFunction WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUCompilationInfoRequestStatus { + WGPUCompilationInfoRequestStatus_Success = 0x00000000, + WGPUCompilationInfoRequestStatus_Error = 0x00000001, + WGPUCompilationInfoRequestStatus_DeviceLost = 0x00000002, + WGPUCompilationInfoRequestStatus_Unknown = 0x00000003, + WGPUCompilationInfoRequestStatus_Force32 = 0x7FFFFFFF +} WGPUCompilationInfoRequestStatus WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUCompilationMessageType { + WGPUCompilationMessageType_Error = 0x00000000, + WGPUCompilationMessageType_Warning = 0x00000001, + WGPUCompilationMessageType_Info = 0x00000002, + WGPUCompilationMessageType_Force32 = 0x7FFFFFFF +} WGPUCompilationMessageType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUCompositeAlphaMode { + WGPUCompositeAlphaMode_Auto = 0x00000000, + WGPUCompositeAlphaMode_Opaque 
= 0x00000001, + WGPUCompositeAlphaMode_Premultiplied = 0x00000002, + WGPUCompositeAlphaMode_Unpremultiplied = 0x00000003, + WGPUCompositeAlphaMode_Inherit = 0x00000004, + WGPUCompositeAlphaMode_Force32 = 0x7FFFFFFF +} WGPUCompositeAlphaMode WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUCreatePipelineAsyncStatus { + WGPUCreatePipelineAsyncStatus_Success = 0x00000000, + WGPUCreatePipelineAsyncStatus_ValidationError = 0x00000001, + WGPUCreatePipelineAsyncStatus_InternalError = 0x00000002, + WGPUCreatePipelineAsyncStatus_DeviceLost = 0x00000003, + WGPUCreatePipelineAsyncStatus_DeviceDestroyed = 0x00000004, + WGPUCreatePipelineAsyncStatus_Unknown = 0x00000005, + WGPUCreatePipelineAsyncStatus_Force32 = 0x7FFFFFFF +} WGPUCreatePipelineAsyncStatus WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUCullMode { + WGPUCullMode_None = 0x00000000, + WGPUCullMode_Front = 0x00000001, + WGPUCullMode_Back = 0x00000002, + WGPUCullMode_Force32 = 0x7FFFFFFF +} WGPUCullMode WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUDeviceLostReason { + WGPUDeviceLostReason_Undefined = 0x00000000, + WGPUDeviceLostReason_Destroyed = 0x00000001, + WGPUDeviceLostReason_Force32 = 0x7FFFFFFF +} WGPUDeviceLostReason WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUErrorFilter { + WGPUErrorFilter_Validation = 0x00000000, + WGPUErrorFilter_OutOfMemory = 0x00000001, + WGPUErrorFilter_Internal = 0x00000002, + WGPUErrorFilter_Force32 = 0x7FFFFFFF +} WGPUErrorFilter WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUErrorType { + WGPUErrorType_NoError = 0x00000000, + WGPUErrorType_Validation = 0x00000001, + WGPUErrorType_OutOfMemory = 0x00000002, + WGPUErrorType_Internal = 0x00000003, + WGPUErrorType_Unknown = 0x00000004, + WGPUErrorType_DeviceLost = 0x00000005, + WGPUErrorType_Force32 = 0x7FFFFFFF +} WGPUErrorType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUFeatureName { + WGPUFeatureName_Undefined = 0x00000000, + WGPUFeatureName_DepthClipControl = 0x00000001, + WGPUFeatureName_Depth32FloatStencil8 = 0x00000002, + WGPUFeatureName_TimestampQuery = 0x00000003, + WGPUFeatureName_TextureCompressionBC = 0x00000004, + WGPUFeatureName_TextureCompressionETC2 = 0x00000005, + WGPUFeatureName_TextureCompressionASTC = 0x00000006, + WGPUFeatureName_IndirectFirstInstance = 0x00000007, + WGPUFeatureName_ShaderF16 = 0x00000008, + WGPUFeatureName_RG11B10UfloatRenderable = 0x00000009, + WGPUFeatureName_BGRA8UnormStorage = 0x0000000A, + WGPUFeatureName_Float32Filterable = 0x0000000B, + WGPUFeatureName_Force32 = 0x7FFFFFFF +} WGPUFeatureName WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUFilterMode { + WGPUFilterMode_Nearest = 0x00000000, + WGPUFilterMode_Linear = 0x00000001, + WGPUFilterMode_Force32 = 0x7FFFFFFF +} WGPUFilterMode WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUFrontFace { + WGPUFrontFace_CCW = 0x00000000, + WGPUFrontFace_CW = 0x00000001, + WGPUFrontFace_Force32 = 0x7FFFFFFF +} WGPUFrontFace WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUIndexFormat { + WGPUIndexFormat_Undefined = 0x00000000, + WGPUIndexFormat_Uint16 = 0x00000001, + WGPUIndexFormat_Uint32 = 0x00000002, + WGPUIndexFormat_Force32 = 0x7FFFFFFF +} WGPUIndexFormat WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPULoadOp { + WGPULoadOp_Undefined = 0x00000000, + WGPULoadOp_Clear = 0x00000001, + WGPULoadOp_Load = 0x00000002, + WGPULoadOp_Force32 = 0x7FFFFFFF +} WGPULoadOp WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUMipmapFilterMode { + WGPUMipmapFilterMode_Nearest = 0x00000000, + WGPUMipmapFilterMode_Linear = 0x00000001, + WGPUMipmapFilterMode_Force32 = 0x7FFFFFFF +} WGPUMipmapFilterMode WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUPowerPreference { + 
WGPUPowerPreference_Undefined = 0x00000000, + WGPUPowerPreference_LowPower = 0x00000001, + WGPUPowerPreference_HighPerformance = 0x00000002, + WGPUPowerPreference_Force32 = 0x7FFFFFFF +} WGPUPowerPreference WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUPresentMode { + WGPUPresentMode_Fifo = 0x00000000, + WGPUPresentMode_FifoRelaxed = 0x00000001, + WGPUPresentMode_Immediate = 0x00000002, + WGPUPresentMode_Mailbox = 0x00000003, + WGPUPresentMode_Force32 = 0x7FFFFFFF +} WGPUPresentMode WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUPrimitiveTopology { + WGPUPrimitiveTopology_PointList = 0x00000000, + WGPUPrimitiveTopology_LineList = 0x00000001, + WGPUPrimitiveTopology_LineStrip = 0x00000002, + WGPUPrimitiveTopology_TriangleList = 0x00000003, + WGPUPrimitiveTopology_TriangleStrip = 0x00000004, + WGPUPrimitiveTopology_Force32 = 0x7FFFFFFF +} WGPUPrimitiveTopology WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUQueryType { + WGPUQueryType_Occlusion = 0x00000000, + WGPUQueryType_Timestamp = 0x00000001, + WGPUQueryType_Force32 = 0x7FFFFFFF +} WGPUQueryType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUQueueWorkDoneStatus { + WGPUQueueWorkDoneStatus_Success = 0x00000000, + WGPUQueueWorkDoneStatus_Error = 0x00000001, + WGPUQueueWorkDoneStatus_Unknown = 0x00000002, + WGPUQueueWorkDoneStatus_DeviceLost = 0x00000003, + WGPUQueueWorkDoneStatus_Force32 = 0x7FFFFFFF +} WGPUQueueWorkDoneStatus WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPURequestAdapterStatus { + WGPURequestAdapterStatus_Success = 0x00000000, + WGPURequestAdapterStatus_Unavailable = 0x00000001, + WGPURequestAdapterStatus_Error = 0x00000002, + WGPURequestAdapterStatus_Unknown = 0x00000003, + WGPURequestAdapterStatus_Force32 = 0x7FFFFFFF +} WGPURequestAdapterStatus WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPURequestDeviceStatus { + WGPURequestDeviceStatus_Success = 0x00000000, + WGPURequestDeviceStatus_Error = 0x00000001, + WGPURequestDeviceStatus_Unknown = 0x00000002, + WGPURequestDeviceStatus_Force32 = 0x7FFFFFFF +} WGPURequestDeviceStatus WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUSType { + WGPUSType_Invalid = 0x00000000, + WGPUSType_SurfaceDescriptorFromMetalLayer = 0x00000001, + WGPUSType_SurfaceDescriptorFromWindowsHWND = 0x00000002, + WGPUSType_SurfaceDescriptorFromXlibWindow = 0x00000003, + WGPUSType_SurfaceDescriptorFromCanvasHTMLSelector = 0x00000004, + WGPUSType_ShaderModuleSPIRVDescriptor = 0x00000005, + WGPUSType_ShaderModuleWGSLDescriptor = 0x00000006, + WGPUSType_PrimitiveDepthClipControl = 0x00000007, + WGPUSType_SurfaceDescriptorFromWaylandSurface = 0x00000008, + WGPUSType_SurfaceDescriptorFromAndroidNativeWindow = 0x00000009, + WGPUSType_SurfaceDescriptorFromXcbWindow = 0x0000000A, + WGPUSType_RenderPassDescriptorMaxDrawCount = 0x0000000F, + WGPUSType_Force32 = 0x7FFFFFFF +} WGPUSType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUSamplerBindingType { + WGPUSamplerBindingType_Undefined = 0x00000000, + WGPUSamplerBindingType_Filtering = 0x00000001, + WGPUSamplerBindingType_NonFiltering = 0x00000002, + WGPUSamplerBindingType_Comparison = 0x00000003, + WGPUSamplerBindingType_Force32 = 0x7FFFFFFF +} WGPUSamplerBindingType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUStencilOperation { + WGPUStencilOperation_Keep = 0x00000000, + WGPUStencilOperation_Zero = 0x00000001, + WGPUStencilOperation_Replace = 0x00000002, + WGPUStencilOperation_Invert = 0x00000003, + WGPUStencilOperation_IncrementClamp = 0x00000004, + WGPUStencilOperation_DecrementClamp = 0x00000005, + WGPUStencilOperation_IncrementWrap = 0x00000006, + WGPUStencilOperation_DecrementWrap = 0x00000007, + 
WGPUStencilOperation_Force32 = 0x7FFFFFFF +} WGPUStencilOperation WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUStorageTextureAccess { + WGPUStorageTextureAccess_Undefined = 0x00000000, + WGPUStorageTextureAccess_WriteOnly = 0x00000001, + WGPUStorageTextureAccess_Force32 = 0x7FFFFFFF +} WGPUStorageTextureAccess WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUStoreOp { + WGPUStoreOp_Undefined = 0x00000000, + WGPUStoreOp_Store = 0x00000001, + WGPUStoreOp_Discard = 0x00000002, + WGPUStoreOp_Force32 = 0x7FFFFFFF +} WGPUStoreOp WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUSurfaceGetCurrentTextureStatus { + WGPUSurfaceGetCurrentTextureStatus_Success = 0x00000000, + WGPUSurfaceGetCurrentTextureStatus_Timeout = 0x00000001, + WGPUSurfaceGetCurrentTextureStatus_Outdated = 0x00000002, + WGPUSurfaceGetCurrentTextureStatus_Lost = 0x00000003, + WGPUSurfaceGetCurrentTextureStatus_OutOfMemory = 0x00000004, + WGPUSurfaceGetCurrentTextureStatus_DeviceLost = 0x00000005, + WGPUSurfaceGetCurrentTextureStatus_Force32 = 0x7FFFFFFF +} WGPUSurfaceGetCurrentTextureStatus WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUTextureAspect { + WGPUTextureAspect_All = 0x00000000, + WGPUTextureAspect_StencilOnly = 0x00000001, + WGPUTextureAspect_DepthOnly = 0x00000002, + WGPUTextureAspect_Force32 = 0x7FFFFFFF +} WGPUTextureAspect WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUTextureDimension { + WGPUTextureDimension_1D = 0x00000000, + WGPUTextureDimension_2D = 0x00000001, + WGPUTextureDimension_3D = 0x00000002, + WGPUTextureDimension_Force32 = 0x7FFFFFFF +} WGPUTextureDimension WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUTextureFormat { + WGPUTextureFormat_Undefined = 0x00000000, + WGPUTextureFormat_R8Unorm = 0x00000001, + WGPUTextureFormat_R8Snorm = 0x00000002, + WGPUTextureFormat_R8Uint = 0x00000003, + WGPUTextureFormat_R8Sint = 0x00000004, + WGPUTextureFormat_R16Uint = 0x00000005, + WGPUTextureFormat_R16Sint = 0x00000006, + WGPUTextureFormat_R16Float = 0x00000007, + WGPUTextureFormat_RG8Unorm = 0x00000008, + WGPUTextureFormat_RG8Snorm = 0x00000009, + WGPUTextureFormat_RG8Uint = 0x0000000A, + WGPUTextureFormat_RG8Sint = 0x0000000B, + WGPUTextureFormat_R32Float = 0x0000000C, + WGPUTextureFormat_R32Uint = 0x0000000D, + WGPUTextureFormat_R32Sint = 0x0000000E, + WGPUTextureFormat_RG16Uint = 0x0000000F, + WGPUTextureFormat_RG16Sint = 0x00000010, + WGPUTextureFormat_RG16Float = 0x00000011, + WGPUTextureFormat_RGBA8Unorm = 0x00000012, + WGPUTextureFormat_RGBA8UnormSrgb = 0x00000013, + WGPUTextureFormat_RGBA8Snorm = 0x00000014, + WGPUTextureFormat_RGBA8Uint = 0x00000015, + WGPUTextureFormat_RGBA8Sint = 0x00000016, + WGPUTextureFormat_BGRA8Unorm = 0x00000017, + WGPUTextureFormat_BGRA8UnormSrgb = 0x00000018, + WGPUTextureFormat_RGB10A2Unorm = 0x00000019, + WGPUTextureFormat_RG11B10Ufloat = 0x0000001A, + WGPUTextureFormat_RGB9E5Ufloat = 0x0000001B, + WGPUTextureFormat_RG32Float = 0x0000001C, + WGPUTextureFormat_RG32Uint = 0x0000001D, + WGPUTextureFormat_RG32Sint = 0x0000001E, + WGPUTextureFormat_RGBA16Uint = 0x0000001F, + WGPUTextureFormat_RGBA16Sint = 0x00000020, + WGPUTextureFormat_RGBA16Float = 0x00000021, + WGPUTextureFormat_RGBA32Float = 0x00000022, + WGPUTextureFormat_RGBA32Uint = 0x00000023, + WGPUTextureFormat_RGBA32Sint = 0x00000024, + WGPUTextureFormat_Stencil8 = 0x00000025, + WGPUTextureFormat_Depth16Unorm = 0x00000026, + WGPUTextureFormat_Depth24Plus = 0x00000027, + WGPUTextureFormat_Depth24PlusStencil8 = 0x00000028, + WGPUTextureFormat_Depth32Float = 0x00000029, + WGPUTextureFormat_Depth32FloatStencil8 = 0x0000002A, + WGPUTextureFormat_BC1RGBAUnorm = 
0x0000002B, + WGPUTextureFormat_BC1RGBAUnormSrgb = 0x0000002C, + WGPUTextureFormat_BC2RGBAUnorm = 0x0000002D, + WGPUTextureFormat_BC2RGBAUnormSrgb = 0x0000002E, + WGPUTextureFormat_BC3RGBAUnorm = 0x0000002F, + WGPUTextureFormat_BC3RGBAUnormSrgb = 0x00000030, + WGPUTextureFormat_BC4RUnorm = 0x00000031, + WGPUTextureFormat_BC4RSnorm = 0x00000032, + WGPUTextureFormat_BC5RGUnorm = 0x00000033, + WGPUTextureFormat_BC5RGSnorm = 0x00000034, + WGPUTextureFormat_BC6HRGBUfloat = 0x00000035, + WGPUTextureFormat_BC6HRGBFloat = 0x00000036, + WGPUTextureFormat_BC7RGBAUnorm = 0x00000037, + WGPUTextureFormat_BC7RGBAUnormSrgb = 0x00000038, + WGPUTextureFormat_ETC2RGB8Unorm = 0x00000039, + WGPUTextureFormat_ETC2RGB8UnormSrgb = 0x0000003A, + WGPUTextureFormat_ETC2RGB8A1Unorm = 0x0000003B, + WGPUTextureFormat_ETC2RGB8A1UnormSrgb = 0x0000003C, + WGPUTextureFormat_ETC2RGBA8Unorm = 0x0000003D, + WGPUTextureFormat_ETC2RGBA8UnormSrgb = 0x0000003E, + WGPUTextureFormat_EACR11Unorm = 0x0000003F, + WGPUTextureFormat_EACR11Snorm = 0x00000040, + WGPUTextureFormat_EACRG11Unorm = 0x00000041, + WGPUTextureFormat_EACRG11Snorm = 0x00000042, + WGPUTextureFormat_ASTC4x4Unorm = 0x00000043, + WGPUTextureFormat_ASTC4x4UnormSrgb = 0x00000044, + WGPUTextureFormat_ASTC5x4Unorm = 0x00000045, + WGPUTextureFormat_ASTC5x4UnormSrgb = 0x00000046, + WGPUTextureFormat_ASTC5x5Unorm = 0x00000047, + WGPUTextureFormat_ASTC5x5UnormSrgb = 0x00000048, + WGPUTextureFormat_ASTC6x5Unorm = 0x00000049, + WGPUTextureFormat_ASTC6x5UnormSrgb = 0x0000004A, + WGPUTextureFormat_ASTC6x6Unorm = 0x0000004B, + WGPUTextureFormat_ASTC6x6UnormSrgb = 0x0000004C, + WGPUTextureFormat_ASTC8x5Unorm = 0x0000004D, + WGPUTextureFormat_ASTC8x5UnormSrgb = 0x0000004E, + WGPUTextureFormat_ASTC8x6Unorm = 0x0000004F, + WGPUTextureFormat_ASTC8x6UnormSrgb = 0x00000050, + WGPUTextureFormat_ASTC8x8Unorm = 0x00000051, + WGPUTextureFormat_ASTC8x8UnormSrgb = 0x00000052, + WGPUTextureFormat_ASTC10x5Unorm = 0x00000053, + WGPUTextureFormat_ASTC10x5UnormSrgb = 0x00000054, + WGPUTextureFormat_ASTC10x6Unorm = 0x00000055, + WGPUTextureFormat_ASTC10x6UnormSrgb = 0x00000056, + WGPUTextureFormat_ASTC10x8Unorm = 0x00000057, + WGPUTextureFormat_ASTC10x8UnormSrgb = 0x00000058, + WGPUTextureFormat_ASTC10x10Unorm = 0x00000059, + WGPUTextureFormat_ASTC10x10UnormSrgb = 0x0000005A, + WGPUTextureFormat_ASTC12x10Unorm = 0x0000005B, + WGPUTextureFormat_ASTC12x10UnormSrgb = 0x0000005C, + WGPUTextureFormat_ASTC12x12Unorm = 0x0000005D, + WGPUTextureFormat_ASTC12x12UnormSrgb = 0x0000005E, + WGPUTextureFormat_Force32 = 0x7FFFFFFF +} WGPUTextureFormat WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUTextureSampleType { + WGPUTextureSampleType_Undefined = 0x00000000, + WGPUTextureSampleType_Float = 0x00000001, + WGPUTextureSampleType_UnfilterableFloat = 0x00000002, + WGPUTextureSampleType_Depth = 0x00000003, + WGPUTextureSampleType_Sint = 0x00000004, + WGPUTextureSampleType_Uint = 0x00000005, + WGPUTextureSampleType_Force32 = 0x7FFFFFFF +} WGPUTextureSampleType WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUTextureViewDimension { + WGPUTextureViewDimension_Undefined = 0x00000000, + WGPUTextureViewDimension_1D = 0x00000001, + WGPUTextureViewDimension_2D = 0x00000002, + WGPUTextureViewDimension_2DArray = 0x00000003, + WGPUTextureViewDimension_Cube = 0x00000004, + WGPUTextureViewDimension_CubeArray = 0x00000005, + WGPUTextureViewDimension_3D = 0x00000006, + WGPUTextureViewDimension_Force32 = 0x7FFFFFFF +} WGPUTextureViewDimension WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUVertexFormat { + WGPUVertexFormat_Undefined = 0x00000000, + 
WGPUVertexFormat_Uint8x2 = 0x00000001, + WGPUVertexFormat_Uint8x4 = 0x00000002, + WGPUVertexFormat_Sint8x2 = 0x00000003, + WGPUVertexFormat_Sint8x4 = 0x00000004, + WGPUVertexFormat_Unorm8x2 = 0x00000005, + WGPUVertexFormat_Unorm8x4 = 0x00000006, + WGPUVertexFormat_Snorm8x2 = 0x00000007, + WGPUVertexFormat_Snorm8x4 = 0x00000008, + WGPUVertexFormat_Uint16x2 = 0x00000009, + WGPUVertexFormat_Uint16x4 = 0x0000000A, + WGPUVertexFormat_Sint16x2 = 0x0000000B, + WGPUVertexFormat_Sint16x4 = 0x0000000C, + WGPUVertexFormat_Unorm16x2 = 0x0000000D, + WGPUVertexFormat_Unorm16x4 = 0x0000000E, + WGPUVertexFormat_Snorm16x2 = 0x0000000F, + WGPUVertexFormat_Snorm16x4 = 0x00000010, + WGPUVertexFormat_Float16x2 = 0x00000011, + WGPUVertexFormat_Float16x4 = 0x00000012, + WGPUVertexFormat_Float32 = 0x00000013, + WGPUVertexFormat_Float32x2 = 0x00000014, + WGPUVertexFormat_Float32x3 = 0x00000015, + WGPUVertexFormat_Float32x4 = 0x00000016, + WGPUVertexFormat_Uint32 = 0x00000017, + WGPUVertexFormat_Uint32x2 = 0x00000018, + WGPUVertexFormat_Uint32x3 = 0x00000019, + WGPUVertexFormat_Uint32x4 = 0x0000001A, + WGPUVertexFormat_Sint32 = 0x0000001B, + WGPUVertexFormat_Sint32x2 = 0x0000001C, + WGPUVertexFormat_Sint32x3 = 0x0000001D, + WGPUVertexFormat_Sint32x4 = 0x0000001E, + WGPUVertexFormat_Force32 = 0x7FFFFFFF +} WGPUVertexFormat WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUVertexStepMode { + WGPUVertexStepMode_Vertex = 0x00000000, + WGPUVertexStepMode_Instance = 0x00000001, + WGPUVertexStepMode_VertexBufferNotUsed = 0x00000002, + WGPUVertexStepMode_Force32 = 0x7FFFFFFF +} WGPUVertexStepMode WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUBufferUsage { + WGPUBufferUsage_None = 0x00000000, + WGPUBufferUsage_MapRead = 0x00000001, + WGPUBufferUsage_MapWrite = 0x00000002, + WGPUBufferUsage_CopySrc = 0x00000004, + WGPUBufferUsage_CopyDst = 0x00000008, + WGPUBufferUsage_Index = 0x00000010, + WGPUBufferUsage_Vertex = 0x00000020, + WGPUBufferUsage_Uniform = 0x00000040, + WGPUBufferUsage_Storage = 0x00000080, + WGPUBufferUsage_Indirect = 0x00000100, + WGPUBufferUsage_QueryResolve = 0x00000200, + WGPUBufferUsage_Force32 = 0x7FFFFFFF +} WGPUBufferUsage WGPU_ENUM_ATTRIBUTE; +typedef WGPUFlags WGPUBufferUsageFlags WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUColorWriteMask { + WGPUColorWriteMask_None = 0x00000000, + WGPUColorWriteMask_Red = 0x00000001, + WGPUColorWriteMask_Green = 0x00000002, + WGPUColorWriteMask_Blue = 0x00000004, + WGPUColorWriteMask_Alpha = 0x00000008, + WGPUColorWriteMask_All = 0x0000000F, + WGPUColorWriteMask_Force32 = 0x7FFFFFFF +} WGPUColorWriteMask WGPU_ENUM_ATTRIBUTE; +typedef WGPUFlags WGPUColorWriteMaskFlags WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUMapMode { + WGPUMapMode_None = 0x00000000, + WGPUMapMode_Read = 0x00000001, + WGPUMapMode_Write = 0x00000002, + WGPUMapMode_Force32 = 0x7FFFFFFF +} WGPUMapMode WGPU_ENUM_ATTRIBUTE; +typedef WGPUFlags WGPUMapModeFlags WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUShaderStage { + WGPUShaderStage_None = 0x00000000, + WGPUShaderStage_Vertex = 0x00000001, + WGPUShaderStage_Fragment = 0x00000002, + WGPUShaderStage_Compute = 0x00000004, + WGPUShaderStage_Force32 = 0x7FFFFFFF +} WGPUShaderStage WGPU_ENUM_ATTRIBUTE; +typedef WGPUFlags WGPUShaderStageFlags WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUTextureUsage { + WGPUTextureUsage_None = 0x00000000, + WGPUTextureUsage_CopySrc = 0x00000001, + WGPUTextureUsage_CopyDst = 0x00000002, + WGPUTextureUsage_TextureBinding = 0x00000004, + WGPUTextureUsage_StorageBinding = 0x00000008, + WGPUTextureUsage_RenderAttachment = 0x00000010, + 
WGPUTextureUsage_Force32 = 0x7FFFFFFF +} WGPUTextureUsage WGPU_ENUM_ATTRIBUTE; +typedef WGPUFlags WGPUTextureUsageFlags WGPU_ENUM_ATTRIBUTE; + +typedef void (*WGPUBufferMapCallback)(WGPUBufferMapAsyncStatus status, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUCompilationInfoCallback)(WGPUCompilationInfoRequestStatus status, struct WGPUCompilationInfo const * compilationInfo, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUCreateComputePipelineAsyncCallback)(WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUCreateRenderPipelineAsyncCallback)(WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUDeviceLostCallback)(WGPUDeviceLostReason reason, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUErrorCallback)(WGPUErrorType type, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProc)(void) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUQueueWorkDoneCallback)(WGPUQueueWorkDoneStatus status, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPURequestAdapterCallback)(WGPURequestAdapterStatus status, WGPUAdapter adapter, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPURequestDeviceCallback)(WGPURequestDeviceStatus status, WGPUDevice device, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; + +typedef struct WGPUChainedStruct { + struct WGPUChainedStruct const * next; + WGPUSType sType; +} WGPUChainedStruct WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUChainedStructOut { + struct WGPUChainedStructOut * next; + WGPUSType sType; +} WGPUChainedStructOut WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUAdapterProperties { + WGPUChainedStructOut * nextInChain; + uint32_t vendorID; + char const * vendorName; + char const * architecture; + uint32_t deviceID; + char const * name; + char const * driverDescription; + WGPUAdapterType adapterType; + WGPUBackendType backendType; +} WGPUAdapterProperties WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBindGroupEntry { + WGPUChainedStruct const * nextInChain; + uint32_t binding; + WGPU_NULLABLE WGPUBuffer buffer; + uint64_t offset; + uint64_t size; + WGPU_NULLABLE WGPUSampler sampler; + WGPU_NULLABLE WGPUTextureView textureView; +} WGPUBindGroupEntry WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBlendComponent { + WGPUBlendOperation operation; + WGPUBlendFactor srcFactor; + WGPUBlendFactor dstFactor; +} WGPUBlendComponent WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBufferBindingLayout { + WGPUChainedStruct const * nextInChain; + WGPUBufferBindingType type; + WGPUBool hasDynamicOffset; + uint64_t minBindingSize; +} WGPUBufferBindingLayout WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBufferDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPUBufferUsageFlags usage; + uint64_t size; + WGPUBool mappedAtCreation; +} WGPUBufferDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUColor { + double r; + double g; + double b; + double a; +} WGPUColor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUCommandBufferDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; +} WGPUCommandBufferDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUCommandEncoderDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char 
const * label; +} WGPUCommandEncoderDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUCompilationMessage { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * message; + WGPUCompilationMessageType type; + uint64_t lineNum; + uint64_t linePos; + uint64_t offset; + uint64_t length; + uint64_t utf16LinePos; + uint64_t utf16Offset; + uint64_t utf16Length; +} WGPUCompilationMessage WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUComputePassTimestampWrites { + WGPUQuerySet querySet; + uint32_t beginningOfPassWriteIndex; + uint32_t endOfPassWriteIndex; +} WGPUComputePassTimestampWrites WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUConstantEntry { + WGPUChainedStruct const * nextInChain; + char const * key; + double value; +} WGPUConstantEntry WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUExtent3D { + uint32_t width; + uint32_t height; + uint32_t depthOrArrayLayers; +} WGPUExtent3D WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUInstanceDescriptor { + WGPUChainedStruct const * nextInChain; +} WGPUInstanceDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPULimits { + uint32_t maxTextureDimension1D; + uint32_t maxTextureDimension2D; + uint32_t maxTextureDimension3D; + uint32_t maxTextureArrayLayers; + uint32_t maxBindGroups; + uint32_t maxBindGroupsPlusVertexBuffers; + uint32_t maxBindingsPerBindGroup; + uint32_t maxDynamicUniformBuffersPerPipelineLayout; + uint32_t maxDynamicStorageBuffersPerPipelineLayout; + uint32_t maxSampledTexturesPerShaderStage; + uint32_t maxSamplersPerShaderStage; + uint32_t maxStorageBuffersPerShaderStage; + uint32_t maxStorageTexturesPerShaderStage; + uint32_t maxUniformBuffersPerShaderStage; + uint64_t maxUniformBufferBindingSize; + uint64_t maxStorageBufferBindingSize; + uint32_t minUniformBufferOffsetAlignment; + uint32_t minStorageBufferOffsetAlignment; + uint32_t maxVertexBuffers; + uint64_t maxBufferSize; + uint32_t maxVertexAttributes; + uint32_t maxVertexBufferArrayStride; + uint32_t maxInterStageShaderComponents; + uint32_t maxInterStageShaderVariables; + uint32_t maxColorAttachments; + uint32_t maxColorAttachmentBytesPerSample; + uint32_t maxComputeWorkgroupStorageSize; + uint32_t maxComputeInvocationsPerWorkgroup; + uint32_t maxComputeWorkgroupSizeX; + uint32_t maxComputeWorkgroupSizeY; + uint32_t maxComputeWorkgroupSizeZ; + uint32_t maxComputeWorkgroupsPerDimension; +} WGPULimits WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUMultisampleState { + WGPUChainedStruct const * nextInChain; + uint32_t count; + uint32_t mask; + WGPUBool alphaToCoverageEnabled; +} WGPUMultisampleState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUOrigin3D { + uint32_t x; + uint32_t y; + uint32_t z; +} WGPUOrigin3D WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUPipelineLayoutDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + size_t bindGroupLayoutCount; + WGPUBindGroupLayout const * bindGroupLayouts; +} WGPUPipelineLayoutDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUPrimitiveState +typedef struct WGPUPrimitiveDepthClipControl { + WGPUChainedStruct chain; + WGPUBool unclippedDepth; +} WGPUPrimitiveDepthClipControl WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUPrimitiveState { + WGPUChainedStruct const * nextInChain; + WGPUPrimitiveTopology topology; + WGPUIndexFormat stripIndexFormat; + WGPUFrontFace frontFace; + WGPUCullMode cullMode; +} WGPUPrimitiveState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUQuerySetDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE 
char const * label; + WGPUQueryType type; + uint32_t count; +} WGPUQuerySetDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUQueueDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; +} WGPUQueueDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURenderBundleDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; +} WGPURenderBundleDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURenderBundleEncoderDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + size_t colorFormatCount; + WGPUTextureFormat const * colorFormats; + WGPUTextureFormat depthStencilFormat; + uint32_t sampleCount; + WGPUBool depthReadOnly; + WGPUBool stencilReadOnly; +} WGPURenderBundleEncoderDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURenderPassDepthStencilAttachment { + WGPUTextureView view; + WGPULoadOp depthLoadOp; + WGPUStoreOp depthStoreOp; + float depthClearValue; + WGPUBool depthReadOnly; + WGPULoadOp stencilLoadOp; + WGPUStoreOp stencilStoreOp; + uint32_t stencilClearValue; + WGPUBool stencilReadOnly; +} WGPURenderPassDepthStencilAttachment WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPURenderPassDescriptor +typedef struct WGPURenderPassDescriptorMaxDrawCount { + WGPUChainedStruct chain; + uint64_t maxDrawCount; +} WGPURenderPassDescriptorMaxDrawCount WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURenderPassTimestampWrites { + WGPUQuerySet querySet; + uint32_t beginningOfPassWriteIndex; + uint32_t endOfPassWriteIndex; +} WGPURenderPassTimestampWrites WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURequestAdapterOptions { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE WGPUSurface compatibleSurface; + WGPUPowerPreference powerPreference; + WGPUBackendType backendType; + WGPUBool forceFallbackAdapter; +} WGPURequestAdapterOptions WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUSamplerBindingLayout { + WGPUChainedStruct const * nextInChain; + WGPUSamplerBindingType type; +} WGPUSamplerBindingLayout WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUSamplerDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPUAddressMode addressModeU; + WGPUAddressMode addressModeV; + WGPUAddressMode addressModeW; + WGPUFilterMode magFilter; + WGPUFilterMode minFilter; + WGPUMipmapFilterMode mipmapFilter; + float lodMinClamp; + float lodMaxClamp; + WGPUCompareFunction compare; + uint16_t maxAnisotropy; +} WGPUSamplerDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUShaderModuleCompilationHint { + WGPUChainedStruct const * nextInChain; + char const * entryPoint; + WGPUPipelineLayout layout; +} WGPUShaderModuleCompilationHint WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUShaderModuleDescriptor +typedef struct WGPUShaderModuleSPIRVDescriptor { + WGPUChainedStruct chain; + uint32_t codeSize; + uint32_t const * code; +} WGPUShaderModuleSPIRVDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUShaderModuleDescriptor +typedef struct WGPUShaderModuleWGSLDescriptor { + WGPUChainedStruct chain; + char const * code; +} WGPUShaderModuleWGSLDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUStencilFaceState { + WGPUCompareFunction compare; + WGPUStencilOperation failOp; + WGPUStencilOperation depthFailOp; + WGPUStencilOperation passOp; +} WGPUStencilFaceState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUStorageTextureBindingLayout { + WGPUChainedStruct const * nextInChain; + WGPUStorageTextureAccess access; 
+ WGPUTextureFormat format; + WGPUTextureViewDimension viewDimension; +} WGPUStorageTextureBindingLayout WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUSurfaceCapabilities { + WGPUChainedStructOut * nextInChain; + size_t formatCount; + WGPUTextureFormat * formats; + size_t presentModeCount; + WGPUPresentMode * presentModes; + size_t alphaModeCount; + WGPUCompositeAlphaMode * alphaModes; +} WGPUSurfaceCapabilities WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUSurfaceConfiguration { + WGPUChainedStruct const * nextInChain; + WGPUDevice device; + WGPUTextureFormat format; + WGPUTextureUsageFlags usage; + size_t viewFormatCount; + WGPUTextureFormat const * viewFormats; + WGPUCompositeAlphaMode alphaMode; + uint32_t width; + uint32_t height; + WGPUPresentMode presentMode; +} WGPUSurfaceConfiguration WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUSurfaceDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; +} WGPUSurfaceDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUSurfaceDescriptor +typedef struct WGPUSurfaceDescriptorFromAndroidNativeWindow { + WGPUChainedStruct chain; + void * window; +} WGPUSurfaceDescriptorFromAndroidNativeWindow WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUSurfaceDescriptor +typedef struct WGPUSurfaceDescriptorFromCanvasHTMLSelector { + WGPUChainedStruct chain; + char const * selector; +} WGPUSurfaceDescriptorFromCanvasHTMLSelector WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUSurfaceDescriptor +typedef struct WGPUSurfaceDescriptorFromMetalLayer { + WGPUChainedStruct chain; + void * layer; +} WGPUSurfaceDescriptorFromMetalLayer WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUSurfaceDescriptor +typedef struct WGPUSurfaceDescriptorFromWaylandSurface { + WGPUChainedStruct chain; + void * display; + void * surface; +} WGPUSurfaceDescriptorFromWaylandSurface WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUSurfaceDescriptor +typedef struct WGPUSurfaceDescriptorFromWindowsHWND { + WGPUChainedStruct chain; + void * hinstance; + void * hwnd; +} WGPUSurfaceDescriptorFromWindowsHWND WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUSurfaceDescriptor +typedef struct WGPUSurfaceDescriptorFromXcbWindow { + WGPUChainedStruct chain; + void * connection; + uint32_t window; +} WGPUSurfaceDescriptorFromXcbWindow WGPU_STRUCTURE_ATTRIBUTE; + +// Can be chained in WGPUSurfaceDescriptor +typedef struct WGPUSurfaceDescriptorFromXlibWindow { + WGPUChainedStruct chain; + void * display; + uint32_t window; +} WGPUSurfaceDescriptorFromXlibWindow WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUSurfaceTexture { + WGPUTexture texture; + WGPUBool suboptimal; + WGPUSurfaceGetCurrentTextureStatus status; +} WGPUSurfaceTexture WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUTextureBindingLayout { + WGPUChainedStruct const * nextInChain; + WGPUTextureSampleType sampleType; + WGPUTextureViewDimension viewDimension; + WGPUBool multisampled; +} WGPUTextureBindingLayout WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUTextureDataLayout { + WGPUChainedStruct const * nextInChain; + uint64_t offset; + uint32_t bytesPerRow; + uint32_t rowsPerImage; +} WGPUTextureDataLayout WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUTextureViewDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPUTextureFormat format; + WGPUTextureViewDimension dimension; + uint32_t baseMipLevel; + uint32_t mipLevelCount; + uint32_t baseArrayLayer; + uint32_t arrayLayerCount; + WGPUTextureAspect aspect; +} 
WGPUTextureViewDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUVertexAttribute { + WGPUVertexFormat format; + uint64_t offset; + uint32_t shaderLocation; +} WGPUVertexAttribute WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBindGroupDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPUBindGroupLayout layout; + size_t entryCount; + WGPUBindGroupEntry const * entries; +} WGPUBindGroupDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBindGroupLayoutEntry { + WGPUChainedStruct const * nextInChain; + uint32_t binding; + WGPUShaderStageFlags visibility; + WGPUBufferBindingLayout buffer; + WGPUSamplerBindingLayout sampler; + WGPUTextureBindingLayout texture; + WGPUStorageTextureBindingLayout storageTexture; +} WGPUBindGroupLayoutEntry WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBlendState { + WGPUBlendComponent color; + WGPUBlendComponent alpha; +} WGPUBlendState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUCompilationInfo { + WGPUChainedStruct const * nextInChain; + size_t messageCount; + WGPUCompilationMessage const * messages; +} WGPUCompilationInfo WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUComputePassDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPU_NULLABLE WGPUComputePassTimestampWrites const * timestampWrites; +} WGPUComputePassDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUDepthStencilState { + WGPUChainedStruct const * nextInChain; + WGPUTextureFormat format; + WGPUBool depthWriteEnabled; + WGPUCompareFunction depthCompare; + WGPUStencilFaceState stencilFront; + WGPUStencilFaceState stencilBack; + uint32_t stencilReadMask; + uint32_t stencilWriteMask; + int32_t depthBias; + float depthBiasSlopeScale; + float depthBiasClamp; +} WGPUDepthStencilState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUImageCopyBuffer { + WGPUChainedStruct const * nextInChain; + WGPUTextureDataLayout layout; + WGPUBuffer buffer; +} WGPUImageCopyBuffer WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUImageCopyTexture { + WGPUChainedStruct const * nextInChain; + WGPUTexture texture; + uint32_t mipLevel; + WGPUOrigin3D origin; + WGPUTextureAspect aspect; +} WGPUImageCopyTexture WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUProgrammableStageDescriptor { + WGPUChainedStruct const * nextInChain; + WGPUShaderModule module; + char const * entryPoint; + size_t constantCount; + WGPUConstantEntry const * constants; +} WGPUProgrammableStageDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURenderPassColorAttachment { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE WGPUTextureView view; + WGPU_NULLABLE WGPUTextureView resolveTarget; + WGPULoadOp loadOp; + WGPUStoreOp storeOp; + WGPUColor clearValue; +} WGPURenderPassColorAttachment WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURequiredLimits { + WGPUChainedStruct const * nextInChain; + WGPULimits limits; +} WGPURequiredLimits WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUShaderModuleDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + size_t hintCount; + WGPUShaderModuleCompilationHint const * hints; +} WGPUShaderModuleDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUSupportedLimits { + WGPUChainedStructOut * nextInChain; + WGPULimits limits; +} WGPUSupportedLimits WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUTextureDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPUTextureUsageFlags usage; + WGPUTextureDimension dimension; 
+ WGPUExtent3D size; + WGPUTextureFormat format; + uint32_t mipLevelCount; + uint32_t sampleCount; + size_t viewFormatCount; + WGPUTextureFormat const * viewFormats; +} WGPUTextureDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUVertexBufferLayout { + uint64_t arrayStride; + WGPUVertexStepMode stepMode; + size_t attributeCount; + WGPUVertexAttribute const * attributes; +} WGPUVertexBufferLayout WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUBindGroupLayoutDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + size_t entryCount; + WGPUBindGroupLayoutEntry const * entries; +} WGPUBindGroupLayoutDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUColorTargetState { + WGPUChainedStruct const * nextInChain; + WGPUTextureFormat format; + WGPU_NULLABLE WGPUBlendState const * blend; + WGPUColorWriteMaskFlags writeMask; +} WGPUColorTargetState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUComputePipelineDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPU_NULLABLE WGPUPipelineLayout layout; + WGPUProgrammableStageDescriptor compute; +} WGPUComputePipelineDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUDeviceDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + size_t requiredFeatureCount; + WGPUFeatureName const * requiredFeatures; + WGPU_NULLABLE WGPURequiredLimits const * requiredLimits; + WGPUQueueDescriptor defaultQueue; + WGPUDeviceLostCallback deviceLostCallback; + void * deviceLostUserdata; +} WGPUDeviceDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURenderPassDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + size_t colorAttachmentCount; + WGPURenderPassColorAttachment const * colorAttachments; + WGPU_NULLABLE WGPURenderPassDepthStencilAttachment const * depthStencilAttachment; + WGPU_NULLABLE WGPUQuerySet occlusionQuerySet; + WGPU_NULLABLE WGPURenderPassTimestampWrites const * timestampWrites; +} WGPURenderPassDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUVertexState { + WGPUChainedStruct const * nextInChain; + WGPUShaderModule module; + char const * entryPoint; + size_t constantCount; + WGPUConstantEntry const * constants; + size_t bufferCount; + WGPUVertexBufferLayout const * buffers; +} WGPUVertexState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPUFragmentState { + WGPUChainedStruct const * nextInChain; + WGPUShaderModule module; + char const * entryPoint; + size_t constantCount; + WGPUConstantEntry const * constants; + size_t targetCount; + WGPUColorTargetState const * targets; +} WGPUFragmentState WGPU_STRUCTURE_ATTRIBUTE; + +typedef struct WGPURenderPipelineDescriptor { + WGPUChainedStruct const * nextInChain; + WGPU_NULLABLE char const * label; + WGPU_NULLABLE WGPUPipelineLayout layout; + WGPUVertexState vertex; + WGPUPrimitiveState primitive; + WGPU_NULLABLE WGPUDepthStencilState const * depthStencil; + WGPUMultisampleState multisample; + WGPU_NULLABLE WGPUFragmentState const * fragment; +} WGPURenderPipelineDescriptor WGPU_STRUCTURE_ATTRIBUTE; + +#ifdef __cplusplus +extern "C" { +#endif + +#if !defined(WGPU_SKIP_PROCS) + +typedef WGPUInstance (*WGPUProcCreateInstance)(WGPU_NULLABLE WGPUInstanceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUProc (*WGPUProcGetProcAddress)(WGPUDevice device, char const * procName) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Adapter +typedef size_t (*WGPUProcAdapterEnumerateFeatures)(WGPUAdapter adapter, WGPUFeatureName 
* features) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBool (*WGPUProcAdapterGetLimits)(WGPUAdapter adapter, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcAdapterGetProperties)(WGPUAdapter adapter, WGPUAdapterProperties * properties) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBool (*WGPUProcAdapterHasFeature)(WGPUAdapter adapter, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcAdapterRequestDevice)(WGPUAdapter adapter, WGPU_NULLABLE WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcAdapterReference)(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcAdapterRelease)(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of BindGroup +typedef void (*WGPUProcBindGroupSetLabel)(WGPUBindGroup bindGroup, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBindGroupReference)(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBindGroupRelease)(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of BindGroupLayout +typedef void (*WGPUProcBindGroupLayoutSetLabel)(WGPUBindGroupLayout bindGroupLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBindGroupLayoutReference)(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBindGroupLayoutRelease)(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Buffer +typedef void (*WGPUProcBufferDestroy)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +typedef void const * (*WGPUProcBufferGetConstMappedRange)(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBufferMapState (*WGPUProcBufferGetMapState)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +typedef void * (*WGPUProcBufferGetMappedRange)(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef uint64_t (*WGPUProcBufferGetSize)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBufferUsageFlags (*WGPUProcBufferGetUsage)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBufferMapAsync)(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBufferSetLabel)(WGPUBuffer buffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBufferUnmap)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBufferReference)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcBufferRelease)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of CommandBuffer +typedef void (*WGPUProcCommandBufferSetLabel)(WGPUCommandBuffer commandBuffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandBufferReference)(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandBufferRelease)(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of CommandEncoder +typedef WGPUComputePassEncoder (*WGPUProcCommandEncoderBeginComputePass)(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUComputePassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPURenderPassEncoder (*WGPUProcCommandEncoderBeginRenderPass)(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderClearBuffer)(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, 
uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderCopyBufferToBuffer)(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderCopyBufferToTexture)(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderCopyTextureToBuffer)(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderCopyTextureToTexture)(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUCommandBuffer (*WGPUProcCommandEncoderFinish)(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUCommandBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderInsertDebugMarker)(WGPUCommandEncoder commandEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderPopDebugGroup)(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderPushDebugGroup)(WGPUCommandEncoder commandEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderResolveQuerySet)(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderSetLabel)(WGPUCommandEncoder commandEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderWriteTimestamp)(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t queryIndex) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderReference)(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcCommandEncoderRelease)(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of ComputePassEncoder +typedef void (*WGPUProcComputePassEncoderDispatchWorkgroups)(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderDispatchWorkgroupsIndirect)(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderEnd)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderInsertDebugMarker)(WGPUComputePassEncoder computePassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderPopDebugGroup)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderPushDebugGroup)(WGPUComputePassEncoder computePassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderSetBindGroup)(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; +typedef void 
(*WGPUProcComputePassEncoderSetLabel)(WGPUComputePassEncoder computePassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderSetPipeline)(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderReference)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePassEncoderRelease)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of ComputePipeline +typedef WGPUBindGroupLayout (*WGPUProcComputePipelineGetBindGroupLayout)(WGPUComputePipeline computePipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePipelineSetLabel)(WGPUComputePipeline computePipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePipelineReference)(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcComputePipelineRelease)(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Device +typedef WGPUBindGroup (*WGPUProcDeviceCreateBindGroup)(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBindGroupLayout (*WGPUProcDeviceCreateBindGroupLayout)(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBuffer (*WGPUProcDeviceCreateBuffer)(WGPUDevice device, WGPUBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUCommandEncoder (*WGPUProcDeviceCreateCommandEncoder)(WGPUDevice device, WGPU_NULLABLE WGPUCommandEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUComputePipeline (*WGPUProcDeviceCreateComputePipeline)(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDeviceCreateComputePipelineAsync)(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor, WGPUCreateComputePipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUPipelineLayout (*WGPUProcDeviceCreatePipelineLayout)(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUQuerySet (*WGPUProcDeviceCreateQuerySet)(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPURenderBundleEncoder (*WGPUProcDeviceCreateRenderBundleEncoder)(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPURenderPipeline (*WGPUProcDeviceCreateRenderPipeline)(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDeviceCreateRenderPipelineAsync)(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor, WGPUCreateRenderPipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUSampler (*WGPUProcDeviceCreateSampler)(WGPUDevice device, WGPU_NULLABLE WGPUSamplerDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUShaderModule (*WGPUProcDeviceCreateShaderModule)(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUTexture (*WGPUProcDeviceCreateTexture)(WGPUDevice device, WGPUTextureDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDeviceDestroy)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; +typedef size_t (*WGPUProcDeviceEnumerateFeatures)(WGPUDevice device, WGPUFeatureName * features) 
WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBool (*WGPUProcDeviceGetLimits)(WGPUDevice device, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUQueue (*WGPUProcDeviceGetQueue)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUBool (*WGPUProcDeviceHasFeature)(WGPUDevice device, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDevicePopErrorScope)(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDevicePushErrorScope)(WGPUDevice device, WGPUErrorFilter filter) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDeviceSetLabel)(WGPUDevice device, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDeviceSetUncapturedErrorCallback)(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDeviceReference)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcDeviceRelease)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Instance +typedef WGPUSurface (*WGPUProcInstanceCreateSurface)(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcInstanceProcessEvents)(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcInstanceRequestAdapter)(WGPUInstance instance, WGPU_NULLABLE WGPURequestAdapterOptions const * options, WGPURequestAdapterCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcInstanceReference)(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcInstanceRelease)(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of PipelineLayout +typedef void (*WGPUProcPipelineLayoutSetLabel)(WGPUPipelineLayout pipelineLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcPipelineLayoutReference)(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcPipelineLayoutRelease)(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of QuerySet +typedef void (*WGPUProcQuerySetDestroy)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +typedef uint32_t (*WGPUProcQuerySetGetCount)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUQueryType (*WGPUProcQuerySetGetType)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQuerySetSetLabel)(WGPUQuerySet querySet, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQuerySetReference)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQuerySetRelease)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Queue +typedef void (*WGPUProcQueueOnSubmittedWorkDone)(WGPUQueue queue, WGPUQueueWorkDoneCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQueueSetLabel)(WGPUQueue queue, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQueueSubmit)(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQueueWriteBuffer)(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQueueWriteTexture)(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcQueueReference)(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; +typedef void 
(*WGPUProcQueueRelease)(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of RenderBundle +typedef void (*WGPUProcRenderBundleSetLabel)(WGPURenderBundle renderBundle, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleReference)(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleRelease)(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of RenderBundleEncoder +typedef void (*WGPUProcRenderBundleEncoderDraw)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderDrawIndexed)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderDrawIndexedIndirect)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderDrawIndirect)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPURenderBundle (*WGPUProcRenderBundleEncoderFinish)(WGPURenderBundleEncoder renderBundleEncoder, WGPU_NULLABLE WGPURenderBundleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderInsertDebugMarker)(WGPURenderBundleEncoder renderBundleEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderPopDebugGroup)(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderPushDebugGroup)(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderSetBindGroup)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderSetIndexBuffer)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderSetLabel)(WGPURenderBundleEncoder renderBundleEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderSetPipeline)(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderSetVertexBuffer)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderReference)(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderBundleEncoderRelease)(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of RenderPassEncoder +typedef void (*WGPUProcRenderPassEncoderBeginOcclusionQuery)(WGPURenderPassEncoder renderPassEncoder, uint32_t queryIndex) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderDraw)(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +typedef void 
(*WGPUProcRenderPassEncoderDrawIndexed)(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderDrawIndexedIndirect)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderDrawIndirect)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderEnd)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderEndOcclusionQuery)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderExecuteBundles)(WGPURenderPassEncoder renderPassEncoder, size_t bundleCount, WGPURenderBundle const * bundles) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderInsertDebugMarker)(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderPopDebugGroup)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderPushDebugGroup)(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetBindGroup)(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetBlendConstant)(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetIndexBuffer)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetLabel)(WGPURenderPassEncoder renderPassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetPipeline)(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetScissorRect)(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetStencilReference)(WGPURenderPassEncoder renderPassEncoder, uint32_t reference) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetVertexBuffer)(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderSetViewport)(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderReference)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPassEncoderRelease)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of RenderPipeline +typedef WGPUBindGroupLayout (*WGPUProcRenderPipelineGetBindGroupLayout)(WGPURenderPipeline renderPipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPipelineSetLabel)(WGPURenderPipeline renderPipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; 
+typedef void (*WGPUProcRenderPipelineReference)(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcRenderPipelineRelease)(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Sampler +typedef void (*WGPUProcSamplerSetLabel)(WGPUSampler sampler, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSamplerReference)(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSamplerRelease)(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of ShaderModule +typedef void (*WGPUProcShaderModuleGetCompilationInfo)(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcShaderModuleSetLabel)(WGPUShaderModule shaderModule, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcShaderModuleReference)(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcShaderModuleRelease)(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Surface +typedef void (*WGPUProcSurfaceConfigure)(WGPUSurface surface, WGPUSurfaceConfiguration const * config) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSurfaceGetCapabilities)(WGPUSurface surface, WGPUAdapter adapter, WGPUSurfaceCapabilities * capabilities) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSurfaceGetCurrentTexture)(WGPUSurface surface, WGPUSurfaceTexture * surfaceTexture) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUTextureFormat (*WGPUProcSurfaceGetPreferredFormat)(WGPUSurface surface, WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSurfacePresent)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSurfaceUnconfigure)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSurfaceReference)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcSurfaceRelease)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of SurfaceCapabilities +typedef void (*WGPUProcSurfaceCapabilitiesFreeMembers)(WGPUSurfaceCapabilities capabilities) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of Texture +typedef WGPUTextureView (*WGPUProcTextureCreateView)(WGPUTexture texture, WGPU_NULLABLE WGPUTextureViewDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcTextureDestroy)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef uint32_t (*WGPUProcTextureGetDepthOrArrayLayers)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUTextureDimension (*WGPUProcTextureGetDimension)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUTextureFormat (*WGPUProcTextureGetFormat)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef uint32_t (*WGPUProcTextureGetHeight)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef uint32_t (*WGPUProcTextureGetMipLevelCount)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef uint32_t (*WGPUProcTextureGetSampleCount)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef WGPUTextureUsageFlags (*WGPUProcTextureGetUsage)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef uint32_t (*WGPUProcTextureGetWidth)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcTextureSetLabel)(WGPUTexture texture, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcTextureReference)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcTextureRelease)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; + +// Procs of TextureView +typedef void (*WGPUProcTextureViewSetLabel)(WGPUTextureView 
textureView, char const * label) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcTextureViewReference)(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; +typedef void (*WGPUProcTextureViewRelease)(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; + +#endif // !defined(WGPU_SKIP_PROCS) + +#if !defined(WGPU_SKIP_DECLARATIONS) + +WGPU_EXPORT WGPUInstance wgpuCreateInstance(WGPU_NULLABLE WGPUInstanceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUProc wgpuGetProcAddress(WGPUDevice device, char const * procName) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Adapter +WGPU_EXPORT size_t wgpuAdapterEnumerateFeatures(WGPUAdapter adapter, WGPUFeatureName * features) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBool wgpuAdapterGetLimits(WGPUAdapter adapter, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuAdapterGetProperties(WGPUAdapter adapter, WGPUAdapterProperties * properties) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBool wgpuAdapterHasFeature(WGPUAdapter adapter, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuAdapterRequestDevice(WGPUAdapter adapter, WGPU_NULLABLE WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuAdapterReference(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuAdapterRelease(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of BindGroup +WGPU_EXPORT void wgpuBindGroupSetLabel(WGPUBindGroup bindGroup, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBindGroupReference(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBindGroupRelease(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of BindGroupLayout +WGPU_EXPORT void wgpuBindGroupLayoutSetLabel(WGPUBindGroupLayout bindGroupLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBindGroupLayoutReference(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBindGroupLayoutRelease(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Buffer +WGPU_EXPORT void wgpuBufferDestroy(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void const * wgpuBufferGetConstMappedRange(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBufferMapState wgpuBufferGetMapState(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void * wgpuBufferGetMappedRange(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT uint64_t wgpuBufferGetSize(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBufferUsageFlags wgpuBufferGetUsage(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBufferMapAsync(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBufferSetLabel(WGPUBuffer buffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBufferUnmap(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBufferReference(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuBufferRelease(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of CommandBuffer +WGPU_EXPORT void wgpuCommandBufferSetLabel(WGPUCommandBuffer commandBuffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandBufferReference(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; 
+WGPU_EXPORT void wgpuCommandBufferRelease(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of CommandEncoder +WGPU_EXPORT WGPUComputePassEncoder wgpuCommandEncoderBeginComputePass(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUComputePassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPURenderPassEncoder wgpuCommandEncoderBeginRenderPass(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderClearBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderCopyBufferToBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderCopyBufferToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderCopyTextureToBuffer(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderCopyTextureToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUCommandBuffer wgpuCommandEncoderFinish(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUCommandBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderInsertDebugMarker(WGPUCommandEncoder commandEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderPopDebugGroup(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderPushDebugGroup(WGPUCommandEncoder commandEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderResolveQuerySet(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderSetLabel(WGPUCommandEncoder commandEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderWriteTimestamp(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t queryIndex) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderReference(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuCommandEncoderRelease(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of ComputePassEncoder +WGPU_EXPORT void wgpuComputePassEncoderDispatchWorkgroups(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderDispatchWorkgroupsIndirect(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderEnd(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderInsertDebugMarker(WGPUComputePassEncoder computePassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void 
wgpuComputePassEncoderPopDebugGroup(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderPushDebugGroup(WGPUComputePassEncoder computePassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderSetBindGroup(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderSetLabel(WGPUComputePassEncoder computePassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderSetPipeline(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderReference(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePassEncoderRelease(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of ComputePipeline +WGPU_EXPORT WGPUBindGroupLayout wgpuComputePipelineGetBindGroupLayout(WGPUComputePipeline computePipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePipelineSetLabel(WGPUComputePipeline computePipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePipelineReference(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuComputePipelineRelease(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Device +WGPU_EXPORT WGPUBindGroup wgpuDeviceCreateBindGroup(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBindGroupLayout wgpuDeviceCreateBindGroupLayout(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBuffer wgpuDeviceCreateBuffer(WGPUDevice device, WGPUBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUCommandEncoder wgpuDeviceCreateCommandEncoder(WGPUDevice device, WGPU_NULLABLE WGPUCommandEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUComputePipeline wgpuDeviceCreateComputePipeline(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDeviceCreateComputePipelineAsync(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor, WGPUCreateComputePipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUPipelineLayout wgpuDeviceCreatePipelineLayout(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUQuerySet wgpuDeviceCreateQuerySet(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPURenderBundleEncoder wgpuDeviceCreateRenderBundleEncoder(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPURenderPipeline wgpuDeviceCreateRenderPipeline(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDeviceCreateRenderPipelineAsync(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor, WGPUCreateRenderPipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUSampler wgpuDeviceCreateSampler(WGPUDevice device, WGPU_NULLABLE WGPUSamplerDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUShaderModule 
wgpuDeviceCreateShaderModule(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUTexture wgpuDeviceCreateTexture(WGPUDevice device, WGPUTextureDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDeviceDestroy(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT size_t wgpuDeviceEnumerateFeatures(WGPUDevice device, WGPUFeatureName * features) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBool wgpuDeviceGetLimits(WGPUDevice device, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUQueue wgpuDeviceGetQueue(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUBool wgpuDeviceHasFeature(WGPUDevice device, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDevicePopErrorScope(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDevicePushErrorScope(WGPUDevice device, WGPUErrorFilter filter) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDeviceSetLabel(WGPUDevice device, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDeviceSetUncapturedErrorCallback(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDeviceReference(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuDeviceRelease(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Instance +WGPU_EXPORT WGPUSurface wgpuInstanceCreateSurface(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuInstanceProcessEvents(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuInstanceRequestAdapter(WGPUInstance instance, WGPU_NULLABLE WGPURequestAdapterOptions const * options, WGPURequestAdapterCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuInstanceReference(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuInstanceRelease(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of PipelineLayout +WGPU_EXPORT void wgpuPipelineLayoutSetLabel(WGPUPipelineLayout pipelineLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuPipelineLayoutReference(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuPipelineLayoutRelease(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of QuerySet +WGPU_EXPORT void wgpuQuerySetDestroy(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT uint32_t wgpuQuerySetGetCount(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUQueryType wgpuQuerySetGetType(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQuerySetSetLabel(WGPUQuerySet querySet, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQuerySetReference(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQuerySetRelease(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Queue +WGPU_EXPORT void wgpuQueueOnSubmittedWorkDone(WGPUQueue queue, WGPUQueueWorkDoneCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQueueSetLabel(WGPUQueue queue, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQueueSubmit(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQueueWriteBuffer(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size) WGPU_FUNCTION_ATTRIBUTE; 
+WGPU_EXPORT void wgpuQueueWriteTexture(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQueueReference(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuQueueRelease(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of RenderBundle +WGPU_EXPORT void wgpuRenderBundleSetLabel(WGPURenderBundle renderBundle, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleReference(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleRelease(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of RenderBundleEncoder +WGPU_EXPORT void wgpuRenderBundleEncoderDraw(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexed(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexedIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPURenderBundle wgpuRenderBundleEncoderFinish(WGPURenderBundleEncoder renderBundleEncoder, WGPU_NULLABLE WGPURenderBundleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderInsertDebugMarker(WGPURenderBundleEncoder renderBundleEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderPopDebugGroup(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderPushDebugGroup(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderSetIndexBuffer(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderSetLabel(WGPURenderBundleEncoder renderBundleEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderSetPipeline(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderSetVertexBuffer(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderReference(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderBundleEncoderRelease(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of RenderPassEncoder +WGPU_EXPORT void wgpuRenderPassEncoderBeginOcclusionQuery(WGPURenderPassEncoder renderPassEncoder, uint32_t queryIndex) 
WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderDraw(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexed(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexedIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderDrawIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderEnd(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderEndOcclusionQuery(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderExecuteBundles(WGPURenderPassEncoder renderPassEncoder, size_t bundleCount, WGPURenderBundle const * bundles) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderInsertDebugMarker(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderPopDebugGroup(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderPushDebugGroup(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetBindGroup(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetBlendConstant(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetIndexBuffer(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetLabel(WGPURenderPassEncoder renderPassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetPipeline(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetScissorRect(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetStencilReference(WGPURenderPassEncoder renderPassEncoder, uint32_t reference) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetVertexBuffer(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderSetViewport(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderReference(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPassEncoderRelease(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of RenderPipeline +WGPU_EXPORT WGPUBindGroupLayout wgpuRenderPipelineGetBindGroupLayout(WGPURenderPipeline 
renderPipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPipelineSetLabel(WGPURenderPipeline renderPipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPipelineReference(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuRenderPipelineRelease(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Sampler +WGPU_EXPORT void wgpuSamplerSetLabel(WGPUSampler sampler, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSamplerReference(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSamplerRelease(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of ShaderModule +WGPU_EXPORT void wgpuShaderModuleGetCompilationInfo(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuShaderModuleSetLabel(WGPUShaderModule shaderModule, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuShaderModuleReference(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuShaderModuleRelease(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Surface +WGPU_EXPORT void wgpuSurfaceConfigure(WGPUSurface surface, WGPUSurfaceConfiguration const * config) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSurfaceGetCapabilities(WGPUSurface surface, WGPUAdapter adapter, WGPUSurfaceCapabilities * capabilities) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSurfaceGetCurrentTexture(WGPUSurface surface, WGPUSurfaceTexture * surfaceTexture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUTextureFormat wgpuSurfaceGetPreferredFormat(WGPUSurface surface, WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSurfacePresent(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSurfaceUnconfigure(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSurfaceReference(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuSurfaceRelease(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of SurfaceCapabilities +WGPU_EXPORT void wgpuSurfaceCapabilitiesFreeMembers(WGPUSurfaceCapabilities capabilities) WGPU_FUNCTION_ATTRIBUTE; + +// Methods of Texture +WGPU_EXPORT WGPUTextureView wgpuTextureCreateView(WGPUTexture texture, WGPU_NULLABLE WGPUTextureViewDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuTextureDestroy(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT uint32_t wgpuTextureGetDepthOrArrayLayers(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUTextureDimension wgpuTextureGetDimension(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUTextureFormat wgpuTextureGetFormat(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT uint32_t wgpuTextureGetHeight(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT uint32_t wgpuTextureGetMipLevelCount(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT uint32_t wgpuTextureGetSampleCount(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT WGPUTextureUsageFlags wgpuTextureGetUsage(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT uint32_t wgpuTextureGetWidth(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuTextureSetLabel(WGPUTexture texture, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuTextureReference(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuTextureRelease(WGPUTexture texture) 
WGPU_FUNCTION_ATTRIBUTE; + +// Methods of TextureView +WGPU_EXPORT void wgpuTextureViewSetLabel(WGPUTextureView textureView, char const * label) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuTextureViewReference(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; +WGPU_EXPORT void wgpuTextureViewRelease(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; + +#endif // !defined(WGPU_SKIP_DECLARATIONS) + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // WEBGPU_H_ diff --git a/wgpu/resources/webgpu.idl b/wgpu/resources/webgpu.idl new file mode 100644 index 0000000..448e21f --- /dev/null +++ b/wgpu/resources/webgpu.idl @@ -0,0 +1,1314 @@ +// Copyright (C) [2023] World Wide Web Consortium, +// (Massachusetts Institute of Technology, European Research Consortium for +// Informatics and Mathematics, Keio University, Beihang). +// All Rights Reserved. +// +// This work is distributed under the W3C (R) Software License [1] in the hope +// that it will be useful, but WITHOUT ANY WARRANTY; without even the implied +// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// +// [1] http://www.w3.org/Consortium/Legal/copyright-software + +// **** This file is auto-generated. Do not edit. **** + +interface mixin GPUObjectBase { + attribute USVString label; +}; + +dictionary GPUObjectDescriptorBase { + USVString label = ""; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUSupportedLimits { + readonly attribute unsigned long maxTextureDimension1D; + readonly attribute unsigned long maxTextureDimension2D; + readonly attribute unsigned long maxTextureDimension3D; + readonly attribute unsigned long maxTextureArrayLayers; + readonly attribute unsigned long maxBindGroups; + readonly attribute unsigned long maxBindGroupsPlusVertexBuffers; + readonly attribute unsigned long maxBindingsPerBindGroup; + readonly attribute unsigned long maxDynamicUniformBuffersPerPipelineLayout; + readonly attribute unsigned long maxDynamicStorageBuffersPerPipelineLayout; + readonly attribute unsigned long maxSampledTexturesPerShaderStage; + readonly attribute unsigned long maxSamplersPerShaderStage; + readonly attribute unsigned long maxStorageBuffersPerShaderStage; + readonly attribute unsigned long maxStorageTexturesPerShaderStage; + readonly attribute unsigned long maxUniformBuffersPerShaderStage; + readonly attribute unsigned long long maxUniformBufferBindingSize; + readonly attribute unsigned long long maxStorageBufferBindingSize; + readonly attribute unsigned long minUniformBufferOffsetAlignment; + readonly attribute unsigned long minStorageBufferOffsetAlignment; + readonly attribute unsigned long maxVertexBuffers; + readonly attribute unsigned long long maxBufferSize; + readonly attribute unsigned long maxVertexAttributes; + readonly attribute unsigned long maxVertexBufferArrayStride; + readonly attribute unsigned long maxInterStageShaderComponents; + readonly attribute unsigned long maxInterStageShaderVariables; + readonly attribute unsigned long maxColorAttachments; + readonly attribute unsigned long maxColorAttachmentBytesPerSample; + readonly attribute unsigned long maxComputeWorkgroupStorageSize; + readonly attribute unsigned long maxComputeInvocationsPerWorkgroup; + readonly attribute unsigned long maxComputeWorkgroupSizeX; + readonly attribute unsigned long maxComputeWorkgroupSizeY; + readonly attribute unsigned long maxComputeWorkgroupSizeZ; + readonly attribute unsigned long maxComputeWorkgroupsPerDimension; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] 
+interface GPUSupportedFeatures { + readonly setlike; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface WGSLLanguageFeatures { + readonly setlike; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUAdapterInfo { + readonly attribute DOMString vendor; + readonly attribute DOMString architecture; + readonly attribute DOMString device; + readonly attribute DOMString description; +}; + +interface mixin NavigatorGPU { + [SameObject, SecureContext] readonly attribute GPU gpu; +}; +Navigator includes NavigatorGPU; +WorkerNavigator includes NavigatorGPU; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPU { + Promise requestAdapter(optional GPURequestAdapterOptions options = {}); + GPUTextureFormat getPreferredCanvasFormat(); + [SameObject] readonly attribute WGSLLanguageFeatures wgslLanguageFeatures; +}; + +dictionary GPURequestAdapterOptions { + GPUPowerPreference powerPreference; + boolean forceFallbackAdapter = false; +}; + +enum GPUPowerPreference { + "low-power", + "high-performance", +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUAdapter { + [SameObject] readonly attribute GPUSupportedFeatures features; + [SameObject] readonly attribute GPUSupportedLimits limits; + readonly attribute boolean isFallbackAdapter; + + Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); + Promise requestAdapterInfo(); +}; + +dictionary GPUDeviceDescriptor + : GPUObjectDescriptorBase { + sequence requiredFeatures = []; + record requiredLimits = {}; + GPUQueueDescriptor defaultQueue = {}; +}; + +enum GPUFeatureName { + "depth-clip-control", + "depth32float-stencil8", + "texture-compression-bc", + "texture-compression-etc2", + "texture-compression-astc", + "timestamp-query", + "indirect-first-instance", + "shader-f16", + "rg11b10ufloat-renderable", + "bgra8unorm-storage", + "float32-filterable", +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUDevice : EventTarget { + [SameObject] readonly attribute GPUSupportedFeatures features; + [SameObject] readonly attribute GPUSupportedLimits limits; + + [SameObject] readonly attribute GPUQueue queue; + + undefined destroy(); + + GPUBuffer createBuffer(GPUBufferDescriptor descriptor); + GPUTexture createTexture(GPUTextureDescriptor descriptor); + GPUSampler createSampler(optional GPUSamplerDescriptor descriptor = {}); + GPUExternalTexture importExternalTexture(GPUExternalTextureDescriptor descriptor); + + GPUBindGroupLayout createBindGroupLayout(GPUBindGroupLayoutDescriptor descriptor); + GPUPipelineLayout createPipelineLayout(GPUPipelineLayoutDescriptor descriptor); + GPUBindGroup createBindGroup(GPUBindGroupDescriptor descriptor); + + GPUShaderModule createShaderModule(GPUShaderModuleDescriptor descriptor); + GPUComputePipeline createComputePipeline(GPUComputePipelineDescriptor descriptor); + GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); + Promise createComputePipelineAsync(GPUComputePipelineDescriptor descriptor); + Promise createRenderPipelineAsync(GPURenderPipelineDescriptor descriptor); + + GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); + GPURenderBundleEncoder createRenderBundleEncoder(GPURenderBundleEncoderDescriptor descriptor); + + GPUQuerySet createQuerySet(GPUQuerySetDescriptor descriptor); +}; +GPUDevice includes GPUObjectBase; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUBuffer { + readonly attribute GPUSize64Out size; + readonly attribute 
GPUFlagsConstant usage; + + readonly attribute GPUBufferMapState mapState; + + Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); + ArrayBuffer getMappedRange(optional GPUSize64 offset = 0, optional GPUSize64 size); + undefined unmap(); + + undefined destroy(); +}; +GPUBuffer includes GPUObjectBase; + +enum GPUBufferMapState { + "unmapped", + "pending", + "mapped", +}; + +dictionary GPUBufferDescriptor + : GPUObjectDescriptorBase { + required GPUSize64 size; + required GPUBufferUsageFlags usage; + boolean mappedAtCreation = false; +}; + +typedef [EnforceRange] unsigned long GPUBufferUsageFlags; +[Exposed=(Window, DedicatedWorker), SecureContext] +namespace GPUBufferUsage { + const GPUFlagsConstant MAP_READ = 0x0001; + const GPUFlagsConstant MAP_WRITE = 0x0002; + const GPUFlagsConstant COPY_SRC = 0x0004; + const GPUFlagsConstant COPY_DST = 0x0008; + const GPUFlagsConstant INDEX = 0x0010; + const GPUFlagsConstant VERTEX = 0x0020; + const GPUFlagsConstant UNIFORM = 0x0040; + const GPUFlagsConstant STORAGE = 0x0080; + const GPUFlagsConstant INDIRECT = 0x0100; + const GPUFlagsConstant QUERY_RESOLVE = 0x0200; +}; + +typedef [EnforceRange] unsigned long GPUMapModeFlags; +[Exposed=(Window, DedicatedWorker), SecureContext] +namespace GPUMapMode { + const GPUFlagsConstant READ = 0x0001; + const GPUFlagsConstant WRITE = 0x0002; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUTexture { + GPUTextureView createView(optional GPUTextureViewDescriptor descriptor = {}); + + undefined destroy(); + + readonly attribute GPUIntegerCoordinateOut width; + readonly attribute GPUIntegerCoordinateOut height; + readonly attribute GPUIntegerCoordinateOut depthOrArrayLayers; + readonly attribute GPUIntegerCoordinateOut mipLevelCount; + readonly attribute GPUSize32Out sampleCount; + readonly attribute GPUTextureDimension dimension; + readonly attribute GPUTextureFormat format; + readonly attribute GPUFlagsConstant usage; +}; +GPUTexture includes GPUObjectBase; + +dictionary GPUTextureDescriptor + : GPUObjectDescriptorBase { + required GPUExtent3D size; + GPUIntegerCoordinate mipLevelCount = 1; + GPUSize32 sampleCount = 1; + GPUTextureDimension dimension = "2d"; + required GPUTextureFormat format; + required GPUTextureUsageFlags usage; + sequence viewFormats = []; +}; + +enum GPUTextureDimension { + "1d", + "2d", + "3d", +}; + +typedef [EnforceRange] unsigned long GPUTextureUsageFlags; +[Exposed=(Window, DedicatedWorker), SecureContext] +namespace GPUTextureUsage { + const GPUFlagsConstant COPY_SRC = 0x01; + const GPUFlagsConstant COPY_DST = 0x02; + const GPUFlagsConstant TEXTURE_BINDING = 0x04; + const GPUFlagsConstant STORAGE_BINDING = 0x08; + const GPUFlagsConstant RENDER_ATTACHMENT = 0x10; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUTextureView { +}; +GPUTextureView includes GPUObjectBase; + +dictionary GPUTextureViewDescriptor + : GPUObjectDescriptorBase { + GPUTextureFormat format; + GPUTextureViewDimension dimension; + GPUTextureAspect aspect = "all"; + GPUIntegerCoordinate baseMipLevel = 0; + GPUIntegerCoordinate mipLevelCount; + GPUIntegerCoordinate baseArrayLayer = 0; + GPUIntegerCoordinate arrayLayerCount; +}; + +enum GPUTextureViewDimension { + "1d", + "2d", + "2d-array", + "cube", + "cube-array", + "3d", +}; + +enum GPUTextureAspect { + "all", + "stencil-only", + "depth-only", +}; + +enum GPUTextureFormat { + // 8-bit formats + "r8unorm", + "r8snorm", + "r8uint", + "r8sint", + + // 16-bit formats + "r16uint", + 
"r16sint", + "r16float", + "rg8unorm", + "rg8snorm", + "rg8uint", + "rg8sint", + + // 32-bit formats + "r32uint", + "r32sint", + "r32float", + "rg16uint", + "rg16sint", + "rg16float", + "rgba8unorm", + "rgba8unorm-srgb", + "rgba8snorm", + "rgba8uint", + "rgba8sint", + "bgra8unorm", + "bgra8unorm-srgb", + // Packed 32-bit formats + "rgb9e5ufloat", + "rgb10a2uint", + "rgb10a2unorm", + "rg11b10ufloat", + + // 64-bit formats + "rg32uint", + "rg32sint", + "rg32float", + "rgba16uint", + "rgba16sint", + "rgba16float", + + // 128-bit formats + "rgba32uint", + "rgba32sint", + "rgba32float", + + // Depth/stencil formats + "stencil8", + "depth16unorm", + "depth24plus", + "depth24plus-stencil8", + "depth32float", + + // "depth32float-stencil8" feature + "depth32float-stencil8", + + // BC compressed formats usable if "texture-compression-bc" is both + // supported by the device/user agent and enabled in requestDevice. + "bc1-rgba-unorm", + "bc1-rgba-unorm-srgb", + "bc2-rgba-unorm", + "bc2-rgba-unorm-srgb", + "bc3-rgba-unorm", + "bc3-rgba-unorm-srgb", + "bc4-r-unorm", + "bc4-r-snorm", + "bc5-rg-unorm", + "bc5-rg-snorm", + "bc6h-rgb-ufloat", + "bc6h-rgb-float", + "bc7-rgba-unorm", + "bc7-rgba-unorm-srgb", + + // ETC2 compressed formats usable if "texture-compression-etc2" is both + // supported by the device/user agent and enabled in requestDevice. + "etc2-rgb8unorm", + "etc2-rgb8unorm-srgb", + "etc2-rgb8a1unorm", + "etc2-rgb8a1unorm-srgb", + "etc2-rgba8unorm", + "etc2-rgba8unorm-srgb", + "eac-r11unorm", + "eac-r11snorm", + "eac-rg11unorm", + "eac-rg11snorm", + + // ASTC compressed formats usable if "texture-compression-astc" is both + // supported by the device/user agent and enabled in requestDevice. + "astc-4x4-unorm", + "astc-4x4-unorm-srgb", + "astc-5x4-unorm", + "astc-5x4-unorm-srgb", + "astc-5x5-unorm", + "astc-5x5-unorm-srgb", + "astc-6x5-unorm", + "astc-6x5-unorm-srgb", + "astc-6x6-unorm", + "astc-6x6-unorm-srgb", + "astc-8x5-unorm", + "astc-8x5-unorm-srgb", + "astc-8x6-unorm", + "astc-8x6-unorm-srgb", + "astc-8x8-unorm", + "astc-8x8-unorm-srgb", + "astc-10x5-unorm", + "astc-10x5-unorm-srgb", + "astc-10x6-unorm", + "astc-10x6-unorm-srgb", + "astc-10x8-unorm", + "astc-10x8-unorm-srgb", + "astc-10x10-unorm", + "astc-10x10-unorm-srgb", + "astc-12x10-unorm", + "astc-12x10-unorm-srgb", + "astc-12x12-unorm", + "astc-12x12-unorm-srgb", +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUExternalTexture { +}; +GPUExternalTexture includes GPUObjectBase; + +dictionary GPUExternalTextureDescriptor + : GPUObjectDescriptorBase { + required (HTMLVideoElement or VideoFrame) source; + PredefinedColorSpace colorSpace = "srgb"; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUSampler { +}; +GPUSampler includes GPUObjectBase; + +dictionary GPUSamplerDescriptor + : GPUObjectDescriptorBase { + GPUAddressMode addressModeU = "clamp-to-edge"; + GPUAddressMode addressModeV = "clamp-to-edge"; + GPUAddressMode addressModeW = "clamp-to-edge"; + GPUFilterMode magFilter = "nearest"; + GPUFilterMode minFilter = "nearest"; + GPUMipmapFilterMode mipmapFilter = "nearest"; + float lodMinClamp = 0; + float lodMaxClamp = 32; + GPUCompareFunction compare; + [Clamp] unsigned short maxAnisotropy = 1; +}; + +enum GPUAddressMode { + "clamp-to-edge", + "repeat", + "mirror-repeat", +}; + +enum GPUFilterMode { + "nearest", + "linear", +}; + +enum GPUMipmapFilterMode { + "nearest", + "linear", +}; + +enum GPUCompareFunction { + "never", + "less", + "equal", + "less-equal", + "greater", + 
"not-equal", + "greater-equal", + "always", +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUBindGroupLayout { +}; +GPUBindGroupLayout includes GPUObjectBase; + +dictionary GPUBindGroupLayoutDescriptor + : GPUObjectDescriptorBase { + required sequence entries; +}; + +dictionary GPUBindGroupLayoutEntry { + required GPUIndex32 binding; + required GPUShaderStageFlags visibility; + + GPUBufferBindingLayout buffer; + GPUSamplerBindingLayout sampler; + GPUTextureBindingLayout texture; + GPUStorageTextureBindingLayout storageTexture; + GPUExternalTextureBindingLayout externalTexture; +}; + +typedef [EnforceRange] unsigned long GPUShaderStageFlags; +[Exposed=(Window, DedicatedWorker), SecureContext] +namespace GPUShaderStage { + const GPUFlagsConstant VERTEX = 0x1; + const GPUFlagsConstant FRAGMENT = 0x2; + const GPUFlagsConstant COMPUTE = 0x4; +}; + +enum GPUBufferBindingType { + "uniform", + "storage", + "read-only-storage", +}; + +dictionary GPUBufferBindingLayout { + GPUBufferBindingType type = "uniform"; + boolean hasDynamicOffset = false; + GPUSize64 minBindingSize = 0; +}; + +enum GPUSamplerBindingType { + "filtering", + "non-filtering", + "comparison", +}; + +dictionary GPUSamplerBindingLayout { + GPUSamplerBindingType type = "filtering"; +}; + +enum GPUTextureSampleType { + "float", + "unfilterable-float", + "depth", + "sint", + "uint", +}; + +dictionary GPUTextureBindingLayout { + GPUTextureSampleType sampleType = "float"; + GPUTextureViewDimension viewDimension = "2d"; + boolean multisampled = false; +}; + +enum GPUStorageTextureAccess { + "write-only", + "read-only", + "read-write", +}; + +dictionary GPUStorageTextureBindingLayout { + GPUStorageTextureAccess access = "write-only"; + required GPUTextureFormat format; + GPUTextureViewDimension viewDimension = "2d"; +}; + +dictionary GPUExternalTextureBindingLayout { +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUBindGroup { +}; +GPUBindGroup includes GPUObjectBase; + +dictionary GPUBindGroupDescriptor + : GPUObjectDescriptorBase { + required GPUBindGroupLayout layout; + required sequence entries; +}; + +typedef (GPUSampler or GPUTextureView or GPUBufferBinding or GPUExternalTexture) GPUBindingResource; + +dictionary GPUBindGroupEntry { + required GPUIndex32 binding; + required GPUBindingResource resource; +}; + +dictionary GPUBufferBinding { + required GPUBuffer buffer; + GPUSize64 offset = 0; + GPUSize64 size; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUPipelineLayout { +}; +GPUPipelineLayout includes GPUObjectBase; + +dictionary GPUPipelineLayoutDescriptor + : GPUObjectDescriptorBase { + required sequence bindGroupLayouts; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUShaderModule { + Promise getCompilationInfo(); +}; +GPUShaderModule includes GPUObjectBase; + +dictionary GPUShaderModuleDescriptor + : GPUObjectDescriptorBase { + required USVString code; + object sourceMap; + sequence compilationHints = []; +}; + +dictionary GPUShaderModuleCompilationHint { + required USVString entryPoint; + (GPUPipelineLayout or GPUAutoLayoutMode) layout; +}; + +enum GPUCompilationMessageType { + "error", + "warning", + "info", +}; + +[Exposed=(Window, DedicatedWorker), Serializable, SecureContext] +interface GPUCompilationMessage { + readonly attribute DOMString message; + readonly attribute GPUCompilationMessageType type; + readonly attribute unsigned long long lineNum; + readonly attribute unsigned long long linePos; + readonly attribute unsigned 
long long offset; + readonly attribute unsigned long long length; +}; + +[Exposed=(Window, DedicatedWorker), Serializable, SecureContext] +interface GPUCompilationInfo { + readonly attribute FrozenArray messages; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext, Serializable] +interface GPUPipelineError : DOMException { + constructor(optional DOMString message = "", GPUPipelineErrorInit options); + readonly attribute GPUPipelineErrorReason reason; +}; + +dictionary GPUPipelineErrorInit { + required GPUPipelineErrorReason reason; +}; + +enum GPUPipelineErrorReason { + "validation", + "internal", +}; + +enum GPUAutoLayoutMode { + "auto", +}; + +dictionary GPUPipelineDescriptorBase + : GPUObjectDescriptorBase { + required (GPUPipelineLayout or GPUAutoLayoutMode) layout; +}; + +interface mixin GPUPipelineBase { + [NewObject] GPUBindGroupLayout getBindGroupLayout(unsigned long index); +}; + +dictionary GPUProgrammableStage { + required GPUShaderModule module; + required USVString entryPoint; + record constants; +}; + +typedef double GPUPipelineConstantValue; // May represent WGSL’s bool, f32, i32, u32, and f16 if enabled. + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUComputePipeline { +}; +GPUComputePipeline includes GPUObjectBase; +GPUComputePipeline includes GPUPipelineBase; + +dictionary GPUComputePipelineDescriptor + : GPUPipelineDescriptorBase { + required GPUProgrammableStage compute; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPURenderPipeline { +}; +GPURenderPipeline includes GPUObjectBase; +GPURenderPipeline includes GPUPipelineBase; + +dictionary GPURenderPipelineDescriptor + : GPUPipelineDescriptorBase { + required GPUVertexState vertex; + GPUPrimitiveState primitive = {}; + GPUDepthStencilState depthStencil; + GPUMultisampleState multisample = {}; + GPUFragmentState fragment; +}; + +dictionary GPUPrimitiveState { + GPUPrimitiveTopology topology = "triangle-list"; + GPUIndexFormat stripIndexFormat; + GPUFrontFace frontFace = "ccw"; + GPUCullMode cullMode = "none"; + + // Requires "depth-clip-control" feature. 
+ boolean unclippedDepth = false; +}; + +enum GPUPrimitiveTopology { + "point-list", + "line-list", + "line-strip", + "triangle-list", + "triangle-strip", +}; + +enum GPUFrontFace { + "ccw", + "cw", +}; + +enum GPUCullMode { + "none", + "front", + "back", +}; + +dictionary GPUMultisampleState { + GPUSize32 count = 1; + GPUSampleMask mask = 0xFFFFFFFF; + boolean alphaToCoverageEnabled = false; +}; + +dictionary GPUFragmentState + : GPUProgrammableStage { + required sequence targets; +}; + +dictionary GPUColorTargetState { + required GPUTextureFormat format; + + GPUBlendState blend; + GPUColorWriteFlags writeMask = 0xF; // GPUColorWrite.ALL +}; + +dictionary GPUBlendState { + required GPUBlendComponent color; + required GPUBlendComponent alpha; +}; + +typedef [EnforceRange] unsigned long GPUColorWriteFlags; +[Exposed=(Window, DedicatedWorker), SecureContext] +namespace GPUColorWrite { + const GPUFlagsConstant RED = 0x1; + const GPUFlagsConstant GREEN = 0x2; + const GPUFlagsConstant BLUE = 0x4; + const GPUFlagsConstant ALPHA = 0x8; + const GPUFlagsConstant ALL = 0xF; +}; + +dictionary GPUBlendComponent { + GPUBlendOperation operation = "add"; + GPUBlendFactor srcFactor = "one"; + GPUBlendFactor dstFactor = "zero"; +}; + +enum GPUBlendFactor { + "zero", + "one", + "src", + "one-minus-src", + "src-alpha", + "one-minus-src-alpha", + "dst", + "one-minus-dst", + "dst-alpha", + "one-minus-dst-alpha", + "src-alpha-saturated", + "constant", + "one-minus-constant", +}; + +enum GPUBlendOperation { + "add", + "subtract", + "reverse-subtract", + "min", + "max", +}; + +dictionary GPUDepthStencilState { + required GPUTextureFormat format; + + boolean depthWriteEnabled; + GPUCompareFunction depthCompare; + + GPUStencilFaceState stencilFront = {}; + GPUStencilFaceState stencilBack = {}; + + GPUStencilValue stencilReadMask = 0xFFFFFFFF; + GPUStencilValue stencilWriteMask = 0xFFFFFFFF; + + GPUDepthBias depthBias = 0; + float depthBiasSlopeScale = 0; + float depthBiasClamp = 0; +}; + +dictionary GPUStencilFaceState { + GPUCompareFunction compare = "always"; + GPUStencilOperation failOp = "keep"; + GPUStencilOperation depthFailOp = "keep"; + GPUStencilOperation passOp = "keep"; +}; + +enum GPUStencilOperation { + "keep", + "zero", + "replace", + "invert", + "increment-clamp", + "decrement-clamp", + "increment-wrap", + "decrement-wrap", +}; + +enum GPUIndexFormat { + "uint16", + "uint32", +}; + +enum GPUVertexFormat { + "uint8x2", + "uint8x4", + "sint8x2", + "sint8x4", + "unorm8x2", + "unorm8x4", + "snorm8x2", + "snorm8x4", + "uint16x2", + "uint16x4", + "sint16x2", + "sint16x4", + "unorm16x2", + "unorm16x4", + "snorm16x2", + "snorm16x4", + "float16x2", + "float16x4", + "float32", + "float32x2", + "float32x3", + "float32x4", + "uint32", + "uint32x2", + "uint32x3", + "uint32x4", + "sint32", + "sint32x2", + "sint32x3", + "sint32x4", + "unorm10-10-10-2", +}; + +enum GPUVertexStepMode { + "vertex", + "instance", +}; + +dictionary GPUVertexState + : GPUProgrammableStage { + sequence buffers = []; +}; + +dictionary GPUVertexBufferLayout { + required GPUSize64 arrayStride; + GPUVertexStepMode stepMode = "vertex"; + required sequence attributes; +}; + +dictionary GPUVertexAttribute { + required GPUVertexFormat format; + required GPUSize64 offset; + + required GPUIndex32 shaderLocation; +}; + +dictionary GPUImageDataLayout { + GPUSize64 offset = 0; + GPUSize32 bytesPerRow; + GPUSize32 rowsPerImage; +}; + +dictionary GPUImageCopyBuffer + : GPUImageDataLayout { + required GPUBuffer buffer; +}; + +dictionary 
GPUImageCopyTexture { + required GPUTexture texture; + GPUIntegerCoordinate mipLevel = 0; + GPUOrigin3D origin = {}; + GPUTextureAspect aspect = "all"; +}; + +dictionary GPUImageCopyTextureTagged + : GPUImageCopyTexture { + PredefinedColorSpace colorSpace = "srgb"; + boolean premultipliedAlpha = false; +}; + +typedef (ImageBitmap or + ImageData or + HTMLImageElement or + HTMLVideoElement or + VideoFrame or + HTMLCanvasElement or + OffscreenCanvas) GPUImageCopyExternalImageSource; + +dictionary GPUImageCopyExternalImage { + required GPUImageCopyExternalImageSource source; + GPUOrigin2D origin = {}; + boolean flipY = false; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUCommandBuffer { +}; +GPUCommandBuffer includes GPUObjectBase; + +dictionary GPUCommandBufferDescriptor + : GPUObjectDescriptorBase { +}; + +interface mixin GPUCommandsMixin { +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUCommandEncoder { + GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor descriptor); + GPUComputePassEncoder beginComputePass(optional GPUComputePassDescriptor descriptor = {}); + + undefined copyBufferToBuffer( + GPUBuffer source, + GPUSize64 sourceOffset, + GPUBuffer destination, + GPUSize64 destinationOffset, + GPUSize64 size); + + undefined copyBufferToTexture( + GPUImageCopyBuffer source, + GPUImageCopyTexture destination, + GPUExtent3D copySize); + + undefined copyTextureToBuffer( + GPUImageCopyTexture source, + GPUImageCopyBuffer destination, + GPUExtent3D copySize); + + undefined copyTextureToTexture( + GPUImageCopyTexture source, + GPUImageCopyTexture destination, + GPUExtent3D copySize); + + undefined clearBuffer( + GPUBuffer buffer, + optional GPUSize64 offset = 0, + optional GPUSize64 size); + + undefined resolveQuerySet( + GPUQuerySet querySet, + GPUSize32 firstQuery, + GPUSize32 queryCount, + GPUBuffer destination, + GPUSize64 destinationOffset); + + GPUCommandBuffer finish(optional GPUCommandBufferDescriptor descriptor = {}); +}; +GPUCommandEncoder includes GPUObjectBase; +GPUCommandEncoder includes GPUCommandsMixin; +GPUCommandEncoder includes GPUDebugCommandsMixin; + +dictionary GPUCommandEncoderDescriptor + : GPUObjectDescriptorBase { +}; + +interface mixin GPUBindingCommandsMixin { + undefined setBindGroup(GPUIndex32 index, GPUBindGroup? bindGroup, + optional sequence dynamicOffsets = []); + + undefined setBindGroup(GPUIndex32 index, GPUBindGroup? 
bindGroup, + Uint32Array dynamicOffsetsData, + GPUSize64 dynamicOffsetsDataStart, + GPUSize32 dynamicOffsetsDataLength); +}; + +interface mixin GPUDebugCommandsMixin { + undefined pushDebugGroup(USVString groupLabel); + undefined popDebugGroup(); + undefined insertDebugMarker(USVString markerLabel); +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUComputePassEncoder { + undefined setPipeline(GPUComputePipeline pipeline); + undefined dispatchWorkgroups(GPUSize32 workgroupCountX, optional GPUSize32 workgroupCountY = 1, optional GPUSize32 workgroupCountZ = 1); + undefined dispatchWorkgroupsIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); + + undefined end(); +}; +GPUComputePassEncoder includes GPUObjectBase; +GPUComputePassEncoder includes GPUCommandsMixin; +GPUComputePassEncoder includes GPUDebugCommandsMixin; +GPUComputePassEncoder includes GPUBindingCommandsMixin; + +dictionary GPUComputePassTimestampWrites { + required GPUQuerySet querySet; + GPUSize32 beginningOfPassWriteIndex; + GPUSize32 endOfPassWriteIndex; +}; + +dictionary GPUComputePassDescriptor + : GPUObjectDescriptorBase { + GPUComputePassTimestampWrites timestampWrites; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPURenderPassEncoder { + undefined setViewport(float x, float y, + float width, float height, + float minDepth, float maxDepth); + + undefined setScissorRect(GPUIntegerCoordinate x, GPUIntegerCoordinate y, + GPUIntegerCoordinate width, GPUIntegerCoordinate height); + + undefined setBlendConstant(GPUColor color); + undefined setStencilReference(GPUStencilValue reference); + + undefined beginOcclusionQuery(GPUSize32 queryIndex); + undefined endOcclusionQuery(); + + undefined executeBundles(sequence bundles); + undefined end(); +}; +GPURenderPassEncoder includes GPUObjectBase; +GPURenderPassEncoder includes GPUCommandsMixin; +GPURenderPassEncoder includes GPUDebugCommandsMixin; +GPURenderPassEncoder includes GPUBindingCommandsMixin; +GPURenderPassEncoder includes GPURenderCommandsMixin; + +dictionary GPURenderPassTimestampWrites { + required GPUQuerySet querySet; + GPUSize32 beginningOfPassWriteIndex; + GPUSize32 endOfPassWriteIndex; +}; + +dictionary GPURenderPassDescriptor + : GPUObjectDescriptorBase { + required sequence colorAttachments; + GPURenderPassDepthStencilAttachment depthStencilAttachment; + GPUQuerySet occlusionQuerySet; + GPURenderPassTimestampWrites timestampWrites; + GPUSize64 maxDrawCount = 50000000; +}; + +dictionary GPURenderPassColorAttachment { + required GPUTextureView view; + GPUIntegerCoordinate depthSlice; + GPUTextureView resolveTarget; + + GPUColor clearValue; + required GPULoadOp loadOp; + required GPUStoreOp storeOp; +}; + +dictionary GPURenderPassDepthStencilAttachment { + required GPUTextureView view; + + float depthClearValue; + GPULoadOp depthLoadOp; + GPUStoreOp depthStoreOp; + boolean depthReadOnly = false; + + GPUStencilValue stencilClearValue = 0; + GPULoadOp stencilLoadOp; + GPUStoreOp stencilStoreOp; + boolean stencilReadOnly = false; +}; + +enum GPULoadOp { + "load", + "clear", +}; + +enum GPUStoreOp { + "store", + "discard", +}; + +dictionary GPURenderPassLayout + : GPUObjectDescriptorBase { + required sequence colorFormats; + GPUTextureFormat depthStencilFormat; + GPUSize32 sampleCount = 1; +}; + +interface mixin GPURenderCommandsMixin { + undefined setPipeline(GPURenderPipeline pipeline); + + undefined setIndexBuffer(GPUBuffer buffer, GPUIndexFormat indexFormat, optional GPUSize64 offset = 0, optional GPUSize64 size); 
+ undefined setVertexBuffer(GPUIndex32 slot, GPUBuffer? buffer, optional GPUSize64 offset = 0, optional GPUSize64 size); + + undefined draw(GPUSize32 vertexCount, optional GPUSize32 instanceCount = 1, + optional GPUSize32 firstVertex = 0, optional GPUSize32 firstInstance = 0); + undefined drawIndexed(GPUSize32 indexCount, optional GPUSize32 instanceCount = 1, + optional GPUSize32 firstIndex = 0, + optional GPUSignedOffset32 baseVertex = 0, + optional GPUSize32 firstInstance = 0); + + undefined drawIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); + undefined drawIndexedIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPURenderBundle { +}; +GPURenderBundle includes GPUObjectBase; + +dictionary GPURenderBundleDescriptor + : GPUObjectDescriptorBase { +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPURenderBundleEncoder { + GPURenderBundle finish(optional GPURenderBundleDescriptor descriptor = {}); +}; +GPURenderBundleEncoder includes GPUObjectBase; +GPURenderBundleEncoder includes GPUCommandsMixin; +GPURenderBundleEncoder includes GPUDebugCommandsMixin; +GPURenderBundleEncoder includes GPUBindingCommandsMixin; +GPURenderBundleEncoder includes GPURenderCommandsMixin; + +dictionary GPURenderBundleEncoderDescriptor + : GPURenderPassLayout { + boolean depthReadOnly = false; + boolean stencilReadOnly = false; +}; + +dictionary GPUQueueDescriptor + : GPUObjectDescriptorBase { +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUQueue { + undefined submit(sequence commandBuffers); + + Promise onSubmittedWorkDone(); + + undefined writeBuffer( + GPUBuffer buffer, + GPUSize64 bufferOffset, + AllowSharedBufferSource data, + optional GPUSize64 dataOffset = 0, + optional GPUSize64 size); + + undefined writeTexture( + GPUImageCopyTexture destination, + AllowSharedBufferSource data, + GPUImageDataLayout dataLayout, + GPUExtent3D size); + + undefined copyExternalImageToTexture( + GPUImageCopyExternalImage source, + GPUImageCopyTextureTagged destination, + GPUExtent3D copySize); +}; +GPUQueue includes GPUObjectBase; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUQuerySet { + undefined destroy(); + + readonly attribute GPUQueryType type; + readonly attribute GPUSize32Out count; +}; +GPUQuerySet includes GPUObjectBase; + +dictionary GPUQuerySetDescriptor + : GPUObjectDescriptorBase { + required GPUQueryType type; + required GPUSize32 count; +}; + +enum GPUQueryType { + "occlusion", + "timestamp", +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUCanvasContext { + readonly attribute (HTMLCanvasElement or OffscreenCanvas) canvas; + + undefined configure(GPUCanvasConfiguration configuration); + undefined unconfigure(); + + GPUTexture getCurrentTexture(); +}; + +enum GPUCanvasAlphaMode { + "opaque", + "premultiplied", +}; + +dictionary GPUCanvasConfiguration { + required GPUDevice device; + required GPUTextureFormat format; + GPUTextureUsageFlags usage = 0x10; // GPUTextureUsage.RENDER_ATTACHMENT + sequence viewFormats = []; + PredefinedColorSpace colorSpace = "srgb"; + GPUCanvasAlphaMode alphaMode = "opaque"; +}; + +enum GPUDeviceLostReason { + "unknown", + "destroyed", +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUDeviceLostInfo { + readonly attribute GPUDeviceLostReason reason; + readonly attribute DOMString message; +}; + +partial interface GPUDevice { + readonly attribute Promise lost; +}; + +[Exposed=(Window, 
DedicatedWorker), SecureContext] +interface GPUError { + readonly attribute DOMString message; +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUValidationError + : GPUError { + constructor(DOMString message); +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUOutOfMemoryError + : GPUError { + constructor(DOMString message); +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUInternalError + : GPUError { + constructor(DOMString message); +}; + +enum GPUErrorFilter { + "validation", + "out-of-memory", + "internal", +}; + +partial interface GPUDevice { + undefined pushErrorScope(GPUErrorFilter filter); + Promise popErrorScope(); +}; + +[Exposed=(Window, DedicatedWorker), SecureContext] +interface GPUUncapturedErrorEvent : Event { + constructor( + DOMString type, + GPUUncapturedErrorEventInit gpuUncapturedErrorEventInitDict + ); + [SameObject] readonly attribute GPUError error; +}; + +dictionary GPUUncapturedErrorEventInit : EventInit { + required GPUError error; +}; + +partial interface GPUDevice { + [Exposed=(Window, DedicatedWorker)] + attribute EventHandler onuncapturederror; +}; + +typedef [EnforceRange] unsigned long GPUBufferDynamicOffset; +typedef [EnforceRange] unsigned long GPUStencilValue; +typedef [EnforceRange] unsigned long GPUSampleMask; +typedef [EnforceRange] long GPUDepthBias; + +typedef [EnforceRange] unsigned long long GPUSize64; +typedef [EnforceRange] unsigned long GPUIntegerCoordinate; +typedef [EnforceRange] unsigned long GPUIndex32; +typedef [EnforceRange] unsigned long GPUSize32; +typedef [EnforceRange] long GPUSignedOffset32; + +typedef unsigned long long GPUSize64Out; +typedef unsigned long GPUIntegerCoordinateOut; +typedef unsigned long GPUSize32Out; + +typedef unsigned long GPUFlagsConstant; + +dictionary GPUColorDict { + required double r; + required double g; + required double b; + required double a; +}; +typedef (sequence or GPUColorDict) GPUColor; + +dictionary GPUOrigin2DDict { + GPUIntegerCoordinate x = 0; + GPUIntegerCoordinate y = 0; +}; +typedef (sequence or GPUOrigin2DDict) GPUOrigin2D; + +dictionary GPUOrigin3DDict { + GPUIntegerCoordinate x = 0; + GPUIntegerCoordinate y = 0; + GPUIntegerCoordinate z = 0; +}; +typedef (sequence or GPUOrigin3DDict) GPUOrigin3D; + +dictionary GPUExtent3DDict { + required GPUIntegerCoordinate width; + GPUIntegerCoordinate height = 1; + GPUIntegerCoordinate depthOrArrayLayers = 1; +}; +typedef (sequence or GPUExtent3DDict) GPUExtent3D; + diff --git a/wgpu/resources/wgpu.h b/wgpu/resources/wgpu.h new file mode 100644 index 0000000..76bdb47 --- /dev/null +++ b/wgpu/resources/wgpu.h @@ -0,0 +1,256 @@ +#ifndef WGPU_H_ +#define WGPU_H_ + +#include "webgpu.h" + +typedef enum WGPUNativeSType { + // Start at 0003 since that's allocated range for wgpu-native + WGPUSType_DeviceExtras = 0x00030001, + WGPUSType_RequiredLimitsExtras = 0x00030002, + WGPUSType_PipelineLayoutExtras = 0x00030003, + WGPUSType_ShaderModuleGLSLDescriptor = 0x00030004, + WGPUSType_SupportedLimitsExtras = 0x00030005, + WGPUSType_InstanceExtras = 0x00030006, + WGPUSType_BindGroupEntryExtras = 0x00030007, + WGPUSType_BindGroupLayoutEntryExtras = 0x00030008, + WGPUSType_QuerySetDescriptorExtras = 0x00030009, + WGPUNativeSType_Force32 = 0x7FFFFFFF +} WGPUNativeSType; + +typedef enum WGPUNativeFeature { + WGPUNativeFeature_PushConstants = 0x00030001, + WGPUNativeFeature_TextureAdapterSpecificFormatFeatures = 0x00030002, + WGPUNativeFeature_MultiDrawIndirect = 0x00030003, + 
WGPUNativeFeature_MultiDrawIndirectCount = 0x00030004, + WGPUNativeFeature_VertexWritableStorage = 0x00030005, + WGPUNativeFeature_TextureBindingArray = 0x00030006, + WGPUNativeFeature_SampledTextureAndStorageBufferArrayNonUniformIndexing = 0x00030007, + WGPUNativeFeature_PipelineStatisticsQuery = 0x00030008, + WGPUNativeFeature_Force32 = 0x7FFFFFFF +} WGPUNativeFeature; + +typedef enum WGPULogLevel { + WGPULogLevel_Off = 0x00000000, + WGPULogLevel_Error = 0x00000001, + WGPULogLevel_Warn = 0x00000002, + WGPULogLevel_Info = 0x00000003, + WGPULogLevel_Debug = 0x00000004, + WGPULogLevel_Trace = 0x00000005, + WGPULogLevel_Force32 = 0x7FFFFFFF +} WGPULogLevel; + +typedef enum WGPUInstanceBackend { + WGPUInstanceBackend_All = 0x00000000, + WGPUInstanceBackend_Vulkan = 1 << 0, + WGPUInstanceBackend_GL = 1 << 1, + WGPUInstanceBackend_Metal = 1 << 2, + WGPUInstanceBackend_DX12 = 1 << 3, + WGPUInstanceBackend_DX11 = 1 << 4, + WGPUInstanceBackend_BrowserWebGPU = 1 << 5, + WGPUInstanceBackend_Primary = WGPUInstanceBackend_Vulkan | WGPUInstanceBackend_Metal | + WGPUInstanceBackend_DX12 | + WGPUInstanceBackend_BrowserWebGPU, + WGPUInstanceBackend_Secondary = WGPUInstanceBackend_GL | WGPUInstanceBackend_DX11, + WGPUInstanceBackend_Force32 = 0x7FFFFFFF +} WGPUInstanceBackend; +typedef WGPUFlags WGPUInstanceBackendFlags; + +typedef enum WGPUInstanceFlag { + WGPUInstanceFlag_Default = 0x00000000, + WGPUInstanceFlag_Debug = 1 << 0, + WGPUInstanceFlag_Validation = 1 << 1, + WGPUInstanceFlag_DiscardHalLabels = 1 << 2, + WGPUInstanceFlag_Force32 = 0x7FFFFFFF +} WGPUInstanceFlag; +typedef WGPUFlags WGPUInstanceFlags; + +typedef enum WGPUDx12Compiler { + WGPUDx12Compiler_Undefined = 0x00000000, + WGPUDx12Compiler_Fxc = 0x00000001, + WGPUDx12Compiler_Dxc = 0x00000002, + WGPUDx12Compiler_Force32 = 0x7FFFFFFF +} WGPUDx12Compiler; + +typedef enum WGPUGles3MinorVersion { + WGPUGles3MinorVersion_Automatic = 0x00000000, + WGPUGles3MinorVersion_Version0 = 0x00000001, + WGPUGles3MinorVersion_Version1 = 0x00000002, + WGPUGles3MinorVersion_Version2 = 0x00000003, + WGPUGles3MinorVersion_Force32 = 0x7FFFFFFF +} WGPUGles3MinorVersion; + +typedef enum WGPUPipelineStatisticName { + WGPUPipelineStatisticName_VertexShaderInvocations = 0x00000000, + WGPUPipelineStatisticName_ClipperInvocations = 0x00000001, + WGPUPipelineStatisticName_ClipperPrimitivesOut = 0x00000002, + WGPUPipelineStatisticName_FragmentShaderInvocations = 0x00000003, + WGPUPipelineStatisticName_ComputeShaderInvocations = 0x00000004, + WGPUPipelineStatisticName_Force32 = 0x7FFFFFFF +} WGPUPipelineStatisticName WGPU_ENUM_ATTRIBUTE; + +typedef enum WGPUNativeQueryType { + WGPUNativeQueryType_PipelineStatistics = 0x00030000, + WGPUNativeQueryType_Force32 = 0x7FFFFFFF +} WGPUNativeQueryType WGPU_ENUM_ATTRIBUTE; + +typedef struct WGPUInstanceExtras { + WGPUChainedStruct chain; + WGPUInstanceBackendFlags backends; + WGPUInstanceFlags flags; + WGPUDx12Compiler dx12ShaderCompiler; + WGPUGles3MinorVersion gles3MinorVersion; + const char * dxilPath; + const char * dxcPath; +} WGPUInstanceExtras; + +typedef struct WGPUDeviceExtras { + WGPUChainedStruct chain; + const char * tracePath; +} WGPUDeviceExtras; + +typedef struct WGPUNativeLimits { + uint32_t maxPushConstantSize; + uint32_t maxNonSamplerBindings; +} WGPUNativeLimits; + +typedef struct WGPURequiredLimitsExtras { + WGPUChainedStruct chain; + WGPUNativeLimits limits; +} WGPURequiredLimitsExtras; + +typedef struct WGPUSupportedLimitsExtras { + WGPUChainedStructOut chain; + WGPUNativeLimits limits; +} 
WGPUSupportedLimitsExtras; + +typedef struct WGPUPushConstantRange { + WGPUShaderStageFlags stages; + uint32_t start; + uint32_t end; +} WGPUPushConstantRange; + +typedef struct WGPUPipelineLayoutExtras { + WGPUChainedStruct chain; + uint32_t pushConstantRangeCount; + WGPUPushConstantRange* pushConstantRanges; +} WGPUPipelineLayoutExtras; + +typedef uint64_t WGPUSubmissionIndex; + +typedef struct WGPUWrappedSubmissionIndex { + WGPUQueue queue; + WGPUSubmissionIndex submissionIndex; +} WGPUWrappedSubmissionIndex; + +typedef struct WGPUShaderDefine { + char const * name; + char const * value; +} WGPUShaderDefine; + +typedef struct WGPUShaderModuleGLSLDescriptor { + WGPUChainedStruct chain; + WGPUShaderStage stage; + char const * code; + uint32_t defineCount; + WGPUShaderDefine * defines; +} WGPUShaderModuleGLSLDescriptor; + +typedef struct WGPUStorageReport { + size_t numOccupied; + size_t numVacant; + size_t numError; + size_t elementSize; +} WGPUStorageReport; + +typedef struct WGPUHubReport { + WGPUStorageReport adapters; + WGPUStorageReport devices; + WGPUStorageReport pipelineLayouts; + WGPUStorageReport shaderModules; + WGPUStorageReport bindGroupLayouts; + WGPUStorageReport bindGroups; + WGPUStorageReport commandBuffers; + WGPUStorageReport renderBundles; + WGPUStorageReport renderPipelines; + WGPUStorageReport computePipelines; + WGPUStorageReport querySets; + WGPUStorageReport buffers; + WGPUStorageReport textures; + WGPUStorageReport textureViews; + WGPUStorageReport samplers; +} WGPUHubReport; + +typedef struct WGPUGlobalReport { + WGPUStorageReport surfaces; + WGPUBackendType backendType; + WGPUHubReport vulkan; + WGPUHubReport metal; + WGPUHubReport dx12; + WGPUHubReport dx11; + WGPUHubReport gl; +} WGPUGlobalReport; + +typedef struct WGPUInstanceEnumerateAdapterOptions { + WGPUChainedStruct const * nextInChain; + WGPUInstanceBackendFlags backends; +} WGPUInstanceEnumerateAdapterOptions; + +typedef struct WGPUBindGroupEntryExtras { + WGPUChainedStruct chain; + WGPUBuffer const * buffers; + size_t bufferCount; + WGPUSampler const * samplers; + size_t samplerCount; + WGPUTextureView const * textureViews; + size_t textureViewCount; +} WGPUBindGroupEntryExtras; + +typedef struct WGPUBindGroupLayoutEntryExtras { + WGPUChainedStruct chain; + uint32_t count; +} WGPUBindGroupLayoutEntryExtras; + +typedef struct WGPUQuerySetDescriptorExtras { + WGPUChainedStruct chain; + WGPUPipelineStatisticName const * pipelineStatistics; + size_t pipelineStatisticCount; +} WGPUQuerySetDescriptorExtras WGPU_STRUCTURE_ATTRIBUTE; + +typedef void (*WGPULogCallback)(WGPULogLevel level, char const * message, void * userdata); + +#ifdef __cplusplus +extern "C" { +#endif + +void wgpuGenerateReport(WGPUInstance instance, WGPUGlobalReport * report); +size_t wgpuInstanceEnumerateAdapters(WGPUInstance instance, WGPUInstanceEnumerateAdapterOptions const * options, WGPUAdapter * adapters); + +WGPUSubmissionIndex wgpuQueueSubmitForIndex(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands); + +// Returns true if the queue is empty, or false if there are more queue submissions still in flight. 
+WGPUBool wgpuDevicePoll(WGPUDevice device, WGPUBool wait, WGPUWrappedSubmissionIndex const * wrappedSubmissionIndex); + +void wgpuSetLogCallback(WGPULogCallback callback, void * userdata); + +void wgpuSetLogLevel(WGPULogLevel level); + +uint32_t wgpuGetVersion(void); + +void wgpuRenderPassEncoderSetPushConstants(WGPURenderPassEncoder encoder, WGPUShaderStageFlags stages, uint32_t offset, uint32_t sizeBytes, void* const data); + +void wgpuRenderPassEncoderMultiDrawIndirect(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, uint32_t count); +void wgpuRenderPassEncoderMultiDrawIndexedIndirect(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, uint32_t count); + +void wgpuRenderPassEncoderMultiDrawIndirectCount(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, WGPUBuffer count_buffer, uint64_t count_buffer_offset, uint32_t max_count); +void wgpuRenderPassEncoderMultiDrawIndexedIndirectCount(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, WGPUBuffer count_buffer, uint64_t count_buffer_offset, uint32_t max_count); + +void wgpuComputePassEncoderBeginPipelineStatisticsQuery(WGPUComputePassEncoder computePassEncoder, WGPUQuerySet querySet, uint32_t queryIndex); +void wgpuComputePassEncoderEndPipelineStatisticsQuery(WGPUComputePassEncoder computePassEncoder); +void wgpuRenderPassEncoderBeginPipelineStatisticsQuery(WGPURenderPassEncoder renderPassEncoder, WGPUQuerySet querySet, uint32_t queryIndex); +void wgpuRenderPassEncoderEndPipelineStatisticsQuery(WGPURenderPassEncoder renderPassEncoder); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif diff --git a/wgpu/structs.py b/wgpu/structs.py new file mode 100644 index 0000000..c225aca --- /dev/null +++ b/wgpu/structs.py @@ -0,0 +1,748 @@ +""" +These structs are defined in ``wgpu.structs``. + +The structs in wgpu-py are represented as Python dictionaries. +Fields that have default values (as indicated below) may be omitted.
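+
+For illustration, a minimal sketch of how such a struct dict might be written in
+user code (the ``buffer`` and ``bind_group_entry`` names are placeholders,
+assuming ``buffer`` is an existing :class:`GPUBuffer`); since ``offset``
+defaults to 0, it may simply be left out::
+
+    # A structs.BindGroupEntry whose resource is a structs.BufferBinding.
+    bind_group_entry = {
+        "binding": 0,
+        "resource": {"buffer": buffer, "offset": 0, "size": 64},
+    }
+    # Equivalent, relying on the default offset of 0:
+    bind_group_entry = {
+        "binding": 0,
+        "resource": {"buffer": buffer, "size": 64},
+    }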
+""" + +_use_sphinx_repr = False + + +class Struct: + def __init__(self, name, **kwargs): + self._name = name + for key, val in kwargs.items(): + setattr(self, key, val) + + def __iter__(self): + return iter([key for key in dir(self) if not key.startswith("_")]) + + def __repr__(self): + if _use_sphinx_repr: # no-cover + return "" + options = ", ".join(f"'{x}'" for x in self) + return f"" + + +# CODE BELOW THIS POINT IS AUTOGENERATED - DO NOT EDIT + + +# There are 59 structs + +__all__ = [ + "RequestAdapterOptions", + "DeviceDescriptor", + "BufferDescriptor", + "TextureDescriptor", + "TextureViewDescriptor", + "ExternalTextureDescriptor", + "SamplerDescriptor", + "BindGroupLayoutDescriptor", + "BindGroupLayoutEntry", + "BufferBindingLayout", + "SamplerBindingLayout", + "TextureBindingLayout", + "StorageTextureBindingLayout", + "ExternalTextureBindingLayout", + "BindGroupDescriptor", + "BindGroupEntry", + "BufferBinding", + "PipelineLayoutDescriptor", + "ShaderModuleDescriptor", + "ShaderModuleCompilationHint", + "PipelineErrorInit", + "ProgrammableStage", + "ComputePipelineDescriptor", + "RenderPipelineDescriptor", + "PrimitiveState", + "MultisampleState", + "FragmentState", + "ColorTargetState", + "BlendState", + "BlendComponent", + "DepthStencilState", + "StencilFaceState", + "VertexState", + "VertexBufferLayout", + "VertexAttribute", + "ImageDataLayout", + "ImageCopyBuffer", + "ImageCopyTexture", + "ImageCopyExternalImage", + "CommandBufferDescriptor", + "CommandEncoderDescriptor", + "ComputePassTimestampWrites", + "ComputePassDescriptor", + "RenderPassTimestampWrites", + "RenderPassDescriptor", + "RenderPassColorAttachment", + "RenderPassDepthStencilAttachment", + "RenderPassLayout", + "RenderBundleDescriptor", + "RenderBundleEncoderDescriptor", + "QueueDescriptor", + "QuerySetDescriptor", + "CanvasConfiguration", + "UncapturedErrorEventInit", + "Color", + "Origin2D", + "Origin3D", + "Extent3D", +] + + +#: * powerPreference :: :obj:`enums.PowerPreference ` = None +#: * forceFallbackAdapter :: bool = false +RequestAdapterOptions = Struct( + "RequestAdapterOptions", + power_preference="enums.PowerPreference", + force_fallback_adapter="bool", +) + +#: * label :: str = "" +#: * requiredFeatures :: List[:obj:`enums.FeatureName `] = [] +#: * requiredLimits :: Dict[str, int] = {} +#: * defaultQueue :: :obj:`structs.QueueDescriptor ` = {} +DeviceDescriptor = Struct( + "DeviceDescriptor", + label="str", + required_features="List[enums.FeatureName]", + required_limits="Dict[str, int]", + default_queue="structs.QueueDescriptor", +) + +#: * label :: str = "" +#: * size :: int +#: * usage :: :obj:`flags.BufferUsage ` +#: * mappedAtCreation :: bool = false +BufferDescriptor = Struct( + "BufferDescriptor", + label="str", + size="int", + usage="flags.BufferUsage", + mapped_at_creation="bool", +) + +#: * label :: str = "" +#: * size :: Union[List[int], :obj:`structs.Extent3D `] +#: * mipLevelCount :: int = 1 +#: * sampleCount :: int = 1 +#: * dimension :: :obj:`enums.TextureDimension ` = "2d" +#: * format :: :obj:`enums.TextureFormat ` +#: * usage :: :obj:`flags.TextureUsage ` +#: * viewFormats :: List[:obj:`enums.TextureFormat `] = [] +TextureDescriptor = Struct( + "TextureDescriptor", + label="str", + size="Union[List[int], structs.Extent3D]", + mip_level_count="int", + sample_count="int", + dimension="enums.TextureDimension", + format="enums.TextureFormat", + usage="flags.TextureUsage", + view_formats="List[enums.TextureFormat]", +) + +#: * label :: str = "" +#: * format :: :obj:`enums.TextureFormat 
` = None +#: * dimension :: :obj:`enums.TextureViewDimension ` = None +#: * aspect :: :obj:`enums.TextureAspect ` = "all" +#: * baseMipLevel :: int = 0 +#: * mipLevelCount :: int = None +#: * baseArrayLayer :: int = 0 +#: * arrayLayerCount :: int = None +TextureViewDescriptor = Struct( + "TextureViewDescriptor", + label="str", + format="enums.TextureFormat", + dimension="enums.TextureViewDimension", + aspect="enums.TextureAspect", + base_mip_level="int", + mip_level_count="int", + base_array_layer="int", + array_layer_count="int", +) + +#: * label :: str = "" +#: * source :: Union[memoryview, object] +#: * colorSpace :: str = "srgb" +ExternalTextureDescriptor = Struct( + "ExternalTextureDescriptor", + label="str", + source="Union[memoryview, object]", + color_space="str", +) + +#: * label :: str = "" +#: * addressModeU :: :obj:`enums.AddressMode ` = "clamp-to-edge" +#: * addressModeV :: :obj:`enums.AddressMode ` = "clamp-to-edge" +#: * addressModeW :: :obj:`enums.AddressMode ` = "clamp-to-edge" +#: * magFilter :: :obj:`enums.FilterMode ` = "nearest" +#: * minFilter :: :obj:`enums.FilterMode ` = "nearest" +#: * mipmapFilter :: :obj:`enums.MipmapFilterMode ` = "nearest" +#: * lodMinClamp :: float = 0 +#: * lodMaxClamp :: float = 32 +#: * compare :: :obj:`enums.CompareFunction ` = None +#: * maxAnisotropy :: int = 1 +SamplerDescriptor = Struct( + "SamplerDescriptor", + label="str", + address_mode_u="enums.AddressMode", + address_mode_v="enums.AddressMode", + address_mode_w="enums.AddressMode", + mag_filter="enums.FilterMode", + min_filter="enums.FilterMode", + mipmap_filter="enums.MipmapFilterMode", + lod_min_clamp="float", + lod_max_clamp="float", + compare="enums.CompareFunction", + max_anisotropy="int", +) + +#: * label :: str = "" +#: * entries :: List[:obj:`structs.BindGroupLayoutEntry `] +BindGroupLayoutDescriptor = Struct( + "BindGroupLayoutDescriptor", + label="str", + entries="List[structs.BindGroupLayoutEntry]", +) + +#: * binding :: int +#: * visibility :: :obj:`flags.ShaderStage ` +#: * buffer :: :obj:`structs.BufferBindingLayout ` = None +#: * sampler :: :obj:`structs.SamplerBindingLayout ` = None +#: * texture :: :obj:`structs.TextureBindingLayout ` = None +#: * storageTexture :: :obj:`structs.StorageTextureBindingLayout ` = None +#: * externalTexture :: :obj:`structs.ExternalTextureBindingLayout ` = None +BindGroupLayoutEntry = Struct( + "BindGroupLayoutEntry", + binding="int", + visibility="flags.ShaderStage", + buffer="structs.BufferBindingLayout", + sampler="structs.SamplerBindingLayout", + texture="structs.TextureBindingLayout", + storage_texture="structs.StorageTextureBindingLayout", + external_texture="structs.ExternalTextureBindingLayout", +) + +#: * type :: :obj:`enums.BufferBindingType ` = "uniform" +#: * hasDynamicOffset :: bool = false +#: * minBindingSize :: int = 0 +BufferBindingLayout = Struct( + "BufferBindingLayout", + type="enums.BufferBindingType", + has_dynamic_offset="bool", + min_binding_size="int", +) + +#: * type :: :obj:`enums.SamplerBindingType ` = "filtering" +SamplerBindingLayout = Struct( + "SamplerBindingLayout", + type="enums.SamplerBindingType", +) + +#: * sampleType :: :obj:`enums.TextureSampleType ` = "float" +#: * viewDimension :: :obj:`enums.TextureViewDimension ` = "2d" +#: * multisampled :: bool = false +TextureBindingLayout = Struct( + "TextureBindingLayout", + sample_type="enums.TextureSampleType", + view_dimension="enums.TextureViewDimension", + multisampled="bool", +) + +#: * access :: :obj:`enums.StorageTextureAccess ` = "write-only" 
+#: * format :: :obj:`enums.TextureFormat ` +#: * viewDimension :: :obj:`enums.TextureViewDimension ` = "2d" +StorageTextureBindingLayout = Struct( + "StorageTextureBindingLayout", + access="enums.StorageTextureAccess", + format="enums.TextureFormat", + view_dimension="enums.TextureViewDimension", +) + +ExternalTextureBindingLayout = Struct( + "ExternalTextureBindingLayout", +) + +#: * label :: str = "" +#: * layout :: :class:`GPUBindGroupLayout ` +#: * entries :: List[:obj:`structs.BindGroupEntry `] +BindGroupDescriptor = Struct( + "BindGroupDescriptor", + label="str", + layout="GPUBindGroupLayout", + entries="List[structs.BindGroupEntry]", +) + +#: * binding :: int +#: * resource :: Union[:class:`GPUSampler `, :class:`GPUTextureView `, object, :obj:`structs.BufferBinding `] +BindGroupEntry = Struct( + "BindGroupEntry", + binding="int", + resource="Union[GPUSampler, GPUTextureView, object, structs.BufferBinding]", +) + +#: * buffer :: :class:`GPUBuffer ` +#: * offset :: int = 0 +#: * size :: int = None +BufferBinding = Struct( + "BufferBinding", + buffer="GPUBuffer", + offset="int", + size="int", +) + +#: * label :: str = "" +#: * bindGroupLayouts :: List[:class:`GPUBindGroupLayout `] +PipelineLayoutDescriptor = Struct( + "PipelineLayoutDescriptor", + label="str", + bind_group_layouts="List[GPUBindGroupLayout]", +) + +#: * label :: str = "" +#: * code :: str +#: * sourceMap :: dict = None +#: * compilationHints :: List[:obj:`structs.ShaderModuleCompilationHint `] = [] +ShaderModuleDescriptor = Struct( + "ShaderModuleDescriptor", + label="str", + code="str", + source_map="dict", + compilation_hints="List[structs.ShaderModuleCompilationHint]", +) + +#: * entryPoint :: str +#: * layout :: Union[:class:`GPUPipelineLayout `, :obj:`enums.AutoLayoutMode `] = None +ShaderModuleCompilationHint = Struct( + "ShaderModuleCompilationHint", + entry_point="str", + layout="Union[GPUPipelineLayout, enums.AutoLayoutMode]", +) + +#: * reason :: :obj:`enums.PipelineErrorReason ` +PipelineErrorInit = Struct( + "PipelineErrorInit", + reason="enums.PipelineErrorReason", +) + +#: * module :: :class:`GPUShaderModule ` +#: * entryPoint :: str +#: * constants :: Dict[str, float] = None +ProgrammableStage = Struct( + "ProgrammableStage", + module="GPUShaderModule", + entry_point="str", + constants="Dict[str, float]", +) + +#: * label :: str = "" +#: * layout :: Union[:class:`GPUPipelineLayout `, :obj:`enums.AutoLayoutMode `] +#: * compute :: :obj:`structs.ProgrammableStage ` +ComputePipelineDescriptor = Struct( + "ComputePipelineDescriptor", + label="str", + layout="Union[GPUPipelineLayout, enums.AutoLayoutMode]", + compute="structs.ProgrammableStage", +) + +#: * label :: str = "" +#: * layout :: Union[:class:`GPUPipelineLayout `, :obj:`enums.AutoLayoutMode `] +#: * vertex :: :obj:`structs.VertexState ` +#: * primitive :: :obj:`structs.PrimitiveState ` = {} +#: * depthStencil :: :obj:`structs.DepthStencilState ` = None +#: * multisample :: :obj:`structs.MultisampleState ` = {} +#: * fragment :: :obj:`structs.FragmentState ` = None +RenderPipelineDescriptor = Struct( + "RenderPipelineDescriptor", + label="str", + layout="Union[GPUPipelineLayout, enums.AutoLayoutMode]", + vertex="structs.VertexState", + primitive="structs.PrimitiveState", + depth_stencil="structs.DepthStencilState", + multisample="structs.MultisampleState", + fragment="structs.FragmentState", +) + +#: * topology :: :obj:`enums.PrimitiveTopology ` = "triangle-list" +#: * stripIndexFormat :: :obj:`enums.IndexFormat ` = None +#: * frontFace :: 
:obj:`enums.FrontFace ` = "ccw" +#: * cullMode :: :obj:`enums.CullMode ` = "none" +#: * unclippedDepth :: bool = false +PrimitiveState = Struct( + "PrimitiveState", + topology="enums.PrimitiveTopology", + strip_index_format="enums.IndexFormat", + front_face="enums.FrontFace", + cull_mode="enums.CullMode", + unclipped_depth="bool", +) + +#: * count :: int = 1 +#: * mask :: int = 0xFFFFFFFF +#: * alphaToCoverageEnabled :: bool = false +MultisampleState = Struct( + "MultisampleState", + count="int", + mask="int", + alpha_to_coverage_enabled="bool", +) + +#: * module :: :class:`GPUShaderModule ` +#: * entryPoint :: str +#: * constants :: Dict[str, float] = None +#: * targets :: List[:obj:`structs.ColorTargetState `] +FragmentState = Struct( + "FragmentState", + module="GPUShaderModule", + entry_point="str", + constants="Dict[str, float]", + targets="List[structs.ColorTargetState]", +) + +#: * format :: :obj:`enums.TextureFormat ` +#: * blend :: :obj:`structs.BlendState ` = None +#: * writeMask :: :obj:`flags.ColorWrite ` = 0xF +ColorTargetState = Struct( + "ColorTargetState", + format="enums.TextureFormat", + blend="structs.BlendState", + write_mask="flags.ColorWrite", +) + +#: * color :: :obj:`structs.BlendComponent ` +#: * alpha :: :obj:`structs.BlendComponent ` +BlendState = Struct( + "BlendState", + color="structs.BlendComponent", + alpha="structs.BlendComponent", +) + +#: * operation :: :obj:`enums.BlendOperation ` = "add" +#: * srcFactor :: :obj:`enums.BlendFactor ` = "one" +#: * dstFactor :: :obj:`enums.BlendFactor ` = "zero" +BlendComponent = Struct( + "BlendComponent", + operation="enums.BlendOperation", + src_factor="enums.BlendFactor", + dst_factor="enums.BlendFactor", +) + +#: * format :: :obj:`enums.TextureFormat ` +#: * depthWriteEnabled :: bool = None +#: * depthCompare :: :obj:`enums.CompareFunction ` = None +#: * stencilFront :: :obj:`structs.StencilFaceState ` = {} +#: * stencilBack :: :obj:`structs.StencilFaceState ` = {} +#: * stencilReadMask :: int = 0xFFFFFFFF +#: * stencilWriteMask :: int = 0xFFFFFFFF +#: * depthBias :: int = 0 +#: * depthBiasSlopeScale :: float = 0 +#: * depthBiasClamp :: float = 0 +DepthStencilState = Struct( + "DepthStencilState", + format="enums.TextureFormat", + depth_write_enabled="bool", + depth_compare="enums.CompareFunction", + stencil_front="structs.StencilFaceState", + stencil_back="structs.StencilFaceState", + stencil_read_mask="int", + stencil_write_mask="int", + depth_bias="int", + depth_bias_slope_scale="float", + depth_bias_clamp="float", +) + +#: * compare :: :obj:`enums.CompareFunction ` = "always" +#: * failOp :: :obj:`enums.StencilOperation ` = "keep" +#: * depthFailOp :: :obj:`enums.StencilOperation ` = "keep" +#: * passOp :: :obj:`enums.StencilOperation ` = "keep" +StencilFaceState = Struct( + "StencilFaceState", + compare="enums.CompareFunction", + fail_op="enums.StencilOperation", + depth_fail_op="enums.StencilOperation", + pass_op="enums.StencilOperation", +) + +#: * module :: :class:`GPUShaderModule ` +#: * entryPoint :: str +#: * constants :: Dict[str, float] = None +#: * buffers :: List[:obj:`structs.VertexBufferLayout `] = [] +VertexState = Struct( + "VertexState", + module="GPUShaderModule", + entry_point="str", + constants="Dict[str, float]", + buffers="List[structs.VertexBufferLayout]", +) + +#: * arrayStride :: int +#: * stepMode :: :obj:`enums.VertexStepMode ` = "vertex" +#: * attributes :: List[:obj:`structs.VertexAttribute `] +VertexBufferLayout = Struct( + "VertexBufferLayout", + array_stride="int", + 
step_mode="enums.VertexStepMode", + attributes="List[structs.VertexAttribute]", +) + +#: * format :: :obj:`enums.VertexFormat ` +#: * offset :: int +#: * shaderLocation :: int +VertexAttribute = Struct( + "VertexAttribute", + format="enums.VertexFormat", + offset="int", + shader_location="int", +) + +#: * offset :: int = 0 +#: * bytesPerRow :: int = None +#: * rowsPerImage :: int = None +ImageDataLayout = Struct( + "ImageDataLayout", + offset="int", + bytes_per_row="int", + rows_per_image="int", +) + +#: * offset :: int = 0 +#: * bytesPerRow :: int = None +#: * rowsPerImage :: int = None +#: * buffer :: :class:`GPUBuffer ` +ImageCopyBuffer = Struct( + "ImageCopyBuffer", + offset="int", + bytes_per_row="int", + rows_per_image="int", + buffer="GPUBuffer", +) + +#: * texture :: :class:`GPUTexture ` +#: * mipLevel :: int = 0 +#: * origin :: Union[List[int], :obj:`structs.Origin3D `] = {} +#: * aspect :: :obj:`enums.TextureAspect ` = "all" +ImageCopyTexture = Struct( + "ImageCopyTexture", + texture="GPUTexture", + mip_level="int", + origin="Union[List[int], structs.Origin3D]", + aspect="enums.TextureAspect", +) + +#: * source :: Union[memoryview, object] +#: * origin :: Union[List[int], :obj:`structs.Origin2D `] = {} +#: * flipY :: bool = false +ImageCopyExternalImage = Struct( + "ImageCopyExternalImage", + source="Union[memoryview, object]", + origin="Union[List[int], structs.Origin2D]", + flip_y="bool", +) + +#: * label :: str = "" +CommandBufferDescriptor = Struct( + "CommandBufferDescriptor", + label="str", +) + +#: * label :: str = "" +CommandEncoderDescriptor = Struct( + "CommandEncoderDescriptor", + label="str", +) + +#: * querySet :: :class:`GPUQuerySet ` +#: * beginningOfPassWriteIndex :: int = None +#: * endOfPassWriteIndex :: int = None +ComputePassTimestampWrites = Struct( + "ComputePassTimestampWrites", + query_set="GPUQuerySet", + beginning_of_pass_write_index="int", + end_of_pass_write_index="int", +) + +#: * label :: str = "" +#: * timestampWrites :: :obj:`structs.ComputePassTimestampWrites ` = None +ComputePassDescriptor = Struct( + "ComputePassDescriptor", + label="str", + timestamp_writes="structs.ComputePassTimestampWrites", +) + +#: * querySet :: :class:`GPUQuerySet ` +#: * beginningOfPassWriteIndex :: int = None +#: * endOfPassWriteIndex :: int = None +RenderPassTimestampWrites = Struct( + "RenderPassTimestampWrites", + query_set="GPUQuerySet", + beginning_of_pass_write_index="int", + end_of_pass_write_index="int", +) + +#: * label :: str = "" +#: * colorAttachments :: List[:obj:`structs.RenderPassColorAttachment `] +#: * depthStencilAttachment :: :obj:`structs.RenderPassDepthStencilAttachment ` = None +#: * occlusionQuerySet :: :class:`GPUQuerySet ` = None +#: * timestampWrites :: :obj:`structs.RenderPassTimestampWrites ` = None +#: * maxDrawCount :: int = 50000000 +RenderPassDescriptor = Struct( + "RenderPassDescriptor", + label="str", + color_attachments="List[structs.RenderPassColorAttachment]", + depth_stencil_attachment="structs.RenderPassDepthStencilAttachment", + occlusion_query_set="GPUQuerySet", + timestamp_writes="structs.RenderPassTimestampWrites", + max_draw_count="int", +) + +#: * view :: :class:`GPUTextureView ` +#: * depthSlice :: int = None +#: * resolveTarget :: :class:`GPUTextureView ` = None +#: * clearValue :: Union[List[float], :obj:`structs.Color `] = None +#: * loadOp :: :obj:`enums.LoadOp ` +#: * storeOp :: :obj:`enums.StoreOp ` +RenderPassColorAttachment = Struct( + "RenderPassColorAttachment", + view="GPUTextureView", + depth_slice="int", + 
resolve_target="GPUTextureView", + clear_value="Union[List[float], structs.Color]", + load_op="enums.LoadOp", + store_op="enums.StoreOp", +) + +#: * view :: :class:`GPUTextureView ` +#: * depthClearValue :: float = None +#: * depthLoadOp :: :obj:`enums.LoadOp ` = None +#: * depthStoreOp :: :obj:`enums.StoreOp ` = None +#: * depthReadOnly :: bool = false +#: * stencilClearValue :: int = 0 +#: * stencilLoadOp :: :obj:`enums.LoadOp ` = None +#: * stencilStoreOp :: :obj:`enums.StoreOp ` = None +#: * stencilReadOnly :: bool = false +RenderPassDepthStencilAttachment = Struct( + "RenderPassDepthStencilAttachment", + view="GPUTextureView", + depth_clear_value="float", + depth_load_op="enums.LoadOp", + depth_store_op="enums.StoreOp", + depth_read_only="bool", + stencil_clear_value="int", + stencil_load_op="enums.LoadOp", + stencil_store_op="enums.StoreOp", + stencil_read_only="bool", +) + +#: * label :: str = "" +#: * colorFormats :: List[:obj:`enums.TextureFormat `] +#: * depthStencilFormat :: :obj:`enums.TextureFormat ` = None +#: * sampleCount :: int = 1 +RenderPassLayout = Struct( + "RenderPassLayout", + label="str", + color_formats="List[enums.TextureFormat]", + depth_stencil_format="enums.TextureFormat", + sample_count="int", +) + +#: * label :: str = "" +RenderBundleDescriptor = Struct( + "RenderBundleDescriptor", + label="str", +) + +#: * label :: str = "" +#: * colorFormats :: List[:obj:`enums.TextureFormat `] +#: * depthStencilFormat :: :obj:`enums.TextureFormat ` = None +#: * sampleCount :: int = 1 +#: * depthReadOnly :: bool = false +#: * stencilReadOnly :: bool = false +RenderBundleEncoderDescriptor = Struct( + "RenderBundleEncoderDescriptor", + label="str", + color_formats="List[enums.TextureFormat]", + depth_stencil_format="enums.TextureFormat", + sample_count="int", + depth_read_only="bool", + stencil_read_only="bool", +) + +#: * label :: str = "" +QueueDescriptor = Struct( + "QueueDescriptor", + label="str", +) + +#: * label :: str = "" +#: * type :: :obj:`enums.QueryType ` +#: * count :: int +QuerySetDescriptor = Struct( + "QuerySetDescriptor", + label="str", + type="enums.QueryType", + count="int", +) + +#: * device :: :class:`GPUDevice ` +#: * format :: :obj:`enums.TextureFormat ` +#: * usage :: :obj:`flags.TextureUsage ` = 0x10 +#: * viewFormats :: List[:obj:`enums.TextureFormat `] = [] +#: * colorSpace :: str = "srgb" +#: * alphaMode :: :obj:`enums.CanvasAlphaMode ` = "opaque" +CanvasConfiguration = Struct( + "CanvasConfiguration", + device="GPUDevice", + format="enums.TextureFormat", + usage="flags.TextureUsage", + view_formats="List[enums.TextureFormat]", + color_space="str", + alpha_mode="enums.CanvasAlphaMode", +) + +#: * error :: :class:`GPUError ` +UncapturedErrorEventInit = Struct( + "UncapturedErrorEventInit", + error="GPUError", +) + +#: * r :: float +#: * g :: float +#: * b :: float +#: * a :: float +Color = Struct( + "Color", + r="float", + g="float", + b="float", + a="float", +) + +#: * x :: int = 0 +#: * y :: int = 0 +Origin2D = Struct( + "Origin2D", + x="int", + y="int", +) + +#: * x :: int = 0 +#: * y :: int = 0 +#: * z :: int = 0 +Origin3D = Struct( + "Origin3D", + x="int", + y="int", + z="int", +) + +#: * width :: int +#: * height :: int = 1 +#: * depthOrArrayLayers :: int = 1 +Extent3D = Struct( + "Extent3D", + width="int", + height="int", + depth_or_array_layers="int", +) diff --git a/wgpu/utils/__init__.py b/wgpu/utils/__init__.py new file mode 100644 index 0000000..faeddbe --- /dev/null +++ b/wgpu/utils/__init__.py @@ -0,0 +1,42 @@ +""" +Higher level 
utilities. Must be explicitly imported from ``wgpu.utils.xx``. +""" + +# The purpose of wgpu-py is to provide a Pythonic wrapper around +# wgpu-native. In principle, a higher-level API is not within the scope +# of the project. However, by providing a few utility functions, other +# projects can use wgpu without having to keep track of changes in wgpu +# itself. +# +# We should be conservative here: functionality added here should have +# an unopinionated API, providing tools that are still low-level (follow +# GPU/wgpu semantics), but without using low level details of the wgpu +# API itself. + +# The get_default_device() is so small and generally convenient that we import it by default. +from .device import get_default_device # noqa: F401 + + +class _StubModule: + def __init__(self, module): + self._module = module + self.must_be_explicitly_imported = True + + def __getattr__(self, *args, **kwargs): + raise RuntimeError(f"wgpu.utils.{self._module} must be explicitly imported.") + + def __repr__(self): + return f"" + + +# Create stubs + + +def compute_with_buffers(*args, **kwargs): + raise DeprecationWarning( + "wgpu.utils.compute_with_buffers() must now be imported from wgpu.utils.compute" + ) + + +compute = _StubModule("compute") +shadertoy = _StubModule("shadertoy") diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py new file mode 100644 index 0000000..9970dae --- /dev/null +++ b/wgpu/utils/compute.py @@ -0,0 +1,198 @@ +""" +Simple high-level utilities for doing compute on the GPU. +""" + +import ctypes + +import wgpu.utils + + +def compute_with_buffers(input_arrays, output_arrays, shader, n=None): + """Apply the given compute shader to the given input_arrays and return + output arrays. Both input and output arrays are represented on the GPU + using storage buffer objects. + + Parameters: + input_arrays (dict): A dict mapping int bindings to arrays. The array + can be anything that supports the buffer protocol, including + bytes, memoryviews, ctypes arrays and numpy arrays. The + type and shape of the array do not need to match the type + with which the shader will interpret the buffer data (though + it probably makes your code easier to follow). + output_arrays (dict): A dict mapping int bindings to output shapes. + If the value is an int, it represents the size (in bytes) of + the buffer. If the value is a tuple, its last element + specifies the format (see below), and the preceding elements + specify the shape. These are used to ``cast()`` the + memoryview object before it is returned. If the value is a + ctypes array type, the result will be cast to that instead + of a memoryview. Note that any buffer that is NOT in the + output arrays dict will be considered readonly in the shader. + shader (str or bytes): The shader as a string of WGSL code or SpirV bytes. + n (int, tuple, optional): The dispatch counts. Can be an int + or a 3-tuple of ints to specify (x, y, z). If not given or None, + the length of the first output array type is used. + + Returns: + output (dict): A dict mapping int bindings to memoryviews. + + The format characters to cast a ``memoryview`` are hard to remember, so + here's a refresher: + + * "b" and "B" are signed and unsigned 8-bit ints. + * "h" and "H" are signed and unsigned 16-bit ints. + * "i" and "I" are signed and unsigned 32-bit ints. + * "e" and "f" are 16-bit and 32-bit floats.
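+
+    A rough usage sketch (``shader`` is assumed to hold WGSL source that
+    writes one ``i32`` per invocation into the storage buffer at binding 0,
+    as in the simple shader used by this project's tests)::
+
+        out = compute_with_buffers({}, {0: (100, "i")}, shader)
+        # One dispatch per element of the first output; out[0] is a memoryview
+        # cast to 100 signed 32-bit ints.
+        assert out[0].tolist() == list(range(100))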
+ """ + + # Check input arrays + if not isinstance(input_arrays, dict): # empty is ok + raise TypeError("input_arrays must be a dict.") + for key, array in input_arrays.items(): + if not isinstance(key, int): + raise TypeError("keys of input_arrays must be int.") + # Simply wrapping in a memoryview ensures that it supports the buffer protocol + memoryview(array) + + # Check output arrays + output_infos = {} + if not isinstance(output_arrays, dict) or not output_arrays: + raise TypeError("output_arrays must be a nonempty dict.") + for key, array_descr in output_arrays.items(): + if not isinstance(key, int): + raise TypeError("keys of output_arrays must be int.") + if isinstance(array_descr, str) and "x" in array_descr: + array_descr = tuple(array_descr.split("x")) + if isinstance(array_descr, int): + output_infos[key] = { + "length": array_descr, + "nbytes": array_descr, + "format": "B", + "shape": (array_descr,), + } + elif isinstance(array_descr, tuple): + format = array_descr[-1] + try: + format_size = FORMAT_SIZES[format] + except KeyError: + raise ValueError(f"Invalid format for output array {key}: {format}") + shape = tuple(int(i) for i in array_descr[:-1]) + if not (shape and all(i > 0 for i in shape)): + raise ValueError(f"Invalid shape for output array {key}: {shape}") + nbytes = format_size + for i in shape: + nbytes *= i + output_infos[key] = { + "length": shape[0], + "nbytes": nbytes, + "format": format, + "shape": shape, + } + elif isinstance(array_descr, type) and issubclass(array_descr, ctypes.Array): + output_infos[key] = { + "length": array_descr._length_, + "nbytes": ctypes.sizeof(array_descr), + "ctypes_array_type": array_descr, + } + else: + raise TypeError( + f"Invalid value for output array description: {array_descr}" + ) + + # Get nx, ny, nz from n + if n is None: + output_info = list(output_infos.values())[0] + nx, ny, nz = output_info["length"], 1, 1 + elif isinstance(n, int): + nx, ny, nz = int(n), 1, 1 + elif isinstance(n, tuple) and len(n) == 3: + nx, ny, nz = int(n[0]), int(n[1]), int(n[2]) + else: + raise TypeError("compute_with_buffers: n must be None, an int, or 3-int tuple.") + if not (nx >= 1 and ny >= 1 and nz >= 1): + raise ValueError("compute_with_buffers: n value(s) must be >= 1.") + + # Create a device and compile the shader + device = wgpu.utils.get_default_device() + cshader = device.create_shader_module(code=shader) + + # Create buffers for input and output arrays + buffers = {} + for index, array in input_arrays.items(): + usage = wgpu.BufferUsage.STORAGE + if index in output_arrays: + usage |= wgpu.BufferUsage.COPY_SRC + buffer = device.create_buffer_with_data(data=array, usage=usage) + buffers[index] = buffer + for index, info in output_infos.items(): + if index in input_arrays: + continue # We already have this buffer + usage = wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC + buffers[index] = device.create_buffer(size=info["nbytes"], usage=usage) + + # Create bindings and binding layouts + bindings = [] + binding_layouts = [] + for index, buffer in buffers.items(): + bindings.append( + { + "binding": index, + "resource": {"buffer": buffer, "offset": 0, "size": buffer.size}, + } + ) + storage_types = ( + wgpu.BufferBindingType.read_only_storage, + wgpu.BufferBindingType.storage, + ) + binding_layouts.append( + { + "binding": index, + "visibility": wgpu.ShaderStage.COMPUTE, + "buffer": { + "type": storage_types[index in output_infos], + "has_dynamic_offset": False, + }, + } + ) + + # Put buffers together + bind_group_layout = 
device.create_bind_group_layout(entries=binding_layouts) + pipeline_layout = device.create_pipeline_layout( + bind_group_layouts=[bind_group_layout] + ) + bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) + + # Create a pipeline and "run it" + compute_pipeline = device.create_compute_pipeline( + layout=pipeline_layout, + compute={"module": cshader, "entry_point": "main"}, + ) + command_encoder = device.create_command_encoder() + compute_pass = command_encoder.begin_compute_pass() + compute_pass.set_pipeline(compute_pipeline) + compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 args not used + compute_pass.dispatch_workgroups(nx, ny, nz) + compute_pass.end() + device.queue.submit([command_encoder.finish()]) + + # Read the current data of the output buffers + output = {} + for index, info in output_infos.items(): + buffer = buffers[index] + # m = buffer.read_data() # old API + m = device.queue.read_buffer(buffer) # slow, can also be done async + if "ctypes_array_type" in info: + output[index] = info["ctypes_array_type"].from_buffer(m) + else: + output[index] = m.cast(info["format"], shape=info["shape"]) + + return output + + +FORMAT_SIZES = {"b": 1, "B": 1, "h": 2, "H": 2, "i": 4, "I": 4, "e": 2, "f": 4} + +# It's tempting to allow for other formats, like "int32" and "f4", but +# users who like numpy will simply specify the number of bytes and +# convert the result. Users who will work with the memoryview directly +# should not be confused with other formats than memoryview.cast() +# normally supports. diff --git a/wgpu/utils/device.py b/wgpu/utils/device.py new file mode 100644 index 0000000..1a42076 --- /dev/null +++ b/wgpu/utils/device.py @@ -0,0 +1,17 @@ +_default_device = None + + +def get_default_device(): + """Get a wgpu device object. If this succeeds, it's likely that + the WGPU lib is usable on this system. If not, this call will + probably exit (Rust panic). When called multiple times, + returns the same global device object (useful for e.g. unit tests). 
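+
+    A minimal sketch of the intended use (no arguments; adapter and device
+    creation details are handled internally)::
+
+        device = get_default_device()
+        assert get_default_device() is device  # the global device is cached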
+ """ + global _default_device + + if _default_device is None: + import wgpu.backends.auto # noqa + + adapter = wgpu.gpu.request_adapter(power_preference="high-performance") + _default_device = adapter.request_device() + return _default_device diff --git a/wgpu/utils/shadertoy.py b/wgpu/utils/shadertoy.py new file mode 100644 index 0000000..d475c35 --- /dev/null +++ b/wgpu/utils/shadertoy.py @@ -0,0 +1,704 @@ +import time +import ctypes +import collections + +import wgpu +from wgpu.gui.auto import WgpuCanvas, run +from wgpu.gui.offscreen import WgpuCanvas as OffscreenCanvas, run as run_offscreen + +vertex_code_glsl = """#version 450 core + +layout(location = 0) out vec2 uv; + +void main(void){ + int index = int(gl_VertexID); + if (index == 0) { + gl_Position = vec4(-1.0, -1.0, 0.0, 1.0); + uv = vec2(0.0, 1.0); + } else if (index == 1) { + gl_Position = vec4(3.0, -1.0, 0.0, 1.0); + uv = vec2(2.0, 1.0); + } else { + gl_Position = vec4(-1.0, 3.0, 0.0, 1.0); + uv = vec2(0.0, -1.0); + } +} +""" + + +builtin_variables_glsl = """#version 450 core + +vec4 i_mouse; +vec4 i_date; +vec3 i_resolution; +float i_time; +float i_time_delta; +int i_frame; +float i_framerate; + +layout(binding = 1) uniform texture2D i_channel0; +layout(binding = 2) uniform sampler sampler0; +layout(binding = 3) uniform texture2D i_channel1; +layout(binding = 4) uniform sampler sampler1; +layout(binding = 5) uniform texture2D i_channel2; +layout(binding = 6) uniform sampler sampler2; +layout(binding = 7) uniform texture2D i_channel3; +layout(binding = 8) uniform sampler sampler3; + +// Shadertoy compatibility, see we can use the same code copied from shadertoy website + +#define iChannel0 sampler2D(i_channel0, sampler0) +#define iChannel1 sampler2D(i_channel1, sampler1) +#define iChannel2 sampler2D(i_channel2, sampler2) +#define iChannel3 sampler2D(i_channel3, sampler3) + +#define iMouse i_mouse +#define iDate i_date +#define iResolution i_resolution +#define iTime i_time +#define iTimeDelta i_time_delta +#define iFrame i_frame +#define iFrameRate i_framerate + +#define mainImage shader_main +""" + + +fragment_code_glsl = """ +layout(location = 0) in vec2 uv; + +struct ShadertoyInput { + vec4 mouse; + vec4 date; + vec3 resolution; + float time; + float time_delta; + int frame; + float framerate; +}; + +layout(binding = 0) uniform ShadertoyInput input; +out vec4 FragColor; +void main(){ + + i_mouse = input.mouse; + i_date = input.date; + i_resolution = input.resolution; + i_time = input.time; + i_time_delta = input.time_delta; + i_frame = input.frame; + i_framerate = input.framerate; + vec2 uv = vec2(uv.x, 1.0 - uv.y); + vec2 frag_coord = uv * i_resolution.xy; + + shader_main(FragColor, frag_coord); + +} + +""" + + +vertex_code_wgsl = """ + +struct Varyings { + @builtin(position) position : vec4, + @location(0) uv : vec2, +}; + +@vertex +fn main(@builtin(vertex_index) index: u32) -> Varyings { + var out: Varyings; + if (index == u32(0)) { + out.position = vec4(-1.0, -1.0, 0.0, 1.0); + out.uv = vec2(0.0, 1.0); + } else if (index == u32(1)) { + out.position = vec4(3.0, -1.0, 0.0, 1.0); + out.uv = vec2(2.0, 1.0); + } else { + out.position = vec4(-1.0, 3.0, 0.0, 1.0); + out.uv = vec2(0.0, -1.0); + } + return out; + +} +""" + + +builtin_variables_wgsl = """ + +var i_mouse: vec4; +var i_date: vec4; +var i_resolution: vec3; +var i_time_delta: f32; +var i_time: f32; +var i_frame: u32; +var i_framerate: f32; + +// TODO: more global variables +// var i_frag_coord: vec2; + +""" + + +fragment_code_wgsl = """ + +struct ShadertoyInput { + 
mouse: vec4, + date: vec4, + resolution: vec3, + time: f32, + time_delta: f32, + frame: u32, + framerate: f32, +}; + +struct Varyings { + @builtin(position) position : vec4, + @location(0) uv : vec2, +}; + +@group(0) @binding(0) +var input: ShadertoyInput; + +@group(0) @binding(1) +var i_channel0: texture_2d; +@group(0) @binding(3) +var i_channel1: texture_2d; +@group(0) @binding(5) +var i_channel2: texture_2d; +@group(0) @binding(7) +var i_channel3: texture_2d; + +@group(0) @binding(2) +var sampler0: sampler; +@group(0) @binding(4) +var sampler1: sampler; +@group(0) @binding(6) +var sampler2: sampler; +@group(0) @binding(8) +var sampler3: sampler; + +@fragment +fn main(in: Varyings) -> @location(0) vec4 { + + i_mouse = input.mouse; + i_date = input.date; + i_resolution = input.resolution; + i_time = input.time; + i_time_delta = input.time_delta; + i_frame = input.frame; + i_framerate = input.framerate; + let uv = vec2(in.uv.x, 1.0 - in.uv.y); + let frag_coord = uv * i_resolution.xy; + + return shader_main(frag_coord); +} + +""" + + +class UniformArray: + """Convenience class to create a uniform array. + + Maybe we can make it a public util at some point. + Ensure that the order matches structs in the shader code. + See https://www.w3.org/TR/WGSL/#alignment-and-size for reference on alignment. + """ + + def __init__(self, *args): + # Analyse incoming fields + fields = [] + byte_offet = 0 + for name, format, n in args: + assert format in ("f", "i", "I") + field = name, format, byte_offet, byte_offet + n * 4 + fields.append(field) + byte_offet += n * 4 + # Get padding + nbytes = byte_offet + while nbytes % 16: + nbytes += 1 + # Construct memoryview object and a view for each field + self._mem = memoryview((ctypes.c_uint8 * nbytes)()).cast("B") + self._views = {} + for name, format, i1, i2 in fields: + self._views[name] = self._mem[i1:i2].cast(format) + + @property + def mem(self): + return self._mem + + @property + def nbytes(self): + return self._mem.nbytes + + def __getitem__(self, key): + v = self._views[key].tolist() + return v[0] if len(v) == 1 else v + + def __setitem__(self, key, val): + m = self._views[key] + n = m.shape[0] + if n == 1: + assert isinstance(val, (float, int)) + m[0] = val + else: + assert isinstance(val, (tuple, list)) + for i in range(n): + m[i] = val[i] + + +class ShadertoyChannel: + """ + Represents a shadertoy channel. It can be a texture. + Parameters: + data (array-like): Of shape (width, height, 4), will be converted to memoryview. For example read in your images using ``np.asarray(Image.open("image.png"))`` + kind (str): The kind of channel. Can be one of ("texture"). More will be supported in the future + **kwargs: Additional arguments for the sampler: + wrap (str): The wrap mode, can be one of ("clamp-to-edge", "repeat", "clamp"). Default is "clamp-to-edge". + """ + + # TODO: add cubemap/volume, buffer, webcam, video, audio, keyboard? 
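+    # A usage sketch; NumPy and Pillow are assumed to be available here,
+    # though neither is required by this class itself:
+    #
+    #     img = np.asarray(Image.open("image.png").convert("RGBA"))
+    #     channel = ShadertoyChannel(img, kind="texture", wrap="repeat")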
+ + def __init__(self, data=None, kind="texture", **kwargs): + if kind != "texture": + raise NotImplementedError("Only texture is supported for now.") + if data is not None: + self.data = memoryview(data) + else: + self.data = ( + memoryview((ctypes.c_uint8 * 8 * 8 * 4)()) + .cast("B") + .cast("B", shape=[8, 8, 4]) + ) + self.size = self.data.shape # (rows, columns, channels) + self.texture_size = ( + self.data.shape[1], + self.data.shape[0], + 1, + ) # orientation change (columns, rows, 1) + self.bytes_per_pixel = ( + self.data.nbytes // self.data.shape[1] // self.data.shape[0] + ) + self.sampler_settings = {} + wrap = kwargs.pop("wrap", "clamp-to-edge") + if wrap.startswith("clamp"): + wrap = "clamp-to-edge" + self.sampler_settings["address_mode_u"] = wrap + self.sampler_settings["address_mode_v"] = wrap + self.sampler_settings["address_mode_w"] = wrap + + def __repr__(self): + """ + Convenience method to get a representation of this object for debugging. + """ + data_repr = { + "repr": self.data.__repr__(), + "shape": self.data.shape, + "strides": self.data.strides, + "nbytes": self.data.nbytes, + "obj": self.data.obj, + } + class_repr = {k: v for k, v in self.__dict__.items() if k != "data"} + class_repr["data"] = data_repr + return repr(class_repr) + + +class Shadertoy: + """Provides a "screen pixel shader programming interface" similar to `shadertoy `_. + + It helps you research and quickly build or test shaders using `WGSL` or `GLSL` via WGPU. + + Parameters: + shader_code (str): The shader code to use. + resolution (tuple): The resolution of the shadertoy. + offscreen (bool): Whether to render offscreen. Default is False. + inputs (list): A list of :class:`ShadertoyChannel` objects. Supports up to 4 inputs. Defaults to sampling a black texture. + + The shader code must contain an entry point function: + + WGSL: ``fn shader_main(frag_coord: vec2) -> vec4{}`` + GLSL: ``void shader_main(out vec4 frag_color, in vec2 frag_coord){}`` + + It has a parameter ``frag_coord`` which is the current pixel coordinate (in range 0..resolution, origin is bottom-left), + and it must return a vec4 color (for GLSL, it's the ``out vec4 frag_color`` parameter), which is the color of the pixel at that coordinate. + + Some built-in variables are available in the shader: + + * ``i_mouse``: the mouse position in pixels + * ``i_date``: the current date and time as a vec4 (year, month, day, seconds) + * ``i_resolution``: the resolution of the shadertoy + * ``i_time``: the global time in seconds + * ``i_time_delta``: the time since last frame in seconds + * ``i_frame``: the frame number + * ``i_framerate``: the number of frames rendered in the last second. + + For GLSL, you can also use the aliases ``iTime``, ``iTimeDelta``, ``iFrame``, ``iResolution``, ``iMouse``, ``iDate`` and ``iFrameRate`` of these built-in variables, + and the entry point function also has an alias ``mainImage``, so you can use shader code copied from the shadertoy website without making any changes. + """ + + # todo: add remaining built-in variables (i_channel_time, i_channel_resolution) + # todo: support multiple render passes (`i_channel0`, `i_channel1`, etc.)
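+    # A minimal usage sketch (a complete runnable variant lives in the
+    # __main__ block at the bottom of this module):
+    #
+    #     shader = Shadertoy("""
+    #         fn shader_main(frag_coord: vec2<f32>) -> vec4<f32> {
+    #             let uv = frag_coord / i_resolution.xy;
+    #             return vec4<f32>(uv, 0.5 + 0.5 * sin(i_time), 1.0);
+    #         }
+    #     """)
+    #     shader.show()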
+ + def __init__( + self, shader_code, resolution=(800, 450), offscreen=False, inputs=[] + ) -> None: + self._uniform_data = UniformArray( + ("mouse", "f", 4), + ("date", "f", 4), + ("resolution", "f", 3), + ("time", "f", 1), + ("time_delta", "f", 1), + ("frame", "I", 1), + ("framerate", "f", 1), + ) + + self._shader_code = shader_code + self._uniform_data["resolution"] = resolution + (1,) + + self._offscreen = offscreen + + if len(inputs) > 4: + raise ValueError("Only 4 inputs are supported.") + self.inputs = inputs + self.inputs.extend([ShadertoyChannel() for _ in range(4 - len(inputs))]) + + self._prepare_render() + self._bind_events() + + @property + def resolution(self): + """The resolution of the shadertoy as a tuple (width, height) in pixels.""" + return tuple(self._uniform_data["resolution"])[:2] + + @property + def shader_code(self): + """The shader code to use.""" + return self._shader_code + + @property + def shader_type(self): + """The shader type, automatically detected from the shader code, can be "wgsl" or "glsl".""" + if "fn shader_main" in self.shader_code: + return "wgsl" + elif ( + "void shader_main" in self.shader_code + or "void mainImage" in self.shader_code + ): + return "glsl" + else: + raise ValueError("Invalid shader code.") + + def _prepare_render(self): + import wgpu.backends.auto # noqa + + if self._offscreen: + self._canvas = OffscreenCanvas( + title="Shadertoy", size=self.resolution, max_fps=60 + ) + else: + self._canvas = WgpuCanvas( + title="Shadertoy", size=self.resolution, max_fps=60 + ) + + self._device = wgpu.utils.device.get_default_device() + + self._present_context = self._canvas.get_context() + + # We use "bgra8unorm" not "bgra8unorm-srgb" here because we want to let the shader fully control the color-space. 
+ self._present_context.configure( + device=self._device, format=wgpu.TextureFormat.bgra8unorm + ) + + shader_type = self.shader_type + if shader_type == "glsl": + vertex_shader_code = vertex_code_glsl + frag_shader_code = ( + builtin_variables_glsl + self.shader_code + fragment_code_glsl + ) + elif shader_type == "wgsl": + vertex_shader_code = vertex_code_wgsl + frag_shader_code = ( + builtin_variables_wgsl + self.shader_code + fragment_code_wgsl + ) + + vertex_shader_program = self._device.create_shader_module( + label="triangle_vert", code=vertex_shader_code + ) + frag_shader_program = self._device.create_shader_module( + label="triangle_frag", code=frag_shader_code + ) + + self._uniform_buffer = self._device.create_buffer( + size=self._uniform_data.nbytes, + usage=wgpu.BufferUsage.UNIFORM | wgpu.BufferUsage.COPY_DST, + ) + + bind_groups_layout_entries = [ + { + "binding": 0, + "resource": { + "buffer": self._uniform_buffer, + "offset": 0, + "size": self._uniform_data.nbytes, + }, + }, + ] + + binding_layout = [ + { + "binding": 0, + "visibility": wgpu.ShaderStage.FRAGMENT, + "buffer": {"type": wgpu.BufferBindingType.uniform}, + }, + ] + + for input_idx, channel_input in enumerate(self.inputs): + texture_binding = (2 * input_idx) + 1 + sampler_binding = 2 * (input_idx + 1) + binding_layout.extend( + [ + { + "binding": texture_binding, + "visibility": wgpu.ShaderStage.FRAGMENT, + "texture": { + "sample_type": wgpu.TextureSampleType.float, + "view_dimension": wgpu.TextureViewDimension.d2, + }, + }, + { + "binding": sampler_binding, + "visibility": wgpu.ShaderStage.FRAGMENT, + "sampler": {"type": wgpu.SamplerBindingType.filtering}, + }, + ] + ) + + texture = self._device.create_texture( + size=channel_input.texture_size, + format=wgpu.TextureFormat.rgba8unorm, + usage=wgpu.TextureUsage.COPY_DST | wgpu.TextureUsage.TEXTURE_BINDING, + ) + texture_view = texture.create_view() + + self._device.queue.write_texture( + { + "texture": texture, + "origin": (0, 0, 0), + "mip_level": 0, + }, + channel_input.data, + { + "offset": 0, + "bytes_per_row": channel_input.bytes_per_pixel + * channel_input.size[1], # must be multiple of 256? 
+ "rows_per_image": channel_input.size[0], # same is done internally + }, + texture.size, + ) + + sampler = self._device.create_sampler(**channel_input.sampler_settings) + bind_groups_layout_entries.extend( + [ + { + "binding": texture_binding, + "resource": texture_view, + }, + { + "binding": sampler_binding, + "resource": sampler, + }, + ] + ) + + bind_group_layout = self._device.create_bind_group_layout( + entries=binding_layout + ) + + self._bind_group = self._device.create_bind_group( + layout=bind_group_layout, + entries=bind_groups_layout_entries, + ) + + self._render_pipeline = self._device.create_render_pipeline( + layout=self._device.create_pipeline_layout( + bind_group_layouts=[bind_group_layout] + ), + vertex={ + "module": vertex_shader_program, + "entry_point": "main", + "buffers": [], + }, + primitive={ + "topology": wgpu.PrimitiveTopology.triangle_list, + "front_face": wgpu.FrontFace.ccw, + "cull_mode": wgpu.CullMode.none, + }, + depth_stencil=None, + multisample=None, + fragment={ + "module": frag_shader_program, + "entry_point": "main", + "targets": [ + { + "format": wgpu.TextureFormat.bgra8unorm, + "blend": { + "color": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + "alpha": ( + wgpu.BlendFactor.one, + wgpu.BlendFactor.zero, + wgpu.BlendOperation.add, + ), + }, + }, + ], + }, + ) + + def _bind_events(self): + def on_resize(event): + w, h = event["width"], event["height"] + self._uniform_data["resolution"] = (w, h, 1) + + def on_mouse_move(event): + if event["button"] == 1 or 1 in event["buttons"]: + _, _, x2, y2 = self._uniform_data["mouse"] + x1, y1 = event["x"], self.resolution[1] - event["y"] + self._uniform_data["mouse"] = x1, y1, x2, y2 + + def on_mouse_down(event): + if event["button"] == 1 or 1 in event["buttons"]: + x, y = event["x"], self.resolution[1] - event["y"] + self._uniform_data["mouse"] = (x, y, x, -y) + + def on_mouse_up(event): + if event["button"] == 1 or 1 in event["buttons"]: + x1, y1, x2, y2 = self._uniform_data["mouse"] + self._uniform_data["mouse"] = x1, y1, abs(x2), y2 + + self._canvas.add_event_handler(on_resize, "resize") + self._canvas.add_event_handler(on_mouse_move, "pointer_move") + self._canvas.add_event_handler(on_mouse_down, "pointer_down") + self._canvas.add_event_handler(on_mouse_up, "pointer_up") + + def _update(self): + now = time.perf_counter() + if not hasattr(self, "_last_time"): + self._last_time = now + + if not hasattr(self, "_time_history"): + self._time_history = collections.deque(maxlen=256) + + time_delta = now - self._last_time + self._uniform_data["time_delta"] = time_delta + self._last_time = now + self._uniform_data["time"] += time_delta + self._time_history.append(self._uniform_data["time"]) + + self._uniform_data["framerate"] = sum( + [1 for t in self._time_history if t > self._uniform_data["time"] - 1] + ) + + if not hasattr(self, "_frame"): + self._frame = 0 + + time_struct = time.localtime() + self._uniform_data["date"] = ( + float(time_struct.tm_year), + float(time_struct.tm_mon - 1), + float(time_struct.tm_mday), + time_struct.tm_hour * 3600 + + time_struct.tm_min * 60 + + time_struct.tm_sec + + now % 1, + ) + + self._uniform_data["frame"] = self._frame + self._frame += 1 + + def _draw_frame(self): + # Update uniform buffer + self._update() + self._device.queue.write_buffer( + self._uniform_buffer, + 0, + self._uniform_data.mem, + 0, + self._uniform_data.nbytes, + ) + + command_encoder = self._device.create_command_encoder() + current_texture = 
self._present_context.get_current_texture() + + render_pass = command_encoder.begin_render_pass( + color_attachments=[ + { + "view": current_texture.create_view(), + "resolve_target": None, + "clear_value": (0, 0, 0, 1), + "load_op": wgpu.LoadOp.clear, + "store_op": wgpu.StoreOp.store, + } + ], + ) + + render_pass.set_pipeline(self._render_pipeline) + render_pass.set_bind_group(0, self._bind_group, [], 0, 99) + render_pass.draw(3, 1, 0, 0) + render_pass.end() + + self._device.queue.submit([command_encoder.finish()]) + + self._canvas.request_draw() + + def show(self): + self._canvas.request_draw(self._draw_frame) + if self._offscreen: + run_offscreen() + else: + run() + + def snapshot(self, time_float: float = 0.0, mouse_pos: tuple = (0, 0, 0, 0)): + """ + Returns an image of the specified time. (Only available when ``offscreen=True``) + + Parameters: + time_float (float): The time to snapshot. It essentially sets ``i_time`` to a specific number. (Default is 0.0) + mouse_pos (tuple): The mouse position in pixels in the snapshot. It essentially sets ``i_mouse`` to a 4-tuple. (Default is (0,0,0,0)) + Returns: + frame (memoryview): snapshot with transparancy. This object can be converted to a numpy array (without copying data) + using ``np.asarray(arr)`` + """ + if not self._offscreen: + raise NotImplementedError("Snapshot is only available in offscreen mode.") + + if hasattr(self, "_last_time"): + self.__delattr__("_last_time") + self._uniform_data["time"] = time_float + self._uniform_data["mouse"] = mouse_pos + self._canvas.request_draw(self._draw_frame) + frame = self._canvas.draw() + return frame + + +if __name__ == "__main__": + shader = Shadertoy( + """ + fn shader_main(frag_coord: vec2) -> vec4 { + let uv = frag_coord / i_resolution.xy; + + if ( length(frag_coord - i_mouse.xy) < 20.0 ) { + return vec4(textureSample(i_channel0, sampler0, uv)); + }else{ + return vec4( 0.5 + 0.5 * sin(i_time * vec3(uv, 1.0) ), 1.0); + } + + } + """ + ) + + shader.show() From 9c891c88da7042cd22e9eca212b0e867d190ce15 Mon Sep 17 00:00:00 2001 From: Jan Date: Wed, 27 Dec 2023 22:43:56 +0100 Subject: [PATCH 02/20] Remove other tests --- tests/test_api.py | 220 --------- tests/test_diagnostics.py | 382 ---------------- tests/test_gui_auto_offscreen.py | 66 --- tests/test_gui_base.py | 246 ---------- tests/test_gui_glfw.py | 297 ------------ tests/test_util_compute.py | 536 ---------------------- tests/test_util_core.py | 47 -- tests/test_wgpu_native_basics.py | 222 --------- tests/test_wgpu_native_buffer.py | 530 ---------------------- tests/test_wgpu_native_compute_tex.py | 592 ------------------------ tests/test_wgpu_native_errors.py | 268 ----------- tests/test_wgpu_native_query_set.py | 151 ------- tests/test_wgpu_native_render.py | 629 -------------------------- tests/test_wgpu_native_render_tex.py | 566 ----------------------- tests/test_wgpu_native_texture.py | 285 ------------ tests_mem/test_gui_glfw.py | 64 --- tests_mem/test_gui_offscreen.py | 90 ---- tests_mem/test_gui_qt.py | 58 --- tests_mem/test_meta.py | 81 ---- tests_mem/test_objects.py | 377 --------------- tests_mem/testutils.py | 230 ---------- 21 files changed, 5937 deletions(-) delete mode 100644 tests/test_api.py delete mode 100644 tests/test_diagnostics.py delete mode 100644 tests/test_gui_auto_offscreen.py delete mode 100644 tests/test_gui_base.py delete mode 100644 tests/test_gui_glfw.py delete mode 100644 tests/test_util_compute.py delete mode 100644 tests/test_util_core.py delete mode 100644 tests/test_wgpu_native_basics.py delete mode 
100644 tests/test_wgpu_native_buffer.py delete mode 100644 tests/test_wgpu_native_compute_tex.py delete mode 100644 tests/test_wgpu_native_errors.py delete mode 100644 tests/test_wgpu_native_query_set.py delete mode 100644 tests/test_wgpu_native_render.py delete mode 100644 tests/test_wgpu_native_render_tex.py delete mode 100644 tests/test_wgpu_native_texture.py delete mode 100644 tests_mem/test_gui_glfw.py delete mode 100644 tests_mem/test_gui_offscreen.py delete mode 100644 tests_mem/test_gui_qt.py delete mode 100644 tests_mem/test_meta.py delete mode 100644 tests_mem/test_objects.py delete mode 100644 tests_mem/testutils.py diff --git a/tests/test_api.py b/tests/test_api.py deleted file mode 100644 index 313ce5b..0000000 --- a/tests/test_api.py +++ /dev/null @@ -1,220 +0,0 @@ -import sys -import logging -import subprocess - -import wgpu - -from pytest import raises, mark -from testutils import run_tests, can_use_wgpu_lib - - -def test_basic_api(): - import wgpu # noqa: F401 - - assert isinstance(wgpu.__version__, str) - assert isinstance(wgpu.version_info, tuple) - assert isinstance(wgpu.gpu, wgpu.GPU) - - # Entrypoint funcs - assert wgpu.gpu.request_adapter - assert wgpu.gpu.request_adapter_async - - code1 = wgpu.GPU.request_adapter.__code__ - code2 = wgpu.GPU.request_adapter_async.__code__ - nargs1 = code1.co_argcount + code1.co_kwonlyargcount - assert code1.co_varnames[:nargs1] == code2.co_varnames - - assert repr(wgpu.classes.GPU()).startswith( - " 2 mentions - assert text.count("foo_method") == 2 - assert text.count("call-failed-but-test-passed") == 4 - assert text.count("(4)") == 1 - assert text.count("(5)") == 0 - - assert text.count("spam_method") == 0 - assert text.count("division by zero") == 0 - - canvas._draw_frame_and_present() # prints traceback - canvas._draw_frame_and_present() # prints short logs ... 
- canvas._draw_frame_and_present() - canvas._draw_frame_and_present() - - text = caplog.text - assert text.count("bar_method") == 2 # one traceback => 2 mentions - assert text.count("foo_method") == 2 - assert text.count("call-failed-but-test-passed") == 4 - - assert text.count("spam_method") == 2 - assert text.count("division by zero") == 4 - - -class MyOffscreenCanvas(wgpu.gui.WgpuOffscreenCanvasBase): - def __init__(self): - super().__init__() - self.textures = [] - self.physical_size = 100, 100 - - def get_pixel_ratio(self): - return 1 - - def get_logical_size(self): - return self.get_physical_size() - - def get_physical_size(self): - return self.physical_size - - def _request_draw(self): - # Note: this would normaly schedule a call in a later event loop iteration - self._draw_frame_and_present() - - def present(self, texture): - self.textures.append(texture) - device = texture._device - size = texture.size - bytes_per_pixel = 4 - data = device.queue.read_texture( - { - "texture": texture, - "mip_level": 0, - "origin": (0, 0, 0), - }, - { - "offset": 0, - "bytes_per_row": bytes_per_pixel * size[0], - "rows_per_image": size[1], - }, - size, - ) - self.array = np.frombuffer(data, np.uint8).reshape(size[1], size[0], 4) - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_offscreen_canvas(): - canvas = MyOffscreenCanvas() - device = wgpu.utils.get_default_device() - present_context = canvas.get_context() - present_context.configure(device=device, format=None) - - def draw_frame(): - current_texture_view = present_context.get_current_texture().create_view() - command_encoder = device.create_command_encoder() - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture_view, - "resolve_target": None, - "clear_value": (0, 1, 0, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - render_pass.end() - device.queue.submit([command_encoder.finish()]) - - assert len(canvas.textures) == 0 - - # Draw 1 - canvas.request_draw(draw_frame) - assert canvas.array.shape == (100, 100, 4) - assert np.all(canvas.array[:, :, 0] == 0) - assert np.all(canvas.array[:, :, 1] == 255) - - # Draw 2 - canvas.request_draw(draw_frame) - assert canvas.array.shape == (100, 100, 4) - assert np.all(canvas.array[:, :, 0] == 0) - assert np.all(canvas.array[:, :, 1] == 255) - - # Change resolution - canvas.physical_size = 120, 100 - - # Draw 3 - canvas.request_draw(draw_frame) - assert canvas.array.shape == (100, 120, 4) - assert np.all(canvas.array[:, :, 0] == 0) - assert np.all(canvas.array[:, :, 1] == 255) - - # Change resolution - canvas.physical_size = 120, 140 - - # Draw 4 - canvas.request_draw(draw_frame) - assert canvas.array.shape == (140, 120, 4) - assert np.all(canvas.array[:, :, 0] == 0) - assert np.all(canvas.array[:, :, 1] == 255) - - # We now have four unique texture objects - assert len(canvas.textures) == 4 - assert len(set(canvas.textures)) == 4 - - -def test_autogui_mixin(): - c = wgpu.gui.WgpuAutoGui() - - # It's a mixin - assert not isinstance(c, wgpu.gui.WgpuCanvasBase) - - # It's event handling mechanism should be fully functional - - events = [] - - def handler(event): - events.append(event["value"]) - - c.add_event_handler(handler, "foo", "bar") - c.handle_event({"event_type": "foo", "value": 1}) - c.handle_event({"event_type": "bar", "value": 2}) - c.handle_event({"event_type": "spam", "value": 3}) - c.remove_event_handler(handler, "foo") - c.handle_event({"event_type": "foo", "value": 4}) - 
c.handle_event({"event_type": "bar", "value": 5}) - c.handle_event({"event_type": "spam", "value": 6}) - c.remove_event_handler(handler, "bar") - c.handle_event({"event_type": "foo", "value": 7}) - c.handle_event({"event_type": "bar", "value": 8}) - c.handle_event({"event_type": "spam", "value": 9}) - - assert events == [1, 2, 5] - - -def test_weakbind(): - weakbind = wgpu.gui.base.weakbind - - xx = [] - - class Foo: - def bar(self): - xx.append(1) - - f1 = Foo() - f2 = Foo() - - b1 = f1.bar - b2 = weakbind(f2.bar) - - assert len(xx) == 0 - b1() - assert len(xx) == 1 - b2() - assert len(xx) == 2 - - del f1 - del f2 - - if is_pypy: - gc.collect() - - assert len(xx) == 2 - b1() - assert len(xx) == 3 # f1 still exists - b2() - assert len(xx) == 3 # f2 is gone! - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_gui_glfw.py b/tests/test_gui_glfw.py deleted file mode 100644 index 2ee8af0..0000000 --- a/tests/test_gui_glfw.py +++ /dev/null @@ -1,297 +0,0 @@ -""" -Test the canvas, and parts of the rendering that involves a canvas, -like the canvas context and surface texture. -""" - -import os -import sys -import time -import weakref -import asyncio - -import wgpu -from pytest import skip -from testutils import run_tests, can_use_glfw, can_use_wgpu_lib -from renderutils import render_to_texture, render_to_screen # noqa - - -if not can_use_glfw or not can_use_wgpu_lib: - skip("Skipping tests that need a window or the wgpu lib", allow_module_level=True) - - -def setup_module(): - import glfw - - glfw.init() - - -def teardown_module(): - pass # Do not glfw.terminate() because other tests may still need glfw - - -def test_is_autogui(): - from wgpu.gui.glfw import WgpuCanvas - - assert issubclass(WgpuCanvas, wgpu.gui.WgpuCanvasBase) - assert issubclass(WgpuCanvas, wgpu.gui.WgpuAutoGui) - - -def test_glfw_canvas_basics(): - """Create a window and check some of its behavior. No wgpu calls here.""" - - import glfw - from wgpu.gui.glfw import WgpuCanvas - - canvas = WgpuCanvas() - - canvas.set_logical_size(300, 200) - etime = time.time() + 0.1 - while time.time() < etime: - glfw.poll_events() - lsize = canvas.get_logical_size() - assert isinstance(lsize, tuple) and len(lsize) == 2 - assert isinstance(lsize[0], float) and isinstance(lsize[1], float) - assert lsize == (300.0, 200.0) - - assert len(canvas.get_physical_size()) == 2 - assert isinstance(canvas.get_pixel_ratio(), float) - - # Close - assert not canvas.is_closed() - if sys.platform.startswith("win"): # On Linux we cant do this multiple times - canvas.close() - glfw.poll_events() - assert canvas.is_closed() - - -def test_glfw_canvas_del(): - from wgpu.gui.glfw import WgpuCanvas, update_glfw_canvasses - import glfw - - loop = asyncio.get_event_loop() - - async def miniloop(): - for i in range(10): - glfw.poll_events() - update_glfw_canvasses() - await asyncio.sleep(0.01) - - canvas = WgpuCanvas() - ref = weakref.ref(canvas) - - assert ref() is not None - loop.run_until_complete(miniloop()) - assert ref() is not None - del canvas - loop.run_until_complete(miniloop()) - assert ref() is None - - -shader_source = """ -@vertex -fn vs_main(@builtin(vertex_index) vertex_index : u32) -> @builtin(position) vec4 { - var positions: array, 3> = array, 3>(vec2(0.0, -0.5), vec2(0.5, 0.5), vec2(-0.5, 0.7)); - let p: vec2 = positions[vertex_index]; - return vec4(p, 0.0, 1.0); -} - -@fragment -fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.5, 0.0, 1.0); -} -""" - - -def test_glfw_canvas_render(): - """Render an orange square ... 
in a glfw window.""" - - import glfw - from wgpu.gui.glfw import update_glfw_canvasses, WgpuCanvas - - loop = asyncio.get_event_loop() - - canvas = WgpuCanvas(max_fps=9999) - - device = wgpu.utils.get_default_device() - draw_frame1 = _get_draw_function(device, canvas) - - frame_counter = 0 - - def draw_frame2(): - nonlocal frame_counter - frame_counter += 1 - draw_frame1() - - canvas.request_draw(draw_frame2) - - # Give it a few rounds to start up - async def miniloop(): - for i in range(10): - glfw.poll_events() - update_glfw_canvasses() - await asyncio.sleep(0.01) - - loop.run_until_complete(miniloop()) - # There should have been exactly one draw now - assert frame_counter == 1 - - # Ask for a lot of draws - for i in range(5): - canvas.request_draw() - # Process evens for a while - loop.run_until_complete(miniloop()) - # We should have had just one draw - assert frame_counter == 2 - - # Change the canvase size - canvas.set_logical_size(300, 200) - canvas.set_logical_size(400, 300) - # We should have had just one draw - loop.run_until_complete(miniloop()) - assert frame_counter == 3 - - # canvas.close() - glfw.poll_events() - - -def test_glfw_canvas_render_custom_canvas(): - """Render an orange square ... in a glfw window. But not using WgpuCanvas. - This helps make sure that WgpuCanvasInterface is indeed the minimal - required canvas API. - """ - - import glfw - - class CustomCanvas: # implements wgpu.WgpuCanvasInterface - def __init__(self): - glfw.window_hint(glfw.CLIENT_API, glfw.NO_API) - glfw.window_hint(glfw.RESIZABLE, True) - self.window = glfw.create_window(300, 200, "canvas", None, None) - self._present_context = None - - def get_window_id(self): - if sys.platform.startswith("win"): - return int(glfw.get_win32_window(self.window)) - elif sys.platform.startswith("darwin"): - return int(glfw.get_cocoa_window(self.window)) - elif sys.platform.startswith("linux"): - is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() - if is_wayland: - return int(glfw.get_wayland_window(self.window)) - else: - return int(glfw.get_x11_window(self.window)) - else: - raise RuntimeError(f"Cannot get GLFW window id on {sys.platform}.") - - def get_display_id(self): - return wgpu.WgpuCanvasInterface.get_display_id(self) - - def get_physical_size(self): - psize = glfw.get_framebuffer_size(self.window) - return int(psize[0]), int(psize[1]) - - def get_context(self): - if self._present_context is None: - backend_module = sys.modules["wgpu"].gpu.__module__ - PC = sys.modules[backend_module].GPUCanvasContext # noqa N806 - self._present_context = PC(self) - return self._present_context - - canvas = CustomCanvas() - - # Also pass canvas here, to touch that code somewhere - adapter = wgpu.gpu.request_adapter( - canvas=canvas, power_preference="high-performance" - ) - device = adapter.request_device() - draw_frame = _get_draw_function(device, canvas) - - for i in range(5): - time.sleep(0.01) - glfw.poll_events() - draw_frame() - canvas.get_context().present() # WgpuCanvasBase normally automates this - - glfw.hide_window(canvas.window) - - -def _get_draw_function(device, canvas): - # Bindings and layout - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - shader = device.create_shader_module(code=shader_source) - - present_context = canvas.get_context() - render_texture_format = present_context.get_preferred_format(device.adapter) - present_context.configure(device=device, format=render_texture_format) - - render_pipeline = device.create_render_pipeline( - 
label="my-debug-pipeline", - layout=pipeline_layout, - vertex={ - "module": shader, - "entry_point": "vs_main", - "buffers": [], - }, - primitive={ - "topology": wgpu.PrimitiveTopology.triangle_strip, - "strip_index_format": wgpu.IndexFormat.uint32, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.none, - }, - depth_stencil=None, - multisample={ - "count": 1, - "mask": 0xFFFFFFFF, - "alpha_to_coverage_enabled": False, - }, - fragment={ - "module": shader, - "entry_point": "fs_main", - "targets": [ - { - "format": render_texture_format, - "blend": { - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - }, - ], - }, - ) - - def draw_frame(): - current_texture_view = present_context.get_current_texture().create_view() - command_encoder = device.create_command_encoder() - assert current_texture_view.size - ca = { - "view": current_texture_view, - "resolve_target": None, - "clear_value": (0, 0, 0, 0), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - render_pass = command_encoder.begin_render_pass( - color_attachments=[ca], - ) - - render_pass.set_pipeline(render_pipeline) - render_pass.draw(4, 1, 0, 0) - render_pass.end() - device.queue.submit([command_encoder.finish()]) - - return draw_frame - - -if __name__ == "__main__": - setup_module() - run_tests(globals()) - teardown_module() diff --git a/tests/test_util_compute.py b/tests/test_util_compute.py deleted file mode 100644 index 8bff1c4..0000000 --- a/tests/test_util_compute.py +++ /dev/null @@ -1,536 +0,0 @@ -import random -import ctypes -import base64 -from ctypes import c_int32, c_ubyte -import sys - -import wgpu -from wgpu.utils.compute import compute_with_buffers -from pytest import skip, mark, raises -from testutils import run_tests, can_use_wgpu_lib, is_ci, iters_equal - - -if not can_use_wgpu_lib: - skip("Skipping tests that need the wgpu lib", allow_module_level=True) - - -simple_compute_shader = """ - @group(0) - @binding(0) - var data2: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i: u32 = index.x; - data2[i] = i32(i); - } -""" - -# To generate compute_shader_spirv from a Python function -# -# from pyshader import python2shader, Array, i32, ivec3 -# -# def simple_compute_shader_py( -# index: ("input", "GlobalInvocationId", ivec3), -# out: ("buffer", 0, Array(i32)), -# ): -# out[index.x] = index.x -# -# print(base64.encodebytes(python2shader(simple_compute_shader_py).to_spirv()).decode()) - -simple_compute_shader_spirv = base64.decodebytes( - """ -AwIjBwADAQAAAAAAFgAAAAAAAAARAAIAAQAAAA4AAwAAAAAAAAAAAA8ABgAFAAAAAQAAAG1haW4A -AAAACAAAABAABgABAAAAEQAAAAEAAAABAAAAAQAAAAUABAABAAAAbWFpbgAAAAAFAAQACAAAAGlu -ZGV4AAAABQADAAwAAABvdXQABQADAA0AAAAwAAAARwAEAAgAAAALAAAAHAAAAEcABAAJAAAABgAA -AAQAAABIAAUACgAAAAAAAAAjAAAAAAAAAEcAAwAKAAAAAwAAAEcABAAMAAAAIgAAAAAAAABHAAQA -DAAAACEAAAAAAAAAEwACAAIAAAAhAAMAAwAAAAIAAAAVAAQABQAAACAAAAABAAAAFwAEAAYAAAAF -AAAAAwAAACAABAAHAAAAAQAAAAYAAAA7AAQABwAAAAgAAAABAAAAHQADAAkAAAAFAAAAHgADAAoA -AAAJAAAAIAAEAAsAAAACAAAACgAAADsABAALAAAADAAAAAIAAAArAAQABQAAAA0AAAAAAAAAIAAE -AA4AAAACAAAABQAAACAABAAQAAAAAQAAAAUAAAAgAAQAEwAAAAEAAAAFAAAANgAFAAIAAAABAAAA -AAAAAAMAAAD4AAIABAAAAEEABQAQAAAAEQAAAAgAAAANAAAAPQAEAAUAAAASAAAAEQAAAEEABgAO -AAAADwAAAAwAAAANAAAAEgAAAEEABQATAAAAFAAAAAgAAAANAAAAPQAEAAUAAAAVAAAAFAAAAD4A -AwAPAAAAFQAAAP0AAQA4AAEA -""".encode() -) - - -def test_compute_0_1_ctype(): - 
compute_shader = simple_compute_shader - assert isinstance(compute_shader, str) - - # Create some ints! - out = compute_with_buffers({}, {0: c_int32 * 100}, compute_shader) - assert isinstance(out, dict) and len(out) == 1 - assert isinstance(out[0], ctypes.Array) - assert iters_equal(out[0], range(100)) - - # Same, but specify in bytes - out = compute_with_buffers({}, {0: c_ubyte * 80}, compute_shader, n=20) - assert isinstance(out, dict) and len(out) == 1 - assert isinstance(out[0], ctypes.Array) - out0 = (c_int32 * 20).from_buffer(out[0]) # cast (a view in np) - assert iters_equal(out0, range(20)) - - -def test_compute_0_1_tuple(): - compute_shader = simple_compute_shader - - out = compute_with_buffers({}, {0: (100, "i")}, compute_shader) - assert isinstance(out, dict) and len(out) == 1 - assert isinstance(out[0], memoryview) - assert out[0].tolist() == list(range(100)) - - -def test_compute_0_1_str(): - compute_shader = simple_compute_shader - - out = compute_with_buffers({}, {0: "100xi"}, compute_shader) - assert isinstance(out, dict) and len(out) == 1 - assert isinstance(out[0], memoryview) - assert out[0].tolist() == list(range(100)) - - -def test_compute_0_1_int(): - compute_shader = simple_compute_shader - - out = compute_with_buffers({}, {0: 400}, compute_shader, n=100) - assert isinstance(out, dict) and len(out) == 1 - assert isinstance(out[0], memoryview) - assert out[0].cast("i").tolist() == list(range(100)) - - -@mark.skipif( - is_ci and sys.platform == "win32", reason="Cannot use SpirV shader on dx12" -) -def test_compute_0_1_spirv(): - compute_shader = simple_compute_shader_spirv - assert isinstance(compute_shader, bytes) - - out = compute_with_buffers({}, {0: c_int32 * 100}, compute_shader) - assert isinstance(out, dict) and len(out) == 1 - assert isinstance(out[0], ctypes.Array) - assert iters_equal(out[0], range(100)) - - -def test_compute_1_3(): - compute_shader = """ - - @group(0) - @binding(0) - var data0: array; - - @group(0) - @binding(1) - var data1: array; - - @group(0) - @binding(2) - var data2: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = i32(index.x); - data1[i] = data0[i]; - data2[i] = i; - } - """ - - # Create an array of 100 random int32 - in1 = [int(random.uniform(0, 100)) for i in range(100)] - in1 = (c_int32 * 100)(*in1) - - outspecs = {1: 100 * c_int32, 2: 100 * c_int32} - out = compute_with_buffers({0: in1}, outspecs, compute_shader) - assert isinstance(out, dict) and len(out) == 2 - assert isinstance(out[1], ctypes.Array) - assert isinstance(out[2], ctypes.Array) - assert iters_equal(out[1], in1) # because the shader copied the data - assert iters_equal(out[2], range(100)) # because this is the index - - -def test_compute_in_is_out(): - compute_shader = """ - - @group(0) - @binding(0) - var data0: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = i32(index.x); - data0[i] = data0[i] * 2; - } - """ - - # Create an array of 100 random int32 - in1 = [int(random.uniform(0, 100)) for i in range(100)] - expected_out = [i * 2 for i in in1] - buf = (c_int32 * 100)(*in1) - - out = compute_with_buffers({0: buf}, {0: 100 * c_int32}, compute_shader) - assert isinstance(out, dict) and len(out) == 1 - assert isinstance(out[0], ctypes.Array) - assert out[0] is not buf # a copy was made - assert iters_equal(out[0], expected_out) - - -def test_compute_indirect(): - compute_shader = """ - @group(0) - @binding(0) - var data1: array; - - @group(0) - 
@binding(1) - var data2: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = i32(index.x); - data2[i] = data1[i] + 1; - } - """ - - # Create an array of 100 random int32 - n = 100 - in1 = [int(random.uniform(0, 100)) for i in range(n)] - in1 = (c_int32 * n)(*in1) - - # Create device and shader object - device = wgpu.utils.get_default_device() - cshader = device.create_shader_module(code=compute_shader) - - # Create input buffer and upload data to in - buffer1 = device.create_buffer_with_data(data=in1, usage=wgpu.BufferUsage.STORAGE) - - # Create output buffer - buffer2 = device.create_buffer( - size=ctypes.sizeof(in1), - usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC, - ) - - # Create buffer to hold the dispatch parameters for the indirect call - params = (ctypes.c_int32 * 3)(n - 2, 1, 1) # note the minus 2! - buffer3 = device.create_buffer_with_data( - data=params, - usage=wgpu.BufferUsage.INDIRECT, - ) - - # Setup layout and bindings - binding_layouts = [ - { - "binding": 0, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - { - "binding": 1, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.storage, - }, - }, - ] - bindings = [ - { - "binding": 0, - "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, - }, - { - "binding": 1, - "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, - }, - ] - - # Put everything together - bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) - pipeline_layout = device.create_pipeline_layout( - bind_group_layouts=[bind_group_layout] - ) - bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - - # Create and run the pipeline, fail - test check_struct - with raises(ValueError): - compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": cshader, "entry_point": "main", "foo": 42}, - ) - - # Create and run the pipeline - compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": cshader, "entry_point": "main"}, - ) - command_encoder = device.create_command_encoder() - compute_pass = command_encoder.begin_compute_pass() - compute_pass.set_pipeline(compute_pipeline) - compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 args not used - compute_pass.dispatch_workgroups_indirect(buffer3, 0) - compute_pass.end() - device.queue.submit([command_encoder.finish()]) - - # Read result - out1 = in1.__class__.from_buffer(device.queue.read_buffer(buffer2)) - in2 = list(in1)[:] - out2 = [i - 1 for i in out1] - # The shader was applied to all but the last two elements - assert in2[:-2] == out2[:-2] - assert out2[-2:] == [-1, -1] - - -def test_compute_default_layout1(): - compute_shader = """ - @group(0) - @binding(0) - var data1: array; - - @group(0) - @binding(1) - var data2: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = i32(index.x); - data2[i] = data1[i] + 1; - } - """ - - # Create an array of 100 random int32 - n = 100 - in1 = [int(random.uniform(0, 100)) for i in range(n)] - in1 = (c_int32 * n)(*in1) - - # Create device and shader object - device = wgpu.utils.get_default_device() - cshader = device.create_shader_module(code=compute_shader) - - # Create input buffer and upload data to in - buffer1 = device.create_buffer_with_data(data=in1, 
usage=wgpu.BufferUsage.STORAGE) - - # Create output buffer - buffer2 = device.create_buffer( - size=ctypes.sizeof(in1), - usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC, - ) - - # Create buffer to hold the dispatch parameters for the indirect call - params = (ctypes.c_int32 * 3)(n - 2, 1, 1) # note the minus 2! - buffer3 = device.create_buffer_with_data( - data=params, - usage=wgpu.BufferUsage.INDIRECT, - ) - - # Setup bindings info - bindings = [ - { - "binding": 0, - "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, - }, - { - "binding": 1, - "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, - }, - ] - - # Create a pipeline using "auto" layout mode - compute_pipeline = device.create_compute_pipeline( - layout=wgpu.enums.AutoLayoutMode.auto, - compute={"module": cshader, "entry_point": "main"}, - ) - bind_group_layout = compute_pipeline.get_bind_group_layout(0) - bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - - # Run the pipeline - command_encoder = device.create_command_encoder() - compute_pass = command_encoder.begin_compute_pass() - compute_pass.set_pipeline(compute_pipeline) - compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 args not used - compute_pass.dispatch_workgroups_indirect(buffer3, 0) - compute_pass.end() - device.queue.submit([command_encoder.finish()]) - - # Read result - out1 = in1.__class__.from_buffer(device.queue.read_buffer(buffer2)) - in2 = list(in1)[:] - out2 = [i - 1 for i in out1] - # The shader was applied to all but the last two elements - assert in2[:-2] == out2[:-2] - assert out2[-2:] == [-1, -1] - - -def test_compute_default_layout2(): - # Default layout with multiple bind groups - - compute_shader = """ - @group(0) - @binding(0) - var data1: array; - - @group(1) - @binding(0) - var data2: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = i32(index.x); - data2[i] = data1[i] + 1; - } - """ - - # Create an array of 100 random int32 - n = 100 - in1 = [int(random.uniform(0, 100)) for i in range(n)] - in1 = (c_int32 * n)(*in1) - - # Create device and shader object - device = wgpu.utils.get_default_device() - cshader = device.create_shader_module(code=compute_shader) - - # Create input buffer and upload data to in - buffer1 = device.create_buffer_with_data(data=in1, usage=wgpu.BufferUsage.STORAGE) - - # Create output buffer - buffer2 = device.create_buffer( - size=ctypes.sizeof(in1), - usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC, - ) - - # Create buffer to hold the dispatch parameters for the indirect call - params = (ctypes.c_int32 * 3)(n - 2, 1, 1) # note the minus 2! 
- buffer3 = device.create_buffer_with_data( - data=params, - usage=wgpu.BufferUsage.INDIRECT, - ) - - # Setup bindings info - bindings0 = [ - { - "binding": 0, - "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, - }, - ] - bindings1 = [ - { - "binding": 0, - "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, - }, - ] - - # Create a pipeline using "auto" layout mode - compute_pipeline = device.create_compute_pipeline( - layout=wgpu.enums.AutoLayoutMode.auto, - compute={"module": cshader, "entry_point": "main"}, - ) - bind_group_layout0 = compute_pipeline.get_bind_group_layout(0) - bind_group0 = device.create_bind_group(layout=bind_group_layout0, entries=bindings0) - - bind_group_layout1 = compute_pipeline.get_bind_group_layout(1) - bind_group1 = device.create_bind_group(layout=bind_group_layout1, entries=bindings1) - - # Run the pipeline - command_encoder = device.create_command_encoder() - compute_pass = command_encoder.begin_compute_pass() - compute_pass.set_pipeline(compute_pipeline) - compute_pass.set_bind_group(0, bind_group0, [], 0, 999999) - compute_pass.set_bind_group(1, bind_group1, [], 0, 999999) - compute_pass.dispatch_workgroups_indirect(buffer3, 0) - compute_pass.end() - device.queue.submit([command_encoder.finish()]) - - # Read result - out1 = in1.__class__.from_buffer(device.queue.read_buffer(buffer2)) - in2 = list(in1)[:] - out2 = [i - 1 for i in out1] - # The shader was applied to all but the last two elements - assert in2[:-2] == out2[:-2] - assert out2[-2:] == [-1, -1] - - -def test_compute_fails(): - compute_shader = """ - @group(0) - @binding(0) - var data1: array; - - @group(0) - @binding(1) - var data2: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = i32(index.x); - data2[i] = data1[i]; - } - """ - - in1 = [int(random.uniform(0, 100)) for i in range(100)] - in1 = (c_int32 * 100)(*in1) - - # Baseline; this works - out = compute_with_buffers( - {0: in1}, {1: c_int32 * 100}, compute_shader, n=(100, 1, 1) - ) - assert iters_equal(out[1], in1) - - with raises(TypeError): # input_arrays is not a dict - compute_with_buffers([in1], {1: c_int32 * 100}, compute_shader) - with raises(TypeError): # input_arrays key not int - compute_with_buffers({"0": in1}, {1: c_int32 * 100}, compute_shader) - with raises(TypeError): # input_arrays value not ctypes array - compute_with_buffers({0: list(in1)}, {1: c_int32 * 100}, compute_shader) - - with raises(TypeError): # output_arrays is not a dict - compute_with_buffers({0: in1}, [c_int32 * 100], compute_shader) - with raises(TypeError): # output_arrays key not int - compute_with_buffers({0: in1}, {"1": c_int32 * 100}, compute_shader) - with raises(TypeError): # output_arrays value not a ctypes Array type - compute_with_buffers({0: in1}, {1: "foobar"}, compute_shader) - - with raises(ValueError): # output_arrays format invalid - compute_with_buffers({0: in1}, {1: "10xfoo"}, compute_shader) - with raises(ValueError): # output_arrays shape invalid - compute_with_buffers({0: in1}, {1: ("i",)}, compute_shader) - with raises(ValueError): # output_arrays shape invalid - compute_with_buffers( - {0: in1}, - { - 1: ( - 0, - "i", - ) - }, - compute_shader, - ) - with raises(ValueError): # output_arrays shape invalid - compute_with_buffers( - {0: in1}, - { - 1: ( - -1, - "i", - ) - }, - compute_shader, - ) - - with raises(TypeError): # invalid n - compute_with_buffers({0: in1}, {1: c_int32 * 100}, compute_shader, n="100") - with raises(ValueError): # invalid n 
- compute_with_buffers({0: in1}, {1: c_int32 * 100}, compute_shader, n=-1) - - with raises(TypeError): # invalid shader - compute_with_buffers({0: in1}, {1: c_int32 * 100}, {"not", "a", "shader"}) - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_util_core.py b/tests/test_util_core.py deleted file mode 100644 index 1ff3fb6..0000000 --- a/tests/test_util_core.py +++ /dev/null @@ -1,47 +0,0 @@ -import wgpu -from wgpu._coreutils import error_message_hash, str_flag_to_int, _flag_cache -from testutils import run_tests - - -def test_error_message_hash(): - text1 = """In wgpuRenderPassEncoderEnd - In a pass parameter - note: command buffer = `` - The color attachment at index 0's texture view is not renderable: - """ - - text2 = """In wgpuRenderPassEncoderEnd - In a pass parameter - note: command buffer = `` - The color attachment at index 0's texture view is not renderable: - """ - - text3 = """In wgpuRenderPassEncoderEnd - In a pass parameter BLABLA - note: command buffer = `` - The color attachment at index 0's texture view is not renderable: - """ - - assert error_message_hash(text1) == error_message_hash(text2) - assert error_message_hash(text1) != error_message_hash(text3) - - -def test_str_flag_to_int(): - versions = [ - "UNIFORM|VERTEX", - "UNIFORM | VERTEX", - "VERTEX | UNIFORM", - "VERTEX| UNIFORM", - ] - - flags = [str_flag_to_int(wgpu.BufferUsage, v) for v in versions] - - for flag in flags: - assert flag == flags[0] - - for v in versions: - assert f"BufferUsage.{v}" in _flag_cache - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_basics.py b/tests/test_wgpu_native_basics.py deleted file mode 100644 index d546211..0000000 --- a/tests/test_wgpu_native_basics.py +++ /dev/null @@ -1,222 +0,0 @@ -import os -import base64 -import shutil -import ctypes -import sys -import tempfile - -import wgpu.utils -import wgpu.backends.wgpu_native -import numpy as np - -from testutils import run_tests, can_use_wgpu_lib, is_ci -from pytest import mark, raises - - -is_win = sys.platform.startswith("win") - - -def test_get_wgpu_version(): - version = wgpu.backends.wgpu_native.__version__ - commit_sha = wgpu.backends.wgpu_native.__commit_sha__ - version_info = wgpu.backends.wgpu_native.version_info - - assert isinstance(version, str) - assert len(version) > 1 - - assert isinstance(version_info, tuple) - assert all(isinstance(i, int) for i in version_info) - assert len(version_info) == 4 - - assert isinstance(commit_sha, str) - assert len(commit_sha) > 0 - - -def test_override_wgpu_lib_path(): - # Current version - try: - old_path = wgpu.backends.wgpu_native.lib_path - except RuntimeError: - old_path = None - - # Change it - old_env_var = os.environ.get("WGPU_LIB_PATH", None) - os.environ["WGPU_LIB_PATH"] = "foo/bar" - - # Check - assert wgpu.backends.wgpu_native._ffi.get_wgpu_lib_path() == "foo/bar" - - # Change it back - if old_env_var is None: - os.environ.pop("WGPU_LIB_PATH") - else: - os.environ["WGPU_LIB_PATH"] = old_env_var - - # Still the same as before? 
- try: - path = wgpu.backends.wgpu_native._ffi.get_wgpu_lib_path() - except RuntimeError: - path = None - assert path == old_path - - -def test_tuple_from_tuple_or_dict(): - func = wgpu.backends.wgpu_native._api._tuple_from_tuple_or_dict - - assert func([1, 2, 3], ("x", "y", "z")) == (1, 2, 3) - assert func({"y": 2, "z": 3, "x": 1}, ("x", "y", "z")) == (1, 2, 3) - assert func((10, 20), ("width", "height")) == (10, 20) - assert func({"width": 10, "height": 20}, ("width", "height")) == (10, 20) - - with raises(TypeError): - func("not tuple/dict", ("x", "y")) - with raises(ValueError): - func([1], ("x", "y")) - with raises(ValueError): - func([1, 2, 3], ("x", "y")) - with raises(ValueError): - assert func({"x": 1}, ("x", "y")) - - -compute_shader_wgsl = """ -@group(0) -@binding(0) -var out1: array; - -@compute -@workgroup_size(1) -fn main(@builtin(global_invocation_id) index: vec3) { - let i: u32 = index.x; - out1[i] = i32(i); -} -""" - -compute_shader_spirv = base64.decodebytes( - """ -AwIjBwADAQAAAAAAFgAAAAAAAAARAAIAAQAAAA4AAwAAAAAAAAAAAA8ABgAFAAAAAQAAAG1haW4A -AAAACAAAABAABgABAAAAEQAAAAEAAAABAAAAAQAAAAUABAABAAAAbWFpbgAAAAAFAAQACAAAAGlu -ZGV4AAAABQADAAwAAABvdXQABQADAA0AAAAwAAAARwAEAAgAAAALAAAAHAAAAEcABAAJAAAABgAA -AAQAAABIAAUACgAAAAAAAAAjAAAAAAAAAEcAAwAKAAAAAwAAAEcABAAMAAAAIgAAAAAAAABHAAQA -DAAAACEAAAAAAAAAEwACAAIAAAAhAAMAAwAAAAIAAAAVAAQABQAAACAAAAABAAAAFwAEAAYAAAAF -AAAAAwAAACAABAAHAAAAAQAAAAYAAAA7AAQABwAAAAgAAAABAAAAHQADAAkAAAAFAAAAHgADAAoA -AAAJAAAAIAAEAAsAAAACAAAACgAAADsABAALAAAADAAAAAIAAAArAAQABQAAAA0AAAAAAAAAIAAE -AA4AAAACAAAABQAAACAABAAQAAAAAQAAAAUAAAAgAAQAEwAAAAEAAAAFAAAANgAFAAIAAAABAAAA -AAAAAAMAAAD4AAIABAAAAEEABQAQAAAAEQAAAAgAAAANAAAAPQAEAAUAAAASAAAAEQAAAEEABgAO -AAAADwAAAAwAAAANAAAAEgAAAEEABQATAAAAFAAAAAgAAAANAAAAPQAEAAUAAAAVAAAAFAAAAD4A -AwAPAAAAFQAAAP0AAQA4AAEA -""".encode() -) - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_logging(): - # Do *something* while we set the log level low - device = wgpu.utils.get_default_device() - - wgpu.logger.setLevel("DEBUG") - - device.create_shader_module(code=compute_shader_wgsl) - - wgpu.logger.setLevel("WARNING") - - # yeah, would be nice to be able to capture the logs. But if we don't crash - # and see from the coverage that we touched the logger integration code, - # we're doing pretty good ... - # (capsys does not work because it logs to the raw stderr) - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_wgpu_native_tracer(): - tempdir = os.path.join(tempfile.gettempdir(), "wgpu-tracer-test") - adapter = wgpu.utils.get_default_device().adapter - - # Make empty - shutil.rmtree(tempdir, ignore_errors=True) - assert not os.path.isdir(tempdir) - - # Works! 
- wgpu.backends.wgpu_native.request_device_tracing(adapter, tempdir) - assert os.path.isdir(tempdir) - - # Make dir not empty - with open(os.path.join(tempdir, "stub.txt"), "wb"): - pass - - # Still works, but produces warning - wgpu.backends.wgpu_native.request_device_tracing(adapter, tempdir) - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_wgpu_native_enumerate_adapters(): - # Get all available adapters - adapters = wgpu.backends.wgpu_native.enumerate_adapters() - assert len(adapters) > 0 - - # Check that we can get a device from each adapter - for adapter in adapters: - d = adapter.request_device() - assert isinstance(d, wgpu.backends.wgpu_native.GPUDevice) - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -@mark.skipif(is_ci and is_win, reason="Cannot use SpirV shader on dx12") -def test_shader_module_creation_spirv(): - device = wgpu.utils.get_default_device() - - code1 = compute_shader_spirv - assert isinstance(code1, bytes) - code4 = type("CodeObject", (object,), {}) - - m1 = device.create_shader_module(code=code1) - assert m1.get_compilation_info() == [] - - with raises(TypeError): - device.create_shader_module(code=code4) - with raises(TypeError): - device.create_shader_module(code={"not", "a", "shader"}) - with raises(ValueError): - device.create_shader_module(code=b"bytes but no SpirV magic number") - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_adapter_destroy(): - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - assert adapter._internal is not None - adapter.__del__() - assert adapter._internal is None - - -def test_get_memoryview_and_address(): - get_memoryview_and_address = ( - wgpu.backends.wgpu_native._helpers.get_memoryview_and_address - ) - - data = b"bytes are readonly, but we can map it. Don't abuse this :)" - m, address = get_memoryview_and_address(data) - assert m.nbytes == len(data) - assert address > 0 - - data = bytearray(b"A bytearray works too") - m, address = get_memoryview_and_address(data) - assert m.nbytes == len(data) - assert address > 0 - - data = (ctypes.c_float * 100)() - m, address = get_memoryview_and_address(data) - assert m.nbytes == ctypes.sizeof(data) - assert address > 0 - - data = np.array([1, 2, 3, 4]) - m, address = get_memoryview_and_address(data) - assert m.nbytes == data.nbytes - assert address > 0 - - data = np.array([1, 2, 3, 4]) - data.flags.writeable = False - m, address = get_memoryview_and_address(data) - assert m.nbytes == data.nbytes - assert address > 0 - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_buffer.py b/tests/test_wgpu_native_buffer.py deleted file mode 100644 index a4bd541..0000000 --- a/tests/test_wgpu_native_buffer.py +++ /dev/null @@ -1,530 +0,0 @@ -import random -import ctypes -import sys - -import wgpu.utils -import numpy as np - -from testutils import run_tests, can_use_wgpu_lib, iters_equal -from pytest import mark, raises - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_buffer_init1(): - # Initializing a buffer with data - - device = wgpu.utils.get_default_device() - data1 = b"abcdefghijkl" - - assert repr(device).startswith("= (3, 8): # no memoryview.toreadonly on 3.7 and below - with raises(TypeError): - data2[0] = 1 - with raises(TypeError): - data3[0] = 1 - with raises(TypeError): - data4[0] = 1 - - buf.unmap() - - # The memoryview is invalidated when the buffer unmapped. - # Note that this unfortunately does *not* hold for views on these arrays. 
- with raises(ValueError): - data2[0] - with raises(ValueError): - data3[0] - with raises(ValueError): - data4[0] - - with raises(ValueError): - data2[0] = 1 - with raises(ValueError): - data3[0] = 1 - with raises(ValueError): - data4[0] = 1 - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_clear_buffer(): - data0 = b"111111112222222233333333" - data1 = b"111111110000000000003333" - data2 = b"111100000000000000000000" - data3 = b"000000000000000000000000" - - # Prep - device = wgpu.utils.get_default_device() - buf = device.create_buffer( - size=len(data1), usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - device.queue.write_buffer(buf, 0, data0) - - # Download original data - res = device.queue.read_buffer(buf) - assert res == data0 - - # Clear part of the buffer - command_encoder = device.create_command_encoder() - command_encoder.clear_buffer(buf, 8, 12) - device.queue.submit([command_encoder.finish()]) - - res = bytes(device.queue.read_buffer(buf)).replace(b"\x00", b"0") - assert res == data1 - - # Clear the all from index 4 - command_encoder = device.create_command_encoder() - command_encoder.clear_buffer(buf, 4, None) - device.queue.submit([command_encoder.finish()]) - - res = bytes(device.queue.read_buffer(buf)).replace(b"\x00", b"0") - assert res == data2 - - # Clear the whole buffer - command_encoder = device.create_command_encoder() - command_encoder.clear_buffer(buf, 0) - device.queue.submit([command_encoder.finish()]) - - res = bytes(device.queue.read_buffer(buf)).replace(b"\x00", b"0") - assert res == data3 - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_write_buffer1(): - device = wgpu.utils.get_default_device() - - data1 = memoryview(np.random.random(size=100).astype(np.float32)) - - # Create buffer - buf4 = device.create_buffer( - size=data1.nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - - # Upload from CPU to buffer - device.create_command_encoder() # we seem to need to create one - device.queue.write_buffer(buf4, 0, data1) - device.queue.submit([]) - - # Download from buffer to CPU - data2 = device.queue.read_buffer(buf4).cast("f") - assert data1 == data2 - - # Yes, you can compare memoryviews! Check this: - data1[0] += 1 - assert data1 != data2 - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_write_buffer2(): - device = wgpu.utils.get_default_device() - - nx, ny, nz = 100, 1, 1 - data0 = (ctypes.c_float * 100)(*[random.random() for i in range(nx * ny * nz)]) - data1 = (ctypes.c_float * 100)() - nbytes = ctypes.sizeof(data1) - - # Create buffer - buf4 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - - for i in range(len(data1)): - data1[i] = data0[i] - - # Upload from CPU to buffer - device.create_command_encoder() # we seem to need to create one - device.queue.write_buffer(buf4, 0, data1) - - # We swipe the data. You could also think that we passed something into - # write_buffer without holding a reference to it. Anyway, write_buffer - # seems to copy the data at the moment it is called. 
- for i in range(len(data1)): - data1[i] = 1 - - device.queue.submit([]) - - # Download from buffer to CPU - data2 = data1.__class__.from_buffer(device.queue.read_buffer(buf4)) - assert iters_equal(data0, data2) - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_write_buffer3(): - device = wgpu.utils.get_default_device() - nbytes = 12 - - # Create buffer - buf4 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - - # Upload from CPU to buffer, using bytes - device.create_command_encoder() # we seem to need to create one - device.queue.write_buffer(buf4, 0, b"abcdefghijkl", 0, nbytes) - device.queue.submit([]) - - # Download from buffer to CPU - assert device.queue.read_buffer(buf4).tobytes() == b"abcdefghijkl" - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_buffer_map_read_and_write(): - # Do a mini round-trip using mapped buffers - - device = wgpu.utils.get_default_device() - nbytes = 12 - - # Create buffers - buf1 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.MAP_WRITE - ) - buf2 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.MAP_READ - ) - - # Upload - data1 = b"abcdefghijkl" - buf1.map("write") - buf1.write_mapped(data1) - buf1.unmap() - - # Copy - command_encoder = device.create_command_encoder() - command_encoder.copy_buffer_to_buffer(buf1, 0, buf2, 0, nbytes) - device.queue.submit([command_encoder.finish()]) - - # Download - buf2.map("read") - data2 = buf2.read_mapped() - buf2.unmap() - assert data1 == data2 - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_compute_tex.py b/tests/test_wgpu_native_compute_tex.py deleted file mode 100644 index a1cf291..0000000 --- a/tests/test_wgpu_native_compute_tex.py +++ /dev/null @@ -1,592 +0,0 @@ -import random -import ctypes -import sys - -import numpy as np - -import wgpu -from pytest import skip -from testutils import run_tests, get_default_device -from testutils import can_use_wgpu_lib, is_ci - - -if not can_use_wgpu_lib: - skip("Skipping tests that need the wgpu lib", allow_module_level=True) -elif is_ci and sys.platform == "win32": - skip("These tests fail on dx12 for some reason", allow_module_level=True) - -# %% 1D - - -def test_compute_tex_1d_rgba8uint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_1d; - - @group(0) @binding(1) - var r_tex2: texture_storage_1d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i: i32 = i32(index.x); - let color1 = vec4(textureLoad(r_tex1, i, 0)); - let color2 = vec4(color1.x + i, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, vec4(color2)); - } - """ - - # Generate data - nx, ny, nz, nc = 64, 1, 1, 4 - data1 = (ctypes.c_uint8 * nc * nx)() - for x in range(nx): - for c in range(nc): - data1[x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.rgba8uint, - wgpu.TextureDimension.d1, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_1d_rgba16sint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_1d; - - @group(0) @binding(1) - var r_tex2: texture_storage_1d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i: i32 = i32(index.x); - let color1 : vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + i, color1.y + 1, color1.z * 2, color1.a); - 
textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 128, 1, 1, 4 - data1 = (ctypes.c_int16 * nc * nx)() - for x in range(nx): - for c in range(nc): - data1[x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.rgba16sint, - wgpu.TextureDimension.d1, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_1d_r32sint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_1d; - - @group(0) @binding(1) - var r_tex2: texture_storage_1d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i: i32 = i32(index.x); - let color1 : vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + i, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 256, 1, 1, 1 - data1 = (ctypes.c_int32 * nc * nx)() - for x in range(nx): - for c in range(nc): - data1[x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.r32sint, - wgpu.TextureDimension.d1, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_1d_r32float(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_1d; - - @group(0) @binding(1) - var r_tex2: texture_storage_1d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i: i32 = i32(index.x); - let color1 : vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + f32(i), color1.y + 1.0, color1.z * 2.0, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 256, 1, 1, 1 - data1 = (ctypes.c_float * nc * nx)() - for x in range(nx): - for c in range(nc): - data1[x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.r32float, - wgpu.TextureDimension.d1, - (nx, ny, nz, nc), - data1, - ) - - -# %% 2D - - -def test_compute_tex_2d_rgba8uint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_2d; - - @group(0) @binding(1) - var r_tex2: texture_storage_2d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec2(index.xy); - let color1 = vec4(textureLoad(r_tex1, i, 0)); - let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, vec4(color2)); - } - """ - - # Generate data - nx, ny, nz, nc = 64, 8, 1, 4 - data1 = (ctypes.c_uint8 * nc * nx * ny)() - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.rgba8uint, - wgpu.TextureDimension.d2, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_2d_rgba16sint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_2d; - - @group(0) @binding(1) - var r_tex2: texture_storage_2d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec2(index.xy); - let color1: vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 128, 8, 1, 4 - data1 = (ctypes.c_int16 * nc * nx * ny)() - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.rgba16sint, - 
wgpu.TextureDimension.d2, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_2d_r32sint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_2d; - - @group(0) @binding(1) - var r_tex2: texture_storage_2d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec2(index.xy); - let color1: vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 256, 8, 1, 1 - data1 = (ctypes.c_int32 * nc * nx * ny)() - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.r32sint, - wgpu.TextureDimension.d2, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_2d_r32float(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1:texture_2d; - - @group(0) @binding(1) - var r_tex2: texture_storage_2d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec2(index.xy); - let color1: vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 256, 8, 1, 1 - data1 = (ctypes.c_float * nc * nx * ny)() - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.r32float, - wgpu.TextureDimension.d2, - (nx, ny, nz, nc), - data1, - ) - - -# %% 3D - - -def test_compute_tex_3d_rgba8uint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_3d; - - @group(0) @binding(1) - var r_tex2: texture_storage_3d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec3(index); - let color1 = vec4(textureLoad(r_tex1, i, 0)); - let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, vec4(color2)); - } - """ - - # Generate data - nx, ny, nz, nc = 64, 8, 6, 4 - data1 = (ctypes.c_uint8 * nc * nx * ny * nz)() - for z in range(nz): - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[z][y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.rgba8uint, - wgpu.TextureDimension.d3, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_3d_rgba16sint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_3d; - - @group(0) @binding(1) - var r_tex2: texture_storage_3d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec3(index); - let color1: vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 128, 8, 6, 4 - data1 = (ctypes.c_int16 * nc * nx * ny * nz)() - for z in range(nz): - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[z][y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.rgba16sint, - wgpu.TextureDimension.d3, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_3d_r32sint(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_3d; - - @group(0) @binding(1) - var 
r_tex2: texture_storage_3d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec3(index); - let color1: vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + i.x, color1.y + 1, color1.z * 2, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 256, 8, 6, 1 - data1 = (ctypes.c_int32 * nc * nx * ny * nz)() - for z in range(nz): - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[z][y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.r32sint, - wgpu.TextureDimension.d3, - (nx, ny, nz, nc), - data1, - ) - - -def test_compute_tex_3d_r32float(): - compute_shader = """ - @group(0) @binding(0) - var r_tex1: texture_3d; - - @group(0) @binding(1) - var r_tex2: texture_storage_3d; - - @compute @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i = vec3(index); - let color1: vec4 = textureLoad(r_tex1, i, 0); - let color2 = vec4(color1.x + f32(i.x), color1.y + 1.0, color1.z * 2.0, color1.a); - textureStore(r_tex2, i, color2); - } - """ - - # Generate data - nx, ny, nz, nc = 64, 8, 6, 1 - data1 = (ctypes.c_float * nc * nx * ny * nz)() - for z in range(nz): - for y in range(ny): - for x in range(nx): - for c in range(nc): - data1[z][y][x][c] = random.randint(0, 20) - - # Compute and validate - _compute_texture( - compute_shader, - wgpu.TextureFormat.r32float, - wgpu.TextureDimension.d3, - (nx, ny, nz, nc), - data1, - ) - - -# %% - - -def _compute_texture(compute_shader, texture_format, texture_dim, texture_size, data1): - """ - Apply a computation on a texture and validate the result. The shader should: - * Add the x-coordinate to the red channel. - * Add 1 to the green channel. - * Multiply the blue channel by 2. - * The alpha channel must remain equal. 
- """ - - nx, ny, nz, nc = texture_size - nbytes = ctypes.sizeof(data1) - bpp = nbytes // (nx * ny * nz) # bytes per pixel - - device = get_default_device() - cshader = device.create_shader_module(code=compute_shader) - - # Create textures and views - texture1 = device.create_texture( - size=(nx, ny, nz), - dimension=texture_dim, - format=texture_format, - usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST, - ) - texture2 = device.create_texture( - size=(nx, ny, nz), - dimension=texture_dim, - format=texture_format, - usage=wgpu.TextureUsage.STORAGE_BINDING | wgpu.TextureUsage.COPY_SRC, - ) - texture_view1 = texture1.create_view() - texture_view2 = texture2.create_view() - - # Create buffer that we need to upload the data - buffer_usage = wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST - buffer = device.create_buffer_with_data(data=data1, usage=buffer_usage) - assert buffer.usage == buffer_usage - - texture_sample_type = "unfilterable-float" - if "uint" in texture_format: - texture_sample_type = "uint" - elif "sint" in texture_format: - texture_sample_type = "sint" - - # Define bindings - # One can see here why we need 2 textures: one is readonly, one writeonly - bindings = [ - {"binding": 0, "resource": texture_view1}, - {"binding": 1, "resource": texture_view2}, - ] - binding_layouts = [ - { - "binding": 0, - "visibility": wgpu.ShaderStage.COMPUTE, - "texture": { - "sample_type": texture_sample_type, - "view_dimension": texture_dim, - }, - }, - { - "binding": 1, - "visibility": wgpu.ShaderStage.COMPUTE, - "storage_texture": { - "access": wgpu.StorageTextureAccess.write_only, - "format": texture_format, - "view_dimension": texture_dim, - }, - }, - ] - bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) - pipeline_layout = device.create_pipeline_layout( - bind_group_layouts=[bind_group_layout] - ) - bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - - # Create a pipeline and run it - compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": cshader, "entry_point": "main"}, - ) - command_encoder = device.create_command_encoder() - - if False: # Upload via alt route (that does not have 256 alignment constraint) - device.queue.write_texture( - {"texture": texture1}, - data1, - {"bytes_per_row": bpp * nx, "rows_per_image": ny}, - (nx, ny, nz), - ) - else: - command_encoder.copy_buffer_to_texture( - { - "buffer": buffer, - "offset": 0, - "bytes_per_row": bpp * nx, - "rows_per_image": ny, - }, - {"texture": texture1, "mip_level": 0, "origin": (0, 0, 0)}, - (nx, ny, nz), - ) - compute_pass = command_encoder.begin_compute_pass() - compute_pass.push_debug_group("foo") - compute_pass.insert_debug_marker("setting pipeline") - compute_pass.set_pipeline(compute_pipeline) - compute_pass.insert_debug_marker("setting bind group") - compute_pass.set_bind_group( - 0, bind_group, [], 0, 999999 - ) # last 2 elements not used - compute_pass.insert_debug_marker("dispatch!") - compute_pass.dispatch_workgroups(nx, ny, nz) - compute_pass.pop_debug_group() - compute_pass.end() - command_encoder.copy_texture_to_buffer( - {"texture": texture2, "mip_level": 0, "origin": (0, 0, 0)}, - { - "buffer": buffer, - "offset": 0, - "bytes_per_row": bpp * nx, - "rows_per_image": ny, - }, - (nx, ny, nz), - ) - device.queue.submit([command_encoder.finish()]) - - # Read the current data of the output buffer - data2 = data1.__class__.from_buffer(device.queue.read_buffer(buffer)) - - # Numpy arrays are easier to 
work with - a1 = np.ctypeslib.as_array(data1).reshape(nz, ny, nx, nc) - a2 = np.ctypeslib.as_array(data2).reshape(nz, ny, nx, nc) - - # Validate! - for x in range(nx): - assert np.all(a2[:, :, x, 0] == a1[:, :, x, 0] + x) - if nc >= 2: - assert np.all(a2[:, :, :, 1] == a1[:, :, :, 1] + 1) - if nc >= 3: - assert np.all(a2[:, :, :, 2] == a1[:, :, :, 2] * 2) - if nc >= 4: - assert np.all(a2[:, :, :, 3] == a1[:, :, :, 3]) - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_errors.py b/tests/test_wgpu_native_errors.py deleted file mode 100644 index dfab5df..0000000 --- a/tests/test_wgpu_native_errors.py +++ /dev/null @@ -1,268 +0,0 @@ -import wgpu.utils - -from testutils import run_tests -from pytest import raises - - -dedent = lambda s: s.replace("\n ", "\n").strip() # noqa - - -def test_parse_shader_error1(caplog): - # test1: invalid attribute access - device = wgpu.utils.get_default_device() - - code = """ - struct VertexOutput { - @location(0) texcoord : vec2, - @builtin(position) position: vec4, - }; - - @vertex - fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { - var out: VertexOutput; - out.invalid_attr = vec4(0.0, 0.0, 1.0); - return out; - } - """ - - expected = """ - Validation Error - - Caused by: - In wgpuDeviceCreateShaderModule - - Shader '' parsing error: invalid field accessor `invalid_attr` - ┌─ wgsl:9:9 - │ - 9 │ out.invalid_attr = vec4(0.0, 0.0, 1.0); - │ ^^^^^^^^^^^^ invalid accessor - - - invalid field accessor `invalid_attr` - """ - - code = dedent(code) - expected = dedent(expected) - with raises(wgpu.GPUError) as err: - device.create_shader_module(code=code) - - error = err.value.message - assert error == expected, f"Expected:\n\n{expected}" - - -def test_parse_shader_error2(caplog): - # test2: grammar error, expected ',', not ';' - device = wgpu.utils.get_default_device() - - code = """ - struct VertexOutput { - @location(0) texcoord : vec2; - @builtin(position) position: vec4, - }; - """ - - expected = """ - Validation Error - - Caused by: - In wgpuDeviceCreateShaderModule - - Shader '' parsing error: expected ',', found ';' - ┌─ wgsl:2:38 - │ - 2 │ @location(0) texcoord : vec2; - │ ^ expected ',' - - - expected ',', found ';' - """ - - code = dedent(code) - expected = dedent(expected) - with raises(wgpu.GPUError) as err: - device.create_shader_module(code=code) - - error = err.value.message - assert error == expected, f"Expected:\n\n{expected}" - - -def test_parse_shader_error3(caplog): - # test3: grammar error, contains '\t' and (tab), unknown scalar type: 'f3' - device = wgpu.utils.get_default_device() - - code = """ - struct VertexOutput { - @location(0) texcoord : vec2, - @builtin(position) position: vec4, - }; - """ - - expected = """ - Validation Error - - Caused by: - In wgpuDeviceCreateShaderModule - - Shader '' parsing error: unknown scalar type: 'f3' - ┌─ wgsl:3:39 - │ - 3 │ @builtin(position) position: vec4, - │ ^^ unknown scalar type - │ - = note: Valid scalar types are f32, f64, i32, u32, bool - - - unknown scalar type: 'f3' - """ - - code = dedent(code) - expected = dedent(expected) - with raises(wgpu.GPUError) as err: - device.create_shader_module(code=code) - - error = err.value.message - assert error == expected, f"Expected:\n\n{expected}" - - -def test_parse_shader_error4(caplog): - # test4: no line info available - hopefully Naga produces better error messages soon? 
- device = wgpu.utils.get_default_device() - - code = """ - fn foobar() { - let m = mat2x2(0.0, 0.0, 0.0, 0.); - let scales = m[4]; - } - """ - - expected = """ - Validation Error - - Caused by: - In wgpuDeviceCreateShaderModule - - Shader validation error: - ┌─ :1:1 - │ - 1 │ ╭ fn foobar() { - 2 │ │ let m = mat2x2(0.0, 0.0, 0.0, 0.); - 3 │ │ let scales = m[4]; - │ │ ^^^^ naga::Expression [9] - │ ╰──────────────────────^ naga::Function [1] - - - Function [1] 'foobar' is invalid - Expression [9] is invalid - Type resolution failed - Index 4 is out of bounds for expression [7] - """ - - code = dedent(code) - expected = dedent(expected) - with raises(wgpu.GPUError) as err: - device.create_shader_module(code=code) - - error = err.value.message - assert error == expected, f"Expected:\n\n{expected}" - - -def test_validate_shader_error1(caplog): - # test1: Validation error, mat4x4 * vec3 - device = wgpu.utils.get_default_device() - - code = """ - struct VertexOutput { - @location(0) texcoord : vec2, - @builtin(position) position: vec3, - }; - - @vertex - fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { - var out: VertexOutput; - var matrics: mat4x4; - out.position = matrics * out.position; - return out; - } - """ - - expected1 = """Left: Load { pointer: [3] } of type Matrix { columns: Quad, rows: Quad, width: 4 }""" - expected2 = """Right: Load { pointer: [6] } of type Vector { size: Tri, kind: Float, width: 4 }""" - expected3 = """ - Validation Error - - Caused by: - In wgpuDeviceCreateShaderModule - - Shader validation error: - ┌─ :10:20 - │ - 10 │ out.position = matrics * out.position; - │ ^^^^^^^^^^^^^^^^^^^^^^ naga::Expression [8] - - - Entry point vs_main at Vertex is invalid - Expression [8] is invalid - Operation Multiply can't work with [5] and [7] - """ - - code = dedent(code) - expected3 = dedent(expected3) - with raises(wgpu.GPUError) as err: - device.create_shader_module(code=code) - - # skip error info - assert caplog.records[0].msg == expected1 - assert caplog.records[1].msg == expected2 - assert err.value.message.strip() == expected3, f"Expected:\n\n{expected3}" - - -def test_validate_shader_error2(caplog): - # test2: Validation error, multiple line error, return type mismatch - device = wgpu.utils.get_default_device() - - code = """ - struct Varyings { - @builtin(position) position : vec4, - @location(0) uv : vec2, - }; - - @vertex - fn fs_main(in: Varyings) -> @location(0) vec4 { - if (in.uv.x > 0.5) { - return vec3(1.0, 0.0, 1.0); - } else { - return vec3(0.0, 1.0, 1.0); - } - } - """ - - expected1 = """Returning Some(Vector { size: Tri, kind: Float, width: 4 }) where Some(Vector { size: Quad, kind: Float, width: 4 }) is expected""" - expected2 = """ - Validation Error - - Caused by: - In wgpuDeviceCreateShaderModule - - Shader validation error: - ┌─ :9:16 - │ - 9 │ return vec3(1.0, 0.0, 1.0); - │ ^^^^^^^^^^^^^^^^^^^^^^^^ naga::Expression [9] - - - Entry point fs_main at Vertex is invalid - The `return` value Some([9]) does not match the function return value - """ - - code = dedent(code) - expected2 = dedent(expected2) - with raises(wgpu.GPUError) as err: - device.create_shader_module(code=code) - - # skip error info - assert caplog.records[0].msg == expected1 - assert err.value.message.strip() == expected2, f"Expected:\n\n{expected2}" - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_query_set.py b/tests/test_wgpu_native_query_set.py deleted file mode 100644 index fe8d1f7..0000000 --- 
a/tests/test_wgpu_native_query_set.py +++ /dev/null @@ -1,151 +0,0 @@ -import wgpu.utils - -from testutils import run_tests, can_use_wgpu_lib -from pytest import mark - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_query_set(): - shader_source = """ - @group(0) @binding(0) - var data1: array; - - @group(0) @binding(1) - var data2: array; - - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i: u32 = index.x; - data2[i] = data1[i] / 2.0; - } - """ - - n = 1024 - data1 = memoryview(bytearray(n * 4)).cast("f") - - for i in range(n): - data1[i] = float(i) - - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device( - required_features=[wgpu.FeatureName.timestamp_query] - ) - - assert repr(device).startswith(" 0 and timestamps[1] > 0 and timestamps[1] > timestamps[0] - - out = device.queue.read_buffer(buffer2).cast("f") - result = out.tolist() - - # Perform the same division on the CPU - result_cpu = [a / 2.0 for a in data1] - - # Ensure results are the same - assert result == result_cpu - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_render.py b/tests/test_wgpu_native_render.py deleted file mode 100644 index 71f1b30..0000000 --- a/tests/test_wgpu_native_render.py +++ /dev/null @@ -1,629 +0,0 @@ -""" -Test render pipeline, by drawing a whole lot of orange squares ... -""" - -import ctypes -import numpy as np -import sys - -import wgpu -from pytest import skip -from testutils import run_tests, can_use_wgpu_lib, is_ci, get_default_device -from renderutils import render_to_texture, render_to_screen # noqa - - -if not can_use_wgpu_lib: - skip("Skipping tests that need the wgpu lib", allow_module_level=True) -elif is_ci and sys.platform == "win32": - skip("These tests fail on dx12 for some reason", allow_module_level=True) - - -default_vertex_shader = """ -@vertex -fn vs_main(@builtin(vertex_index) vertex_index : u32) -> @builtin(position) vec4 { - var positions: array, 4> = array, 4>( - vec3(-0.5, -0.5, 0.1), - vec3(-0.5, 0.5, 0.1), - vec3( 0.5, -0.5, 0.1), - vec3( 0.5, 0.5, 0.1), - ); - let p: vec3 = positions[vertex_index]; - return vec4(p, 1.0); -} -""" - - -# %% Simple square - - -def test_render_orange_square(): - """Render an orange square and check that there is an orange square.""" - - device = get_default_device() - - # NOTE: the 0.499 instead of 0.5 is to make sure the resulting value is 127. - # With 0.5 some drivers would produce 127 and others 128. 
- - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - shader_source = default_vertex_shader + fragment_shader - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args) - a = render_to_texture(*render_args, size=(64, 64)) - - # Check that the background is all zero - bg = a.copy() - bg[16:-16, 16:-16, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:-16, 16:-16, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -# %% Variations - - -def test_render_orange_square_indexed(): - """Render an orange square, using an index buffer.""" - - device = get_default_device() - - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - shader_source = default_vertex_shader + fragment_shader - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Index buffer - indices = (ctypes.c_int32 * 6)(0, 1, 2, 2, 1, 3) - ibo = device.create_buffer_with_data( - data=indices, - usage=wgpu.BufferUsage.INDEX, - ) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, topology=wgpu.PrimitiveTopology.triangle_list, ibo=ibo) - a = render_to_texture( - *render_args, - size=(64, 64), - topology=wgpu.PrimitiveTopology.triangle_list, - ibo=ibo, - ) - - # Check that the background is all zero - bg = a.copy() - bg[16:-16, 16:-16, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:-16, 16:-16, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -def test_render_orange_square_indirect(): - """Render an orange square and check that there is an orange square.""" - - device = get_default_device() - - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - shader_source = default_vertex_shader + fragment_shader - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Buffer with draw parameters for indirect draw call - params = (ctypes.c_int32 * 4)(4, 1, 0, 0) - indirect_buffer = device.create_buffer_with_data( - data=params, - usage=wgpu.BufferUsage.INDIRECT, - ) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, indirect_buffer=indirect_buffer) - a = render_to_texture(*render_args, size=(64, 64), indirect_buffer=indirect_buffer) - - # Check that the background is all zero - bg = a.copy() - bg[16:-16, 16:-16, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:-16, 16:-16, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -def test_render_orange_square_indexed_indirect(): - """Render an orange square, using an index buffer.""" - - device = get_default_device() - - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - 
shader_source = default_vertex_shader + fragment_shader - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Index buffer - indices = (ctypes.c_int32 * 6)(0, 1, 2, 2, 1, 3) - ibo = device.create_buffer_with_data( - data=indices, - usage=wgpu.BufferUsage.INDEX, - ) - - # Buffer with draw parameters for indirect draw call - params = (ctypes.c_int32 * 5)(6, 1, 0, 0, 0) - indirect_buffer = device.create_buffer_with_data( - data=params, - usage=wgpu.BufferUsage.INDIRECT, - ) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, topology=wgpu.PrimitiveTopology.triangle_list, ibo=ibo, indirect_buffer=indirect_buffer) - a = render_to_texture( - *render_args, - size=(64, 64), - topology=wgpu.PrimitiveTopology.triangle_list, - ibo=ibo, - indirect_buffer=indirect_buffer, - ) - - # Check that the background is all zero - bg = a.copy() - bg[16:-16, 16:-16, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:-16, 16:-16, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -def test_render_orange_square_vbo(): - """Render an orange square, using a VBO.""" - - device = get_default_device() - - shader_source = """ - @vertex - fn vs_main(@location(0) pos : vec2) -> @builtin(position) vec4 { - return vec4(pos, 0.0, 1.0); - } - - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Vertex buffer - pos_data = (ctypes.c_float * 8)(-0.5, -0.5, -0.5, +0.5, +0.5, -0.5, +0.5, +0.5) - vbo = device.create_buffer_with_data( - data=pos_data, - usage=wgpu.BufferUsage.VERTEX, - ) - - # Vertex buffer views - vbo_view = { - "array_stride": 4 * 2, - "step_mode": "vertex", - "attributes": [ - { - "format": wgpu.VertexFormat.float32x2, - "offset": 0, - "shader_location": 0, - }, - ], - } - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, vbos=[vbo], vbo_views=[vbo_view]) - a = render_to_texture(*render_args, size=(64, 64), vbos=[vbo], vbo_views=[vbo_view]) - - # Check that the background is all zero - bg = a.copy() - bg[16:-16, 16:-16, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:-16, 16:-16, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -def test_render_orange_square_color_attachment1(): - """Render an orange square on a blue background, testing the load_op.""" - - device = get_default_device() - - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - shader_source = default_vertex_shader + fragment_shader - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - ca = { - "resolve_target": None, - "clear_value": (0, 0, 0.8, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, color_attachment=ca) - a = render_to_texture(*render_args, size=(64, 64), color_attachment=ca) - - # Check the blue background - assert 
np.all(a[:16, :16, 2] == 204) - assert np.all(a[:16, -16:, 2] == 204) - assert np.all(a[-16:, :16, 2] == 204) - assert np.all(a[-16:, -16:, 2] == 204) - - # Check the square - sq = a[16:-16, 16:-16, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -def test_render_orange_square_color_attachment2(): - """Render an orange square on a blue background, testing the LoadOp.load, - though in this case the result is the same as the normal square test. - """ - - device = get_default_device() - - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - shader_source = default_vertex_shader + fragment_shader - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - ca = { - "resolve_target": None, - "load_op": wgpu.LoadOp.load, - "store_op": wgpu.StoreOp.store, - } - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, color_attachment=ca) - a = render_to_texture(*render_args, size=(64, 64), color_attachment=ca) - - # Check the background - bg = a.copy() - bg[16:-16, 16:-16, :] = 0 - # assert np.all(bg == 0) - # Actually, it seems unpredictable what the bg is if we dont clear it? - - # Check the square - sq = a[16:-16, 16:-16, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -# %% Viewport and stencil - - -def test_render_orange_square_viewport(): - """Render an orange square, in a sub-viewport of the rendered area.""" - - device = get_default_device() - - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - shader_source = default_vertex_shader + fragment_shader - - def cb(renderpass): - renderpass.set_viewport(10, 20, 32, 32, 0, 1) - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, renderpass_callback=cb) - a = render_to_texture(*render_args, size=(64, 64), renderpass_callback=cb) - - # Check that the background is all zero - bg = a.copy() - bg[20 + 8 : 52 - 8, 10 + 8 : 42 - 8, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[20 + 8 : 52 - 8, 10 + 8 : 42 - 8, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -def test_render_orange_square_scissor(): - """Render an orange square, but scissor half the screen away.""" - - device = get_default_device() - - fragment_shader = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - shader_source = default_vertex_shader + fragment_shader - - def cb(renderpass): - renderpass.set_scissor_rect(0, 0, 32, 32) - # Alse set blend color. Does not change outout, but covers the call. 
- renderpass.set_blend_constant((0, 0, 0, 1)) - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, renderpass_callback=cb) - a = render_to_texture(*render_args, size=(64, 64), renderpass_callback=cb) - - # Check that the background is all zero - bg = a.copy() - bg[16:32, 16:32, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:32, 16:32, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -def test_render_orange_square_depth16unorm(): - """Render an orange square, but disable half of it using a depth test using 16 bits.""" - _render_orange_square_depth(wgpu.TextureFormat.depth16unorm) - - -def test_render_orange_square_depth24plus_stencil8(): - """Render an orange square, but disable half of it using a depth test using 24 bits.""" - _render_orange_square_depth(wgpu.TextureFormat.depth24plus_stencil8) - - -def test_render_orange_square_depth32float(): - """Render an orange square, but disable half of it using a depth test using 32 bits.""" - _render_orange_square_depth(wgpu.TextureFormat.depth32float) - - -def _render_orange_square_depth(depth_stencil_tex_format): - device = get_default_device() - - shader_source = """ - @vertex - fn vs_main(@builtin(vertex_index) vertex_index : u32) -> @builtin(position) vec4 { - var positions: array, 4> = array, 4>( - vec3(-0.5, -0.5, 0.0), - vec3(-0.5, 0.5, 0.0), - vec3( 0.5, -0.5, 0.2), - vec3( 0.5, 0.5, 0.2), - ); - let p: vec3 = positions[vertex_index]; - return vec4(p, 1.0); - } - - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - - def cb(renderpass): - renderpass.set_stencil_reference(42) - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Create dept-stencil texture - depth_stencil_texture = device.create_texture( - size=(64, 64, 1), # when rendering to texture - # size=(640, 480, 1), # when rendering to screen - dimension=wgpu.TextureDimension.d2, - format=depth_stencil_tex_format, - usage=wgpu.TextureUsage.RENDER_ATTACHMENT, - ) - - depth_stencil_state = dict( - format=depth_stencil_tex_format, - depth_write_enabled=True, - depth_compare=wgpu.CompareFunction.less_equal, - # stencil_front={ - # "compare": wgpu.CompareFunction.equal, - # "fail_op": wgpu.StencilOperation.keep, - # "depth_fail_op": wgpu.StencilOperation.keep, - # "pass_op": wgpu.StencilOperation.keep, - # }, - # stencil_back={ - # "compare": wgpu.CompareFunction.equal, - # "fail_op": wgpu.StencilOperation.keep, - # "depth_fail_op": wgpu.StencilOperation.keep, - # "pass_op": wgpu.StencilOperation.keep, - # }, - stencil_read_mask=0, - stencil_write_mask=0, - depth_bias=0, - depth_bias_slope_scale=0.0, - depth_bias_clamp=0.0, - ) - - depth_stencil_attachment = dict( - view=depth_stencil_texture.create_view(), - depth_clear_value=0.1, - depth_load_op=wgpu.LoadOp.clear, - depth_store_op=wgpu.StoreOp.store, - stencil_load_op=wgpu.LoadOp.load, - stencil_store_op=wgpu.StoreOp.store, - ) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args, renderpass_callback=cb, depth_stencil_state=depth_stencil_state, depth_stencil_attachment=depth_stencil_attachment) - a = render_to_texture( - *render_args, 
- size=(64, 64), - renderpass_callback=cb, - depth_stencil_state=depth_stencil_state, - depth_stencil_attachment=depth_stencil_attachment, - ) - - # Check that the background is all zero - bg = a.copy() - bg[16:-16, 16:32, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:-16, 16:32, :] - assert np.all(sq[:, :, 0] == 255) # red - assert np.all(sq[:, :, 1] == 127) # green - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -# %% Not squares - - -def test_render_orange_dots(): - """Render four orange dots and check that there are four orange square dots.""" - - device = get_default_device() - - shader_source = """ - struct VertexOutput { - @builtin(position) position: vec4, - //@builtin(pointSize) point_size: f32, - }; - - @vertex - fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { - var positions: array, 4> = array, 4>( - vec3(-0.5, -0.5, 0.0), - vec3(-0.5, 0.5, 0.0), - vec3( 0.5, -0.5, 0.2), - vec3( 0.5, 0.5, 0.2), - ); - var out: VertexOutput; - out.position = vec4(positions[vertex_index], 1.0); - //out.point_size = 16.0; - return out; - } - - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.499, 0.0, 1.0); - } - """ - - # Bindings and layout - bind_group = None - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - top = wgpu.PrimitiveTopology.point_list - # render_to_screen(*render_args, topology=top) - a = render_to_texture(*render_args, size=(64, 64), topology=top) - - # Check that the background is all zero - bg = a.copy() - bg[8:24, 8:24, :] = 0 - bg[8:24, 40:56, :] = 0 - bg[40:56, 8:24, :] = 0 - bg[40:56, 40:56, :] = 0 - assert np.all(bg == 0) - - # Check the square - # Ideally we'd want to set the point_size (gl_PointSize) to 16 but - # this is not supported in WGPU, see https://github.com/gpuweb/gpuweb/issues/332 - # So our points are 1px - for dot in ( - a[15:16, 15:16, :], - a[15:16, 47:48, :], - a[47:48, 15:16, :], - a[47:48, 47:48, :], - ): - assert np.all(dot[:, :, 0] == 255) # red - assert np.all(dot[:, :, 1] == 127) # green - assert np.all(dot[:, :, 2] == 0) # blue - assert np.all(dot[:, :, 3] == 255) # alpha - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_render_tex.py b/tests/test_wgpu_native_render_tex.py deleted file mode 100644 index 8096d87..0000000 --- a/tests/test_wgpu_native_render_tex.py +++ /dev/null @@ -1,566 +0,0 @@ -""" -Test render pipeline by rendering to a texture. 
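Why only half of the square survives in the depth tests above: the quad's left-hand vertices sit at z = 0.0 and its right-hand vertices at z = 0.2, the depth attachment is cleared to 0.1, and the compare function is less_equal. A minimal sketch of that comparison in plain Python (no GPU involved; the numbers mirror the shader and attachment settings used above):

    clear_value = 0.1  # depth_clear_value of the depth-stencil attachment

    def passes_depth_test(fragment_z, stored_z=clear_value):
        # wgpu.CompareFunction.less_equal keeps a fragment if its depth <= the stored depth
        return fragment_z <= stored_z

    assert passes_depth_test(0.0)      # left half of the square is kept
    assert not passes_depth_test(0.2)  # right half is discarded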
-""" - -import ctypes -import numpy as np -import sys - -import wgpu -from pytest import skip -from testutils import run_tests, get_default_device -from testutils import can_use_wgpu_lib, is_ci -from renderutils import upload_to_texture, render_to_texture, render_to_screen # noqa - - -if not can_use_wgpu_lib: - skip("Skipping tests that need the wgpu lib", allow_module_level=True) -elif is_ci and sys.platform == "win32": - skip("These tests fail on dx12 for some reason", allow_module_level=True) - - -default_vertex_shader = """ -struct VertexOutput { - @location(0) texcoord : vec2, - @builtin(position) position: vec4, -}; - -@vertex -fn vs_main(@builtin(vertex_index) vertex_index : u32) -> VertexOutput { - var positions: array, 4> = array, 4>( - vec2(-0.5, -0.5), - vec2(-0.5, 0.5), - vec2( 0.5, -0.5), - vec2( 0.5, 0.5), - ); - let p: vec2 = positions[vertex_index]; - var out: VertexOutput; - out.position = vec4(p, 0.0, 1.0); - out.texcoord = p + 0.5; - return out; -} -""" - - -def _create_data(v1, v2, v3, v4): - assert len(v1) == len(v2) - assert len(v1) == len(v3) - assert len(v1) == len(v4) - data = [] - for y in range(128): - data.extend(list(v1) * 128) - data.extend(list(v2) * 128) - for y in range(128): - data.extend(list(v3) * 128) - data.extend(list(v4) * 128) - return data - - -# %% rgba textures - - -def test_render_textured_square_rgba8unorm(): - """Test a texture with format rgba8unorm.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let sample = textureSample(r_tex, r_sampler, in.texcoord); - return sample; - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data( - (50, 50, 0, 255), (100, 100, 0, 255), (150, 150, 0, 255), (200, 200, 0, 255) - ) - texture_data = (ctypes.c_uint8 * (4 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rgba8unorm, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_rgba8uint(): - """Test a texture with format rgba8uint.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - // let sample = textureSample(r_tex, r_sampler, in.texcoord); - let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); - let sample = textureLoad(r_tex, texcoords_u, 0); - return vec4(sample) / 255.0; - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data( - (50, 50, 0, 255), (100, 100, 0, 255), (150, 150, 0, 255), (200, 200, 0, 255) - ) - texture_data = (ctypes.c_uint8 * (4 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rgba8uint, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_rgba16sint(): - """Test a texture with format rgba16sint.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - // let sample = textureSample(r_tex, r_sampler, in.texcoord); - let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); - let sample = textureLoad(r_tex, texcoords_u, 0); - return vec4(sample) / 255.0; - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data( - (50, 50, 0, 255), (100, 100, 0, 255), (150, 
150, 0, 255), (200, 200, 0, 255) - ) - texture_data = (ctypes.c_int16 * (4 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rgba16sint, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_rgba32float(): - """Test a texture with format rgba32float.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let sample = textureSample(r_tex, r_sampler, in.texcoord); - return sample / 255.0; - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data( - (50, 50, 0, 255), (100, 100, 0, 255), (150, 150, 0, 255), (200, 200, 0, 255) - ) - texture_data = (ctypes.c_float * (4 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rgba32float, (nx, ny, nz), texture_data - ) - - -# %% rg textures - - -def test_render_textured_square_rg8unorm(): - """Test a texture with format rg8unorm. - The GPU considers blue to be 0 and alpha to be 1. - """ - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let sample = textureSample(r_tex, r_sampler, in.texcoord); - return sample; - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) - texture_data = (ctypes.c_ubyte * (2 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rg8unorm, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_rg8uint(): - """Test a texture with format rg8uint. - The GPU considers blue to be 0 and alpha to be 1. - """ - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - // let sample = textureSample(r_tex, r_sampler, in.texcoord); - let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); - let sample = textureLoad(r_tex, texcoords_u, 0); - return vec4(f32(sample.r) / 255.0, f32(sample.g) / 255.0, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) - texture_data = (ctypes.c_ubyte * (2 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rg8uint, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_rg16sint(): - """Test a texture with format rg16sint. - The GPU considers blue to be 0 and alpha to be 1. 
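As the docstrings in this group note, sampling a two-channel rg texture still yields a four-component value: the missing blue channel reads as 0 and alpha as 1. A trivial sketch of that expansion (plain Python, mirroring what the fragment shaders above reconstruct by hand):

    def sampled_rg_as_rgba(r, g):
        # Components absent from the texture format get default values: 0.0 for color, 1.0 for alpha.
        return (r, g, 0.0, 1.0)

    assert sampled_rg_as_rgba(0.2, 0.4) == (0.2, 0.4, 0.0, 1.0)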
- """ - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - // let sample = textureSample(r_tex, r_sampler, in.texcoord); - let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); - let sample = textureLoad(r_tex, texcoords_u, 0); - return vec4(f32(sample.r) / 255.0, f32(sample.g) / 255.0, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) - texture_data = (ctypes.c_int16 * (2 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rg16sint, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_rg32float(): - """Test a texture with format rg32float. - The GPU considers blue to be 0 and alpha to be 1. - """ - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let sample = textureSample(r_tex, r_sampler, in.texcoord); - return vec4(sample.rg / 255.0, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50, 50), (100, 100), (150, 150), (200, 200)) - texture_data = (ctypes.c_float * (2 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.rg32float, (nx, ny, nz), texture_data - ) - - -# %% r textures - - -def test_render_textured_square_r8unorm(): - """Test a texture with format r8unorm.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let sample = textureSample(r_tex, r_sampler, in.texcoord); - let val = sample.r; - return vec4(val, val, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50,), (100,), (150,), (200,)) - texture_data = (ctypes.c_uint8 * (1 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.r8unorm, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_r8uint(): - """Test a texture with format r8uint.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); - let sample = textureLoad(r_tex, texcoords_u, 0); - let val = f32(sample.r) / 255.0; - return vec4(val, val, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50,), (100,), (150,), (200,)) - texture_data = (ctypes.c_uint8 * (1 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.r8uint, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_r16sint(): - """Test a texture with format r16sint. Because e.g. 
CT data.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); - let sample = textureLoad(r_tex, texcoords_u, 0); - let val = f32(sample.r) / 255.0; - return vec4(val, val, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50,), (100,), (150,), (200,)) - texture_data = (ctypes.c_int16 * (1 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.r16sint, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_r32sint(): - """Test a texture with format r32sint. Because e.g. CT data.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let texcoords_u = vec2(in.texcoord * vec2(textureDimensions(r_tex))); - let sample = textureLoad(r_tex, texcoords_u, 0); - let val = f32(sample.r) / 255.0; - return vec4(val, val, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50,), (100,), (150,), (200,)) - texture_data = (ctypes.c_int32 * (1 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.r32sint, (nx, ny, nz), texture_data - ) - - -def test_render_textured_square_r32float(): - """Test a texture with format r32float.""" - - fragment_shader = """ - @group(0) @binding(0) - var r_tex: texture_2d; - @group(0) @binding(1) - var r_sampler: sampler; - - @fragment - fn fs_main(in: VertexOutput, ) -> @location(0) vec4 { - let sample = textureSample(r_tex, r_sampler, in.texcoord); - let val = sample.r / 255.0; - return vec4(val, val, 0.0, 1.0); - } - """ - - # Create texture data - nx, ny, nz = 256, 256, 1 - x = _create_data((50,), (100,), (150,), (200,)) - texture_data = (ctypes.c_float * (1 * nx * ny))(*x) - - # Render and validate - render_textured_square( - fragment_shader, wgpu.TextureFormat.r32float, (nx, ny, nz), texture_data - ) - - -# %% Utils - - -def render_textured_square(fragment_shader, texture_format, texture_size, texture_data): - """Render, and test the result. The resulting image must be a - gradient on R and B, zeros on G and ones on A. - """ - nx, ny, nz = texture_size - - device = get_default_device() - - shader_source = default_vertex_shader + fragment_shader - - # Create texture - texture = device.create_texture( - size=(nx, ny, nz), - dimension=wgpu.TextureDimension.d2, - format=texture_format, - usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST, - ) - upload_to_texture(device, texture, texture_data, nx, ny, nz) - - texture_view = texture.create_view() - # or: - texture_view = texture.create_view( - format=texture_format, - dimension=wgpu.TextureDimension.d2, - ) - - sampler = device.create_sampler(mag_filter="nearest", min_filter="nearest") - - # Default sampler type. - # Note that integer texture types cannot even use a sampler. 
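The sample-type / sampler-type branching that follows is easy to get wrong, so here is the same decision condensed into one helper. A sketch, assuming wgpu is imported as in this module and that formats are the plain string values from wgpu.TextureFormat:

    def pick_binding_types(texture_format):
        # Default: a filtering sampler with a float sample type (e.g. the *norm formats).
        sampler_type = wgpu.SamplerBindingType.filtering
        if texture_format.endswith("norm"):
            sample_type = wgpu.TextureSampleType.float
        elif texture_format.endswith("float"):
            # float32 textures are not filterable unless an extra feature is enabled
            sample_type = wgpu.TextureSampleType.unfilterable_float
            sampler_type = wgpu.SamplerBindingType.non_filtering
        elif "uint" in texture_format:
            # integer textures cannot be sampled at all; the shader uses textureLoad instead
            sample_type = wgpu.TextureSampleType.uint
        else:
            sample_type = wgpu.TextureSampleType.sint
        return sample_type, sampler_type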
- sampler_type = wgpu.SamplerBindingType.filtering - - # Determine texture component type from the format - if texture_format.endswith("norm"): - # We can use a filtering sampler - texture_sample_type = wgpu.TextureSampleType.float - elif texture_format.endswith("float"): - # On Vanilla wgpu, float32 textures cannot use a filtering - # (interpolating) texture, (need to enable a feature for that). - # Without it, we need to use a non-filterin sampler. - texture_sample_type = wgpu.TextureSampleType.unfilterable_float - sampler_type = wgpu.SamplerBindingType.non_filtering - elif "uint" in texture_format: - # Cannot even use a sampler (use textureLoad instwad of textureSample) - texture_sample_type = wgpu.TextureSampleType.uint - else: - # Dito - texture_sample_type = wgpu.TextureSampleType.sint - - # Bindings and layout - bindings = [ - {"binding": 0, "resource": texture_view}, - {"binding": 1, "resource": sampler}, - ] - binding_layouts = [ - { - "binding": 0, - "visibility": wgpu.ShaderStage.FRAGMENT, - "texture": { - "sample_type": texture_sample_type, - "view_dimension": wgpu.TextureViewDimension.d2, - }, - }, - { - "binding": 1, - "visibility": wgpu.ShaderStage.FRAGMENT, - "sampler": { - "type": sampler_type, - }, - }, - ] - bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) - pipeline_layout = device.create_pipeline_layout( - bind_group_layouts=[bind_group_layout] - ) - bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - - # Render - render_args = device, shader_source, pipeline_layout, bind_group - # render_to_screen(*render_args) - a = render_to_texture(*render_args, size=(64, 64)) - - # print(a.max(), a[:,:,0].max()) - - # Check that the background is all zero - bg = a.copy() - bg[16:-16, 16:-16, :] = 0 - assert np.all(bg == 0) - - # Check the square - sq = a[16:-16, 16:-16, :] - ref1 = [ - [150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150], - [150, 150, 150, 200, 200, 200], - [200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200], - ] - ref2 = [ - [150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150], - [150, 150, 150, 50, 50, 50], - [50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50], - ] - ref1, ref2 = sum(ref1, []), sum(ref2, []) - - assert np.allclose(sq[0, :, 0], ref1, atol=1) - assert np.allclose(sq[:, 0, 0], ref2, atol=1) - assert np.allclose(sq[0, :, 1], ref1, atol=1) - assert np.allclose(sq[:, 0, 1], ref2, atol=1) - assert np.all(sq[:, :, 2] == 0) # blue - assert np.all(sq[:, :, 3] == 255) # alpha - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests/test_wgpu_native_texture.py b/tests/test_wgpu_native_texture.py deleted file mode 100644 index 6bd300e..0000000 --- a/tests/test_wgpu_native_texture.py +++ /dev/null @@ -1,285 +0,0 @@ -import random -import ctypes - -import wgpu.utils -import numpy as np - -from testutils import run_tests, can_use_wgpu_lib, iters_equal -from pytest import mark, raises - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_do_a_copy_roundtrip(): - # Let's take some data, and copy it to buffer to texture to - # texture to buffer to buffer and back to CPU. 
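The roundtrip exercised below (CPU to buffer to texture to texture to buffer to buffer and back) is built from the two primitives at either end: queue.write_buffer and queue.read_buffer. A minimal buffer-only roundtrip as a sketch, using only calls that also appear in this test:

    import numpy as np
    import wgpu.utils

    device = wgpu.utils.get_default_device()
    data_in = np.arange(64, dtype=np.float32)

    buf = device.create_buffer(
        size=data_in.nbytes,
        usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC,
    )
    device.queue.write_buffer(buf, 0, data_in)  # upload
    data_out = np.frombuffer(device.queue.read_buffer(buf), dtype=np.float32)  # download
    assert np.all(data_in == data_out)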
- - device = wgpu.utils.get_default_device() - - nx, ny, nz = 128, 1, 1 - data1 = np.random.random(size=nx * ny * nz).astype(np.float32) - nbytes = data1.nbytes - bpp = nbytes // (nx * ny * nz) - texture_format = wgpu.TextureFormat.r32float - texture_dim = wgpu.TextureDimension.d1 - - # Create buffers and textures - stubusage = wgpu.TextureUsage.STORAGE_BINDING - buf1 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - tex2 = device.create_texture( - size=(nx, ny, nz), - dimension=texture_dim, - format=texture_format, - usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST | stubusage, - ) - tex3 = device.create_texture( - size=(nx, ny, nz), - dimension=texture_dim, - format=texture_format, - usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST | stubusage, - ) - buf4 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_SRC | wgpu.BufferUsage.COPY_DST - ) - buf5 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - - # Check texture stats - assert tex2.size == (nx, ny, nz) - assert tex2.mip_level_count == 1 - assert tex2.sample_count == 1 - assert tex2.dimension == wgpu.TextureDimension.d1 - assert tex2.format == texture_format - assert tex2.usage & wgpu.TextureUsage.COPY_SRC - assert tex2.usage & wgpu.TextureUsage.COPY_DST - assert tex2.create_view().texture is tex2 - - # Upload from CPU to buffer - # assert buf1.state == "unmapped" - # mapped_data = buf1.map(wgpu.MapMode.WRITE) - # assert buf1.state == "mapped" - # mapped_data.cast("f")[:] = data1 - # buf1.unmap() - # assert buf1.state == "unmapped" - device.queue.write_buffer(buf1, 0, data1) - - # Copy from buffer to texture - command_encoder = device.create_command_encoder() - command_encoder.copy_buffer_to_texture( - {"buffer": buf1, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, - {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, - (nx, ny, nz), - ) - device.queue.submit([command_encoder.finish()]) - # Copy from texture to texture - command_encoder = device.create_command_encoder() - command_encoder.copy_texture_to_texture( - {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, - {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, - (nx, ny, nz), - ) - device.queue.submit([command_encoder.finish()]) - # Copy from texture to buffer - command_encoder = device.create_command_encoder() - command_encoder.copy_texture_to_buffer( - {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, - {"buffer": buf4, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, - (nx, ny, nz), - ) - device.queue.submit([command_encoder.finish()]) - # Copy from buffer to buffer - command_encoder = device.create_command_encoder() - command_encoder.copy_buffer_to_buffer(buf4, 0, buf5, 0, nbytes) - device.queue.submit([command_encoder.finish()]) - - # Download from buffer to CPU - # assert buf5.state == "unmapped" - # assert buf5.map_mode == 0 - # result_data = buf5.map(wgpu.MapMode.READ) # a memoryview - # assert buf5.state == "mapped" - # assert buf5.map_mode == wgpu.MapMode.READ - # buf5.unmap() - # assert buf5.state == "unmapped" - result_data = device.queue.read_buffer(buf5) - - # CHECK! 
- data2 = np.frombuffer(result_data, dtype=np.float32) - assert np.all(data1 == data2) - - # Do another round-trip, but now using a single pass - data3 = data1 + 1 - assert np.all(data1 != data3) - - # Upload from CPU to buffer - # assert buf1.state == "unmapped" - # assert buf1.map_mode == 0 - # mapped_data = buf1.map(wgpu.MapMode.WRITE) - # assert buf1.state == "mapped" - # assert buf1.map_mode == wgpu.MapMode.WRITE - # mapped_data.cast("f")[:] = data3 - # buf1.unmap() - # assert buf1.state == "unmapped" - # assert buf1.map_mode == 0 - device.queue.write_buffer(buf1, 0, data3) - - # Copy from buffer to texture - command_encoder = device.create_command_encoder() - command_encoder.copy_buffer_to_texture( - {"buffer": buf1, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, - {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, - (nx, ny, nz), - ) - # Copy from texture to texture - command_encoder.copy_texture_to_texture( - {"texture": tex2, "mip_level": 0, "origin": (0, 0, 0)}, - {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, - (nx, ny, nz), - ) - # Copy from texture to buffer - command_encoder.copy_texture_to_buffer( - {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, - {"buffer": buf4, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, - (nx, ny, nz), - ) - - # Copy from buffer to buffer - command_encoder.copy_buffer_to_buffer(buf4, 0, buf5, 0, nbytes) - device.queue.submit([command_encoder.finish()]) - - # Download from buffer to CPU - # assert buf5.state == "unmapped" - # result_data = buf5.map(wgpu.MapMode.READ) # always an uint8 array - # assert buf5.state == "mapped" - # buf5.unmap() - # assert buf5.state == "unmapped" - result_data = device.queue.read_buffer(buf5) - - # CHECK! - data4 = np.frombuffer(result_data, dtype=np.float32) - assert np.all(data3 == data4) - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_write_texture1(): - device = wgpu.utils.get_default_device() - - nx, ny, nz = 128, 1, 1 - data1 = memoryview(np.random.random(size=nx).astype(np.float32)) - bpp = data1.nbytes // (nx * ny * nz) - texture_format = wgpu.TextureFormat.r32float - texture_dim = wgpu.TextureDimension.d1 - - # Create buffers and textures - tex3 = device.create_texture( - size=(nx, ny, nz), - dimension=texture_dim, - format=texture_format, - usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST, - ) - buf4 = device.create_buffer( - size=data1.nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - - # Upload from CPU to texture - command_encoder = device.create_command_encoder() - device.queue.write_texture( - {"texture": tex3}, - data1, - {"bytes_per_row": bpp * nx, "rows_per_image": ny}, - (nx, ny, nz), - ) - # device.queue.submit([]) -> call further down - - # Copy from texture to buffer - command_encoder.copy_texture_to_buffer( - {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, - {"buffer": buf4, "offset": 0, "bytes_per_row": bpp * nx, "rows_per_image": ny}, - (nx, ny, nz), - ) - device.queue.submit([command_encoder.finish()]) - - # Download from buffer to CPU - data2 = device.queue.read_buffer(buf4).cast("f") - assert data1 == data2 - - # That last step can also be done easier - data3 = device.queue.read_texture( - { - "texture": tex3, - }, - {"bytes_per_row": bpp * nx}, - (nx, ny, nz), - ).cast("f") - assert data1 == data3 - - -@mark.skipif(not can_use_wgpu_lib, reason="Needs wgpu lib") -def test_write_texture2(): - device = wgpu.utils.get_default_device() - - nx, ny, nz = 100, 1, 1 - data0 = 
(ctypes.c_float * nx)(*[random.random() for i in range(nx * ny * nz)]) - data1 = (ctypes.c_float * nx)() - nbytes = ctypes.sizeof(data1) - bpp = nbytes // (nx * ny * nz) - texture_format = wgpu.TextureFormat.r32float - texture_dim = wgpu.TextureDimension.d1 - - # Create buffers and textures - tex3 = device.create_texture( - size=(nx, ny, nz), - dimension=texture_dim, - format=texture_format, - usage=wgpu.TextureUsage.COPY_SRC | wgpu.TextureUsage.COPY_DST, - ) - buf4 = device.create_buffer( - size=nbytes, usage=wgpu.BufferUsage.COPY_DST | wgpu.BufferUsage.COPY_SRC - ) - - for i in range(len(data1)): - data1[i] = data0[i] - - # Upload from CPU to texture - command_encoder = device.create_command_encoder() - device.queue.write_texture( - {"texture": tex3}, - data1, - {"bytes_per_row": bpp * nx, "rows_per_image": ny}, - (nx, ny, nz), - ) - # device.queue.submit([]) -> call further down - - # Invalidate the data now, to show that write_texture has made a copy - for i in range(len(data1)): - data1[i] = 1 - - # Copy from texture to buffer - - # FAIL! because bytes_per_row is not multiple of 256! - with raises(ValueError): - command_encoder.copy_texture_to_buffer( - {"texture": tex3, "mip_level": 0, "origin": (0, 0, 0)}, - { - "buffer": buf4, - "offset": 0, - "bytes_per_row": bpp * nx, - "rows_per_image": ny, - }, - (nx, ny, nz), - ) - - # Download from texture to CPU (via a temp buffer) - # No requirent on bytes_per_row! - data2 = device.queue.read_texture( - {"texture": tex3}, - {"bytes_per_row": bpp * nx}, - (nx, ny, nz), - ) - data2 = data1.__class__.from_buffer(data2) - - assert iters_equal(data0, data2) - - -if __name__ == "__main__": - run_tests(globals()) diff --git a/tests_mem/test_gui_glfw.py b/tests_mem/test_gui_glfw.py deleted file mode 100644 index 5f4f9b6..0000000 --- a/tests_mem/test_gui_glfw.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Test creation of GLFW canvas windows. -""" - -import gc -import weakref -import asyncio - -import pytest -import testutils # noqa -from testutils import create_and_release, can_use_glfw, can_use_wgpu_lib -from test_gui_offscreen import make_draw_func_for_canvas - - -if not can_use_wgpu_lib: - pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) -if not can_use_glfw: - pytest.skip("Need glfw for this test", allow_module_level=True) - -loop = asyncio.get_event_loop_policy().get_event_loop() -if loop.is_running(): - pytest.skip("Asyncio loop is running", allow_module_level=True) - - -async def stub_event_loop(): - pass - - -@create_and_release -def test_release_canvas_context(n): - # Test with GLFW canvases. - - # Note: in a draw, the textureview is obtained (thus creating a - # Texture and a TextureView, but these are released in present(), - # so we don't see them in the counts. - - from wgpu.gui.glfw import WgpuCanvas # noqa - - yield {} - - canvases = weakref.WeakSet() - - for i in range(n): - c = WgpuCanvas() - canvases.add(c) - c.request_draw(make_draw_func_for_canvas(c)) - loop.run_until_complete(stub_event_loop()) - yield c.get_context() - - # Need some shakes to get all canvas refs gone. 
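All of these canvas tests use the same leak-detection pattern: hold only weak references to the created canvases, drop the strong references, give the garbage collector (and, for GUI backends, the event loop) a few rounds, and then assert that the weak set is empty. A stripped-down sketch of the idea, with a plain Python class standing in for a canvas:

    import gc
    import weakref

    class FakeCanvas:  # stand-in for a real canvas class
        pass

    live = weakref.WeakSet()
    for _ in range(4):
        c = FakeCanvas()
        live.add(c)
    del c
    gc.collect()  # CPython usually needs one pass; PyPy may need a few more
    assert not live, f"still {len(live)} canvases alive"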
- del c - loop.run_until_complete(stub_event_loop()) - gc.collect() - loop.run_until_complete(stub_event_loop()) - gc.collect() - - # Check that the canvas objects are really deleted - assert not canvases, f"Still {len(canvases)} canvases" - - -if __name__ == "__main__": - # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run - - test_release_canvas_context() diff --git a/tests_mem/test_gui_offscreen.py b/tests_mem/test_gui_offscreen.py deleted file mode 100644 index 0dd2e38..0000000 --- a/tests_mem/test_gui_offscreen.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -Test creation of offscreen canvas windows. -""" - -import gc -import weakref - -import wgpu -import pytest -import testutils # noqa -from testutils import can_use_wgpu_lib, create_and_release, is_pypy - - -if not can_use_wgpu_lib: - pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) - - -DEVICE = wgpu.utils.get_default_device() - - -def make_draw_func_for_canvas(canvas): - """Create a draw function for the given canvas, - so that we can really present something to a canvas being tested. - """ - ctx = canvas.get_context() - ctx.configure(device=DEVICE, format="bgra8unorm-srgb") - - def draw(): - ctx = canvas.get_context() - command_encoder = DEVICE.create_command_encoder() - current_texture_view = ctx.get_current_texture().create_view() - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture_view, - "resolve_target": None, - "clear_value": (1, 1, 1, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - render_pass.end() - DEVICE.queue.submit([command_encoder.finish()]) - - return draw - - -@create_and_release -def test_release_canvas_context(n): - # Test with offscreen canvases. A context is created, but not a wgpu-native surface. - - # Note: the offscreen canvas keeps the render-texture alive, since it - # is used to e.g. download the resulting image, and who knows how the - # user want to use the result. The context does drop its ref to the - # textures, which is why we don't see textures in the measurements. - - from wgpu.gui.offscreen import WgpuCanvas - - yield { - "expected_counts_after_create": { - "CanvasContext": (n, 0), - }, - } - - canvases = weakref.WeakSet() - for i in range(n): - c = WgpuCanvas() - canvases.add(c) - c.request_draw(make_draw_func_for_canvas(c)) - c.draw() - yield c.get_context() - - del c - gc.collect() - if is_pypy: - gc.collect() # Need a bit more on pypy :) - gc.collect() - - # Check that the canvas objects are really deleted - assert not canvases - - -TEST_FUNCS = [test_release_canvas_context] - - -if __name__ == "__main__": - # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run - - test_release_canvas_context() diff --git a/tests_mem/test_gui_qt.py b/tests_mem/test_gui_qt.py deleted file mode 100644 index 2804b4c..0000000 --- a/tests_mem/test_gui_qt.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Test creation of Qt canvas windows. -""" - -import gc -import weakref - -import pytest -import testutils # noqa -from testutils import create_and_release, can_use_pyside6, can_use_wgpu_lib -from test_gui_offscreen import make_draw_func_for_canvas - - -if not can_use_wgpu_lib: - pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) -if not can_use_pyside6: - pytest.skip("Need pyside6 for this test", allow_module_level=True) - - -@create_and_release -def test_release_canvas_context(n): - # Test with PySide canvases. 
- - # Note: in a draw, the textureview is obtained (thus creating a - # Texture and a TextureView, but these are released in present(), - # so we don't see them in the counts. - - import PySide6 # noqa - from wgpu.gui.qt import WgpuCanvas # noqa - - app = PySide6.QtWidgets.QApplication.instance() - if app is None: - app = PySide6.QtWidgets.QApplication([""]) - - yield {} - - canvases = weakref.WeakSet() - - for i in range(n): - c = WgpuCanvas() - canvases.add(c) - c.request_draw(make_draw_func_for_canvas(c)) - app.processEvents() - yield c.get_context() - - # Need some shakes to get all canvas refs gone. - del c - gc.collect() - app.processEvents() - - # Check that the canvas objects are really deleted - assert not canvases - - -if __name__ == "__main__": - # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run - - test_release_canvas_context() diff --git a/tests_mem/test_meta.py b/tests_mem/test_meta.py deleted file mode 100644 index 9271397..0000000 --- a/tests_mem/test_meta.py +++ /dev/null @@ -1,81 +0,0 @@ -""" -Some tests to confirm that the test mechanism is sound, and that tests -indeed fail under the right circumstances. -""" - -import wgpu - -import pytest -from testutils import can_use_wgpu_lib, create_and_release -from testutils import get_counts, ob_name_from_test_func -from test_objects import TEST_FUNCS as OBJECT_TEST_FUNCS -from test_gui_offscreen import TEST_FUNCS as GUI_TEST_FUNCS - - -ALL_TEST_FUNCS = OBJECT_TEST_FUNCS + GUI_TEST_FUNCS - - -if not can_use_wgpu_lib: - pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) - - -DEVICE = wgpu.utils.get_default_device() - - -def test_meta_all_objects_covered(): - """Test that we have a test_release test function for each known object.""" - - ref_obnames = set(key for key in get_counts().keys()) - func_obnames = set(ob_name_from_test_func(func) for func in ALL_TEST_FUNCS) - - missing = ref_obnames - func_obnames - extra = func_obnames - ref_obnames - assert not missing - assert not extra - - -def test_meta_all_functions_solid(): - """Test that all funcs starting with "test_release_" are decorated appropriately.""" - for func in ALL_TEST_FUNCS: - is_decorated = func.__code__.co_name == "core_test_func" - assert is_decorated, func.__name__ + " not decorated" - - -def test_meta_buffers_1(): - """Making sure that the test indeed fails, when holding onto the objects.""" - - lock = [] - - @create_and_release - def test_release_buffer(n): - yield {} - for i in range(n): - b = DEVICE.create_buffer(size=128, usage=wgpu.BufferUsage.COPY_DST) - lock.append(b) - yield b - - with pytest.raises(AssertionError): - test_release_buffer() - - -def test_meta_buffers_2(): - """Making sure that the test indeed fails, by disabling the release call.""" - - ori = wgpu.backends.wgpu_native.GPUBuffer._destroy - wgpu.backends.wgpu_native.GPUBuffer._destroy = lambda self: None - - from test_objects import test_release_buffer # noqa - - try: - with pytest.raises(AssertionError): - test_release_buffer() - - finally: - wgpu.backends.wgpu_native.GPUBuffer._destroy = ori - - -if __name__ == "__main__": - test_meta_all_objects_covered() - test_meta_all_functions_solid() - test_meta_buffers_1() - test_meta_buffers_2() diff --git a/tests_mem/test_objects.py b/tests_mem/test_objects.py deleted file mode 100644 index f044e3f..0000000 --- a/tests_mem/test_objects.py +++ /dev/null @@ -1,377 +0,0 @@ -""" -Test all the wgpu objects. 
-""" - -import pytest -import testutils # noqa -from testutils import can_use_wgpu_lib, create_and_release - - -if not can_use_wgpu_lib: - pytest.skip("Skipping tests that need wgpu lib", allow_module_level=True) - - -import wgpu - -DEVICE = wgpu.utils.get_default_device() - - -@create_and_release -def test_release_adapter(n): - yield {} - for i in range(n): - yield wgpu.gpu.request_adapter(power_preference="high-performance") - - -@create_and_release -def test_release_device(n): - pytest.skip("XFAIL") - # todo: XFAIL: Device object seem not to be cleaned up at wgpu-native. - - # Note: the WebGPU spec says: - # [request_device()] is a one-time action: if a device is returned successfully, the adapter becomes invalid. - - yield { - "expected_counts_after_create": {"Device": (n, n), "Queue": (n, 0)}, - } - adapter = DEVICE.adapter - for i in range(n): - d = adapter.request_device() - # d.queue._destroy() - # d._queue = None - yield d - - -@create_and_release -def test_release_bind_group(n): - buffer1 = DEVICE.create_buffer(size=128, usage=wgpu.BufferUsage.STORAGE) - - binding_layouts = [ - { - "binding": 0, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - ] - - bindings = [ - { - "binding": 0, - "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, - }, - ] - - bind_group_layout = DEVICE.create_bind_group_layout(entries=binding_layouts) - - yield {} - - for i in range(n): - yield DEVICE.create_bind_group(layout=bind_group_layout, entries=bindings) - - -_bind_group_layout_binding = 10 - - -@create_and_release -def test_release_bind_group_layout(n): - # Note: when we use the same binding layout descriptor, wgpu-native - # re-uses the BindGroupLayout object. - - global _bind_group_layout_binding - _bind_group_layout_binding += 1 - - yield { - "expected_counts_after_create": {"BindGroupLayout": (n, 1)}, - } - - binding_layouts = [ - { - "binding": _bind_group_layout_binding, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - ] - - for i in range(n): - # binding_layouts[0]["binding"] = i # force unique objects - yield DEVICE.create_bind_group_layout(entries=binding_layouts) - - -@create_and_release -def test_release_buffer(n): - yield {} - for i in range(n): - yield DEVICE.create_buffer(size=128, usage=wgpu.BufferUsage.COPY_DST) - - -@create_and_release -def test_release_command_buffer(n): - # Note: a command encoder can only be used once (it gets destroyed on finish()) - yield { - "expected_counts_after_create": { - "CommandEncoder": (n, 0), - "CommandBuffer": (n, n), - }, - } - - for i in range(n): - command_encoder = DEVICE.create_command_encoder() - yield command_encoder.finish() - - -@create_and_release -def test_release_command_encoder(n): - # Note: a CommandEncoder does not exist in wgpu-core, but we do - # observe its internal CommandBuffer. - yield { - "expected_counts_after_create": { - "CommandEncoder": (n, 0), - "CommandBuffer": (0, n), - }, - } - - for i in range(n): - yield DEVICE.create_command_encoder() - - -@create_and_release -def test_release_compute_pass_encoder(n): - # Note: ComputePassEncoder does not really exist in wgpu-core - # -> Check gpu.diagnostics.wgpu_native_counts.print_report(), nothing there that ends with "Encoder". 
- command_encoder = DEVICE.create_command_encoder() - - yield { - "expected_counts_after_create": { - "ComputePassEncoder": (n, 0), - }, - } - - for i in range(n): - yield command_encoder.begin_compute_pass() - - -@create_and_release -def test_release_compute_pipeline(n): - code = """ - @compute - @workgroup_size(1) - fn main(@builtin(global_invocation_id) index: vec3) { - let i: u32 = index.x; - } - """ - shader = DEVICE.create_shader_module(code=code) - - binding_layouts = [] - pipeline_layout = DEVICE.create_pipeline_layout(bind_group_layouts=binding_layouts) - - yield {} - - for i in range(n): - yield DEVICE.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": shader, "entry_point": "main"}, - ) - - -@create_and_release -def test_release_pipeline_layout(n): - yield {} - for i in range(n): - yield DEVICE.create_pipeline_layout(bind_group_layouts=[]) - - -@create_and_release -def test_release_query_set(n): - yield {} - for i in range(n): - yield DEVICE.create_query_set(type=wgpu.QueryType.occlusion, count=2) - - -@create_and_release -def test_release_queue(n): - pytest.skip("XFAIL") - # todo: XFAIL: the device and queue are kinda one, and the former won't release at wgpu-native. - yield {} - adapter = DEVICE.adapter - for i in range(n): - d = adapter.request_device() - q = d.queue - d._queue = None # detach - yield q - - -@create_and_release -def test_release_render_bundle(n): - # todo: implement this when we do support them - pytest.skip("Render bundle not implemented") - - -@create_and_release -def test_release_render_bundle_encoder(n): - pytest.skip("Render bundle not implemented") - - -@create_and_release -def test_release_render_pass_encoder(n): - # Note: RenderPassEncoder does not really exist in wgpu-core - # -> Check gpu.diagnostics.wgpu_native_counts.print_report(), nothing there that ends with "Encoder". 
- command_encoder = DEVICE.create_command_encoder() - - yield { - "expected_counts_after_create": { - "RenderPassEncoder": (n, 0), - }, - } - - for i in range(n): - yield command_encoder.begin_render_pass(color_attachments=[]) - - -@create_and_release -def test_release_render_pipeline(n): - code = """ - struct VertexInput { - @builtin(vertex_index) vertex_index : u32, - }; - struct VertexOutput { - @location(0) color : vec4, - @builtin(position) pos: vec4, - }; - - @vertex - fn vs_main(in: VertexInput) -> VertexOutput { - var positions = array, 3>( - vec2(0.0, -0.5), - vec2(0.5, 0.5), - vec2(-0.5, 0.75), - ); - var colors = array, 3>( // srgb colors - vec3(1.0, 1.0, 0.0), - vec3(1.0, 0.0, 1.0), - vec3(0.0, 1.0, 1.0), - ); - let index = i32(in.vertex_index); - var out: VertexOutput; - out.pos = vec4(positions[index], 0.0, 1.0); - out.color = vec4(colors[index], 1.0); - return out; - } - - @fragment - fn fs_main(in: VertexOutput) -> @location(0) vec4 { - let physical_color = pow(in.color.rgb, vec3(2.2)); // gamma correct - return vec4(physical_color, in.color.a); - } - """ - shader = DEVICE.create_shader_module(code=code) - - binding_layouts = [] - pipeline_layout = DEVICE.create_pipeline_layout(bind_group_layouts=binding_layouts) - - yield {} - - for i in range(n): - yield DEVICE.create_render_pipeline( - layout=pipeline_layout, - vertex={ - "module": shader, - "entry_point": "vs_main", - "buffers": [], - }, - primitive={ - "topology": wgpu.PrimitiveTopology.triangle_list, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.none, - }, - depth_stencil=None, - multisample=None, - fragment={ - "module": shader, - "entry_point": "fs_main", - "targets": [ - { - "format": "bgra8unorm-srgb", - "blend": { - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - }, - ], - }, - ) - - -@create_and_release -def test_release_sampler(n): - yield {} - for i in range(n): - yield DEVICE.create_sampler() - - -@create_and_release -def test_release_shader_module(n): - yield {} - - code = """ - @fragment - fn fs_main() -> @location(0) vec4 { - return vec4(1.0, 0.0, 0.0, 1.0); - } - """ - - for i in range(n): - yield DEVICE.create_shader_module(code=code) - - -@create_and_release -def test_release_texture(n): - yield {} - for i in range(n): - yield DEVICE.create_texture( - size=(16, 16, 16), - usage=wgpu.TextureUsage.TEXTURE_BINDING, - format="rgba8unorm", - ) - - -@create_and_release -def test_release_texture_view(n): - texture = DEVICE.create_texture( - size=(16, 16, 16), usage=wgpu.TextureUsage.TEXTURE_BINDING, format="rgba8unorm" - ) - yield {} - for i in range(n): - yield texture.create_view() - - -# %% The end - - -TEST_FUNCS = [ - ob - for name, ob in list(globals().items()) - if name.startswith("test_") and callable(ob) -] - -if __name__ == "__main__": - # testutils.TEST_ITERS = 40 # Uncomment for a mem-usage test run - - for func in TEST_FUNCS: - print(func.__name__ + " ...") - try: - func() - except pytest.skip.Exception: - print(" skipped") - print("done") diff --git a/tests_mem/testutils.py b/tests_mem/testutils.py deleted file mode 100644 index cd1cce0..0000000 --- a/tests_mem/testutils.py +++ /dev/null @@ -1,230 +0,0 @@ -import gc -import os -import sys -import time -import subprocess - -import psutil -import wgpu -from wgpu._diagnostics import int_repr - - -p = psutil.Process() - - -def _determine_can_use_wgpu_lib(): - # For some reason, since 
wgpu-native 5c304b5ea1b933574edb52d5de2d49ea04a053db - # the process' exit code is not zero, so we test more pragmatically. - code = "import wgpu.utils; wgpu.utils.get_default_device(); print('ok')" - result = subprocess.run( - [ - sys.executable, - "-c", - code, - ], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - ) - print("_determine_can_use_wgpu_lib() status code:", result.returncode) - return ( - result.stdout.strip().endswith("ok") - and "traceback" not in result.stderr.lower() - ) - - -def _determine_can_use_glfw(): - code = "import glfw;exit(0) if glfw.init() else exit(1)" - try: - subprocess.check_output([sys.executable, "-c", code]) - except Exception: - return False - else: - return True - - -def _determine_can_use_pyside6(): - code = "import PySide6.QtGui" - try: - subprocess.check_output([sys.executable, "-c", code]) - except Exception: - return False - else: - return True - - -can_use_wgpu_lib = _determine_can_use_wgpu_lib() -can_use_glfw = _determine_can_use_glfw() -can_use_pyside6 = _determine_can_use_pyside6() -is_ci = bool(os.getenv("CI", None)) -is_pypy = sys.implementation.name == "pypy" - -TEST_ITERS = None - - -def get_memory_usage(): - """Get how much memory the process consumes right now.""" - # vms: total virtual memory. Seems not suitable, because it gets less but bigger differences. - # rss: the part of the virtual memory that is not in swap, i.e. consumers ram. - # uss: memory that would become available when the process is killed (excludes shared). - # return p.memory_info().rss - return p.memory_full_info().uss - - -def clear_mem(): - time.sleep(0.001) - gc.collect() - - time.sleep(0.001) - gc.collect() - - if is_pypy: - gc.collect() - - device = wgpu.utils.get_default_device() - device._poll() - - -def get_counts(): - """Get a dict that maps object names to a 2-tuple represening - the counts in py and wgpu-native. - """ - counts_py = wgpu.diagnostics.object_counts.get_dict() - counts_native = wgpu.diagnostics.wgpu_native_counts.get_dict() - - all_keys = set(counts_py) | set(counts_native) - - default = {"count": -1} - - counts = {} - for key in sorted(all_keys): - counts[key] = ( - counts_py.get(key, default)["count"], - counts_native.get(key, default)["count"], - ) - counts.pop("total") - - return counts - - -def get_excess_counts(counts1, counts2): - """Compare two counts dicts, and return a new dict with the fields - that have increased counts. 
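Both arguments are dicts as produced by get_counts(): object name mapped to a (python_count, native_count) tuple. A small illustration of the expected behaviour, with hypothetical numbers:

    before = {"Buffer": (2, 2), "Texture": (1, 1)}
    after = {"Buffer": (10, 10), "Texture": (1, 1)}
    # get_excess_counts(before, after) -> {"Buffer": (8, 8)}
    # Only names whose Python and/or native count increased are reported,
    # together with how much each side grew; unchanged entries are omitted.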
- """ - more = {} - for name in counts1: - c1 = counts1[name][0] - c2 = counts2[name][0] - more_py = 0 - if c2 > c1: - more_py = c2 - c1 - c1 = counts1[name][1] - c2 = counts2[name][1] - more_native = 0 - if c2 > c1: - more_native = c2 - c1 - if more_py or more_native: - more[name] = more_py, more_native - return more - - -def ob_name_from_test_func(func): - """Translate test_release_bind_group() to "BindGroup".""" - func_name = func.__name__ - prefix = "test_release_" - assert func_name.startswith(prefix) - words = func_name[len(prefix) :].split("_") - if words[-1].isnumeric(): - words.pop(-1) - return "".join(word.capitalize() for word in words) - - -def create_and_release(create_objects_func): - """Decorator.""" - - def core_test_func(): - """The core function that does the testing.""" - - if TEST_ITERS: - n_objects_list = [8 for i in range(TEST_ITERS)] - else: - n_objects_list = [32, 17] - - # Init mem usage measurements - clear_mem() - mem3 = get_memory_usage() - - for iter, n_objects in enumerate(n_objects_list): - generator = create_objects_func(n_objects) - ob_name = ob_name_from_test_func(create_objects_func) - - # ----- Collect options - - options = { - "expected_counts_after_create": {ob_name: (n_objects, n_objects)}, - "expected_counts_after_release": {}, - } - - func_options = next(generator) - assert isinstance(func_options, dict), "First yield must be an options dict" - options.update(func_options) - - # Measure baseline object counts - clear_mem() - counts1 = get_counts() - - # ----- Create - - # Create objects - objects = list(generator) - - # Test the count - assert len(objects) == n_objects - - # Test that all objects are of the same class. - # (this for-loop is a bit weird, but its to avoid leaking refs to objects) - cls = objects[0].__class__ - assert all(isinstance(objects[i], cls) for i in range(len(objects))) - - # Test that class matches function name (should prevent a group of copy-paste errors) - assert ob_name == cls.__name__[3:] - - # Give wgpu some slack to clean up temporary resources - wgpu.utils.get_default_device()._poll() - - # Measure peak object counts - counts2 = get_counts() - more2 = get_excess_counts(counts1, counts2) - if not TEST_ITERS: - print(" more after create:", more2) - - # Make sure the actual object has increased - assert more2 # not empty - assert more2 == options["expected_counts_after_create"] - - # It's ok if other objects are created too ... - - # ----- Release - - # Delete objects - del objects - clear_mem() - - # Measure after-release object counts - counts3 = get_counts() - more3 = get_excess_counts(counts1, counts3) - if not TEST_ITERS: - print(" more after release:", more3) - - # Check! 
- assert more3 == options["expected_counts_after_release"] - - # Print mem usage info - if TEST_ITERS: - mem1 = mem3 # initial mem is end-mem of last iter - mem3 = get_memory_usage() - mem_info = (int_repr(mem3 - mem1) + "B").rjust(7) - print(mem_info, end=(" " if (iter + 1) % 10 else "\n")) - - core_test_func.__name__ = create_objects_func.__name__ - return core_test_func From 1a6bfc76eeaf4e055ca33a3193b6623f3d665b5f Mon Sep 17 00:00:00 2001 From: Jan Date: Wed, 27 Dec 2023 22:50:45 +0100 Subject: [PATCH 03/20] Remove gui and backends --- wgpu/backends/__init__.py | 37 - wgpu/backends/auto.py | 27 - wgpu/backends/js_webgpu/__init__.py | 31 - wgpu/backends/rs.py | 12 - wgpu/backends/wgpu_native/__init__.py | 21 - wgpu/backends/wgpu_native/_api.py | 2937 ------------------------ wgpu/backends/wgpu_native/_ffi.py | 205 -- wgpu/backends/wgpu_native/_helpers.py | 445 ---- wgpu/backends/wgpu_native/_mappings.py | 454 ---- wgpu/backends/wgpu_native/extras.py | 50 - wgpu/gui/__init__.py | 13 - wgpu/gui/auto.py | 106 - wgpu/gui/base.py | 417 ---- wgpu/gui/glfw.py | 553 ----- wgpu/gui/jupyter.py | 137 -- wgpu/gui/offscreen.py | 244 -- wgpu/gui/qt.py | 430 ---- wgpu/gui/wx.py | 176 -- 18 files changed, 6295 deletions(-) delete mode 100644 wgpu/backends/__init__.py delete mode 100644 wgpu/backends/auto.py delete mode 100644 wgpu/backends/js_webgpu/__init__.py delete mode 100644 wgpu/backends/rs.py delete mode 100644 wgpu/backends/wgpu_native/__init__.py delete mode 100644 wgpu/backends/wgpu_native/_api.py delete mode 100644 wgpu/backends/wgpu_native/_ffi.py delete mode 100644 wgpu/backends/wgpu_native/_helpers.py delete mode 100644 wgpu/backends/wgpu_native/_mappings.py delete mode 100644 wgpu/backends/wgpu_native/extras.py delete mode 100644 wgpu/gui/__init__.py delete mode 100644 wgpu/gui/auto.py delete mode 100644 wgpu/gui/base.py delete mode 100644 wgpu/gui/glfw.py delete mode 100644 wgpu/gui/jupyter.py delete mode 100644 wgpu/gui/offscreen.py delete mode 100644 wgpu/gui/qt.py delete mode 100644 wgpu/gui/wx.py diff --git a/wgpu/backends/__init__.py b/wgpu/backends/__init__.py deleted file mode 100644 index 3e78dc0..0000000 --- a/wgpu/backends/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -""" -The backend implementations of the wgpu API. -""" - -import sys - -from ..classes import GPU as _base_GPU # noqa - - -def _register_backend(gpu): - """Backends call this to activate themselves. - It replaces ``wgpu.gpu`` with the ``gpu`` object from the backend. - """ - - root_namespace = sys.modules["wgpu"].__dict__ - needed_attributes = ( - "request_adapter", - "request_adapter_async", - "wgsl_language_features", - ) - - # Check - for attr in needed_attributes: - if not (hasattr(gpu, attr)): - raise RuntimeError( - "The registered WGPU backend object must have attributes " - + ", ".join(f"'{a}'" for a in needed_attributes) - + f". The '{attr}' is missing." - ) - - # Only allow registering a backend once - if not isinstance(root_namespace["gpu"], _base_GPU): - raise RuntimeError("WGPU backend can only be set once.") - - # Apply - root_namespace["gpu"] = gpu - return gpu diff --git a/wgpu/backends/auto.py b/wgpu/backends/auto.py deleted file mode 100644 index f2c87bf..0000000 --- a/wgpu/backends/auto.py +++ /dev/null @@ -1,27 +0,0 @@ -# The auto/default/only backend is wgpu-native, but this may change in the future. -import sys - - -def _load_backend(backend_name): - """Load a wgpu backend by name.""" - - if backend_name == "wgpu_native": - from . 
import wgpu_native as module # noqa: F401,F403 - elif backend_name == "js_webgpu": - from . import js_webgpu as module # noqa: F401,F403 - else: # no-cover - raise ImportError(f"Unknown wgpu backend: '{backend_name}'") - - return module.gpu - - -def _auto_load_backend(): - """Decide on the backend automatically.""" - - if sys.platform == "emscripten": - return _load_backend("js_webgpu") - else: - return _load_backend("wgpu_native") - - -gpu = _auto_load_backend() diff --git a/wgpu/backends/js_webgpu/__init__.py b/wgpu/backends/js_webgpu/__init__.py deleted file mode 100644 index d19d6c2..0000000 --- a/wgpu/backends/js_webgpu/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -WGPU backend implementation based on the JS WebGPU API. - -Since the exposed Python API is the same as the JS API, except that -descriptors are arguments, this API can probably be fully automatically -generated. -""" - -# NOTE: this is just a stub for now!! - -from .. import _register_backend - - -class GPU: - def request_adapter(self, **parameters): - raise NotImplementedError("Cannot use sync API functions in JS.") - - async def request_adapter_async(self, **parameters): - gpu = window.navigator.gpu # noqa - return await gpu.request_adapter(**parameters) - - def get_preferred_canvas_format(self): - raise NotImplementedError() - - @property - def wgsl_language_features(self): - return set() - - -gpu = GPU() -_register_backend(gpu) diff --git a/wgpu/backends/rs.py b/wgpu/backends/rs.py deleted file mode 100644 index a2e4a18..0000000 --- a/wgpu/backends/rs.py +++ /dev/null @@ -1,12 +0,0 @@ -# Termporaty alias for backwards compatibility. - -from .wgpu_native import gpu # noqa - -_deprecation_msg = """ -WARNING: wgpu.backends.rs is deprecated. Instead you can use: -- import wgpu.backends.wgpu_native to use the backend by its new name. -- import wgpu.backends.auto to do the same, but simpler and more future proof. -- simply use wgpu.gpu.request_adapter() to auto-load the backend. -""".strip() - -print(_deprecation_msg) diff --git a/wgpu/backends/wgpu_native/__init__.py b/wgpu/backends/wgpu_native/__init__.py deleted file mode 100644 index ce02d5f..0000000 --- a/wgpu/backends/wgpu_native/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -The wgpu-native backend. -""" - -from ._api import * # noqa: F401, F403 -from ._ffi import ffi, lib, lib_path, lib_version_info # noqa: F401 -from ._ffi import _check_expected_version -from .. import _register_backend - - -# The wgpu-native version that we target/expect -__version__ = "0.18.1.3" -__commit_sha__ = "8561b0d8c0b5af7dfb8631d6f924e5418c92f2ce" -version_info = tuple(map(int, __version__.split("."))) -_check_expected_version(version_info) # produces a warning on mismatch - -# Instantiate and register this backend -gpu = GPU() # noqa: F405 -_register_backend(gpu) # noqa: F405 - -from .extras import enumerate_adapters, request_device_tracing # noqa: F401, E402 diff --git a/wgpu/backends/wgpu_native/_api.py b/wgpu/backends/wgpu_native/_api.py deleted file mode 100644 index 808d33d..0000000 --- a/wgpu/backends/wgpu_native/_api.py +++ /dev/null @@ -1,2937 +0,0 @@ -""" -WGPU backend implementation based on wgpu-native. - -The wgpu-native project (https://github.com/gfx-rs/wgpu-native) is a Rust -library based on wgpu-core, which wraps Metal, Vulkan, DX12, and more. -It compiles to a dynamic library exposing a C-API, accompanied by a C -header file. We wrap this using cffi, which uses the header file to do -most type conversions for us. 
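For readers unfamiliar with cffi, the pattern is: feed the C declarations to FFI.cdef() and open the shared library with FFI.dlopen(); after that, functions and structs can be called from Python with cffi handling the type conversions. A generic sketch of that pattern (not the actual wgpu header; the 'add' function and library name are made up for illustration):

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("int add(int a, int b);")  # declarations, normally taken from the header file
    lib = ffi.dlopen("libexample.so")   # hypothetical shared library
    print(lib.add(2, 3))                # cffi converts the Python ints to C ints and back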
- -This module is maintained using a combination of manual code and -automatically inserted code. In short, the codegen utility inserts -new methods and checks plus annotates all structs and C api calls. - -Read the codegen/readme.md for more information. -""" - - -import os -import ctypes -import logging -import ctypes.util -from weakref import WeakKeyDictionary -from typing import List, Dict, Union - -from ... import classes, flags, enums, structs -from ..._coreutils import str_flag_to_int - -from ._ffi import ffi, lib -from ._mappings import cstructfield2enum, enummap, enum_str2int, enum_int2str -from ._helpers import ( - get_wgpu_instance, - get_surface_id_from_canvas, - get_memoryview_from_address, - get_memoryview_and_address, - to_snake_case, - to_camel_case, - ErrorHandler, - SafeLibCalls, -) - - -logger = logging.getLogger("wgpu") # noqa - - -# The API is prettu well defined -__all__ = classes.__all__.copy() - - -# %% Helper functions and objects - - -# Features that wgpu-native supports that are not part of WebGPU -NATIVE_FEATURES = ( - "PushConstants", - "TextureAdapterSpecificFormatFeatures", - "MultiDrawIndirect", - "MultiDrawIndirectCount", - "VertexWritableStorage", -) - -# Object to be able to bind the lifetime of objects to other objects -_refs_per_struct = WeakKeyDictionary() - -# Some enum keys need a shortcut -_cstructfield2enum_alt = { - "load_op": "LoadOp", - "store_op": "StoreOp", - "depth_store_op": "StoreOp", - "stencil_store_op": "StoreOp", -} - - -def new_struct_p(ctype, **kwargs): - """Create a pointer to an ffi struct. Provides a flatter syntax - and converts our string enums to int enums needed in C. The passed - kwargs are also bound to the lifetime of the new struct. - """ - assert ctype.endswith(" *") - struct_p = _new_struct_p(ctype, **kwargs) - _refs_per_struct[struct_p] = kwargs - return struct_p - # Some kwargs may be other ffi objects, and some may represent - # pointers. These need special care because them "being in" the - # current struct does not prevent them from being cleaned up by - # Python's garbage collector. Keeping hold of these objects in the - # calling code is painful and prone to missing cases, so we solve - # the issue here. We cannot attach an attribute to the struct directly, - # so we use a global WeakKeyDictionary. Also see issue #52. - - -def new_struct(ctype, **kwargs): - """Create an ffi value struct. The passed kwargs are also bound - to the lifetime of the new struct. - """ - assert not ctype.endswith("*") - struct_p = _new_struct_p(ctype + " *", **kwargs) - struct = struct_p[0] - _refs_per_struct[struct] = kwargs - return struct - - -def _new_struct_p(ctype, **kwargs): - struct_p = ffi.new(ctype) - for key, val in kwargs.items(): - if isinstance(val, str) and isinstance(getattr(struct_p, key), int): - # An enum - these are ints in C, but str in our public API - if key in _cstructfield2enum_alt: - structname = _cstructfield2enum_alt[key] - else: - structname = cstructfield2enum[ctype.strip(" *")[4:] + "." + key] - ival = enummap[structname + "." + val] - setattr(struct_p, key, ival) - else: - setattr(struct_p, key, val) - return struct_p - - -def _tuple_from_tuple_or_dict(ob, fields): - """Given a tuple/list/dict, return a tuple. Also checks tuple size. - - >> # E.g. 
- >> _tuple_from_tuple_or_dict({"x": 1, "y": 2}, ("x", "y")) - (1, 2) - >> _tuple_from_tuple_or_dict([1, 2], ("x", "y")) - (1, 2) - """ - error_msg = "Expected tuple/key/dict with fields: {}" - if isinstance(ob, (list, tuple)): - if len(ob) != len(fields): - raise ValueError(error_msg.format(", ".join(fields))) - return tuple(ob) - elif isinstance(ob, dict): - try: - return tuple(ob[key] for key in fields) - except KeyError: - raise ValueError(error_msg.format(", ".join(fields))) - else: - raise TypeError(error_msg.format(", ".join(fields))) - - -_empty_label = ffi.new("char []", b"") - - -def to_c_label(label): - """Get the C representation of a label.""" - if not label: - return _empty_label - else: - return ffi.new("char []", label.encode()) - - -def feature_flag_to_feature_names(flag): - """Convert a feature flags into a tuple of names.""" - feature_names = {} # import this from mappings? - features = [] - for i in range(32): - val = int(2**i) - if flag & val: - features.append(feature_names.get(val, val)) - return tuple(sorted(features)) - - -def check_struct(struct_name, d): - """Check that all keys in the given dict exist in the corresponding struct.""" - valid_keys = set(getattr(structs, struct_name)) - invalid_keys = set(d.keys()).difference(valid_keys) - if invalid_keys: - raise ValueError(f"Invalid keys in {struct_name}: {invalid_keys}") - - -error_handler = ErrorHandler(logger) -libf = SafeLibCalls(lib, error_handler) - - -# %% The API - - -class GPU(classes.GPU): - def request_adapter( - self, *, power_preference=None, force_fallback_adapter=False, canvas=None - ): - """Create a `GPUAdapter`, the object that represents an abstract wgpu - implementation, from which one can request a `GPUDevice`. - - This is the implementation based on wgpu-native. - - Arguments: - power_preference (PowerPreference): "high-performance" or "low-power". - force_fallback_adapter (bool): whether to use a (probably CPU-based) - fallback adapter. - canvas (WgpuCanvasInterface): The canvas that the adapter should - be able to render to. This can typically be left to None. - """ - - # ----- Surface ID - - # Get surface id that the adapter must be compatible with. If we - # don't pass a valid surface id, there is no guarantee we'll be - # able to create a surface texture for it (from this adapter). - surface_id = ffi.NULL - if canvas is not None: - window_id = canvas.get_window_id() - if window_id: # e.g. could be an off-screen canvas - surface_id = canvas.get_context()._get_surface_id() - - # ----- Select backend - - # Try to read the WGPU_BACKEND_TYPE environment variable to see - # if a backend should be forced. 
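As the comment above says, the WGPU_BACKEND_TYPE environment variable can force a particular backend; an invalid value only logs a warning and falls back to "Undefined". A sketch of forcing a backend before requesting an adapter; the value "Vulkan" and the adapter.features / adapter.limits properties are assumptions based on the surrounding code, not verified here:

    import os

    os.environ["WGPU_BACKEND_TYPE"] = "Vulkan"  # must be set before the adapter is requested

    import wgpu

    adapter = wgpu.gpu.request_adapter(power_preference="high-performance")
    print(adapter.features)  # WebGPU feature names plus the native extras listed above
    print(adapter.limits)    # snake_case limit names, e.g. "max_buffer_size"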
- force_backend = os.getenv("WGPU_BACKEND_TYPE", None) - backend = enum_str2int["BackendType"]["Undefined"] - if force_backend: - try: - backend = enum_str2int["BackendType"][force_backend] - except KeyError: - logger.warning( - f"Invalid value for WGPU_BACKEND_TYPE: '{force_backend}'.\n" - f"Valid values are: {list(enum_str2int['BackendType'].keys())}" - ) - else: - logger.warning(f"Forcing backend: {force_backend} ({backend})") - - # ----- Request adapter - - # H: nextInChain: WGPUChainedStruct *, compatibleSurface: WGPUSurface, powerPreference: WGPUPowerPreference, backendType: WGPUBackendType, forceFallbackAdapter: WGPUBool/int - struct = new_struct_p( - "WGPURequestAdapterOptions *", - compatibleSurface=surface_id, - powerPreference=power_preference or "high-performance", - forceFallbackAdapter=bool(force_fallback_adapter), - backendType=backend, - # not used: nextInChain - ) - - adapter_id = None - error_msg = None - - @ffi.callback("void(WGPURequestAdapterStatus, WGPUAdapter, char *, void *)") - def callback(status, result, message, userdata): - if status != 0: - nonlocal error_msg - msg = "-" if message == ffi.NULL else ffi.string(message).decode() - error_msg = f"Request adapter failed ({status}): {msg}" - else: - nonlocal adapter_id - adapter_id = result - - # H: void f(WGPUInstance instance, WGPURequestAdapterOptions const * options, WGPURequestAdapterCallback callback, void * userdata) - libf.wgpuInstanceRequestAdapter(get_wgpu_instance(), struct, callback, ffi.NULL) - - # For now, Rust will call the callback immediately - # todo: when wgpu gets an event loop -> while run wgpu event loop or something - if adapter_id is None: # pragma: no cover - error_msg = error_msg or "Could not obtain new adapter id." - raise RuntimeError(error_msg) - - return self._create_adapter(adapter_id) - - def _create_adapter(self, adapter_id): - # ----- Get adapter info - - # H: nextInChain: WGPUChainedStructOut *, vendorID: int, vendorName: char *, architecture: char *, deviceID: int, name: char *, driverDescription: char *, adapterType: WGPUAdapterType, backendType: WGPUBackendType - c_properties = new_struct_p( - "WGPUAdapterProperties *", - # not used: nextInChain - # not used: deviceID - # not used: vendorID - # not used: name - # not used: driverDescription - # not used: adapterType - # not used: backendType - # not used: vendorName - # not used: architecture - ) - - # H: void f(WGPUAdapter adapter, WGPUAdapterProperties * properties) - libf.wgpuAdapterGetProperties(adapter_id, c_properties) - - def to_py_str(key): - char_p = getattr(c_properties, key) - if char_p: - return ffi.string(char_p).decode(errors="ignore") - return "" - - adapter_info = { - "vendor": to_py_str("vendorName"), - "architecture": to_py_str("architecture"), - "device": to_py_str("name"), - "description": to_py_str("driverDescription"), - "adapter_type": enum_int2str["AdapterType"].get( - c_properties.adapterType, "unknown" - ), - "backend_type": enum_int2str["BackendType"].get( - c_properties.backendType, "unknown" - ), - # "vendor_id": c_properties.vendorID, - # "device_id": c_properties.deviceID, - } - - # ----- Get adapter limits - - # H: nextInChain: WGPUChainedStructOut *, limits: WGPULimits - c_supported_limits = new_struct_p( - "WGPUSupportedLimits *", - # not used: nextInChain - # not used: limits - ) - c_limits = c_supported_limits.limits - # H: WGPUBool f(WGPUAdapter adapter, WGPUSupportedLimits * limits) - libf.wgpuAdapterGetLimits(adapter_id, c_supported_limits) - limits = {to_snake_case(k): getattr(c_limits, 
k) for k in sorted(dir(c_limits))} - - # ----- Get adapter features - - # WebGPU features - features = set() - for f in sorted(enums.FeatureName): - key = f"FeatureName.{f}" - i = enummap[key] - # H: WGPUBool f(WGPUAdapter adapter, WGPUFeatureName feature) - if libf.wgpuAdapterHasFeature(adapter_id, i): - features.add(f) - - # Native features - for f in NATIVE_FEATURES: - i = getattr(lib, f"WGPUNativeFeature_{f}") - # H: WGPUBool f(WGPUAdapter adapter, WGPUFeatureName feature) - if libf.wgpuAdapterHasFeature(adapter_id, i): - features.add(f) - - # ----- Done - - return GPUAdapter(adapter_id, features, limits, adapter_info) - - async def request_adapter_async( - self, *, power_preference=None, force_fallback_adapter=False, canvas=None - ): - """Async version of ``request_adapter()``. - This is the implementation based on wgpu-native. - """ - return self.request_adapter( - power_preference=power_preference, - force_fallback_adapter=force_fallback_adapter, - canvas=canvas, - ) # no-cover - - -# Instantiate API entrypoint -gpu = GPU() - - -class GPUCanvasContext(classes.GPUCanvasContext): - # The way this works, is that the context must first be configured. - # Then a texture can be obtained, which can be written to, and then it - # can be presented. The lifetime of the texture is between - # get_current_texture() and present(). We keep track of the texture so - # we can give meaningful errors/warnings on invalid use, rather than - # the more cryptic Rust panics. - - def __init__(self, canvas): - super().__init__(canvas) - self._device = None # set in configure() - self._surface_id = None - self._config = None - self._texture = None - - def _get_surface_id(self): - if self._surface_id is None: - # get_surface_id_from_canvas calls wgpuInstanceCreateSurface - self._surface_id = get_surface_id_from_canvas(self._get_canvas()) - return self._surface_id - - def configure( - self, - *, - device: "GPUDevice", - format: "enums.TextureFormat", - usage: "flags.TextureUsage" = 0x10, - view_formats: "List[enums.TextureFormat]" = [], - color_space: str = "srgb", - alpha_mode: "enums.CanvasAlphaMode" = "opaque", - ): - # Handle inputs - - # Store for later - self._device = device - # Handle usage - if isinstance(usage, str): - usage = str_flag_to_int(flags.TextureUsage, usage) - # View formats - c_view_formats = ffi.NULL - if view_formats: - view_formats_list = [enummap["TextureFormat." 
+ x] for x in view_formats] - c_view_formats = ffi.new("WGPUTextureFormat []", view_formats_list) - # Lookup alpha mode, needs explicit conversion because enum names mismatch - c_alpha_mode = getattr(lib, f"WGPUCompositeAlphaMode_{alpha_mode.capitalize()}") - # The format is used as-is - if format is None: - format = self.get_preferred_format(device.adapter) - # The color_space is not used for now - color_space - - # Get what's supported - - # H: nextInChain: WGPUChainedStructOut *, formatCount: int, formats: WGPUTextureFormat *, presentModeCount: int, presentModes: WGPUPresentMode *, alphaModeCount: int, alphaModes: WGPUCompositeAlphaMode * - capabilities = new_struct_p( - "WGPUSurfaceCapabilities *", - # not used: formatCount - # not used: formats - # not used: presentModeCount - # not used: presentModes - # not used: alphaModeCount - # not used: alphaModes - # not used: nextInChain - ) - # H: void f(WGPUSurface surface, WGPUAdapter adapter, WGPUSurfaceCapabilities * capabilities) - libf.wgpuSurfaceGetCapabilities( - self._get_surface_id(), self._device.adapter._internal, capabilities - ) - - capable_formats = [] - for i in range(capabilities.formatCount): - int_val = capabilities.formats[i] - capable_formats.append(enum_int2str["TextureFormat"][int_val]) - - capable_present_modes = [] - for i in range(capabilities.presentModeCount): - int_val = capabilities.presentModes[i] - str_val = enum_int2str["PresentMode"][int_val] - capable_present_modes.append(str_val.lower()) - - capable_alpha_modes = [] - for i in range(capabilities.alphaModeCount): - int_val = capabilities.alphaModes[i] - str_val = enum_int2str["CompositeAlphaMode"][int_val] - capable_alpha_modes.append(str_val.lower()) - - # H: void f(WGPUSurfaceCapabilities capabilities) - libf.wgpuSurfaceCapabilitiesFreeMembers(capabilities[0]) - - # Check if input is supported - - if format not in capable_formats: - raise ValueError( - f"Given format '{format}' is not in supported formats {capable_formats}" - ) - if alpha_mode not in capable_alpha_modes: - raise ValueError( - f"Given format '{alpha_mode}' is not in supported formats {capable_alpha_modes}" - ) - - # Select the present mode to determine vsync behavior. - # * https://docs.rs/wgpu/latest/wgpu/enum.PresentMode.html - # * https://github.com/pygfx/wgpu-py/issues/256 - # - # Fifo: Wait for vsync, with a queue of ± 3 frames. - # FifoRelaxed: Like fifo but less lag and more tearing? aka adaptive vsync. - # Mailbox: submit without queue, but present on vsync. Not always available. - # Immediate: no queue, no waiting, with risk of tearing, vsync off. - # - # In general Fifo gives the best result, but sometimes people want to - # benchmark something and get the highest FPS possible. Note - # that we've observed rate limiting regardless of setting this - # to Immediate, depending on OS or being on battery power. 
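The preference lists below are driven by a private _vsync attribute on the canvas, which the GUI canvases in this repo expose as a constructor argument. A sketch for benchmarking without vsync; the vsync keyword is an assumption here, and if a canvas lacks the attribute the getattr default of True keeps the fifo behavior:

    from wgpu.gui.auto import WgpuCanvas, run

    # vsync=False makes configure() prefer "immediate" (then "mailbox", then "fifo").
    canvas = WgpuCanvas(title="uncapped FPS", vsync=False)
    run()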
- if getattr(self._get_canvas(), "_vsync", True): - present_mode_pref = ["fifo", "mailbox"] - else: - present_mode_pref = ["immediate", "mailbox", "fifo"] - present_modes = [p for p in present_mode_pref if p in capable_present_modes] - present_mode = (present_modes or capable_present_modes)[0] - c_present_mode = getattr(lib, f"WGPUPresentMode_{present_mode.capitalize()}") - - # Prepare config object - - # H: nextInChain: WGPUChainedStruct *, device: WGPUDevice, format: WGPUTextureFormat, usage: WGPUTextureUsageFlags/int, viewFormatCount: int, viewFormats: WGPUTextureFormat *, alphaMode: WGPUCompositeAlphaMode, width: int, height: int, presentMode: WGPUPresentMode - config = new_struct_p( - "WGPUSurfaceConfiguration *", - device=device._internal, - format=format, - usage=usage, - viewFormatCount=len(view_formats), - viewFormats=c_view_formats, - alphaMode=c_alpha_mode, - width=0, - height=0, - presentMode=c_present_mode, - # not used: nextInChain - ) - - # Configure - self._configure(config) - - def _configure(self, config): - # If a texture is still active, better destroy it first - self._destroy_texture() - # Set the size - width, height = self._get_canvas().get_physical_size() - config.width = width - config.height = height - if width <= 0 or height <= 0: - raise RuntimeError( - f"Cannot configure canvas that has no pixels ({width}x{height})." - ) - # Configure, and store the config if we did not error out - # H: void f(WGPUSurface surface, WGPUSurfaceConfiguration const * config) - libf.wgpuSurfaceConfigure(self._get_surface_id(), config) - self._config = config - - def unconfigure(self): - self._destroy_texture() - self._config = None - # H: void f(WGPUSurface surface) - libf.wgpuSurfaceUnconfigure(self._get_surface_id()) - - def _destroy_texture(self): - if self._texture: - self._texture.destroy() - self._texture = None - - def get_current_texture(self): - # If the canvas has changed since the last configure, we need to re-configure it - if not self._config: - raise RuntimeError( - "Canvas context must be configured before calling get_current_texture()." - ) - - # When the texture is active right now, we could either: - # * return the existing texture - # * warn about it, and create a new one - # * raise an error - # Right now we do the warning, so things still (kinda) keep working - if self._texture: - self._destroy_texture() - logger.warning( - "get_current_texture() is called multiple times before present()." - ) - - # Reconfigure when the canvas has resized. - # On some systems (Windows+Qt) this is not necessary, because - # the texture status would be Outdated below, resulting in a - # reconfigure. But on others (e.g. glfw) the texture size does - # not have to match the window size, apparently. The downside - # of doing this check on the former systems is that errors - # get logged, which would not be there if we did not - # pre-emptively reconfigure. These log entries are harmless but - # annoying, and I currently don't know how to prevent them - # elegantly. See issue #352 - old_size = (self._config.width, self._config.height) - new_size = tuple(self._get_canvas().get_physical_size()) - if old_size != new_size: - self._configure(self._config) - - # Try to obtain a texture. - # If it fails, depending on status, we reconfigure and try again.
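In practice the configure/get_current_texture/present cycle is driven by the canvas draw callback: the draw function acquires the texture once, renders into it, and the canvas presents it after the callback returns. A minimal sketch, assuming canvas, adapter and device already exist and leaving out the render pass itself:

    context = canvas.get_context()
    context.configure(device=device, format=context.get_preferred_format(adapter))

    def draw_frame():
        texture = context.get_current_texture()  # acquire at most once per frame
        view = texture.create_view()
        # ... encode and submit a render pass that targets `view` ...
        # present() is called for us after this function returns

    canvas.request_draw(draw_frame)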
- - # H: texture: WGPUTexture, suboptimal: WGPUBool/int, status: WGPUSurfaceGetCurrentTextureStatus - surface_texture = new_struct_p( - "WGPUSurfaceTexture *", - # not used: texture - # not used: suboptimal - # not used: status - ) - - for attempt in [1, 2]: - # H: void f(WGPUSurface surface, WGPUSurfaceTexture * surfaceTexture) - libf.wgpuSurfaceGetCurrentTexture(self._get_surface_id(), surface_texture) - status = surface_texture.status - texture_id = surface_texture.texture - if status == lib.WGPUSurfaceGetCurrentTextureStatus_Success: - break # success - if texture_id: - # H: void f(WGPUTexture texture) - libf.wgpuTextureRelease(texture_id) - if attempt == 1 and status in [ - lib.WGPUSurfaceGetCurrentTextureStatus_Timeout, - lib.WGPUSurfaceGetCurrentTextureStatus_Outdated, - lib.WGPUSurfaceGetCurrentTextureStatus_Lost, - ]: - # Configure and try again. - # On Window+Qt this happens e.g. when the window has resized - # (status==Outdated), but also when moving the window from one - # monitor to another with different scale-factor. - logger.info(f"Re-configuring canvas context ({status}).") - self._configure(self._config) - else: - # WGPUSurfaceGetCurrentTextureStatus_OutOfMemory - # WGPUSurfaceGetCurrentTextureStatus_DeviceLost - # Or if this is the second attempt. - raise RuntimeError(f"Cannot get surface texture ({status}).") - - # I don't expect this to happen, but lets check just in case. - if not texture_id: - raise RuntimeError("Cannot get surface texture (no texture)") - - # Things look good, but texture may still be suboptimal, whatever that means - if surface_texture.suboptimal: - logger.warning("The surface texture is suboptimal.") - - return self._create_python_texture(texture_id) - - def _create_python_texture(self, texture_id): - # Create the Python wrapper - - # We can derive texture props from the config and common sense: - # width = self._config.width - # height = self._config.height - # depth = 1 - # mip_level_count = 1 - # sample_count = 1 - # dimension = enums.TextureDimension.d2 - # format = enum_int2str["TextureFormat"][self._config.format] - # usage = self._config.usage - - # But we can also read them from the texture - # H: uint32_t f(WGPUTexture texture) - width = libf.wgpuTextureGetWidth(texture_id) - # H: uint32_t f(WGPUTexture texture) - height = libf.wgpuTextureGetHeight(texture_id) - # H: uint32_t f(WGPUTexture texture) - depth = libf.wgpuTextureGetDepthOrArrayLayers(texture_id) - # H: uint32_t f(WGPUTexture texture) - mip_level_count = libf.wgpuTextureGetMipLevelCount(texture_id) - # H: uint32_t f(WGPUTexture texture) - sample_count = libf.wgpuTextureGetSampleCount(texture_id) - # H: WGPUTextureDimension f(WGPUTexture texture) - c_dim = libf.wgpuTextureGetDimension(texture_id) # -> to string - dimension = enum_int2str["TextureDimension"][c_dim] - # H: WGPUTextureFormat f(WGPUTexture texture) - c_format = libf.wgpuTextureGetFormat(texture_id) - format = enum_int2str["TextureFormat"][c_format] - # H: WGPUTextureUsageFlags f(WGPUTexture texture) - usage = libf.wgpuTextureGetUsage(texture_id) - - label = "" - # Cannot yet set label, because it's not implemented in wgpu-native - # label = "surface-texture" - # H: void f(WGPUTexture texture, char const * label) - # libf.wgpuTextureSetLabel(texture_id, to_c_label(label)) - - tex_info = { - "size": (width, height, depth), - "mip_level_count": mip_level_count, - "sample_count": sample_count, - "dimension": dimension, - "format": format, - "usage": usage, - } - - self._texture = GPUTexture(label, texture_id, 
self._device, tex_info) - return self._texture - - def present(self): - if not self._texture: - msg = "present() is called without a preceeding call to " - msg += "get_current_texture(). Note that present() is usually " - msg += "called automatically after the draw function returns." - raise RuntimeError(msg) - else: - # Present the texture, then destroy it - # H: void f(WGPUSurface surface) - libf.wgpuSurfacePresent(self._get_surface_id()) - self._destroy_texture() - - def get_preferred_format(self, adapter): - # H: WGPUTextureFormat f(WGPUSurface surface, WGPUAdapter adapter) - format = libf.wgpuSurfaceGetPreferredFormat( - self._get_surface_id(), adapter._internal - ) - return enum_int2str["TextureFormat"][format] - - def _destroy(self): - self._destroy_texture() - if self._surface_id is not None and libf is not None: - self._surface_id, surface_id = None, self._surface_id - # H: void f(WGPUSurface surface) - libf.wgpuSurfaceRelease(surface_id) - - -class GPUObjectBase(classes.GPUObjectBase): - pass - - -class GPUAdapterInfo(classes.GPUAdapterInfo): - pass - - -class GPUAdapter(classes.GPUAdapter): - def request_device( - self, - *, - label="", - required_features: "List[enums.FeatureName]" = [], - required_limits: "Dict[str, int]" = {}, - default_queue: "structs.QueueDescriptor" = {}, - ): - if default_queue: - check_struct("QueueDescriptor", default_queue) - return self._request_device( - label, required_features, required_limits, default_queue, "" - ) - - def _request_device( - self, label, required_features, required_limits, default_queue, trace_path - ): - # ---- Handle features - - assert isinstance(required_features, (tuple, list, set)) - - c_features = set() - for f in required_features: - if isinstance(f, str): - if "_" in f: - f = "".join(x.title() for x in f.split("_")) - i1 = enummap.get(f"FeatureName.{f}", None) - i2 = getattr(lib, f"WGPUNativeFeature_{f}", None) - i = i2 if i1 is None else i1 - if i is None: # pragma: no cover - raise KeyError(f"Unknown feature: '{f}'") - c_features.add(i) - else: - raise TypeError("Features must be given as str.") - - c_features = sorted(c_features) # makes it a list - - # ----- Set limits - - # H: nextInChain: WGPUChainedStruct *, limits: WGPULimits - c_required_limits = new_struct_p( - "WGPURequiredLimits *", - # not used: nextInChain - # not used: limits - ) - c_limits = c_required_limits.limits - - # Set all limits to the adapter default - # This is important, because zero does NOT mean default, and a limit of zero - # for a specific limit may break a lot of applications. 
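Because a zero limit does not mean "default", the code below seeds the required limits with the adapter's own values and only then applies the caller's overrides. From the caller's perspective that means only the limits of interest need to be passed, using snake_case names. A sketch; the specific limit name, value and feature are illustrative, and valid keys are those reported in adapter.limits:

    adapter = wgpu.gpu.request_adapter(power_preference="high-performance")
    device = adapter.request_device(
        required_features=[],  # e.g. ["texture-compression-bc"] when the adapter reports it
        required_limits={"max_buffer_size": 1024 * 1024 * 1024},
    )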
- for key, val in self.limits.items(): - setattr(c_limits, to_camel_case(key), val) - - # Overload with any set limits - required_limits = required_limits or {} - for key, val in required_limits.items(): - setattr(c_limits, to_camel_case(key), val) - - # ---- Set queue descriptor - - # Note that the default_queue arg is a descriptor (dict for QueueDescriptor), but is currently empty :) - # H: nextInChain: WGPUChainedStruct *, label: char * - queue_struct = new_struct( - "WGPUQueueDescriptor", - label=to_c_label("default_queue"), - # not used: nextInChain - ) - - # ----- Compose device descriptor extras - - c_trace_path = ffi.NULL - if trace_path: # no-cover - c_trace_path = ffi.new("char []", trace_path.encode()) - - # H: chain: WGPUChainedStruct, tracePath: char * - extras = new_struct_p( - "WGPUDeviceExtras *", - tracePath=c_trace_path, - # not used: chain - ) - extras.chain.sType = lib.WGPUSType_DeviceExtras - - # ----- Device lost - - @ffi.callback("void(WGPUDeviceLostReason, char *, void *)") - def device_lost_callback(c_reason, c_message, userdata): - reason = enum_int2str["DeviceLostReason"].get(c_reason, "Unknown") - message = ffi.string(c_message).decode(errors="ignore") - error_handler.log_error(f"The WGPU device was lost ({reason}):\n{message}") - - # Keep the ref alive - self._device_lost_callback = device_lost_callback - - # ----- Request device - - # H: nextInChain: WGPUChainedStruct *, label: char *, requiredFeatureCount: int, requiredFeatures: WGPUFeatureName *, requiredLimits: WGPURequiredLimits *, defaultQueue: WGPUQueueDescriptor, deviceLostCallback: WGPUDeviceLostCallback, deviceLostUserdata: void * - struct = new_struct_p( - "WGPUDeviceDescriptor *", - label=to_c_label(label), - nextInChain=ffi.cast("WGPUChainedStruct * ", extras), - requiredFeatureCount=len(c_features), - requiredFeatures=ffi.new("WGPUFeatureName []", c_features), - requiredLimits=c_required_limits, - defaultQueue=queue_struct, - deviceLostCallback=device_lost_callback, - # not used: deviceLostUserdata - ) - - device_id = None - error_msg = None - - @ffi.callback("void(WGPURequestDeviceStatus, WGPUDevice, char *, void *)") - def callback(status, result, message, userdata): - if status != 0: - nonlocal error_msg - msg = "-" if message == ffi.NULL else ffi.string(message).decode() - error_msg = f"Request device failed ({status}): {msg}" - else: - nonlocal device_id - device_id = result - - # H: void f(WGPUAdapter adapter, WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallback callback, void * userdata) - libf.wgpuAdapterRequestDevice(self._internal, struct, callback, ffi.NULL) - - if device_id is None: # pragma: no cover - error_msg = error_msg or "Could not obtain new device id." 
- raise RuntimeError(error_msg) - - # ----- Get device limits - - # H: nextInChain: WGPUChainedStructOut *, limits: WGPULimits - c_supported_limits = new_struct_p( - "WGPUSupportedLimits *", - # not used: nextInChain - # not used: limits - ) - c_limits = c_supported_limits.limits - # H: WGPUBool f(WGPUDevice device, WGPUSupportedLimits * limits) - libf.wgpuDeviceGetLimits(device_id, c_supported_limits) - limits = {to_snake_case(k): getattr(c_limits, k) for k in dir(c_limits)} - - # ----- Get device features - - # WebGPU features - features = set() - for f in sorted(enums.FeatureName): - key = f"FeatureName.{f}" - i = enummap[key] - # H: WGPUBool f(WGPUDevice device, WGPUFeatureName feature) - if libf.wgpuDeviceHasFeature(device_id, i): - features.add(f) - - # Native features - for f in NATIVE_FEATURES: - i = getattr(lib, f"WGPUNativeFeature_{f}") - # H: WGPUBool f(WGPUDevice device, WGPUFeatureName feature) - if libf.wgpuDeviceHasFeature(device_id, i): - features.add(f) - - # ---- Get queue - - # H: WGPUQueue f(WGPUDevice device) - queue_id = libf.wgpuDeviceGetQueue(device_id) - queue = GPUQueue("", queue_id, None) - - # ----- Done - - return GPUDevice(label, device_id, self, features, limits, queue) - - async def request_device_async( - self, - *, - label="", - required_features: "List[enums.FeatureName]" = [], - required_limits: "Dict[str, int]" = {}, - default_queue: "structs.QueueDescriptor" = {}, - ): - if default_queue: - check_struct("QueueDescriptor", default_queue) - return self._request_device( - label, required_features, required_limits, default_queue, "" - ) # no-cover - - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUAdapter adapter) - libf.wgpuAdapterRelease(internal) - - -class GPUDevice(classes.GPUDevice, GPUObjectBase): - def __init__(self, label, internal, adapter, features, limits, queue): - super().__init__(label, internal, adapter, features, limits, queue) - - @ffi.callback("void(WGPUErrorType, char *, void *)") - def uncaptured_error_callback(c_type, c_message, userdata): - error_type = enum_int2str["ErrorType"].get(c_type, "Unknown") - message = ffi.string(c_message).decode(errors="ignore") - message = "\n".join(line.rstrip() for line in message.splitlines()) - error_handler.handle_error(error_type, message) - - # Keep the ref alive - self._uncaptured_error_callback = uncaptured_error_callback - - # H: void f(WGPUDevice device, WGPUErrorCallback callback, void * userdata) - libf.wgpuDeviceSetUncapturedErrorCallback( - self._internal, uncaptured_error_callback, ffi.NULL - ) - - def _poll(self): - # Internal function - if self._internal: - # H: WGPUBool f(WGPUDevice device, WGPUBool wait, WGPUWrappedSubmissionIndex const * wrappedSubmissionIndex) - libf.wgpuDevicePoll(self._internal, True, ffi.NULL) - - def create_buffer( - self, - *, - label="", - size: int, - usage: "flags.BufferUsage", - mapped_at_creation: bool = False, - ): - return self._create_buffer(label, int(size), usage, bool(mapped_at_creation)) - - def _create_buffer(self, label, size, usage, mapped_at_creation): - # Create a buffer object - if isinstance(usage, str): - usage = str_flag_to_int(flags.BufferUsage, usage) - # H: nextInChain: WGPUChainedStruct *, label: char *, usage: WGPUBufferUsageFlags/int, size: int, mappedAtCreation: WGPUBool/int - struct = new_struct_p( - "WGPUBufferDescriptor *", - label=to_c_label(label), - size=size, - usage=int(usage), - mappedAtCreation=mapped_at_creation, - # not used: 
nextInChain - ) - map_state = ( - enums.BufferMapState.mapped - if mapped_at_creation - else enums.BufferMapState.unmapped - ) - # H: WGPUBuffer f(WGPUDevice device, WGPUBufferDescriptor const * descriptor) - id = libf.wgpuDeviceCreateBuffer(self._internal, struct) - # Note that there is wgpuBufferGetSize and wgpuBufferGetUsage, - # but we already know these, so they are kindof useless? - # Return wrapped buffer - return GPUBuffer(label, id, self, size, usage, map_state) - - def create_texture( - self, - *, - label="", - size: "Union[List[int], structs.Extent3D]", - mip_level_count: int = 1, - sample_count: int = 1, - dimension: "enums.TextureDimension" = "2d", - format: "enums.TextureFormat", - usage: "flags.TextureUsage", - view_formats: "List[enums.TextureFormat]" = [], - ): - if isinstance(usage, str): - usage = str_flag_to_int(flags.TextureUsage, usage) - usage = int(usage) - size = _tuple_from_tuple_or_dict( - size, ("width", "height", "depth_or_array_layers") - ) - # H: width: int, height: int, depthOrArrayLayers: int - c_size = new_struct( - "WGPUExtent3D", - width=size[0], - height=size[1], - depthOrArrayLayers=size[2], - ) - - if view_formats: - raise NotImplementedError( - "create_texture(.. view_formats is not yet supported." - ) - - if not mip_level_count: - mip_level_count = 1 # or lib.WGPU_MIP_LEVEL_COUNT_UNDEFINED ? - mip_level_count = int(mip_level_count) - - if not sample_count: - sample_count = 1 - sample_count = int(sample_count) - - # H: nextInChain: WGPUChainedStruct *, label: char *, usage: WGPUTextureUsageFlags/int, dimension: WGPUTextureDimension, size: WGPUExtent3D, format: WGPUTextureFormat, mipLevelCount: int, sampleCount: int, viewFormatCount: int, viewFormats: WGPUTextureFormat * - struct = new_struct_p( - "WGPUTextureDescriptor *", - label=to_c_label(label), - size=c_size, - mipLevelCount=mip_level_count, - sampleCount=sample_count, - dimension=dimension, - format=format, - usage=usage, - # not used: nextInChain - # not used: viewFormatCount - # not used: viewFormats - ) - # H: WGPUTexture f(WGPUDevice device, WGPUTextureDescriptor const * descriptor) - id = libf.wgpuDeviceCreateTexture(self._internal, struct) - - # Note that there are methods (e.g. wgpuTextureGetHeight) to get - # the below props, but we know them now, so why bother? 
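A short usage sketch for create_texture(); note that size may be a tuple or an Extent3D-like dict, and that usage may be an int flag or a string that gets parsed by str_flag_to_int (device is assumed to come from an earlier request_device() call):

    texture = device.create_texture(
        label="example-texture",
        size=(256, 256, 1),  # or {"width": 256, "height": 256, "depth_or_array_layers": 1}
        format=wgpu.TextureFormat.rgba8unorm,
        usage="COPY_DST|TEXTURE_BINDING",  # string flags are converted internally
    )
    view = texture.create_view()  # view defaults are derived from the texture itself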
- tex_info = { - "size": size, - "mip_level_count": mip_level_count, - "sample_count": sample_count, - "dimension": dimension, - "format": format, - "usage": usage, - } - return GPUTexture(label, id, self, tex_info) - - def create_sampler( - self, - *, - label="", - address_mode_u: "enums.AddressMode" = "clamp-to-edge", - address_mode_v: "enums.AddressMode" = "clamp-to-edge", - address_mode_w: "enums.AddressMode" = "clamp-to-edge", - mag_filter: "enums.FilterMode" = "nearest", - min_filter: "enums.FilterMode" = "nearest", - mipmap_filter: "enums.MipmapFilterMode" = "nearest", - lod_min_clamp: float = 0, - lod_max_clamp: float = 32, - compare: "enums.CompareFunction" = None, - max_anisotropy: int = 1, - ): - # H: nextInChain: WGPUChainedStruct *, label: char *, addressModeU: WGPUAddressMode, addressModeV: WGPUAddressMode, addressModeW: WGPUAddressMode, magFilter: WGPUFilterMode, minFilter: WGPUFilterMode, mipmapFilter: WGPUMipmapFilterMode, lodMinClamp: float, lodMaxClamp: float, compare: WGPUCompareFunction, maxAnisotropy: int - struct = new_struct_p( - "WGPUSamplerDescriptor *", - label=to_c_label(label), - addressModeU=address_mode_u, - addressModeV=address_mode_v, - addressModeW=address_mode_w, - magFilter=mag_filter, - minFilter=min_filter, - mipmapFilter=mipmap_filter, - lodMinClamp=lod_min_clamp, - lodMaxClamp=lod_max_clamp, - compare=0 if compare is None else compare, - maxAnisotropy=max_anisotropy, - # not used: nextInChain - ) - - # H: WGPUSampler f(WGPUDevice device, WGPUSamplerDescriptor const * descriptor) - id = libf.wgpuDeviceCreateSampler(self._internal, struct) - return GPUSampler(label, id, self) - - def create_bind_group_layout( - self, *, label="", entries: "List[structs.BindGroupLayoutEntry]" - ): - c_entries_list = [] - for entry in entries: - check_struct("BindGroupLayoutEntry", entry) - buffer = {} - sampler = {} - texture = {} - storage_texture = {} - if entry.get("buffer"): - info = entry["buffer"] - check_struct("BufferBindingLayout", info) - min_binding_size = info.get("min_binding_size", None) - if min_binding_size is None: - min_binding_size = 0 # lib.WGPU_LIMIT_U64_UNDEFINED - # H: nextInChain: WGPUChainedStruct *, type: WGPUBufferBindingType, hasDynamicOffset: WGPUBool/int, minBindingSize: int - buffer = new_struct( - "WGPUBufferBindingLayout", - type=info["type"], - hasDynamicOffset=info.get("has_dynamic_offset", False), - minBindingSize=min_binding_size, - # not used: nextInChain - ) - elif entry.get("sampler"): - info = entry["sampler"] - check_struct("SamplerBindingLayout", info) - # H: nextInChain: WGPUChainedStruct *, type: WGPUSamplerBindingType - sampler = new_struct( - "WGPUSamplerBindingLayout", - type=info["type"], - # not used: nextInChain - ) - elif entry.get("texture"): - info = entry["texture"] - check_struct("TextureBindingLayout", info) - # H: nextInChain: WGPUChainedStruct *, sampleType: WGPUTextureSampleType, viewDimension: WGPUTextureViewDimension, multisampled: WGPUBool/int - texture = new_struct( - "WGPUTextureBindingLayout", - sampleType=info.get("sample_type", "float"), - viewDimension=info.get("view_dimension", "2d"), - multisampled=info.get("multisampled", False), - # not used: nextInChain - ) - elif entry.get("storage_texture"): - info = entry["storage_texture"] - check_struct("StorageTextureBindingLayout", info) - # H: nextInChain: WGPUChainedStruct *, access: WGPUStorageTextureAccess, format: WGPUTextureFormat, viewDimension: WGPUTextureViewDimension - storage_texture = new_struct( - "WGPUStorageTextureBindingLayout", - 
access=info["access"], - viewDimension=info.get("view_dimension", "2d"), - format=info["format"], - # not used: nextInChain - ) - else: - raise ValueError( - "Bind group layout entry did not contain field 'buffer', 'sampler', 'texture', nor 'storage_texture'" - ) - # Unreachable - fool the codegen - check_struct("ExternalTextureBindingLayout", info) - visibility = entry["visibility"] - if isinstance(visibility, str): - visibility = str_flag_to_int(flags.ShaderStage, visibility) - # H: nextInChain: WGPUChainedStruct *, binding: int, visibility: WGPUShaderStageFlags/int, buffer: WGPUBufferBindingLayout, sampler: WGPUSamplerBindingLayout, texture: WGPUTextureBindingLayout, storageTexture: WGPUStorageTextureBindingLayout - c_entry = new_struct( - "WGPUBindGroupLayoutEntry", - binding=int(entry["binding"]), - visibility=int(visibility), - buffer=buffer, - sampler=sampler, - texture=texture, - storageTexture=storage_texture, - # not used: nextInChain - ) - c_entries_list.append(c_entry) - - c_entries_array = ffi.NULL - if c_entries_list: - c_entries_array = ffi.new("WGPUBindGroupLayoutEntry []", c_entries_list) - - # H: nextInChain: WGPUChainedStruct *, label: char *, entryCount: int, entries: WGPUBindGroupLayoutEntry * - struct = new_struct_p( - "WGPUBindGroupLayoutDescriptor *", - label=to_c_label(label), - entries=c_entries_array, - entryCount=len(c_entries_list), - # not used: nextInChain - ) - - # Note: wgpu-core re-uses BindGroupLayouts with the same (or similar - # enough) descriptor. You would think that this means that the id is - # the same when you call wgpuDeviceCreateBindGroupLayout with the same - # input, but it's not. So we cannot let wgpu-native/core decide when - # to re-use a BindGroupLayout. I don't feel confident checking here - # whether a BindGroupLayout can be re-used, so we simply don't. Higher - # level code can sometimes make this decision because it knows the app - # logic. 
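The entry dicts consumed above correspond one-to-one with what a caller passes in. A sketch that declares a single uniform-buffer binding and then binds an existing buffer to it (uniform_buffer is assumed to have been created earlier with UNIFORM usage):

    bind_group_layout = device.create_bind_group_layout(
        entries=[
            {
                "binding": 0,
                "visibility": wgpu.ShaderStage.COMPUTE,  # may also be a string like "COMPUTE"
                "buffer": {"type": wgpu.BufferBindingType.uniform},
            },
        ]
    )
    pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[bind_group_layout])
    bind_group = device.create_bind_group(
        layout=bind_group_layout,
        entries=[
            {
                "binding": 0,
                "resource": {"buffer": uniform_buffer, "offset": 0, "size": uniform_buffer.size},
            },
        ],
    )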
- - # H: WGPUBindGroupLayout f(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor) - id = libf.wgpuDeviceCreateBindGroupLayout(self._internal, struct) - return GPUBindGroupLayout(label, id, self, entries) - - def create_bind_group( - self, - *, - label="", - layout: "GPUBindGroupLayout", - entries: "List[structs.BindGroupEntry]", - ): - c_entries_list = [] - for entry in entries: - check_struct("BindGroupEntry", entry) - # The resource can be a sampler, texture view, or buffer descriptor - resource = entry["resource"] - if isinstance(resource, GPUSampler): - # H: nextInChain: WGPUChainedStruct *, binding: int, buffer: WGPUBuffer, offset: int, size: int, sampler: WGPUSampler, textureView: WGPUTextureView - c_entry = new_struct( - "WGPUBindGroupEntry", - binding=int(entry["binding"]), - buffer=ffi.NULL, - offset=0, - size=0, - sampler=resource._internal, - textureView=ffi.NULL, - # not used: nextInChain - ) - elif isinstance(resource, GPUTextureView): - # H: nextInChain: WGPUChainedStruct *, binding: int, buffer: WGPUBuffer, offset: int, size: int, sampler: WGPUSampler, textureView: WGPUTextureView - c_entry = new_struct( - "WGPUBindGroupEntry", - binding=int(entry["binding"]), - buffer=ffi.NULL, - offset=0, - size=0, - sampler=ffi.NULL, - textureView=resource._internal, - # not used: nextInChain - ) - elif isinstance(resource, dict): # Buffer binding - # H: nextInChain: WGPUChainedStruct *, binding: int, buffer: WGPUBuffer, offset: int, size: int, sampler: WGPUSampler, textureView: WGPUTextureView - c_entry = new_struct( - "WGPUBindGroupEntry", - binding=int(entry["binding"]), - buffer=resource["buffer"]._internal, - offset=resource["offset"], - size=resource["size"], - sampler=ffi.NULL, - textureView=ffi.NULL, - # not used: nextInChain - ) - else: - raise TypeError(f"Unexpected resource type {type(resource)}") - c_entries_list.append(c_entry) - - c_entries_array = ffi.NULL - if c_entries_list: - c_entries_array = ffi.new("WGPUBindGroupEntry []", c_entries_list) - - # H: nextInChain: WGPUChainedStruct *, label: char *, layout: WGPUBindGroupLayout, entryCount: int, entries: WGPUBindGroupEntry * - struct = new_struct_p( - "WGPUBindGroupDescriptor *", - label=to_c_label(label), - layout=layout._internal, - entries=c_entries_array, - entryCount=len(c_entries_list), - # not used: nextInChain - ) - - # H: WGPUBindGroup f(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor) - id = libf.wgpuDeviceCreateBindGroup(self._internal, struct) - return GPUBindGroup(label, id, self, entries) - - def create_pipeline_layout( - self, *, label="", bind_group_layouts: "List[GPUBindGroupLayout]" - ): - bind_group_layouts_ids = [x._internal for x in bind_group_layouts] - - c_layout_array = ffi.new("WGPUBindGroupLayout []", bind_group_layouts_ids) - # H: nextInChain: WGPUChainedStruct *, label: char *, bindGroupLayoutCount: int, bindGroupLayouts: WGPUBindGroupLayout * - struct = new_struct_p( - "WGPUPipelineLayoutDescriptor *", - label=to_c_label(label), - bindGroupLayouts=c_layout_array, - bindGroupLayoutCount=len(bind_group_layouts), - # not used: nextInChain - ) - - # H: WGPUPipelineLayout f(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor) - id = libf.wgpuDeviceCreatePipelineLayout(self._internal, struct) - return GPUPipelineLayout(label, id, self, bind_group_layouts) - - def create_shader_module( - self, - *, - label="", - code: str, - source_map: dict = None, - compilation_hints: "List[structs.ShaderModuleCompilationHint]" = [], - ): - if compilation_hints: - for 
hint in compilation_hints.values(): - check_struct("ShaderModuleCompilationHint", hint) - if isinstance(code, str): - looks_like_wgsl = any( - x in code for x in ("@compute", "@vertex", "@fragment") - ) - looks_like_glsl = code.lstrip().startswith("#version ") - if looks_like_glsl and not looks_like_wgsl: - # === GLSL - if "comp" in label.lower(): - c_stage = flags.ShaderStage.COMPUTE - elif "vert" in label.lower(): - c_stage = flags.ShaderStage.VERTEX - elif "frag" in label.lower(): - c_stage = flags.ShaderStage.FRAGMENT - else: - raise ValueError( - "GLSL shader needs to use the label to specify compute/vertex/fragment stage." - ) - defines = [] - if c_stage == flags.ShaderStage.VERTEX: - defines.append( - # H: name: char *, value: char * - new_struct( - "WGPUShaderDefine", - name=ffi.new("char []", "gl_VertexID".encode()), - value=ffi.new("char []", "gl_VertexIndex".encode()), - ) - ) - c_defines = ffi.new("WGPUShaderDefine []", defines) - # H: chain: WGPUChainedStruct, stage: WGPUShaderStage, code: char *, defineCount: int, defines: WGPUShaderDefine * - source_struct = new_struct_p( - "WGPUShaderModuleGLSLDescriptor *", - code=ffi.new("char []", code.encode()), - stage=c_stage, - defineCount=len(defines), - defines=c_defines, - # not used: chain - ) - source_struct[0].chain.next = ffi.NULL - source_struct[0].chain.sType = lib.WGPUSType_ShaderModuleGLSLDescriptor - else: - # === WGSL - # H: chain: WGPUChainedStruct, code: char * - source_struct = new_struct_p( - "WGPUShaderModuleWGSLDescriptor *", - code=ffi.new("char []", code.encode()), - # not used: chain - ) - source_struct[0].chain.next = ffi.NULL - source_struct[0].chain.sType = lib.WGPUSType_ShaderModuleWGSLDescriptor - elif isinstance(code, bytes): - # === Spirv - data = code - # Validate - magic_nr = b"\x03\x02#\x07" # 0x7230203 - if data[:4] != magic_nr: - raise ValueError("Given shader data does not look like a SpirV module") - # From bytes to WGPUU32Array - data_u8 = ffi.new("uint8_t[]", data) - data_u32 = ffi.cast("uint32_t *", data_u8) - # H: chain: WGPUChainedStruct, codeSize: int, code: uint32_t * - source_struct = new_struct_p( - "WGPUShaderModuleSPIRVDescriptor *", - code=data_u32, - codeSize=len(data) // 4, - # not used: chain - ) - source_struct[0].chain.next = ffi.NULL - source_struct[0].chain.sType = lib.WGPUSType_ShaderModuleSPIRVDescriptor - else: - raise TypeError( - "Shader code must be str for WGSL or GLSL, or bytes for SpirV." 
- ) - - # Note, we could give hints here that specify entrypoint and pipelinelayout before compiling - # H: nextInChain: WGPUChainedStruct *, label: char *, hintCount: int, hints: WGPUShaderModuleCompilationHint * - struct = new_struct_p( - "WGPUShaderModuleDescriptor *", - label=to_c_label(label), - nextInChain=ffi.cast("WGPUChainedStruct *", source_struct), - hintCount=0, - hints=ffi.NULL, - ) - # H: WGPUShaderModule f(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor) - id = libf.wgpuDeviceCreateShaderModule(self._internal, struct) - if id == ffi.NULL: - raise RuntimeError("Shader module creation failed") - return GPUShaderModule(label, id, self) - - def create_compute_pipeline( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - compute: "structs.ProgrammableStage", - ): - check_struct("ProgrammableStage", compute) - # H: nextInChain: WGPUChainedStruct *, module: WGPUShaderModule, entryPoint: char *, constantCount: int, constants: WGPUConstantEntry * - c_compute_stage = new_struct( - "WGPUProgrammableStageDescriptor", - module=compute["module"]._internal, - entryPoint=ffi.new("char []", compute["entry_point"].encode()), - # not used: nextInChain - # not used: constantCount - # not used: constants - ) - - if isinstance(layout, GPUPipelineLayout): - layout_id = layout._internal - elif layout == enums.AutoLayoutMode.auto: - layout_id = ffi.NULL - else: - raise TypeError( - "create_compute_pipeline() 'layout' arg must be a GPUPipelineLayout or 'auto'" - ) - - # H: nextInChain: WGPUChainedStruct *, label: char *, layout: WGPUPipelineLayout, compute: WGPUProgrammableStageDescriptor - struct = new_struct_p( - "WGPUComputePipelineDescriptor *", - label=to_c_label(label), - layout=layout_id, - compute=c_compute_stage, - # not used: nextInChain - ) - # H: WGPUComputePipeline f(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor) - id = libf.wgpuDeviceCreateComputePipeline(self._internal, struct) - return GPUComputePipeline(label, id, self) - - async def create_compute_pipeline_async( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - compute: "structs.ProgrammableStage", - ): - return self.create_compute_pipeline(label=label, layout=layout, compute=compute) - - def create_render_pipeline( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - vertex: "structs.VertexState", - primitive: "structs.PrimitiveState" = {}, - depth_stencil: "structs.DepthStencilState" = None, - multisample: "structs.MultisampleState" = {}, - fragment: "structs.FragmentState" = None, - ): - depth_stencil = depth_stencil or {} - multisample = multisample or {} - primitive = primitive or {} - - check_struct("VertexState", vertex) - check_struct("DepthStencilState", depth_stencil) - check_struct("MultisampleState", multisample) - check_struct("PrimitiveState", primitive) - - c_vertex_buffer_layout_list = [] - for buffer_des in vertex["buffers"]: - c_attributes_list = [] - for attribute in buffer_des["attributes"]: - # H: format: WGPUVertexFormat, offset: int, shaderLocation: int - c_attribute = new_struct( - "WGPUVertexAttribute", - format=attribute["format"], - offset=attribute["offset"], - shaderLocation=attribute["shader_location"], - ) - c_attributes_list.append(c_attribute) - c_attributes_array = ffi.new("WGPUVertexAttribute []", c_attributes_list) - # H: arrayStride: int, stepMode: WGPUVertexStepMode, attributeCount: int, attributes: WGPUVertexAttribute * - c_vertex_buffer_descriptor = 
new_struct( - "WGPUVertexBufferLayout", - arrayStride=buffer_des["array_stride"], - stepMode=buffer_des.get("step_mode", "vertex"), - attributes=c_attributes_array, - attributeCount=len(c_attributes_list), - ) - c_vertex_buffer_layout_list.append(c_vertex_buffer_descriptor) - c_vertex_buffer_descriptors_array = ffi.new( - "WGPUVertexBufferLayout []", c_vertex_buffer_layout_list - ) - # H: nextInChain: WGPUChainedStruct *, module: WGPUShaderModule, entryPoint: char *, constantCount: int, constants: WGPUConstantEntry *, bufferCount: int, buffers: WGPUVertexBufferLayout * - c_vertex_state = new_struct( - "WGPUVertexState", - module=vertex["module"]._internal, - entryPoint=ffi.new("char []", vertex["entry_point"].encode()), - buffers=c_vertex_buffer_descriptors_array, - bufferCount=len(c_vertex_buffer_layout_list), - # not used: nextInChain - # not used: constantCount - # not used: constants - ) - - # H: nextInChain: WGPUChainedStruct *, topology: WGPUPrimitiveTopology, stripIndexFormat: WGPUIndexFormat, frontFace: WGPUFrontFace, cullMode: WGPUCullMode - c_primitive_state = new_struct( - "WGPUPrimitiveState", - topology=primitive["topology"], - stripIndexFormat=primitive.get("strip_index_format", 0), - frontFace=primitive.get("front_face", "ccw"), - cullMode=primitive.get("cull_mode", "none"), - # not used: nextInChain - ) - - c_depth_stencil_state = ffi.NULL - if depth_stencil: - if depth_stencil.get("format", None) is None: - raise ValueError("depth_stencil needs format") - stencil_front = depth_stencil.get("stencil_front", {}) - check_struct("StencilFaceState", stencil_front) - # H: compare: WGPUCompareFunction, failOp: WGPUStencilOperation, depthFailOp: WGPUStencilOperation, passOp: WGPUStencilOperation - c_stencil_front = new_struct( - "WGPUStencilFaceState", - compare=stencil_front.get("compare", "always"), - failOp=stencil_front.get("fail_op", "keep"), - depthFailOp=stencil_front.get("depth_fail_op", "keep"), - passOp=stencil_front.get("pass_op", "keep"), - ) - stencil_back = depth_stencil.get("stencil_back", {}) - check_struct("StencilFaceState", stencil_back) - # H: compare: WGPUCompareFunction, failOp: WGPUStencilOperation, depthFailOp: WGPUStencilOperation, passOp: WGPUStencilOperation - c_stencil_back = new_struct( - "WGPUStencilFaceState", - compare=stencil_back.get("compare", "always"), - failOp=stencil_back.get("fail_op", "keep"), - depthFailOp=stencil_back.get("depth_fail_op", "keep"), - passOp=stencil_back.get("pass_op", "keep"), - ) - # H: nextInChain: WGPUChainedStruct *, format: WGPUTextureFormat, depthWriteEnabled: WGPUBool/int, depthCompare: WGPUCompareFunction, stencilFront: WGPUStencilFaceState, stencilBack: WGPUStencilFaceState, stencilReadMask: int, stencilWriteMask: int, depthBias: int, depthBiasSlopeScale: float, depthBiasClamp: float - c_depth_stencil_state = new_struct_p( - "WGPUDepthStencilState *", - format=depth_stencil["format"], - depthWriteEnabled=bool(depth_stencil.get("depth_write_enabled", False)), - depthCompare=depth_stencil.get("depth_compare", "always"), - stencilFront=c_stencil_front, - stencilBack=c_stencil_back, - stencilReadMask=depth_stencil.get("stencil_read_mask", 0xFFFFFFFF), - stencilWriteMask=depth_stencil.get("stencil_write_mask", 0xFFFFFFFF), - depthBias=depth_stencil.get("depth_bias", 0), - depthBiasSlopeScale=depth_stencil.get("depth_bias_slope_scale", 0), - depthBiasClamp=depth_stencil.get("depth_bias_clamp", 0), - # not used: nextInChain - ) - - # H: nextInChain: WGPUChainedStruct *, count: int, mask: int, alphaToCoverageEnabled: 
WGPUBool/int - c_multisample_state = new_struct( - "WGPUMultisampleState", - count=multisample.get("count", 1), - mask=multisample.get("mask", 0xFFFFFFFF), - alphaToCoverageEnabled=multisample.get("alpha_to_coverage_enabled", False), - # not used: nextInChain - ) - - c_fragment_state = ffi.NULL - if fragment is not None: - c_color_targets_list = [] - for target in fragment["targets"]: - if not target.get("blend", None): - c_blend = ffi.NULL - else: - alpha_blend = _tuple_from_tuple_or_dict( - target["blend"]["alpha"], - ("src_factor", "dst_factor", "operation"), - ) - # H: operation: WGPUBlendOperation, srcFactor: WGPUBlendFactor, dstFactor: WGPUBlendFactor - c_alpha_blend = new_struct( - "WGPUBlendComponent", - srcFactor=alpha_blend[0], - dstFactor=alpha_blend[1], - operation=alpha_blend[2], - ) - color_blend = _tuple_from_tuple_or_dict( - target["blend"]["color"], - ("src_factor", "dst_factor", "operation"), - ) - # H: operation: WGPUBlendOperation, srcFactor: WGPUBlendFactor, dstFactor: WGPUBlendFactor - c_color_blend = new_struct( - "WGPUBlendComponent", - srcFactor=color_blend[0], - dstFactor=color_blend[1], - operation=color_blend[2], - ) - # H: color: WGPUBlendComponent, alpha: WGPUBlendComponent - c_blend = new_struct_p( - "WGPUBlendState *", - color=c_color_blend, - alpha=c_alpha_blend, - ) - # H: nextInChain: WGPUChainedStruct *, format: WGPUTextureFormat, blend: WGPUBlendState *, writeMask: WGPUColorWriteMaskFlags/int - c_color_state = new_struct( - "WGPUColorTargetState", - format=target["format"], - blend=c_blend, - writeMask=target.get("write_mask", 0xF), - # not used: nextInChain - ) - c_color_targets_list.append(c_color_state) - c_color_targets_array = ffi.new( - "WGPUColorTargetState []", c_color_targets_list - ) - check_struct("FragmentState", fragment) - # H: nextInChain: WGPUChainedStruct *, module: WGPUShaderModule, entryPoint: char *, constantCount: int, constants: WGPUConstantEntry *, targetCount: int, targets: WGPUColorTargetState * - c_fragment_state = new_struct_p( - "WGPUFragmentState *", - module=fragment["module"]._internal, - entryPoint=ffi.new("char []", fragment["entry_point"].encode()), - targets=c_color_targets_array, - targetCount=len(c_color_targets_list), - # not used: nextInChain - # not used: constantCount - # not used: constants - ) - - # H: nextInChain: WGPUChainedStruct *, label: char *, layout: WGPUPipelineLayout, vertex: WGPUVertexState, primitive: WGPUPrimitiveState, depthStencil: WGPUDepthStencilState *, multisample: WGPUMultisampleState, fragment: WGPUFragmentState * - struct = new_struct_p( - "WGPURenderPipelineDescriptor *", - label=to_c_label(label), - layout=layout._internal, - vertex=c_vertex_state, - primitive=c_primitive_state, - depthStencil=c_depth_stencil_state, - multisample=c_multisample_state, - fragment=c_fragment_state, - # not used: nextInChain - ) - - # H: WGPURenderPipeline f(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor) - id = libf.wgpuDeviceCreateRenderPipeline(self._internal, struct) - return GPURenderPipeline(label, id, self) - - async def create_render_pipeline_async( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - vertex: "structs.VertexState", - primitive: "structs.PrimitiveState" = {}, - depth_stencil: "structs.DepthStencilState" = None, - multisample: "structs.MultisampleState" = {}, - fragment: "structs.FragmentState" = None, - ): - return self.create_render_pipeline( - label=label, - layout=layout, - vertex=vertex, - primitive=primitive, - 
depth_stencil=depth_stencil, - multisample=multisample, - fragment=fragment, - ) - - def create_command_encoder(self, *, label=""): - # H: nextInChain: WGPUChainedStruct *, label: char * - struct = new_struct_p( - "WGPUCommandEncoderDescriptor *", - label=to_c_label(label), - # not used: nextInChain - ) - - # H: WGPUCommandEncoder f(WGPUDevice device, WGPUCommandEncoderDescriptor const * descriptor) - id = libf.wgpuDeviceCreateCommandEncoder(self._internal, struct) - return GPUCommandEncoder(label, id, self) - - def create_render_bundle_encoder( - self, - *, - label="", - color_formats: "List[enums.TextureFormat]", - depth_stencil_format: "enums.TextureFormat" = None, - sample_count: int = 1, - depth_read_only: bool = False, - stencil_read_only: bool = False, - ): - raise NotImplementedError() - # Note: also enable the coresponing memtest when implementing this! - - def create_query_set(self, *, label="", type: "enums.QueryType", count: int): - # H: nextInChain: WGPUChainedStruct *, label: char *, type: WGPUQueryType, count: int - query_set_descriptor = new_struct_p( - "WGPUQuerySetDescriptor *", - label=to_c_label(label), - type=type, - count=count, - # not used: nextInChain - ) - - # H: WGPUQuerySet f(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor) - query_id = libf.wgpuDeviceCreateQuerySet(self._internal, query_set_descriptor) - return GPUQuerySet(label, query_id, self._internal, type, count) - - def _destroy(self): - if self._queue is not None: - self._queue._destroy() - self._queue = None - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUDevice device) - libf.wgpuDeviceRelease(internal) - # wgpuDeviceDestroy(internal) is also an option - - -class GPUBuffer(classes.GPUBuffer, GPUObjectBase): - def __init__(self, label, internal, device, size, usage, map_state): - super().__init__(label, internal, device, size, usage, map_state) - - self._mapped_status = 0, 0, 0 - self._mapped_memoryviews = [] - # If mapped at creation, set to write mode (no point in reading zeros) - if self._map_state == enums.BufferMapState.mapped: - self._mapped_status = 0, self.size, flags.MapMode.WRITE - - def _check_range(self, offset, size): - # Apply defaults - if offset is None: - offset = 0 - if self._mapped_status[2] != 0: - offset = self._mapped_status[0] - else: - offset = int(offset) - if size is None: - size = self.size - offset - if self._mapped_status[2] != 0: - size = self._mapped_status[1] - offset - else: - size = int(size) - # Checks - if offset < 0: - raise ValueError("Mapped offset must not be smaller than zero.") - if offset % 8: - raise ValueError("Mapped offset must be a multiple of 8.") - if size < 1: - raise ValueError("Mapped size must be larger than zero.") - if size % 4: - raise ValueError("Mapped offset must be a multiple of 4.") - if offset + size > self.size: - raise ValueError("Mapped range must not extend beyond total buffer size.") - return offset, size - - def map(self, mode, offset=0, size=None): - sync_on_read = True - - # Check mode - if isinstance(mode, str): - if mode == "READ_NOSYNC": # for internal use - sync_on_read = False - mode = "READ" - mode = str_flag_to_int(flags.MapMode, mode) - map_mode = int(mode) - - # Check offset and size - offset, size = self._check_range(offset, size) - - # Can we even map? 
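The map/read_mapped/write_mapped methods below enforce that offsets are multiples of 8, sizes are multiples of 4, and the requested range lies inside the currently mapped range. A round-trip sketch that uploads data through mapped_at_creation and reads it back by re-mapping the buffer (usage given as a string, which the code accepts; device is assumed to exist):

    data = bytes(range(64))
    buf = device.create_buffer(
        size=len(data),
        usage="MAP_READ|COPY_DST",
        mapped_at_creation=True,  # starts out mapped in write mode
    )
    buf.write_mapped(data)
    buf.unmap()

    buf.map("READ")  # map modes may be passed as strings too
    readback = buf.read_mapped()  # returns a copy; use copy=False for a view on mapped memory
    buf.unmap()
    assert bytes(readback) == data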
- if self._map_state != enums.BufferMapState.unmapped: - raise RuntimeError("Can only map a buffer if its currently unmapped.") - - # Sync up when reading, otherwise the memory may be all zeros. - # See https://github.com/gfx-rs/wgpu-native/issues/305 - if sync_on_read and map_mode & lib.WGPUMapMode_Read: - if self._mapped_status[2] == 0 and self._usage & flags.BufferUsage.MAP_READ: - encoder = self._device.create_command_encoder() - self._device.queue.submit([encoder.finish()]) - - status = 999 - - @ffi.callback("void(WGPUBufferMapAsyncStatus, void*)") - def callback(status_, user_data_p): - nonlocal status - status = status_ - - # Map it - self._map_state = enums.BufferMapState.pending - # H: void f(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata) - libf.wgpuBufferMapAsync( - self._internal, map_mode, offset, size, callback, ffi.NULL - ) - - # Let it do some cycles - self._device._poll() - - if status != 0: # no-cover - raise RuntimeError(f"Could not map buffer ({status}).") - self._map_state = enums.BufferMapState.mapped - self._mapped_status = offset, offset + size, mode - self._mapped_memoryviews = [] - - async def map_async(self, mode, offset=0, size=None): - return self.map(mode, offset, size) # for now - - def unmap(self): - if self._map_state != enums.BufferMapState.mapped: - raise RuntimeError("Can only unmap a buffer if its currently mapped.") - # H: void f(WGPUBuffer buffer) - libf.wgpuBufferUnmap(self._internal) - self._map_state = enums.BufferMapState.unmapped - self._mapped_status = 0, 0, 0 - self._release_memoryviews() - - def _release_memoryviews(self): - # Release the mapped memoryview objects. These objects - # themselves become unusable, but any views on them do not. - for m in self._mapped_memoryviews: - try: - m.release() - except Exception: # no-cover - pass - self._mapped_memoryviews = [] - - def read_mapped(self, buffer_offset=None, size=None, *, copy=True): - # Can we even read? - if self._map_state != enums.BufferMapState.mapped: - raise RuntimeError("Can only read from a buffer if its mapped.") - elif not (self._mapped_status[2] & flags.MapMode.READ): - raise RuntimeError( - "Can only read from a buffer if its mapped in read mode." - ) - - # Check offset and size - offset, size = self._check_range(buffer_offset, size) - if offset < self._mapped_status[0] or (offset + size) > self._mapped_status[1]: - raise ValueError( - "The range for buffer reading is not contained in the currently mapped range." - ) - - # Get mapped memoryview. - # H: void * f(WGPUBuffer buffer, size_t offset, size_t size) - src_ptr = libf.wgpuBufferGetMappedRange(self._internal, offset, size) - src_address = int(ffi.cast("intptr_t", src_ptr)) - src_m = get_memoryview_from_address(src_address, size) - - if copy: - # Copy the data. The memoryview created above becomes invalid when the buffer - # is unmapped, so we don't want to pass that memory to the user. - data = memoryview((ctypes.c_uint8 * size)()).cast("B") - data[:] = src_m - return data - else: - # Return view on the actually mapped data - data = src_m - if hasattr(data, "toreadonly"): # Py 3.8+ - data = data.toreadonly() - self._mapped_memoryviews.append(data) - return data - - def write_mapped(self, data, buffer_offset=None, size=None): - # Can we even write? 
- if self._map_state != enums.BufferMapState.mapped:
- raise RuntimeError("Can only write to a buffer if it's mapped.")
- elif not (self._mapped_status[2] & flags.MapMode.WRITE):
- raise RuntimeError(
- "Can only write to a buffer if it's mapped in write mode."
- )
-
- # Cast data to a memoryview. This also works for e.g. numpy arrays,
- # and the resulting memoryview will be a view on the data.
- data = memoryview(data).cast("B")
-
- # Check offset and size
- if size is None:
- size = data.nbytes
- offset, size = self._check_range(buffer_offset, size)
- if offset < self._mapped_status[0] or (offset + size) > self._mapped_status[1]:
- raise ValueError(
- "The range for buffer writing is not contained in the currently mapped range."
- )
-
- # Check data size and given size. If the latter was given, it should match!
- if data.nbytes != size: # no-cover
- raise ValueError(
- "Data passed to GPUBuffer.write_mapped() does not match the given size."
- )
-
- # Get mapped memoryview
- # H: void * f(WGPUBuffer buffer, size_t offset, size_t size)
- src_ptr = libf.wgpuBufferGetMappedRange(self._internal, offset, size)
- src_address = int(ffi.cast("intptr_t", src_ptr))
- src_m = get_memoryview_from_address(src_address, size)
-
- # Copy data
- src_m[:] = data
-
- def destroy(self):
- self._destroy() # no-cover
-
- def _destroy(self):
- self._release_memoryviews()
- if self._internal is not None and libf is not None:
- self._internal, internal = None, self._internal
- # H: void f(WGPUBuffer buffer)
- libf.wgpuBufferRelease(internal)
-
-
-class GPUTexture(classes.GPUTexture, GPUObjectBase):
- def create_view(
- self,
- *,
- label="",
- format: "enums.TextureFormat" = None,
- dimension: "enums.TextureViewDimension" = None,
- aspect: "enums.TextureAspect" = "all",
- base_mip_level: int = 0,
- mip_level_count: int = None,
- base_array_layer: int = 0,
- array_layer_count: int = None,
- ):
- # Resolve defaults
- if not format:
- format = self._tex_info["format"]
- if not dimension:
- dimension = self._tex_info["dimension"] # from create_texture
- if not aspect:
- aspect = "all"
- if not mip_level_count:
- mip_level_count = self._tex_info["mip_level_count"] - base_mip_level
- if not array_layer_count:
- if dimension in ("1d", "2d", "3d"):
- array_layer_count = 1 # or WGPU_ARRAY_LAYER_COUNT_UNDEFINED ?
- elif dimension == "cube": - array_layer_count = 6 - elif dimension in ("2d-array", "cube-array"): - array_layer_count = self._tex_info["size"][2] - base_array_layer - - # H: nextInChain: WGPUChainedStruct *, label: char *, format: WGPUTextureFormat, dimension: WGPUTextureViewDimension, baseMipLevel: int, mipLevelCount: int, baseArrayLayer: int, arrayLayerCount: int, aspect: WGPUTextureAspect - struct = new_struct_p( - "WGPUTextureViewDescriptor *", - label=to_c_label(label), - format=format, - dimension=dimension, - aspect=aspect, - baseMipLevel=base_mip_level, - mipLevelCount=mip_level_count, - baseArrayLayer=base_array_layer, - arrayLayerCount=array_layer_count, - # not used: nextInChain - ) - # H: WGPUTextureView f(WGPUTexture texture, WGPUTextureViewDescriptor const * descriptor) - id = libf.wgpuTextureCreateView(self._internal, struct) - return GPUTextureView(label, id, self._device, self, self.size) - - def destroy(self): - self._destroy() # no-cover - - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUTexture texture) - libf.wgpuTextureRelease(internal) - - -class GPUTextureView(classes.GPUTextureView, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUTextureView textureView) - libf.wgpuTextureViewRelease(internal) - - -class GPUSampler(classes.GPUSampler, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUSampler sampler) - libf.wgpuSamplerRelease(internal) - - -class GPUBindGroupLayout(classes.GPUBindGroupLayout, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUBindGroupLayout bindGroupLayout) - libf.wgpuBindGroupLayoutRelease(internal) - - -class GPUBindGroup(classes.GPUBindGroup, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUBindGroup bindGroup) - libf.wgpuBindGroupRelease(internal) - - -class GPUPipelineLayout(classes.GPUPipelineLayout, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUPipelineLayout pipelineLayout) - libf.wgpuPipelineLayoutRelease(internal) - - -class GPUShaderModule(classes.GPUShaderModule, GPUObjectBase): - def get_compilation_info(self): - # Here's a little setup to implement this method. Unfortunately, - # this is not yet implemented in wgpu-native. Another problem - # is that if there is an error in the shader source, we raise - # an exception, so the user never gets a GPUShaderModule object - # that can be used to call this method :/ So perhaps we should - # do this stuff in device.create_shader_module() and attach it - # to the exception that we raise? 
- - # info = None - # - # @ffi.callback("void(WGPUCompilationInfoRequestStatus, WGPUCompilationInfo*, void*)") - # def callback(status_, info_, userdata): - # if status_ == 0: - # nonlocal info - # info = info_ - # else: - # pass - # - # H: void f(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata) - # libf.wgpuShaderModuleGetCompilationInfo(self._internal, callback, ffi.NULL) - # - # self._device._poll() - # - # if info is None: - # raise RuntimeError("Could not obtain shader compilation info.") - # - # ... and then turn these WGPUCompilationInfoRequestStatus objects into Python objects ... - - return [] - - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUShaderModule shaderModule) - libf.wgpuShaderModuleRelease(internal) - - -class GPUPipelineBase(classes.GPUPipelineBase): - def get_bind_group_layout(self, index): - """Get the bind group layout at the given index.""" - if isinstance(self, GPUComputePipeline): - # H: WGPUBindGroupLayout f(WGPUComputePipeline computePipeline, uint32_t groupIndex) - layout_id = libf.wgpuComputePipelineGetBindGroupLayout( - self._internal, index - ) - else: - # H: WGPUBindGroupLayout f(WGPURenderPipeline renderPipeline, uint32_t groupIndex) - layout_id = libf.wgpuRenderPipelineGetBindGroupLayout(self._internal, index) - return GPUBindGroupLayout("", layout_id, self._device, []) - - -class GPUComputePipeline(classes.GPUComputePipeline, GPUPipelineBase, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUComputePipeline computePipeline) - libf.wgpuComputePipelineRelease(internal) - - -class GPURenderPipeline(classes.GPURenderPipeline, GPUPipelineBase, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPURenderPipeline renderPipeline) - libf.wgpuRenderPipelineRelease(internal) - - -class GPUCommandBuffer(classes.GPUCommandBuffer, GPUObjectBase): - def _destroy(self): - # Since command buffers get destroyed when you submit them, we - # must only release them if they've not been submitted, or we get - # 'Cannot remove a vacant resource'. Got this info from the - # wgpu chat. 
Also see - # https://docs.rs/wgpu-core/latest/src/wgpu_core/device/mod.rs.html#4180-4194 - # --> That's why _internal is set to None in Queue.submit() - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUCommandBuffer commandBuffer) - libf.wgpuCommandBufferRelease(internal) - - -class GPUCommandsMixin(classes.GPUCommandsMixin): - pass - - -class GPUBindingCommandsMixin(classes.GPUBindingCommandsMixin): - def set_bind_group( - self, - index, - bind_group, - dynamic_offsets_data, - dynamic_offsets_data_start, - dynamic_offsets_data_length, - ): - offsets = list(dynamic_offsets_data) - c_offsets = ffi.new("uint32_t []", offsets) - bind_group_id = bind_group._internal - if isinstance(self, GPUComputePassEncoder): - # H: void f(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) - libf.wgpuComputePassEncoderSetBindGroup( - self._internal, index, bind_group_id, len(offsets), c_offsets - ) - else: - # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) - libf.wgpuRenderPassEncoderSetBindGroup( - self._internal, - index, - bind_group_id, - len(offsets), - c_offsets, - ) - - -class GPUDebugCommandsMixin(classes.GPUDebugCommandsMixin): - def push_debug_group(self, group_label): - c_group_label = ffi.new("char []", group_label.encode()) - color = 0 - # todo: these functions are temporarily not available in wgpu-native - return # noqa - if isinstance(self, GPUComputePassEncoder): - # H: void f(WGPUComputePassEncoder computePassEncoder, char const * groupLabel) - libf.wgpuComputePassEncoderPushDebugGroup( - self._internal, c_group_label, color - ) - else: - # H: void f(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel) - libf.wgpuRenderPassEncoderPushDebugGroup( - self._internal, c_group_label, color - ) - - def pop_debug_group(self): - # todo: these functions are temporarily not available in wgpu-native - return # noqa - if isinstance(self, GPUComputePassEncoder): - # H: void f(WGPUComputePassEncoder computePassEncoder) - libf.wgpuComputePassEncoderPopDebugGroup(self._internal) - else: - # H: void f(WGPURenderPassEncoder renderPassEncoder) - libf.wgpuRenderPassEncoderPopDebugGroup(self._internal) - - def insert_debug_marker(self, marker_label): - c_marker_label = ffi.new("char []", marker_label.encode()) - color = 0 - # todo: these functions are temporarily not available in wgpu-native - return # noqa - if isinstance(self, GPUComputePassEncoder): - # H: void f(WGPUComputePassEncoder computePassEncoder, char const * markerLabel) - libf.wgpuComputePassEncoderInsertDebugMarker( - self._internal, c_marker_label, color - ) - else: - # H: void f(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel) - libf.wgpuRenderPassEncoderInsertDebugMarker( - self._internal, c_marker_label, color - ) - - -class GPURenderCommandsMixin(classes.GPURenderCommandsMixin): - def set_pipeline(self, pipeline): - pipeline_id = pipeline._internal - # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline) - libf.wgpuRenderPassEncoderSetPipeline(self._internal, pipeline_id) - - def set_index_buffer(self, buffer, index_format, offset=0, size=None): - if not size: - size = buffer.size - offset - c_index_format = enummap[f"IndexFormat.{index_format}"] - # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, 
WGPUIndexFormat format, uint64_t offset, uint64_t size) - libf.wgpuRenderPassEncoderSetIndexBuffer( - self._internal, buffer._internal, c_index_format, int(offset), int(size) - ) - - def set_vertex_buffer(self, slot, buffer, offset=0, size=None): - if not size: - size = buffer.size - offset - # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPUBuffer buffer, uint64_t offset, uint64_t size) - libf.wgpuRenderPassEncoderSetVertexBuffer( - self._internal, int(slot), buffer._internal, int(offset), int(size) - ) - - def draw(self, vertex_count, instance_count=1, first_vertex=0, first_instance=0): - # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) - libf.wgpuRenderPassEncoderDraw( - self._internal, vertex_count, instance_count, first_vertex, first_instance - ) - - def draw_indirect(self, indirect_buffer, indirect_offset): - buffer_id = indirect_buffer._internal - # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) - libf.wgpuRenderPassEncoderDrawIndirect( - self._internal, buffer_id, int(indirect_offset) - ) - - def draw_indexed( - self, - index_count, - instance_count=1, - first_index=0, - base_vertex=0, - first_instance=0, - ): - # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) - libf.wgpuRenderPassEncoderDrawIndexed( - self._internal, - index_count, - instance_count, - first_index, - base_vertex, - first_instance, - ) - - def draw_indexed_indirect(self, indirect_buffer, indirect_offset): - buffer_id = indirect_buffer._internal - # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) - libf.wgpuRenderPassEncoderDrawIndexedIndirect( - self._internal, buffer_id, int(indirect_offset) - ) - - -class GPUCommandEncoder( - classes.GPUCommandEncoder, GPUCommandsMixin, GPUDebugCommandsMixin, GPUObjectBase -): - def begin_compute_pass( - self, *, label="", timestamp_writes: "structs.ComputePassTimestampWrites" = None - ): - timestamp_writes_struct = ffi.NULL - if timestamp_writes is not None: - check_struct("ComputePassTimestampWrites", timestamp_writes) - # H: querySet: WGPUQuerySet, beginningOfPassWriteIndex: int, endOfPassWriteIndex: int - timestamp_writes_struct = new_struct_p( - "WGPUComputePassTimestampWrites *", - querySet=timestamp_writes["query_set"]._internal, - beginningOfPassWriteIndex=timestamp_writes[ - "beginning_of_pass_write_index" - ], - endOfPassWriteIndex=timestamp_writes["end_of_pass_write_index"], - ) - # H: nextInChain: WGPUChainedStruct *, label: char *, timestampWrites: WGPUComputePassTimestampWrites * - struct = new_struct_p( - "WGPUComputePassDescriptor *", - label=to_c_label(label), - timestampWrites=timestamp_writes_struct - # not used: nextInChain - ) - # H: WGPUComputePassEncoder f(WGPUCommandEncoder commandEncoder, WGPUComputePassDescriptor const * descriptor) - raw_pass = libf.wgpuCommandEncoderBeginComputePass(self._internal, struct) - return GPUComputePassEncoder(label, raw_pass, self) - - def begin_render_pass( - self, - *, - label="", - color_attachments: "List[structs.RenderPassColorAttachment]", - depth_stencil_attachment: "structs.RenderPassDepthStencilAttachment" = None, - occlusion_query_set: "GPUQuerySet" = None, - timestamp_writes: "structs.RenderPassTimestampWrites" = None, - max_draw_count: int = 50000000, - ): - # Note that 
occlusion_query_set is ignored because wgpu-native does not have it.
- if timestamp_writes is not None:
- check_struct("RenderPassTimestampWrites", timestamp_writes)
-
- objects_to_keep_alive = {}
-
- c_color_attachments_list = []
- for color_attachment in color_attachments:
- check_struct("RenderPassColorAttachment", color_attachment)
- texture_view = color_attachment["view"]
- if not isinstance(texture_view, GPUTextureView):
- raise TypeError("Color attachment view must be a GPUTextureView.")
- texture_view_id = texture_view._internal
- objects_to_keep_alive[texture_view_id] = texture_view
- c_resolve_target = (
- ffi.NULL
- if color_attachment.get("resolve_target", None) is None
- else color_attachment["resolve_target"]._internal
- ) # this is a TextureViewId or null
- clear_value = color_attachment.get("clear_value", (0, 0, 0, 0))
- if isinstance(clear_value, dict):
- check_struct("Color", clear_value)
- clear_value = _tuple_from_tuple_or_dict(clear_value, "rgba")
- # H: r: float, g: float, b: float, a: float
- c_clear_value = new_struct(
- "WGPUColor",
- r=clear_value[0],
- g=clear_value[1],
- b=clear_value[2],
- a=clear_value[3],
- )
- # H: nextInChain: WGPUChainedStruct *, view: WGPUTextureView, resolveTarget: WGPUTextureView, loadOp: WGPULoadOp, storeOp: WGPUStoreOp, clearValue: WGPUColor
- c_attachment = new_struct(
- "WGPURenderPassColorAttachment",
- view=texture_view_id,
- resolveTarget=c_resolve_target,
- loadOp=color_attachment["load_op"],
- storeOp=color_attachment["store_op"],
- clearValue=c_clear_value,
- # not used: resolveTarget
- # not used: nextInChain
- )
- c_color_attachments_list.append(c_attachment)
- c_color_attachments_array = ffi.new(
- "WGPURenderPassColorAttachment []", c_color_attachments_list
- )
-
- c_depth_stencil_attachment = ffi.NULL
- if depth_stencil_attachment is not None:
- check_struct("RenderPassDepthStencilAttachment", depth_stencil_attachment)
- depth_clear_value = depth_stencil_attachment.get("depth_clear_value", 0)
- stencil_clear_value = depth_stencil_attachment.get("stencil_clear_value", 0)
- # H: view: WGPUTextureView, depthLoadOp: WGPULoadOp, depthStoreOp: WGPUStoreOp, depthClearValue: float, depthReadOnly: WGPUBool/int, stencilLoadOp: WGPULoadOp, stencilStoreOp: WGPUStoreOp, stencilClearValue: int, stencilReadOnly: WGPUBool/int
- c_depth_stencil_attachment = new_struct_p(
- "WGPURenderPassDepthStencilAttachment *",
- view=depth_stencil_attachment["view"]._internal,
- depthLoadOp=depth_stencil_attachment["depth_load_op"],
- depthStoreOp=depth_stencil_attachment["depth_store_op"],
- depthClearValue=float(depth_clear_value),
- depthReadOnly=depth_stencil_attachment.get("depth_read_only", False),
- stencilLoadOp=depth_stencil_attachment["stencil_load_op"],
- stencilStoreOp=depth_stencil_attachment["stencil_store_op"],
- stencilClearValue=int(stencil_clear_value),
- stencilReadOnly=depth_stencil_attachment.get(
- "stencil_read_only", False
- ),
- )
-
- # H: nextInChain: WGPUChainedStruct *, label: char *, colorAttachmentCount: int, colorAttachments: WGPURenderPassColorAttachment *, depthStencilAttachment: WGPURenderPassDepthStencilAttachment *, occlusionQuerySet: WGPUQuerySet, timestampWrites: WGPURenderPassTimestampWrites *
- struct = new_struct_p(
- "WGPURenderPassDescriptor *",
- label=to_c_label(label),
- colorAttachments=c_color_attachments_array,
- colorAttachmentCount=len(c_color_attachments_list),
- depthStencilAttachment=c_depth_stencil_attachment,
- # not used: occlusionQuerySet
- # not used: timestampWrites
- # not used: nextInChain
- ) - - # H: WGPURenderPassEncoder f(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor) - raw_pass = libf.wgpuCommandEncoderBeginRenderPass(self._internal, struct) - encoder = GPURenderPassEncoder(label, raw_pass, self) - encoder._objects_to_keep_alive = objects_to_keep_alive - return encoder - - def clear_buffer(self, buffer, offset=0, size=None): - offset = int(offset) - if offset % 4 != 0: # pragma: no cover - raise ValueError("offset must be a multiple of 4") - if size is None: # pragma: no cover - size = buffer.size - offset - size = int(size) - if size <= 0: # pragma: no cover - raise ValueError("clear_buffer size must be > 0") - if size % 4 != 0: # pragma: no cover - raise ValueError("size must be a multiple of 4") - if offset + size > buffer.size: # pragma: no cover - raise ValueError("buffer size out of range") - # H: void f(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size) - libf.wgpuCommandEncoderClearBuffer( - self._internal, buffer._internal, int(offset), size - ) - - def copy_buffer_to_buffer( - self, source, source_offset, destination, destination_offset, size - ): - if source_offset % 4 != 0: # pragma: no cover - raise ValueError("source_offset must be a multiple of 4") - if destination_offset % 4 != 0: # pragma: no cover - raise ValueError("destination_offset must be a multiple of 4") - if size % 4 != 0: # pragma: no cover - raise ValueError("size must be a multiple of 4") - - if not isinstance(source, GPUBuffer): # pragma: no cover - raise TypeError("copy_buffer_to_buffer() source must be a GPUBuffer.") - if not isinstance(destination, GPUBuffer): # pragma: no cover - raise TypeError("copy_buffer_to_buffer() destination must be a GPUBuffer.") - # H: void f(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size) - libf.wgpuCommandEncoderCopyBufferToBuffer( - self._internal, - source._internal, - int(source_offset), - destination._internal, - int(destination_offset), - int(size), - ) - - def copy_buffer_to_texture(self, source, destination, copy_size): - row_alignment = 256 - bytes_per_row = int(source["bytes_per_row"]) - if (bytes_per_row % row_alignment) != 0: - raise ValueError( - f"bytes_per_row must ({bytes_per_row}) be a multiple of {row_alignment}" - ) - if isinstance(destination["texture"], GPUTextureView): - raise ValueError("copy destination texture must be a texture, not a view") - - size = _tuple_from_tuple_or_dict( - copy_size, ("width", "height", "depth_or_array_layers") - ) - - c_source = new_struct_p( - "WGPUImageCopyBuffer *", - buffer=source["buffer"]._internal, - # H: nextInChain: WGPUChainedStruct *, offset: int, bytesPerRow: int, rowsPerImage: int - layout=new_struct( - "WGPUTextureDataLayout", - offset=int(source.get("offset", 0)), - bytesPerRow=bytes_per_row, - rowsPerImage=int(source.get("rows_per_image", size[1])), - # not used: nextInChain - ), - ) - - ori = _tuple_from_tuple_or_dict(destination.get("origin", (0, 0, 0)), "xyz") - # H: x: int, y: int, z: int - c_origin = new_struct( - "WGPUOrigin3D", - x=ori[0], - y=ori[1], - z=ori[2], - ) - # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect - c_destination = new_struct_p( - "WGPUImageCopyTexture *", - texture=destination["texture"]._internal, - mipLevel=int(destination.get("mip_level", 0)), - origin=c_origin, - aspect=enums.TextureAspect.all, - # not used: nextInChain - ) - - 
# H: width: int, height: int, depthOrArrayLayers: int - c_copy_size = new_struct_p( - "WGPUExtent3D *", - width=size[0], - height=size[1], - depthOrArrayLayers=size[2], - ) - - # H: void f(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) - libf.wgpuCommandEncoderCopyBufferToTexture( - self._internal, - c_source, - c_destination, - c_copy_size, - ) - - def copy_texture_to_buffer(self, source, destination, copy_size): - row_alignment = 256 - bytes_per_row = int(destination["bytes_per_row"]) - if (bytes_per_row % row_alignment) != 0: - raise ValueError( - f"bytes_per_row must ({bytes_per_row}) be a multiple of {row_alignment}" - ) - if isinstance(source["texture"], GPUTextureView): - raise ValueError("copy source texture must be a texture, not a view") - - size = _tuple_from_tuple_or_dict( - copy_size, ("width", "height", "depth_or_array_layers") - ) - - ori = _tuple_from_tuple_or_dict(source.get("origin", (0, 0, 0)), "xyz") - # H: x: int, y: int, z: int - c_origin = new_struct( - "WGPUOrigin3D", - x=ori[0], - y=ori[1], - z=ori[2], - ) - # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect - c_source = new_struct_p( - "WGPUImageCopyTexture *", - texture=source["texture"]._internal, - mipLevel=int(source.get("mip_level", 0)), - origin=c_origin, - aspect=0, - # not used: nextInChain - ) - - c_destination = new_struct_p( - "WGPUImageCopyBuffer *", - buffer=destination["buffer"]._internal, - # H: nextInChain: WGPUChainedStruct *, offset: int, bytesPerRow: int, rowsPerImage: int - layout=new_struct( - "WGPUTextureDataLayout", - offset=int(destination.get("offset", 0)), - bytesPerRow=bytes_per_row, - rowsPerImage=int(destination.get("rows_per_image", size[1])), - # not used: nextInChain - ), - ) - - # H: width: int, height: int, depthOrArrayLayers: int - c_copy_size = new_struct_p( - "WGPUExtent3D *", - width=size[0], - height=size[1], - depthOrArrayLayers=size[2], - ) - - # H: void f(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize) - libf.wgpuCommandEncoderCopyTextureToBuffer( - self._internal, - c_source, - c_destination, - c_copy_size, - ) - - def copy_texture_to_texture(self, source, destination, copy_size): - if isinstance(source["texture"], GPUTextureView): - raise ValueError("copy source texture must be a texture, not a view") - if isinstance(destination["texture"], GPUTextureView): - raise ValueError("copy destination texture must be a texture, not a view") - - ori = _tuple_from_tuple_or_dict(source.get("origin", (0, 0, 0)), "xyz") - # H: x: int, y: int, z: int - c_origin1 = new_struct( - "WGPUOrigin3D", - x=ori[0], - y=ori[1], - z=ori[2], - ) - # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect - c_source = new_struct_p( - "WGPUImageCopyTexture *", - texture=source["texture"]._internal, - mipLevel=int(source.get("mip_level", 0)), - origin=c_origin1, - # not used: nextInChain - # not used: aspect - ) - - ori = _tuple_from_tuple_or_dict(destination.get("origin", (0, 0, 0)), "xyz") - # H: x: int, y: int, z: int - c_origin2 = new_struct( - "WGPUOrigin3D", - x=ori[0], - y=ori[1], - z=ori[2], - ) - # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect - c_destination = new_struct_p( - 
"WGPUImageCopyTexture *", - texture=destination["texture"]._internal, - mipLevel=int(destination.get("mip_level", 0)), - origin=c_origin2, - # not used: nextInChain - # not used: aspect - ) - - size = _tuple_from_tuple_or_dict( - copy_size, ("width", "height", "depth_or_array_layers") - ) - # H: width: int, height: int, depthOrArrayLayers: int - c_copy_size = new_struct_p( - "WGPUExtent3D *", - width=size[0], - height=size[1], - depthOrArrayLayers=size[2], - ) - - # H: void f(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) - libf.wgpuCommandEncoderCopyTextureToTexture( - self._internal, - c_source, - c_destination, - c_copy_size, - ) - - def finish(self, *, label=""): - # H: nextInChain: WGPUChainedStruct *, label: char * - struct = new_struct_p( - "WGPUCommandBufferDescriptor *", - label=to_c_label(label), - # not used: nextInChain - ) - # H: WGPUCommandBuffer f(WGPUCommandEncoder commandEncoder, WGPUCommandBufferDescriptor const * descriptor) - id = libf.wgpuCommandEncoderFinish(self._internal, struct) - return GPUCommandBuffer(label, id, self) - - def resolve_query_set( - self, query_set, first_query, query_count, destination, destination_offset - ): - # H: void f(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset) - libf.wgpuCommandEncoderResolveQuerySet( - self._internal, - query_set._internal, - int(first_query), - int(query_count), - destination._internal, - int(destination_offset), - ) - - def _destroy(self): - # Note that the native object gets destroyed on finish. - # Also see GPUCommandBuffer._destroy() - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUCommandEncoder commandEncoder) - libf.wgpuCommandEncoderRelease(internal) - - -class GPUComputePassEncoder( - classes.GPUComputePassEncoder, - GPUCommandsMixin, - GPUDebugCommandsMixin, - GPUBindingCommandsMixin, - GPUObjectBase, -): - """ """ - - def set_pipeline(self, pipeline): - pipeline_id = pipeline._internal - # H: void f(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline) - libf.wgpuComputePassEncoderSetPipeline(self._internal, pipeline_id) - - def dispatch_workgroups( - self, workgroup_count_x, workgroup_count_y=1, workgroup_count_z=1 - ): - # H: void f(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ) - libf.wgpuComputePassEncoderDispatchWorkgroups( - self._internal, workgroup_count_x, workgroup_count_y, workgroup_count_z - ) - - def dispatch_workgroups_indirect(self, indirect_buffer, indirect_offset): - buffer_id = indirect_buffer._internal - # H: void f(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) - libf.wgpuComputePassEncoderDispatchWorkgroupsIndirect( - self._internal, buffer_id, int(indirect_offset) - ) - - def end(self): - # H: void f(WGPUComputePassEncoder computePassEncoder) - libf.wgpuComputePassEncoderEnd(self._internal) - - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUComputePassEncoder computePassEncoder) - libf.wgpuComputePassEncoderRelease(internal) - - -class GPURenderPassEncoder( - classes.GPURenderPassEncoder, - GPUCommandsMixin, - GPUDebugCommandsMixin, - GPUBindingCommandsMixin, - GPURenderCommandsMixin, 
- GPUObjectBase, -): - def set_viewport(self, x, y, width, height, min_depth, max_depth): - # H: void f(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth) - libf.wgpuRenderPassEncoderSetViewport( - self._internal, - float(x), - float(y), - float(width), - float(height), - float(min_depth), - float(max_depth), - ) - - def set_scissor_rect(self, x, y, width, height): - # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height) - libf.wgpuRenderPassEncoderSetScissorRect( - self._internal, int(x), int(y), int(width), int(height) - ) - - def set_blend_constant(self, color): - color = _tuple_from_tuple_or_dict(color, "rgba") - # H: r: float, g: float, b: float, a: float - c_color = new_struct_p( - "WGPUColor *", - r=color[0], - g=color[1], - b=color[2], - a=color[3], - ) - # H: void f(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color) - libf.wgpuRenderPassEncoderSetBlendConstant(self._internal, c_color) - - def set_stencil_reference(self, reference): - # H: void f(WGPURenderPassEncoder renderPassEncoder, uint32_t reference) - libf.wgpuRenderPassEncoderSetStencilReference(self._internal, int(reference)) - - def end(self): - # H: void f(WGPURenderPassEncoder renderPassEncoder) - libf.wgpuRenderPassEncoderEnd(self._internal) - - def execute_bundles(self, bundles): - raise NotImplementedError() - - def begin_occlusion_query(self, query_index): - raise NotImplementedError() - - def end_occlusion_query(self): - raise NotImplementedError() - - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPURenderPassEncoder renderPassEncoder) - libf.wgpuRenderPassEncoderRelease(internal) - - -class GPURenderBundleEncoder( - classes.GPURenderBundleEncoder, - GPUCommandsMixin, - GPUDebugCommandsMixin, - GPUBindingCommandsMixin, - GPURenderCommandsMixin, - GPUObjectBase, -): - def finish(self, *, label=""): - raise NotImplementedError() - - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPURenderBundleEncoder renderBundleEncoder) - libf.wgpuRenderBundleEncoderRelease(internal) - - -class GPUQueue(classes.GPUQueue, GPUObjectBase): - def submit(self, command_buffers): - command_buffer_ids = [cb._internal for cb in command_buffers] - c_command_buffers = ffi.new("WGPUCommandBuffer []", command_buffer_ids) - # H: void f(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands) - libf.wgpuQueueSubmit(self._internal, len(command_buffer_ids), c_command_buffers) - - def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None): - # We support anything that memoryview supports, i.e. anything - # that implements the buffer protocol, including, bytes, - # bytearray, ctypes arrays, numpy arrays, etc. 
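# [Editor's note - illustrative sketch, not part of the original patch.]
# The comment above means any buffer-protocol object can be passed as ``data``;
# the names ``queue`` and ``buffer`` below are assumed placeholders:
#
#   queue.write_buffer(buffer, 0, b"\x00" * 64)              # bytes
#   queue.write_buffer(buffer, 0, bytearray(64))             # bytearray
#   queue.write_buffer(buffer, 0, (ctypes.c_uint8 * 64)())   # ctypes array
#   queue.write_buffer(buffer, 0, np.zeros(16, np.float32))  # numpy array, if numpy is installed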
- m, address = get_memoryview_and_address(data) - nbytes = m.nbytes - - # Deal with offset and size - buffer_offset = int(buffer_offset) - data_offset = int(data_offset) - if not size: - data_length = nbytes - data_offset - else: - data_length = int(size) - - if not (0 <= buffer_offset < buffer.size): # pragma: no cover - raise ValueError("Invalid buffer_offset") - if not (0 <= data_offset < nbytes): # pragma: no cover - raise ValueError("Invalid data_offset") - if not (0 <= data_length <= (nbytes - data_offset)): # pragma: no cover - raise ValueError("Invalid data_length") - if not (data_length <= buffer.size - buffer_offset): # pragma: no cover - raise ValueError("Invalid data_length") - - # Make the call. Note that this call copies the data - it's ok - # if we lose our reference to the data once we leave this function. - c_data = ffi.cast("uint8_t *", address + data_offset) - # H: void f(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size) - libf.wgpuQueueWriteBuffer( - self._internal, buffer._internal, buffer_offset, c_data, data_length - ) - - def read_buffer(self, buffer, buffer_offset=0, size=None): - # Note that write_buffer probably does a very similar thing - # using a temporary buffer. But write_buffer is official API - # so it's a single call, while here we must create the temporary - # buffer and do the copying ourselves. - - if not size: - data_length = buffer.size - buffer_offset - else: - data_length = int(size) - if not (0 <= buffer_offset < buffer.size): # pragma: no cover - raise ValueError("Invalid buffer_offset") - if not (data_length <= buffer.size - buffer_offset): # pragma: no cover - raise ValueError("Invalid data_length") - - device = buffer._device - - # Create temporary buffer - tmp_usage = flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ - tmp_buffer = device._create_buffer("", data_length, tmp_usage, False) - - # Copy data to temp buffer - encoder = device.create_command_encoder() - encoder.copy_buffer_to_buffer(buffer, buffer_offset, tmp_buffer, 0, data_length) - command_buffer = encoder.finish() - self.submit([command_buffer]) - - # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") - data = tmp_buffer.read_mapped() - tmp_buffer.destroy() - - return data - - def write_texture(self, destination, data, data_layout, size): - # Note that the bytes_per_row restriction does not apply for - # this function; wgpu-native deals with it. - - if isinstance(destination["texture"], GPUTextureView): - raise ValueError("copy destination texture must be a texture, not a view") - - m, address = get_memoryview_and_address(data) - - c_data = ffi.cast("uint8_t *", address) - data_length = m.nbytes - - # We could allow size=None in this method, and derive the size from the data. - # Or compare size with the data size if it is given. However, the data - # could be a bit raw, being 1D and/or the shape expressed in bytes, so - # this gets a bit muddy. Also methods like copy_buffer_to_texture have the - # same size arg, so let's just leave it like this. 
- # - # data_size = list(reversed(m.shape)) + [1, 1, 1] - # data_size = data_size[:3] - - size = _tuple_from_tuple_or_dict( - size, ("width", "height", "depth_or_array_layers") - ) - - ori = _tuple_from_tuple_or_dict(destination.get("origin", (0, 0, 0)), "xyz") - # H: x: int, y: int, z: int - c_origin = new_struct( - "WGPUOrigin3D", - x=ori[0], - y=ori[1], - z=ori[2], - ) - # H: nextInChain: WGPUChainedStruct *, texture: WGPUTexture, mipLevel: int, origin: WGPUOrigin3D, aspect: WGPUTextureAspect - c_destination = new_struct_p( - "WGPUImageCopyTexture *", - texture=destination["texture"]._internal, - mipLevel=destination.get("mip_level", 0), - origin=c_origin, - aspect=enums.TextureAspect.all, - # not used: nextInChain - ) - - # H: nextInChain: WGPUChainedStruct *, offset: int, bytesPerRow: int, rowsPerImage: int - c_data_layout = new_struct_p( - "WGPUTextureDataLayout *", - offset=data_layout.get("offset", 0), - bytesPerRow=data_layout["bytes_per_row"], - rowsPerImage=data_layout.get("rows_per_image", size[1]), - # not used: nextInChain - ) - - # H: width: int, height: int, depthOrArrayLayers: int - c_size = new_struct_p( - "WGPUExtent3D *", - width=size[0], - height=size[1], - depthOrArrayLayers=size[2], - ) - - # H: void f(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize) - libf.wgpuQueueWriteTexture( - self._internal, c_destination, c_data, data_length, c_data_layout, c_size - ) - - def read_texture(self, source, data_layout, size): - # Note that the bytes_per_row restriction does not apply for - # this function; we have to deal with it. - - device = source["texture"]._device - - # Get and calculate striding info - ori_offset = data_layout.get("offset", 0) - ori_stride = data_layout["bytes_per_row"] - extra_stride = (256 - ori_stride % 256) % 256 - full_stride = ori_stride + extra_stride - - size = _tuple_from_tuple_or_dict( - size, ("width", "height", "depth_or_array_layers") - ) - - # Create temporary buffer - data_length = full_stride * size[1] * size[2] - tmp_usage = flags.BufferUsage.COPY_DST | flags.BufferUsage.MAP_READ - tmp_buffer = device._create_buffer("", data_length, tmp_usage, False) - - destination = { - "buffer": tmp_buffer, - "offset": 0, - "bytes_per_row": full_stride, # or WGPU_COPY_STRIDE_UNDEFINED ? - "rows_per_image": data_layout.get("rows_per_image", size[1]), - } - - # Copy data to temp buffer - encoder = device.create_command_encoder() - encoder.copy_texture_to_buffer(source, destination, size) - command_buffer = encoder.finish() - self.submit([command_buffer]) - - # Download from mappable buffer - tmp_buffer.map("READ_NOSYNC") - data = tmp_buffer.read_mapped() - tmp_buffer.destroy() - - # Fix data strides if necessary - # Ugh, cannot do striding with memoryviews (yet: https://bugs.python.org/issue41226) - # and Numpy is not a dependency. 
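# [Editor's note - worked example with assumed numbers, not part of the original
# patch.] For a 100 px wide rgba8unorm texture, bytes_per_row is 100 * 4 = 400,
# so in the striding setup above:
#
#   extra_stride = (256 - 400 % 256) % 256   # = 112
#   full_stride = 400 + 112                  # = 512
#
# The temporary buffer therefore holds 512-byte rows, and the loop below copies
# only the first 400 bytes of each row into the tightly packed result.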
- if extra_stride or ori_offset: - data_length2 = ori_stride * size[1] * size[2] + ori_offset - data2 = memoryview((ctypes.c_uint8 * data_length2)()).cast(data.format) - for i in range(size[1] * size[2]): - row = data[i * full_stride : i * full_stride + ori_stride] - data2[ - ori_offset - + i * ori_stride : ori_offset - + i * ori_stride - + ori_stride - ] = row - data = data2 - - return data - - def on_submitted_work_done(self): - raise NotImplementedError() - - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUQueue queue) - libf.wgpuQueueRelease(internal) - - -class GPURenderBundle(classes.GPURenderBundle, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPURenderBundle renderBundle) - libf.wgpuRenderBundleRelease(internal) - - -class GPUQuerySet(classes.GPUQuerySet, GPUObjectBase): - def _destroy(self): - if self._internal is not None and libf is not None: - self._internal, internal = None, self._internal - # H: void f(WGPUQuerySet querySet) - libf.wgpuQuerySetRelease(internal) - - def destroy(self): - self._destroy() - - -# %% Subclasses that don't need anything else - - -class GPUCompilationMessage(classes.GPUCompilationMessage): - pass - - -class GPUCompilationInfo(classes.GPUCompilationInfo): - pass - - -class GPUDeviceLostInfo(classes.GPUDeviceLostInfo): - pass - - -class GPUError(classes.GPUError): - pass - - -class GPUOutOfMemoryError(classes.GPUOutOfMemoryError, GPUError): - pass - - -class GPUValidationError(classes.GPUValidationError, GPUError): - pass - - -class GPUPipelineError(classes.GPUPipelineError): - pass - - -class GPUInternalError(classes.GPUInternalError, GPUError): - pass - - -# %% - - -def _copy_docstrings(): - base_classes = GPUObjectBase, GPUCanvasContext, GPUAdapter - for ob in globals().values(): - if not (isinstance(ob, type) and issubclass(ob, base_classes)): - continue - elif ob.__module__ != __name__: - continue # no-cover - base_cls = ob.mro()[1] - ob.__doc__ = base_cls.__doc__ - for name, attr in ob.__dict__.items(): - if name.startswith("_") or not hasattr(attr, "__doc__"): - continue # no-cover - base_attr = getattr(base_cls, name, None) - if base_attr is not None: - attr.__doc__ = base_attr.__doc__ - - -_copy_docstrings() diff --git a/wgpu/backends/wgpu_native/_ffi.py b/wgpu/backends/wgpu_native/_ffi.py deleted file mode 100644 index 641dd5a..0000000 --- a/wgpu/backends/wgpu_native/_ffi.py +++ /dev/null @@ -1,205 +0,0 @@ -"""Loading the header, the lib, and setting up its logging. -""" - -import os -import sys -import logging - -from ..._coreutils import get_resource_filename, logger_set_level_callbacks - -from cffi import FFI, __version_info__ as cffi_version_info - - -logger = logging.getLogger("wgpu") # noqa - - -if cffi_version_info < (1, 10): # no-cover - raise ImportError(f"{__name__} needs cffi 1.10 or later.") - - -def get_wgpu_header(): - """Read header file and strip some stuff that cffi would stumble on.""" - return _get_wgpu_header( - get_resource_filename("webgpu.h"), - get_resource_filename("wgpu.h"), - ) - - -def _get_wgpu_header(*filenames): - """Func written so we can use this in both wgpu_native/_ffi.py and codegen/hparser.py""" - # Read files - lines1 = [] - for filename in filenames: - with open(filename) as f: - lines1.extend(f.readlines()) - # Deal with pre-processor commands, because cffi cannot handle them. 
- # Just removing them, plus a few extra lines, seems to do the trick. - lines2 = [] - for line in lines1: - if line.startswith("#define ") and len(line.split()) > 2 and "0x" in line: - line = line.replace("(", "").replace(")", "") - elif line.startswith("#"): - continue - elif 'extern "C"' in line: - continue - for define_to_drop in [ - "WGPU_EXPORT ", - "WGPU_NULLABLE ", - " WGPU_OBJECT_ATTRIBUTE", - " WGPU_ENUM_ATTRIBUTE", - " WGPU_FUNCTION_ATTRIBUTE", - " WGPU_STRUCTURE_ATTRIBUTE", - ]: - line = line.replace(define_to_drop, "") - lines2.append(line) - return "\n".join(lines2) - - -def get_wgpu_lib_path(): - """Get the path to the wgpu library, taking into account the - WGPU_LIB_PATH environment variable. - """ - - # If path is given, use that or fail trying - override_path = os.getenv("WGPU_LIB_PATH", "").strip() - if override_path: - return override_path - - # Load the debug binary if requested - debug_mode = os.getenv("WGPU_DEBUG", "").strip() == "1" - build = "debug" if debug_mode else "release" - - # Get lib filename for supported platforms - if sys.platform.startswith("win"): # no-cover - lib_filename = f"wgpu_native-{build}.dll" - elif sys.platform.startswith("darwin"): # no-cover - lib_filename = f"libwgpu_native-{build}.dylib" - elif sys.platform.startswith("linux"): # no-cover - lib_filename = f"libwgpu_native-{build}.so" - else: # no-cover - raise RuntimeError( - f"No WGPU library shipped for platform {sys.platform}. Set WGPU_LIB_PATH instead." - ) - - # Note that this can be a false positive, e.g. ARM linux. - embedded_path = get_resource_filename(lib_filename) - if not os.path.isfile(embedded_path): # no-cover - download_hint = _maybe_get_hint_on_download_script() - pip_hint = _maybe_get_pip_hint() - raise RuntimeError( - f"Could not find WGPU library in {embedded_path}. {download_hint} {pip_hint}" - ) - else: - return embedded_path - - -def _maybe_get_hint_on_download_script(): - root_dir = os.path.join(get_resource_filename(""), "..", "..") - filename = os.path.abspath(os.path.join(root_dir, "download-wgpu-native.py")) - uses_repo = os.path.isfile(filename) - - uses_custom_lib = os.getenv("WGPU_LIB_PATH", "").strip() - - if uses_repo and not uses_custom_lib: - return "You may need to run download-wgpu-native.py (in the root of the repo)." - return "" - - -def _maybe_get_pip_hint(): - if not sys.platform.startswith("linux"): - return "" - - # Get pip version - pip_version = () - try: - import pip # noqa - - parts = [] - for x in pip.__version__.split("."): - if not x.isnumeric(): - break - parts.append(int(x)) - pip_version = tuple(parts) - except Exception: - pass - - if pip_version < (20, 3): - return "If you install wgpu with pip, pip needs to be at least version 20.3 or the wgpu-native binary may not be included." - return "" - - -def get_lib_version_info(): - # Get lib version - version_int = lib.wgpuGetVersion() - if version_int < 65536: # no-cover - old version encoding with 3 ints - lib_version_info = tuple((version_int >> bits) & 0xFF for bits in (16, 8, 0)) - else: - lib_version_info = tuple( - (version_int >> bits) & 0xFF for bits in (24, 16, 8, 0) - ) - # When the 0.7.0 tag was made, the version was not bumped. 
- if lib_version_info == (0, 6, 0, 0): - lib_version_info = (0, 7, 0) - return lib_version_info - - -# Configure cffi and load the dynamic library -# NOTE: `import wgpu.backends.wgpu_native` is used in pyinstaller tests to verify -# that we can load the DLL after freezing -ffi = FFI() -ffi.cdef(get_wgpu_header()) -ffi.set_source("wgpu.h", None) -lib_path = get_wgpu_lib_path() # store path on this module so it can be checked -lib = ffi.dlopen(lib_path) -lib_version_info = get_lib_version_info() - - -def _check_expected_version(version_info): - lib_version_info = get_lib_version_info() - # Compare - if lib_version_info != version_info: # no-cover - logger.warning( - f"Expected wgpu-native version {version_info} but got {lib_version_info}. {_maybe_get_hint_on_download_script()}" - ) - - -@ffi.callback("void(WGPULogLevel, char *, void *)") -def _logger_callback(level, c_msg, userdata): - """Called when Rust emits a log message.""" - # Make a copy of the msg. Rust reclaims the memory when this returns - try: - msg = ffi.string(c_msg).decode(errors="ignore") - except Exception: - if sys.is_finalizing(): - return # Python is shutting down - m = { - lib.WGPULogLevel_Error: logger.error, - lib.WGPULogLevel_Warn: logger.warning, - lib.WGPULogLevel_Info: logger.info, - lib.WGPULogLevel_Debug: logger.debug, - lib.WGPULogLevel_Trace: logger.debug, - } - func = m.get(level, logger.warning) - func(msg) - - -def _logger_set_level_callback(level): - """Called when the log level is set from Python.""" - if level >= 40: - lib.wgpuSetLogLevel(lib.WGPULogLevel_Error) - elif level >= 30: - lib.wgpuSetLogLevel(lib.WGPULogLevel_Warn) - elif level >= 20: - lib.wgpuSetLogLevel(lib.WGPULogLevel_Info) - elif level >= 10: - lib.wgpuSetLogLevel(lib.WGPULogLevel_Debug) - elif level >= 5: - lib.wgpuSetLogLevel(lib.WGPULogLevel_Trace) # extra level - else: - lib.wgpuSetLogLevel(lib.WGPULogLevel_Off) - - -# Connect Rust logging with Python logging (userdata set to null) -lib.wgpuSetLogCallback(_logger_callback, ffi.NULL) -logger_set_level_callbacks.append(_logger_set_level_callback) -_logger_set_level_callback(logger.level) diff --git a/wgpu/backends/wgpu_native/_helpers.py b/wgpu/backends/wgpu_native/_helpers.py deleted file mode 100644 index 2492e2d..0000000 --- a/wgpu/backends/wgpu_native/_helpers.py +++ /dev/null @@ -1,445 +0,0 @@ -"""Utilities used in the wgpu-native backend. -""" - -import os -import sys -import ctypes - -from ._ffi import ffi, lib -from ..._diagnostics import Diagnostics -from ...classes import ( - GPUError, - GPUOutOfMemoryError, - GPUValidationError, - GPUPipelineError, - GPUInternalError, -) - - -ERROR_TYPES = { - "": GPUError, - "OutOfMemory": GPUOutOfMemoryError, - "Validation": GPUValidationError, - "Pipeline": GPUPipelineError, - "Internal": GPUInternalError, -} - - -if sys.platform.startswith("darwin"): - from rubicon.objc.api import ObjCInstance, ObjCClass - - -def get_memoryview_and_address(data): - """Get a memoryview for the given data and its memory address. - The data object must support the buffer protocol. - """ - - # To get the address from a memoryview, there are multiple options. - # The most obvious is using ctypes: - # - # c_array = (ctypes.c_uint8 * nbytes).from_buffer(m) - # address = ctypes.addressof(c_array) - # - # Unfortunately, this call fails if the memoryview is readonly, e.g. if - # the data is a bytes object or readonly numpy array. One could then - # use from_buffer_copy(), but that introduces an extra data copy, which - # can hurt performance when the data is large. 
- # - # Another alternative that can be used for objects implementing the array - # interface (like numpy arrays) is to directly read the address: - # - # address = data.__array_interface__["data"][0] - # - # But what seems to work best (at the moment) is using cffi. - - # Convert data to a memoryview. That way we have something consistent - # to work with, which supports all objects implementing the buffer protocol. - m = memoryview(data) - - # Test that the data is contiguous. - # Note that pypy does not have the contiguous attribute, so we assume it is. - if not getattr(m, "contiguous", True): - raise ValueError("The given texture data is not contiguous") - - # Get the address via ffi. In contrast to ctypes, this also - # works for readonly data (e.g. bytes) - c_data = ffi.from_buffer("uint8_t []", m) - address = int(ffi.cast("uintptr_t", c_data)) - - return m, address - - -def get_memoryview_from_address(address, nbytes, format="B"): - """Get a memoryview from an int memory address and a byte count,""" - # The default format is " 0 and name2[-1] not in "_123": - name2 += "_" - name2 += c2 - return name2 - - -def to_camel_case(name): - """Convert a name from snake_case to camelCase. Names that already are - camelCase remain the same. - """ - is_capital = False - name2 = "" - for c in name: - if c == "_" and name2: - is_capital = True - elif is_capital: - name2 += c.upper() - is_capital = False - else: - name2 += c - if name2.endswith(("1d", "2d", "3d")): - name2 = name2[:-1] + "D" - return name2 - - -class ErrorHandler: - """Object that logs errors, with the option to collect incoming - errors elsewhere. - """ - - def __init__(self, logger): - self._logger = logger - self._proxy_stack = [] - self._error_message_counts = {} - - def capture(self, func): - """Send incoming error messages to the given func instead of logging them.""" - self._proxy_stack.append(func) - - def release(self, func): - """Release the given func.""" - f = self._proxy_stack.pop(-1) - if f is not func: - self._proxy_stack.clear() - self._logger.warning("ErrorHandler capture/release out of sync") - - def handle_error(self, error_type: str, message: str): - """Handle an error message.""" - if self._proxy_stack: - self._proxy_stack[-1](error_type, message) - else: - self.log_error(message) - - def log_error(self, message): - """Hanle an error message by logging it, bypassing any capturing.""" - # Get count for this message. Use a hash that does not use the - # digits in the message, because of id's getting renewed on - # each draw. - h = hash("".join(c for c in message if not c.isdigit())) - count = self._error_message_counts.get(h, 0) + 1 - self._error_message_counts[h] = count - - # Decide what to do - if count == 1: - self._logger.error(message) - elif count < 10: - self._logger.error(message.splitlines()[0] + f" ({count})") - elif count == 10: - self._logger.error(message.splitlines()[0] + " (hiding from now)") - - -class SafeLibCalls: - """Object that copies all library functions, but wrapped in such - a way that errors occuring in that call are raised as exceptions. 
- """ - - def __init__(self, lib, error_handler): - self._error_handler = error_handler - self._error_message = None - self._make_function_copies(lib) - - def _make_function_copies(self, lib): - for name in dir(lib): - if name.startswith("wgpu"): - ob = getattr(lib, name) - if callable(ob): - setattr(self, name, self._make_proxy_func(name, ob)) - - def _handle_error(self, error_type, message): - # If we already had an error, we log the earlier one now - if self._error_message: - self._error_handler.log_error(self._error_message[1]) - # Store new error - self._error_message = (error_type, message) - - def _make_proxy_func(self, name, ob): - def proxy_func(*args): - # Make the call, with error capturing on - handle_error = self._handle_error - self._error_handler.capture(handle_error) - try: - result = ob(*args) - finally: - self._error_handler.release(handle_error) - - # Handle the error. - if self._error_message: - error_type, message = self._error_message - self._error_message = None - cls = ERROR_TYPES.get(error_type, GPUError) - wgpu_error = cls(message) - # The line below will be the bottom line in the traceback, - # so better make it informative! As far as I know there is - # no way to exclude this frame from the traceback. - raise wgpu_error # the frame above is more interesting ↑↑ - return result - - proxy_func.__name__ = name - return proxy_func - - -def generate_report(): - """Get a report similar to the one produced by wgpuGenerateReport(), - but in the form of a Python dict. - """ - - # H: surfaces: WGPUStorageReport, backendType: WGPUBackendType, vulkan: WGPUHubReport, metal: WGPUHubReport, dx12: WGPUHubReport, dx11: WGPUHubReport, gl: WGPUHubReport - struct = ffi.new("WGPUGlobalReport *") - - # H: void f(WGPUInstance instance, WGPUGlobalReport * report) - lib.wgpuGenerateReport(get_wgpu_instance(), struct) - - report = {} - - report["surfaces"] = { - "occupied": struct.surfaces.numOccupied, - "vacant": struct.surfaces.numVacant, - "error": struct.surfaces.numError, - "element_size": struct.surfaces.elementSize, - } - - for backend in ("vulkan", "metal", "dx12", "dx11", "gl"): - c_hub_report = getattr(struct, backend) - report[backend] = {} - for key in dir(c_hub_report): - c_storage_report = getattr(c_hub_report, key) - storage_report = { - "occupied": c_storage_report.numOccupied, - "vacant": c_storage_report.numVacant, - "error": c_storage_report.numError, - "element_size": c_storage_report.elementSize, - } - # if any(x!=0 for x in storage_report.values()): - report[backend][key] = storage_report - - return report - - -class WgpuNativeCountsDiagnostics(Diagnostics): - def get_subscript(self): - text = "" - text += " * The o, v, e are occupied, vacant and error, respecitively.\n" - text += " * Reported memory does not include buffer/texture data.\n" - return text - - def get_dict(self): - result = {} - native_report = generate_report() - - # Names in the root of the report (backend-less) - root_names = ["surfaces"] - - # Get per-backend names and a list of backends - names = list(native_report["vulkan"].keys()) - backends = [name for name in native_report.keys() if name not in root_names] - - # Get a mapping from native names to wgpu-py names - name_map = {"surfaces": "CanvasContext"} - for name in names: - if name not in name_map: - name_map[name] = name[0].upper() + name[1:-1] - - # Initialize the result dict (sorted) - for report_name in sorted(name_map[name] for name in names + root_names): - result[report_name] = {"count": 0, "mem": 0} - - # Establish what backends are 
active - active_backends = [] - for backend in backends: - total = 0 - for name in names: - d = native_report[backend][name] - total += d["occupied"] + d["vacant"] + d["error"] - if total > 0: - active_backends.append(backend) - - # Process names in the root - for name in root_names: - d = native_report[name] - subtotal_count = d["occupied"] + d["vacant"] + d["error"] - impl = { - "o": d["occupied"], - "v": d["vacant"], - "e": d["error"], - "el_size": d["element_size"], - } - # Store in report - report_name = name_map[name] - result[report_name]["count"] = subtotal_count - result[report_name]["mem"] = subtotal_count * d["element_size"] - result[report_name]["backend"] = {"": impl} - - # Iterate over backends - for name in names: - total_count = 0 - total_mem = 0 - implementations = {} - for backend in active_backends: - d = native_report[backend][name] - subtotal_count = d["occupied"] + d["vacant"] + d["error"] - subtotal_mem = subtotal_count * d["element_size"] - impl = { - "o": d["occupied"], - "v": d["vacant"], - "e": d["error"], - "el_size": d["element_size"], - } - total_count += subtotal_count - total_mem += subtotal_mem - implementations[backend] = impl - # Store in report - report_name = name_map[name] - result[report_name]["count"] = total_count - result[report_name]["mem"] = total_mem - result[report_name]["backend"] = implementations - - # Add totals - totals = {} - for key in ("count", "mem"): - totals[key] = sum(v.get(key, 0) for v in result.values()) - result["total"] = totals - - return result - - -diagnostics = WgpuNativeCountsDiagnostics("wgpu_native_counts") diff --git a/wgpu/backends/wgpu_native/_mappings.py b/wgpu/backends/wgpu_native/_mappings.py deleted file mode 100644 index cde8a6f..0000000 --- a/wgpu/backends/wgpu_native/_mappings.py +++ /dev/null @@ -1,454 +0,0 @@ -""" Mappings for the wgpu-native backend. 
""" - -# THIS CODE IS AUTOGENERATED - DO NOT EDIT - -# flake8: noqa - -# There are 232 enum mappings - -enummap = { - "AddressMode.clamp-to-edge": 2, - "AddressMode.mirror-repeat": 1, - "AddressMode.repeat": 0, - "BlendFactor.constant": 11, - "BlendFactor.dst": 6, - "BlendFactor.dst-alpha": 8, - "BlendFactor.one": 1, - "BlendFactor.one-minus-constant": 12, - "BlendFactor.one-minus-dst": 7, - "BlendFactor.one-minus-dst-alpha": 9, - "BlendFactor.one-minus-src": 3, - "BlendFactor.one-minus-src-alpha": 5, - "BlendFactor.src": 2, - "BlendFactor.src-alpha": 4, - "BlendFactor.src-alpha-saturated": 10, - "BlendFactor.zero": 0, - "BlendOperation.add": 0, - "BlendOperation.max": 4, - "BlendOperation.min": 3, - "BlendOperation.reverse-subtract": 2, - "BlendOperation.subtract": 1, - "BufferBindingType.read-only-storage": 3, - "BufferBindingType.storage": 2, - "BufferBindingType.uniform": 1, - "BufferMapState.mapped": 2, - "BufferMapState.pending": 1, - "BufferMapState.unmapped": 0, - "CompareFunction.always": 8, - "CompareFunction.equal": 6, - "CompareFunction.greater": 4, - "CompareFunction.greater-equal": 5, - "CompareFunction.less": 2, - "CompareFunction.less-equal": 3, - "CompareFunction.never": 1, - "CompareFunction.not-equal": 7, - "CompilationMessageType.error": 0, - "CompilationMessageType.info": 2, - "CompilationMessageType.warning": 1, - "CullMode.back": 2, - "CullMode.front": 1, - "CullMode.none": 0, - "DeviceLostReason.destroyed": 1, - "ErrorFilter.internal": 2, - "ErrorFilter.out-of-memory": 1, - "ErrorFilter.validation": 0, - "FeatureName.bgra8unorm-storage": 10, - "FeatureName.depth-clip-control": 1, - "FeatureName.depth32float-stencil8": 2, - "FeatureName.float32-filterable": 11, - "FeatureName.indirect-first-instance": 7, - "FeatureName.rg11b10ufloat-renderable": 9, - "FeatureName.shader-f16": 8, - "FeatureName.texture-compression-astc": 6, - "FeatureName.texture-compression-bc": 4, - "FeatureName.texture-compression-etc2": 5, - "FeatureName.timestamp-query": 3, - "FilterMode.linear": 1, - "FilterMode.nearest": 0, - "FrontFace.ccw": 0, - "FrontFace.cw": 1, - "IndexFormat.uint16": 1, - "IndexFormat.uint32": 2, - "LoadOp.clear": 1, - "LoadOp.load": 2, - "MipmapFilterMode.linear": 1, - "MipmapFilterMode.nearest": 0, - "PowerPreference.high-performance": 2, - "PowerPreference.low-power": 1, - "PrimitiveTopology.line-list": 1, - "PrimitiveTopology.line-strip": 2, - "PrimitiveTopology.point-list": 0, - "PrimitiveTopology.triangle-list": 3, - "PrimitiveTopology.triangle-strip": 4, - "QueryType.occlusion": 0, - "QueryType.timestamp": 1, - "SamplerBindingType.comparison": 3, - "SamplerBindingType.filtering": 1, - "SamplerBindingType.non-filtering": 2, - "StencilOperation.decrement-clamp": 5, - "StencilOperation.decrement-wrap": 7, - "StencilOperation.increment-clamp": 4, - "StencilOperation.increment-wrap": 6, - "StencilOperation.invert": 3, - "StencilOperation.keep": 0, - "StencilOperation.replace": 2, - "StencilOperation.zero": 1, - "StorageTextureAccess.write-only": 1, - "StoreOp.discard": 2, - "StoreOp.store": 1, - "TextureAspect.all": 0, - "TextureAspect.depth-only": 2, - "TextureAspect.stencil-only": 1, - "TextureDimension.1d": 0, - "TextureDimension.2d": 1, - "TextureDimension.3d": 2, - "TextureFormat.astc-10x10-unorm": 89, - "TextureFormat.astc-10x10-unorm-srgb": 90, - "TextureFormat.astc-10x5-unorm": 83, - "TextureFormat.astc-10x5-unorm-srgb": 84, - "TextureFormat.astc-10x6-unorm": 85, - "TextureFormat.astc-10x6-unorm-srgb": 86, - "TextureFormat.astc-10x8-unorm": 87, - 
"TextureFormat.astc-10x8-unorm-srgb": 88, - "TextureFormat.astc-12x10-unorm": 91, - "TextureFormat.astc-12x10-unorm-srgb": 92, - "TextureFormat.astc-12x12-unorm": 93, - "TextureFormat.astc-12x12-unorm-srgb": 94, - "TextureFormat.astc-4x4-unorm": 67, - "TextureFormat.astc-4x4-unorm-srgb": 68, - "TextureFormat.astc-5x4-unorm": 69, - "TextureFormat.astc-5x4-unorm-srgb": 70, - "TextureFormat.astc-5x5-unorm": 71, - "TextureFormat.astc-5x5-unorm-srgb": 72, - "TextureFormat.astc-6x5-unorm": 73, - "TextureFormat.astc-6x5-unorm-srgb": 74, - "TextureFormat.astc-6x6-unorm": 75, - "TextureFormat.astc-6x6-unorm-srgb": 76, - "TextureFormat.astc-8x5-unorm": 77, - "TextureFormat.astc-8x5-unorm-srgb": 78, - "TextureFormat.astc-8x6-unorm": 79, - "TextureFormat.astc-8x6-unorm-srgb": 80, - "TextureFormat.astc-8x8-unorm": 81, - "TextureFormat.astc-8x8-unorm-srgb": 82, - "TextureFormat.bc1-rgba-unorm": 43, - "TextureFormat.bc1-rgba-unorm-srgb": 44, - "TextureFormat.bc2-rgba-unorm": 45, - "TextureFormat.bc2-rgba-unorm-srgb": 46, - "TextureFormat.bc3-rgba-unorm": 47, - "TextureFormat.bc3-rgba-unorm-srgb": 48, - "TextureFormat.bc4-r-snorm": 50, - "TextureFormat.bc4-r-unorm": 49, - "TextureFormat.bc5-rg-snorm": 52, - "TextureFormat.bc5-rg-unorm": 51, - "TextureFormat.bc6h-rgb-float": 54, - "TextureFormat.bc6h-rgb-ufloat": 53, - "TextureFormat.bc7-rgba-unorm": 55, - "TextureFormat.bc7-rgba-unorm-srgb": 56, - "TextureFormat.bgra8unorm": 23, - "TextureFormat.bgra8unorm-srgb": 24, - "TextureFormat.depth16unorm": 38, - "TextureFormat.depth24plus": 39, - "TextureFormat.depth24plus-stencil8": 40, - "TextureFormat.depth32float": 41, - "TextureFormat.depth32float-stencil8": 42, - "TextureFormat.eac-r11snorm": 64, - "TextureFormat.eac-r11unorm": 63, - "TextureFormat.eac-rg11snorm": 66, - "TextureFormat.eac-rg11unorm": 65, - "TextureFormat.etc2-rgb8a1unorm": 59, - "TextureFormat.etc2-rgb8a1unorm-srgb": 60, - "TextureFormat.etc2-rgb8unorm": 57, - "TextureFormat.etc2-rgb8unorm-srgb": 58, - "TextureFormat.etc2-rgba8unorm": 61, - "TextureFormat.etc2-rgba8unorm-srgb": 62, - "TextureFormat.r16float": 7, - "TextureFormat.r16sint": 6, - "TextureFormat.r16uint": 5, - "TextureFormat.r32float": 12, - "TextureFormat.r32sint": 14, - "TextureFormat.r32uint": 13, - "TextureFormat.r8sint": 4, - "TextureFormat.r8snorm": 2, - "TextureFormat.r8uint": 3, - "TextureFormat.r8unorm": 1, - "TextureFormat.rg11b10ufloat": 26, - "TextureFormat.rg16float": 17, - "TextureFormat.rg16sint": 16, - "TextureFormat.rg16uint": 15, - "TextureFormat.rg32float": 28, - "TextureFormat.rg32sint": 30, - "TextureFormat.rg32uint": 29, - "TextureFormat.rg8sint": 11, - "TextureFormat.rg8snorm": 9, - "TextureFormat.rg8uint": 10, - "TextureFormat.rg8unorm": 8, - "TextureFormat.rgb10a2unorm": 25, - "TextureFormat.rgb9e5ufloat": 27, - "TextureFormat.rgba16float": 33, - "TextureFormat.rgba16sint": 32, - "TextureFormat.rgba16uint": 31, - "TextureFormat.rgba32float": 34, - "TextureFormat.rgba32sint": 36, - "TextureFormat.rgba32uint": 35, - "TextureFormat.rgba8sint": 22, - "TextureFormat.rgba8snorm": 20, - "TextureFormat.rgba8uint": 21, - "TextureFormat.rgba8unorm": 18, - "TextureFormat.rgba8unorm-srgb": 19, - "TextureFormat.stencil8": 37, - "TextureSampleType.depth": 3, - "TextureSampleType.float": 1, - "TextureSampleType.sint": 4, - "TextureSampleType.uint": 5, - "TextureSampleType.unfilterable-float": 2, - "TextureViewDimension.1d": 1, - "TextureViewDimension.2d": 2, - "TextureViewDimension.2d-array": 3, - "TextureViewDimension.3d": 6, - "TextureViewDimension.cube": 4, - 
"TextureViewDimension.cube-array": 5, - "VertexFormat.float16x2": 17, - "VertexFormat.float16x4": 18, - "VertexFormat.float32": 19, - "VertexFormat.float32x2": 20, - "VertexFormat.float32x3": 21, - "VertexFormat.float32x4": 22, - "VertexFormat.sint16x2": 11, - "VertexFormat.sint16x4": 12, - "VertexFormat.sint32": 27, - "VertexFormat.sint32x2": 28, - "VertexFormat.sint32x3": 29, - "VertexFormat.sint32x4": 30, - "VertexFormat.sint8x2": 3, - "VertexFormat.sint8x4": 4, - "VertexFormat.snorm16x2": 15, - "VertexFormat.snorm16x4": 16, - "VertexFormat.snorm8x2": 7, - "VertexFormat.snorm8x4": 8, - "VertexFormat.uint16x2": 9, - "VertexFormat.uint16x4": 10, - "VertexFormat.uint32": 23, - "VertexFormat.uint32x2": 24, - "VertexFormat.uint32x3": 25, - "VertexFormat.uint32x4": 26, - "VertexFormat.uint8x2": 1, - "VertexFormat.uint8x4": 2, - "VertexFormat.unorm16x2": 13, - "VertexFormat.unorm16x4": 14, - "VertexFormat.unorm8x2": 5, - "VertexFormat.unorm8x4": 6, - "VertexStepMode.instance": 1, - "VertexStepMode.vertex": 0, -} - -# There are 47 struct-field enum mappings - -cstructfield2enum = { - "BlendComponent.dstFactor": "BlendFactor", - "BlendComponent.operation": "BlendOperation", - "BlendComponent.srcFactor": "BlendFactor", - "BufferBindingLayout.type": "BufferBindingType", - "ColorTargetState.format": "TextureFormat", - "CompilationMessage.type": "CompilationMessageType", - "DepthStencilState.depthCompare": "CompareFunction", - "DepthStencilState.format": "TextureFormat", - "ImageCopyTexture.aspect": "TextureAspect", - "PrimitiveState.cullMode": "CullMode", - "PrimitiveState.frontFace": "FrontFace", - "PrimitiveState.stripIndexFormat": "IndexFormat", - "PrimitiveState.topology": "PrimitiveTopology", - "QuerySetDescriptor.type": "QueryType", - "RenderBundleEncoderDescriptor.depthStencilFormat": "TextureFormat", - "RenderPassColorAttachment.loadOp": "LoadOp", - "RenderPassColorAttachment.storeOp": "StoreOp", - "RenderPassDepthStencilAttachment.depthLoadOp": "LoadOp", - "RenderPassDepthStencilAttachment.depthStoreOp": "StoreOp", - "RenderPassDepthStencilAttachment.stencilLoadOp": "LoadOp", - "RenderPassDepthStencilAttachment.stencilStoreOp": "StoreOp", - "RequestAdapterOptions.powerPreference": "PowerPreference", - "SamplerBindingLayout.type": "SamplerBindingType", - "SamplerDescriptor.addressModeU": "AddressMode", - "SamplerDescriptor.addressModeV": "AddressMode", - "SamplerDescriptor.addressModeW": "AddressMode", - "SamplerDescriptor.compare": "CompareFunction", - "SamplerDescriptor.magFilter": "FilterMode", - "SamplerDescriptor.minFilter": "FilterMode", - "SamplerDescriptor.mipmapFilter": "MipmapFilterMode", - "StencilFaceState.compare": "CompareFunction", - "StencilFaceState.depthFailOp": "StencilOperation", - "StencilFaceState.failOp": "StencilOperation", - "StencilFaceState.passOp": "StencilOperation", - "StorageTextureBindingLayout.access": "StorageTextureAccess", - "StorageTextureBindingLayout.format": "TextureFormat", - "StorageTextureBindingLayout.viewDimension": "TextureViewDimension", - "SurfaceConfiguration.format": "TextureFormat", - "TextureBindingLayout.sampleType": "TextureSampleType", - "TextureBindingLayout.viewDimension": "TextureViewDimension", - "TextureDescriptor.dimension": "TextureDimension", - "TextureDescriptor.format": "TextureFormat", - "TextureViewDescriptor.aspect": "TextureAspect", - "TextureViewDescriptor.dimension": "TextureViewDimension", - "TextureViewDescriptor.format": "TextureFormat", - "VertexAttribute.format": "VertexFormat", - "VertexBufferLayout.stepMode": 
"VertexStepMode", -} - -enum_str2int = { - "BackendType": { - "Undefined": 0, - "Null": 1, - "WebGPU": 2, - "D3D11": 3, - "D3D12": 4, - "Metal": 5, - "Vulkan": 6, - "OpenGL": 7, - "OpenGLES": 8, - } -} -enum_int2str = { - "BackendType": { - 0: "Undefined", - 1: "Null", - 2: "WebGPU", - 3: "D3D11", - 4: "D3D12", - 5: "Metal", - 6: "Vulkan", - 7: "OpenGL", - 8: "OpenGLES", - }, - "AdapterType": { - 0: "DiscreteGPU", - 1: "IntegratedGPU", - 2: "CPU", - 3: "Unknown", - }, - "ErrorType": { - 0: "NoError", - 1: "Validation", - 2: "OutOfMemory", - 3: "Internal", - 4: "Unknown", - 5: "DeviceLost", - }, - "DeviceLostReason": { - 0: "unknown", - 1: "destroyed", - }, - "TextureFormat": { - 0: "Undefined", - 1: "r8unorm", - 2: "r8snorm", - 3: "r8uint", - 4: "r8sint", - 5: "r16uint", - 6: "r16sint", - 7: "r16float", - 8: "rg8unorm", - 9: "rg8snorm", - 10: "rg8uint", - 11: "rg8sint", - 12: "r32float", - 13: "r32uint", - 14: "r32sint", - 15: "rg16uint", - 16: "rg16sint", - 17: "rg16float", - 18: "rgba8unorm", - 19: "rgba8unorm-srgb", - 20: "rgba8snorm", - 21: "rgba8uint", - 22: "rgba8sint", - 23: "bgra8unorm", - 24: "bgra8unorm-srgb", - 25: "rgb10a2unorm", - 26: "rg11b10ufloat", - 27: "rgb9e5ufloat", - 28: "rg32float", - 29: "rg32uint", - 30: "rg32sint", - 31: "rgba16uint", - 32: "rgba16sint", - 33: "rgba16float", - 34: "rgba32float", - 35: "rgba32uint", - 36: "rgba32sint", - 37: "stencil8", - 38: "depth16unorm", - 39: "depth24plus", - 40: "depth24plus-stencil8", - 41: "depth32float", - 42: "depth32float-stencil8", - 43: "bc1-rgba-unorm", - 44: "bc1-rgba-unorm-srgb", - 45: "bc2-rgba-unorm", - 46: "bc2-rgba-unorm-srgb", - 47: "bc3-rgba-unorm", - 48: "bc3-rgba-unorm-srgb", - 49: "bc4-r-unorm", - 50: "bc4-r-snorm", - 51: "bc5-rg-unorm", - 52: "bc5-rg-snorm", - 53: "bc6h-rgb-ufloat", - 54: "bc6h-rgb-float", - 55: "bc7-rgba-unorm", - 56: "bc7-rgba-unorm-srgb", - 57: "etc2-rgb8unorm", - 58: "etc2-rgb8unorm-srgb", - 59: "etc2-rgb8a1unorm", - 60: "etc2-rgb8a1unorm-srgb", - 61: "etc2-rgba8unorm", - 62: "etc2-rgba8unorm-srgb", - 63: "eac-r11unorm", - 64: "eac-r11snorm", - 65: "eac-rg11unorm", - 66: "eac-rg11snorm", - 67: "astc-4x4-unorm", - 68: "astc-4x4-unorm-srgb", - 69: "astc-5x4-unorm", - 70: "astc-5x4-unorm-srgb", - 71: "astc-5x5-unorm", - 72: "astc-5x5-unorm-srgb", - 73: "astc-6x5-unorm", - 74: "astc-6x5-unorm-srgb", - 75: "astc-6x6-unorm", - 76: "astc-6x6-unorm-srgb", - 77: "astc-8x5-unorm", - 78: "astc-8x5-unorm-srgb", - 79: "astc-8x6-unorm", - 80: "astc-8x6-unorm-srgb", - 81: "astc-8x8-unorm", - 82: "astc-8x8-unorm-srgb", - 83: "astc-10x5-unorm", - 84: "astc-10x5-unorm-srgb", - 85: "astc-10x6-unorm", - 86: "astc-10x6-unorm-srgb", - 87: "astc-10x8-unorm", - 88: "astc-10x8-unorm-srgb", - 89: "astc-10x10-unorm", - 90: "astc-10x10-unorm-srgb", - 91: "astc-12x10-unorm", - 92: "astc-12x10-unorm-srgb", - 93: "astc-12x12-unorm", - 94: "astc-12x12-unorm-srgb", - }, - "TextureDimension": { - 0: "1d", - 1: "2d", - 2: "3d", - }, - "PresentMode": { - 0: "Fifo", - 1: "FifoRelaxed", - 2: "Immediate", - 3: "Mailbox", - }, - "CompositeAlphaMode": { - 0: "Auto", - 1: "Opaque", - 2: "Premultiplied", - 3: "Unpremultiplied", - 4: "Inherit", - }, -} diff --git a/wgpu/backends/wgpu_native/extras.py b/wgpu/backends/wgpu_native/extras.py deleted file mode 100644 index d61101e..0000000 --- a/wgpu/backends/wgpu_native/extras.py +++ /dev/null @@ -1,50 +0,0 @@ -import os - -from ._api import ffi, libf, structs, enums, Dict, logger -from ._helpers import get_wgpu_instance - - -# NOTE: these functions represent backend-specific extra 
API. -# NOTE: changes to this module must be reflected in docs/backends.rst. -# We don't use Sphinx automodule because this way the doc build do not -# need to be able to load wgpu-native. - - -def enumerate_adapters(): - """Return a list of all available adapters.""" - # The first call is to get the number of adapters, and the second - # call is to get the actual adapters. Note that the second arg (now - # NULL) can be a `WGPUInstanceEnumerateAdapterOptions` to filter - # by backend. - - adapter_count = libf.wgpuInstanceEnumerateAdapters( - get_wgpu_instance(), ffi.NULL, ffi.NULL - ) - - adapters = ffi.new("WGPUAdapter[]", adapter_count) - libf.wgpuInstanceEnumerateAdapters(get_wgpu_instance(), ffi.NULL, adapters) - - from . import gpu # noqa - - return [gpu._create_adapter(adapter) for adapter in adapters] - - -def request_device_tracing( - adapter, - trace_path, - *, - label="", - required_features: "list(enums.FeatureName)" = [], - required_limits: "Dict[str, int]" = {}, - default_queue: "structs.QueueDescriptor" = {}, -): - """Write a trace of all commands to a file so it can be reproduced - elsewhere. The trace is cross-platform! - """ - if not os.path.isdir(trace_path): - os.makedirs(trace_path, exist_ok=True) - elif os.listdir(trace_path): - logger.warning(f"Trace directory not empty: {trace_path}") - return adapter._request_device( - label, required_features, required_limits, default_queue, trace_path - ) diff --git a/wgpu/gui/__init__.py b/wgpu/gui/__init__.py deleted file mode 100644 index c959168..0000000 --- a/wgpu/gui/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Code to provide a canvas to render to. -""" - -from .base import WgpuCanvasInterface, WgpuCanvasBase, WgpuAutoGui # noqa: F401 -from .offscreen import WgpuOffscreenCanvasBase # noqa: F401 - -__all__ = [ - "WgpuCanvasInterface", - "WgpuCanvasBase", - "WgpuAutoGui", - "WgpuOffscreenCanvasBase", -] diff --git a/wgpu/gui/auto.py b/wgpu/gui/auto.py deleted file mode 100644 index da30440..0000000 --- a/wgpu/gui/auto.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Automatic GUI backend selection. - -Right now we only chose between GLFW, Qt and Jupyter. We might add support -for e.g. wx later. Or we might decide to stick with these three. -""" - -__all__ = ["WgpuCanvas", "run", "call_later"] - -import importlib -import os -import sys - - -def is_jupyter(): - """Determine whether the user is executing in a Jupyter Notebook / Lab.""" - try: - ip = get_ipython() - if ip.has_trait("kernel"): - return True - else: - return False - except NameError: - return False - - -def _load_backend(backend_name): - """Load a gui backend by name.""" - if backend_name == "glfw": - from . import glfw as module # noqa - elif backend_name == "qt": - from . import qt as module # noqa - elif backend_name == "jupyter": - from . import jupyter as module # noqa - elif backend_name == "wx": - from . import wx as module # noqa - elif backend_name == "offscreen": - from . import offscreen as module # noqa - else: # no-cover - raise ImportError("Unknown wgpu gui backend: '{backend_name}'") - return module - - -def _auto_load_backend(): - """Decide on the gui backend automatically.""" - - # Backends to auto load, ordered by preference. 
Maps libname -> backend_name - gui_backends = { - "glfw": "glfw", - "PySide6": "qt", - "PyQt6": "qt", - "PySide2": "qt", - "PyQt5": "qt", - } - - # The module that we try to find - module = None - - # Any errors we come accross as we try to import the gui backends - errors = [] - - # Prefer a backend for which the lib is already imported - imported = [libname for libname in gui_backends if libname in sys.modules] - for libname in imported: - try: - module = _load_backend(gui_backends[libname]) - break - except Exception as err: - errors.append(err) - - # If no module found yet, try importing the lib, then import the backend - if not module: - for libname in gui_backends: - try: - importlib.import_module(libname) - except ModuleNotFoundError: - continue - try: - module = _load_backend(gui_backends[libname]) - break - except Exception as err: - errors.append(err) - - # If still nothing found, raise a useful error - if not module: - msg = "\n".join(str(err) for err in errors) - msg += "\n\n Could not find either glfw or Qt framework." - msg += "\n Install glfw using e.g. ``pip install -U glfw``," - msg += "\n or install a qt framework using e.g. ``pip install -U pyside6``." - if sys.platform.startswith("linux"): - msg += "\n You may also need to run the equivalent of ``apt install libglfw3``." - raise ImportError(msg) from None - - return module - - -# Triage -if os.environ.get("WGPU_FORCE_OFFSCREEN") == "true": - module = _load_backend("offscreen") -elif is_jupyter(): - module = _load_backend("jupyter") -else: - module = _auto_load_backend() - - -WgpuCanvas, run, call_later = module.WgpuCanvas, module.run, module.call_later diff --git a/wgpu/gui/base.py b/wgpu/gui/base.py deleted file mode 100644 index 559d0c2..0000000 --- a/wgpu/gui/base.py +++ /dev/null @@ -1,417 +0,0 @@ -import os -import sys -import time -import weakref -import logging -from contextlib import contextmanager -import ctypes.util -from collections import defaultdict - -from .._coreutils import error_message_hash - -logger = logging.getLogger("wgpu") - -err_hashes = {} - - -@contextmanager -def log_exception(kind): - """Context manager to log any exceptions, but only log a one-liner - for subsequent occurances of the same error to avoid spamming by - repeating errors in e.g. a draw function or event callback. - """ - try: - yield - except Exception as err: - # Store exc info for postmortem debugging - exc_info = list(sys.exc_info()) - exc_info[2] = exc_info[2].tb_next # skip *this* function - sys.last_type, sys.last_value, sys.last_traceback = exc_info - # Show traceback, or a one-line summary - msg = str(err) - msgh = error_message_hash(msg) - if msgh not in err_hashes: - # Provide the exception, so the default logger prints a stacktrace. - # IDE's can get the exception from the root logger for PM debugging. - err_hashes[msgh] = 1 - logger.error(kind, exc_info=err) - else: - # We've seen this message before, return a one-liner instead. 
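The triage above is what users of ``wgpu.gui.auto`` rely on: set ``WGPU_FORCE_OFFSCREEN=true`` to force the offscreen backend, get the Jupyter widget when running in a notebook kernel, and otherwise fall back to glfw or an installed Qt binding. A minimal usage sketch; ``draw_frame`` stands in for a user-provided render callback and is not part of this module:

.. code-block:: py

    # Optionally set os.environ["WGPU_FORCE_OFFSCREEN"] = "true" before this import
    from wgpu.gui.auto import WgpuCanvas, run

    canvas = WgpuCanvas(title="wgpu auto gui")
    canvas.request_draw(draw_frame)   # draw_frame: hypothetical render callback
    run()                             # enters the selected backend's event loop
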
- err_hashes[msgh] = count = err_hashes[msgh] + 1 - msg = kind + ": " + msg.split("\n")[0].strip() - msg = msg if len(msg) <= 70 else msg[:69] + "…" - logger.error(msg + f" ({count})") - - -def weakbind(method): - """Replace a bound method with a callable object that stores the `self` using a weakref.""" - ref = weakref.ref(method.__self__) - class_func = method.__func__ - del method - - def proxy(*args, **kwargs): - self = ref() - if self is not None: - return class_func(self, *args, **kwargs) - - proxy.__name__ = class_func.__name__ - return proxy - - -class WgpuCanvasInterface: - """The minimal interface to be a valid canvas. - - Any object that implements these methods is a canvas that wgpu can work with. - The object does not even have to derive from this class. - - In most cases it's more convenient to subclass :class:`WgpuCanvasBase `. - """ - - def __init__(self, *args, **kwargs): - # The args/kwargs are there because we may be mixed with e.g. a Qt widget - super().__init__(*args, **kwargs) - self._canvas_context = None - - def get_window_id(self): - """Get the native window id. - - This is used to obtain a surface id, so that wgpu can render - to the region of the screen occupied by the canvas. - """ - raise NotImplementedError() - - def get_display_id(self): - """Get the native display id (Linux only). - - On Linux this is needed in addition to the window id to obtain - a surface id. The default implementation calls into the X11 lib - to get the display id. - """ - # Re-use to avoid creating loads of id's - if getattr(self, "_display_id", None) is not None: - return self._display_id - - if sys.platform.startswith("linux"): - is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() - if is_wayland: - raise NotImplementedError( - f"Cannot (yet) get display id on {self.__class__.__name__}." - ) - else: - x11 = ctypes.CDLL(ctypes.util.find_library("X11")) - x11.XOpenDisplay.restype = ctypes.c_void_p - self._display_id = x11.XOpenDisplay(None) - else: - raise RuntimeError(f"Cannot get display id on {sys.platform}.") - - return self._display_id - - def get_physical_size(self): - """Get the physical size of the canvas in integer pixels.""" - raise NotImplementedError() - - def get_context(self, kind="webgpu"): - """Get the ``GPUCanvasContext`` object corresponding to this canvas. - - The context is used to obtain a texture to render to, and to - present that texture to the canvas. This class provides a - default implementation to get the appropriate context. - - The ``kind`` argument is a remnant from the WebGPU spec and - must always be "webgpu". - """ - # Note that this function is analog to HtmlCanvas.getContext(), except - # here the only valid arg is 'webgpu', which is also made the default. - assert kind == "webgpu" - if self._canvas_context is None: - # Get the active wgpu backend module - backend_module = sys.modules["wgpu"].gpu.__module__ - # Instantiate the context - PC = sys.modules[backend_module].GPUCanvasContext # noqa: N806 - self._canvas_context = PC(self) - return self._canvas_context - - -class WgpuCanvasBase(WgpuCanvasInterface): - """A convenient base canvas class. - - This class provides a uniform API and implements common - functionality, to increase consistency and reduce code duplication. - It is convenient (but not strictly necessary) for canvas classes - to inherit from this class (but all builtin canvases do). 
- - This class provides an API for scheduling draws (``request_draw()``) - and implements a mechanism to call the provided draw function - (``draw_frame()``) and then present the result to the canvas. - - This class also implements draw rate limiting, which can be set - with the ``max_fps`` attribute (default 30). For benchmarks you may - also want to set ``vsync`` to False. - """ - - def __init__(self, *args, max_fps=30, vsync=True, **kwargs): - super().__init__(*args, **kwargs) - self._last_draw_time = 0 - self._max_fps = float(max_fps) - self._vsync = bool(vsync) - - def __del__(self): - # On delete, we call the custom close method. - try: - self.close() - except Exception: - pass - # Since this is sometimes used in a multiple inheritance, the - # superclass may (or may not) have a __del__ method. - try: - super().__del__() - except Exception: - pass - - def draw_frame(self): - """The function that gets called at each draw. - - You can implement this method in a subclass, or set it via a - call to request_draw(). - """ - pass - - def request_draw(self, draw_function=None): - """Schedule a new draw event. - - This function does not perform a draw directly, but schedules - a draw event at a suitable moment in time. In the draw event - the draw function is called, and the resulting rendered image - is presented to screen. - - Arguments: - draw_function (callable or None): The function to set as the new draw - function. If not given or None, the last set draw function is used. - - """ - if draw_function is not None: - self.draw_frame = draw_function - self._request_draw() - - def _draw_frame_and_present(self): - """Draw the frame and present the result. - - Errors are logged to the "wgpu" logger. Should be called by the - subclass at an appropriate time. - """ - self._last_draw_time = time.perf_counter() - # Perform the user-defined drawing code. When this errors, - # we should report the error and then continue, otherwise we crash. - # Returns the result of the context's present() call or None. - with log_exception("Draw error"): - self.draw_frame() - with log_exception("Present error"): - if self._canvas_context: - return self._canvas_context.present() - - def _get_draw_wait_time(self): - """Get time (in seconds) to wait until the next draw in order to honour max_fps.""" - now = time.perf_counter() - target_time = self._last_draw_time + 1.0 / self._max_fps - return max(0, target_time - now) - - # Methods that must be overloaded - - def get_pixel_ratio(self): - """Get the float ratio between logical and physical pixels.""" - raise NotImplementedError() - - def get_logical_size(self): - """Get the logical size in float pixels.""" - raise NotImplementedError() - - def get_physical_size(self): - """Get the physical size in integer pixels.""" - raise NotImplementedError() - - def set_logical_size(self, width, height): - """Set the window size (in logical pixels).""" - raise NotImplementedError() - - def close(self): - """Close the window.""" - pass - - def is_closed(self): - """Get whether the window is closed.""" - raise NotImplementedError() - - def _request_draw(self): - """GUI-specific implementation for ``request_draw()``. - - * This should invoke a new draw at a later time. - * The call itself should return directly. - * Multiple calls should result in a single new draw. - * Preferably the ``max_fps`` and ``vsync`` are honored. - """ - raise NotImplementedError() - - -class WgpuAutoGui: - """Mixin class for canvases implementing autogui. 
- - This class provides a common API for handling events and registering - event handlers. It adds to :class:`WgpuCanvasBase ` - that interactive examples and applications can be written in a - generic way (no-GUI specific code). - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._last_event_time = 0 - self._pending_events = {} - self._event_handlers = defaultdict(set) - - def _get_event_wait_time(self): - """Calculate the time to wait for the next event dispatching. - - Used for rate-limited events. - """ - rate = 75 # events per second - now = time.perf_counter() - target_time = self._last_event_time + 1.0 / rate - return max(0, target_time - now) - - def _handle_event_rate_limited( - self, event, call_later_func, match_keys, accum_keys - ): - """Alternative `to handle_event()` for events that must be rate-limted. - - If any of the ``match_keys`` keys of the new event differ from the currently - pending event, the old event is dispatched now. The ``accum_keys`` keys of - the current and new event are added together (e.g. to accumulate wheel delta). - - The (accumulated) event is handled in the following cases: - * When the timer runs out. - * When a non-rate-limited event is dispatched. - * When a rate-limited event of the same type is scheduled - that has different match_keys (e.g. modifiers changes). - - Subclasses that use this method must use ``_handle_event_and_flush()`` - where they would otherwise call ``handle_event()``, to preserve event order. - """ - event_type = event["event_type"] - event.setdefault("time_stamp", time.perf_counter()) - # We may need to emit the old event. Otherwise, we need to update the new one. - old = self._pending_events.get(event_type, None) - if old: - if any(event[key] != old[key] for key in match_keys): - self.handle_event(old) - else: - for key in accum_keys: - event[key] = old[key] + event[key] - # Make sure that we have scheduled a moment to handle events - if not self._pending_events: - call_later_func(self._get_event_wait_time(), self._handle_pending_events) - # Store the event object - self._pending_events[event_type] = event - - def _handle_event_and_flush(self, event): - """Call handle_event after flushing any pending (rate-limited) events.""" - event.setdefault("time_stamp", time.perf_counter()) - self._handle_pending_events() - self.handle_event(event) - - def _handle_pending_events(self): - """Handle any pending rate-limited events.""" - if self._pending_events: - events = self._pending_events.values() - self._last_event_time = time.perf_counter() - self._pending_events = {} - for ev in events: - self.handle_event(ev) - - def handle_event(self, event): - """Handle an incoming event. - - Subclasses can overload this method. Events include widget - resize, mouse/touch interaction, key events, and more. An event - is a dict with at least the key event_type. For details, see - https://jupyter-rfb.readthedocs.io/en/stable/events.html - - The default implementation dispatches the event to the - registered event handlers. - - Arguments: - event (dict): the event to handle. - """ - # Collect callbacks - event_type = event.get("event_type") - callbacks = self._event_handlers[event_type] | self._event_handlers["*"] - # Dispatch - for callback in callbacks: - with log_exception(f"Error during handling {event['event_type']} event"): - callback(event) - - def add_event_handler(self, *args): - """Register an event handler to receive events. - - Arguments: - callback (callable): The event handler. 
Must accept a single event argument. - *types (list of strings): A list of event types. - - For the available events, see - https://jupyter-rfb.readthedocs.io/en/stable/events.html. - - The callback is stored, so it can be a lambda or closure. This also - means that if a method is given, a reference to the object is held, - which may cause circular references or prevent the Python GC from - destroying that object. - - Example: - - .. code-block:: py - - def my_handler(event): - print(event) - - canvas.add_event_handler(my_handler, "pointer_up", "pointer_down") - - Can also be used as a decorator: - - .. code-block:: py - - @canvas.add_event_handler("pointer_up", "pointer_down") - def my_handler(event): - print(event) - - Catch 'm all: - - .. code-block:: py - - canvas.add_event_handler(my_handler, "*") - - """ - decorating = not callable(args[0]) - callback = None if decorating else args[0] - types = args if decorating else args[1:] - - if not types: - raise TypeError("No event types are given to add_event_handler.") - for type in types: - if not isinstance(type, str): - raise TypeError(f"Event types must be str, but got {type}") - - def decorator(_callback): - for type in types: - self._event_handlers[type].add(_callback) - return _callback - - if decorating: - return decorator - return decorator(callback) - - def remove_event_handler(self, callback, *types): - """Unregister an event handler. - - Arguments: - callback (callable): The event handler. - *types (list of strings): A list of event types. - """ - for type in types: - self._event_handlers[type].remove(callback) diff --git a/wgpu/gui/glfw.py b/wgpu/gui/glfw.py deleted file mode 100644 index 0679668..0000000 --- a/wgpu/gui/glfw.py +++ /dev/null @@ -1,553 +0,0 @@ -""" -Support to render in a glfw window. The advantage of glfw is that it's -very lightweight. - -Install pyGLFW using ``pip install glfw``. On Windows this is enough. -On Linux, install the glfw lib using ``sudo apt install libglfw3``, -or ``sudo apt install libglfw3-wayland`` when using Wayland. -""" - - -import os -import sys -import time -import weakref -import asyncio - -import glfw - -from .base import WgpuCanvasBase, WgpuAutoGui, weakbind - - -# Make sure that glfw is new enough -glfw_version_info = tuple(int(i) for i in glfw.__version__.split(".")[:2]) -if glfw_version_info < (1, 9): - raise ImportError("wgpu-py requires glfw 1.9 or higher.") - -# Do checks to prevent pitfalls on hybrid Xorg/Wayland systems -is_wayland = False -if sys.platform.startswith("linux"): - is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() - if is_wayland and not hasattr(glfw, "get_wayland_window"): - raise RuntimeError( - "We're on Wayland but Wayland functions not available. " - + "Did you apt install libglfw3-wayland?" 
- ) - -# Some glfw functions are not always available -set_window_content_scale_callback = lambda *args: None # noqa: E731 -set_window_maximize_callback = lambda *args: None # noqa: E731 -get_window_content_scale = lambda *args: (1, 1) # noqa: E731 - -if hasattr(glfw, "set_window_content_scale_callback"): - set_window_content_scale_callback = glfw.set_window_content_scale_callback -if hasattr(glfw, "set_window_maximize_callback"): - set_window_maximize_callback = glfw.set_window_maximize_callback -if hasattr(glfw, "get_window_content_scale"): - get_window_content_scale = glfw.get_window_content_scale - - -# Map keys to JS key definitions -# https://www.glfw.org/docs/3.3/group__keys.html -# https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key/Key_Values -KEY_MAP = { - glfw.KEY_DOWN: "ArrowDown", - glfw.KEY_UP: "ArrowUp", - glfw.KEY_LEFT: "ArrowLeft", - glfw.KEY_RIGHT: "ArrowRight", - glfw.KEY_BACKSPACE: "Backspace", - glfw.KEY_CAPS_LOCK: "CapsLock", - glfw.KEY_DELETE: "Delete", - glfw.KEY_END: "End", - glfw.KEY_ENTER: "Enter", # aka return - glfw.KEY_ESCAPE: "Escape", - glfw.KEY_F1: "F1", - glfw.KEY_F2: "F2", - glfw.KEY_F3: "F3", - glfw.KEY_F4: "F4", - glfw.KEY_F5: "F5", - glfw.KEY_F6: "F6", - glfw.KEY_F7: "F7", - glfw.KEY_F8: "F8", - glfw.KEY_F9: "F9", - glfw.KEY_F10: "F10", - glfw.KEY_F11: "F11", - glfw.KEY_F12: "F12", - glfw.KEY_HOME: "Home", - glfw.KEY_INSERT: "Insert", - glfw.KEY_LEFT_ALT: "Alt", - glfw.KEY_LEFT_CONTROL: "Control", - glfw.KEY_LEFT_SHIFT: "Shift", - glfw.KEY_LEFT_SUPER: "Meta", # in glfw super means Windows or MacOS-command - glfw.KEY_NUM_LOCK: "NumLock", - glfw.KEY_PAGE_DOWN: "PageDown", - glfw.KEY_PAGE_UP: "Pageup", - glfw.KEY_PAUSE: "Pause", - glfw.KEY_PRINT_SCREEN: "PrintScreen", - glfw.KEY_RIGHT_ALT: "Alt", - glfw.KEY_RIGHT_CONTROL: "Control", - glfw.KEY_RIGHT_SHIFT: "Shift", - glfw.KEY_RIGHT_SUPER: "Meta", - glfw.KEY_SCROLL_LOCK: "ScrollLock", - glfw.KEY_TAB: "Tab", -} - -KEY_MAP_MOD = { - glfw.KEY_LEFT_SHIFT: "Shift", - glfw.KEY_RIGHT_SHIFT: "Shift", - glfw.KEY_LEFT_CONTROL: "Control", - glfw.KEY_RIGHT_CONTROL: "Control", - glfw.KEY_LEFT_ALT: "Alt", - glfw.KEY_RIGHT_ALT: "Alt", - glfw.KEY_LEFT_SUPER: "Meta", - glfw.KEY_RIGHT_SUPER: "Meta", -} - - -class GlfwWgpuCanvas(WgpuAutoGui, WgpuCanvasBase): - """A glfw window providing a wgpu canvas.""" - - # See https://www.glfw.org/docs/latest/group__window.html - - def __init__(self, *, size=None, title=None, **kwargs): - ensure_app() - super().__init__(**kwargs) - - # Handle inputs - if not size: - size = 640, 480 - title = str(title or "glfw wgpu canvas") - - # Set window hints - glfw.window_hint(glfw.CLIENT_API, glfw.NO_API) - glfw.window_hint(glfw.RESIZABLE, True) - # see https://github.com/FlorianRhiem/pyGLFW/issues/42 - # Alternatively, from pyGLFW 1.10 one can set glfw.ERROR_REPORTING='warn' - if sys.platform.startswith("linux"): - if is_wayland: - glfw.window_hint(glfw.FOCUSED, False) # prevent Wayland focus error - - # Create the window (the initial size may not be in logical pixels) - self._window = glfw.create_window(int(size[0]), int(size[1]), title, None, None) - - # Other internal variables - self._need_draw = False - self._request_draw_timer_running = False - self._changing_pixel_ratio = False - self._is_minimized = False - - # Register ourselves - all_glfw_canvases.add(self) - - # Register callbacks. We may get notified too often, but that's - # ok, they'll result in a single draw. 
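For orientation, the same pattern spelled out against this backend specifically. A minimal sketch; ``draw_frame`` is again a stand-in for a user render callback, and the ``max_fps``/``vsync`` keywords come from ``WgpuCanvasBase``:

.. code-block:: py

    from wgpu.gui.glfw import WgpuCanvas, run

    # max_fps and vsync are handled by WgpuCanvasBase; worth tweaking for benchmarks
    canvas = WgpuCanvas(size=(640, 480), title="wgpu with glfw", max_fps=60)
    canvas.request_draw(draw_frame)   # draw_frame: hypothetical render callback
    run()                             # runs the asyncio-driven glfw mainloop below
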
- glfw.set_framebuffer_size_callback(self._window, weakbind(self._on_size_change)) - glfw.set_window_close_callback(self._window, weakbind(self._check_close)) - glfw.set_window_refresh_callback(self._window, weakbind(self._on_window_dirty)) - glfw.set_window_focus_callback(self._window, weakbind(self._on_window_dirty)) - set_window_content_scale_callback( - self._window, weakbind(self._on_pixelratio_change) - ) - set_window_maximize_callback(self._window, weakbind(self._on_window_dirty)) - glfw.set_window_iconify_callback(self._window, weakbind(self._on_iconify)) - - # User input - self._key_modifiers = [] - self._pointer_buttons = [] - self._pointer_pos = 0, 0 - self._double_click_state = {"clicks": 0} - glfw.set_mouse_button_callback(self._window, weakbind(self._on_mouse_button)) - glfw.set_cursor_pos_callback(self._window, weakbind(self._on_cursor_pos)) - glfw.set_scroll_callback(self._window, weakbind(self._on_scroll)) - glfw.set_key_callback(self._window, weakbind(self._on_key)) - - # Initialize the size - self._pixel_ratio = -1 - self._screen_size_is_logical = False - self.set_logical_size(*size) - self._request_draw() - - # Callbacks to provide a minimal working canvas for wgpu - - def _on_pixelratio_change(self, *args): - if self._changing_pixel_ratio: - return - self._changing_pixel_ratio = True # prevent recursion (on Wayland) - try: - self._set_logical_size(self._logical_size) - finally: - self._changing_pixel_ratio = False - self._request_draw() - - def _on_size_change(self, *args): - self._determine_size() - self._request_draw() - - def _check_close(self, *args): - # Follow the close flow that glfw intended. - # This method can be overloaded and the close-flag can be set to False - # using set_window_should_close() if now is not a good time to close. - if self._window is not None and glfw.window_should_close(self._window): - self._on_close() - - def _on_close(self, *args): - all_glfw_canvases.discard(self) - if self._window is not None: - glfw.destroy_window(self._window) # not just glfw.hide_window - self._window = None - self._handle_event_and_flush({"event_type": "close"}) - - def _on_window_dirty(self, *args): - self._request_draw() - - def _on_iconify(self, window, iconified): - self._is_minimized = bool(iconified) - - # helpers - - def _mark_ready_for_draw(self): - self._request_draw_timer_running = False - self._need_draw = True # The event loop looks at this flag - glfw.post_empty_event() # Awake the event loop, if it's in wait-mode - - def _determine_size(self): - if self._window is None: - return - # Because the value of get_window_size is in physical-pixels - # on some systems and in logical-pixels on other, we use the - # framebuffer size and pixel ratio to derive the logical size. - pixel_ratio = get_window_content_scale(self._window)[0] - psize = glfw.get_framebuffer_size(self._window) - psize = int(psize[0]), int(psize[1]) - - self._pixel_ratio = pixel_ratio - self._physical_size = psize - self._logical_size = psize[0] / pixel_ratio, psize[1] / pixel_ratio - - ev = { - "event_type": "resize", - "width": self._logical_size[0], - "height": self._logical_size[1], - "pixel_ratio": self._pixel_ratio, - } - self._handle_event_and_flush(ev) - - def _set_logical_size(self, new_logical_size): - if self._window is None: - return - # There is unclarity about the window size in "screen pixels". - # It appears that on Windows and X11 its the same as the - # framebuffer size, and on macOS it's logical pixels. 
- # See https://github.com/glfw/glfw/issues/845 - # Here, we simply do a quick test so we can compensate. - - # The current screen size and physical size, and its ratio - pixel_ratio = get_window_content_scale(self._window)[0] - ssize = glfw.get_window_size(self._window) - psize = glfw.get_framebuffer_size(self._window) - - # Apply - if is_wayland: - # Not sure why, but on Wayland things work differently - screen_ratio = ssize[0] / new_logical_size[0] - glfw.set_window_size( - self._window, - int(new_logical_size[0] / screen_ratio), - int(new_logical_size[1] / screen_ratio), - ) - else: - screen_ratio = ssize[0] / psize[0] - glfw.set_window_size( - self._window, - int(new_logical_size[0] * pixel_ratio * screen_ratio), - int(new_logical_size[1] * pixel_ratio * screen_ratio), - ) - self._screen_size_is_logical = screen_ratio != 1 - # If this causes the widget size to change, then _on_size_change will - # be called, but we may want force redetermining the size. - if pixel_ratio != self._pixel_ratio: - self._determine_size() - - # API - - def get_window_id(self): - if sys.platform.startswith("win"): - return int(glfw.get_win32_window(self._window)) - elif sys.platform.startswith("darwin"): - return int(glfw.get_cocoa_window(self._window)) - elif sys.platform.startswith("linux"): - if is_wayland: - return int(glfw.get_wayland_window(self._window)) - else: - return int(glfw.get_x11_window(self._window)) - else: - raise RuntimeError(f"Cannot get GLFW window id on {sys.platform}.") - - def get_display_id(self): - if sys.platform.startswith("linux"): - if is_wayland: - return glfw.get_wayland_display() - else: - return glfw.get_x11_display() - else: - raise RuntimeError(f"Cannot get GLFW display id on {sys.platform}.") - - def get_pixel_ratio(self): - return self._pixel_ratio - - def get_logical_size(self): - return self._logical_size - - def get_physical_size(self): - return self._physical_size - - def set_logical_size(self, width, height): - if width < 0 or height < 0: - raise ValueError("Window width and height must not be negative") - self._set_logical_size((float(width), float(height))) - - def _request_draw(self): - if not self._request_draw_timer_running: - self._request_draw_timer_running = True - call_later(self._get_draw_wait_time(), self._mark_ready_for_draw) - - def close(self): - if self._window is not None: - glfw.set_window_should_close(self._window, True) - self._check_close() - - def is_closed(self): - return self._window is None - - # User events - - def _on_mouse_button(self, window, but, action, mods): - # Map button being changed, which we use to update self._pointer_buttons. 
- button_map = { - glfw.MOUSE_BUTTON_1: 1, # == MOUSE_BUTTON_LEFT - glfw.MOUSE_BUTTON_2: 2, # == MOUSE_BUTTON_RIGHT - glfw.MOUSE_BUTTON_3: 3, # == MOUSE_BUTTON_MIDDLE - glfw.MOUSE_BUTTON_4: 4, - glfw.MOUSE_BUTTON_5: 5, - glfw.MOUSE_BUTTON_6: 6, - glfw.MOUSE_BUTTON_7: 7, - glfw.MOUSE_BUTTON_8: 8, - } - button = button_map.get(but, 0) - - if action == glfw.PRESS: - event_type = "pointer_down" - buttons = set(self._pointer_buttons) - buttons.add(button) - self._pointer_buttons = list(sorted(buttons)) - elif action == glfw.RELEASE: - event_type = "pointer_up" - buttons = set(self._pointer_buttons) - buttons.discard(button) - self._pointer_buttons = list(sorted(buttons)) - else: - return - - ev = { - "event_type": event_type, - "x": self._pointer_pos[0], - "y": self._pointer_pos[1], - "button": button, - "buttons": list(self._pointer_buttons), - "modifiers": list(self._key_modifiers), - "ntouches": 0, # glfw dows not have touch support - "touches": {}, - } - - # Emit the current event - self._handle_event_and_flush(ev) - - # Maybe emit a double-click event - self._follow_double_click(action, button) - - def _follow_double_click(self, action, button): - # If a sequence of down-up-down-up is made in nearly the same - # spot, and within a short time, we emit the double-click event. - - x, y = self._pointer_pos[0], self._pointer_pos[1] - state = self._double_click_state - - timeout = 0.25 - distance = 5 - - # Clear the state if it does no longer match - if state["clicks"] > 0: - d = ((x - state["x"]) ** 2 + (y - state["y"]) ** 2) ** 0.5 - if ( - d > distance - or time.perf_counter() - state["time"] > timeout - or button != state["button"] - ): - self._double_click_state = {"clicks": 0} - - clicks = self._double_click_state["clicks"] - - # Check and update order. 
Emit event if we make it to the final step - if clicks == 0 and action == glfw.PRESS: - self._double_click_state = { - "clicks": 1, - "button": button, - "time": time.perf_counter(), - "x": x, - "y": y, - } - elif clicks == 1 and action == glfw.RELEASE: - self._double_click_state["clicks"] = 2 - elif clicks == 2 and action == glfw.PRESS: - self._double_click_state["clicks"] = 3 - elif clicks == 3 and action == glfw.RELEASE: - self._double_click_state = {"clicks": 0} - ev = { - "event_type": "double_click", - "x": self._pointer_pos[0], - "y": self._pointer_pos[1], - "button": button, - "buttons": list(self._pointer_buttons), - "modifiers": list(self._key_modifiers), - "ntouches": 0, # glfw dows not have touch support - "touches": {}, - } - self._handle_event_and_flush(ev) - - def _on_cursor_pos(self, window, x, y): - # Store pointer position in logical coordinates - if self._screen_size_is_logical: - self._pointer_pos = x, y - else: - self._pointer_pos = x / self._pixel_ratio, y / self._pixel_ratio - - ev = { - "event_type": "pointer_move", - "x": self._pointer_pos[0], - "y": self._pointer_pos[1], - "button": 0, - "buttons": list(self._pointer_buttons), - "modifiers": list(self._key_modifiers), - "ntouches": 0, # glfw dows not have touch support - "touches": {}, - } - - match_keys = {"buttons", "modifiers", "ntouches"} - accum_keys = {} - self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) - - def _on_scroll(self, window, dx, dy): - # wheel is 1 or -1 in glfw, in jupyter_rfb this is ~100 - ev = { - "event_type": "wheel", - "dx": 100.0 * dx, - "dy": -100.0 * dy, - "x": self._pointer_pos[0], - "y": self._pointer_pos[1], - "buttons": list(self._pointer_buttons), - "modifiers": list(self._key_modifiers), - } - match_keys = {"modifiers"} - accum_keys = {"dx", "dy"} - self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) - - def _on_key(self, window, key, scancode, action, mods): - modifier = KEY_MAP_MOD.get(key, None) - - if action == glfw.PRESS: - event_type = "key_down" - if modifier: - modifiers = set(self._key_modifiers) - modifiers.add(modifier) - self._key_modifiers = list(sorted(modifiers)) - elif action == glfw.RELEASE: - event_type = "key_up" - if modifier: - modifiers = set(self._key_modifiers) - modifiers.discard(modifier) - self._key_modifiers = list(sorted(modifiers)) - else: # glfw.REPEAT - return - - # Note that if the user holds shift while pressing "5", will result in "5", - # and not in the "%" that you'd expect on a US keyboard. Glfw wants us to - # use set_char_callback for text input, but then we'd only get an event for - # key presses (down followed by up). So we accept that GLFW is less complete - # in this respect. - if key in KEY_MAP: - keyname = KEY_MAP[key] - else: - try: - keyname = chr(key) - except ValueError: - return # Probably a special key that we don't have in our KEY_MAP - if "Shift" not in self._key_modifiers: - keyname = keyname.lower() - - ev = { - "event_type": event_type, - "key": keyname, - "modifiers": list(self._key_modifiers), - } - self._handle_event_and_flush(ev) - - -# Make available under a name that is the same for all gui backends -WgpuCanvas = GlfwWgpuCanvas - - -all_glfw_canvases = weakref.WeakSet() -glfw._pygfx_mainloop = None -glfw._pygfx_stop_if_no_more_canvases = False - - -def update_glfw_canvasses(): - """Call this in your glfw event loop to draw each canvas that needs - an update. Returns the number of visible canvases. 
- """ - # Note that _draw_frame_and_present already catches errors, it can - # only raise errors if the logging system fails. - canvases = tuple(all_glfw_canvases) - for canvas in canvases: - if canvas._need_draw and not canvas._is_minimized: - canvas._need_draw = False - canvas._draw_frame_and_present() - return len(canvases) - - -async def mainloop(): - loop = asyncio.get_event_loop() - while True: - n = update_glfw_canvasses() - if glfw._pygfx_stop_if_no_more_canvases and not n: - break - await asyncio.sleep(0.001) - glfw.poll_events() - loop.stop() - glfw.terminate() - - -def ensure_app(): - # It is safe to call init multiple times: - # "Additional calls to this function after successful initialization - # but before termination will return GLFW_TRUE immediately." - glfw.init() - if glfw._pygfx_mainloop is None: - loop = asyncio.get_event_loop() - glfw._pygfx_mainloop = mainloop() - loop.create_task(glfw._pygfx_mainloop) - - -def call_later(delay, callback, *args): - loop = asyncio.get_event_loop() - loop.call_later(delay, callback, *args) - - -def run(): - ensure_app() - loop = asyncio.get_event_loop() - if not loop.is_running(): - glfw._pygfx_stop_if_no_more_canvases = True - loop.run_forever() - else: - pass # Probably an interactive session diff --git a/wgpu/gui/jupyter.py b/wgpu/gui/jupyter.py deleted file mode 100644 index 905146f..0000000 --- a/wgpu/gui/jupyter.py +++ /dev/null @@ -1,137 +0,0 @@ -""" -Support for rendering in a Jupyter widget. Provides a widget subclass that -can be used as cell output, or embedded in a ipywidgets gui. -""" - -from collections import defaultdict -import weakref -import asyncio - -from .offscreen import WgpuOffscreenCanvasBase -from .base import WgpuAutoGui - -import numpy as np -from jupyter_rfb import RemoteFrameBuffer -from IPython.display import display - - -pending_jupyter_canvases = [] - - -class JupyterWgpuCanvas(WgpuAutoGui, WgpuOffscreenCanvasBase, RemoteFrameBuffer): - """An ipywidgets widget providing a wgpu canvas. Needs the jupyter_rfb library.""" - - def __init__(self, *, size=None, title=None, **kwargs): - super().__init__(**kwargs) - - # Internal variables - self._pixel_ratio = 1 - self._logical_size = 0, 0 - self._is_closed = False - self._request_draw_timer_running = False - self._event_handlers = defaultdict(set) - - # Register so this can be display'ed when run() is called - pending_jupyter_canvases.append(weakref.ref(self)) - - # Initialize size - if size is not None: - self.set_logical_size(*size) - - # Implementation needed for RemoteFrameBuffer - - def handle_event(self, event): - event_type = event.get("event_type") - if event_type == "close": - self._is_closed = True - elif event_type == "resize": - self._pixel_ratio = event["pixel_ratio"] - self._logical_size = event["width"], event["height"] - - # No need to rate-limit the pointer_move and wheel events; - # they're already rate limited by jupyter_rfb in the client. - super().handle_event(event) - - def get_frame(self): - self._request_draw_timer_running = False - # The _draw_frame_and_present() does the drawing and then calls - # present_context.present(), which calls our present() method. - # The resuls is either a numpy array or None, and this matches - # with what this method is expected to return. 
- return self._draw_frame_and_present() - - # Implementation needed for WgpuCanvasBase - - def get_pixel_ratio(self): - return self._pixel_ratio - - def get_logical_size(self): - return self._logical_size - - def get_physical_size(self): - return int(self._logical_size[0] * self._pixel_ratio), int( - self._logical_size[1] * self._pixel_ratio - ) - - def set_logical_size(self, width, height): - self.css_width = f"{width}px" - self.css_height = f"{height}px" - - def close(self): - RemoteFrameBuffer.close(self) - - def is_closed(self): - return self._is_closed - - def _request_draw(self): - if not self._request_draw_timer_running: - self._request_draw_timer_running = True - call_later(self._get_draw_wait_time(), RemoteFrameBuffer.request_draw, self) - - # Implementation needed for WgpuOffscreenCanvasBase - - def present(self, texture): - # This gets called at the end of a draw pass via offscreen.GPUCanvasContext - device = texture._device - size = texture.size - bytes_per_pixel = 4 - data = device.queue.read_texture( - { - "texture": texture, - "mip_level": 0, - "origin": (0, 0, 0), - }, - { - "offset": 0, - "bytes_per_row": bytes_per_pixel * size[0], - "rows_per_image": size[1], - }, - size, - ) - return np.frombuffer(data, np.uint8).reshape(size[1], size[0], 4) - - def get_preferred_format(self): - # Use a format that maps well to PNG: rgba8norm. Use srgb for - # perseptive color mapping. This is the common colorspace for - # e.g. png and jpg images. Most tools (browsers included) will - # blit the png to screen as-is, and a screen wants colors in srgb. - return "rgba8unorm-srgb" - - -# Make available under a name that is the same for all gui backends -WgpuCanvas = JupyterWgpuCanvas - - -def call_later(delay, callback, *args): - loop = asyncio.get_event_loop() - loop.call_later(delay, callback, *args) - - -def run(): - # Show all widgets that have been created so far. - # No need to actually start an event loop, since Jupyter already runs it. - canvases = [r() for r in pending_jupyter_canvases] - pending_jupyter_canvases.clear() - for w in canvases: - if w and not w.is_closed(): - display(w) diff --git a/wgpu/gui/offscreen.py b/wgpu/gui/offscreen.py deleted file mode 100644 index 7ea5b06..0000000 --- a/wgpu/gui/offscreen.py +++ /dev/null @@ -1,244 +0,0 @@ -import time - -from .. import classes, flags -from .base import WgpuCanvasBase, WgpuAutoGui - - -class GPUCanvasContext(classes.GPUCanvasContext): - """GPUCanvasContext subclass for rendering to an offscreen texture.""" - - # In this context implementation, we keep a ref to the texture, to keep - # it alive until at least until present() is called, and to be able to - # pass it to the canvas' present() method. Thereafter, the texture - # reference is removed. If there are no more references to it, it will - # be cleaned up. But if the offscreen canvas uses it for something, - # it'll simply stay alive longer. 
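To make the texture hand-off described above concrete: the manual offscreen canvas defined further below drives this context, renders on demand, and hands the pixels back as a memoryview. A minimal sketch, where ``draw_frame`` is a hypothetical render callback that configures the context and records a render pass:

.. code-block:: py

    import numpy as np
    from wgpu.gui.offscreen import WgpuCanvas   # i.e. WgpuManualOffscreenCanvas

    canvas = WgpuCanvas(size=(640, 480), pixel_ratio=1)
    canvas.request_draw(draw_frame)   # draw_frame: hypothetical render callback

    # draw() calls draw_frame, the context presents its texture to the canvas,
    # and the pixels come back as an (height, width, 4) memoryview.
    image = canvas.draw()
    rgba = np.asarray(image)          # zero-copy view, shape (480, 640, 4)
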
- - def __init__(self, canvas): - super().__init__(canvas) - self._config = None - self._texture = None - - def configure( - self, - *, - device, - format, - usage=flags.TextureUsage.RENDER_ATTACHMENT | flags.TextureUsage.COPY_SRC, - view_formats=[], - color_space="srgb", - alpha_mode="opaque" - ): - if format is None: - format = self.get_preferred_format(device.adapter) - self._config = { - "device": device, - "format": format, - "usage": usage, - "width": 0, - "height": 0, - # "view_formats": xx, - # "color_space": xx, - # "alpha_mode": xx, - } - - def unconfigure(self): - self._texture = None - self._config = None - - def get_current_texture(self): - if not self._config: - raise RuntimeError( - "Canvas context must be configured before calling get_current_texture()." - ) - - width, height = self._get_canvas().get_physical_size() - width, height = max(width, 1), max(height, 1) - - self._texture = self._config["device"].create_texture( - label="presentation-context", - size=(width, height, 1), - format=self._config["format"], - usage=self._config["usage"], - ) - return self._texture - - def present(self): - if not self._texture: - msg = "present() is called without a preceeding call to " - msg += "get_current_texture(). Note that present() is usually " - msg += "called automatically after the draw function returns." - raise RuntimeError(msg) - else: - texture = self._texture - self._texture = None - return self._get_canvas().present(texture) - - def get_preferred_format(self, adapter): - canvas = self._get_canvas() - if canvas: - return canvas.get_preferred_format() - else: - return "rgba8unorm-srgb" - - -class WgpuOffscreenCanvasBase(WgpuCanvasBase): - """Base class for off-screen canvases. - - It provides a custom context that renders to a texture instead of - a surface/screen. On each draw the resulting image is passes as a - texture to the ``present()`` method. Subclasses should (at least) - implement ``present()`` - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def get_window_id(self): - """This canvas does not correspond to an on-screen window.""" - return None - - def get_context(self, kind="webgpu"): - """Get the GPUCanvasContext object to obtain a texture to render to.""" - # Normally this creates a GPUCanvasContext object provided by - # the backend (e.g. wgpu-native), but here we use our own context. - assert kind == "webgpu" - if self._canvas_context is None: - self._canvas_context = GPUCanvasContext(self) - return self._canvas_context - - def present(self, texture): - """Method that gets called at the end of each draw event. - - The rendered image is represented by the texture argument. - Subclasses should overload this method and use the texture to - process the rendered image. - - The texture is a new object at each draw, but is not explicitly - destroyed, so it can be used e.g. as a texture binding (subject - to set TextureUsage). - """ - # Notes: Creating a new texture object for each draw is - # consistent with how real canvas contexts work, plus it avoids - # confusion of re-using the same texture except when the canvas - # changes size. For use-cases where you do want to render to the - # same texture one does not need the canvas API. E.g. in pygfx - # the renderer can also work with a target that is a (fixed - # size) texture. - pass - - def get_preferred_format(self): - """Get the preferred format for this canvas. - - This method can be overloaded to control the used texture - format. The default is "rgba8unorm-srgb". 
- """ - # Use rgba because that order is more common for processing and storage. - # Use srgb because that's what how colors are usually expected to be. - # Use 8unorm because 8bit is enough (when using srgb). - return "rgba8unorm-srgb" - - -class WgpuManualOffscreenCanvas(WgpuAutoGui, WgpuOffscreenCanvasBase): - """An offscreen canvas intended for manual use. - - Call the ``.draw()`` method to perform a draw and get the result. - """ - - def __init__(self, *args, size=None, pixel_ratio=1, title=None, **kwargs): - super().__init__(*args, **kwargs) - self._logical_size = (float(size[0]), float(size[1])) if size else (640, 480) - self._pixel_ratio = pixel_ratio - self._title = title - self._closed = False - - def get_pixel_ratio(self): - return self._pixel_ratio - - def get_logical_size(self): - return self._logical_size - - def get_physical_size(self): - return int(self._logical_size[0] * self._pixel_ratio), int( - self._logical_size[1] * self._pixel_ratio - ) - - def set_logical_size(self, width, height): - self._logical_size = width, height - - def close(self): - self._closed = True - - def is_closed(self): - return self._closed - - def _request_draw(self): - # Deliberately a no-op, because people use .draw() instead. - pass - - def present(self, texture): - # This gets called at the end of a draw pass via GPUCanvasContext - device = texture._device - size = texture.size - bytes_per_pixel = 4 - data = device.queue.read_texture( - { - "texture": texture, - "mip_level": 0, - "origin": (0, 0, 0), - }, - { - "offset": 0, - "bytes_per_row": bytes_per_pixel * size[0], - "rows_per_image": size[1], - }, - size, - ) - - # Return as memory object to avoid numpy dependency - # Equivalent: np.frombuffer(data, np.uint8).reshape(size[1], size[0], 4) - return data.cast("B", (size[1], size[0], 4)) - - def draw(self): - """Perform a draw and get the resulting image. - - The image array is returned as an NxMx4 memoryview object. - This object can be converted to a numpy array (without copying data) - using ``np.asarray(arr)``. - """ - return self._draw_frame_and_present() - - -WgpuCanvas = WgpuManualOffscreenCanvas - - -# If we consider the use-cases for using this offscreen canvas: -# -# * Using wgpu.gui.auto in test-mode: in this case run() should not hang, -# and call_later should not cause lingering refs. -# * Using the offscreen canvas directly, in a script: in this case you -# do not have/want an event system. -# * Using the offscreen canvas in an evented app. In that case you already -# have an app with a specific event-loop (it might be PySide6 or -# something else entirely). -# -# In summary, we provide a call_later() and run() that behave pretty -# well for the first case. - -_pending_calls = [] - - -def call_later(delay, callback, *args): - # Note that this module never calls call_later() itself; request_draw() is a no-op. - etime = time.time() + delay - _pending_calls.append((etime, callback, args)) - - -def run(): - # Process pending calls - for etime, callback, args in _pending_calls.copy(): - if time.time() >= etime: - callback(*args) - - # Clear any leftover scheduled calls, to avoid lingering refs. - _pending_calls.clear() diff --git a/wgpu/gui/qt.py b/wgpu/gui/qt.py deleted file mode 100644 index 90cecd9..0000000 --- a/wgpu/gui/qt.py +++ /dev/null @@ -1,430 +0,0 @@ -""" -Support for rendering in a Qt widget. Provides a widget subclass that -can be used as a standalone window or in a larger GUI. 
-""" - -import sys -import ctypes -import importlib - -from .base import WgpuCanvasBase, WgpuAutoGui, weakbind - - -# Select GUI toolkit -for libname in ("PySide6", "PyQt6", "PySide2", "PyQt5"): - if libname in sys.modules: - QtCore = importlib.import_module(".QtCore", libname) - QtWidgets = importlib.import_module(".QtWidgets", libname) - try: - WA_PaintOnScreen = QtCore.Qt.WidgetAttribute.WA_PaintOnScreen - WA_DeleteOnClose = QtCore.Qt.WidgetAttribute.WA_DeleteOnClose - PreciseTimer = QtCore.Qt.TimerType.PreciseTimer - KeyboardModifiers = QtCore.Qt.KeyboardModifier - FocusPolicy = QtCore.Qt.FocusPolicy - Keys = QtCore.Qt.Key - except AttributeError: - WA_PaintOnScreen = QtCore.Qt.WA_PaintOnScreen - WA_DeleteOnClose = QtCore.Qt.WA_DeleteOnClose - PreciseTimer = QtCore.Qt.PreciseTimer - KeyboardModifiers = QtCore.Qt - FocusPolicy = QtCore.Qt - Keys = QtCore.Qt - break -else: - raise ImportError( - "Before importing wgpu.gui.qt, import one of PySide6/PySide2/PyQt6/PyQt5 to select a Qt toolkit." - ) - - -# Get version -if libname.startswith("PySide"): - qt_version_info = QtCore.__version_info__ -else: - try: - qt_version_info = tuple(int(i) for i in QtCore.QT_VERSION_STR.split(".")[:3]) - except Exception: # Failsafe - qt_version_info = (0, 0, 0) - - -BUTTON_MAP = { - QtCore.Qt.MouseButton.LeftButton: 1, # == MOUSE_BUTTON_LEFT - QtCore.Qt.MouseButton.RightButton: 2, # == MOUSE_BUTTON_RIGHT - QtCore.Qt.MouseButton.MiddleButton: 3, # == MOUSE_BUTTON_MIDDLE - QtCore.Qt.MouseButton.BackButton: 4, - QtCore.Qt.MouseButton.ForwardButton: 5, - QtCore.Qt.MouseButton.TaskButton: 6, - QtCore.Qt.MouseButton.ExtraButton4: 7, - QtCore.Qt.MouseButton.ExtraButton5: 8, -} - -MODIFIERS_MAP = { - KeyboardModifiers.ShiftModifier: "Shift", - KeyboardModifiers.ControlModifier: "Control", - KeyboardModifiers.AltModifier: "Alt", - KeyboardModifiers.MetaModifier: "Meta", -} - -KEY_MAP = { - int(Keys.Key_Down): "ArrowDown", - int(Keys.Key_Up): "ArrowUp", - int(Keys.Key_Left): "ArrowLeft", - int(Keys.Key_Right): "ArrowRight", - int(Keys.Key_Backspace): "Backspace", - int(Keys.Key_CapsLock): "CapsLock", - int(Keys.Key_Delete): "Delete", - int(Keys.Key_End): "End", - int(Keys.Key_Enter): "Enter", - int(Keys.Key_Escape): "Escape", - int(Keys.Key_F1): "F1", - int(Keys.Key_F2): "F2", - int(Keys.Key_F3): "F3", - int(Keys.Key_F4): "F4", - int(Keys.Key_F5): "F5", - int(Keys.Key_F6): "F6", - int(Keys.Key_F7): "F7", - int(Keys.Key_F8): "F8", - int(Keys.Key_F9): "F9", - int(Keys.Key_F10): "F10", - int(Keys.Key_F11): "F11", - int(Keys.Key_F12): "F12", - int(Keys.Key_Home): "Home", - int(Keys.Key_Insert): "Insert", - int(Keys.Key_Alt): "Alt", - int(Keys.Key_Control): "Control", - int(Keys.Key_Shift): "Shift", - int(Keys.Key_Meta): "Meta", # meta maps to control in QT on macOS, and vice-versa - int(Keys.Key_NumLock): "NumLock", - int(Keys.Key_PageDown): "PageDown", - int(Keys.Key_PageUp): "Pageup", - int(Keys.Key_Pause): "Pause", - int(Keys.Key_ScrollLock): "ScrollLock", - int(Keys.Key_Tab): "Tab", -} - - -# Make Qt not ignore XDG_SESSION_TYPE -# is_wayland = "wayland" in os.getenv("XDG_SESSION_TYPE", "").lower() -# if is_wayland: -# os.environ["QT_QPA_PLATFORM"] = "wayland" - - -def enable_hidpi(): - """Enable high-res displays.""" - set_dpi_aware = qt_version_info < (6, 4) # Pyside - if set_dpi_aware: - try: - # See https://github.com/pyzo/pyzo/pull/700 why we seem to need both - # See https://github.com/pygfx/pygfx/issues/368 for high Qt versions - ctypes.windll.shcore.SetProcessDpiAwareness(1) # global dpi aware - 
ctypes.windll.shcore.SetProcessDpiAwareness(2) # per-monitor dpi aware - except Exception: - pass # fail on non-windows - try: - QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True) - except Exception: - pass # fail on older Qt's - - -# If you import this module, you want to use wgpu in a way that does not suck -# on high-res monitors. So we apply the minimal configuration to make this so. -# Most apps probably should also set AA_UseHighDpiPixmaps, but it's not -# needed for wgpu, so not our responsibility (some users may NOT want it set). -enable_hidpi() - - -class QWgpuWidget(WgpuAutoGui, WgpuCanvasBase, QtWidgets.QWidget): - """A QWidget representing a wgpu canvas that can be embedded in a Qt application.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # Configure how Qt renders this widget - self.setAttribute(WA_PaintOnScreen, True) - self.setAttribute(WA_DeleteOnClose, True) - self.setAutoFillBackground(False) - self.setMouseTracking(True) - self.setFocusPolicy(FocusPolicy.StrongFocus) - - # A timer for limiting fps - self._request_draw_timer = QtCore.QTimer() - self._request_draw_timer.setTimerType(PreciseTimer) - self._request_draw_timer.setSingleShot(True) - self._request_draw_timer.timeout.connect(self.update) - - def paintEngine(self): # noqa: N802 - this is a Qt method - # https://doc.qt.io/qt-5/qt.html#WidgetAttribute-enum WA_PaintOnScreen - return None - - def paintEvent(self, event): # noqa: N802 - this is a Qt method - self._draw_frame_and_present() - - # Methods that we add from wgpu (snake_case) - - def get_display_id(self): - # There is qx11info, but it is rarely available. - # https://doc.qt.io/qt-5/qx11info.html#display - return super().get_display_id() # uses X11 lib - - def get_window_id(self): - return int(self.winId()) - - def get_pixel_ratio(self): - # Observations: - # * On Win10 + PyQt5 the ratio is a whole number (175% becomes 2). - # * On Win10 + PyQt6 the ratio is correct (non-integer). - return self.devicePixelRatioF() - - def get_logical_size(self): - # Sizes in Qt are logical - lsize = self.width(), self.height() - return float(lsize[0]), float(lsize[1]) - - def get_physical_size(self): - # https://doc.qt.io/qt-5/qpaintdevice.html - # https://doc.qt.io/qt-5/highdpi.html - lsize = self.width(), self.height() - lsize = float(lsize[0]), float(lsize[1]) - ratio = self.devicePixelRatioF() - # When the ratio is not integer (qt6), we need to somehow round - # it. It turns out that we need to round it, but also add a - # small offset. Tested on Win10 with several different OS - # scales. Would be nice if we could ask Qt for the exact - # physical size! Not an issue on qt5, because ratio is always - # integer then. 
- return round(lsize[0] * ratio + 0.01), round(lsize[1] * ratio + 0.01) - - def set_logical_size(self, width, height): - if width < 0 or height < 0: - raise ValueError("Window width and height must not be negative") - self.resize(width, height) # See comment on pixel ratio - - def _request_draw(self): - if not self._request_draw_timer.isActive(): - self._request_draw_timer.start(int(self._get_draw_wait_time() * 1000)) - - def close(self): - QtWidgets.QWidget.close(self) - - def is_closed(self): - return not self.isVisible() - - # User events to jupyter_rfb events - - def _key_event(self, event_type, event): - modifiers = [ - MODIFIERS_MAP[mod] - for mod in MODIFIERS_MAP.keys() - if mod & event.modifiers() - ] - - ev = { - "event_type": event_type, - "key": KEY_MAP.get(event.key(), event.text()), - "modifiers": modifiers, - } - self._handle_event_and_flush(ev) - - def keyPressEvent(self, event): # noqa: N802 - self._key_event("key_down", event) - - def keyReleaseEvent(self, event): # noqa: N802 - self._key_event("key_up", event) - - def _mouse_event(self, event_type, event, touches=True): - button = BUTTON_MAP.get(event.button(), 0) - buttons = [ - BUTTON_MAP[button] - for button in BUTTON_MAP.keys() - if button & event.buttons() - ] - - # For Qt on macOS Control and Meta are switched - modifiers = [ - MODIFIERS_MAP[mod] - for mod in MODIFIERS_MAP.keys() - if mod & event.modifiers() - ] - - ev = { - "event_type": event_type, - "x": event.pos().x(), - "y": event.pos().y(), - "button": button, - "buttons": buttons, - "modifiers": modifiers, - } - if touches: - ev.update( - { - "ntouches": 0, - "touches": {}, # TODO: Qt touch events - } - ) - - if event_type == "pointer_move": - match_keys = {"buttons", "modifiers", "ntouches"} - accum_keys = {} - self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) - else: - self._handle_event_and_flush(ev) - - def mousePressEvent(self, event): # noqa: N802 - self._mouse_event("pointer_down", event) - - def mouseMoveEvent(self, event): # noqa: N802 - self._mouse_event("pointer_move", event) - - def mouseReleaseEvent(self, event): # noqa: N802 - self._mouse_event("pointer_up", event) - - def mouseDoubleClickEvent(self, event): # noqa: N802 - super().mouseDoubleClickEvent(event) - self._mouse_event("double_click", event, touches=False) - - def wheelEvent(self, event): # noqa: N802 - # For Qt on macOS Control and Meta are switched - modifiers = [ - MODIFIERS_MAP[mod] - for mod in MODIFIERS_MAP.keys() - if mod & event.modifiers() - ] - buttons = [ - BUTTON_MAP[button] - for button in BUTTON_MAP.keys() - if button & event.buttons() - ] - - ev = { - "event_type": "wheel", - "dx": -event.angleDelta().x(), - "dy": -event.angleDelta().y(), - "x": event.position().x(), - "y": event.position().y(), - "buttons": buttons, - "modifiers": modifiers, - } - match_keys = {"modifiers"} - accum_keys = {"dx", "dy"} - self._handle_event_rate_limited(ev, call_later, match_keys, accum_keys) - - def resizeEvent(self, event): # noqa: N802 - ev = { - "event_type": "resize", - "width": float(event.size().width()), - "height": float(event.size().height()), - "pixel_ratio": self.get_pixel_ratio(), - } - self._handle_event_and_flush(ev) - - def closeEvent(self, event): # noqa: N802 - self._handle_event_and_flush({"event_type": "close"}) - - -class QWgpuCanvas(WgpuAutoGui, WgpuCanvasBase, QtWidgets.QWidget): - """A toplevel Qt widget providing a wgpu canvas.""" - - # Most of this is proxying stuff to the inner widget. 
- # We cannot use a toplevel widget directly, otherwise the window - # size can be set to subpixel (logical) values, without being able to - # detect this. See https://github.com/pygfx/wgpu-py/pull/68 - - def __init__(self, *, size=None, title=None, max_fps=30, **kwargs): - # When using Qt, there needs to be an - # application before any widget is created - get_app() - - super().__init__(**kwargs) - - self.setAttribute(WA_DeleteOnClose, True) - self.set_logical_size(*(size or (640, 480))) - self.setWindowTitle(title or "qt wgpu canvas") - self.setMouseTracking(True) - - self._subwidget = QWgpuWidget(self, max_fps=max_fps) - self._subwidget.add_event_handler(weakbind(self.handle_event), "*") - - # Get the window id one time. For some reason this is needed - # to "activate" the canvas. Otherwise the viz is not shown if - # one does not provide canvas to request_adapter(). - # (AK: Cannot reproduce this now, what qtlib/os/versions was this on?) - self._subwidget.get_window_id() - - layout = QtWidgets.QHBoxLayout(self) - layout.setContentsMargins(0, 0, 0, 0) - self.setLayout(layout) - layout.addWidget(self._subwidget) - - self.show() - - # Qt methods - - def update(self): - super().update() - self._subwidget.update() - - # Methods that we add from wgpu (snake_case) - - @property - def draw_frame(self): - return self._subwidget.draw_frame - - @draw_frame.setter - def draw_frame(self, f): - self._subwidget.draw_frame = f - - def get_display_id(self): - return self._subwidget.get_display_id() - - def get_window_id(self): - return self._subwidget.get_window_id() - - def get_pixel_ratio(self): - return self._subwidget.get_pixel_ratio() - - def get_logical_size(self): - return self._subwidget.get_logical_size() - - def get_physical_size(self): - return self._subwidget.get_physical_size() - - def set_logical_size(self, width, height): - if width < 0 or height < 0: - raise ValueError("Window width and height must not be negative") - self.resize(width, height) # See comment on pixel ratio - - def _request_draw(self): - return self._subwidget._request_draw() - - def close(self): - self._subwidget.close() - QtWidgets.QWidget.close(self) - - def is_closed(self): - return not self.isVisible() - - # Methods that we need to explicitly delegate to the subwidget - - def get_context(self, *args, **kwargs): - return self._subwidget.get_context(*args, **kwargs) - - def request_draw(self, *args, **kwargs): - return self._subwidget.request_draw(*args, **kwargs) - - -# Make available under a name that is the same for all gui backends -WgpuWidget = QWgpuWidget -WgpuCanvas = QWgpuCanvas - - -def get_app(): - """Return global instance of Qt app instance or create one if not created yet.""" - return QtWidgets.QApplication.instance() or QtWidgets.QApplication([]) - - -def run(): - app = get_app() - app.exec() if hasattr(app, "exec") else app.exec_() - - -def call_later(delay, callback, *args): - QtCore.QTimer.singleShot(int(delay * 1000), lambda: callback(*args)) diff --git a/wgpu/gui/wx.py b/wgpu/gui/wx.py deleted file mode 100644 index 106d751..0000000 --- a/wgpu/gui/wx.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -Support for rendering in a wxPython window. Provides a widget that -can be used as a standalone window or in a larger GUI. 
-""" - -import ctypes - -from .base import WgpuCanvasBase, weakbind - -import wx - - -def enable_hidpi(): - """Enable high-res displays.""" - try: - ctypes.windll.shcore.SetProcessDpiAwareness(1) - ctypes.windll.shcore.SetProcessDpiAwareness(2) - except Exception: - pass # fail on non-windows - - -enable_hidpi() - - -class TimerWithCallback(wx.Timer): - def __init__(self, callback): - super().__init__() - self._callback = callback - - def Notify(self, *args): # noqa: N802 - try: - self._callback() - except RuntimeError: - pass # wrapped C/C++ object of type WxWgpuWindow has been deleted - - -class WxWgpuWindow(WgpuCanvasBase, wx.Window): - """A wx Window representing a wgpu canvas that can be embedded in a wx application.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # A timer for limiting fps - self._request_draw_timer = TimerWithCallback(self.Refresh) - - # We keep a timer to prevent draws during a resize. This prevents - # issues with mismatching present sizes during resizing (on Linux). - self._resize_timer = TimerWithCallback(self._on_resize_done) - self._draw_lock = False - - self.Bind(wx.EVT_PAINT, self.on_paint) - self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None) - self.Bind(wx.EVT_SIZE, self._on_resize) - - def on_paint(self, event): - dc = wx.PaintDC(self) # needed for wx - if not self._draw_lock: - self._draw_frame_and_present() - del dc - event.Skip() - - def _on_resize(self, *args): - self._draw_lock = True - self._resize_timer.Start(100, wx.TIMER_ONE_SHOT) - - def _on_resize_done(self, *args): - self._draw_lock = False - self._request_draw() - - # Methods that we add from wgpu - - def get_window_id(self): - return int(self.GetHandle()) - - def get_pixel_ratio(self): - # todo: this is not hidpi-ready (at least on win10) - # Observations: - # * On Win10 this always returns 1 - so hidpi is effectively broken - return self.GetContentScaleFactor() - - def get_logical_size(self): - lsize = self.Size[0], self.Size[1] - return float(lsize[0]), float(lsize[1]) - - def get_physical_size(self): - lsize = self.Size[0], self.Size[1] - lsize = float(lsize[0]), float(lsize[1]) - ratio = self.GetContentScaleFactor() - return round(lsize[0] * ratio + 0.01), round(lsize[1] * ratio + 0.01) - - def set_logical_size(self, width, height): - if width < 0 or height < 0: - raise ValueError("Window width and height must not be negative") - self.SetSize(width, height) - - def _request_draw(self): - # Despite the FPS limiting the delayed call to refresh solves - # that drawing only happens when the mouse is down, see #209. - if not self._request_draw_timer.IsRunning(): - self._request_draw_timer.Start( - self._get_draw_wait_time() * 1000, wx.TIMER_ONE_SHOT - ) - - def close(self): - self.Hide() - - def is_closed(self): - return not self.IsShown() - - -class WxWgpuCanvas(WgpuCanvasBase, wx.Frame): - """A toplevel wx Frame providing a wgpu canvas.""" - - # Most of this is proxying stuff to the inner widget. 
- - def __init__(self, *, parent=None, size=None, title=None, max_fps=30, **kwargs): - super().__init__(parent, **kwargs) - - self.set_logical_size(*(size or (640, 480))) - self.SetTitle(title or "wx wgpu canvas") - - self._subwidget = WxWgpuWindow(parent=self, max_fps=max_fps) - self._subwidget.add_event_handler(weakbind(self.handle_event), "*") - self.Bind(wx.EVT_CLOSE, lambda e: self.Destroy()) - - self.Show() - - # wx methods - - def Refresh(self): # noqa: N802 - super().Refresh() - self._subwidget.Refresh() - - # Methods that we add from wgpu - - def get_display_id(self): - return self._subwidget.get_display_id() - - def get_window_id(self): - return self._subwidget.get_window_id() - - def get_pixel_ratio(self): - return self._subwidget.get_pixel_ratio() - - def get_logical_size(self): - return self._subwidget.get_logical_size() - - def get_physical_size(self): - return self._subwidget.get_physical_size() - - def set_logical_size(self, width, height): - if width < 0 or height < 0: - raise ValueError("Window width and height must not be negative") - self.SetSize(width, height) - - def _request_draw(self): - return self._subwidget._request_draw() - - def close(self): - super().close() - - def is_closed(self): - return not self.isVisible() - - # Methods that we need to explicitly delegate to the subwidget - - def get_context(self, *args, **kwargs): - return self._subwidget.get_context(*args, **kwargs) - - def request_draw(self, *args, **kwargs): - return self._subwidget.request_draw(*args, **kwargs) - - -# Make available under a name that is the same for all gui backends -WgpuWidget = WxWgpuWindow -WgpuCanvas = WxWgpuCanvas From 6442da06042aba9a9907b84682a9b90d33a8457e Mon Sep 17 00:00:00 2001 From: Jan Date: Wed, 27 Dec 2023 22:56:32 +0100 Subject: [PATCH 04/20] Remove more files from wgpu subfolder --- codegen/README.md | 153 -- codegen/__init__.py | 65 - codegen/__main__.py | 19 - codegen/apipatcher.py | 529 ------ codegen/apiwriter.py | 146 -- codegen/files.py | 97 - codegen/hparser.py | 231 --- codegen/idlparser.py | 432 ----- codegen/tests/test_codegen_apipatcher.py | 118 -- codegen/tests/test_codegen_rspatcher.py | 94 - codegen/tests/test_codegen_utils.py | 256 --- codegen/tests/test_codegen_z.py | 83 - codegen/utils.py | 312 ---- codegen/wgpu_native_patcher.py | 359 ---- wgpu/__pyinstaller/__init__.py | 12 - wgpu/__pyinstaller/conftest.py | 1 - wgpu/__pyinstaller/hook-wgpu.py | 28 - wgpu/__pyinstaller/test_wgpu.py | 30 - wgpu/_classes.py | 2100 ---------------------- wgpu/_coreutils.py | 157 -- wgpu/_diagnostics.py | 520 ------ wgpu/classes.py | 8 - wgpu/enums.py | 686 ------- wgpu/flags.py | 111 -- wgpu/resources/__init__.py | 2 - wgpu/resources/codegen_report.md | 34 - wgpu/resources/webgpu.h | 1803 ------------------- wgpu/resources/webgpu.idl | 1314 -------------- wgpu/resources/wgpu.h | 256 --- wgpu/structs.py | 748 -------- 30 files changed, 10704 deletions(-) delete mode 100644 codegen/README.md delete mode 100644 codegen/__init__.py delete mode 100644 codegen/__main__.py delete mode 100644 codegen/apipatcher.py delete mode 100644 codegen/apiwriter.py delete mode 100644 codegen/files.py delete mode 100644 codegen/hparser.py delete mode 100644 codegen/idlparser.py delete mode 100644 codegen/tests/test_codegen_apipatcher.py delete mode 100644 codegen/tests/test_codegen_rspatcher.py delete mode 100644 codegen/tests/test_codegen_utils.py delete mode 100644 codegen/tests/test_codegen_z.py delete mode 100644 codegen/utils.py delete mode 100644 codegen/wgpu_native_patcher.py 
delete mode 100644 wgpu/__pyinstaller/__init__.py delete mode 100644 wgpu/__pyinstaller/conftest.py delete mode 100644 wgpu/__pyinstaller/hook-wgpu.py delete mode 100644 wgpu/__pyinstaller/test_wgpu.py delete mode 100644 wgpu/_classes.py delete mode 100644 wgpu/_coreutils.py delete mode 100644 wgpu/_diagnostics.py delete mode 100644 wgpu/classes.py delete mode 100644 wgpu/enums.py delete mode 100644 wgpu/flags.py delete mode 100644 wgpu/resources/__init__.py delete mode 100644 wgpu/resources/codegen_report.md delete mode 100644 wgpu/resources/webgpu.h delete mode 100644 wgpu/resources/webgpu.idl delete mode 100644 wgpu/resources/wgpu.h delete mode 100644 wgpu/structs.py diff --git a/codegen/README.md b/codegen/README.md deleted file mode 100644 index fdeebb6..0000000 --- a/codegen/README.md +++ /dev/null @@ -1,153 +0,0 @@ -# wgpu-py codegen - - - -## Introduction - -### How wgpu-py is maintained - -The wgpu-py library provides a Pythonic interpretation of the [WebGPU API](https://www.w3.org/TR/webgpu/). It closely follows the official spec (in the form of an [IDL file](https://gpuweb.github.io/gpuweb/webgpu.idl)). Further below is a section on how we deviate from the spec. - -The actual implementation is implemented in backends. At the moment there is only one backend, based on [wgpu-native](https://github.com/gfx-rs/wgpu-native). We make API calls into this dynamic library, as specified by [two header files](https://github.com/gfx-rs/wgpu-native/tree/trunk/ffi). - -The API (based on the IDL) and the backend (based on the header files) can be updated independently. In both cases, however, we are dealing with a relatively large API, which is (currently) changing quite a bit, and we need the implementation to be precise. Therefore, doing the maintenance completely by hand would be a big burden and prone to errors. - -On the other hand, applying fully automated code generation is also not feasible, because of the many edge-cases that have to be taken into account. Plus the code-generation code must also be maintained. - -Therefore we aim for a hybrid approach in which the aforementioned specs are used to *check* the implementations and introduce code and comments to make updates easier. - -### The purpose of `codegen` - -* Make maintaining wgpu-py as easy as possible; -* In particular the process of updating to new versions of WebGPU and wgpu-native; -* To validate that our API matches the WebGPU spec, and know where it differs. -* To validate that our calls into wgpu-native are correct. - -During an update, it should *not* be necessary to check the diffs of `webgpu.idl` or `webgpu.h`. Instead, by running the -codegen, any relevant differences in these specs should result in changes (of code or annotations) in the respective `.py`files. That said, during development it can be helpful to use the WebGPU spec and the header files as a reference. - -This package is *not* part of the wgpu library - it is a tool to help maintain it. It has its own tests, which try to cover the utils well, -but the parsers and generators are less important to fully cover by tests, because we are the only users. If it breaks, we fix it. - -### General tips - -* It's probably easier to update relatively often, so that each increment is small. -* Sometimes certain features or changes are present in WebGPU, but not in wgpu-native. This may result in some manual mappings etc. which make the code less elegant. These hacks are generally temporary though. 
-* It's generally recommended to update `webgpu.idl` and `webgpu.h` separately. Though it could also be advantageous to combine them, to avoid the hacky stuff mentioned in the previous point.
-
-
-## What the codegen does in general
-
-* Help update the front API.
-  * Make changes to `_classes.py`.
-  * Generate `flags.py`, `enums.py`, and `structs.py`.
-* Help update the wgpu-native backend:
-  * Make changes to `backends/wgpu_native/_api.py`.
-  * Generate `backends/wgpu_native/_mappings.py`.
-* Write `resources/codegen_report.md` providing a summary of the codegen process.
-
-
-
-## Updating the front API
-
-### Introduction
-
-The WebGPU API is specified by `webgpu.idl` (in the resources directory). We parse this file with a custom parser (`idlparser.py`) to obtain a description of the interfaces, enums, and flags.
-
-Note that while `wgpu/_classes.py` defines the API (and corresponding docstrings), the implementation of the majority of methods occurs in the backends, so most methods simply `raise NotImplementedError()`.
-
-### Changes with respect to JS
-
-In some cases we may want to deviate from the WebGPU API, because well ... Python is not JavaScript. There is a simple system in place to mark any such differences, which also makes sure that these changes are listed in the docs. To mark how the py API deviates from the WebGPU spec:
-
-* Decorate a method with `@apidiff.hide` to mark it as not supported by our API.
-* Decorate a method with `@apidiff.add` to mark it as intended even though it does not
-  match the WebGPU spec.
-* Decorate a method with `@apidiff.change` to mark that our method has a different signature.
-
-Other changes include:
-
-* Where in JS the input args are provided via a dict, we use kwargs directly. Nevertheless, some input args have subdicts (and sub-sub-dicts).
-* For methods that are async in IDL, we also provide sync methods. The async method names have an "_async" suffix.
-
-### Codegen summary
-
-* Generate `flags.py`, `enums.py`, and `structs.py`.
-
-* Make changes to `_classes.py`.
-
-  * Add missing classes, methods and properties, along with a FIXME comment.
-
-  * Modify changed signatures, along with a FIXME comment.
-  * Mark unknown classes, methods and properties with a FIXME comment.
-
-  * Put a comment that contains the corresponding IDL-line for each method and attribute.
-
-
-### The update process
-
-* Download the latest [webgpu.idl](https://gpuweb.github.io/gpuweb/webgpu.idl) and place in the resources folder.
-* Run `python codegen` to apply the automatic patches to the code.
-* It may be necessary to tweak the `idlparser.py` to adjust to new formatting.
-* Check the diff of `flags.py`, `enums.py`, `structs.py` for any changes that might need manual work.
-* Go through all FIXME comments that were added in `_classes.py`:
-  * Apply any necessary changes.
-  * Remove the FIXME comment if no further action is needed, or turn into a TODO for later.
-  * Note that all new classes/methods/properties (except those marked as hidden) need a docstring.
-* Run `python codegen` again to validate that all is well. Repeat the step above if necessary.
-* Make sure that the tests run and provide full coverage.
-* Make sure that the examples all work.
-* Update downstream code, like our own tests and examples, but also e.g. pygfx.
-* Make a summary of the API changes to put in the release notes.
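The `apidiff` markers described under "Changes with respect to JS" above can be pictured with a minimal, self-contained sketch. Everything below is hypothetical: `ApiDiffStub` only mimics the role of the real `ApiDiff` helper in `wgpu/_coreutils.py` (whose exact signatures may differ), and `GPUFoo` and its methods are invented purely for illustration.

# Hypothetical sketch: a stand-in ApiDiff so this example runs on its own.
# The real helper lives in wgpu/_coreutils.py and its signatures may differ.
class ApiDiffStub:
    def add(self, info):
        return lambda func: func  # marks a method added on top of the WebGPU spec

    def hide(self, info):
        return lambda func: func  # marks a spec method not exposed by wgpu-py

    def change(self, info):
        return lambda func: func  # marks a method with a deviating signature


apidiff = ApiDiffStub()


class GPUFoo:  # hypothetical class, purely for illustration
    @apidiff.add("convenience helper with no WebGPU equivalent")
    def read_current_texture(self):
        ...

    @apidiff.hide("only meaningful in a browser environment")
    def transfer_to_image_bitmap(self):
        ...

    @apidiff.change("descriptor dict flattened into keyword arguments")
    def configure(self, *, device, format=None):
        ...

As the README text above notes, in the real codebase these markers also make sure the differences get listed in the docs, rather than being identity decorators as in this stub.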
-
-
-## Updating the wgpu-native backend
-
-### Introduction
-
-The backends are almost a copy of `_classes.py`: all methods in `_classes.py` that `raise NotImplementedError()` must be implemented.
-
-The wgpu-native backend calls into a dynamic library, whose interface is specified by `webgpu.h` and `wgpu.h` (in the resources directory). We parse these files with a custom parser (`hparser.py`) to obtain a description of the interfaces, enums, flags, and structs.
-
-The majority of work in the wgpu-native backend is the conversion of Python dicts to C structs, and then using them to call into the dynamic library. The codegen helps by validating the structs and API calls.
-
-### Tips
-
-* In the code, use `new_struct()` and `new_struct_p()` to create a C structure with minimal boilerplate. It also converts string enum values to their corresponding integers.
-
-* Since the codegen adds comments for missing fields, you can instantiate a struct without any fields, then run the codegen to fill it in, and then further implement the logic.
-* The API of the backends should not deviate from the base API - only `@apidiff.add` is allowed (and should be used sparingly).
-* Use `webgpu.h` and `wgpu.h` as a reference to check available functions and structs.
-* No docstrings needed in this module.
-* This process typically does not introduce changes to the API, but wgpu may now be more strict on specific usage or require changes to the shaders.
-
-### Codegen summary
-
-* Generate `backends/wgpu_native/_mappings.py`.
-  * Generate mappings for enum field names to ints.
-  * Detect and report missing flags and enum fields.
-
-* Make changes to `wgpu_native/_api.py`.
-  * Validate and annotate function calls into the lib.
-  * Validate and annotate struct creations (missing struct fields are filled in).
-  * Ensure that each incoming struct is checked to catch invalid input.
-
-### The update process
-
-* Download the latest `webgpu.h` and DLL using `python download-wgpu-native.py --version xx`
-* Run `python codegen` to apply the automatic patches to the code.
-* It may be necessary to tweak the `hparser.py` to adjust to new formatting.
-* Diff the report for new differences to take into account.
-* Diff `wgpu_native/_api.py` to get an idea of what structs and functions have changed.
-* Go through all FIXME comments that were added in `_api.py`:
-  * Apply any necessary changes.
-  * Remove the FIXME comment if no further action is needed, or turn into a TODO for later.
-
-* Run `python codegen` again to validate that all is well. Repeat the steps above if necessary.
-* Make sure that the tests run and provide full coverage.
-* Make sure that the examples all work.
-* Update downstream code, like our own tests and examples, but also e.g. pygfx.
-
-* Make a summary of the API changes to put in the release notes.
diff --git a/codegen/__init__.py b/codegen/__init__.py
deleted file mode 100644
index b2b7767..0000000
--- a/codegen/__init__.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import io
-
-from .utils import print, PrintToFile
-from . import apiwriter, apipatcher, wgpu_native_patcher, idlparser, hparser
-from .files import file_cache
-
-
-def main():
-    """Codegen entry point.
This will populate the file cache with the - new code, but not write it to disk.""" - - log = io.StringIO() - with PrintToFile(log): - print("# Code generatation report") - prepare() - update_api() - update_wgpu_native() - file_cache.write("resources/codegen_report.md", log.getvalue()) - - -def prepare(): - """Force parsing (and caching) the IDL and C header.""" - print("## Preparing") - file_cache.reset() - idlparser.get_idl_parser(allow_cache=False) - hparser.get_h_parser(allow_cache=False) - - -def update_api(): - """Update the public API and patch the public-facing API of the backends.""" - - print("## Updating API") - - # Write the simple stuff - apiwriter.write_flags() - apiwriter.write_enums() - apiwriter.write_structs() - - # Patch base API: IDL -> API - code1 = file_cache.read("_classes.py") - print("### Patching API for _classes.py") - code2 = apipatcher.patch_base_api(code1) - file_cache.write("_classes.py", code2) - - # Patch backend APIs: _classes.py -> API - for fname in ["backends/wgpu_native/_api.py"]: - code1 = file_cache.read(fname) - print(f"### Patching API for {fname}") - code2 = apipatcher.patch_backend_api(code1) - file_cache.write(fname, code2) - - -def update_wgpu_native(): - """Update and check the wgpu-native backend.""" - - print("## Validating backends/wgpu_native/_api.py") - - # Write the simple stuff - wgpu_native_patcher.compare_flags() - wgpu_native_patcher.write_mappings() - - # Patch wgpu_native api - code1 = file_cache.read("backends/wgpu_native/_api.py") - code2 = wgpu_native_patcher.patch_wgpu_native_backend(code1) - file_cache.write("backends/wgpu_native/_api.py", code2) diff --git a/codegen/__main__.py b/codegen/__main__.py deleted file mode 100644 index 9c8a261..0000000 --- a/codegen/__main__.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -The entrypoint / script to apply automatic patches to the code. -See README.md for more information. -""" - -import os -import sys - - -# Little trick to allow running this file as a script -sys.path.insert(0, os.path.abspath(os.path.join(__file__, "..", ".."))) - - -from codegen import main, file_cache # noqa: E402 - - -if __name__ == "__main__": - main() - file_cache.write_changed_files_to_disk() diff --git a/codegen/apipatcher.py b/codegen/apipatcher.py deleted file mode 100644 index a4cfdcc..0000000 --- a/codegen/apipatcher.py +++ /dev/null @@ -1,529 +0,0 @@ -""" -The logic to generate/patch the base API from the WebGPU -spec (IDL), and the backend implementations from the base API. -""" - -from codegen.utils import print, blacken, to_snake_case, to_camel_case, Patcher -from codegen.idlparser import get_idl_parser -from codegen.files import file_cache - - -def patch_base_api(code): - """Given the Python code, applies patches to make the code conform - to the IDL. - """ - idl = get_idl_parser() - - # Write __all__ - part1, found_all, part2 = code.partition("\n__all__ =") - if found_all: - part2 = part2.split("]", 1)[-1] - line = "\n__all__ = [" - line += ", ".join(f'"{name}"' for name in idl.classes.keys()) - line += "]" - code = part1 + line + part2 - - # Patch! - for patcher in [CommentRemover(), BaseApiPatcher(), IdlCommentInjector()]: - patcher.apply(code) - code = patcher.dumps() - return code - - -def patch_backend_api(code): - """Given the Python code, applies patches to make the code conform - to the base API. - """ - - # Obtain the base API definition - base_api_code = file_cache.read("_classes.py") - - # Patch! 
- for patcher in [ - CommentRemover(), - BackendApiPatcher(base_api_code), - StructValidationChecker(), - ]: - patcher.apply(code) - code = patcher.dumps() - return code - - -class CommentRemover(Patcher): - """A patcher that removes comments that we add in other parsers, - to prevent accumulating comments. - """ - - triggers = "# IDL:", "# FIXME: unknown api", "# FIXME: missing check_struct" - - def apply(self, code): - self._init(code) - for line, i in self.iter_lines(): - if line.lstrip().startswith(self.triggers): - self.remove_line(i) - - -class AbstractCommentInjector(Patcher): - """A base patcher that can insert helpful comments in front of - properties, methods, and classes. It does not mark any as new or unknown, - since that is the task of the API patchers. - - Also moves decorators just above the def. Doing this here in a - post-processing step means we dont have to worry about decorators - in the other patchers, keeping them simpler. - """ - - # Note that in terms of structure, this class is basically a simplified - # version of the AbstractApiPatcher - - def apply(self, code): - self._init(code) - self.patch_classes() - - def patch_classes(self): - for classname, i1, i2 in self.iter_classes(): - if self.class_is_known(classname): - comment = self.get_class_comment(classname) - if comment: - self.insert_line(i1, comment) - self.patch_properties(classname, i1 + 1, i2) - self.patch_methods(classname, i1 + 1, i2) - - def patch_properties(self, classname, i1, i2): - for propname, j1, j2 in self.iter_properties(i1): - comment = self.get_prop_comment(classname, propname) - if comment: - self.insert_line(j1, comment) - self._move_decorator_below_comments(j1) - - def patch_methods(self, classname, i1, i2): - for methodname, j1, j2 in self.iter_methods(i1): - comment = self.get_method_comment(classname, methodname) - if comment: - self.insert_line(j1, comment) - self._move_decorator_below_comments(j1) - - def _move_decorator_below_comments(self, i_def): - for i in range(i_def - 3, i_def): - line = self.lines[i] - if line.lstrip().startswith("@"): - self.remove_line(i) - self.insert_line(i_def, line) - - -class AbstractApiPatcher(Patcher): - """The base patcher to update a wgpu API. - - This code is generalized, so it can be used both to generate the base API - as well as the backends (implementations). - - The idea is to walk over all classes, patch it if necessary, then - walk over each of its properties and methods to patch these too. 
- """ - - def apply(self, code): - self._init(code) - self._counts = {"classes": 0, "methods": 0, "properties": 0} - self.patch_classes() - stats = ", ".join(f"{self._counts[key]} {key}" for key in self._counts) - print("Validated " + stats) - - def patch_classes(self): - seen_classes = set() - - # Update existing classes in the Python code - for classname, i1, i2 in self.iter_classes(): - seen_classes.add(classname) - self._apidiffs = set() - if self.class_is_known(classname): - old_line = self.lines[i1] - new_line = self.get_class_def(classname) - if old_line != new_line: - fixme_line = "# FIXME: was " + old_line.split("class ", 1)[-1] - self.replace_line(i1, f"{fixme_line}\n{new_line}") - self.patch_properties(classname, i1 + 1, i2) - self.patch_methods(classname, i1 + 1, i2) - else: - msg = f"unknown api: class {classname}" - self.insert_line(i1, "# FIXME: " + msg) - print("Warning: " + msg) - if self._apidiffs: - print(f"Diffs for {classname}:", ", ".join(sorted(self._apidiffs))) - - # Add missing classes - lines = [] - for classname in self.get_class_names(): - if classname not in seen_classes: - lines.append("# FIXME: new class to implement") - lines.append(self.get_class_def(classname)) - more_lines = [] - more_lines += self.get_missing_properties(classname, set()) - more_lines += self.get_missing_methods(classname, set()) - lines.extend(more_lines or [" pass"]) - if lines: - self.insert_line(i2 + 1, "\n".join(lines)) - - self._counts["classes"] += len(seen_classes) - - def patch_properties(self, classname, i1, i2): - seen_props = set() - - # Update existing properties in Python code - for propname, j1, j2 in self.iter_properties(i1): - seen_props.add(propname) - pre_lines = "\n".join(self.lines[j1 - 3 : j1]) - self._apidiffs_from_lines(pre_lines, propname) - if self.prop_is_known(classname, propname): - if "@apidiff.add" in pre_lines: - print(f"ERROR: apidiff.add for known {classname}.{propname}") - elif "@apidiff.hide" in pre_lines: - pass # continue as normal - old_line = self.lines[j1] - new_line = f" def {propname}(self):" - if old_line != new_line: - fixme_line = " # FIXME: was " + old_line.split("def ", 1)[-1] - lines = [fixme_line, new_line] - self.replace_line(j1, "\n".join(lines)) - elif "@apidiff.add" in pre_lines: - pass - else: - msg = f"unknown api: prop {classname}.{propname}" - self.insert_line(j1, " # FIXME: " + msg) - print("Warning: " + msg) - - # Add missing properties for this class - lines = self.get_missing_properties(classname, seen_props) - if lines: - self.insert_line(i2 + 1, "\n".join(lines)) - - self._counts["properties"] += len(seen_props) - - def patch_methods(self, classname, i1, i2): - seen_funcs = set() - - # Update existing methods in Python code - for methodname, j1, j2 in self.iter_methods(i1): - seen_funcs.add(methodname) - pre_lines = "\n".join(self.lines[j1 - 3 : j1]) - self._apidiffs_from_lines(pre_lines, methodname) - if self.method_is_known(classname, methodname): - if "@apidiff.add" in pre_lines: - print(f"ERROR: apidiff.add for known {classname}.{methodname}") - elif "@apidiff.hide" in pre_lines: - pass # continue as normal - elif "@apidiff.change" in pre_lines: - continue - old_line = self.lines[j1] - new_line = self.get_method_def(classname, methodname) - if old_line != new_line: - fixme_line = " # FIXME: was " + old_line.split("def ", 1)[-1] - lines = [fixme_line, new_line] - self.replace_line(j1, "\n".join(lines)) - elif "@apidiff.add" in pre_lines: - pass - elif methodname.startswith("_"): - pass - else: - msg = f"unknown api: method 
{classname}.{methodname}" - self.insert_line(j1, " # FIXME: " + msg) - print("Warning: " + msg) - - # Add missing methods for this class - lines = self.get_missing_methods(classname, seen_funcs) - if lines: - self.insert_line(i2 + 1, "\n".join(lines)) - - self._counts["methods"] += len(seen_funcs) - - def get_missing_properties(self, classname, seen_props): - lines = [] - for propname in self.get_required_prop_names(classname): - if propname not in seen_props: - lines.append(" # FIXME: new prop to implement") - lines.append(" @property") - lines.append(f" def {propname}(self):") - lines.append(" raise NotImplementedError()") - lines.append("") - return lines - - def get_missing_methods(self, classname, seen_funcs): - lines = [] - for methodname in self.get_required_method_names(classname): - if methodname not in seen_funcs: - lines.append(" # FIXME: new method to implement") - lines.append(self.get_method_def(classname, methodname)) - lines.append(" raise NotImplementedError()\n") - return lines - - def _apidiffs_from_lines(self, text, what): - diffs = [x.replace("(", " ").split()[0] for x in text.split("@apidiff.")[1:]] - if diffs: - self._apidiffs.add(f"{'/'.join(diffs)} {what}") - - -class IdlPatcherMixin: - def __init__(self): - super().__init__() - self.idl = get_idl_parser() - - def name2idl(self, name): - m = {"__init__": "constructor"} - name = m.get(name, name) - return to_camel_case(name) - - def name2py(self, name): - m = {"constructor": "__init__"} - name = m.get(name, name) - return to_snake_case(name) - - def class_is_known(self, classname): - return classname in self.idl.classes - - def get_class_def(self, classname): - cls = self.idl.classes[classname] - # Make sure that GPUObjectBase comes last, for MRO - ignore = "Event", "EventTarget", "DOMException" - bases = sorted(cls.bases or [], key=lambda n: n.count("GPUObjectBase")) - bases = [b for b in bases if b not in ignore] - # Cover some special cases - if classname.lower().endswith("error"): - if "memory" in classname.lower(): - bases.append("MemoryError") - elif not bases: - bases.append("Exception") - - bases = "" if not bases else f"({', '.join(bases)})" - return f"class {classname}{bases}:" - - def get_method_def(self, classname, methodname): - # Get the corresponding IDL line - functions = self.idl.classes[classname].functions - name_idl = self.name2idl(methodname) - if methodname.endswith("_async") and name_idl not in functions: - name_idl = self.name2idl(methodname.replace("_async", "")) - elif name_idl not in functions and name_idl + "Async" in functions: - name_idl += "Async" - idl_line = functions[name_idl] - - # Construct preamble - preamble = "def " + to_snake_case(methodname) + "(" - if "async" in methodname: - preamble = "async " + preamble - - # Get arg names and types - args = idl_line.split("(", 1)[1].split(")", 1)[0].split(",") - args = [arg.strip() for arg in args if arg.strip()] - raw_defaults = [arg.partition("=")[2].strip() for arg in args] - place_holder_default = False - defaults = [] - for default, arg in zip(raw_defaults, args): - if default: - place_holder_default = "None" # any next args must have a default - elif arg.startswith("optional "): - default = "None" - else: - default = place_holder_default - defaults.append(default) - - argnames = [arg.split("=")[0].split()[-1] for arg in args] - argnames = [to_snake_case(argname) for argname in argnames] - argnames = [(f"{n}={v}" if v else n) for n, v in zip(argnames, defaults)] - argtypes = [arg.split("=")[0].split()[-2] for arg in args] - - # If 
one arg that is a dict, flatten dict to kwargs - if len(argtypes) == 1 and argtypes[0].endswith( - ("Options", "Descriptor", "Configuration") - ): - assert argtypes[0].startswith("GPU") - fields = self.idl.structs[argtypes[0][3:]].values() # struct fields - py_args = [self._arg_from_struct_field(field) for field in fields] - if py_args[0].startswith("label: str"): - py_args[0] = 'label=""' - py_args = ["self", "*"] + py_args - else: - py_args = ["self"] + argnames - - # Construct final def - line = preamble + ", ".join(py_args) + "): pass\n" - line = blacken(line, True).split("):")[0] + "):" - return " " + line - - def _arg_from_struct_field(self, field): - name = to_snake_case(field.name) - d = field.default - t = self.idl.resolve_type(field.typename) - result = name - if t: - result += f": {t}" - if d: - d = {"false": "False", "true": "True"}.get(d, d) - result += f"={d}" - return result - - def prop_is_known(self, classname, propname): - propname_idl = self.name2idl(propname) - return propname_idl in self.idl.classes[classname].attributes - - def method_is_known(self, classname, methodname): - functions = self.idl.classes[classname].functions - name_idl = self.name2idl(methodname) - if "_async" in methodname and name_idl not in functions: - name_idl = self.name2idl(methodname.replace("_async", "")) - elif name_idl not in functions and name_idl + "Async" in functions: - name_idl += "Async" - return name_idl if name_idl in functions else None - - def get_class_names(self): - return list(self.idl.classes.keys()) - - def get_required_prop_names(self, classname): - propnames_idl = self.idl.classes[classname].attributes.keys() - return [self.name2py(x) for x in propnames_idl] - - def get_required_method_names(self, classname): - methodnames_idl = self.idl.classes[classname].functions.keys() - return [self.name2py(x) for x in methodnames_idl] - - -class BaseApiPatcher(IdlPatcherMixin, AbstractApiPatcher): - """A patcher to patch the base API (in _classes.py), using IDL as input.""" - - -class IdlCommentInjector(IdlPatcherMixin, AbstractCommentInjector): - """A patcher that injects signatures as defined in IDL, which can be useful - to determine the types of arguments, etc. 
- """ - - def get_class_comment(self, classname): - return None - - def get_prop_comment(self, classname, propname): - if self.prop_is_known(classname, propname): - propname_idl = self.name2idl(propname) - return " # IDL: " + self.idl.classes[classname].attributes[propname_idl] - - def get_method_comment(self, classname, methodname): - name_idl = self.method_is_known(classname, methodname) - if name_idl: - return " # IDL: " + self.idl.classes[classname].functions[name_idl] - - -class BackendApiPatcher(AbstractApiPatcher): - """A patcher to patch a backend API, using the base API as input.""" - - def __init__(self, base_api_code): - super().__init__() - - p1 = Patcher(base_api_code) - - # Collect what's needed - self.classes = classes = {} - for classname, i1, i2 in p1.iter_classes(): - methods = {} - for methodname, j1, j2 in p1.iter_methods(i1 + 1): - pre_lines = "\n".join(p1.lines[j1 - 3 : j1]) - if "@apidiff.hide" in pre_lines: - continue # method (currently) not part of our API - body = "\n".join(p1.lines[j1 + 1 : j2 + 1]) - must_overload = "raise NotImplementedError()" in body - methods[methodname] = p1.lines[j1], must_overload - classes[classname] = p1.lines[i1], methods - # We assume that all properties can be implemented on the base class - - def class_is_known(self, classname): - return classname in self.classes - - def get_class_def(self, classname): - line, _ = self.classes[classname] - - if "):" not in line: - return line.replace(":", f"(classes.{classname}):") - else: - i = line.find("(") - bases = line[i:].strip("():").replace(",", " ").split() - bases = [b for b in bases if b.startswith("GPU")] - bases.insert(0, f"classes.{classname}") - return line[:i] + "(" + ", ".join(bases) + "):" - - def get_method_def(self, classname, methodname): - _, methods = self.classes[classname] - line, _ = methods[methodname] - return line - - def prop_is_known(self, classname, propname): - return False - - def method_is_known(self, classname, methodname): - _, methods = self.classes[classname] - return methodname in methods - - def get_class_names(self): - return list(self.classes.keys()) - - def get_required_prop_names(self, classname): - return [] - - def get_required_method_names(self, classname): - _, methods = self.classes[classname] - return list(name for name in methods.keys() if methods[name][1]) - - -class StructValidationChecker(Patcher): - """Checks that all structs are vaildated in the methods that have incoming structs.""" - - def apply(self, code): - self._init(code) - - idl = get_idl_parser() - all_structs = set() - ignore_structs = {"Extent3D"} - - for classname, i1, i2 in self.iter_classes(): - if classname not in idl.classes: - continue - - # For each method ... 
- for methodname, j1, j2 in self.iter_methods(i1 + 1): - code = "\n".join(self.lines[j1 : j2 + 1]) - # Get signature and cut it up in words - sig_words = code.partition("(")[2].split("):")[0] - for c in "][(),\"'": - sig_words = sig_words.replace(c, " ") - # Collect incoming structs from signature - method_structs = set() - for word in sig_words.split(): - if word.startswith("structs."): - structname = word.partition(".")[2] - method_structs.update(self._get_sub_structs(idl, structname)) - all_structs.update(method_structs) - # Collect structs being checked - checked = set() - for line in code.splitlines(): - line = line.lstrip() - if line.startswith("check_struct("): - name = line.split("(")[1].split(",")[0].strip('"') - checked.add(name) - # Test that a matching check is done - unchecked = method_structs.difference(checked) - unchecked = list(sorted(unchecked.difference(ignore_structs))) - if ( - methodname.endswith("_async") - and f"return self.{methodname[:-7]}" in code - ): - pass - elif unchecked: - msg = f"missing check_struct in {methodname}: {unchecked}" - self.insert_line(j1, f"# FIXME: {msg}") - print(f"ERROR: {msg}") - - # Test that we did find structs. In case our detection fails for - # some reason, this would probably catch that. - assert len(all_structs) > 10 - - def _get_sub_structs(self, idl, structname): - structnames = {structname} - for structfield in idl.structs[structname].values(): - structname2 = structfield.typename[3:] # remove "GPU" - if structname2 in idl.structs: - structnames.update(self._get_sub_structs(idl, structname2)) - return structnames diff --git a/codegen/apiwriter.py b/codegen/apiwriter.py deleted file mode 100644 index 488ca1c..0000000 --- a/codegen/apiwriter.py +++ /dev/null @@ -1,146 +0,0 @@ -""" -Writes the parts of the API that are simple: flags, enums, structs. -""" - -import re - -from codegen.utils import print, blacken, to_snake_case -from codegen.idlparser import get_idl_parser -from codegen.files import file_cache - - -ref_pattern = re.compile(r"\W((GPU|flags\.|enums\.|structs\.)\w+?)\W", re.MULTILINE) - - -def resolve_crossrefs(text): - # Similar code as in docs/conf.py - text += " " - i2 = 0 - while True: - m = ref_pattern.search(text, i2) - if not m: - break - i1, i2 = m.start(1), m.end(1) - prefix = m.group(2) - ref_indicator = ":obj:" if prefix.lower() == prefix else ":class:" - name = m.group(1) - if name.startswith("structs."): - link = name.split(".")[1] - else: - link = "wgpu." 
+ name - insertion = f"{ref_indicator}`{name} <{link}>`" - text = text[:i1] + insertion + text[i2:] - i2 += len(insertion) - len(name) - return text.rstrip() - - -def write_flags(): - # Get preamble - pylines = [] - for line in file_cache.read("flags.py").splitlines(): - pylines.append(line) - if "AUTOGENERATED" in line: - pylines += ["", ""] - break - # Prepare - idl = get_idl_parser() - n = len(idl.flags) - # List'm - pylines.append(f"# There are {n} flags\n") - pylines.append("__all__ = [") - for name in idl.flags.keys(): - pylines.append(f' "{name}",') - pylines.append("]\n\n") - # The flags definitions - for name, d in idl.flags.items(): - # Object-docstring as a comment - for key, val in d.items(): - pylines.append(f'#: * "{key}" ({val})') - # Generate Code - pylines.append(f'{name} = Flags(\n "{name}",') - for key, val in d.items(): - pylines.append(f" {key}={val!r},") - pylines.append(")\n") - # Write - code = blacken("\n".join(pylines)) - file_cache.write("flags.py", code) - print(f"Wrote {n} flags to flags.py") - - -def write_enums(): - # Get preamble - pylines = [] - for line in file_cache.read("enums.py").splitlines(): - pylines.append(line) - if "AUTOGENERATED" in line: - pylines += ["", ""] - break - # Prepare - idl = get_idl_parser() - n = len(idl.enums) - # List'm - pylines.append(f"# There are {n} enums\n") - pylines.append("__all__ = [") - for name in idl.enums.keys(): - pylines.append(f' "{name}",') - pylines.append("]\n\n") - for name, d in idl.enums.items(): - # Object-docstring as a comment - for key, val in d.items(): - pylines.append(f'#: * "{key}"') - # Generate Code - pylines.append(f'{name} = Enum(\n "{name}",') - for key, val in d.items(): - pylines.append(f' {key}="{val}",') - pylines.append(")\n") - # Write - code = blacken("\n".join(pylines)) - file_cache.write("enums.py", code) - print(f"Wrote {n} enums to enums.py") - - -def write_structs(): - # Get preamble - pylines = [] - for line in file_cache.read("structs.py").splitlines(): - pylines.append(line) - if "AUTOGENERATED" in line: - pylines += ["", ""] - break - # Prepare - idl = get_idl_parser() - n = len(idl.structs) - ignore = ["ImageCopyTextureTagged"] - pylines.append(f"# There are {n} structs\n") - # List'm - pylines.append("__all__ = [") - for name in idl.structs.keys(): - if name not in ignore: - pylines.append(f' "{name}",') - pylines.append("]\n\n") - for name, d in idl.structs.items(): - if name in ignore: - continue - # Object-docstring as a comment - for field in d.values(): - tp = idl.resolve_type(field.typename).strip("'") - if field.default is not None: - pylines.append( - resolve_crossrefs(f"#: * {field.name} :: {tp} = {field.default}") - ) - else: - pylines.append(resolve_crossrefs(f"#: * {field.name} :: {tp}")) - # Generate Code - pylines.append(f'{name} = Struct(\n "{name}",') - for field in d.values(): - key = to_snake_case(field.name) - val = idl.resolve_type(field.typename) - if not val.startswith(("'", '"')): - val = f"'{val}'" - pylines.append(f" {key}={val},") - pylines.append(")\n") - - # Write - code = blacken("\n".join(pylines)) - file_cache.write("structs.py", code) - print(f"Wrote {n} structs to structs.py") diff --git a/codegen/files.py b/codegen/files.py deleted file mode 100644 index 0b7c4b6..0000000 --- a/codegen/files.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Simple utilities to handle files, including a mini virtual file system. 
-""" - -import os - - -lib_dir = os.path.abspath(os.path.join(__file__, "..", "..", "wgpu")) - - -def read_file(*fname): - """Read a file from disk using the relative filename. Line endings are normalized.""" - filename = os.path.join(lib_dir, *fname) - with open(filename, "rb") as f: - return f.read().decode().replace("\r\n", "\n").replace("\r", "\n") - - -class FileCache: - """An in-memory file cache, to allow performing the codegen - in-memory, providing checks on what is actually changed, enabling - dry runs for tests, and make it easier to write back files with the - correct line endings. - """ - - _filenames_to_change = [ - "_classes.py", - "flags.py", - "enums.py", - "structs.py", - "backends/wgpu_native/_api.py", - "backends/wgpu_native/_mappings.py", - "resources/codegen_report.md", - ] - - def __init__(self): - self._file_contents = {} - self._files_written = set() - - def reset(self): - """Reset the cache, populating the files with a copy from disk.""" - self._file_contents.clear() - for fname in self.filenames_to_change: - self.write(fname, read_file(fname)) - self._files_written.clear() - - @property - def filenames_to_change(self): - """The (relative) filenames that the codegen is allowed to change.""" - return tuple(self._filenames_to_change) - - @property - def filenames_written(self): - """The (relative) filenames that are actually written.""" - return set(self._files_written) - - def write(self, fname, text): - """Write to a (virtual) file. The text is a string with LF newlines.""" - assert fname in self.filenames_to_change - self._files_written.add(fname) - self._file_contents[fname] = text - - def read(self, fname): - """Read from a (virtual) file. Returns text with LF newlines.""" - assert fname in self.filenames_to_change - return self._file_contents[fname] - - def write_changed_files_to_disk(self): - """Write the virtual files to disk, using appropriate newlines.""" - # Get reference line ending chars - with open(os.path.join(lib_dir, "__init__.py"), "rb") as f: - text = f.read().decode() - line_endings = get_line_endings(text) - # Write files - for fname in self.filenames_to_change: - text = self.read(fname) - filename = os.path.join(lib_dir, fname) - with open(filename, "wb") as f: - f.write(text.replace("\n", line_endings).encode()) - - -file_cache = FileCache() - - -def get_line_endings(text): - """Detect whether the line endings in use is CR LF or CRLF.""" - # Count how many line ending chars there are - crlf_count = text.count("\r\n") - lf_count = text.count("\n") - crlf_count - cr_count = text.count("\r") - crlf_count - assert lf_count + cr_count + crlf_count >= 4 - # Check what's used the most, or whether it's a combination. - if lf_count > cr_count and lf_count > crlf_count: - return "\n" - elif cr_count > lf_count and cr_count > crlf_count: - return "\r" - else: - return "\r\n" diff --git a/codegen/hparser.py b/codegen/hparser.py deleted file mode 100644 index 740ba14..0000000 --- a/codegen/hparser.py +++ /dev/null @@ -1,231 +0,0 @@ -from cffi import FFI - -from codegen.utils import print, remove_c_comments -from codegen.files import read_file - - -_parser = None - - -def _get_wgpu_header(): - """Func written so we can use this in both wgpu_native/_ffi.py and codegen/hparser.py""" - # Read files - lines1 = [] - lines1.extend(read_file("resources", "webgpu.h").splitlines()) - lines1.extend(read_file("resources", "wgpu.h").splitlines()) - # Deal with pre-processor commands, because cffi cannot handle them. 
- # Just removing them, plus a few extra lines, seems to do the trick. - lines2 = [] - for line in lines1: - if line.startswith("#define ") and len(line.split()) > 2 and "0x" in line: - line = line.replace("(", "").replace(")", "") - elif line.startswith("#"): - continue - elif 'extern "C"' in line: - continue - for define_to_drop in [ - "WGPU_EXPORT ", - "WGPU_NULLABLE ", - " WGPU_OBJECT_ATTRIBUTE", - " WGPU_ENUM_ATTRIBUTE", - " WGPU_FUNCTION_ATTRIBUTE", - " WGPU_STRUCTURE_ATTRIBUTE", - ]: - line = line.replace(define_to_drop, "") - lines2.append(line) - return "\n".join(lines2) - - -def get_h_parser(*, allow_cache=True): - """Get the global HParser object.""" - - # Singleton pattern - global _parser - if _parser and allow_cache: - return _parser - - source = _get_wgpu_header() - - # Create parser - hp = HParser(source) - hp.parse() - _parser = hp - return hp - - -class HParser: - """Object to parse the wgpu.h header file, by letting cffi do the heavy lifting.""" - - def __init__(self, source): - self.source = source - - def parse(self, verbose=True): - self.flags = {} - self.enums = {} - self.structs = {} - self.functions = {} - - self._parse_from_h() - self._parse_from_cffi() - - if verbose: - print(f"The wgpu.h defines {len(self.functions)} functions") - keys = "flags", "enums", "structs" - stats = ", ".join(f"{len(getattr(self, key))} {key}" for key in keys) - print("The wgpu.h defines " + stats) - - def _parse_from_h(self): - code = self.source - - # Collect enums and flags. This is easy. - # Note that flags are first defined as enums and then redefined as flags later. - i1 = i2 = i3 = i4 = 0 - while True: - # Find enum - i1 = code.find("typedef enum", i4) - i2 = code.find("{", i1) - i3 = code.find("}", i2) - i4 = code.find(";", i3) - if i1 < 0: - break - # Decompose "typedef enum XX {...} XX;" - name1 = code[i1 + 13 : i2].strip() - name2 = code[i3 + 1 : i4].strip() - assert name1 == name2 - assert name1.startswith("WGPU") - name = name1[4:] - self.enums[name] = enum = {} - for f in code[i2 + 1 : i3].strip().strip(";").split(","): - f = remove_c_comments(f).strip() - if not f: - continue # happens when last item has a comma - key, _, val = f.partition("=") - # Handle key - key = key.strip() - assert key.startswith("WGPU") and "_" in key - key = key.split("_", 1)[1] - # Turn value into an int - val = val.strip() - if val.startswith("0x"): - enum[key] = int(val, 16) - elif "<<" in val: - val1, _, val2 = val.partition("<<") - enum[key] = int(val1) << int(val2) - elif "|" in val: # field is an OR of the earlier fields :/ - keys = [k.strip().split("_", 1)[1] for k in val.split("|")] - val = 0 - for k in keys: - val |= enum[k] - enum[key] = val - else: - enum[key] = int(val) - - # Turn some enums into flags - for line in code.splitlines(): - if line.startswith("typedef WGPUFlags "): - parts = line.strip().strip(";").split() - assert len(parts) == 3 - name = parts[-1] - if name.endswith("Flags"): - assert name.startswith("WGPU") - name1 = name[4:-1] # xxFlags -> xxFlag - name2 = name[4:-5] # xxFlags -> xx - name = name1 if name1 in self.enums else name2 - self.flags[name] = self.enums.pop(name) - - # Collect structs. This is relatively easy, since we only need the C code. - # But we dont deal with union structs. 
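Editor's note: the enum/flag collection in _parse_from_h() works purely with str.find() over the header text. A standalone toy version of that scan on a made-up header; the real parser reads resources/webgpu.h and resources/wgpu.h:

header = """
typedef enum WGPUPowerPreference {
    WGPUPowerPreference_Undefined = 0x00000000,
    WGPUPowerPreference_LowPower = 0x00000001,
    WGPUPowerPreference_HighPerformance = 0x00000002,
} WGPUPowerPreference;
"""

enums = {}
i1 = i2 = i3 = i4 = 0
while True:
    i1 = header.find("typedef enum", i4)
    i2 = header.find("{", i1)
    i3 = header.find("}", i2)
    i4 = header.find(";", i3)
    if i1 < 0:
        break
    name = header[i1 + 13 : i2].strip()[4:]  # drop the WGPU prefix
    fields = {}
    for part in header[i2 + 1 : i3].strip().strip(",").split(","):
        part = part.strip()
        if not part:
            continue
        key, _, val = part.partition("=")
        fields[key.strip().split("_", 1)[1]] = int(val.strip(), 16)
    enums[name] = fields

print(enums)  # {'PowerPreference': {'Undefined': 0, 'LowPower': 1, 'HighPerformance': 2}}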
- i1 = i2 = i3 = i4 = 0 - while True: - # Find struct - i1 = code.find("typedef struct", i4) - i2 = code.find("{", i1) - i3 = code.find("}", i2) - i4 = code.find(";", i3) - if i1 < 0: - break - # Only do simple structs, not Unions - if 0 < code.find("{", i2 + 1) < i3: - continue - # Decompose - name = code[i3 + 1 : i4].strip() - self.structs[name] = struct = {} - for f in code[i2 + 1 : i3].strip().strip(";").split(";"): - f = remove_c_comments(f).strip() - if not f: - continue # probably last item ended with a comma - parts = f.strip().split() - typename = " ".join(parts[:-1]) - typename = typename.replace("const ", "") - key = parts[-1].strip("*") - struct[key] = typename - - # Collect functions. This is not too hard, since we only need the C code. - i1 = i2 = i3 = 0 - while True: - # Find function - i1 = code.find("wgpu", i3) - i2 = code.find("(", i1) - i3 = code.find(");", i2) - if i1 < 0: - break - # Extract name, and check whether we found something real - name = code[i1:i2] - if not (name and name.isidentifier()): - i3 = i1 + 5 - continue - # Decompose further - i1 = code.rfind("\n", 0, i1) - line = code[i1 : i3 + 2] - line = " ".join(line.split()) # effective way to put on one line - self.functions[name] = line - - def _parse_from_cffi(self): - self.ffi = ffi = FFI() - ffi.cdef(self.source) - - # Collect structs. We iterate over all types. Some will resolve - # to C types, the rest are structs. The types for the struct - # fields are reduced to the C primitives, making it less useful - # for annotations. We update the structs that we've found by - # parsing wgpu.h directly. - for names in ffi.list_types(): - for name in names: - # name = ffi.getctype(name) - no, keep original - if name.startswith("WGPU") and not name.endswith("Impl"): - t = ffi.typeof(name) - if not hasattr(t, "fields"): - continue # probably an enum - elif not t.fields: - continue # base struct / alias - s = ffi.new(f"{name} *") - # Construct struct - struct = {} - for key, field in t.fields: - typename = field.type.cname - # typename = ffi.getctype(typename) - if typename.startswith("WGPU"): - val = typename # Enum or struct - else: - val = type(getattr(s, key)).__name__ - struct[key] = val - # Update - if name not in self.structs: - self.structs[name] = struct - else: - ori_struct = self.structs[name] - assert set(struct) == set(ori_struct) - for key, val in struct.items(): - if ori_struct[key] != val: - if val.startswith("_"): # _CDataBase - pass - elif ori_struct[key].startswith("WGPU"): - if "/" not in ori_struct[key]: - ori_struct[key] += "/" + val - else: - ori_struct[key] = val - # Make copies - alt_name = name - while alt_name != ffi.getctype(alt_name): - alt_name = ffi.getctype(alt_name) - self.structs[alt_name] = self.structs[name] diff --git a/codegen/idlparser.py b/codegen/idlparser.py deleted file mode 100644 index 5063c91..0000000 --- a/codegen/idlparser.py +++ /dev/null @@ -1,432 +0,0 @@ -""" -The logic to parse the IDL file, from this we generate the base API. - -This module may need tweaks as the used IDL syntax/constructs changes. - -It would be good to occasionally check the coverage of this module to -identify and remove code paths that are no longer used. 
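Editor's note: the second pass, _parse_from_cffi(), lets cffi do the heavy lifting to recover struct fields. A minimal standalone sketch with a made-up one-struct header; the real code feeds in the combined webgpu.h/wgpu.h source prepared above, and the printed type names depend on how cffi spells them:

from cffi import FFI

ffi = FFI()
ffi.cdef("""
    typedef struct WGPUExtent3D {
        uint32_t width;
        uint32_t height;
        uint32_t depthOrArrayLayers;
    } WGPUExtent3D;
""")

seen = set()
for names in ffi.list_types():
    for name in names:
        if name in seen:
            continue
        seen.add(name)
        t = ffi.typeof(name)
        if not hasattr(t, "fields") or not t.fields:
            continue  # enums and base/alias types have no fields
        fields = {key: field.type.cname for key, field in t.fields}
        print(name, fields)
# Prints something like: WGPUExtent3D {'width': 'uint32_t', 'height': 'uint32_t', ...}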
-""" - -from codegen.utils import print -from codegen.files import read_file - - -_parser = None - - -def get_idl_parser(*, allow_cache=True): - """Get the global IdlParser object.""" - - # Singleton pattern - global _parser - if _parser and allow_cache: - return _parser - - # Get source - source = read_file("resources", "webgpu.idl") - - # Create parser - idl = IdlParser(source) - idl.parse() - _parser = idl - return idl - - -class StructField: - """A little object to specify the field of a struct.""" - - def __init__(self, line, name, typename, default=None): - self.line = line - self.name = name - self.typename = typename - self.default = default - - def __repr__(self): - return f"" - - def to_str(self): - return self.line - - -class Interface: - """A class definition, or flags.""" - - def __init__(self, name, bases): - self.bases = bases - self.constants = {} - self.attributes = {} # name -> line - self.functions = {} - - -class IdlParser: - """An object that can be used to walk over a str in an easy way. - - This parser has the following attributes: - - * flags: a dict mapping the (neutral) flag name to a dict of field-value pairs. - * enums: a dict mapping the (Pythonic) enum name to a dict of field-value pairs. - * structs: a dict mapping the (Pythonic) struct name to a dict of StructField - objects. - * functions: a dict mapping the (normalized) func name to the line defining the - function. - - """ - - def __init__(self, source): - self.source = self._pre_process(source) - self._length = len(self.source) - self._pos = 0 - - def _reset(self): - self._pos = 0 - - def end_reached(self): - return self._pos >= self._length - - def read_until(self, char): - start = self._pos - while self._pos < self._length: - c = self.source[self._pos] - self._pos += 1 - if c == char: - return self.source[start : self._pos] - return "" - - def read_line(self): - return self.read_until("\n") - - def peek_line(self): - char = "\n" - start = pos = self._pos - while pos < self._length: - c = self.source[pos] - pos += 1 - if c == char: - return self.source[start:pos] - return "" - - def parse(self, verbose=True): - self._interfaces = {} - self.classes = {} - self.structs = {} - self.flags = {} - self.enums = {} - - self.typedefs = {} - - self._reset() - self._parse() - self._post_process() - - if verbose: - f_count = sum(len(cls.functions) for cls in self.classes.values()) - print( - f"The webgpu.idl defines {len(self.classes)} classes with {f_count} functions" - ) - keys = "flags", "enums", "structs" - stats = ", ".join(f"{len(getattr(self, key))} {key}" for key in keys) - print("The webgpu.idl defines " + stats) - - def _pre_process(self, text): - """Pre-process the text to make it a bit easier to parse. 
- Beware to keep line numbers the same - """ - text = text.replace("\n[\n", "\n\n[").replace("\n]\n", "]\n\n") - text = text.replace("[ ", "[") - text = self._remove_comments(text) - return text - - def _remove_comments(self, text): - lines = [] - in_multiline_comment = False - for line in text.splitlines(): - if in_multiline_comment: - if "*/" in line: - _, _, line = line.partition("//") - if "//" in line: - line, _, _ = line.partition("//") - lines.append(line if line.strip() else "") - in_multiline_comment = False - else: - lines.append("") - else: - if "//" in line: - line, _, _ = line.partition("//") - lines.append(line if line.strip() else "") - elif "/*" in line: - line, _, _ = line.partition("/*") - lines.append(line if line.strip() else "") - in_multiline_comment = True - else: - lines.append(line) - return "\n".join(lines) - - def resolve_type(self, typename): - """Resolve a type to a suitable name that is also valid so that flake8 - wont complain when this is used as a type annotation. - """ - - name = typename.strip().strip("?") - - # We want the flag, not the type that is an alias for int - name = name[:-5] if name.endswith("Flags") else name - - # First resolve using typedefs that we found in the IDL - while name in self.typedefs: - new_name = self.typedefs[name] - if new_name == name: - break - name = new_name - - # Resolve to a Python type (maybe) - pythonmap = { - "DOMString": "str", - "DOMString?": "str", - "USVString": "str", - "long": "int", - "unsigned long": "int", - "unsigned long long": "int", - "[Clamp] unsigned short": "int", - "unsigned short": "int", - "GPUIntegerCoordinate": "int", - "GPUSampleMask": "int", - "GPUFenceValue": "int", - "GPUSize64": "int", - "GPUSize32": "int", - "GPUIndex32": "int", - "double": "float", - "boolean": "bool", - "object": "dict", - "ImageBitmap": "memoryview", - "ImageData": "memoryview", - "VideoFrame": "memoryview", - "GPUPipelineConstantValue": "float", - "GPUExternalTexture": "object", - } - name = pythonmap.get(name, name) - - # Is this a case for which we need to recurse? 
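Editor's note: a standalone, simplified version of the first steps of resolve_type(): strip the optional marker, fold Flags aliases back to the flag name, and map IDL primitives to Python types. The real method additionally consults the typedefs, classes, flags, enums and structs it parsed:

pythonmap = {
    "DOMString": "str",
    "USVString": "str",
    "unsigned long": "int",
    "unsigned long long": "int",
    "GPUSize64": "int",
    "double": "float",
    "boolean": "bool",
    "object": "dict",
}

def resolve_simple(typename):
    name = typename.strip().strip("?")   # "DOMString?" is an optional str
    if name.endswith("Flags"):
        name = name[:-5]                 # use the flag, not its int alias
    return pythonmap.get(name, name)

for t in ["DOMString", "USVString?", "unsigned long long", "boolean"]:
    print(t, "->", resolve_simple(t))
# DOMString -> str, USVString? -> str, unsigned long long -> int, boolean -> bool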
- if name.startswith("sequence<") and name.endswith(">"): - name = name.split("<")[-1].rstrip(">") - name = self.resolve_type(name).strip("'") - return f"'List[{name}]'" - elif name.startswith("record<") and name.endswith(">"): - name = name.split("<")[-1].rstrip(">") - names = [self.resolve_type(t).strip("'") for t in name.split(",")] - return f"'Dict[{', '.join(names)}]'" - elif " or " in name: - name = name.strip("()") - names = [self.resolve_type(t).strip("'") for t in name.split(" or ")] - names = sorted(set(names)) - return f"'Union[{', '.join(names)}]'" - - # Triage - if name in __builtins__: - return name # ok - elif name in self.classes: - return f"'{name}'" # ok, but wrap in string because can be declared later - elif name.startswith("HTML"): - return "object" # anything, we ignore this stuff anyway - elif name in ["OffscreenCanvas"]: - return "object" - elif name in ["PredefinedColorSpace"]: - return "str" - else: - assert name.startswith("GPU") - name = name[3:] - name = name[:-4] if name.endswith("Dict") else name - if name in self.flags: - return f"'flags.{name}'" - elif name in self.enums: - return f"'enums.{name}'" - elif name in self.structs: - return f"'structs.{name}'" - else: - # When this happens, update the code above or the pythonmap - raise RuntimeError("Encountered unknown IDL type: ", name) - - def _parse(self): - while not self.end_reached(): - line = self.read_line() - - if not line.strip(): - pass - elif line.startswith("typedef "): - # Get the important bit - value = line.split(" ", 1)[-1] - if value.startswith("["): - value = value.split("]")[-1] - # Parse - if value.startswith("("): # Union type - while ")" not in value: - value = value.rstrip() + " " + self.read_line().lstrip() - assert value.count("(") == 1 and value.count(")") == 1 - value = value.split("(")[1] - val, _, key = value.partition(")") - else: # Singleton type - val, _, key = value.rpartition(" ") - key = key.strip().strip(";").strip() - self.typedefs[key] = val.strip() - elif line.startswith(("namespace ", "interface ", "partial interface ")): - # A class or a set of flags - # Collect lines that define this interface - while "{" not in line: - line = line.rstrip() + " " + self.read_line().lstrip() - lines = [line] - while not line.startswith("};"): - line = self.read_line() - lines.append(line) - classname_raw, _, base_raw = lines[0].split("{")[0].partition(":") - classname = classname_raw.split()[-1] - # Collect base classes - based_on = list(base_raw.split()) - while self.peek_line().startswith(classname + " includes "): - line = self.read_line() - based_on.append(line.split()[-1].rstrip(";")) - # Create / get interface object - if classname not in self._interfaces: - self._interfaces[classname] = Interface(classname, based_on) - interface = self._interfaces[classname] - # Parse members - line_index = 0 - while line_index < len(lines) - 1: - line_index += 1 - line = lines[line_index].strip() - if not line: - continue - elif line.startswith("[Exposed="): - continue # WTF? 
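Editor's note: the recursive cases above, shown in isolation: sequence<> becomes List[...], record<> becomes Dict[...], and "(A or B)" unions become Union[...]. This simplified sketch passes unknown names through unchanged, whereas the real method keeps resolving them:

def resolve(name):
    name = name.strip().strip("?")
    if name.startswith("sequence<") and name.endswith(">"):
        inner = resolve(name[len("sequence<"):-1]).strip("'")
        return f"'List[{inner}]'"
    if name.startswith("record<") and name.endswith(">"):
        inner = [resolve(t).strip("'") for t in name[len("record<"):-1].split(",")]
        return f"'Dict[{', '.join(inner)}]'"
    if " or " in name:
        parts = [resolve(t).strip("'") for t in name.strip("()").split(" or ")]
        return f"'Union[{', '.join(sorted(set(parts)))}]'"
    return {"DOMString": "str", "GPUSize64": "int"}.get(name, name)

print(resolve("sequence<DOMString>"))           # 'List[str]'
print(resolve("record<DOMString, GPUSize64>"))  # 'Dict[str, int]'
print(resolve("(GPUBuffer or DOMString)"))      # 'Union[GPUBuffer, str]'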
- elif line.startswith("const "): - parts = line.strip(";").split() - assert len(parts) == 5 - assert parts[-2] == "=" - name = parts[2] - val = int(parts[-1], 16) - interface.constants[name] = val - elif "attribute " in line: - name = line.partition("attribute")[2].split()[-1].strip(";") - interface.attributes[name] = line - elif "(" in line: - line = lines[line_index] - while line.count("(") > line.count(")"): - line_index += 1 - line += lines[line_index] - assert line.count("(") == line.count(")") - line = line.strip() - line.replace("\n", " ") - for c in (" ", " ", " "): - line = line.replace(c, " ") - assert line.endswith(";") - funcname = line.split("(")[0].split()[-1] - line = ( - line.replace("\n", " ") - .replace(" ", " ") - .replace(" ", " ") - ) - interface.functions[funcname] = line - elif " includes " in line: - parts = line.strip(";").split() - assert len(parts) == 3 and parts[1] == "includes" - classname, _, base = parts - if classname not in self._interfaces: - self._interfaces[classname] = Interface(classname, []) - self._interfaces[classname].bases.append(parts[2]) - elif line.startswith("enum "): - line += self.read_until("}") + self.read_line() - lines = line.strip().split("\n") - name = lines[0].split(" ", 1)[1].strip("{ \t\r\n") - d = {} - for i, line in enumerate(lines[1:-1]): - line = line.strip() - if not line or line.startswith("//"): - continue - key = val = line.strip('", \t') - for i1, i2 in [ - ("-", "_"), - ("1d", "d1"), - ("2d", "d2"), - ("3d", "d3"), - ]: - key = key.replace(i1, i2) - d[key] = val - self.enums[name] = d - elif line.startswith("dictionary "): - while "{" not in line: - line = line.rstrip() + self.read_line() - assert line.count("{") == 1 and line.count("}") == 0 - lines = [line] - while not line.startswith("};"): - line = self.read_line() - lines.append(line) - name = lines[0].split(" ", 1)[1].strip("{ \t\r\n") - if ":" in name: - name, _, base = name.partition(":") - name, base = name.strip(), base.strip() - if base not in self.structs: - # print(f"dict {name} has unknown base dict {base}") - d = {} - else: - d = self.structs[base].copy() - else: - d = {} - for line in lines[1:-1]: - line = line.split("//")[0].strip() - if not line: - continue - assert line.endswith(";") - arg = line.strip().strip(",;").strip() - default = None - if "=" in arg: - arg, default = arg.rsplit("=", 1) - arg, default = arg.strip(), default.strip() - arg_type, arg_name = arg.strip().rsplit(" ", 1) - if arg_type.startswith("required "): - arg_type = arg_type[9:] - # required args should not have a default - assert default is None - else: - default = default or "None" - d[arg_name] = StructField(line, arg_name, arg_type, default) - self.structs[name] = d - elif line.startswith(("[Exposed=", "[Serializable]")): - pass - else: - raise RuntimeError("Unknown line:", line.rstrip()) - - def _post_process(self): - """We don't do any name format normalization in the parser code itself; - we do that here. 
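Editor's note: one small but important detail of the enum parsing above, in isolation: WebIDL enum values such as "2d" or "rg8unorm-srgb" are not valid Python identifiers, so the parser derives a usable key while keeping the original string as the value:

def normalize_enum_key(val):
    key = val
    for i1, i2 in [("-", "_"), ("1d", "d1"), ("2d", "d2"), ("3d", "d3")]:
        key = key.replace(i1, i2)
    return key

for val in ["2d", "2d-array", "rg8unorm-srgb", "depth24plus"]:
    print(val, "->", normalize_enum_key(val))
# 2d -> d2, 2d-array -> d2_array, rg8unorm-srgb -> rg8unorm_srgb, depth24plus -> depth24plus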
- """ - - # Drop some toplevel names - for name in [ - "NavigatorGPU", - "GPUSupportedLimits", - "GPUSupportedFeatures", - "WGSLLanguageFeatures", - "GPUUncapturedErrorEvent", - "GPUExternalTexture", - ]: - self._interfaces.pop(name, None) - - # Divide flags and actual class definitions - for name, interface in self._interfaces.items(): - if interface.constants: - self.flags[name] = interface.constants - elif name not in ("Navigator", "WorkerNavigator"): - delattr(interface, "constants") - self.classes[name] = interface - - # Remove GPU prefix - for d in (self.structs, self.flags, self.enums): - for name in list(d.keys()): - assert name.startswith("GPU") - new_name = name[3:] - if new_name.endswith("Dict"): - new_name = new_name[:-4] - d[new_name] = d.pop(name) - - # Remove (abstract) base structs - for name in list(self.structs): - if name.endswith("Base"): - self.structs.pop(name) diff --git a/codegen/tests/test_codegen_apipatcher.py b/codegen/tests/test_codegen_apipatcher.py deleted file mode 100644 index 6ef5bb1..0000000 --- a/codegen/tests/test_codegen_apipatcher.py +++ /dev/null @@ -1,118 +0,0 @@ -""" Test some parts of apipatcher.py, and Implicitly tests idlparser.py. -""" - -from codegen.utils import blacken -from codegen.apipatcher import CommentRemover, AbstractCommentInjector - - -def dedent(code): - return code.replace("\n ", "\n") - - -def test_comment_remover(): - code = """ - # - # a comment - # IDL: some idl spec - # FIXME: unknown api method - # FIXME: unknown api property - # FIXME: unknown api class - # FIXME: new method - only user should remove - # FIXME: was changed - only user should remove - """ - - p = CommentRemover() - p.apply(dedent(code)) - code = p.dumps() - - assert code.count("#") == 4 - - assert "IDL" not in code # IDL is auto-added by the codegen - assert "unknown" not in code # these are also auto-added - - assert "new" in code # user should remove these - assert "was changed" in code # user should remove these - - -class MyCommentInjector(AbstractCommentInjector): - def class_is_known(self, classname): - return True - - def prop_is_known(self, classname, propname): - return True - - def method_is_known(self, classname, methodname): - return True - - def get_class_comment(self, classname): - return "# this is a class" - - def get_prop_comment(self, classname, propname): - return "# this is a property" - - def get_method_comment(self, classname, methodname): - return "# this is a method" - - -def test_comment_injector(): - code1 = """ - class X: - 'x' - - def foo(self): - pass - - @whatever - def bar(self): - pass - - @property - def spam(self): - pass - - @property - # valid Python, but we want comments above decorators - def eggs(self): - pass - """ - - code3 = """ - # this is a class - class X: - 'x' - - # this is a method - def foo(self): - pass - - # this is a method - @whatever - def bar(self): - pass - - # this is a property - @property - def spam(self): - pass - - # valid Python, but we want comments above decorators - # this is a property - @property - def eggs(self): - pass - """ - code3 = blacken(dedent(code3)).strip() - - p = MyCommentInjector() - p.apply(dedent(code1)) - code2 = p.dumps().strip() - - assert code2 == code3 - - -if __name__ == "__main__": - for func in list(globals().values()): - if callable(func) and func.__name__.startswith("test_"): - print(f"Running {func.__name__} ...") - func() - print("Done") diff --git a/codegen/tests/test_codegen_rspatcher.py b/codegen/tests/test_codegen_rspatcher.py deleted file mode 100644 index 
6b6e80f..0000000 --- a/codegen/tests/test_codegen_rspatcher.py +++ /dev/null @@ -1,94 +0,0 @@ -""" Test some parts of rsbackend.py, and implicitly tests hparser.py. -""" - -from codegen.wgpu_native_patcher import patch_wgpu_native_backend - - -def dedent(code): - return code.replace("\n ", "\n") - - -def test_patch_functions(): - code1 = """ - libf.wgpuAdapterRequestDevice(1, 2, 3) - libf.wgpuFooBar(1, 2, 3) - """ - - code2 = patch_wgpu_native_backend(dedent(code1)) - - # All original lines are there - assert all(line[4:] in code2 for line in code1 if line.strip()) - - # But also an annotation - assert "WGPUAdapter adapter, WGPUDeviceDescriptor" in code2 - # And a notification that foo_bar is unknown - assert code2.count("# FIXME:") == 1 - assert code2.count("FooBar") == 2 - - -def test_patch_structs(): - # Check simple struct - code1 = """ - struct = new_struct_p( - "WGPUBufferDescriptor *", - label=c_label, - size=size, - usage=usage, - ) - """ - code2 = patch_wgpu_native_backend(dedent(code1)) - assert all(line[4:] in code2 for line in code1 if line.strip()) - assert "usage: WGPUBufferUsageFlags/int" in code2 - assert "size: int" in code2 - assert "# FIXME:" not in code2 - assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments - - # Check, but now using not-pointer - code1 = """ - struct = new_struct( - "WGPUBufferDescriptor", - label=c_label, - size=size, - usage=usage, - ) - """ - code2 = patch_wgpu_native_backend(dedent(code1)) - assert all(line[4:] in code2 for line in code1 if line.strip()) - assert "usage: WGPUBufferUsageFlags/int" in code2 - assert "size: int" in code2 - assert "# FIXME:" not in code2 - - # Fail - code1 = 'struct = new_struct("WGPUBufferDescriptor *",label=c_label,size=size,usage=usage,)' - code2 = patch_wgpu_native_backend(dedent(code1)) - assert "# FIXME:" in code2 - assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments - - # Fail - code1 = 'struct = new_struct_p("WGPUBufferDescriptor",label=c_label,size=size,usage=usage,)' - code2 = patch_wgpu_native_backend(dedent(code1)) - assert "# FIXME:" in code2 - assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments - - # Missing values - code1 = 'struct = new_struct_p("WGPUBufferDescriptor *",label=c_label,size=size,)' - code2 = patch_wgpu_native_backend(dedent(code1)) - assert "usage: WGPUBufferUsageFlags/int" in code2 - assert "# FIXME:" not in code2 - assert "usage" in code2 # comment added - assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments - - # Too many values - code1 = 'struct = new_struct_p("WGPUBufferDescriptor *",label=c_label,foo=size,)' - code2 = patch_wgpu_native_backend(dedent(code1)) - assert "usage: WGPUBufferUsageFlags/int" in code2 - assert "# FIXME: unknown" in code2 - assert code2 == patch_wgpu_native_backend(code2) # Don't stack comments - - -if __name__ == "__main__": - for func in list(globals().values()): - if callable(func) and func.__name__.startswith("test_"): - print(f"Running {func.__name__} ...") - func() - print("Done") diff --git a/codegen/tests/test_codegen_utils.py b/codegen/tests/test_codegen_utils.py deleted file mode 100644 index f92741d..0000000 --- a/codegen/tests/test_codegen_utils.py +++ /dev/null @@ -1,256 +0,0 @@ -""" -Strive for full coverage of the codegen utils module. 
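Editor's note: the tests above exercise patch_wgpu_native_backend() on small snippets. A condensed dry-run sketch of the same idea, assuming the codegen package and the wgpu resources it reads are importable:

from codegen.wgpu_native_patcher import patch_wgpu_native_backend

snippet = "libf.wgpuAdapterRequestDevice(1, 2, 3)\n"
print(patch_wgpu_native_backend(snippet))
# The call comes back preceded by an "# H: <C signature>" comment; unknown
# functions or struct fields would get a "# FIXME: ..." comment instead.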
-""" - -from codegen.utils import ( - remove_c_comments, - blacken, - Patcher, - to_snake_case, - to_camel_case, -) - -from pytest import raises - - -def dedent(code): - return code.replace("\n ", "\n") - - -def test_to_snake_case(): - assert to_snake_case("foo_bar_spam") == "foo_bar_spam" - assert to_snake_case("_foo_bar_spam") == "_foo_bar_spam" - assert to_snake_case("fooBarSpam") == "foo_bar_spam" - assert to_snake_case("_fooBarSpam") == "_foo_bar_spam" - assert to_snake_case("maxTextureDimension1D") == "max_texture_dimension1d" - - -def test_to_camel_case(): - assert to_camel_case("foo_bar_spam") == "fooBarSpam" - assert to_camel_case("_foo_bar_spam") == "_fooBarSpam" - assert to_camel_case("fooBarSpam") == "fooBarSpam" - assert to_camel_case("_fooBarSpam") == "_fooBarSpam" - assert to_camel_case("max_texture_dimension1d") == "maxTextureDimension1D" - - -def test_remove_c_comments(): - code1 = """ - x1 hello// comment - // comment - x2 hello/* comment */ - x3/* comment */ hello - x4 /* comment - comment - */hello - """ - - code3 = """ - x1 hello - - x2 hello - x3 hello - x4 hello - """ - - code1, code3 = dedent(code1), dedent(code3) - - code2 = remove_c_comments(code1) - - assert code2 == code3 - - -def test_blacken_singleline(): - code1 = """ - def foo(): - pass - def foo( - ): - pass - def foo( - a1, a2, a3 - ): - pass - def foo( - a1, a2, a3, - ): - pass - def foo( - a1, - a2, - a3, - ): - pass - """ - - code2 = """ - def foo(): - pass - def foo(): - pass - def foo(a1, a2, a3): - pass - def foo(a1, a2, a3): - pass - def foo(a1, a2, a3): - pass - """ - - code1 = dedent(code1).strip() - code2 = dedent(code2).strip() - - code3 = blacken(code1, True) - code3 = code3.replace("\n\n", "\n").replace("\n\n", "\n").strip() - - assert code3 == code2 - - # Also test simply long lines - code = "foo = 1" + " + 1" * 100 - assert len(code) > 300 - assert code.count("\n") == 0 - assert blacken(code, False).strip().count("\n") > 3 - assert blacken(code, True).strip().count("\n") == 0 - - -def test_blacken_comments(): - code1 = """ - def foo(): # hi - pass - def foo( - a1, # hi - a2, # ha - a3, - ): # ho - pass - """ - - code2 = """ - def foo(): # hi - pass - def foo(a1, a2, a3): # hi ha ho - pass - """ - - code1 = dedent(code1).strip() - code2 = dedent(code2).strip() - - code3 = blacken(code1, True) - code3 = code3.replace("\n\n", "\n").replace("\n\n", "\n").strip() - - assert code3 == code2 - - -def test_patcher(): - code = """ - class Foo1: - def bar1(self): - pass - def bar2(self): - pass - @property - def bar3(self): - pass - - class Foo2: - def bar1(self): - pass - @property - def bar2(self): - pass - def bar3(self): - pass - """ - - code = blacken(dedent(code)) - p = Patcher(code) - - # Dump before doing anything, should yield original - assert p.dumps() == code - - # Check iter_lines - lines = [] - for line, i in p.iter_lines(): - assert isinstance(line, str) - assert isinstance(i, int) - lines.append(line) - assert "\n".join(lines).strip() == code.strip() - - # Check iter_properties - names = [] - for classname, i1, i2 in p.iter_classes(): - for funcname, j1, j2 in p.iter_properties(i1 + 1): - names.append(classname + "." + funcname) - assert names == ["Foo1.bar3", "Foo2.bar2"] - - # Check iter_methods - names = [] - for classname, i1, i2 in p.iter_classes(): - for funcname, j1, j2 in p.iter_methods(i1 + 1): - names.append(classname + "." 
+ funcname) - assert names == ["Foo1.bar1", "Foo1.bar2", "Foo2.bar1", "Foo2.bar3"] - - # Check insert_line (can insert into same line multiple times - p = Patcher(code) - for classname, i1, i2 in p.iter_classes(): - p.insert_line(i1, "# a class") - p.insert_line(i1, "# a class") - code2 = p.dumps() - assert code2.count("# a class") == 4 - - # Check replace_line (can only replace one time per line) - p = Patcher(code2) - for line, i in p.iter_lines(): - if line.lstrip().startswith("#"): - p.replace_line(i, "# comment") - with raises(Exception): - p.replace_line(i, "# comment") - code2 = p.dumps() - assert code2.count("#") == 4 - assert code2.count("# comment") == 4 - - # Remove comments - p = Patcher(code2) - for line, i in p.iter_lines(): - if line.lstrip().startswith("#"): - p.remove_line(i) - code2 = p.dumps() - assert code2.count("#") == 0 - - # We should be back to where we started - assert code2 == code - - -def test_patcher2(): - code = """ - class Foo1: - def bar1(self): - pass - @property - def bar2(self): - pass - """ - - p = Patcher(dedent(code)) - - # Check property line indices - for classname, i1, i2 in p.iter_classes(): - for funcname, j1, j2 in p.iter_properties(i1 + 1): - line = p.lines[j1].lstrip() - assert line.startswith("def") - assert funcname in line - assert "pass" in p.lines[j2] - - # Check method line indices - for classname, i1, i2 in p.iter_classes(): - for funcname, j1, j2 in p.iter_methods(i1 + 1): - line = p.lines[j1].lstrip() - assert line.startswith("def") - assert funcname in line - assert "pass" in p.lines[j2] - - -if __name__ == "__main__": - for func in list(globals().values()): - if callable(func) and func.__name__.startswith("test_"): - print(f"Running {func.__name__} ...") - func() - print("Done") diff --git a/codegen/tests/test_codegen_z.py b/codegen/tests/test_codegen_z.py deleted file mode 100644 index c5b381c..0000000 --- a/codegen/tests/test_codegen_z.py +++ /dev/null @@ -1,83 +0,0 @@ -""" -Applying the codegen should not introduce changes. -""" - -import os -import time - -from codegen import main -from codegen.files import file_cache, get_line_endings, lib_dir - - -def test_line_endings(): - # All LF, but also ok if fraction is CR - assert get_line_endings("foo\nbar\nspam\neggs\n") == "\n" - assert get_line_endings("foo\nbar\nspam\reggs\n") == "\n" - - # All CR, but also ok if fraction is LF. 
- # I know that CR line endings are an archaic Mac thing, but some dev - # might just have his git setup in a weird way :) - assert get_line_endings("foo\rbar\rspam\reggs\r") == "\r" - assert get_line_endings("foo\rbar\rspam\neggs\r") == "\r" - - # If most are equal, - assert get_line_endings("foo\r\nbar\r\nspam\r\neggs\r\n") == "\r\n" - assert get_line_endings("foo\r\nbar\r\nspam\neggs\r\n") == "\r\n" - assert get_line_endings("foo\r\nbar\r\nspam\reggs\r\n") == "\r\n" - - -def test_that_code_is_up_to_date(): - """Test that running the codegen updates what we expect, but does not introduce changes.""" - - # Obtain mtime of all file that can change - mtimes = {} - for fname in file_cache.filenames_to_change: - filename = os.path.join(lib_dir, fname) - mtimes[filename] = os.path.getmtime(filename) - - time.sleep(0.2) - - # Reset the file cache - file_cache.reset() - assert file_cache.filenames_written == set() - - # Collect original version of the files - originals = {} - for fname in file_cache.filenames_to_change: - originals[fname] = file_cache.read(fname) - - # Perform the codegen - main() - - # Confirm that all files that are allowed to change, are actually written - assert file_cache.filenames_written == set(file_cache.filenames_to_change) - - # Double-check that mtimes have not changed - i.e. the real file system is not touched - all(t == os.path.getmtime(filename) for filename, t in mtimes.items()) - - # Check that the files have not actually changed. This is to ensure that: - # * an update to wgpu-native is actually completed. - # * the autogenerated code is not manually changed. - # * The codegen report is correct. - for fname in file_cache.filenames_to_change: - content1 = originals[fname] - content2 = file_cache.read(fname) - assert content1 == content2 - - print("Codegen check ok!") - - -def test_that_codegen_report_has_no_errors(): - filename = os.path.join(lib_dir, "resources", "codegen_report.md") - with open(filename, "rb") as f: - text = f.read().decode() - - # The codegen uses a prefix "ERROR:" for unacceptable things. - # All caps, some function names may contain the name "error". - assert "ERROR" not in text - - -if __name__ == "__main__": - test_line_endings() - test_that_code_is_up_to_date() - test_that_codegen_report_has_no_errors() diff --git a/codegen/utils.py b/codegen/utils.py deleted file mode 100644 index e4f68c8..0000000 --- a/codegen/utils.py +++ /dev/null @@ -1,312 +0,0 @@ -""" -Codegen utils. -""" - -import os -import sys -import tempfile - -import black - - -def to_snake_case(name): - """Convert a name from camelCase to snake_case. Names that already are - snake_case remain the same. - """ - name2 = "" - for c in name: - c2 = c.lower() - if c2 != c and len(name2) > 0 and name2[-1] not in "_123": - name2 += "_" - name2 += c2 - return name2 - - -def to_camel_case(name): - """Convert a name from snake_case to camelCase. Names that already are - camelCase remain the same. 
- """ - is_capital = False - name2 = "" - for c in name: - if c == "_" and name2: - is_capital = True - elif is_capital: - name2 += c.upper() - is_capital = False - else: - name2 += c - if name2.endswith(("1d", "2d", "3d")): - name2 = name2[:-1] + "D" - return name2 - - -_file_objects_to_print_to = [sys.stdout] - - -def print(*args, **kwargs): - """Report something (will be printed and added to a file.""" - # __builtins__.print(*args, **kwargs) - if args and not args[0].lstrip().startswith("#"): - args = ("*",) + args - for f in _file_objects_to_print_to: - __builtins__["print"](*args, file=f, flush=True, **kwargs) - - -class PrintToFile: - """Context manager to print to file.""" - - def __init__(self, f): - assert hasattr(f, "write") - self.f = f - - def __enter__(self): - _file_objects_to_print_to.append(self.f) - - def __exit__(self, type, value, tb): - while self.f in _file_objects_to_print_to: - _file_objects_to_print_to.remove(self.f) - self.f.close() - - -def remove_c_comments(code): - """Remove C-style comments from the given code.""" - pos = 0 - new_code = "" - - while True: - # Find start of comment - lookfor = None - i1 = code.find("//", pos) - i2 = code.find("/*", pos) - if i1 >= 0: - lookfor = "\n" - comment_start = i1 - if i2 >= 0: - if not (i1 >= 0 and i1 < i2): - lookfor = "*/" - comment_start = i2 - # Found a start? - if not lookfor: - new_code += code[pos:] - break - else: - new_code += code[pos:comment_start] - # Find the end - comment_end = code.find(lookfor, comment_start + 2) - if comment_end < 0: - break - if lookfor == "\n": - pos = comment_end - else: - pos = comment_end + len(lookfor) - return new_code - - -def blacken(src, singleline=False): - """Format the given src string using black. If singleline is True, - all function signatures become single-line, so they can be parsed - and updated. - """ - # Normal black - mode = black.FileMode(line_length=999 if singleline else 88) - result = black.format_str(src, mode=mode) - - # Make defs single-line. You'd think that setting the line length - # to a very high number would do the trick, but it does not. - if singleline: - lines1 = result.splitlines() - lines2 = [] - in_sig = False - comment = "" - for line in lines1: - if in_sig: - # Handle comment - line, _, c = line.partition("#") - line = line.rstrip() - c = c.strip() - if c: - comment += " " + c.strip() - # Detect end - if line.endswith("):"): - in_sig = False - # Compose line - current_line = lines2[-1] - if not current_line.endswith("("): - current_line += " " - current_line += line.lstrip() - # Finalize - if not in_sig: - # Remove trailing spaces and commas - current_line = current_line.replace(" ):", "):") - current_line = current_line.replace(",):", "):") - # Add comment - if comment: - current_line += " #" + comment - comment = "" - lines2[-1] = current_line - else: - lines2.append(line) - line_nc = line.split("#")[0].strip() - if ( - line_nc.startswith(("def ", "async def", "class ")) - and "(" in line_nc - ): - if not line_nc.endswith("):"): - in_sig = True - lines2.append("") - result = "\n".join(lines2) - - return result - - -class Patcher: - """Class to help patch a Python module. Supports iterating (over - lines, classes, properties, methods), and applying diffs (replace, - remove, insert). 
- """ - - def __init__(self, code=None): - self._init(code) - - def _init(self, code): - """Subclasses can call this to reset the patcher.""" - self.lines = [] - self._diffs = {} - self._classes = {} - if code: - self.lines = blacken(code, True).splitlines() # inf line length - - def remove_line(self, i): - """Remove the line at the given position. There must not have been - an action on line i. - """ - assert i not in self._diffs, f"Line {i} already has a diff" - self._diffs[i] = i, "remove" - - def insert_line(self, i, line): - """Insert a new line at the given position. It's ok if there - has already been an insertion an line i, but there must not have been - any other actions. - """ - if i in self._diffs and self._diffs[i][1] == "insert": - cur_line = self._diffs[i][2] - self._diffs[i] = i, "insert", cur_line + "\n" + line - else: - assert i not in self._diffs, f"Line {i} already has a diff" - self._diffs[i] = i, "insert", line - - def replace_line(self, i, line): - """Replace the line at the given position with another line. - There must not have been an action on line i. - """ - assert i not in self._diffs, f"Line {i} already has a diff" - self._diffs[i] = i, "replace", line - - def dumps(self, format=True): - """Return the patched result as a string.""" - lines = self.lines.copy() - # Apply diff - diffs = sorted(self._diffs.values()) - for diff in reversed(diffs): - if diff[1] == "remove": - lines.pop(diff[0]) - elif diff[1] == "insert": - lines.insert(diff[0], diff[2]) - elif diff[1] == "replace": - lines[diff[0]] = diff[2] - else: # pragma: no cover - raise ValueError(f"Unknown diff: {diff}") - # Format - text = "\n".join(lines) - if format: - try: - text = blacken(text) - except black.InvalidInput as err: # pragma: no cover - # If you get this error, it really helps to load the code - # in an IDE to see where the error is. Let's help with that ... - filename = os.path.join(tempfile.gettempdir(), "wgpu_patcher_fail.py") - with open(filename, "wb") as f: - f.write(text.encode()) - err = str(err) - err = err if len(err) < 78 else err[:77] + "…" - raise RuntimeError( - f"It appears that the patcher has generated invalid Python:" - f"\n\n {err}\n\n" - f'Wrote the generated (but unblackened) code to:\n\n "{filename}"' - ) - - return text - - def iter_lines(self, start_line=0): - """Generator to iterate over the lines. - Each iteration yields (line, linenr) - """ - for i in range(start_line, len(self.lines)): - line = self.lines[i] - yield line, i - - def iter_classes(self, start_line=0): - """Generator to iterate over the classes. - Each iteration yields (classname, linenr_start, linenr_end), - where linenr_end is the last line of code. - """ - current_class = None - for i in range(start_line, len(self.lines)): - line = self.lines[i] - sline = line.rstrip() - if current_class and sline: - if sline.startswith(" "): - current_class[2] = i - else: # code has less indentation -> something new - yield current_class - current_class = None - if line.startswith("class "): - name = line.split(":")[0].split("(")[0].split()[-1] - current_class = [name, i, i] - if current_class: - yield current_class - - def iter_properties(self, start_line=0): - """Generator to iterate over the properties. - Each iteration yields (classname, linenr_first, linenr_last), - where linenr_first is the line that startswith `def`, - and linenr_last is the last line of code. - """ - return self._iter_props_and_methods(start_line, True) - - def iter_methods(self, start_line=0): - """Generator to iterate over the methods. 
- Each iteration yields (classname, linenr_first, linenr_last) - where linenr_first is the line that startswith `def`, - and linenr_last is the last line of code. - """ - return self._iter_props_and_methods(start_line, False) - - def _iter_props_and_methods(self, start_line, find_props): - prop_mark = None - current_def = None - for i in range(start_line, len(self.lines)): - line = self.lines[i] - sline = line.rstrip() - if current_def and sline: - if sline.startswith(" "): - current_def[2] = i - else: - yield current_def - current_def = None - if sline and not sline.startswith(" "): - break # exit class - if line.startswith((" def ", " async def ")): - name = line.split("(")[0].split()[-1] - if prop_mark and find_props: - current_def = [name, i, i] - elif not prop_mark and not find_props: - current_def = [name, i, i] - if line.startswith(" @property"): - prop_mark = i - elif sline and not sline.lstrip().startswith("#"): - prop_mark = None - - if current_def: - yield current_def diff --git a/codegen/wgpu_native_patcher.py b/codegen/wgpu_native_patcher.py deleted file mode 100644 index bc8110d..0000000 --- a/codegen/wgpu_native_patcher.py +++ /dev/null @@ -1,359 +0,0 @@ -""" -Apply codegen to wgpu-native backend. - -The idea is that when there are any changes in wgpu.h that affect how -wgpu_native/_api.py should be written, this module will: - -* For enums: automatically update the mappings. -* For flags: report discrepancies. -* For structs and functions: update the code, so a diff of _api.py quickly - shows if manual changes are needed. - -Note that the apipatcher will also patch wgpu_native/_api.py, but where that codegen -focuses on the API, here we focus on the C library usage. -""" - -from codegen.utils import print, blacken, Patcher -from codegen.hparser import get_h_parser -from codegen.idlparser import get_idl_parser -from codegen.files import file_cache - - -mappings_preamble = ''' -""" Mappings for the wgpu-native backend. """ - -# THIS CODE IS AUTOGENERATED - DO NOT EDIT - -# flake8: noqa -'''.lstrip() - - -def compare_flags(): - """For each flag in WebGPU: - - * Verify that there is a corresponding flag in wgpu.h - * Verify that all fields are present too. - * Verify that the (integer) value is equal. - - """ - - idl = get_idl_parser() - hp = get_h_parser() - - name_map = { - "ColorWrite": "ColorWriteMask", - } - - for name, flag in idl.flags.items(): - name = name_map.get(name, name) - if name not in hp.flags: - print(f"Flag {name} missing in wgpu.h") - else: - for key, val in flag.items(): - key = key.title().replace("_", "") # MAP_READ -> MapRead - key = name_map.get(f"{name}.{key}") or key - if key not in hp.flags[name]: - print(f"Flag field {name}.{key} missing in wgpu.h") - elif val != hp.flags[name][key]: - print(f"Warning: Flag field {name}.{key} have different values.") - - -def write_mappings(): - """Generate the file with dicts to map enums strings to ints. This - also compares the enums in wgpu-native with WebGPU, and reports any - missing ones. - """ - - idl = get_idl_parser() - hp = get_h_parser() - - name_map = {} - name_map_i = {v: k for k, v in name_map.items()} - - # Init generated code - pylines = [mappings_preamble] - - # Create enummap, which allows the wgpu-native backend to resolve enum field names - # to the corresponding integer value. 
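Editor's note: for reference, the shape of the generated wgpu/backends/wgpu_native/_mappings.py that write_mappings() produces. The entries below are illustrative rather than actual generated values; the real integers come from wgpu.h at codegen time:

enummap = {
    "PowerPreference.high-performance": 2,
    "TextureFormat.rgba8unorm": 18,
}
cstructfield2enum = {
    "SamplerDescriptor.addressModeU": "AddressMode",
}
# The backend then resolves a WebGPU string to its native int with e.g.
#     enummap["TextureFormat." + format]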
- enummap = {} - for name in idl.enums: - hname = name_map.get(name, name) - if hname not in hp.enums: - print(f"Enum {hname} missing in wgpu.h") - continue - hp_enum = {key.lower(): val for key, val in hp.enums[hname].items()} - for ikey in idl.enums[name].values(): - hkey = ikey.lower().replace("-", "") - hkey = name_map.get(f"{name}.{hkey}") or hkey - if hkey in hp_enum: - enummap[name + "." + ikey] = hp_enum[hkey] - else: - print(f"Enum field {name}.{ikey} missing in wgpu.h") - - # Write enummap - pylines.append(f"# There are {len(enummap)} enum mappings\n") - pylines.append("enummap = {") - for key in sorted(enummap.keys()): - pylines.append(f' "{key}": {enummap[key]!r},') - pylines.append("}\n") - - # Some structs have fields that are enum values. The wgpu-native backend - # must be able to resolve these too. - cstructfield2enum = {} - for structname, struct in hp.structs.items(): - for key, val in struct.items(): - if isinstance(val, str) and val.startswith("WGPU"): - henumname = val[4:].split("/")[0] - enumname = name_map_i.get(henumname, henumname) - if enumname in idl.enums: - cstructfield2enum[f"{structname[4:]}.{key}"] = enumname - else: - pass # a struct - - # Write cstructfield2enum - pylines.append(f"# There are {len(cstructfield2enum)} struct-field enum mappings\n") - pylines.append("cstructfield2enum = {") - for key in sorted(cstructfield2enum.keys()): - pylines.append(f' "{key}": {cstructfield2enum[key]!r},') - pylines.append("}\n") - - # Write a few native-only mappings: key => int - pylines.append("enum_str2int = {") - for name in ["BackendType"]: - pylines.append(f' "{name}":' + " {") - for key, val in hp.enums[name].items(): - if key == "Force32": - continue - pylines.append(f' "{key}": {val},') - pylines.append(" }") - pylines.append("}") - - # Write a few native-only mappings: int => key - # If possible, resolve to WebGPU names, otherwise use the native name. - pylines.append("enum_int2str = {") - for name in [ - "BackendType", - "AdapterType", - "ErrorType", - "DeviceLostReason", - "TextureFormat", - "TextureDimension", - "PresentMode", - "CompositeAlphaMode", - ]: - webgpu_names = {} - if name in idl.enums: - webgpu_names = { - val.replace("-", ""): val for val in idl.enums[name].values() - } - if "unknown" in webgpu_names: - webgpu_names["undefined"] = "unknown" - pylines.append(f' "{name}":' + " {") - for key, val in hp.enums[name].items(): - if key == "Force32": - continue - enum_val = webgpu_names.get(key.lower(), key) - pylines.append(f' {val}: "{enum_val}",') - pylines.append(" },") - pylines.append("}") - - # Wrap up - code = blacken("\n".join(pylines)) # just in case; code is already black - file_cache.write("backends/wgpu_native/_mappings.py", code) - print( - f"Wrote {len(enummap)} enum mappings and {len(cstructfield2enum)} struct-field mappings to wgpu_native/_mappings.py" - ) - - -def patch_wgpu_native_backend(code): - """Given the Python code, applies patches to annotate functions - calls and struct instantiations. - - For functions: - - * Verify that the function exists in wgpu.h. If not, add a fixme comment. - * Add a comment showing correspinding signature from wgpu.h. - - For structs: - - * Verify that the struct name exists. - * Verify that the correct form (pointer or not) is used. - * Verify that all used fields exists. - * Annotate any missing fields. - * Add a comment that shows all fields and their type. 
- - """ - - for patcher in [CommentRemover(), FunctionPatcher(), StructPatcher()]: - patcher.apply(code) - code = patcher.dumps() - return code - - -class CommentRemover(Patcher): - triggers = "# FIXME: unknown C", "# FIXME: invalid C", "# H:" - - def apply(self, code): - self._init(code) - for line, i in self.iter_lines(): - if line.lstrip().startswith(self.triggers): - self.remove_line(i) - - -class FunctionPatcher(Patcher): - def apply(self, code): - self._init(code) - hp = get_h_parser() - count = 0 - detected = set() - - for line, i in self.iter_lines(): - if "lib.wgpu" in line or "libf.wgpu" in line: - start = line.index(".wgpu") + 1 - end = line.index("(", start) - name = line[start:end] - indent = " " * (len(line) - len(line.lstrip())) - if "lib.wgpu" in line: - self.insert_line( - i, f"{indent}# FIXME: wgpu func calls must be done from libf" - ) - if name not in hp.functions: - msg = f"unknown C function {name}" - self.insert_line(i, f"{indent}# FIXME: {msg}") - print(f"ERROR: {msg}") - else: - detected.add(name) - anno = hp.functions[name].replace(name, "f").strip(";") - self.insert_line(i, indent + f"# H: " + anno) - count += 1 - - print(f"Validated {count} C function calls") - - # Determine what functions were not detected - # There are still quite a few, so we don't list them yet - ignore = ( - "wgpu_create_surface_from", - "wgpu_set_log_level", - "wgpu_get_version", - "wgpu_set_log_callback", - ) - unused = set(name for name in hp.functions if not name.startswith(ignore)) - unused.difference_update(detected) - print(f"Not using {len(unused)} C functions") - - -class StructPatcher(Patcher): - def apply(self, code): - self._init(code) - hp = get_h_parser() - - count = 0 - line_index = -1 - brace_depth = 0 - - for line, i in self.iter_lines(): - if "new_struct_p(" in line or "new_struct(" in line: - if line.lstrip().startswith("def "): - continue # Implementation - if "_new_struct" in line: - continue # Implementation - if "new_struct_p()" in line or "new_struct()" in line: - continue # Comments or docs - line_index = i - j = line.index("new_struct") - line = line[j:] # start brace searching from right pos - brace_depth = 0 - - if line_index >= 0: - for c in line: - if c == "#": - break - elif c == "(": - brace_depth += 1 - elif c == ")": - brace_depth -= 1 - assert brace_depth >= 0 - if brace_depth == 0: - self._validate_struct(hp, line_index, i) - count += 1 - line_index = -1 - break - - print(f"Validated {count} C structs") - - def _validate_struct(self, hp, i1, i2): - """Validate a specific struct usage.""" - - lines = self.lines[ - i1 : i2 + 1 - ] # note: i2 is the line index where the closing brace is - indent = " " * (len(lines[-1]) - len(lines[-1].lstrip())) - - if len(lines) == 1: - # Single line - add a comma before the closing brace - print( - "Notice: made a struct multiline. Rerun codegen to validate the struct." - ) - line = lines[0] - i = line.rindex(")") - line = line[:i] + "," + line[i:] - self.replace_line(i1, line) - return - elif len(lines) == 3 and lines[1].count("="): - # Triplet - add a comma after the last element - print( - "Notice: made a struct multiline. Rerun codegen to validate the struct." - ) - self.replace_line(i1 + 1, self.lines[i1 + 1] + ",") - return - - # We can assume that the struct is multi-line and formatted by Black! 
- assert len(lines) >= 3 - - # Get struct name, and verify - name = lines[1].strip().strip(',"') - struct_name = name.strip(" *") - if name.endswith("*"): - if "new_struct_p" not in lines[0]: - self.insert_line( - i1, indent + f"# FIXME: invalid C struct, use new_struct_p()" - ) - else: - if "new_struct_p" in lines[0]: - self.insert_line( - i1, indent + f"# FIXME: invalid C struct, use new_struct()" - ) - - # Get struct object and create annotation line - if struct_name not in hp.structs: - msg = f"unknown C struct {struct_name}" - self.insert_line(i1, f"{indent}# FIXME: {msg}") - print(f"ERROR: {msg}") - return - else: - struct = hp.structs[struct_name] - fields = ", ".join(f"{key}: {val}" for key, val in struct.items()) - self.insert_line(i1, indent + f"# H: " + fields) - - # Check keys - keys_found = [] - for j in range(2, len(lines) - 1): - line = lines[j] - key = line.split("=")[0].strip() - if key.startswith("# not used:"): - key = key.split(":")[1].split("=")[0].strip() - elif key.startswith("#"): - continue - keys_found.append(key) - if key not in struct: - msg = f"unknown C struct field {struct_name}.{key}" - self.insert_line(i1 + j, f"{indent}# FIXME: {msg}") - print(f"ERROR: {msg}") - - # Insert comments for unused keys - more_lines = [] - for key in struct: - if key not in keys_found: - more_lines.append(indent + f" # not used: {key}") - if more_lines: - self.insert_line(i2, "\n".join(more_lines)) diff --git a/wgpu/__pyinstaller/__init__.py b/wgpu/__pyinstaller/__init__.py deleted file mode 100644 index c27432f..0000000 --- a/wgpu/__pyinstaller/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from os.path import dirname - - -HERE = dirname(__file__) - - -def get_hook_dirs(): - return [HERE] - - -def get_test_dirs(): - return [HERE] diff --git a/wgpu/__pyinstaller/conftest.py b/wgpu/__pyinstaller/conftest.py deleted file mode 100644 index 7f8b737..0000000 --- a/wgpu/__pyinstaller/conftest.py +++ /dev/null @@ -1 +0,0 @@ -from PyInstaller.utils.conftest import * # noqa diff --git a/wgpu/__pyinstaller/hook-wgpu.py b/wgpu/__pyinstaller/hook-wgpu.py deleted file mode 100644 index 70ba783..0000000 --- a/wgpu/__pyinstaller/hook-wgpu.py +++ /dev/null @@ -1,28 +0,0 @@ -from PyInstaller.utils.hooks import collect_data_files, collect_dynamic_libs - -# Init variables that PyInstaller will pick up. -hiddenimports = [] -datas = [] -binaries = [] - -# Include our resource data and binaries. -datas += collect_data_files("wgpu", subdir="resources") -binaries += collect_dynamic_libs("wgpu") - -# Always include the wgpu-native backend. Since an import is not needed to -# load this (default) backend, PyInstaller does not see it by itself. -hiddenimports += ["wgpu.backends.auto", "wgpu.backends.wgpu_native"] - -# For the GUI backends, there always is an import. The auto backend is -# problematic because PyInstaller cannot follow it to a specific -# backend. Also, glfw does not have a hook like this, so it does not -# include the binary when freezing. We can solve both problems with the -# code below. Makes the binaray a bit larger, but only marginally (less -# than 300kb). 
-try: - import glfw # noqa -except ImportError: - pass -else: - hiddenimports += ["wgpu.gui.glfw"] - binaries += collect_dynamic_libs("glfw") diff --git a/wgpu/__pyinstaller/test_wgpu.py b/wgpu/__pyinstaller/test_wgpu.py deleted file mode 100644 index 284c8df..0000000 --- a/wgpu/__pyinstaller/test_wgpu.py +++ /dev/null @@ -1,30 +0,0 @@ -script = """ -# The script part -import sys -import wgpu -import importlib - -# The test part -if "is_test" in sys.argv: - included_modules = [ - "wgpu.backends.auto", - "wgpu.backends.wgpu_native", - "wgpu.gui.glfw", - ] - excluded_modules = [ - "PySide6", - "PyQt6", - ] - for module_name in included_modules: - importlib.import_module(module_name) - for module_name in excluded_modules: - try: - importlib.import_module(module_name) - except ModuleNotFoundError: - continue - raise RuntimeError(module_name + " is not supposed to be importable.") -""" - - -def test_pyi_wgpu(pyi_builder): - pyi_builder.test_source(script, app_args=["is_test"]) diff --git a/wgpu/_classes.py b/wgpu/_classes.py deleted file mode 100644 index d5a5685..0000000 --- a/wgpu/_classes.py +++ /dev/null @@ -1,2100 +0,0 @@ -""" -The classes representing the wgpu API. This module defines the classes, -properties, methods and documentation. The majority of methods are -implemented in backend modules. - -This module is maintained using a combination of manual code and -automatically inserted code. Read the codegen/readme.md for more -information. -""" - -import weakref -import logging -from typing import List, Dict, Union - -from ._coreutils import ApiDiff -from ._diagnostics import diagnostics, texture_format_to_bpp -from . import flags, enums, structs - - -__all__ = [ - "GPUObjectBase", - "GPUAdapterInfo", - "GPU", - "GPUAdapter", - "GPUDevice", - "GPUBuffer", - "GPUTexture", - "GPUTextureView", - "GPUSampler", - "GPUBindGroupLayout", - "GPUBindGroup", - "GPUPipelineLayout", - "GPUShaderModule", - "GPUCompilationMessage", - "GPUCompilationInfo", - "GPUPipelineError", - "GPUPipelineBase", - "GPUComputePipeline", - "GPURenderPipeline", - "GPUCommandBuffer", - "GPUCommandsMixin", - "GPUCommandEncoder", - "GPUBindingCommandsMixin", - "GPUDebugCommandsMixin", - "GPUComputePassEncoder", - "GPURenderPassEncoder", - "GPURenderCommandsMixin", - "GPURenderBundle", - "GPURenderBundleEncoder", - "GPUQueue", - "GPUQuerySet", - "GPUCanvasContext", - "GPUDeviceLostInfo", - "GPUError", - "GPUValidationError", - "GPUOutOfMemoryError", - "GPUInternalError", -] - -logger = logging.getLogger("wgpu") - - -apidiff = ApiDiff() - - -# Obtain the object tracker. Note that we store a ref of -# the latter on all classes that refer to it. Otherwise, on a sys exit, -# the module attributes are None-ified, and the destructors would -# therefore fail and produce warnings. -object_tracker = diagnostics.object_counts.tracker - - -class GPU: - """The entrypoint to the wgpu API. - - The starting point of your wgpu-adventure is always to obtain an - adapter. This is the equivalent to browser's ``navigator.gpu``. - When a backend is loaded, the ``wgpu.gpu`` object is replaced with - a backend-specific implementation. - """ - - # IDL: Promise requestAdapter(optional GPURequestAdapterOptions options = {}); - @apidiff.change("arguments include a canvas object") - def request_adapter( - self, *, power_preference=None, force_fallback_adapter=False, canvas=None - ): - """Create a `GPUAdapter`, the object that represents an abstract wgpu - implementation, from which one can request a `GPUDevice`. 
- - Arguments: - power_preference (PowerPreference): "high-performance" or "low-power". - force_fallback_adapter (bool): whether to use a (probably CPU-based) - fallback adapter. - canvas (WgpuCanvasInterface): The canvas that the adapter should - be able to render to. This can typically be left to None. - """ - # If this method gets called, no backend has been loaded yet, let's do that now! - from .backends.auto import gpu # noqa - - return gpu.request_adapter( - power_preference=power_preference, - force_fallback_adapter=force_fallback_adapter, - canvas=canvas, - ) - - # IDL: Promise requestAdapter(optional GPURequestAdapterOptions options = {}); - @apidiff.change("arguments include a canvas object") - async def request_adapter_async( - self, *, power_preference=None, force_fallback_adapter=False, canvas=None - ): - """Async version of `request_adapter()`.""" - return self.request_adapter( - power_preference=power_preference, - force_fallback_adapter=force_fallback_adapter, - canvas=canvas, - ) - - # IDL: GPUTextureFormat getPreferredCanvasFormat(); - @apidiff.change("Disabled because we put it on the canvas context") - def get_preferred_canvas_format(self): - """Not implemented in wgpu-py; use `GPUCanvasContext.get_preferred_format()` instead. - The WebGPU spec defines this function, but in wgpu there are different - kinds of canvases which may each prefer/support a different format. - """ - raise RuntimeError("Use canvas.get_preferred_format() instead.") - - # IDL: [SameObject] readonly attribute WGSLLanguageFeatures wgslLanguageFeatures; - @property - def wgsl_language_features(self): - """A set of strings representing the WGSL language extensions supported by all adapters. - Returns an empty set for now.""" - # Looks like at the time of writing there are no definitions for extensions yet - return set() - - -# Instantiate API entrypoint -gpu = GPU() - - -class GPUCanvasContext: - """Represents a context to configure a canvas. - - Is also used to obtain the texture to render to. - - Can be obtained via `gui.WgpuCanvasInterface.get_context()`. - """ - - _ot = object_tracker - - def __init__(self, canvas): - self._ot.increase(self.__class__.__name__) - self._canvas_ref = weakref.ref(canvas) - - def _get_canvas(self): - """Getter method for internal use.""" - return self._canvas_ref() - - # IDL: readonly attribute (HTMLCanvasElement or OffscreenCanvas) canvas; - @property - def canvas(self): - """The associated canvas object.""" - return self._canvas_ref() - - # IDL: undefined configure(GPUCanvasConfiguration configuration); - def configure( - self, - *, - device: "GPUDevice", - format: "enums.TextureFormat", - usage: "flags.TextureUsage" = 0x10, - view_formats: "List[enums.TextureFormat]" = [], - color_space: str = "srgb", - alpha_mode: "enums.CanvasAlphaMode" = "opaque", - ): - """Configures the presentation context for the associated canvas. - Destroys any textures produced with a previous configuration. - This clears the drawing buffer to transparent black. - - Arguments: - device (WgpuDevice): The GPU device object to create compatible textures for. - format (enums.TextureFormat): The format that textures returned by - ``get_current_texture()`` will have. Must be one of the supported context - formats. An often used format is "bgra8unorm-srgb". - usage (flags.TextureUsage): Default ``TextureUsage.OUTPUT_ATTACHMENT``. - view_formats (List[enums.TextureFormat]): The formats that views created - from textures returned by ``get_current_texture()`` may use. 
- color_space (PredefinedColorSpace): The color space that values written - into textures returned by ``get_current_texture()`` should be displayed with. - Default "srgb". - alpha_mode (enums.CanvasAlphaMode): Determines the effect that alpha values - will have on the content of textures returned by ``get_current_texture()`` - when read, displayed, or used as an image source. Default "opaque". - """ - raise NotImplementedError() - - # IDL: undefined unconfigure(); - def unconfigure(self): - """Removes the presentation context configuration. - Destroys any textures produced while configured.""" - raise NotImplementedError() - - # IDL: GPUTexture getCurrentTexture(); - def get_current_texture(self): - """Get the `GPUTexture` that will be composited to the canvas next. - This method should be called exactly once during each draw event. - """ - raise NotImplementedError() - - @apidiff.add("Present method is exposed") - def present(self): - """Present what has been drawn to the current texture, by compositing it - to the canvas. Note that a canvas based on `gui.WgpuCanvasBase` will call this - method automatically at the end of each draw event. - """ - raise NotImplementedError() - - @apidiff.add("Better place to define the preferred format") - def get_preferred_format(self, adapter): - """Get the preferred surface texture format.""" - return "bgra8unorm-srgb" # seems to be a good default - - def __del__(self): - self._ot.decrease(self.__class__.__name__) - self._destroy() - - def _destroy(self): - pass - - class GPUAdapterInfo: - """Represents information about an adapter.""" - - def __init__(self, info): - self._info = info - - # IDL: readonly attribute DOMString vendor; - @property - def vendor(self): - """The vendor that built this adapter.""" - return self._info["vendor"] - - # IDL: readonly attribute DOMString architecture; - @property - def architecture(self): - """The adapter's architecture.""" - return self._info["architecture"] - - # IDL: readonly attribute DOMString device; - @property - def device(self): - """The kind of device that this adapter represents.""" - return self._info["device"] - - # IDL: readonly attribute DOMString description; - @property - def description(self): - """A textual description of the adapter.""" - return self._info["description"] - - class GPUAdapter: - """Represents an abstract wgpu implementation. - - An adapter represents both an instance of a hardware accelerator - (e.g. GPU or CPU) and an implementation of WGPU on top of that - accelerator. - - The adapter is used to request a device object. The adapter object - enumerates its capabilities (features) and limits. - - If an adapter becomes unavailable, it becomes invalid. - Once invalid, it never becomes valid again.
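As a quick orientation to the entrypoint documented above, here is a minimal sketch of obtaining an adapter; it uses only calls documented in this module, and the keyword values and variable names are illustrative rather than recommendations.

.. code-block:: py

    import wgpu

    # A backend is loaded implicitly on the first call to request_adapter()
    adapter = wgpu.gpu.request_adapter(power_preference="high-performance")

    # A (probably CPU-based) fallback adapter can be requested instead
    fallback = wgpu.gpu.request_adapter(force_fallback_adapter=True)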
- """ - - _ot = object_tracker - - def __init__(self, internal, features, limits, adapter_info): - self._ot.increase(self.__class__.__name__) - self._internal = internal - - assert isinstance(features, set) - assert isinstance(limits, dict) - assert isinstance(adapter_info, dict) - - self._features = features - self._limits = limits - self._adapter_info = adapter_info - - # IDL: [SameObject] readonly attribute GPUSupportedFeatures features; - @property - def features(self): - """A set of feature names supported by the adapter.""" - return self._features - - # IDL: [SameObject] readonly attribute GPUSupportedLimits limits; - @property - def limits(self): - """A dict with limits for the adapter.""" - return self._limits - - # IDL: Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); - def request_device( - self, - *, - label="", - required_features: "List[enums.FeatureName]" = [], - required_limits: "Dict[str, int]" = {}, - default_queue: "structs.QueueDescriptor" = {}, - ): - """Request a `GPUDevice` from the adapter. - - Arguments: - label (str): A human readable label. Optional. - required_features (list of str): the features (extensions) that you need. Default []. - required_limits (dict): the various limits that you need. Default {}. - default_queue (structs.QueueDescriptor): Descriptor for the default queue. Optional. - """ - raise NotImplementedError() - - # IDL: Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); - async def request_device_async( - self, - *, - label="", - required_features: "List[enums.FeatureName]" = [], - required_limits: "Dict[str, int]" = {}, - default_queue: "structs.QueueDescriptor" = {}, - ): - """Async version of `request_device()`.""" - raise NotImplementedError() - - def _destroy(self): - pass - - def __del__(self): - self._ot.decrease(self.__class__.__name__) - self._destroy() - - # IDL: readonly attribute boolean isFallbackAdapter; - @property - def is_fallback_adapter(self): - """Whether this adapter runs on software (rather than dedicated hardware).""" - return self._adapter_info.get("adapter_type", "").lower() in ("software", "cpu") - - # IDL: Promise requestAdapterInfo(); - def request_adapter_info(self): - """Get a dict with information about this adapter, such as the vendor and devicen name.""" - return self._adapter_info - - # IDL: Promise requestAdapterInfo(); - async def request_adapter_info_async(self): - """Async get information about this adapter.""" - return self._adapter_info - - -class GPUObjectBase: - """The base class for all GPU objects. - - A GPU object is an object that can be thought of having a representation on - the GPU; the device and all objects belonging to a device. - """ - - _ot = object_tracker - _nbytes = 0 - - def __init__(self, label, internal, device): - self._ot.increase(self.__class__.__name__, self._nbytes) - self._label = label - self._internal = internal # The native/raw/real GPU object - self._device = device - logger.info(f"Creating {self.__class__.__name__} {label}") - - # IDL: attribute USVString label; - @property - def label(self): - """A human-readable name identifying the GPU object.""" - return self._label - - def _destroy(self): - """Subclasses can implement this to clean up.""" - pass - - def __del__(self): - self._ot.decrease(self.__class__.__name__, self._nbytes) - self._destroy() - - # Public destroy() methods are implemented on classes as the WebGPU spec specifies. - - -class GPUDevice(GPUObjectBase): - """The top-level interface through which GPU objects are created. 
- - A device is the logical instantiation of an adapter, through which - internal objects are created. It can be shared across threads. - A device is the exclusive owner of all internal objects created - from it: when the device is lost, all objects created from it become - invalid. - - Create a device using `GPUAdapter.request_device()` or - `GPUAdapter.request_device_async()`. - """ - - def __init__(self, label, internal, adapter, features, limits, queue): - super().__init__(label, internal, None) - - assert isinstance(adapter, GPUAdapter) - assert isinstance(features, set) - assert isinstance(limits, dict) - - self._adapter = adapter - self._features = features - self._limits = limits - self._queue = queue - queue._device = self # because it could not be set earlier - - # IDL: [SameObject] readonly attribute GPUSupportedFeatures features; - @property - def features(self): - """A set of feature names supported by this device.""" - return self._features - - # IDL: [SameObject] readonly attribute GPUSupportedLimits limits; - @property - def limits(self): - """A dict with limits for this device.""" - return self._limits - - # IDL: [SameObject] readonly attribute GPUQueue queue; - @property - def queue(self): - """The default `GPUQueue` for this device.""" - return self._queue - - @apidiff.add("Too useful to not-have") - @property - def adapter(self): - """The adapter object corresponding to this device.""" - return self._adapter - - # IDL: readonly attribute Promise lost; - @apidiff.hide("Not a Pythonic API") - @property - def lost(self): - """Provides information about why the device is lost.""" - # In JS you can device.lost.then ... to handle lost devices. - # We may want to eventually support something similar async-like? - # at some point - raise NotImplementedError() - - # IDL: attribute EventHandler onuncapturederror; - @apidiff.hide("Specific to browsers") - @property - def onuncapturederror(self): - """Method called when an error is capured?""" - raise NotImplementedError() - - # IDL: undefined destroy(); - def destroy(self): - """Destroy this device.""" - return self._destroy() - - # IDL: GPUBuffer createBuffer(GPUBufferDescriptor descriptor); - def create_buffer( - self, - *, - label="", - size: int, - usage: "flags.BufferUsage", - mapped_at_creation: bool = False, - ): - """Create a `GPUBuffer` object. - - Arguments: - label (str): A human readable label. Optional. - size (int): The size of the buffer in bytes. - usage (flags.BufferUsage): The ways in which this buffer will be used. - mapped_at_creation (bool): Whether the buffer is initially mapped. - """ - raise NotImplementedError() - - @apidiff.add("Convenience function") - def create_buffer_with_data(self, *, label="", data, usage: "flags.BufferUsage"): - """Create a `GPUBuffer` object initialized with the given data. - - This is a convenience function that creates a mapped buffer, - writes the given data to it, and then unmaps the buffer. - - Arguments: - label (str): A human readable label. Optional. - data: Any object supporting the Python buffer protocol (this - includes bytes, bytearray, ctypes arrays, numpy arrays, etc.). - usage (flags.BufferUsage): The ways in which this buffer will be used. - - Also see `GPUBuffer.write_mapped()` and `GPUQueue.write_buffer()`. - """ - # This function was originally created to support the workflow - # of initializing a buffer with data when we did not support - # buffer mapping. 
Now that we do have buffer mapping it is not - # strictly necessary, but it's still quite useful and feels - # more Pythonic than having to write the boilerplate code below. - - # Create a view of known type - data = memoryview(data).cast("B") - size = data.nbytes - - # Create the buffer and write data - buf = self.create_buffer( - label=label, size=size, usage=usage, mapped_at_creation=True - ) - buf.write_mapped(data) - buf.unmap() - return buf - - # IDL: GPUTexture createTexture(GPUTextureDescriptor descriptor); - def create_texture( - self, - *, - label="", - size: "Union[List[int], structs.Extent3D]", - mip_level_count: int = 1, - sample_count: int = 1, - dimension: "enums.TextureDimension" = "2d", - format: "enums.TextureFormat", - usage: "flags.TextureUsage", - view_formats: "List[enums.TextureFormat]" = [], - ): - """Create a `GPUTexture` object. - - Arguments: - label (str): A human readable label. Optional. - size (tuple or dict): The texture size as a 3-tuple or a `structs.Extent3D`. - mip_level_count (int): The number of mip leveles. Default 1. - sample_count (int): The number of samples. Default 1. - dimension (enums.TextureDimension): The dimensionality of the texture. Default 2d. - format (TextureFormat): What channels it stores and how. - usage (flags.TextureUsage): The ways in which the texture will be used. - view_formats (optional): A list of formats that views are allowed to have - in addition to the texture's own view. Using these formats may have - a performance penalty. - - See https://gpuweb.github.io/gpuweb/#texture-format-caps for a - list of available texture formats. Note that less formats are - available for storage usage. - """ - raise NotImplementedError() - - # IDL: GPUSampler createSampler(optional GPUSamplerDescriptor descriptor = {}); - def create_sampler( - self, - *, - label="", - address_mode_u: "enums.AddressMode" = "clamp-to-edge", - address_mode_v: "enums.AddressMode" = "clamp-to-edge", - address_mode_w: "enums.AddressMode" = "clamp-to-edge", - mag_filter: "enums.FilterMode" = "nearest", - min_filter: "enums.FilterMode" = "nearest", - mipmap_filter: "enums.MipmapFilterMode" = "nearest", - lod_min_clamp: float = 0, - lod_max_clamp: float = 32, - compare: "enums.CompareFunction" = None, - max_anisotropy: int = 1, - ): - """Create a `GPUSampler` object. Samplers specify how a texture is sampled. - - Arguments: - label (str): A human readable label. Optional. - address_mode_u (enums.AddressMode): What happens when sampling beyond the x edge. - Default "clamp-to-edge". - address_mode_v (enums.AddressMode): What happens when sampling beyond the y edge. - Default "clamp-to-edge". - address_mode_w (enums.AddressMode): What happens when sampling beyond the z edge. - Default "clamp-to-edge". - mag_filter (enums.FilterMode): Interpolation when zoomed in. Default 'nearest'. - min_filter (enums.FilterMode): Interpolation when zoomed out. Default 'nearest'. - mipmap_filter: (enums.MipmapFilterMode): Interpolation between mip levels. Default 'nearest'. - lod_min_clamp (float): The minimum level of detail. Default 0. - lod_max_clamp (float): The maxium level of detail. Default 32. - compare (enums.CompareFunction): The sample compare operation for depth textures. - Only specify this for depth textures. Default None. - max_anisotropy (int): The maximum anisotropy value clamp used by the sample, - betweet 1 and 16, default 1. 
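Continuing the sketch above, the resource-creation methods just described (``request_device``, ``create_buffer_with_data``, ``create_texture``, ``create_sampler``) combine roughly as follows; the sizes, formats and usage flags are arbitrary example values.

.. code-block:: py

    device = adapter.request_device()

    # A small storage buffer initialized from bytes via the convenience method
    buffer = device.create_buffer_with_data(
        data=b"\x00" * 64,
        usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC,
    )

    # A 2D texture plus a linearly filtering sampler
    texture = device.create_texture(
        size=(256, 256, 1),
        format=wgpu.TextureFormat.rgba8unorm,
        usage=wgpu.TextureUsage.TEXTURE_BINDING | wgpu.TextureUsage.COPY_DST,
    )
    sampler = device.create_sampler(mag_filter="linear", min_filter="linear")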
- """ - raise NotImplementedError() - - # IDL: GPUBindGroupLayout createBindGroupLayout(GPUBindGroupLayoutDescriptor descriptor); - def create_bind_group_layout( - self, *, label="", entries: "List[structs.BindGroupLayoutEntry]" - ): - """Create a `GPUBindGroupLayout` object. One or more - such objects are passed to `create_pipeline_layout()` to - specify the (abstract) pipeline layout for resources. See the - docs on bind groups for details. - - Arguments: - label (str): A human readable label. Optional. - entries (list): A list of `structs.BindGroupLayoutEntry` dicts. - Each contains either a `structs.BufferBindingLayout`, - `structs.SamplerBindingLayout`, `structs.TextureBindingLayout`, - or `structs.StorageTextureBindingLayout`. - - Example with `structs.BufferBindingLayout`: - - .. code-block:: py - - { - "binding": 0, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.storage_buffer, - "has_dynamic_offset": False, # optional - "min_binding_size": 0 # optional - } - }, - - Note on ``has_dynamic_offset``: For uniform-buffer, storage-buffer, and - readonly-storage-buffer bindings, it indicates whether the binding has a - dynamic offset. One offset must be passed to `pass.set_bind_group()` - for each dynamic binding in increasing order of binding number. - """ - raise NotImplementedError() - - # IDL: GPUBindGroup createBindGroup(GPUBindGroupDescriptor descriptor); - def create_bind_group( - self, - *, - label="", - layout: "GPUBindGroupLayout", - entries: "List[structs.BindGroupEntry]", - ): - """Create a `GPUBindGroup` object, which can be used in - `pass.set_bind_group()` to attach a group of resources. - - Arguments: - label (str): A human readable label. Optional. - layout (GPUBindGroupLayout): The layout (abstract representation) - for this bind group. - entries (list): A list of `structs.BindGroupEntry` dicts. The ``resource`` field - is either `GPUSampler`, `GPUTextureView` or `structs.BufferBinding`. - - Example entry dicts: - - .. code-block:: py - - # For a sampler - { - "binding" : 0, # slot - "resource": a_sampler, - } - # For a texture view - { - "binding" : 0, # slot - "resource": a_texture_view, - } - # For a buffer - { - "binding" : 0, # slot - "resource": { - "buffer": a_buffer, - "offset": 0, - "size": 812, - } - } - """ - raise NotImplementedError() - - # IDL: GPUPipelineLayout createPipelineLayout(GPUPipelineLayoutDescriptor descriptor); - def create_pipeline_layout( - self, *, label="", bind_group_layouts: "List[GPUBindGroupLayout]" - ): - """Create a `GPUPipelineLayout` object, which can be - used in `create_render_pipeline()` or `create_compute_pipeline()`. - - Arguments: - label (str): A human readable label. Optional. - bind_group_layouts (list): A list of `GPUBindGroupLayout` objects. - """ - raise NotImplementedError() - - # IDL: GPUShaderModule createShaderModule(GPUShaderModuleDescriptor descriptor); - def create_shader_module( - self, - *, - label="", - code: str, - source_map: dict = None, - compilation_hints: "List[structs.ShaderModuleCompilationHint]" = [], - ): - """Create a `GPUShaderModule` object from shader source. - - The primary shader language is WGSL, though SpirV is also supported, - as well as GLSL (experimental). - - Arguments: - label (str): A human readable label. Optional. - code (str | bytes): The shader code, as WGSL, GLSL or SpirV. - For GLSL code, the label must be given and contain the word - 'comp', 'vert' or 'frag'. For SpirV the code must be bytes. - compilation_hints: currently unused. 
- """ - raise NotImplementedError() - - # IDL: GPUComputePipeline createComputePipeline(GPUComputePipelineDescriptor descriptor); - def create_compute_pipeline( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - compute: "structs.ProgrammableStage", - ): - """Create a `GPUComputePipeline` object. - - Arguments: - label (str): A human readable label. Optional. - layout (GPUPipelineLayout): object created with `create_pipeline_layout()`. - compute (structs.ProgrammableStage): Binds shader module and entrypoint. - """ - raise NotImplementedError() - - # IDL: Promise createComputePipelineAsync(GPUComputePipelineDescriptor descriptor); - async def create_compute_pipeline_async( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - compute: "structs.ProgrammableStage", - ): - """Async version of create_compute_pipeline().""" - raise NotImplementedError() - - # IDL: GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); - def create_render_pipeline( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - vertex: "structs.VertexState", - primitive: "structs.PrimitiveState" = {}, - depth_stencil: "structs.DepthStencilState" = None, - multisample: "structs.MultisampleState" = {}, - fragment: "structs.FragmentState" = None, - ): - """Create a `GPURenderPipeline` object. - - Arguments: - label (str): A human readable label. Optional. - layout (GPUPipelineLayout): The layout for the new pipeline. - vertex (structs.VertexState): Describes the vertex shader entry point of the - pipeline and its input buffer layouts. - primitive (structs.PrimitiveState): Describes the the primitive-related properties - of the pipeline. If `strip_index_format` is present (which means the - primitive topology is a strip), and the drawCall is indexed, the - vertex index list is split into sub-lists using the maximum value of this - index format as a separator. Example: a list with values - `[1, 2, 65535, 4, 5, 6]` of type "uint16" will be split in sub-lists - `[1, 2]` and `[4, 5, 6]`. - depth_stencil (structs.DepthStencilState): Describes the optional depth-stencil - properties, including the testing, operations, and bias. Optional. - multisample (structs.MultisampleState): Describes the multi-sampling properties of the pipeline. - fragment (structs.FragmentState): Describes the fragment shader - entry point of the pipeline and its output colors. If it’s - None, the No-Color-Output mode is enabled: the pipeline - does not produce any color attachment outputs. It still - performs rasterization and produces depth values based on - the vertex position output. The depth testing and stencil - operations can still be used. - - In the example dicts below, the values that are marked as optional, - the shown value is the default. - - Example vertex (structs.VertexState) dict: - - .. code-block:: py - - { - "module": shader_module, - "entry_point": "main", - "buffers": [ - { - "array_stride": 8, - "step_mode": wgpu.VertexStepMode.vertex, # optional - "attributes": [ - { - "format": wgpu.VertexFormat.float2, - "offset": 0, - "shader_location": 0, - }, - ... - ], - }, - ... - ] - } - - Example primitive (structs.PrimitiveState) dict: - - .. 
code-block:: py - - { - "topology": wgpu.PrimitiveTopology.triangle_list, - "strip_index_format": wgpu.IndexFormat.uint32, # see note - "front_face": wgpu.FrontFace.ccw, # optional - "cull_mode": wgpu.CullMode.none, # optional - } - - Example depth_stencil (structs.DepthStencilState) dict: - - .. code-block:: py - - { - "format": wgpu.TextureFormat.depth24plus_stencil8, - "depth_write_enabled": False, # optional - "depth_compare": wgpu.CompareFunction.always, # optional - "stencil_front": { # optional - "compare": wgpu.CompareFunction.equal, - "fail_op": wgpu.StencilOperation.keep, - "depth_fail_op": wgpu.StencilOperation.keep, - "pass_op": wgpu.StencilOperation.keep, - }, - "stencil_back": { # optional - "compare": wgpu.CompareFunction.equal, - "fail_op": wgpu.StencilOperation.keep, - "depth_fail_op": wgpu.StencilOperation.keep, - "pass_op": wgpu.StencilOperation.keep, - }, - "stencil_read_mask": 0xFFFFFFFF, # optional - "stencil_write_mask": 0xFFFFFFFF, # optional - "depth_bias": 0, # optional - "depth_bias_slope_scale": 0.0, # optional - "depth_bias_clamp": 0.0, # optional - } - - Example multisample (structs.MultisampleState) dict: - - .. code-block:: py - - { - "count": 1, # optional - "mask": 0xFFFFFFFF, # optional - "alpha_to_coverage_enabled": False # optional - } - - Example fragment (structs.FragmentState) dict. The `blend` parameter can be None - to disable blending (not all texture formats support blending). - - .. code-block:: py - - { - "module": shader_module, - "entry_point": "main", - "targets": [ - { - "format": wgpu.TextureFormat.bgra8unorm_srgb, - "blend": { - "color": ( - wgpu.BlendFactor.One, - wgpu.BlendFactor.zero, - gpu.BlendOperation.add, - ), - "alpha": ( - wgpu.BlendFactor.One, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - } - "write_mask": wgpu.ColorWrite.ALL # optional - }, - ... - ] - } - - """ - raise NotImplementedError() - - # IDL: Promise createRenderPipelineAsync(GPURenderPipelineDescriptor descriptor); - async def create_render_pipeline_async( - self, - *, - label="", - layout: "Union[GPUPipelineLayout, enums.AutoLayoutMode]", - vertex: "structs.VertexState", - primitive: "structs.PrimitiveState" = {}, - depth_stencil: "structs.DepthStencilState" = None, - multisample: "structs.MultisampleState" = {}, - fragment: "structs.FragmentState" = None, - ): - """Async version of create_render_pipeline().""" - raise NotImplementedError() - - # IDL: GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); - def create_command_encoder(self, *, label=""): - """Create a `GPUCommandEncoder` object. A command - encoder is used to record commands, which can then be submitted - at once to the GPU. - - Arguments: - label (str): A human readable label. Optional. - """ - raise NotImplementedError() - - # IDL: GPURenderBundleEncoder createRenderBundleEncoder(GPURenderBundleEncoderDescriptor descriptor); - def create_render_bundle_encoder( - self, - *, - label="", - color_formats: "List[enums.TextureFormat]", - depth_stencil_format: "enums.TextureFormat" = None, - sample_count: int = 1, - depth_read_only: bool = False, - stencil_read_only: bool = False, - ): - """Create a `GPURenderBundle` object. 
- - TODO: not yet available in wgpu-native - """ - raise NotImplementedError() - - # IDL: GPUQuerySet createQuerySet(GPUQuerySetDescriptor descriptor); - def create_query_set(self, *, label="", type: "enums.QueryType", count: int): - """Create a `GPUQuerySet` object.""" - raise NotImplementedError() - - # IDL: undefined pushErrorScope(GPUErrorFilter filter); - @apidiff.hide - def push_error_scope(self, filter): - """Pushes a new GPU error scope onto the stack.""" - raise NotImplementedError() - - # IDL: Promise popErrorScope(); - @apidiff.hide - def pop_error_scope(self): - """Pops a GPU error scope from the stack.""" - raise NotImplementedError() - - # IDL: GPUExternalTexture importExternalTexture(GPUExternalTextureDescriptor descriptor); - @apidiff.hide("Specific to browsers") - def import_external_texture( - self, - *, - label="", - source: "Union[memoryview, object]", - color_space: str = "srgb", - ): - """For browsers only.""" - raise NotImplementedError() - - -class GPUBuffer(GPUObjectBase): - """Represents a block of memory that can be used in GPU operations. - - Data is stored in linear layout, meaning that each byte - of the allocation can be addressed by its offset from the start of - the buffer, subject to alignment restrictions depending on the - operation. - - Create a buffer using `GPUDevice.create_buffer()`. - - One can sync data in a buffer by mapping it and then getting and setting data. - Alternatively, one can tell the GPU (via the command encoder) to - copy data between buffers and textures. - """ - - def __init__(self, label, internal, device, size, usage, map_state): - self._nbytes = size - super().__init__(label, internal, device) - self._size = size - self._usage = usage - self._map_state = map_state - - # IDL: readonly attribute GPUSize64Out size; - @property - def size(self): - """The length of the GPUBuffer allocation in bytes.""" - return self._size - - # IDL: readonly attribute GPUFlagsConstant usage; - @property - def usage(self): - """The allowed usages (int bitmap) for this GPUBuffer, specifying - e.g. whether the buffer may be used as a vertex buffer, uniform buffer, - target or source for copying data, etc. - """ - return self._usage - - # IDL: readonly attribute GPUBufferMapState mapState; - @property - def map_state(self): - """The mapping state of the buffer, see `BufferMapState`.""" - return self._map_state - - # WebGPU specifies an API to sync data with the buffer via mapping. - # The idea is to (async) request mapped data, read from / write to - # this memory (using getMappedRange), and then unmap. A buffer - # must be unmapped before it can be used in a pipeline. - # - # This means that the mapped memory is reclaimed (i.e. invalid) - # when unmap is called, and that whatever object we expose the - # memory with to the user, must be set to a state where it can no - # longer be used. There does not seem to be a good way to do this. - # - # In our Python API we do make use of the same map/unmap mechanism, - # but reading and writing data goes via method calls instead of via - # an array-like object that exposes the shared memory. - - # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); - def map(self, mode, offset=0, size=None): - """Maps the given range of the GPUBuffer. - - When this call returns, the buffer content is ready to be - accessed with ``read_mapped`` or ``write_mapped``. Don't forget - to ``unmap()`` when done. 
- - Arguments: - mode (enum): The mapping mode, either wgpu.MapMode.READ or - wgpu.MapMode.WRITE, can also be a string. - offset (int): the buffer offset in bytes. Default 0. - size (int): the size to read. Default until the end. - """ - raise NotImplementedError() - - # IDL: Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); - async def map_async(self, mode, offset=0, size=None): - """Alternative version of map().""" - raise NotImplementedError() - - # IDL: undefined unmap(); - def unmap(self): - """Unmaps the buffer. - - Unmaps the mapped range of the GPUBuffer and makes its contents - available for use by the GPU again. - """ - raise NotImplementedError() - - @apidiff.add("Replacement for get_mapped_range") - def read_mapped(self, buffer_offset=None, size=None, *, copy=True): - """Read mapped buffer data. - - This method must only be called when the buffer is in a mapped state. - This is the Python alternative to WebGPU's ``getMappedRange``. - Returns a memoryview that is a copy of the mapped data (it won't - become invalid when the buffer is unmapped). - - Arguments: - buffer_offset (int): the buffer offset in bytes. Must be at - least as large as the offset specified in ``map()``. The default - is the offset of the mapped range. - size (int): the size to read. The resulting range must fit into the range - specified in ``map()``. The default is as large as the mapped range allows. - copy (bool): whether a copy of the data is given. Default True. - If False, the returned memoryview represents the mapped data - directly, and is released when the buffer is unmapped. - WARNING: views of the returned data (e.g. memoryview objects or - numpy arrays) can still be used after the base memory is released, - which can result in corrupted data and segfaults. Therefore, when - setting copy to False, make *very* sure the memory is not accessed - after the buffer is unmapped. - - Also see `GPUBuffer.write_mapped()`, `GPUQueue.read_buffer()` and `GPUQueue.write_buffer()`. - """ - raise NotImplementedError() - - @apidiff.add("Replacement for get_mapped_range") - def write_mapped(self, data, buffer_offset=None, size=None): - """Write mapped buffer data. - - This method must only be called when the buffer is in a mapped state. - This is the Python alternative to WebGPU's ``getMappedRange``. - Since the data can also be a view into a larger array, this method - allows updating the buffer with minimal data copying. - - Arguments: - data (buffer-like): The data to write to the buffer, in the form of - e.g. a bytes object, memoryview, or numpy array. - buffer_offset (int): the buffer offset in bytes. Must be at least - as large as the offset specified in ``map()``. The default - is the offset of the mapped range. - size (int): the size to write. The default is the size of - the data, so this argument can typically be ignored. The - resulting range must fit into the range specified in ``map()``. - - Also see `GPUBuffer.read_mapped()`, `GPUQueue.read_buffer()` and `GPUQueue.write_buffer()`.
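A hedged sketch of the map / write_mapped / read_mapped / unmap cycle described above; the buffer sizes and contents are arbitrary, and the MAP_READ buffer would normally be filled by a copy command before being read.

.. code-block:: py

    # Upload through mapping (the buffer needs MAP_WRITE usage)
    upload = device.create_buffer(
        size=64, usage=wgpu.BufferUsage.MAP_WRITE | wgpu.BufferUsage.COPY_SRC
    )
    upload.map(wgpu.MapMode.WRITE)
    upload.write_mapped(b"\x01" * 64)
    upload.unmap()  # required before the buffer can be used in a pipeline

    # Download through mapping (the buffer needs MAP_READ usage)
    download = device.create_buffer(
        size=64, usage=wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST
    )
    download.map(wgpu.MapMode.READ)
    data = download.read_mapped()  # a memoryview copy; stays valid after unmap()
    download.unmap()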
- """ - raise NotImplementedError() - - # IDL: ArrayBuffer getMappedRange(optional GPUSize64 offset = 0, optional GPUSize64 size); - @apidiff.hide - def get_mapped_range(self, offset=0, size=None): - raise NotImplementedError("The Python API differs from WebGPU here") - - @apidiff.add("Deprecated but still here to raise a warning") - def map_read(self, offset=None, size=None, iter=None): - """Deprecated.""" - raise DeprecationWarning( - "map_read() is deprecated, use map() and read_mapped() instead." - ) - - @apidiff.add("Deprecated but still here to raise a warning") - def map_write(self, data): - """Deprecated.""" - raise DeprecationWarning( - "map_read() is deprecated, use map() and write_mapped() instead." - ) - - # IDL: undefined destroy(); - def destroy(self): - """An application that no longer requires a buffer can choose - to destroy it. Note that this is automatically called when the - Python object is cleaned up by the garbadge collector. - """ - raise NotImplementedError() - - -class GPUTexture(GPUObjectBase): - """Represents a 1D, 2D or 3D color image object. - - A texture also can have mipmaps (different levels of varying - detail), and arrays. The texture represents the "raw" data. A - `GPUTextureView` is used to define how the texture data - should be interpreted. - - Create a texture using `GPUDevice.create_texture()`. - """ - - def __init__(self, label, internal, device, tex_info): - self._nbytes = self._estimate_nbytes(tex_info) - super().__init__(label, internal, device) - self._tex_info = tex_info - - def _estimate_nbytes(self, tex_info): - format = tex_info["format"] - size = tex_info["size"] - sample_count = tex_info["sample_count"] or 1 - mip_level_count = tex_info["mip_level_count"] or 1 - - bpp = texture_format_to_bpp.get(format, 0) - npixels = size[0] * size[1] * size[2] - nbytes_at_mip_level = sample_count * npixels * bpp / 8 - - nbytes = 0 - for i in range(mip_level_count): - nbytes += nbytes_at_mip_level - nbytes_at_mip_level /= 2 - - # Return rounded to nearest integer - return int(nbytes + 0.5) - - @apidiff.add("Too useful to not-have") - @property - def size(self): - """The size of the texture in mipmap level 0, as a 3-tuple of ints.""" - return self._tex_info["size"] - - # IDL: readonly attribute GPUIntegerCoordinateOut width; - @property - def width(self): - """The texture's width. Also see ``.size``.""" - return self._tex_info["size"][0] - - # IDL: readonly attribute GPUIntegerCoordinateOut height; - @property - def height(self): - """The texture's height. Also see ``.size``.""" - return self._tex_info["size"][1] - - # IDL: readonly attribute GPUIntegerCoordinateOut depthOrArrayLayers; - @property - def depth_or_array_layers(self): - """The texture's depth or number of layers. 
Also see ``.size``.""" - return self._tex_info["size"][2] - - # IDL: readonly attribute GPUIntegerCoordinateOut mipLevelCount; - @property - def mip_level_count(self): - """The total number of the mipmap levels of the texture.""" - return self._tex_info["mip_level_count"] - - # IDL: readonly attribute GPUSize32Out sampleCount; - @property - def sample_count(self): - """The number of samples in each texel of the texture.""" - return self._tex_info["sample_count"] - - # IDL: readonly attribute GPUTextureDimension dimension; - @property - def dimension(self): - """The dimension of the texture.""" - return self._tex_info["dimension"] - - # IDL: readonly attribute GPUTextureFormat format; - @property - def format(self): - """The format of the texture.""" - return self._tex_info["format"] - - # IDL: readonly attribute GPUFlagsConstant usage; - @property - def usage(self): - """The allowed usages for this texture.""" - return self._tex_info["usage"] - - # IDL: GPUTextureView createView(optional GPUTextureViewDescriptor descriptor = {}); - def create_view( - self, - *, - label="", - format: "enums.TextureFormat" = None, - dimension: "enums.TextureViewDimension" = None, - aspect: "enums.TextureAspect" = "all", - base_mip_level: int = 0, - mip_level_count: int = None, - base_array_layer: int = 0, - array_layer_count: int = None, - ): - """Create a `GPUTextureView` object. - - If no aguments are given, a default view is given, with the - same format and dimension as the texture. - - Arguments: - label (str): A human readable label. Optional. - format (enums.TextureFormat): What channels it stores and how. - dimension (enums.TextureViewDimension): The dimensionality of the texture view. - aspect (enums.TextureAspect): Whether this view is used for depth, stencil, or all. - Default all. - base_mip_level (int): The starting mip level. Default 0. - mip_level_count (int): The number of mip levels. Default None. - base_array_layer (int): The starting array layer. Default 0. - array_layer_count (int): The number of array layers. Default None. - """ - raise NotImplementedError() - - # IDL: undefined destroy(); - def destroy(self): - """An application that no longer requires a texture can choose - to destroy it. Note that this is automatically called when the - Python object is cleaned up by the garbadge collector. - """ - raise NotImplementedError() - - -class GPUTextureView(GPUObjectBase): - """Represents a way to represent a `GPUTexture`. - - Create a texture view using `GPUTexture.create_view()`. - """ - - def __init__(self, label, internal, device, texture, size): - super().__init__(label, internal, device) - self._texture = texture - self._size = size - - @apidiff.add("Need to know size e.g. for texture view provided by canvas") - @property - def size(self): - """The texture size (as a 3-tuple).""" - return self._size - - @apidiff.add("Too useful to not-have") - @property - def texture(self): - """The texture object to which this is a view.""" - return self._texture - - -class GPUSampler(GPUObjectBase): - """Defines how a texture (view) must be sampled by the shader. - - It defines the subsampling, sampling between mip levels, and sampling out - of the image boundaries. - - Create a sampler using `GPUDevice.create_sampler()`. - """ - - -class GPUBindGroupLayout(GPUObjectBase): - """Defines the interface between a set of resources bound in a `GPUBindGroup`. - - It also defines their accessibility in shader stages. - - Create a bind group layout using `GPUDevice.create_bind_group_layout()`. 
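A small illustrative sketch of the view-related methods and wgpu-py additions described above, reusing the texture from the earlier sketch; the argument values are examples only.

.. code-block:: py

    # Default view: same format and dimension as the texture
    view = texture.create_view()

    # Explicit view restricted to mip level 0
    mip0 = texture.create_view(base_mip_level=0, mip_level_count=1)

    # The wgpu-py additions expose the view size and the parent texture
    assert view.texture is texture
    print(view.size)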
- """ - - def __init__(self, label, internal, device, bindings): - super().__init__(label, internal, device) - self._bindings = tuple(bindings) - - -class GPUBindGroup(GPUObjectBase): - """Represents a group of resource bindings (buffer, sampler, texture-view). - - It holds the shader slot and a reference to the resource (sampler, - texture-view, buffer). - - Create a bind group using `GPUDevice.create_bind_group()`. - """ - - def __init__(self, label, internal, device, bindings): - super().__init__(label, internal, device) - self._bindings = bindings - - -class GPUPipelineLayout(GPUObjectBase): - """Describes the layout of a pipeline, as a list of `GPUBindGroupLayout` objects. - - Create a pipeline layout using `GPUDevice.create_pipeline_layout()`. - """ - - def __init__(self, label, internal, device, layouts): - super().__init__(label, internal, device) - self._layouts = tuple(layouts) # GPUBindGroupLayout objects - - -class GPUShaderModule(GPUObjectBase): - """Represents a programmable shader. - - Create a shader module using `GPUDevice.create_shader_module()`. - """ - - # IDL: Promise getCompilationInfo(); - def get_compilation_info(self): - """Get shader compilation info. Always returns empty list at the moment.""" - # How can this return shader errors if one cannot create a - # shader module when the shader source has errors? - raise NotImplementedError() - - -class GPUPipelineBase: - """A mixin class for render and compute pipelines.""" - - def __init__(self, label, internal, device): - super().__init__(label, internal, device) - - # IDL: [NewObject] GPUBindGroupLayout getBindGroupLayout(unsigned long index); - def get_bind_group_layout(self, index): - """Get the bind group layout at the given index.""" - raise NotImplementedError() - - -class GPUComputePipeline(GPUPipelineBase, GPUObjectBase): - """Represents a single pipeline for computations (no rendering). - - Create a compute pipeline using `GPUDevice.create_compute_pipeline()`. - """ - - -class GPURenderPipeline(GPUPipelineBase, GPUObjectBase): - """Represents a single pipeline to draw something. - - The rendering typically involves a vertex and fragment stage, though - the latter is optional. - The render target can come from a window on the screen or from an - in-memory texture (off-screen rendering). - - Create a render pipeline using `GPUDevice.create_render_pipeline()`. - """ - - -class GPUCommandBuffer(GPUObjectBase): - """Stores a series of commands generated by a `GPUCommandEncoder`. - - The buffered commands can subsequently be submitted to a `GPUQueue`. - - Command buffers are single use, you must only submit them once and - submitting them destroys them. Use render bundles to re-use commands. - - Create a command buffer using `GPUCommandEncoder.finish()`. - """ - - -class GPUCommandsMixin: - """Mixin for classes that encode commands.""" - - pass - - -class GPUBindingCommandsMixin: - """Mixin for classes that defines bindings.""" - - # IDL: undefined setBindGroup(GPUIndex32 index, GPUBindGroup? bindGroup, Uint32Array dynamicOffsetsData, GPUSize64 dynamicOffsetsDataStart, GPUSize32 dynamicOffsetsDataLength); - def set_bind_group( - self, - index, - bind_group, - dynamic_offsets_data, - dynamic_offsets_data_start, - dynamic_offsets_data_length, - ): - """Associate the given bind group (i.e. group or resources) with the - given slot/index. - - Arguments: - index (int): The slot to bind at. - bind_group (GPUBindGroup): The bind group to bind. 
- dynamic_offsets_data (list of int): A list of offsets (one for each bind group). - dynamic_offsets_data_start (int): Not used. - dynamic_offsets_data_length (int): Not used. - """ - raise NotImplementedError() - - -class GPUDebugCommandsMixin: - """Mixin for classes that support debug groups and markers.""" - - # IDL: undefined pushDebugGroup(USVString groupLabel); - def push_debug_group(self, group_label): - """Push a named debug group into the command stream.""" - raise NotImplementedError() - - # IDL: undefined popDebugGroup(); - def pop_debug_group(self): - """Pop the active debug group.""" - raise NotImplementedError() - - # IDL: undefined insertDebugMarker(USVString markerLabel); - def insert_debug_marker(self, marker_label): - """Insert the given message into the debug message queue.""" - raise NotImplementedError() - - -class GPURenderCommandsMixin: - """Mixin for classes that provide rendering commands.""" - - # IDL: undefined setPipeline(GPURenderPipeline pipeline); - def set_pipeline(self, pipeline): - """Set the pipeline for this render pass. - - Arguments: - pipeline (GPURenderPipeline): The pipeline to use. - """ - raise NotImplementedError() - - # IDL: undefined setIndexBuffer(GPUBuffer buffer, GPUIndexFormat indexFormat, optional GPUSize64 offset = 0, optional GPUSize64 size); - def set_index_buffer(self, buffer, index_format, offset=0, size=None): - """Set the index buffer for this render pass. - - Arguments: - buffer (GPUBuffer): The buffer that contains the indices. - index_format (GPUIndexFormat): The format of the index data - contained in buffer. If `strip_index_format` is given in the - call to `GPUDevice.create_render_pipeline()`, it must match. - offset (int): The byte offset in the buffer. Default 0. - size (int): The number of bytes to use. If zero, the remaining size - (after offset) of the buffer is used. Default 0. - """ - raise NotImplementedError() - - # IDL: undefined setVertexBuffer(GPUIndex32 slot, GPUBuffer? buffer, optional GPUSize64 offset = 0, optional GPUSize64 size); - def set_vertex_buffer(self, slot, buffer, offset=0, size=None): - """Associate a vertex buffer with a bind slot. - - Arguments: - slot (int): The binding slot for the vertex buffer. - buffer (GPUBuffer): The buffer that contains the vertex data. - offset (int): The byte offset in the buffer. Default 0. - size (int): The number of bytes to use. If zero, the remaining size - (after offset) of the buffer is used. Default 0. - """ - raise NotImplementedError() - - # IDL: undefined draw(GPUSize32 vertexCount, optional GPUSize32 instanceCount = 1, optional GPUSize32 firstVertex = 0, optional GPUSize32 firstInstance = 0); - def draw(self, vertex_count, instance_count=1, first_vertex=0, first_instance=0): - """Run the render pipeline without an index buffer. - - Arguments: - vertex_count (int): The number of vertices to draw. - instance_count (int): The number of instances to draw. Default 1. - first_vertex (int): The vertex offset. Default 0. - first_instance (int): The instance offset. Default 0. - """ - raise NotImplementedError() - - # IDL: undefined drawIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); - def draw_indirect(self, indirect_buffer, indirect_offset): - """Like `draw()`, but the function arguments are in a buffer. - - Arguments: - indirect_buffer (GPUBuffer): The buffer that contains the arguments. - indirect_offset (int): The byte offset at which the arguments are. 
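To show how the rendering commands above are typically combined, here is a hedged sketch; ``render_pass`` is assumed to come from ``GPUCommandEncoder.begin_render_pass()`` (documented below), ``bind_group`` is the one from the earlier sketch, and ``render_pipeline`` and ``vertex_buffer`` are placeholders.

.. code-block:: py

    render_pass.set_pipeline(render_pipeline)
    render_pass.set_bind_group(0, bind_group, [], 0, 0)  # no dynamic offsets
    render_pass.set_vertex_buffer(0, vertex_buffer)
    render_pass.draw(3)  # one triangle; instance_count defaults to 1
    render_pass.end()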
- """ - raise NotImplementedError() - - # IDL: undefined drawIndexed(GPUSize32 indexCount, optional GPUSize32 instanceCount = 1, optional GPUSize32 firstIndex = 0, optional GPUSignedOffset32 baseVertex = 0, optional GPUSize32 firstInstance = 0); - def draw_indexed( - self, - index_count, - instance_count=1, - first_index=0, - base_vertex=0, - first_instance=0, - ): - """Run the render pipeline using an index buffer. - - Arguments: - index_count (int): The number of indices to draw. - instance_count (int): The number of instances to draw. Default 1. - first_index (int): The index offset. Default 0. - base_vertex (int): A number added to each index in the index buffer. Default 0. - first_instance (int): The instance offset. Default 0. - """ - raise NotImplementedError() - - # IDL: undefined drawIndexedIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); - def draw_indexed_indirect(self, indirect_buffer, indirect_offset): - """ - Like `draw_indexed()`, but the function arguments are in a buffer. - - Arguments: - indirect_buffer (GPUBuffer): The buffer that contains the arguments. - indirect_offset (int): The byte offset at which the arguments are. - """ - raise NotImplementedError() - - -class GPUCommandEncoder(GPUCommandsMixin, GPUDebugCommandsMixin, GPUObjectBase): - """Object to record a series of commands. - - When done, call `finish()` to obtain a `GPUCommandBuffer` object. - - Create a command encoder using `GPUDevice.create_command_encoder()`. - """ - - # IDL: GPUComputePassEncoder beginComputePass(optional GPUComputePassDescriptor descriptor = {}); - def begin_compute_pass( - self, *, label="", timestamp_writes: "structs.ComputePassTimestampWrites" = None - ): - """Record the beginning of a compute pass. Returns a - `GPUComputePassEncoder` object. - - Arguments: - label (str): A human readable label. Optional. - timestamp_writes: unused - """ - raise NotImplementedError() - - # IDL: GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor descriptor); - def begin_render_pass( - self, - *, - label="", - color_attachments: "List[structs.RenderPassColorAttachment]", - depth_stencil_attachment: "structs.RenderPassDepthStencilAttachment" = None, - occlusion_query_set: "GPUQuerySet" = None, - timestamp_writes: "structs.RenderPassTimestampWrites" = None, - max_draw_count: int = 50000000, - ): - """Record the beginning of a render pass. Returns a - `GPURenderPassEncoder` object. - - Arguments: - label (str): A human readable label. Optional. - color_attachments (list): List of `structs.RenderPassColorAttachment` dicts. - depth_stencil_attachment (structs.RenderPassDepthStencilAttachment): Describes the depth stencil attachment. Default None. - occlusion_query_set (GPUQuerySet): Default None. TODO NOT IMPLEMENTED in wgpu-native. - timestamp_writes: unused - """ - raise NotImplementedError() - - # IDL: undefined clearBuffer( GPUBuffer buffer, optional GPUSize64 offset = 0, optional GPUSize64 size); - def clear_buffer(self, buffer, offset=0, size=None): - """Set (part of) the given buffer to zeros. Both offset and size must be a multiple of 4. If size is None, the whole buffer after offset is cleared.""" - raise NotImplementedError() - - # IDL: undefined copyBufferToBuffer( GPUBuffer source, GPUSize64 sourceOffset, GPUBuffer destination, GPUSize64 destinationOffset, GPUSize64 size); - def copy_buffer_to_buffer( - self, source, source_offset, destination, destination_offset, size - ): - """Copy the contents of a buffer to another buffer. 
- - Arguments: - source (GPUBuffer): The source buffer. - source_offset (int): The byte offset (a multiple of 4). - destination (GPUBuffer): The target buffer. - destination_offset (int): The byte offset in the destination buffer (a multiple of 4). - size (int): The number of bytes to copy (a multiple of 4). - """ - raise NotImplementedError() - - # IDL: undefined copyBufferToTexture( GPUImageCopyBuffer source, GPUImageCopyTexture destination, GPUExtent3D copySize); - def copy_buffer_to_texture(self, source, destination, copy_size): - """Copy the contents of a buffer to a texture (view). - - Arguments: - source (GPUBuffer): A dict with fields: buffer, offset, bytes_per_row, rows_per_image. - destination (GPUTexture): A dict with fields: texture, mip_level, origin. - copy_size (int): The number of bytes to copy. - - Note that the `bytes_per_row` must be a multiple of 256. - """ - raise NotImplementedError() - - # IDL: undefined copyTextureToBuffer( GPUImageCopyTexture source, GPUImageCopyBuffer destination, GPUExtent3D copySize); - def copy_texture_to_buffer(self, source, destination, copy_size): - """Copy the contents of a texture (view) to a buffer. - - Arguments: - source (GPUTexture): A dict with fields: texture, mip_level, origin. - destination (GPUBuffer): A dict with fields: buffer, offset, bytes_per_row, rows_per_image. - copy_size (int): The number of bytes to copy. - - Note that the `bytes_per_row` must be a multiple of 256. - """ - raise NotImplementedError() - - # IDL: undefined copyTextureToTexture( GPUImageCopyTexture source, GPUImageCopyTexture destination, GPUExtent3D copySize); - def copy_texture_to_texture(self, source, destination, copy_size): - """Copy the contents of a texture (view) to another texture (view). - - Arguments: - source (GPUTexture): A dict with fields: texture, mip_level, origin. - destination (GPUTexture): A dict with fields: texture, mip_level, origin. - copy_size (int): The number of bytes to copy. - """ - raise NotImplementedError() - - # IDL: GPUCommandBuffer finish(optional GPUCommandBufferDescriptor descriptor = {}); - def finish(self, *, label=""): - """Finish recording. Returns a `GPUCommandBuffer` to - submit to a `GPUQueue`. - - Arguments: - label (str): A human readable label. Optional. - """ - raise NotImplementedError() - - # IDL: undefined resolveQuerySet( GPUQuerySet querySet, GPUSize32 firstQuery, GPUSize32 queryCount, GPUBuffer destination, GPUSize64 destinationOffset); - def resolve_query_set( - self, query_set, first_query, query_count, destination, destination_offset - ): - """TODO""" - raise NotImplementedError() - - -class GPUComputePassEncoder( - GPUCommandsMixin, GPUDebugCommandsMixin, GPUBindingCommandsMixin, GPUObjectBase -): - """Object to records commands for a compute pass. - - Create a compute pass encoder using `GPUCommandEncoder.begin_compute_pass()`. - """ - - # IDL: undefined setPipeline(GPUComputePipeline pipeline); - def set_pipeline(self, pipeline): - """Set the pipeline for this compute pass. - - Arguments: - pipeline (GPUComputePipeline): The pipeline to use. - """ - raise NotImplementedError() - - # IDL: undefined dispatchWorkgroups(GPUSize32 workgroupCountX, optional GPUSize32 workgroupCountY = 1, optional GPUSize32 workgroupCountZ = 1); - def dispatch_workgroups( - self, workgroup_count_x, workgroup_count_y=1, workgroup_count_z=1 - ): - """Run the compute shader. - - Arguments: - x (int): The number of cycles in index x. - y (int): The number of cycles in index y. Default 1. 
- z (int): The number of cycles in index z. Default 1. - """ - raise NotImplementedError() - - # IDL: undefined dispatchWorkgroupsIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); - def dispatch_workgroups_indirect(self, indirect_buffer, indirect_offset): - """Like `dispatch_workgroups()`, but the function arguments are in a buffer. - - Arguments: - indirect_buffer (GPUBuffer): The buffer that contains the arguments. - indirect_offset (int): The byte offset at which the arguments are. - """ - raise NotImplementedError() - - # IDL: undefined end(); - def end(self): - """Record the end of the compute pass.""" - raise NotImplementedError() - - -class GPURenderPassEncoder( - GPUCommandsMixin, - GPUDebugCommandsMixin, - GPUBindingCommandsMixin, - GPURenderCommandsMixin, - GPUObjectBase, -): - """Object to records commands for a render pass. - - Create a render pass encoder using `GPUCommandEncoder.begin_render_pass`. - """ - - # IDL: undefined setViewport(float x, float y, float width, float height, float minDepth, float maxDepth); - def set_viewport(self, x, y, width, height, min_depth, max_depth): - """Set the viewport for this render pass. The whole scene is rendered - to this sub-rectangle. - - Arguments: - x (int): Horizontal coordinate. - y (int): Vertical coordinate. - width (int): Horizontal size. - height (int): Vertical size. - min_depth (int): Clipping in depth. - max_depth (int): Clipping in depth. - - """ - raise NotImplementedError() - - # IDL: undefined setScissorRect(GPUIntegerCoordinate x, GPUIntegerCoordinate y, GPUIntegerCoordinate width, GPUIntegerCoordinate height); - def set_scissor_rect(self, x, y, width, height): - """Set the scissor rectangle for this render pass. The scene - is rendered as usual, but is only applied to this sub-rectangle. - - Arguments: - x (int): Horizontal coordinate. - y (int): Vertical coordinate. - width (int): Horizontal size. - height (int): Vertical size. - """ - raise NotImplementedError() - - # IDL: undefined setBlendConstant(GPUColor color); - def set_blend_constant(self, color): - """Set the blend color for the render pass. - - Arguments: - color (tuple or dict): A color with fields (r, g, b, a). - """ - raise NotImplementedError() - - # IDL: undefined setStencilReference(GPUStencilValue reference); - def set_stencil_reference(self, reference): - """Set the reference stencil value for this render pass. - - Arguments: - reference (int): The reference value. - """ - raise NotImplementedError() - - # IDL: undefined executeBundles(sequence bundles); - def execute_bundles(self, bundles): - """ - TODO: not yet available in wgpu-native - """ - raise NotImplementedError() - - # IDL: undefined end(); - def end(self): - """Record the end of the render pass.""" - raise NotImplementedError() - - # IDL: undefined beginOcclusionQuery(GPUSize32 queryIndex); - def begin_occlusion_query(self, query_index): - """TODO""" - raise NotImplementedError() - - # IDL: undefined endOcclusionQuery(); - def end_occlusion_query(self): - """TODO""" - raise NotImplementedError() - - -class GPURenderBundle(GPUObjectBase): - """ - TODO: not yet wrapped. - """ - - -class GPURenderBundleEncoder( - GPUCommandsMixin, - GPUDebugCommandsMixin, - GPUBindingCommandsMixin, - GPURenderCommandsMixin, - GPUObjectBase, -): - """ - TODO: not yet wrapped - """ - - # IDL: GPURenderBundle finish(optional GPURenderBundleDescriptor descriptor = {}); - def finish(self, *, label=""): - """Finish recording and return a `GPURenderBundle`. 
- - Arguments: - label (str): A human readable label. Optional. - """ - raise NotImplementedError() - - -class GPUQueue(GPUObjectBase): - """Object to submit command buffers to. - - You can obtain a queue object via the :attr:`GPUDevice.queue` property. - """ - - # IDL: undefined submit(sequence commandBuffers); - def submit(self, command_buffers): - """Submit a `GPUCommandBuffer` to the queue. - - Arguments: - command_buffers (list): The `GPUCommandBuffer` objects to add. - """ - raise NotImplementedError() - - # IDL: undefined writeBuffer( GPUBuffer buffer, GPUSize64 bufferOffset, AllowSharedBufferSource data, optional GPUSize64 dataOffset = 0, optional GPUSize64 size); - def write_buffer(self, buffer, buffer_offset, data, data_offset=0, size=None): - """Takes the data contents and schedules a write operation of - these contents to the buffer. A snapshot of the data is taken; - any changes to the data after this function is called do not - affect the buffer contents. - - Arguments: - buffer: The `GPUBuffer` object to write to. - buffer_offset (int): The offset in the buffer to start writing at. - data: The data to write. Must be contiguous. - data_offset: The byte offset in the data. Default 0. - size: The number of bytes to write. Default all minus offset. - - This maps the data to a temporary buffer and then copies that buffer - to the given buffer. The given buffer's usage must include COPY_DST. - - Also see `GPUBuffer.map()`. - - """ - raise NotImplementedError() - - @apidiff.add("For symmetry with queue.write_buffer") - def read_buffer(self, buffer, buffer_offset=0, size=None): - """Takes the data contents of the buffer and return them as a memoryview. - - Arguments: - buffer: The `GPUBuffer` object to read from. - buffer_offset (int): The offset in the buffer to start reading from. - size: The number of bytes to read. Default all minus offset. - - This copies the data in the given buffer to a temporary buffer - and then maps that buffer to read the data. The given buffer's - usage must include COPY_SRC. - - Also see `GPUBuffer.map()`. - """ - raise NotImplementedError() - - # IDL: undefined writeTexture( GPUImageCopyTexture destination, AllowSharedBufferSource data, GPUImageDataLayout dataLayout, GPUExtent3D size); - def write_texture(self, destination, data, data_layout, size): - """Takes the data contents and schedules a write operation of - these contents to the destination texture in the queue. A - snapshot of the data is taken; any changes to the data after - this function is called do not affect the texture contents. - - Arguments: - destination: A dict with fields: "texture" (a texture object), - "origin" (a 3-tuple), "mip_level" (an int, default 0). - data: The data to write. - data_layout: A dict with fields: "offset" (an int, default 0), - "bytes_per_row" (an int), "rows_per_image" (an int, default 0). - size: A 3-tuple of ints specifying the size to write. - - Unlike `GPUCommandEncoder.copyBufferToTexture()`, there is - no alignment requirement on `bytes_per_row`. - """ - raise NotImplementedError() - - @apidiff.add("For symmetry, and to help work around the bytes_per_row constraint") - def read_texture(self, source, data_layout, size): - """Reads the contents of the texture and return them as a memoryview. - - Arguments: - source: A dict with fields: "texture" (a texture object), - "origin" (a 3-tuple), "mip_level" (an int, default 0). - data_layout: A dict with fields: "offset" (an int, default 0), - "bytes_per_row" (an int), "rows_per_image" (an int, default 0). 
- size: A 3-tuple of ints specifying the size to write. - - Unlike `GPUCommandEncoder.copyBufferToTexture()`, there is - no alignment requirement on `bytes_per_row`, although in the - current implementation there will be a performance penalty if - ``bytes_per_row`` is not a multiple of 256 (because we'll be - copying data row-by-row in Python). - """ - raise NotImplementedError() - - # IDL: Promise onSubmittedWorkDone(); - def on_submitted_work_done(self): - """TODO""" - raise NotImplementedError() - - # IDL: undefined copyExternalImageToTexture( GPUImageCopyExternalImage source, GPUImageCopyTextureTagged destination, GPUExtent3D copySize); - @apidiff.hide("Specific to browsers") - def copy_external_image_to_texture(self, source, destination, copy_size): - raise NotImplementedError() - - -# %% Further non-GPUObject classes - - -class GPUDeviceLostInfo: - """An object that contains information about the device being lost.""" - - # Not used at the moment, see device.lost prop - - def __init__(self, reason, message): - self._reason = reason - self._message = message - - # IDL: readonly attribute DOMString message; - @property - def message(self): - """The error message specifying the reason for the device being lost.""" - return self._message - - # IDL: readonly attribute GPUDeviceLostReason reason; - @property - def reason(self): - """The reason (enums.GPUDeviceLostReason) for the device getting lost. Can be None.""" - return self._reason - - -class GPUError(Exception): - """A generic GPU error.""" - - def __init__(self, message): - super().__init__(message) - - # IDL: readonly attribute DOMString message; - @property - def message(self): - """The error message.""" - return self.args[0] - - -class GPUOutOfMemoryError(GPUError, MemoryError): - """An error raised when the GPU is out of memory.""" - - # IDL: constructor(DOMString message); - def __init__(self, message): - super().__init__(message or "GPU is out of memory.") - - -class GPUValidationError(GPUError): - """An error raised when the pipeline could not be validated.""" - - # IDL: constructor(DOMString message); - def __init__(self, message): - super().__init__(message) - - -class GPUPipelineError(Exception): - """An error raised when a pipeline could not be created.""" - - # IDL: constructor(optional DOMString message = "", GPUPipelineErrorInit options); - def __init__(self, message="", options=None): - super().__init__(message or "") - self._options = options - - # IDL: readonly attribute GPUPipelineErrorReason reason; - @property - def reason(self): - """The reason for the failure.""" - return self.args[0] - - -class GPUInternalError(GPUError): - """An error raised for implementation-specific reasons. - - An operation failed for a system or implementation-specific - reason even when all validation requirements have been satisfied. 
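Since these error types form a regular Python exception hierarchy (and GPUOutOfMemoryError also derives from MemoryError), they can be handled with ordinary try/except blocks. A minimal sketch, using only the classes defined above:

# The GPU errors are normal Python exceptions and can be caught as such.
assert issubclass(GPUValidationError, GPUError)
assert issubclass(GPUOutOfMemoryError, GPUError)
assert issubclass(GPUOutOfMemoryError, MemoryError)

try:
    raise GPUOutOfMemoryError("")  # an empty message falls back to the default
except MemoryError as err:
    print(err.message)  # -> "GPU is out of memory."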
- """ - - # IDL: constructor(DOMString message); - def __init__(self, message): - super().__init__(message) - - -# %% Not implemented - - -class GPUCompilationMessage: - """An object that contains information about a problem with shader compilation.""" - - # IDL: readonly attribute DOMString message; - @property - def message(self): - """The warning/error message.""" - raise NotImplementedError() - - # IDL: readonly attribute GPUCompilationMessageType type; - @property - def type(self): - """The type of warning/problem.""" - raise NotImplementedError() - - # IDL: readonly attribute unsigned long long lineNum; - @property - def line_num(self): - """The corresponding line number in the shader source.""" - raise NotImplementedError() - - # IDL: readonly attribute unsigned long long linePos; - @property - def line_pos(self): - """The position on the line in the shader source.""" - raise NotImplementedError() - - # IDL: readonly attribute unsigned long long offset; - @property - def offset(self): - """Offset of ...""" - raise NotImplementedError() - - # IDL: readonly attribute unsigned long long length; - @property - def length(self): - """The length of the line?""" - raise NotImplementedError() - - -class GPUCompilationInfo: - """TODO""" - - # IDL: readonly attribute FrozenArray messages; - @property - def messages(self): - """A list of `GPUCompilationMessage` objects.""" - raise NotImplementedError() - - -class GPUQuerySet(GPUObjectBase): - """An object to store the results of queries on passes. - - You can obtain a query set object via :attr:`GPUDevice.create_query_set`. - """ - - def __init__(self, label, internal, device, type, count): - super().__init__(label, internal, device) - self._type = type - self._count = count - - # IDL: readonly attribute GPUQueryType type; - @property - def type(self): - """The type of the queries managed by this queryset.""" - return self._type - - # IDL: readonly attribute GPUSize32Out count; - @property - def count(self): - """The number of the queries managed by this queryset.""" - return self._count - - # IDL: undefined destroy(); - def destroy(self): - """Destroy this QuerySet.""" - raise NotImplementedError() - - -# %%%%% Post processing - -# Note that some toplevel classes are already filtered out by the codegen, -# like GPUExternalTexture and GPUUncapturedErrorEvent, and more. - -apidiff.remove_hidden_methods(globals()) - - -def _seed_object_counts(): - m = globals() - for class_name in __all__: - cls = m[class_name] - if not class_name.endswith(("Base", "Mixin")): - if hasattr(cls, "_ot"): - object_tracker.counts[class_name] = 0 - - -def generic_repr(self): - try: - module_name = self.__module__ - if module_name.startswith("wgpu"): - if module_name == "wgpu._classes": - module_name = "wgpu" - elif "backends." 
in module_name: - backend_name = self.__module__.split("backends")[-1].split(".")[1] - module_name = f"wgpu.backends.{backend_name}" - object_str = "object" - if isinstance(self, GPUObjectBase): - object_str = f"object '{self.label}'" - return ( - f"<{module_name}.{self.__class__.__name__} {object_str} at {hex(id(self))}>" - ) - except Exception: # easy fallback - return object.__repr__(self) - - -def _set_repr_methods(): - m = globals() - for class_name in __all__: - cls = m[class_name] - if len(cls.mro()) == 2: # class itself and object - cls.__repr__ = generic_repr - - -_seed_object_counts() -_set_repr_methods() diff --git a/wgpu/_coreutils.py b/wgpu/_coreutils.py deleted file mode 100644 index d126886..0000000 --- a/wgpu/_coreutils.py +++ /dev/null @@ -1,157 +0,0 @@ -""" -Core utilities that are loaded into the root namespace or used internally. -""" - -import re -import sys -import atexit -import logging -import importlib.resources -from contextlib import ExitStack - - -# Our resources are most probably always on the file system. But in -# case they don't we have a nice exit handler to remove temporary files. -_resource_files = ExitStack() -atexit.register(_resource_files.close) - - -def get_resource_filename(name): - """Get the filename to a wgpu resource.""" - if sys.version_info < (3, 9): - context = importlib.resources.path("wgpu.resources", name) - else: - ref = importlib.resources.files("wgpu.resources") / name - context = importlib.resources.as_file(ref) - path = _resource_files.enter_context(context) - return str(path) - - -class WGPULogger(logging.getLoggerClass()): - """A custom logger for which we can detect changes in its level.""" - - def setLevel(self, level): # noqa: N802 - super().setLevel(level) - for cb in logger_set_level_callbacks: - cb(self.level) # use arg that is always an int - - -logger_set_level_callbacks = [] -_original_logger_cls = logging.getLoggerClass() -logging.setLoggerClass(WGPULogger) -logger = logging.getLogger("wgpu") -logging.setLoggerClass(_original_logger_cls) -assert isinstance(logger, WGPULogger) -logger.setLevel(logging.WARNING) - - -_re_wgpu_ob = re.compile(r"`<[a-z|A-Z]+-\([0-9]+, [0-9]+, [a-z|A-Z]+\)>`") - - -def error_message_hash(message): - # Remove wgpu object representations, because they contain id's that may change at each draw. - # E.g. `` - message = _re_wgpu_ob.sub("WGPU_OBJECT", message) - return hash(message) - - -_flag_cache = {} # str -> int - - -def str_flag_to_int(flag, s): - """Allow using strings for flags, i.e. 'READ' instead of wgpu.MapMode.READ. - No worries about repeated overhead, because the resuls are cached. - """ - cache_key = ( - f"{flag._name}.{s}" # using private attribute, lets call this a friend func - ) - value = _flag_cache.get(cache_key, None) - - if value is None: - parts = [p.strip() for p in s.split("|")] - parts = [p for p in parts if p] - invalid_parts = [p for p in parts if p.startswith("_")] - if not parts or invalid_parts: - raise ValueError(f"Invalid flag value: {s}") - - value = 0 - for p in parts: - try: - v = flag.__dict__[p.upper()] - value += v - except KeyError: - raise ValueError(f"Invalid flag value for {flag}: '{p}'") - _flag_cache[cache_key] = value - - return value - - -class ApiDiff: - """Helper class to define differences in the API by annotating - methods. This way, these difference are made explicit, plus they're - logged so we can automatically included these changes in the docs. 
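The decorators of this class are applied throughout _classes.py (e.g. ``@apidiff.add(...)`` on ``GPUQueue.read_buffer`` above). A condensed sketch of the pattern; ``GPUFoo`` and its methods are placeholders for illustration only:

apidiff = ApiDiff()

class GPUFoo:
    @apidiff.add("For symmetry with queue.write_buffer")
    def read_buffer(self, buffer, buffer_offset=0, size=None):
        ...

    @apidiff.hide("Specific to browsers")
    def copy_external_image_to_texture(self, source, destination, copy_size):
        ...

# At the end of the module, methods marked as hidden are removed again:
apidiff.remove_hidden_methods(globals())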
- """ - - def __init__(self): - self.hidden = {} - self.added = {} - self.changed = {} - - def hide(self, func_or_text): - """Decorator to discard certain methods from the "reference" API. - Intended only for the base API where we deviate from WebGPU. - """ - return self._diff("hidden", func_or_text) - - def add(self, func_or_text): - """Decorator to add certain methods that are not part of the "reference" spec. - Intended for the base API where we implement additional/alternative API, - and in the backend implementations where additional methods are provided. - """ - return self._diff("added", func_or_text) - - def change(self, func_or_text): - """Decorator to mark certain methods as having a different signature - as the "reference" spec. Intended only for the base API where we deviate - from WebGPU. - """ - return self._diff("changed", func_or_text) - - def _diff(self, method, func_or_text): - def wrapper(f): - d = getattr(self, method) - name = f.__qualname__ if hasattr(f, "__qualname__") else f.fget.__qualname__ - d[name] = text - return f - - if callable(func_or_text): - text = None - return wrapper(func_or_text) - else: - text = func_or_text - return wrapper - - def remove_hidden_methods(self, scope): - """Call this to remove methods from the API that were decorated as hidden.""" - for name in self.hidden: - classname, _, methodname = name.partition(".") - cls = scope[classname] - delattr(cls, methodname) - - @property - def __doc__(self): - """Generate a docstring for this instance. This way we can - automatically document API differences. - """ - lines = [""] - for name, msg in self.hidden.items(): - line = f" * Hides ``{name}()``" - lines.append(f"{line} - {msg}" if msg else line) - for name, msg in self.added.items(): - line = f" * Adds ``{name}()``" - lines.append(f"{line} - {msg}" if msg else line) - for name, msg in self.changed.items(): - line = f" * Changes ``{name}()``" - lines.append(f"{line} - {msg}" if msg else line) - lines.append("") - return "\n".join(sorted(lines)) diff --git a/wgpu/_diagnostics.py b/wgpu/_diagnostics.py deleted file mode 100644 index e0e0c83..0000000 --- a/wgpu/_diagnostics.py +++ /dev/null @@ -1,520 +0,0 @@ -""" -Logic related to providing diagnostic info on wgpu. -""" - -import os -import sys -import platform - - -class DiagnosticsRoot: - """Root object to access wgpu diagnostics (i.e. ``wgpu.diagnostics``). - - Per-topic diagnostics can be accessed as attributes on this object. - These include ``system``, ``wgpu_native_info``, ``versions``, - ``object_counts``, ``wgpu_natrive_counts``. - """ - - def __init__(self): - self._diagnostics_instances = {} - - def __repr__(self): - topics = ", ".join(self._diagnostics_instances.keys()) - return f"" - - def _register_diagnostics(self, name, ob): - self._diagnostics_instances[name] = ob - setattr(self, name, ob) - - def get_dict(self): - """Get a dict that represents the full diagnostics info. - - The keys are the diagnostic topics, and the values are dicts - of dicts. See e.g. ``wgpu.diagnostics.counts.get_dict()`` for - a topic-specific dict. 
- """ - result = {} - for name, ob in self._diagnostics_instances.items(): - result[name] = ob.get_dict() - return result - - def get_report(self): - """Get the full textual diagnostic report (as a str).""" - text = "" - for name, ob in self._diagnostics_instances.items(): - text += ob.get_report() - return text - - def print_report(self): - """Convenience method to print the full diagnostics report.""" - print(self.get_report(), end="") - - -class Diagnostics: - """Object that represents diagnostics on a specific topic. - - This is a base class that must be subclassed to provide diagnostics - on a certain topic. Instantiating the class registers it with the - root diagnostics object. - """ - - def __init__(self, name): - diagnostics._register_diagnostics(name, self) - self.name = name - self.object_counts = {} - - def __repr__(self): - return f"" - - def get_dict(self): - """Get the diagnostics for this topic, in the form of a Python dict. - - Subclasses must implement this method. The dict can be a simple - map of keys to values (str, int, float):: - - foo: 1 - bar: 2 - - If the values are dicts, the data has a table-like layout, with - the keys representing the table header:: - - count mem - - Adapter: 1 264 - Buffer: 4 704 - - Subdicts are also supported, which results in multi-row entries. - In the report, the keys of the subdicts have colons behind them:: - - count mem backend o v e el_size - - Adapter: 1 264 vulkan: 1 0 0 264 - d3d12: 1 0 0 220 - Buffer: 4 704 vulkan: 4 0 0 176 - d3d12: 0 0 0 154 - - """ - raise NotImplementedError() - - def get_subscript(self): - """Get informative text that helps interpret the report. - - Subclasses can implement this method. The text will show below the table - in the report. - """ - return "" # Optional - - def get_report(self): - """Get the textual diagnostics report for this topic.""" - text = f"\n██ {self.name}:\n\n" - text += dict_to_text(self.get_dict()) - subscript = self.get_subscript() - if subscript: - text += "\n" + subscript.rstrip() + "\n" - return text - - def print_report(self): - """Print the diagnostics report for this topic.""" - print(self.get_report(), end="") - - -class ObjectTracker: - """Little object to help track object counts.""" - - def __init__(self): - self.counts = {} - self.amounts = {} - - def increase(self, name, amount=0): - """Bump the counter.""" - self.counts[name] = self.counts.get(name, 0) + 1 - if amount: - self.amounts[name] = self.amounts.get(name, 0) + amount - - def decrease(self, name, amount=0): - """Bump the counter back.""" - self.counts[name] -= 1 - if amount: - self.amounts[name] -= amount - - -def derive_header(dct): - """Derive a table-header from the given dict.""" - - if not isinstance(dct, dict): # no-cover - raise TypeError(f"Not a dict: {dct}") - - header = [] - sub_dicts = {} - - for key, val in dct.items(): - if not isinstance(val, dict): # no-cover - raise TypeError(f"Element not a dict: {val}") - for k, v in val.items(): - if k not in header: - header.append(k) - if isinstance(v, dict): - sub_dicts[k] = v - - for k, d in sub_dicts.items(): - while k in header: - header.remove(k) - header.append(k) - sub_header = derive_header(d) - for k in sub_header[1:]: - if k not in header: - header.append(k) - - # Add header item for first column, i.e. 
the key / row title
-    header.insert(0, "")
-
-    return header
-
-
-def dict_to_text(d, header=None):
-    """Convert a dict data structure to a textual table representation."""
-
-    if not d:
-        return "No data\n"
-
-    # Copy the dict, with simple key-value dicts being transformed into table-like dicts.
-    # That way the code in derive_header() and dict_to_table() can assume the table-like
-    # data structure, keeping it simpler.
-    d2 = {}
-    for key, val in d.items():
-        if not isinstance(val, dict):
-            val = {"": val}
-        d2[key] = val
-    d = d2
-
-    if not header:
-        header = derive_header(d)
-
-    # We have a table-like-layout if any of the values in the header is non-empty
-    table_layout = any(header)
-
-    # Get the table
-    rows = dict_to_table(d, header)
-    ncols = len(header)
-
-    # Sanity check (guard assumptions about dict_to_table)
-    for row in rows:
-        assert len(row) == ncols, "dict_to_table failed"
-        for i in range(ncols):
-            assert isinstance(row[i], str), "dict_to_table failed"
-
-    # Insert heading
-    if table_layout:
-        rows.insert(0, header.copy())
-        rows.insert(1, [""] * ncols)
-
-    # Determine which columns have values with a colon at the end
-    column_has_colon = [False for _ in range(ncols)]
-    for row in rows:
-        for i in range(ncols):
-            column_has_colon[i] |= row[i].endswith(":")
-
-    # Align the values that don't have a colon at the end
-    for row in rows:
-        for i in range(ncols):
-            word = row[i]
-            if column_has_colon[i] and not word.endswith(":"):
-                row[i] = word + " "
-
-    # Establish max lengths
-    max_lens = [0 for _ in range(ncols)]
-    for row in rows:
-        for i in range(ncols):
-            max_lens[i] = max(max_lens[i], len(row[i]))
-
-    # Justify first column (always rjust)
-    for row in rows:
-        row[0] = row[0].rjust(max_lens[0])
-
-    # For the table layout we also rjust the other columns
-    if table_layout:
-        for row in rows:
-            for i in range(1, ncols):
-                row[i] = row[i].rjust(max_lens[i])
-
-    # Join into a consistent text
-    lines = [" ".join(row).rstrip() for row in rows]
-    text = "\n".join(lines)
-    return text.rstrip() + "\n"
-
-
-def dict_to_table(d, header, header_offset=0):
-    """Convert a dict data structure to a table (a list of lists of strings).
-    The keys form the first entry of the row. Values that are dicts recurse.
-    """
-
-    ncols = len(header)
-    rows = []
-
-    for row_title, values in d.items():
-        if row_title == "total" and row_title == list(d.keys())[-1]:
-            rows.append([""] * ncols)
-        row = [row_title + ":" if row_title else ""]
-        rows.append(row)
-        for i in range(header_offset + 1, len(header)):
-            key = header[i]
-            val = values.get(key, None)
-            if val is None:
-                row.append("")
-            elif isinstance(val, str):
-                row.append(val)
-            elif isinstance(val, int):
-                row.append(int_repr(val))
-            elif isinstance(val, float):
-                row.append(f"{val:.6g}")
-            elif isinstance(val, dict):
-                subrows = dict_to_table(val, header, i)
-                if len(subrows) == 0:
-                    row += [""] * (ncols - i)
-                else:
-                    row += subrows[0]
-                    extrarows = [[""] * i + subrow for subrow in subrows[1:]]
-                    rows.extend(extrarows)
-                break  # header items are consumed by the sub
-            else:  # no-cover
-                raise TypeError(f"Unexpected table value: {val}")
-
-    return rows
-
-
-def int_repr(val):
-    """Represent an integer using K and M suffixes."""
-    prefix = "-" if val < 0 else ""
-    val = abs(val)
-    if val >= 1_000_000_000:  # >= 1G
-        s = str(val / 1_000_000_000)
-        suffix = "G"
-    elif val >= 1_000_000:  # >= 1M
-        s = str(val / 1_000_000)
-        suffix = "M"
-    elif val >= 1_000:  # >= 1K
-        s = str(val / 1_000)
-        suffix = "K"
-    else:
-        s = str(val)
-        suffix = ""
-    if "."
in s: - s1, _, s2 = s.partition(".") - n_decimals = max(0, 3 - len(s1)) - s = s1 - if n_decimals: - s2 += "000" - s = s1 + "." + s2[:n_decimals] - return prefix + s + suffix - - -# Map that we need to calculate texture resource consumption. -# We need to keep this up-to-date as formats change, we have a unit test for this. -# Also see https://wgpu.rs/doc/wgpu/enum.TextureFormat.html - -texture_format_to_bpp = { - # 8 bit - "r8unorm": 8, - "r8snorm": 8, - "r8uint": 8, - "r8sint": 8, - # 16 bit - "r16uint": 16, - "r16sint": 16, - "r16float": 16, - "rg8unorm": 16, - "rg8snorm": 16, - "rg8uint": 16, - "rg8sint": 16, - # 32 bit - "r32uint": 32, - "r32sint": 32, - "r32float": 32, - "rg16uint": 32, - "rg16sint": 32, - "rg16float": 32, - "rgba8unorm": 32, - "rgba8unorm-srgb": 32, - "rgba8snorm": 32, - "rgba8uint": 32, - "rgba8sint": 32, - "bgra8unorm": 32, - "bgra8unorm-srgb": 32, - # special fits - "rgb9e5ufloat": 32, # 3*9 + 5 - "rgb10a2uint": 32, # 3*10 + 2 - "rgb10a2unorm": 32, # 3*10 + 2 - "rg11b10ufloat": 32, # 2*11 + 10 - # 64 bit - "rg32uint": 64, - "rg32sint": 64, - "rg32float": 64, - "rgba16uint": 64, - "rgba16sint": 64, - "rgba16float": 64, - # 128 bit - "rgba32uint": 128, - "rgba32sint": 128, - "rgba32float": 128, - # depth and stencil - "stencil8": 8, - "depth16unorm": 16, - "depth24plus": 24, # "... at least 24 bit integer depth" ? - "depth24plus-stencil8": 32, - "depth32float": 32, - "depth32float-stencil8": 40, - # Compressed - "bc1-rgba-unorm": 4, # 4x4 blocks, 8 bytes per block - "bc1-rgba-unorm-srgb": 4, - "bc2-rgba-unorm": 8, # 4x4 blocks, 16 bytes per block - "bc2-rgba-unorm-srgb": 8, - "bc3-rgba-unorm": 8, # 4x4 blocks, 16 bytes per block - "bc3-rgba-unorm-srgb": 8, - "bc4-r-unorm": 4, - "bc4-r-snorm": 4, - "bc5-rg-unorm": 8, - "bc5-rg-snorm": 8, - "bc6h-rgb-ufloat": 8, - "bc6h-rgb-float": 8, - "bc7-rgba-unorm": 8, - "bc7-rgba-unorm-srgb": 8, - "etc2-rgb8unorm": 4, - "etc2-rgb8unorm-srgb": 4, - "etc2-rgb8a1unorm": 4, - "etc2-rgb8a1unorm-srgb": 4, - "etc2-rgba8unorm": 8, - "etc2-rgba8unorm-srgb": 8, - "eac-r11unorm": 4, - "eac-r11snorm": 4, - "eac-rg11unorm": 8, - "eac-rg11snorm": 8, - # astc always uses 16 bytes (128 bits) per block - "astc-4x4-unorm": 8.0, - "astc-4x4-unorm-srgb": 8.0, - "astc-5x4-unorm": 6.4, - "astc-5x4-unorm-srgb": 6.4, - "astc-5x5-unorm": 5.12, - "astc-5x5-unorm-srgb": 5.12, - "astc-6x5-unorm": 4.267, - "astc-6x5-unorm-srgb": 4.267, - "astc-6x6-unorm": 3.556, - "astc-6x6-unorm-srgb": 3.556, - "astc-8x5-unorm": 3.2, - "astc-8x5-unorm-srgb": 3.2, - "astc-8x6-unorm": 2.667, - "astc-8x6-unorm-srgb": 2.667, - "astc-8x8-unorm": 2.0, - "astc-8x8-unorm-srgb": 2.0, - "astc-10x5-unorm": 2.56, - "astc-10x5-unorm-srgb": 2.56, - "astc-10x6-unorm": 2.133, - "astc-10x6-unorm-srgb": 2.133, - "astc-10x8-unorm": 1.6, - "astc-10x8-unorm-srgb": 1.6, - "astc-10x10-unorm": 1.28, - "astc-10x10-unorm-srgb": 1.28, - "astc-12x10-unorm": 1.067, - "astc-12x10-unorm-srgb": 1.067, - "astc-12x12-unorm": 0.8889, - "astc-12x12-unorm-srgb": 0.8889, -} - - -# %% global diagnostics object, and builtin diagnostics - - -# The global root object -diagnostics = DiagnosticsRoot() - - -class SystemDiagnostics(Diagnostics): - """Provides basic system info.""" - - def get_dict(self): - return { - "platform": platform.platform(), - # "platform_version": platform.version(), # can be quite long - "python_implementation": platform.python_implementation(), - "python": platform.python_version(), - } - - -class WgpuNativeInfoDiagnostics(Diagnostics): - """Provides metadata about the wgpu-native 
backend.""" - - def get_dict(self): - # Get modules, or skip - try: - wgpu = sys.modules["wgpu"] - wgpu_native = wgpu.backends.wgpu_native - except (KeyError, AttributeError): # no-cover - return {} - - # Process lib path - lib_path = wgpu_native.lib_path - wgpu_path = os.path.dirname(wgpu.__file__) - if lib_path.startswith(wgpu_path): - lib_path = "." + os.path.sep + lib_path[len(wgpu_path) :].lstrip("/\\") - - return { - "expected_version": wgpu_native.__version__, - "lib_version": ".".join(str(i) for i in wgpu_native.lib_version_info), - "lib_path": lib_path, - } - - -class VersionDiagnostics(Diagnostics): - """Provides version numbers from relevant libraries.""" - - def get_dict(self): - core_libs = ["wgpu", "cffi"] - qt_libs = ["PySide6", "PyQt6", "PySide2", "PyQt5"] - gui_libs = qt_libs + ["glfw", "jupyter_rfb", "wx"] - extra_libs = ["numpy", "pygfx", "pylinalg", "fastplotlib"] - - info = {} - - for libname in core_libs + gui_libs + extra_libs: - try: - ver = sys.modules[libname].__version__ - except (KeyError, AttributeError): - pass - else: - info[libname] = str(ver) - - return info - - -class ObjectCountDiagnostics(Diagnostics): - """Provides object counts and resource consumption, used in _classes.py.""" - - def __init__(self, name): - super().__init__(name) - self.tracker = ObjectTracker() - - def get_dict(self): - """Get diagnostics as a dict.""" - object_counts = self.tracker.counts - resource_mem = self.tracker.amounts - - # Collect counts - result = {} - for name in sorted(object_counts.keys()): - d = {"count": object_counts[name]} - if name in resource_mem: - d["resource_mem"] = resource_mem[name] - result[name[3:]] = d # drop the 'GPU' from the name - - # Add totals - totals = {} - for key in ("count", "resource_mem"): - totals[key] = sum(v.get(key, 0) for v in result.values()) - result["total"] = totals - - return result - - -SystemDiagnostics("system") -VersionDiagnostics("versions") -WgpuNativeInfoDiagnostics("wgpu_native_info") -ObjectCountDiagnostics("object_counts") diff --git a/wgpu/classes.py b/wgpu/classes.py deleted file mode 100644 index 2019014..0000000 --- a/wgpu/classes.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -The classes that make up the wgpu API. -These can be accessed via ``wgpu.classes``, -but are also available in the root wgpu namespace. -""" - -from ._classes import * # noqa: F401, F403 -from ._classes import __all__ # noqa: F401 diff --git a/wgpu/enums.py b/wgpu/enums.py deleted file mode 100644 index a745626..0000000 --- a/wgpu/enums.py +++ /dev/null @@ -1,686 +0,0 @@ -""" -These enums are defined in ``wgpu.enums``, but are also available from the root wgpu namespace. - -Enums are choices; exactly one field must be selected. -Enum values are strings, so instead of ``wgpu.TextureFormat.rgba8unorm``, -one can also write ``"rgba8unorm"``. 
-""" - -_use_sphinx_repr = False - - -class Enum: - def __init__(self, name, **kwargs): - self._name = name - for key, val in kwargs.items(): - setattr(self, key, val) - - def __iter__(self): - return iter( - [getattr(self, key) for key in dir(self) if not key.startswith("_")] - ) - - def __repr__(self): - if _use_sphinx_repr: # no-cover - return "" - options = ", ".join(f"'{x}'" for x in self) - return f"" - - -# CODE BELOW THIS POINT IS AUTOGENERATED - DO NOT EDIT - - -# There are 33 enums - -__all__ = [ - "PowerPreference", - "FeatureName", - "BufferMapState", - "TextureDimension", - "TextureViewDimension", - "TextureAspect", - "TextureFormat", - "AddressMode", - "FilterMode", - "MipmapFilterMode", - "CompareFunction", - "BufferBindingType", - "SamplerBindingType", - "TextureSampleType", - "StorageTextureAccess", - "CompilationMessageType", - "PipelineErrorReason", - "AutoLayoutMode", - "PrimitiveTopology", - "FrontFace", - "CullMode", - "BlendFactor", - "BlendOperation", - "StencilOperation", - "IndexFormat", - "VertexFormat", - "VertexStepMode", - "LoadOp", - "StoreOp", - "QueryType", - "CanvasAlphaMode", - "DeviceLostReason", - "ErrorFilter", -] - - -#: * "low_power" -#: * "high_performance" -PowerPreference = Enum( - "PowerPreference", - low_power="low-power", - high_performance="high-performance", -) - -#: * "depth_clip_control" -#: * "depth32float_stencil8" -#: * "texture_compression_bc" -#: * "texture_compression_etc2" -#: * "texture_compression_astc" -#: * "timestamp_query" -#: * "indirect_first_instance" -#: * "shader_f16" -#: * "rg11b10ufloat_renderable" -#: * "bgra8unorm_storage" -#: * "float32_filterable" -FeatureName = Enum( - "FeatureName", - depth_clip_control="depth-clip-control", - depth32float_stencil8="depth32float-stencil8", - texture_compression_bc="texture-compression-bc", - texture_compression_etc2="texture-compression-etc2", - texture_compression_astc="texture-compression-astc", - timestamp_query="timestamp-query", - indirect_first_instance="indirect-first-instance", - shader_f16="shader-f16", - rg11b10ufloat_renderable="rg11b10ufloat-renderable", - bgra8unorm_storage="bgra8unorm-storage", - float32_filterable="float32-filterable", -) - -#: * "unmapped" -#: * "pending" -#: * "mapped" -BufferMapState = Enum( - "BufferMapState", - unmapped="unmapped", - pending="pending", - mapped="mapped", -) - -#: * "d1" -#: * "d2" -#: * "d3" -TextureDimension = Enum( - "TextureDimension", - d1="1d", - d2="2d", - d3="3d", -) - -#: * "d1" -#: * "d2" -#: * "d2_array" -#: * "cube" -#: * "cube_array" -#: * "d3" -TextureViewDimension = Enum( - "TextureViewDimension", - d1="1d", - d2="2d", - d2_array="2d-array", - cube="cube", - cube_array="cube-array", - d3="3d", -) - -#: * "all" -#: * "stencil_only" -#: * "depth_only" -TextureAspect = Enum( - "TextureAspect", - all="all", - stencil_only="stencil-only", - depth_only="depth-only", -) - -#: * "r8unorm" -#: * "r8snorm" -#: * "r8uint" -#: * "r8sint" -#: * "r16uint" -#: * "r16sint" -#: * "r16float" -#: * "rg8unorm" -#: * "rg8snorm" -#: * "rg8uint" -#: * "rg8sint" -#: * "r32uint" -#: * "r32sint" -#: * "r32float" -#: * "rg16uint" -#: * "rg16sint" -#: * "rg16float" -#: * "rgba8unorm" -#: * "rgba8unorm_srgb" -#: * "rgba8snorm" -#: * "rgba8uint" -#: * "rgba8sint" -#: * "bgra8unorm" -#: * "bgra8unorm_srgb" -#: * "rgb9e5ufloat" -#: * "rgb10a2uint" -#: * "rgb10a2unorm" -#: * "rg11b10ufloat" -#: * "rg32uint" -#: * "rg32sint" -#: * "rg32float" -#: * "rgba16uint" -#: * "rgba16sint" -#: * "rgba16float" -#: * "rgba32uint" -#: * "rgba32sint" -#: * 
"rgba32float" -#: * "stencil8" -#: * "depth16unorm" -#: * "depth24plus" -#: * "depth24plus_stencil8" -#: * "depth32float" -#: * "depth32float_stencil8" -#: * "bc1_rgba_unorm" -#: * "bc1_rgba_unorm_srgb" -#: * "bc2_rgba_unorm" -#: * "bc2_rgba_unorm_srgb" -#: * "bc3_rgba_unorm" -#: * "bc3_rgba_unorm_srgb" -#: * "bc4_r_unorm" -#: * "bc4_r_snorm" -#: * "bc5_rg_unorm" -#: * "bc5_rg_snorm" -#: * "bc6h_rgb_ufloat" -#: * "bc6h_rgb_float" -#: * "bc7_rgba_unorm" -#: * "bc7_rgba_unorm_srgb" -#: * "etc2_rgb8unorm" -#: * "etc2_rgb8unorm_srgb" -#: * "etc2_rgb8a1unorm" -#: * "etc2_rgb8a1unorm_srgb" -#: * "etc2_rgba8unorm" -#: * "etc2_rgba8unorm_srgb" -#: * "eac_r11unorm" -#: * "eac_r11snorm" -#: * "eac_rg11unorm" -#: * "eac_rg11snorm" -#: * "astc_4x4_unorm" -#: * "astc_4x4_unorm_srgb" -#: * "astc_5x4_unorm" -#: * "astc_5x4_unorm_srgb" -#: * "astc_5x5_unorm" -#: * "astc_5x5_unorm_srgb" -#: * "astc_6x5_unorm" -#: * "astc_6x5_unorm_srgb" -#: * "astc_6x6_unorm" -#: * "astc_6x6_unorm_srgb" -#: * "astc_8x5_unorm" -#: * "astc_8x5_unorm_srgb" -#: * "astc_8x6_unorm" -#: * "astc_8x6_unorm_srgb" -#: * "astc_8x8_unorm" -#: * "astc_8x8_unorm_srgb" -#: * "astc_10x5_unorm" -#: * "astc_10x5_unorm_srgb" -#: * "astc_10x6_unorm" -#: * "astc_10x6_unorm_srgb" -#: * "astc_10x8_unorm" -#: * "astc_10x8_unorm_srgb" -#: * "astc_10x10_unorm" -#: * "astc_10x10_unorm_srgb" -#: * "astc_12x10_unorm" -#: * "astc_12x10_unorm_srgb" -#: * "astc_12x12_unorm" -#: * "astc_12x12_unorm_srgb" -TextureFormat = Enum( - "TextureFormat", - r8unorm="r8unorm", - r8snorm="r8snorm", - r8uint="r8uint", - r8sint="r8sint", - r16uint="r16uint", - r16sint="r16sint", - r16float="r16float", - rg8unorm="rg8unorm", - rg8snorm="rg8snorm", - rg8uint="rg8uint", - rg8sint="rg8sint", - r32uint="r32uint", - r32sint="r32sint", - r32float="r32float", - rg16uint="rg16uint", - rg16sint="rg16sint", - rg16float="rg16float", - rgba8unorm="rgba8unorm", - rgba8unorm_srgb="rgba8unorm-srgb", - rgba8snorm="rgba8snorm", - rgba8uint="rgba8uint", - rgba8sint="rgba8sint", - bgra8unorm="bgra8unorm", - bgra8unorm_srgb="bgra8unorm-srgb", - rgb9e5ufloat="rgb9e5ufloat", - rgb10a2uint="rgb10a2uint", - rgb10a2unorm="rgb10a2unorm", - rg11b10ufloat="rg11b10ufloat", - rg32uint="rg32uint", - rg32sint="rg32sint", - rg32float="rg32float", - rgba16uint="rgba16uint", - rgba16sint="rgba16sint", - rgba16float="rgba16float", - rgba32uint="rgba32uint", - rgba32sint="rgba32sint", - rgba32float="rgba32float", - stencil8="stencil8", - depth16unorm="depth16unorm", - depth24plus="depth24plus", - depth24plus_stencil8="depth24plus-stencil8", - depth32float="depth32float", - depth32float_stencil8="depth32float-stencil8", - bc1_rgba_unorm="bc1-rgba-unorm", - bc1_rgba_unorm_srgb="bc1-rgba-unorm-srgb", - bc2_rgba_unorm="bc2-rgba-unorm", - bc2_rgba_unorm_srgb="bc2-rgba-unorm-srgb", - bc3_rgba_unorm="bc3-rgba-unorm", - bc3_rgba_unorm_srgb="bc3-rgba-unorm-srgb", - bc4_r_unorm="bc4-r-unorm", - bc4_r_snorm="bc4-r-snorm", - bc5_rg_unorm="bc5-rg-unorm", - bc5_rg_snorm="bc5-rg-snorm", - bc6h_rgb_ufloat="bc6h-rgb-ufloat", - bc6h_rgb_float="bc6h-rgb-float", - bc7_rgba_unorm="bc7-rgba-unorm", - bc7_rgba_unorm_srgb="bc7-rgba-unorm-srgb", - etc2_rgb8unorm="etc2-rgb8unorm", - etc2_rgb8unorm_srgb="etc2-rgb8unorm-srgb", - etc2_rgb8a1unorm="etc2-rgb8a1unorm", - etc2_rgb8a1unorm_srgb="etc2-rgb8a1unorm-srgb", - etc2_rgba8unorm="etc2-rgba8unorm", - etc2_rgba8unorm_srgb="etc2-rgba8unorm-srgb", - eac_r11unorm="eac-r11unorm", - eac_r11snorm="eac-r11snorm", - eac_rg11unorm="eac-rg11unorm", - eac_rg11snorm="eac-rg11snorm", - 
astc_4x4_unorm="astc-4x4-unorm", - astc_4x4_unorm_srgb="astc-4x4-unorm-srgb", - astc_5x4_unorm="astc-5x4-unorm", - astc_5x4_unorm_srgb="astc-5x4-unorm-srgb", - astc_5x5_unorm="astc-5x5-unorm", - astc_5x5_unorm_srgb="astc-5x5-unorm-srgb", - astc_6x5_unorm="astc-6x5-unorm", - astc_6x5_unorm_srgb="astc-6x5-unorm-srgb", - astc_6x6_unorm="astc-6x6-unorm", - astc_6x6_unorm_srgb="astc-6x6-unorm-srgb", - astc_8x5_unorm="astc-8x5-unorm", - astc_8x5_unorm_srgb="astc-8x5-unorm-srgb", - astc_8x6_unorm="astc-8x6-unorm", - astc_8x6_unorm_srgb="astc-8x6-unorm-srgb", - astc_8x8_unorm="astc-8x8-unorm", - astc_8x8_unorm_srgb="astc-8x8-unorm-srgb", - astc_10x5_unorm="astc-10x5-unorm", - astc_10x5_unorm_srgb="astc-10x5-unorm-srgb", - astc_10x6_unorm="astc-10x6-unorm", - astc_10x6_unorm_srgb="astc-10x6-unorm-srgb", - astc_10x8_unorm="astc-10x8-unorm", - astc_10x8_unorm_srgb="astc-10x8-unorm-srgb", - astc_10x10_unorm="astc-10x10-unorm", - astc_10x10_unorm_srgb="astc-10x10-unorm-srgb", - astc_12x10_unorm="astc-12x10-unorm", - astc_12x10_unorm_srgb="astc-12x10-unorm-srgb", - astc_12x12_unorm="astc-12x12-unorm", - astc_12x12_unorm_srgb="astc-12x12-unorm-srgb", -) - -#: * "clamp_to_edge" -#: * "repeat" -#: * "mirror_repeat" -AddressMode = Enum( - "AddressMode", - clamp_to_edge="clamp-to-edge", - repeat="repeat", - mirror_repeat="mirror-repeat", -) - -#: * "nearest" -#: * "linear" -FilterMode = Enum( - "FilterMode", - nearest="nearest", - linear="linear", -) - -#: * "nearest" -#: * "linear" -MipmapFilterMode = Enum( - "MipmapFilterMode", - nearest="nearest", - linear="linear", -) - -#: * "never" -#: * "less" -#: * "equal" -#: * "less_equal" -#: * "greater" -#: * "not_equal" -#: * "greater_equal" -#: * "always" -CompareFunction = Enum( - "CompareFunction", - never="never", - less="less", - equal="equal", - less_equal="less-equal", - greater="greater", - not_equal="not-equal", - greater_equal="greater-equal", - always="always", -) - -#: * "uniform" -#: * "storage" -#: * "read_only_storage" -BufferBindingType = Enum( - "BufferBindingType", - uniform="uniform", - storage="storage", - read_only_storage="read-only-storage", -) - -#: * "filtering" -#: * "non_filtering" -#: * "comparison" -SamplerBindingType = Enum( - "SamplerBindingType", - filtering="filtering", - non_filtering="non-filtering", - comparison="comparison", -) - -#: * "float" -#: * "unfilterable_float" -#: * "depth" -#: * "sint" -#: * "uint" -TextureSampleType = Enum( - "TextureSampleType", - float="float", - unfilterable_float="unfilterable-float", - depth="depth", - sint="sint", - uint="uint", -) - -#: * "write_only" -#: * "read_only" -#: * "read_write" -StorageTextureAccess = Enum( - "StorageTextureAccess", - write_only="write-only", - read_only="read-only", - read_write="read-write", -) - -#: * "error" -#: * "warning" -#: * "info" -CompilationMessageType = Enum( - "CompilationMessageType", - error="error", - warning="warning", - info="info", -) - -#: * "validation" -#: * "internal" -PipelineErrorReason = Enum( - "PipelineErrorReason", - validation="validation", - internal="internal", -) - -#: * "auto" -AutoLayoutMode = Enum( - "AutoLayoutMode", - auto="auto", -) - -#: * "point_list" -#: * "line_list" -#: * "line_strip" -#: * "triangle_list" -#: * "triangle_strip" -PrimitiveTopology = Enum( - "PrimitiveTopology", - point_list="point-list", - line_list="line-list", - line_strip="line-strip", - triangle_list="triangle-list", - triangle_strip="triangle-strip", -) - -#: * "ccw" -#: * "cw" -FrontFace = Enum( - "FrontFace", - ccw="ccw", - cw="cw", -) - -#: * 
"none" -#: * "front" -#: * "back" -CullMode = Enum( - "CullMode", - none="none", - front="front", - back="back", -) - -#: * "zero" -#: * "one" -#: * "src" -#: * "one_minus_src" -#: * "src_alpha" -#: * "one_minus_src_alpha" -#: * "dst" -#: * "one_minus_dst" -#: * "dst_alpha" -#: * "one_minus_dst_alpha" -#: * "src_alpha_saturated" -#: * "constant" -#: * "one_minus_constant" -BlendFactor = Enum( - "BlendFactor", - zero="zero", - one="one", - src="src", - one_minus_src="one-minus-src", - src_alpha="src-alpha", - one_minus_src_alpha="one-minus-src-alpha", - dst="dst", - one_minus_dst="one-minus-dst", - dst_alpha="dst-alpha", - one_minus_dst_alpha="one-minus-dst-alpha", - src_alpha_saturated="src-alpha-saturated", - constant="constant", - one_minus_constant="one-minus-constant", -) - -#: * "add" -#: * "subtract" -#: * "reverse_subtract" -#: * "min" -#: * "max" -BlendOperation = Enum( - "BlendOperation", - add="add", - subtract="subtract", - reverse_subtract="reverse-subtract", - min="min", - max="max", -) - -#: * "keep" -#: * "zero" -#: * "replace" -#: * "invert" -#: * "increment_clamp" -#: * "decrement_clamp" -#: * "increment_wrap" -#: * "decrement_wrap" -StencilOperation = Enum( - "StencilOperation", - keep="keep", - zero="zero", - replace="replace", - invert="invert", - increment_clamp="increment-clamp", - decrement_clamp="decrement-clamp", - increment_wrap="increment-wrap", - decrement_wrap="decrement-wrap", -) - -#: * "uint16" -#: * "uint32" -IndexFormat = Enum( - "IndexFormat", - uint16="uint16", - uint32="uint32", -) - -#: * "uint8x2" -#: * "uint8x4" -#: * "sint8x2" -#: * "sint8x4" -#: * "unorm8x2" -#: * "unorm8x4" -#: * "snorm8x2" -#: * "snorm8x4" -#: * "uint16x2" -#: * "uint16x4" -#: * "sint16x2" -#: * "sint16x4" -#: * "unorm16x2" -#: * "unorm16x4" -#: * "snorm16x2" -#: * "snorm16x4" -#: * "float16x2" -#: * "float16x4" -#: * "float32" -#: * "float32x2" -#: * "float32x3" -#: * "float32x4" -#: * "uint32" -#: * "uint32x2" -#: * "uint32x3" -#: * "uint32x4" -#: * "sint32" -#: * "sint32x2" -#: * "sint32x3" -#: * "sint32x4" -#: * "unorm10_10_10_2" -VertexFormat = Enum( - "VertexFormat", - uint8x2="uint8x2", - uint8x4="uint8x4", - sint8x2="sint8x2", - sint8x4="sint8x4", - unorm8x2="unorm8x2", - unorm8x4="unorm8x4", - snorm8x2="snorm8x2", - snorm8x4="snorm8x4", - uint16x2="uint16x2", - uint16x4="uint16x4", - sint16x2="sint16x2", - sint16x4="sint16x4", - unorm16x2="unorm16x2", - unorm16x4="unorm16x4", - snorm16x2="snorm16x2", - snorm16x4="snorm16x4", - float16x2="float16x2", - float16x4="float16x4", - float32="float32", - float32x2="float32x2", - float32x3="float32x3", - float32x4="float32x4", - uint32="uint32", - uint32x2="uint32x2", - uint32x3="uint32x3", - uint32x4="uint32x4", - sint32="sint32", - sint32x2="sint32x2", - sint32x3="sint32x3", - sint32x4="sint32x4", - unorm10_10_10_2="unorm10-10-10-2", -) - -#: * "vertex" -#: * "instance" -VertexStepMode = Enum( - "VertexStepMode", - vertex="vertex", - instance="instance", -) - -#: * "load" -#: * "clear" -LoadOp = Enum( - "LoadOp", - load="load", - clear="clear", -) - -#: * "store" -#: * "discard" -StoreOp = Enum( - "StoreOp", - store="store", - discard="discard", -) - -#: * "occlusion" -#: * "timestamp" -QueryType = Enum( - "QueryType", - occlusion="occlusion", - timestamp="timestamp", -) - -#: * "opaque" -#: * "premultiplied" -CanvasAlphaMode = Enum( - "CanvasAlphaMode", - opaque="opaque", - premultiplied="premultiplied", -) - -#: * "unknown" -#: * "destroyed" -DeviceLostReason = Enum( - "DeviceLostReason", - unknown="unknown", - 
destroyed="destroyed", -) - -#: * "validation" -#: * "out_of_memory" -#: * "internal" -ErrorFilter = Enum( - "ErrorFilter", - validation="validation", - out_of_memory="out-of-memory", - internal="internal", -) diff --git a/wgpu/flags.py b/wgpu/flags.py deleted file mode 100644 index 6b63b00..0000000 --- a/wgpu/flags.py +++ /dev/null @@ -1,111 +0,0 @@ -""" -These flags are defined in ``wgpu.flags``, but are also available from the root wgpu namespace. - -Flags are bitmasks; zero or multiple fields can be set at the same time. -Flags are integer bitmasks, but can also be passed as strings, so instead of -``wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST``, -one can also write ``"MAP_READ|COPY_DIST"``. -""" - -_use_sphinx_repr = False - - -class Flags: - def __init__(self, name, **kwargs): - self._name = name - for key, val in kwargs.items(): - setattr(self, key, val) - - def __iter__(self): - return iter([key for key in dir(self) if not key.startswith("_")]) - - def __repr__(self): - if _use_sphinx_repr: # no-cover - return "" - values = ", ".join(self) - return f"" - - -# CODE BELOW THIS POINT IS AUTOGENERATED - DO NOT EDIT - - -# There are 5 flags - -__all__ = [ - "BufferUsage", - "MapMode", - "TextureUsage", - "ShaderStage", - "ColorWrite", -] - - -#: * "MAP_READ" (1) -#: * "MAP_WRITE" (2) -#: * "COPY_SRC" (4) -#: * "COPY_DST" (8) -#: * "INDEX" (16) -#: * "VERTEX" (32) -#: * "UNIFORM" (64) -#: * "STORAGE" (128) -#: * "INDIRECT" (256) -#: * "QUERY_RESOLVE" (512) -BufferUsage = Flags( - "BufferUsage", - MAP_READ=1, - MAP_WRITE=2, - COPY_SRC=4, - COPY_DST=8, - INDEX=16, - VERTEX=32, - UNIFORM=64, - STORAGE=128, - INDIRECT=256, - QUERY_RESOLVE=512, -) - -#: * "READ" (1) -#: * "WRITE" (2) -MapMode = Flags( - "MapMode", - READ=1, - WRITE=2, -) - -#: * "COPY_SRC" (1) -#: * "COPY_DST" (2) -#: * "TEXTURE_BINDING" (4) -#: * "STORAGE_BINDING" (8) -#: * "RENDER_ATTACHMENT" (16) -TextureUsage = Flags( - "TextureUsage", - COPY_SRC=1, - COPY_DST=2, - TEXTURE_BINDING=4, - STORAGE_BINDING=8, - RENDER_ATTACHMENT=16, -) - -#: * "VERTEX" (1) -#: * "FRAGMENT" (2) -#: * "COMPUTE" (4) -ShaderStage = Flags( - "ShaderStage", - VERTEX=1, - FRAGMENT=2, - COMPUTE=4, -) - -#: * "RED" (1) -#: * "GREEN" (2) -#: * "BLUE" (4) -#: * "ALPHA" (8) -#: * "ALL" (15) -ColorWrite = Flags( - "ColorWrite", - RED=1, - GREEN=2, - BLUE=4, - ALPHA=8, - ALL=15, -) diff --git a/wgpu/resources/__init__.py b/wgpu/resources/__init__.py deleted file mode 100644 index eca5e0f..0000000 --- a/wgpu/resources/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -""" This module exists to have importlib.resources and setuptools recognize the folder as a module. 
-""" diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md deleted file mode 100644 index 240bb68..0000000 --- a/wgpu/resources/codegen_report.md +++ /dev/null @@ -1,34 +0,0 @@ -# Code generatation report -## Preparing -* The webgpu.idl defines 37 classes with 76 functions -* The webgpu.idl defines 5 flags, 33 enums, 59 structs -* The wgpu.h defines 198 functions -* The wgpu.h defines 7 flags, 50 enums, 92 structs -## Updating API -* Wrote 5 flags to flags.py -* Wrote 33 enums to enums.py -* Wrote 59 structs to structs.py -### Patching API for _classes.py -* Diffs for GPU: change get_preferred_canvas_format, change request_adapter, change request_adapter_async -* Diffs for GPUCanvasContext: add get_preferred_format, add present -* Diffs for GPUDevice: add adapter, add create_buffer_with_data, hide import_external_texture, hide lost, hide onuncapturederror, hide pop_error_scope, hide push_error_scope -* Diffs for GPUBuffer: add map_read, add map_write, add read_mapped, add write_mapped, hide get_mapped_range -* Diffs for GPUTexture: add size -* Diffs for GPUTextureView: add size, add texture -* Diffs for GPUQueue: add read_buffer, add read_texture, hide copy_external_image_to_texture -* Validated 37 classes, 112 methods, 43 properties -### Patching API for backends/wgpu_native/_api.py -* Validated 37 classes, 107 methods, 0 properties -## Validating backends/wgpu_native/_api.py -* Enum field TextureFormat.rgb10a2uint missing in wgpu.h -* Enum field StorageTextureAccess.read-only missing in wgpu.h -* Enum field StorageTextureAccess.read-write missing in wgpu.h -* Enum PipelineErrorReason missing in wgpu.h -* Enum AutoLayoutMode missing in wgpu.h -* Enum field VertexFormat.unorm10-10-10-2 missing in wgpu.h -* Enum CanvasAlphaMode missing in wgpu.h -* Enum field DeviceLostReason.unknown missing in wgpu.h -* Wrote 232 enum mappings and 47 struct-field mappings to wgpu_native/_mappings.py -* Validated 105 C function calls -* Not using 97 C functions -* Validated 75 C structs diff --git a/wgpu/resources/webgpu.h b/wgpu/resources/webgpu.h deleted file mode 100644 index 79c0bc0..0000000 --- a/wgpu/resources/webgpu.h +++ /dev/null @@ -1,1803 +0,0 @@ -// BSD 3-Clause License -// -// Copyright (c) 2019, "WebGPU native" developers -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this -// list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#ifndef WEBGPU_H_ -#define WEBGPU_H_ - -#if defined(WGPU_SHARED_LIBRARY) -# if defined(_WIN32) -# if defined(WGPU_IMPLEMENTATION) -# define WGPU_EXPORT __declspec(dllexport) -# else -# define WGPU_EXPORT __declspec(dllimport) -# endif -# else // defined(_WIN32) -# if defined(WGPU_IMPLEMENTATION) -# define WGPU_EXPORT __attribute__((visibility("default"))) -# else -# define WGPU_EXPORT -# endif -# endif // defined(_WIN32) -#else // defined(WGPU_SHARED_LIBRARY) -# define WGPU_EXPORT -#endif // defined(WGPU_SHARED_LIBRARY) - -#if !defined(WGPU_OBJECT_ATTRIBUTE) -#define WGPU_OBJECT_ATTRIBUTE -#endif -#if !defined(WGPU_ENUM_ATTRIBUTE) -#define WGPU_ENUM_ATTRIBUTE -#endif -#if !defined(WGPU_STRUCTURE_ATTRIBUTE) -#define WGPU_STRUCTURE_ATTRIBUTE -#endif -#if !defined(WGPU_FUNCTION_ATTRIBUTE) -#define WGPU_FUNCTION_ATTRIBUTE -#endif -#if !defined(WGPU_NULLABLE) -#define WGPU_NULLABLE -#endif - -#include -#include - -#define WGPU_ARRAY_LAYER_COUNT_UNDEFINED (0xffffffffUL) -#define WGPU_COPY_STRIDE_UNDEFINED (0xffffffffUL) -#define WGPU_LIMIT_U32_UNDEFINED (0xffffffffUL) -#define WGPU_LIMIT_U64_UNDEFINED (0xffffffffffffffffULL) -#define WGPU_MIP_LEVEL_COUNT_UNDEFINED (0xffffffffUL) -#define WGPU_QUERY_SET_INDEX_UNDEFINED (0xffffffffUL) -#define WGPU_WHOLE_MAP_SIZE SIZE_MAX -#define WGPU_WHOLE_SIZE (0xffffffffffffffffULL) - -typedef uint32_t WGPUFlags; -typedef uint32_t WGPUBool; - -typedef struct WGPUAdapterImpl* WGPUAdapter WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUBindGroupImpl* WGPUBindGroup WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUBindGroupLayoutImpl* WGPUBindGroupLayout WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUBufferImpl* WGPUBuffer WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUCommandBufferImpl* WGPUCommandBuffer WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUCommandEncoderImpl* WGPUCommandEncoder WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUComputePassEncoderImpl* WGPUComputePassEncoder WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUComputePipelineImpl* WGPUComputePipeline WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUDeviceImpl* WGPUDevice WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUInstanceImpl* WGPUInstance WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUPipelineLayoutImpl* WGPUPipelineLayout WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUQuerySetImpl* WGPUQuerySet WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUQueueImpl* WGPUQueue WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPURenderBundleImpl* WGPURenderBundle WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPURenderBundleEncoderImpl* WGPURenderBundleEncoder WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPURenderPassEncoderImpl* WGPURenderPassEncoder WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPURenderPipelineImpl* WGPURenderPipeline WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUSamplerImpl* WGPUSampler WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUShaderModuleImpl* WGPUShaderModule WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUSurfaceImpl* WGPUSurface WGPU_OBJECT_ATTRIBUTE; -typedef struct WGPUTextureImpl* WGPUTexture WGPU_OBJECT_ATTRIBUTE; -typedef struct 
WGPUTextureViewImpl* WGPUTextureView WGPU_OBJECT_ATTRIBUTE; - -// Structure forward declarations -struct WGPUAdapterProperties; -struct WGPUBindGroupEntry; -struct WGPUBlendComponent; -struct WGPUBufferBindingLayout; -struct WGPUBufferDescriptor; -struct WGPUColor; -struct WGPUCommandBufferDescriptor; -struct WGPUCommandEncoderDescriptor; -struct WGPUCompilationMessage; -struct WGPUComputePassTimestampWrites; -struct WGPUConstantEntry; -struct WGPUExtent3D; -struct WGPUInstanceDescriptor; -struct WGPULimits; -struct WGPUMultisampleState; -struct WGPUOrigin3D; -struct WGPUPipelineLayoutDescriptor; -struct WGPUPrimitiveDepthClipControl; -struct WGPUPrimitiveState; -struct WGPUQuerySetDescriptor; -struct WGPUQueueDescriptor; -struct WGPURenderBundleDescriptor; -struct WGPURenderBundleEncoderDescriptor; -struct WGPURenderPassDepthStencilAttachment; -struct WGPURenderPassDescriptorMaxDrawCount; -struct WGPURenderPassTimestampWrites; -struct WGPURequestAdapterOptions; -struct WGPUSamplerBindingLayout; -struct WGPUSamplerDescriptor; -struct WGPUShaderModuleCompilationHint; -struct WGPUShaderModuleSPIRVDescriptor; -struct WGPUShaderModuleWGSLDescriptor; -struct WGPUStencilFaceState; -struct WGPUStorageTextureBindingLayout; -struct WGPUSurfaceCapabilities; -struct WGPUSurfaceConfiguration; -struct WGPUSurfaceDescriptor; -struct WGPUSurfaceDescriptorFromAndroidNativeWindow; -struct WGPUSurfaceDescriptorFromCanvasHTMLSelector; -struct WGPUSurfaceDescriptorFromMetalLayer; -struct WGPUSurfaceDescriptorFromWaylandSurface; -struct WGPUSurfaceDescriptorFromWindowsHWND; -struct WGPUSurfaceDescriptorFromXcbWindow; -struct WGPUSurfaceDescriptorFromXlibWindow; -struct WGPUSurfaceTexture; -struct WGPUTextureBindingLayout; -struct WGPUTextureDataLayout; -struct WGPUTextureViewDescriptor; -struct WGPUVertexAttribute; -struct WGPUBindGroupDescriptor; -struct WGPUBindGroupLayoutEntry; -struct WGPUBlendState; -struct WGPUCompilationInfo; -struct WGPUComputePassDescriptor; -struct WGPUDepthStencilState; -struct WGPUImageCopyBuffer; -struct WGPUImageCopyTexture; -struct WGPUProgrammableStageDescriptor; -struct WGPURenderPassColorAttachment; -struct WGPURequiredLimits; -struct WGPUShaderModuleDescriptor; -struct WGPUSupportedLimits; -struct WGPUTextureDescriptor; -struct WGPUVertexBufferLayout; -struct WGPUBindGroupLayoutDescriptor; -struct WGPUColorTargetState; -struct WGPUComputePipelineDescriptor; -struct WGPUDeviceDescriptor; -struct WGPURenderPassDescriptor; -struct WGPUVertexState; -struct WGPUFragmentState; -struct WGPURenderPipelineDescriptor; - -typedef enum WGPUAdapterType { - WGPUAdapterType_DiscreteGPU = 0x00000000, - WGPUAdapterType_IntegratedGPU = 0x00000001, - WGPUAdapterType_CPU = 0x00000002, - WGPUAdapterType_Unknown = 0x00000003, - WGPUAdapterType_Force32 = 0x7FFFFFFF -} WGPUAdapterType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUAddressMode { - WGPUAddressMode_Repeat = 0x00000000, - WGPUAddressMode_MirrorRepeat = 0x00000001, - WGPUAddressMode_ClampToEdge = 0x00000002, - WGPUAddressMode_Force32 = 0x7FFFFFFF -} WGPUAddressMode WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUBackendType { - WGPUBackendType_Undefined = 0x00000000, - WGPUBackendType_Null = 0x00000001, - WGPUBackendType_WebGPU = 0x00000002, - WGPUBackendType_D3D11 = 0x00000003, - WGPUBackendType_D3D12 = 0x00000004, - WGPUBackendType_Metal = 0x00000005, - WGPUBackendType_Vulkan = 0x00000006, - WGPUBackendType_OpenGL = 0x00000007, - WGPUBackendType_OpenGLES = 0x00000008, - WGPUBackendType_Force32 = 0x7FFFFFFF -} WGPUBackendType WGPU_ENUM_ATTRIBUTE; - 
-typedef enum WGPUBlendFactor { - WGPUBlendFactor_Zero = 0x00000000, - WGPUBlendFactor_One = 0x00000001, - WGPUBlendFactor_Src = 0x00000002, - WGPUBlendFactor_OneMinusSrc = 0x00000003, - WGPUBlendFactor_SrcAlpha = 0x00000004, - WGPUBlendFactor_OneMinusSrcAlpha = 0x00000005, - WGPUBlendFactor_Dst = 0x00000006, - WGPUBlendFactor_OneMinusDst = 0x00000007, - WGPUBlendFactor_DstAlpha = 0x00000008, - WGPUBlendFactor_OneMinusDstAlpha = 0x00000009, - WGPUBlendFactor_SrcAlphaSaturated = 0x0000000A, - WGPUBlendFactor_Constant = 0x0000000B, - WGPUBlendFactor_OneMinusConstant = 0x0000000C, - WGPUBlendFactor_Force32 = 0x7FFFFFFF -} WGPUBlendFactor WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUBlendOperation { - WGPUBlendOperation_Add = 0x00000000, - WGPUBlendOperation_Subtract = 0x00000001, - WGPUBlendOperation_ReverseSubtract = 0x00000002, - WGPUBlendOperation_Min = 0x00000003, - WGPUBlendOperation_Max = 0x00000004, - WGPUBlendOperation_Force32 = 0x7FFFFFFF -} WGPUBlendOperation WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUBufferBindingType { - WGPUBufferBindingType_Undefined = 0x00000000, - WGPUBufferBindingType_Uniform = 0x00000001, - WGPUBufferBindingType_Storage = 0x00000002, - WGPUBufferBindingType_ReadOnlyStorage = 0x00000003, - WGPUBufferBindingType_Force32 = 0x7FFFFFFF -} WGPUBufferBindingType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUBufferMapAsyncStatus { - WGPUBufferMapAsyncStatus_Success = 0x00000000, - WGPUBufferMapAsyncStatus_ValidationError = 0x00000001, - WGPUBufferMapAsyncStatus_Unknown = 0x00000002, - WGPUBufferMapAsyncStatus_DeviceLost = 0x00000003, - WGPUBufferMapAsyncStatus_DestroyedBeforeCallback = 0x00000004, - WGPUBufferMapAsyncStatus_UnmappedBeforeCallback = 0x00000005, - WGPUBufferMapAsyncStatus_MappingAlreadyPending = 0x00000006, - WGPUBufferMapAsyncStatus_OffsetOutOfRange = 0x00000007, - WGPUBufferMapAsyncStatus_SizeOutOfRange = 0x00000008, - WGPUBufferMapAsyncStatus_Force32 = 0x7FFFFFFF -} WGPUBufferMapAsyncStatus WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUBufferMapState { - WGPUBufferMapState_Unmapped = 0x00000000, - WGPUBufferMapState_Pending = 0x00000001, - WGPUBufferMapState_Mapped = 0x00000002, - WGPUBufferMapState_Force32 = 0x7FFFFFFF -} WGPUBufferMapState WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUCompareFunction { - WGPUCompareFunction_Undefined = 0x00000000, - WGPUCompareFunction_Never = 0x00000001, - WGPUCompareFunction_Less = 0x00000002, - WGPUCompareFunction_LessEqual = 0x00000003, - WGPUCompareFunction_Greater = 0x00000004, - WGPUCompareFunction_GreaterEqual = 0x00000005, - WGPUCompareFunction_Equal = 0x00000006, - WGPUCompareFunction_NotEqual = 0x00000007, - WGPUCompareFunction_Always = 0x00000008, - WGPUCompareFunction_Force32 = 0x7FFFFFFF -} WGPUCompareFunction WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUCompilationInfoRequestStatus { - WGPUCompilationInfoRequestStatus_Success = 0x00000000, - WGPUCompilationInfoRequestStatus_Error = 0x00000001, - WGPUCompilationInfoRequestStatus_DeviceLost = 0x00000002, - WGPUCompilationInfoRequestStatus_Unknown = 0x00000003, - WGPUCompilationInfoRequestStatus_Force32 = 0x7FFFFFFF -} WGPUCompilationInfoRequestStatus WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUCompilationMessageType { - WGPUCompilationMessageType_Error = 0x00000000, - WGPUCompilationMessageType_Warning = 0x00000001, - WGPUCompilationMessageType_Info = 0x00000002, - WGPUCompilationMessageType_Force32 = 0x7FFFFFFF -} WGPUCompilationMessageType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUCompositeAlphaMode { - WGPUCompositeAlphaMode_Auto = 0x00000000, - WGPUCompositeAlphaMode_Opaque 
= 0x00000001, - WGPUCompositeAlphaMode_Premultiplied = 0x00000002, - WGPUCompositeAlphaMode_Unpremultiplied = 0x00000003, - WGPUCompositeAlphaMode_Inherit = 0x00000004, - WGPUCompositeAlphaMode_Force32 = 0x7FFFFFFF -} WGPUCompositeAlphaMode WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUCreatePipelineAsyncStatus { - WGPUCreatePipelineAsyncStatus_Success = 0x00000000, - WGPUCreatePipelineAsyncStatus_ValidationError = 0x00000001, - WGPUCreatePipelineAsyncStatus_InternalError = 0x00000002, - WGPUCreatePipelineAsyncStatus_DeviceLost = 0x00000003, - WGPUCreatePipelineAsyncStatus_DeviceDestroyed = 0x00000004, - WGPUCreatePipelineAsyncStatus_Unknown = 0x00000005, - WGPUCreatePipelineAsyncStatus_Force32 = 0x7FFFFFFF -} WGPUCreatePipelineAsyncStatus WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUCullMode { - WGPUCullMode_None = 0x00000000, - WGPUCullMode_Front = 0x00000001, - WGPUCullMode_Back = 0x00000002, - WGPUCullMode_Force32 = 0x7FFFFFFF -} WGPUCullMode WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUDeviceLostReason { - WGPUDeviceLostReason_Undefined = 0x00000000, - WGPUDeviceLostReason_Destroyed = 0x00000001, - WGPUDeviceLostReason_Force32 = 0x7FFFFFFF -} WGPUDeviceLostReason WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUErrorFilter { - WGPUErrorFilter_Validation = 0x00000000, - WGPUErrorFilter_OutOfMemory = 0x00000001, - WGPUErrorFilter_Internal = 0x00000002, - WGPUErrorFilter_Force32 = 0x7FFFFFFF -} WGPUErrorFilter WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUErrorType { - WGPUErrorType_NoError = 0x00000000, - WGPUErrorType_Validation = 0x00000001, - WGPUErrorType_OutOfMemory = 0x00000002, - WGPUErrorType_Internal = 0x00000003, - WGPUErrorType_Unknown = 0x00000004, - WGPUErrorType_DeviceLost = 0x00000005, - WGPUErrorType_Force32 = 0x7FFFFFFF -} WGPUErrorType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUFeatureName { - WGPUFeatureName_Undefined = 0x00000000, - WGPUFeatureName_DepthClipControl = 0x00000001, - WGPUFeatureName_Depth32FloatStencil8 = 0x00000002, - WGPUFeatureName_TimestampQuery = 0x00000003, - WGPUFeatureName_TextureCompressionBC = 0x00000004, - WGPUFeatureName_TextureCompressionETC2 = 0x00000005, - WGPUFeatureName_TextureCompressionASTC = 0x00000006, - WGPUFeatureName_IndirectFirstInstance = 0x00000007, - WGPUFeatureName_ShaderF16 = 0x00000008, - WGPUFeatureName_RG11B10UfloatRenderable = 0x00000009, - WGPUFeatureName_BGRA8UnormStorage = 0x0000000A, - WGPUFeatureName_Float32Filterable = 0x0000000B, - WGPUFeatureName_Force32 = 0x7FFFFFFF -} WGPUFeatureName WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUFilterMode { - WGPUFilterMode_Nearest = 0x00000000, - WGPUFilterMode_Linear = 0x00000001, - WGPUFilterMode_Force32 = 0x7FFFFFFF -} WGPUFilterMode WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUFrontFace { - WGPUFrontFace_CCW = 0x00000000, - WGPUFrontFace_CW = 0x00000001, - WGPUFrontFace_Force32 = 0x7FFFFFFF -} WGPUFrontFace WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUIndexFormat { - WGPUIndexFormat_Undefined = 0x00000000, - WGPUIndexFormat_Uint16 = 0x00000001, - WGPUIndexFormat_Uint32 = 0x00000002, - WGPUIndexFormat_Force32 = 0x7FFFFFFF -} WGPUIndexFormat WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPULoadOp { - WGPULoadOp_Undefined = 0x00000000, - WGPULoadOp_Clear = 0x00000001, - WGPULoadOp_Load = 0x00000002, - WGPULoadOp_Force32 = 0x7FFFFFFF -} WGPULoadOp WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUMipmapFilterMode { - WGPUMipmapFilterMode_Nearest = 0x00000000, - WGPUMipmapFilterMode_Linear = 0x00000001, - WGPUMipmapFilterMode_Force32 = 0x7FFFFFFF -} WGPUMipmapFilterMode WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUPowerPreference { - 
WGPUPowerPreference_Undefined = 0x00000000, - WGPUPowerPreference_LowPower = 0x00000001, - WGPUPowerPreference_HighPerformance = 0x00000002, - WGPUPowerPreference_Force32 = 0x7FFFFFFF -} WGPUPowerPreference WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUPresentMode { - WGPUPresentMode_Fifo = 0x00000000, - WGPUPresentMode_FifoRelaxed = 0x00000001, - WGPUPresentMode_Immediate = 0x00000002, - WGPUPresentMode_Mailbox = 0x00000003, - WGPUPresentMode_Force32 = 0x7FFFFFFF -} WGPUPresentMode WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUPrimitiveTopology { - WGPUPrimitiveTopology_PointList = 0x00000000, - WGPUPrimitiveTopology_LineList = 0x00000001, - WGPUPrimitiveTopology_LineStrip = 0x00000002, - WGPUPrimitiveTopology_TriangleList = 0x00000003, - WGPUPrimitiveTopology_TriangleStrip = 0x00000004, - WGPUPrimitiveTopology_Force32 = 0x7FFFFFFF -} WGPUPrimitiveTopology WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUQueryType { - WGPUQueryType_Occlusion = 0x00000000, - WGPUQueryType_Timestamp = 0x00000001, - WGPUQueryType_Force32 = 0x7FFFFFFF -} WGPUQueryType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUQueueWorkDoneStatus { - WGPUQueueWorkDoneStatus_Success = 0x00000000, - WGPUQueueWorkDoneStatus_Error = 0x00000001, - WGPUQueueWorkDoneStatus_Unknown = 0x00000002, - WGPUQueueWorkDoneStatus_DeviceLost = 0x00000003, - WGPUQueueWorkDoneStatus_Force32 = 0x7FFFFFFF -} WGPUQueueWorkDoneStatus WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPURequestAdapterStatus { - WGPURequestAdapterStatus_Success = 0x00000000, - WGPURequestAdapterStatus_Unavailable = 0x00000001, - WGPURequestAdapterStatus_Error = 0x00000002, - WGPURequestAdapterStatus_Unknown = 0x00000003, - WGPURequestAdapterStatus_Force32 = 0x7FFFFFFF -} WGPURequestAdapterStatus WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPURequestDeviceStatus { - WGPURequestDeviceStatus_Success = 0x00000000, - WGPURequestDeviceStatus_Error = 0x00000001, - WGPURequestDeviceStatus_Unknown = 0x00000002, - WGPURequestDeviceStatus_Force32 = 0x7FFFFFFF -} WGPURequestDeviceStatus WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUSType { - WGPUSType_Invalid = 0x00000000, - WGPUSType_SurfaceDescriptorFromMetalLayer = 0x00000001, - WGPUSType_SurfaceDescriptorFromWindowsHWND = 0x00000002, - WGPUSType_SurfaceDescriptorFromXlibWindow = 0x00000003, - WGPUSType_SurfaceDescriptorFromCanvasHTMLSelector = 0x00000004, - WGPUSType_ShaderModuleSPIRVDescriptor = 0x00000005, - WGPUSType_ShaderModuleWGSLDescriptor = 0x00000006, - WGPUSType_PrimitiveDepthClipControl = 0x00000007, - WGPUSType_SurfaceDescriptorFromWaylandSurface = 0x00000008, - WGPUSType_SurfaceDescriptorFromAndroidNativeWindow = 0x00000009, - WGPUSType_SurfaceDescriptorFromXcbWindow = 0x0000000A, - WGPUSType_RenderPassDescriptorMaxDrawCount = 0x0000000F, - WGPUSType_Force32 = 0x7FFFFFFF -} WGPUSType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUSamplerBindingType { - WGPUSamplerBindingType_Undefined = 0x00000000, - WGPUSamplerBindingType_Filtering = 0x00000001, - WGPUSamplerBindingType_NonFiltering = 0x00000002, - WGPUSamplerBindingType_Comparison = 0x00000003, - WGPUSamplerBindingType_Force32 = 0x7FFFFFFF -} WGPUSamplerBindingType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUStencilOperation { - WGPUStencilOperation_Keep = 0x00000000, - WGPUStencilOperation_Zero = 0x00000001, - WGPUStencilOperation_Replace = 0x00000002, - WGPUStencilOperation_Invert = 0x00000003, - WGPUStencilOperation_IncrementClamp = 0x00000004, - WGPUStencilOperation_DecrementClamp = 0x00000005, - WGPUStencilOperation_IncrementWrap = 0x00000006, - WGPUStencilOperation_DecrementWrap = 0x00000007, - 
WGPUStencilOperation_Force32 = 0x7FFFFFFF -} WGPUStencilOperation WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUStorageTextureAccess { - WGPUStorageTextureAccess_Undefined = 0x00000000, - WGPUStorageTextureAccess_WriteOnly = 0x00000001, - WGPUStorageTextureAccess_Force32 = 0x7FFFFFFF -} WGPUStorageTextureAccess WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUStoreOp { - WGPUStoreOp_Undefined = 0x00000000, - WGPUStoreOp_Store = 0x00000001, - WGPUStoreOp_Discard = 0x00000002, - WGPUStoreOp_Force32 = 0x7FFFFFFF -} WGPUStoreOp WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUSurfaceGetCurrentTextureStatus { - WGPUSurfaceGetCurrentTextureStatus_Success = 0x00000000, - WGPUSurfaceGetCurrentTextureStatus_Timeout = 0x00000001, - WGPUSurfaceGetCurrentTextureStatus_Outdated = 0x00000002, - WGPUSurfaceGetCurrentTextureStatus_Lost = 0x00000003, - WGPUSurfaceGetCurrentTextureStatus_OutOfMemory = 0x00000004, - WGPUSurfaceGetCurrentTextureStatus_DeviceLost = 0x00000005, - WGPUSurfaceGetCurrentTextureStatus_Force32 = 0x7FFFFFFF -} WGPUSurfaceGetCurrentTextureStatus WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUTextureAspect { - WGPUTextureAspect_All = 0x00000000, - WGPUTextureAspect_StencilOnly = 0x00000001, - WGPUTextureAspect_DepthOnly = 0x00000002, - WGPUTextureAspect_Force32 = 0x7FFFFFFF -} WGPUTextureAspect WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUTextureDimension { - WGPUTextureDimension_1D = 0x00000000, - WGPUTextureDimension_2D = 0x00000001, - WGPUTextureDimension_3D = 0x00000002, - WGPUTextureDimension_Force32 = 0x7FFFFFFF -} WGPUTextureDimension WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUTextureFormat { - WGPUTextureFormat_Undefined = 0x00000000, - WGPUTextureFormat_R8Unorm = 0x00000001, - WGPUTextureFormat_R8Snorm = 0x00000002, - WGPUTextureFormat_R8Uint = 0x00000003, - WGPUTextureFormat_R8Sint = 0x00000004, - WGPUTextureFormat_R16Uint = 0x00000005, - WGPUTextureFormat_R16Sint = 0x00000006, - WGPUTextureFormat_R16Float = 0x00000007, - WGPUTextureFormat_RG8Unorm = 0x00000008, - WGPUTextureFormat_RG8Snorm = 0x00000009, - WGPUTextureFormat_RG8Uint = 0x0000000A, - WGPUTextureFormat_RG8Sint = 0x0000000B, - WGPUTextureFormat_R32Float = 0x0000000C, - WGPUTextureFormat_R32Uint = 0x0000000D, - WGPUTextureFormat_R32Sint = 0x0000000E, - WGPUTextureFormat_RG16Uint = 0x0000000F, - WGPUTextureFormat_RG16Sint = 0x00000010, - WGPUTextureFormat_RG16Float = 0x00000011, - WGPUTextureFormat_RGBA8Unorm = 0x00000012, - WGPUTextureFormat_RGBA8UnormSrgb = 0x00000013, - WGPUTextureFormat_RGBA8Snorm = 0x00000014, - WGPUTextureFormat_RGBA8Uint = 0x00000015, - WGPUTextureFormat_RGBA8Sint = 0x00000016, - WGPUTextureFormat_BGRA8Unorm = 0x00000017, - WGPUTextureFormat_BGRA8UnormSrgb = 0x00000018, - WGPUTextureFormat_RGB10A2Unorm = 0x00000019, - WGPUTextureFormat_RG11B10Ufloat = 0x0000001A, - WGPUTextureFormat_RGB9E5Ufloat = 0x0000001B, - WGPUTextureFormat_RG32Float = 0x0000001C, - WGPUTextureFormat_RG32Uint = 0x0000001D, - WGPUTextureFormat_RG32Sint = 0x0000001E, - WGPUTextureFormat_RGBA16Uint = 0x0000001F, - WGPUTextureFormat_RGBA16Sint = 0x00000020, - WGPUTextureFormat_RGBA16Float = 0x00000021, - WGPUTextureFormat_RGBA32Float = 0x00000022, - WGPUTextureFormat_RGBA32Uint = 0x00000023, - WGPUTextureFormat_RGBA32Sint = 0x00000024, - WGPUTextureFormat_Stencil8 = 0x00000025, - WGPUTextureFormat_Depth16Unorm = 0x00000026, - WGPUTextureFormat_Depth24Plus = 0x00000027, - WGPUTextureFormat_Depth24PlusStencil8 = 0x00000028, - WGPUTextureFormat_Depth32Float = 0x00000029, - WGPUTextureFormat_Depth32FloatStencil8 = 0x0000002A, - WGPUTextureFormat_BC1RGBAUnorm = 
0x0000002B, - WGPUTextureFormat_BC1RGBAUnormSrgb = 0x0000002C, - WGPUTextureFormat_BC2RGBAUnorm = 0x0000002D, - WGPUTextureFormat_BC2RGBAUnormSrgb = 0x0000002E, - WGPUTextureFormat_BC3RGBAUnorm = 0x0000002F, - WGPUTextureFormat_BC3RGBAUnormSrgb = 0x00000030, - WGPUTextureFormat_BC4RUnorm = 0x00000031, - WGPUTextureFormat_BC4RSnorm = 0x00000032, - WGPUTextureFormat_BC5RGUnorm = 0x00000033, - WGPUTextureFormat_BC5RGSnorm = 0x00000034, - WGPUTextureFormat_BC6HRGBUfloat = 0x00000035, - WGPUTextureFormat_BC6HRGBFloat = 0x00000036, - WGPUTextureFormat_BC7RGBAUnorm = 0x00000037, - WGPUTextureFormat_BC7RGBAUnormSrgb = 0x00000038, - WGPUTextureFormat_ETC2RGB8Unorm = 0x00000039, - WGPUTextureFormat_ETC2RGB8UnormSrgb = 0x0000003A, - WGPUTextureFormat_ETC2RGB8A1Unorm = 0x0000003B, - WGPUTextureFormat_ETC2RGB8A1UnormSrgb = 0x0000003C, - WGPUTextureFormat_ETC2RGBA8Unorm = 0x0000003D, - WGPUTextureFormat_ETC2RGBA8UnormSrgb = 0x0000003E, - WGPUTextureFormat_EACR11Unorm = 0x0000003F, - WGPUTextureFormat_EACR11Snorm = 0x00000040, - WGPUTextureFormat_EACRG11Unorm = 0x00000041, - WGPUTextureFormat_EACRG11Snorm = 0x00000042, - WGPUTextureFormat_ASTC4x4Unorm = 0x00000043, - WGPUTextureFormat_ASTC4x4UnormSrgb = 0x00000044, - WGPUTextureFormat_ASTC5x4Unorm = 0x00000045, - WGPUTextureFormat_ASTC5x4UnormSrgb = 0x00000046, - WGPUTextureFormat_ASTC5x5Unorm = 0x00000047, - WGPUTextureFormat_ASTC5x5UnormSrgb = 0x00000048, - WGPUTextureFormat_ASTC6x5Unorm = 0x00000049, - WGPUTextureFormat_ASTC6x5UnormSrgb = 0x0000004A, - WGPUTextureFormat_ASTC6x6Unorm = 0x0000004B, - WGPUTextureFormat_ASTC6x6UnormSrgb = 0x0000004C, - WGPUTextureFormat_ASTC8x5Unorm = 0x0000004D, - WGPUTextureFormat_ASTC8x5UnormSrgb = 0x0000004E, - WGPUTextureFormat_ASTC8x6Unorm = 0x0000004F, - WGPUTextureFormat_ASTC8x6UnormSrgb = 0x00000050, - WGPUTextureFormat_ASTC8x8Unorm = 0x00000051, - WGPUTextureFormat_ASTC8x8UnormSrgb = 0x00000052, - WGPUTextureFormat_ASTC10x5Unorm = 0x00000053, - WGPUTextureFormat_ASTC10x5UnormSrgb = 0x00000054, - WGPUTextureFormat_ASTC10x6Unorm = 0x00000055, - WGPUTextureFormat_ASTC10x6UnormSrgb = 0x00000056, - WGPUTextureFormat_ASTC10x8Unorm = 0x00000057, - WGPUTextureFormat_ASTC10x8UnormSrgb = 0x00000058, - WGPUTextureFormat_ASTC10x10Unorm = 0x00000059, - WGPUTextureFormat_ASTC10x10UnormSrgb = 0x0000005A, - WGPUTextureFormat_ASTC12x10Unorm = 0x0000005B, - WGPUTextureFormat_ASTC12x10UnormSrgb = 0x0000005C, - WGPUTextureFormat_ASTC12x12Unorm = 0x0000005D, - WGPUTextureFormat_ASTC12x12UnormSrgb = 0x0000005E, - WGPUTextureFormat_Force32 = 0x7FFFFFFF -} WGPUTextureFormat WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUTextureSampleType { - WGPUTextureSampleType_Undefined = 0x00000000, - WGPUTextureSampleType_Float = 0x00000001, - WGPUTextureSampleType_UnfilterableFloat = 0x00000002, - WGPUTextureSampleType_Depth = 0x00000003, - WGPUTextureSampleType_Sint = 0x00000004, - WGPUTextureSampleType_Uint = 0x00000005, - WGPUTextureSampleType_Force32 = 0x7FFFFFFF -} WGPUTextureSampleType WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUTextureViewDimension { - WGPUTextureViewDimension_Undefined = 0x00000000, - WGPUTextureViewDimension_1D = 0x00000001, - WGPUTextureViewDimension_2D = 0x00000002, - WGPUTextureViewDimension_2DArray = 0x00000003, - WGPUTextureViewDimension_Cube = 0x00000004, - WGPUTextureViewDimension_CubeArray = 0x00000005, - WGPUTextureViewDimension_3D = 0x00000006, - WGPUTextureViewDimension_Force32 = 0x7FFFFFFF -} WGPUTextureViewDimension WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUVertexFormat { - WGPUVertexFormat_Undefined = 0x00000000, - 
WGPUVertexFormat_Uint8x2 = 0x00000001, - WGPUVertexFormat_Uint8x4 = 0x00000002, - WGPUVertexFormat_Sint8x2 = 0x00000003, - WGPUVertexFormat_Sint8x4 = 0x00000004, - WGPUVertexFormat_Unorm8x2 = 0x00000005, - WGPUVertexFormat_Unorm8x4 = 0x00000006, - WGPUVertexFormat_Snorm8x2 = 0x00000007, - WGPUVertexFormat_Snorm8x4 = 0x00000008, - WGPUVertexFormat_Uint16x2 = 0x00000009, - WGPUVertexFormat_Uint16x4 = 0x0000000A, - WGPUVertexFormat_Sint16x2 = 0x0000000B, - WGPUVertexFormat_Sint16x4 = 0x0000000C, - WGPUVertexFormat_Unorm16x2 = 0x0000000D, - WGPUVertexFormat_Unorm16x4 = 0x0000000E, - WGPUVertexFormat_Snorm16x2 = 0x0000000F, - WGPUVertexFormat_Snorm16x4 = 0x00000010, - WGPUVertexFormat_Float16x2 = 0x00000011, - WGPUVertexFormat_Float16x4 = 0x00000012, - WGPUVertexFormat_Float32 = 0x00000013, - WGPUVertexFormat_Float32x2 = 0x00000014, - WGPUVertexFormat_Float32x3 = 0x00000015, - WGPUVertexFormat_Float32x4 = 0x00000016, - WGPUVertexFormat_Uint32 = 0x00000017, - WGPUVertexFormat_Uint32x2 = 0x00000018, - WGPUVertexFormat_Uint32x3 = 0x00000019, - WGPUVertexFormat_Uint32x4 = 0x0000001A, - WGPUVertexFormat_Sint32 = 0x0000001B, - WGPUVertexFormat_Sint32x2 = 0x0000001C, - WGPUVertexFormat_Sint32x3 = 0x0000001D, - WGPUVertexFormat_Sint32x4 = 0x0000001E, - WGPUVertexFormat_Force32 = 0x7FFFFFFF -} WGPUVertexFormat WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUVertexStepMode { - WGPUVertexStepMode_Vertex = 0x00000000, - WGPUVertexStepMode_Instance = 0x00000001, - WGPUVertexStepMode_VertexBufferNotUsed = 0x00000002, - WGPUVertexStepMode_Force32 = 0x7FFFFFFF -} WGPUVertexStepMode WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUBufferUsage { - WGPUBufferUsage_None = 0x00000000, - WGPUBufferUsage_MapRead = 0x00000001, - WGPUBufferUsage_MapWrite = 0x00000002, - WGPUBufferUsage_CopySrc = 0x00000004, - WGPUBufferUsage_CopyDst = 0x00000008, - WGPUBufferUsage_Index = 0x00000010, - WGPUBufferUsage_Vertex = 0x00000020, - WGPUBufferUsage_Uniform = 0x00000040, - WGPUBufferUsage_Storage = 0x00000080, - WGPUBufferUsage_Indirect = 0x00000100, - WGPUBufferUsage_QueryResolve = 0x00000200, - WGPUBufferUsage_Force32 = 0x7FFFFFFF -} WGPUBufferUsage WGPU_ENUM_ATTRIBUTE; -typedef WGPUFlags WGPUBufferUsageFlags WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUColorWriteMask { - WGPUColorWriteMask_None = 0x00000000, - WGPUColorWriteMask_Red = 0x00000001, - WGPUColorWriteMask_Green = 0x00000002, - WGPUColorWriteMask_Blue = 0x00000004, - WGPUColorWriteMask_Alpha = 0x00000008, - WGPUColorWriteMask_All = 0x0000000F, - WGPUColorWriteMask_Force32 = 0x7FFFFFFF -} WGPUColorWriteMask WGPU_ENUM_ATTRIBUTE; -typedef WGPUFlags WGPUColorWriteMaskFlags WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUMapMode { - WGPUMapMode_None = 0x00000000, - WGPUMapMode_Read = 0x00000001, - WGPUMapMode_Write = 0x00000002, - WGPUMapMode_Force32 = 0x7FFFFFFF -} WGPUMapMode WGPU_ENUM_ATTRIBUTE; -typedef WGPUFlags WGPUMapModeFlags WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUShaderStage { - WGPUShaderStage_None = 0x00000000, - WGPUShaderStage_Vertex = 0x00000001, - WGPUShaderStage_Fragment = 0x00000002, - WGPUShaderStage_Compute = 0x00000004, - WGPUShaderStage_Force32 = 0x7FFFFFFF -} WGPUShaderStage WGPU_ENUM_ATTRIBUTE; -typedef WGPUFlags WGPUShaderStageFlags WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUTextureUsage { - WGPUTextureUsage_None = 0x00000000, - WGPUTextureUsage_CopySrc = 0x00000001, - WGPUTextureUsage_CopyDst = 0x00000002, - WGPUTextureUsage_TextureBinding = 0x00000004, - WGPUTextureUsage_StorageBinding = 0x00000008, - WGPUTextureUsage_RenderAttachment = 0x00000010, - 
WGPUTextureUsage_Force32 = 0x7FFFFFFF -} WGPUTextureUsage WGPU_ENUM_ATTRIBUTE; -typedef WGPUFlags WGPUTextureUsageFlags WGPU_ENUM_ATTRIBUTE; - -typedef void (*WGPUBufferMapCallback)(WGPUBufferMapAsyncStatus status, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUCompilationInfoCallback)(WGPUCompilationInfoRequestStatus status, struct WGPUCompilationInfo const * compilationInfo, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUCreateComputePipelineAsyncCallback)(WGPUCreatePipelineAsyncStatus status, WGPUComputePipeline pipeline, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUCreateRenderPipelineAsyncCallback)(WGPUCreatePipelineAsyncStatus status, WGPURenderPipeline pipeline, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUDeviceLostCallback)(WGPUDeviceLostReason reason, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUErrorCallback)(WGPUErrorType type, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProc)(void) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUQueueWorkDoneCallback)(WGPUQueueWorkDoneStatus status, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPURequestAdapterCallback)(WGPURequestAdapterStatus status, WGPUAdapter adapter, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPURequestDeviceCallback)(WGPURequestDeviceStatus status, WGPUDevice device, char const * message, void * userdata) WGPU_FUNCTION_ATTRIBUTE; - -typedef struct WGPUChainedStruct { - struct WGPUChainedStruct const * next; - WGPUSType sType; -} WGPUChainedStruct WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUChainedStructOut { - struct WGPUChainedStructOut * next; - WGPUSType sType; -} WGPUChainedStructOut WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUAdapterProperties { - WGPUChainedStructOut * nextInChain; - uint32_t vendorID; - char const * vendorName; - char const * architecture; - uint32_t deviceID; - char const * name; - char const * driverDescription; - WGPUAdapterType adapterType; - WGPUBackendType backendType; -} WGPUAdapterProperties WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBindGroupEntry { - WGPUChainedStruct const * nextInChain; - uint32_t binding; - WGPU_NULLABLE WGPUBuffer buffer; - uint64_t offset; - uint64_t size; - WGPU_NULLABLE WGPUSampler sampler; - WGPU_NULLABLE WGPUTextureView textureView; -} WGPUBindGroupEntry WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBlendComponent { - WGPUBlendOperation operation; - WGPUBlendFactor srcFactor; - WGPUBlendFactor dstFactor; -} WGPUBlendComponent WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBufferBindingLayout { - WGPUChainedStruct const * nextInChain; - WGPUBufferBindingType type; - WGPUBool hasDynamicOffset; - uint64_t minBindingSize; -} WGPUBufferBindingLayout WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBufferDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPUBufferUsageFlags usage; - uint64_t size; - WGPUBool mappedAtCreation; -} WGPUBufferDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUColor { - double r; - double g; - double b; - double a; -} WGPUColor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUCommandBufferDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; -} WGPUCommandBufferDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUCommandEncoderDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char 
const * label; -} WGPUCommandEncoderDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUCompilationMessage { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * message; - WGPUCompilationMessageType type; - uint64_t lineNum; - uint64_t linePos; - uint64_t offset; - uint64_t length; - uint64_t utf16LinePos; - uint64_t utf16Offset; - uint64_t utf16Length; -} WGPUCompilationMessage WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUComputePassTimestampWrites { - WGPUQuerySet querySet; - uint32_t beginningOfPassWriteIndex; - uint32_t endOfPassWriteIndex; -} WGPUComputePassTimestampWrites WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUConstantEntry { - WGPUChainedStruct const * nextInChain; - char const * key; - double value; -} WGPUConstantEntry WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUExtent3D { - uint32_t width; - uint32_t height; - uint32_t depthOrArrayLayers; -} WGPUExtent3D WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUInstanceDescriptor { - WGPUChainedStruct const * nextInChain; -} WGPUInstanceDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPULimits { - uint32_t maxTextureDimension1D; - uint32_t maxTextureDimension2D; - uint32_t maxTextureDimension3D; - uint32_t maxTextureArrayLayers; - uint32_t maxBindGroups; - uint32_t maxBindGroupsPlusVertexBuffers; - uint32_t maxBindingsPerBindGroup; - uint32_t maxDynamicUniformBuffersPerPipelineLayout; - uint32_t maxDynamicStorageBuffersPerPipelineLayout; - uint32_t maxSampledTexturesPerShaderStage; - uint32_t maxSamplersPerShaderStage; - uint32_t maxStorageBuffersPerShaderStage; - uint32_t maxStorageTexturesPerShaderStage; - uint32_t maxUniformBuffersPerShaderStage; - uint64_t maxUniformBufferBindingSize; - uint64_t maxStorageBufferBindingSize; - uint32_t minUniformBufferOffsetAlignment; - uint32_t minStorageBufferOffsetAlignment; - uint32_t maxVertexBuffers; - uint64_t maxBufferSize; - uint32_t maxVertexAttributes; - uint32_t maxVertexBufferArrayStride; - uint32_t maxInterStageShaderComponents; - uint32_t maxInterStageShaderVariables; - uint32_t maxColorAttachments; - uint32_t maxColorAttachmentBytesPerSample; - uint32_t maxComputeWorkgroupStorageSize; - uint32_t maxComputeInvocationsPerWorkgroup; - uint32_t maxComputeWorkgroupSizeX; - uint32_t maxComputeWorkgroupSizeY; - uint32_t maxComputeWorkgroupSizeZ; - uint32_t maxComputeWorkgroupsPerDimension; -} WGPULimits WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUMultisampleState { - WGPUChainedStruct const * nextInChain; - uint32_t count; - uint32_t mask; - WGPUBool alphaToCoverageEnabled; -} WGPUMultisampleState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUOrigin3D { - uint32_t x; - uint32_t y; - uint32_t z; -} WGPUOrigin3D WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUPipelineLayoutDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - size_t bindGroupLayoutCount; - WGPUBindGroupLayout const * bindGroupLayouts; -} WGPUPipelineLayoutDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUPrimitiveState -typedef struct WGPUPrimitiveDepthClipControl { - WGPUChainedStruct chain; - WGPUBool unclippedDepth; -} WGPUPrimitiveDepthClipControl WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUPrimitiveState { - WGPUChainedStruct const * nextInChain; - WGPUPrimitiveTopology topology; - WGPUIndexFormat stripIndexFormat; - WGPUFrontFace frontFace; - WGPUCullMode cullMode; -} WGPUPrimitiveState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUQuerySetDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE 
char const * label; - WGPUQueryType type; - uint32_t count; -} WGPUQuerySetDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUQueueDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; -} WGPUQueueDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURenderBundleDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; -} WGPURenderBundleDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURenderBundleEncoderDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - size_t colorFormatCount; - WGPUTextureFormat const * colorFormats; - WGPUTextureFormat depthStencilFormat; - uint32_t sampleCount; - WGPUBool depthReadOnly; - WGPUBool stencilReadOnly; -} WGPURenderBundleEncoderDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURenderPassDepthStencilAttachment { - WGPUTextureView view; - WGPULoadOp depthLoadOp; - WGPUStoreOp depthStoreOp; - float depthClearValue; - WGPUBool depthReadOnly; - WGPULoadOp stencilLoadOp; - WGPUStoreOp stencilStoreOp; - uint32_t stencilClearValue; - WGPUBool stencilReadOnly; -} WGPURenderPassDepthStencilAttachment WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPURenderPassDescriptor -typedef struct WGPURenderPassDescriptorMaxDrawCount { - WGPUChainedStruct chain; - uint64_t maxDrawCount; -} WGPURenderPassDescriptorMaxDrawCount WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURenderPassTimestampWrites { - WGPUQuerySet querySet; - uint32_t beginningOfPassWriteIndex; - uint32_t endOfPassWriteIndex; -} WGPURenderPassTimestampWrites WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURequestAdapterOptions { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE WGPUSurface compatibleSurface; - WGPUPowerPreference powerPreference; - WGPUBackendType backendType; - WGPUBool forceFallbackAdapter; -} WGPURequestAdapterOptions WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUSamplerBindingLayout { - WGPUChainedStruct const * nextInChain; - WGPUSamplerBindingType type; -} WGPUSamplerBindingLayout WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUSamplerDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPUAddressMode addressModeU; - WGPUAddressMode addressModeV; - WGPUAddressMode addressModeW; - WGPUFilterMode magFilter; - WGPUFilterMode minFilter; - WGPUMipmapFilterMode mipmapFilter; - float lodMinClamp; - float lodMaxClamp; - WGPUCompareFunction compare; - uint16_t maxAnisotropy; -} WGPUSamplerDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUShaderModuleCompilationHint { - WGPUChainedStruct const * nextInChain; - char const * entryPoint; - WGPUPipelineLayout layout; -} WGPUShaderModuleCompilationHint WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUShaderModuleDescriptor -typedef struct WGPUShaderModuleSPIRVDescriptor { - WGPUChainedStruct chain; - uint32_t codeSize; - uint32_t const * code; -} WGPUShaderModuleSPIRVDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUShaderModuleDescriptor -typedef struct WGPUShaderModuleWGSLDescriptor { - WGPUChainedStruct chain; - char const * code; -} WGPUShaderModuleWGSLDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUStencilFaceState { - WGPUCompareFunction compare; - WGPUStencilOperation failOp; - WGPUStencilOperation depthFailOp; - WGPUStencilOperation passOp; -} WGPUStencilFaceState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUStorageTextureBindingLayout { - WGPUChainedStruct const * nextInChain; - WGPUStorageTextureAccess access; 
- WGPUTextureFormat format; - WGPUTextureViewDimension viewDimension; -} WGPUStorageTextureBindingLayout WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUSurfaceCapabilities { - WGPUChainedStructOut * nextInChain; - size_t formatCount; - WGPUTextureFormat * formats; - size_t presentModeCount; - WGPUPresentMode * presentModes; - size_t alphaModeCount; - WGPUCompositeAlphaMode * alphaModes; -} WGPUSurfaceCapabilities WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUSurfaceConfiguration { - WGPUChainedStruct const * nextInChain; - WGPUDevice device; - WGPUTextureFormat format; - WGPUTextureUsageFlags usage; - size_t viewFormatCount; - WGPUTextureFormat const * viewFormats; - WGPUCompositeAlphaMode alphaMode; - uint32_t width; - uint32_t height; - WGPUPresentMode presentMode; -} WGPUSurfaceConfiguration WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUSurfaceDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; -} WGPUSurfaceDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUSurfaceDescriptor -typedef struct WGPUSurfaceDescriptorFromAndroidNativeWindow { - WGPUChainedStruct chain; - void * window; -} WGPUSurfaceDescriptorFromAndroidNativeWindow WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUSurfaceDescriptor -typedef struct WGPUSurfaceDescriptorFromCanvasHTMLSelector { - WGPUChainedStruct chain; - char const * selector; -} WGPUSurfaceDescriptorFromCanvasHTMLSelector WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUSurfaceDescriptor -typedef struct WGPUSurfaceDescriptorFromMetalLayer { - WGPUChainedStruct chain; - void * layer; -} WGPUSurfaceDescriptorFromMetalLayer WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUSurfaceDescriptor -typedef struct WGPUSurfaceDescriptorFromWaylandSurface { - WGPUChainedStruct chain; - void * display; - void * surface; -} WGPUSurfaceDescriptorFromWaylandSurface WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUSurfaceDescriptor -typedef struct WGPUSurfaceDescriptorFromWindowsHWND { - WGPUChainedStruct chain; - void * hinstance; - void * hwnd; -} WGPUSurfaceDescriptorFromWindowsHWND WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUSurfaceDescriptor -typedef struct WGPUSurfaceDescriptorFromXcbWindow { - WGPUChainedStruct chain; - void * connection; - uint32_t window; -} WGPUSurfaceDescriptorFromXcbWindow WGPU_STRUCTURE_ATTRIBUTE; - -// Can be chained in WGPUSurfaceDescriptor -typedef struct WGPUSurfaceDescriptorFromXlibWindow { - WGPUChainedStruct chain; - void * display; - uint32_t window; -} WGPUSurfaceDescriptorFromXlibWindow WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUSurfaceTexture { - WGPUTexture texture; - WGPUBool suboptimal; - WGPUSurfaceGetCurrentTextureStatus status; -} WGPUSurfaceTexture WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUTextureBindingLayout { - WGPUChainedStruct const * nextInChain; - WGPUTextureSampleType sampleType; - WGPUTextureViewDimension viewDimension; - WGPUBool multisampled; -} WGPUTextureBindingLayout WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUTextureDataLayout { - WGPUChainedStruct const * nextInChain; - uint64_t offset; - uint32_t bytesPerRow; - uint32_t rowsPerImage; -} WGPUTextureDataLayout WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUTextureViewDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPUTextureFormat format; - WGPUTextureViewDimension dimension; - uint32_t baseMipLevel; - uint32_t mipLevelCount; - uint32_t baseArrayLayer; - uint32_t arrayLayerCount; - WGPUTextureAspect aspect; -} 
WGPUTextureViewDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUVertexAttribute { - WGPUVertexFormat format; - uint64_t offset; - uint32_t shaderLocation; -} WGPUVertexAttribute WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBindGroupDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPUBindGroupLayout layout; - size_t entryCount; - WGPUBindGroupEntry const * entries; -} WGPUBindGroupDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBindGroupLayoutEntry { - WGPUChainedStruct const * nextInChain; - uint32_t binding; - WGPUShaderStageFlags visibility; - WGPUBufferBindingLayout buffer; - WGPUSamplerBindingLayout sampler; - WGPUTextureBindingLayout texture; - WGPUStorageTextureBindingLayout storageTexture; -} WGPUBindGroupLayoutEntry WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBlendState { - WGPUBlendComponent color; - WGPUBlendComponent alpha; -} WGPUBlendState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUCompilationInfo { - WGPUChainedStruct const * nextInChain; - size_t messageCount; - WGPUCompilationMessage const * messages; -} WGPUCompilationInfo WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUComputePassDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPU_NULLABLE WGPUComputePassTimestampWrites const * timestampWrites; -} WGPUComputePassDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUDepthStencilState { - WGPUChainedStruct const * nextInChain; - WGPUTextureFormat format; - WGPUBool depthWriteEnabled; - WGPUCompareFunction depthCompare; - WGPUStencilFaceState stencilFront; - WGPUStencilFaceState stencilBack; - uint32_t stencilReadMask; - uint32_t stencilWriteMask; - int32_t depthBias; - float depthBiasSlopeScale; - float depthBiasClamp; -} WGPUDepthStencilState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUImageCopyBuffer { - WGPUChainedStruct const * nextInChain; - WGPUTextureDataLayout layout; - WGPUBuffer buffer; -} WGPUImageCopyBuffer WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUImageCopyTexture { - WGPUChainedStruct const * nextInChain; - WGPUTexture texture; - uint32_t mipLevel; - WGPUOrigin3D origin; - WGPUTextureAspect aspect; -} WGPUImageCopyTexture WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUProgrammableStageDescriptor { - WGPUChainedStruct const * nextInChain; - WGPUShaderModule module; - char const * entryPoint; - size_t constantCount; - WGPUConstantEntry const * constants; -} WGPUProgrammableStageDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURenderPassColorAttachment { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE WGPUTextureView view; - WGPU_NULLABLE WGPUTextureView resolveTarget; - WGPULoadOp loadOp; - WGPUStoreOp storeOp; - WGPUColor clearValue; -} WGPURenderPassColorAttachment WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURequiredLimits { - WGPUChainedStruct const * nextInChain; - WGPULimits limits; -} WGPURequiredLimits WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUShaderModuleDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - size_t hintCount; - WGPUShaderModuleCompilationHint const * hints; -} WGPUShaderModuleDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUSupportedLimits { - WGPUChainedStructOut * nextInChain; - WGPULimits limits; -} WGPUSupportedLimits WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUTextureDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPUTextureUsageFlags usage; - WGPUTextureDimension dimension; 
- WGPUExtent3D size; - WGPUTextureFormat format; - uint32_t mipLevelCount; - uint32_t sampleCount; - size_t viewFormatCount; - WGPUTextureFormat const * viewFormats; -} WGPUTextureDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUVertexBufferLayout { - uint64_t arrayStride; - WGPUVertexStepMode stepMode; - size_t attributeCount; - WGPUVertexAttribute const * attributes; -} WGPUVertexBufferLayout WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUBindGroupLayoutDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - size_t entryCount; - WGPUBindGroupLayoutEntry const * entries; -} WGPUBindGroupLayoutDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUColorTargetState { - WGPUChainedStruct const * nextInChain; - WGPUTextureFormat format; - WGPU_NULLABLE WGPUBlendState const * blend; - WGPUColorWriteMaskFlags writeMask; -} WGPUColorTargetState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUComputePipelineDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPU_NULLABLE WGPUPipelineLayout layout; - WGPUProgrammableStageDescriptor compute; -} WGPUComputePipelineDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUDeviceDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - size_t requiredFeatureCount; - WGPUFeatureName const * requiredFeatures; - WGPU_NULLABLE WGPURequiredLimits const * requiredLimits; - WGPUQueueDescriptor defaultQueue; - WGPUDeviceLostCallback deviceLostCallback; - void * deviceLostUserdata; -} WGPUDeviceDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURenderPassDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - size_t colorAttachmentCount; - WGPURenderPassColorAttachment const * colorAttachments; - WGPU_NULLABLE WGPURenderPassDepthStencilAttachment const * depthStencilAttachment; - WGPU_NULLABLE WGPUQuerySet occlusionQuerySet; - WGPU_NULLABLE WGPURenderPassTimestampWrites const * timestampWrites; -} WGPURenderPassDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUVertexState { - WGPUChainedStruct const * nextInChain; - WGPUShaderModule module; - char const * entryPoint; - size_t constantCount; - WGPUConstantEntry const * constants; - size_t bufferCount; - WGPUVertexBufferLayout const * buffers; -} WGPUVertexState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPUFragmentState { - WGPUChainedStruct const * nextInChain; - WGPUShaderModule module; - char const * entryPoint; - size_t constantCount; - WGPUConstantEntry const * constants; - size_t targetCount; - WGPUColorTargetState const * targets; -} WGPUFragmentState WGPU_STRUCTURE_ATTRIBUTE; - -typedef struct WGPURenderPipelineDescriptor { - WGPUChainedStruct const * nextInChain; - WGPU_NULLABLE char const * label; - WGPU_NULLABLE WGPUPipelineLayout layout; - WGPUVertexState vertex; - WGPUPrimitiveState primitive; - WGPU_NULLABLE WGPUDepthStencilState const * depthStencil; - WGPUMultisampleState multisample; - WGPU_NULLABLE WGPUFragmentState const * fragment; -} WGPURenderPipelineDescriptor WGPU_STRUCTURE_ATTRIBUTE; - -#ifdef __cplusplus -extern "C" { -#endif - -#if !defined(WGPU_SKIP_PROCS) - -typedef WGPUInstance (*WGPUProcCreateInstance)(WGPU_NULLABLE WGPUInstanceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUProc (*WGPUProcGetProcAddress)(WGPUDevice device, char const * procName) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Adapter -typedef size_t (*WGPUProcAdapterEnumerateFeatures)(WGPUAdapter adapter, WGPUFeatureName 
* features) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBool (*WGPUProcAdapterGetLimits)(WGPUAdapter adapter, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcAdapterGetProperties)(WGPUAdapter adapter, WGPUAdapterProperties * properties) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBool (*WGPUProcAdapterHasFeature)(WGPUAdapter adapter, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcAdapterRequestDevice)(WGPUAdapter adapter, WGPU_NULLABLE WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcAdapterReference)(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcAdapterRelease)(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of BindGroup -typedef void (*WGPUProcBindGroupSetLabel)(WGPUBindGroup bindGroup, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBindGroupReference)(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBindGroupRelease)(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of BindGroupLayout -typedef void (*WGPUProcBindGroupLayoutSetLabel)(WGPUBindGroupLayout bindGroupLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBindGroupLayoutReference)(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBindGroupLayoutRelease)(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Buffer -typedef void (*WGPUProcBufferDestroy)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -typedef void const * (*WGPUProcBufferGetConstMappedRange)(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBufferMapState (*WGPUProcBufferGetMapState)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -typedef void * (*WGPUProcBufferGetMappedRange)(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef uint64_t (*WGPUProcBufferGetSize)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBufferUsageFlags (*WGPUProcBufferGetUsage)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBufferMapAsync)(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBufferSetLabel)(WGPUBuffer buffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBufferUnmap)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBufferReference)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcBufferRelease)(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of CommandBuffer -typedef void (*WGPUProcCommandBufferSetLabel)(WGPUCommandBuffer commandBuffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandBufferReference)(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandBufferRelease)(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of CommandEncoder -typedef WGPUComputePassEncoder (*WGPUProcCommandEncoderBeginComputePass)(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUComputePassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPURenderPassEncoder (*WGPUProcCommandEncoderBeginRenderPass)(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderClearBuffer)(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, 
uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderCopyBufferToBuffer)(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderCopyBufferToTexture)(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderCopyTextureToBuffer)(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderCopyTextureToTexture)(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUCommandBuffer (*WGPUProcCommandEncoderFinish)(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUCommandBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderInsertDebugMarker)(WGPUCommandEncoder commandEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderPopDebugGroup)(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderPushDebugGroup)(WGPUCommandEncoder commandEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderResolveQuerySet)(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderSetLabel)(WGPUCommandEncoder commandEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderWriteTimestamp)(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t queryIndex) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderReference)(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcCommandEncoderRelease)(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of ComputePassEncoder -typedef void (*WGPUProcComputePassEncoderDispatchWorkgroups)(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderDispatchWorkgroupsIndirect)(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderEnd)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderInsertDebugMarker)(WGPUComputePassEncoder computePassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderPopDebugGroup)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderPushDebugGroup)(WGPUComputePassEncoder computePassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderSetBindGroup)(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; -typedef void 
(*WGPUProcComputePassEncoderSetLabel)(WGPUComputePassEncoder computePassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderSetPipeline)(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderReference)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePassEncoderRelease)(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of ComputePipeline -typedef WGPUBindGroupLayout (*WGPUProcComputePipelineGetBindGroupLayout)(WGPUComputePipeline computePipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePipelineSetLabel)(WGPUComputePipeline computePipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePipelineReference)(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcComputePipelineRelease)(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Device -typedef WGPUBindGroup (*WGPUProcDeviceCreateBindGroup)(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBindGroupLayout (*WGPUProcDeviceCreateBindGroupLayout)(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBuffer (*WGPUProcDeviceCreateBuffer)(WGPUDevice device, WGPUBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUCommandEncoder (*WGPUProcDeviceCreateCommandEncoder)(WGPUDevice device, WGPU_NULLABLE WGPUCommandEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUComputePipeline (*WGPUProcDeviceCreateComputePipeline)(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDeviceCreateComputePipelineAsync)(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor, WGPUCreateComputePipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUPipelineLayout (*WGPUProcDeviceCreatePipelineLayout)(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUQuerySet (*WGPUProcDeviceCreateQuerySet)(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPURenderBundleEncoder (*WGPUProcDeviceCreateRenderBundleEncoder)(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPURenderPipeline (*WGPUProcDeviceCreateRenderPipeline)(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDeviceCreateRenderPipelineAsync)(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor, WGPUCreateRenderPipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUSampler (*WGPUProcDeviceCreateSampler)(WGPUDevice device, WGPU_NULLABLE WGPUSamplerDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUShaderModule (*WGPUProcDeviceCreateShaderModule)(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUTexture (*WGPUProcDeviceCreateTexture)(WGPUDevice device, WGPUTextureDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDeviceDestroy)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; -typedef size_t (*WGPUProcDeviceEnumerateFeatures)(WGPUDevice device, WGPUFeatureName * features) 
WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBool (*WGPUProcDeviceGetLimits)(WGPUDevice device, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUQueue (*WGPUProcDeviceGetQueue)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUBool (*WGPUProcDeviceHasFeature)(WGPUDevice device, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDevicePopErrorScope)(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDevicePushErrorScope)(WGPUDevice device, WGPUErrorFilter filter) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDeviceSetLabel)(WGPUDevice device, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDeviceSetUncapturedErrorCallback)(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDeviceReference)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcDeviceRelease)(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Instance -typedef WGPUSurface (*WGPUProcInstanceCreateSurface)(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcInstanceProcessEvents)(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcInstanceRequestAdapter)(WGPUInstance instance, WGPU_NULLABLE WGPURequestAdapterOptions const * options, WGPURequestAdapterCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcInstanceReference)(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcInstanceRelease)(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of PipelineLayout -typedef void (*WGPUProcPipelineLayoutSetLabel)(WGPUPipelineLayout pipelineLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcPipelineLayoutReference)(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcPipelineLayoutRelease)(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of QuerySet -typedef void (*WGPUProcQuerySetDestroy)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -typedef uint32_t (*WGPUProcQuerySetGetCount)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUQueryType (*WGPUProcQuerySetGetType)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQuerySetSetLabel)(WGPUQuerySet querySet, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQuerySetReference)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQuerySetRelease)(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Queue -typedef void (*WGPUProcQueueOnSubmittedWorkDone)(WGPUQueue queue, WGPUQueueWorkDoneCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQueueSetLabel)(WGPUQueue queue, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQueueSubmit)(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQueueWriteBuffer)(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQueueWriteTexture)(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcQueueReference)(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; -typedef void 
(*WGPUProcQueueRelease)(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of RenderBundle -typedef void (*WGPUProcRenderBundleSetLabel)(WGPURenderBundle renderBundle, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleReference)(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleRelease)(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of RenderBundleEncoder -typedef void (*WGPUProcRenderBundleEncoderDraw)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderDrawIndexed)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderDrawIndexedIndirect)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderDrawIndirect)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPURenderBundle (*WGPUProcRenderBundleEncoderFinish)(WGPURenderBundleEncoder renderBundleEncoder, WGPU_NULLABLE WGPURenderBundleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderInsertDebugMarker)(WGPURenderBundleEncoder renderBundleEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderPopDebugGroup)(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderPushDebugGroup)(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderSetBindGroup)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderSetIndexBuffer)(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderSetLabel)(WGPURenderBundleEncoder renderBundleEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderSetPipeline)(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderSetVertexBuffer)(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderReference)(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderBundleEncoderRelease)(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of RenderPassEncoder -typedef void (*WGPUProcRenderPassEncoderBeginOcclusionQuery)(WGPURenderPassEncoder renderPassEncoder, uint32_t queryIndex) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderDraw)(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -typedef void 
(*WGPUProcRenderPassEncoderDrawIndexed)(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderDrawIndexedIndirect)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderDrawIndirect)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderEnd)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderEndOcclusionQuery)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderExecuteBundles)(WGPURenderPassEncoder renderPassEncoder, size_t bundleCount, WGPURenderBundle const * bundles) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderInsertDebugMarker)(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderPopDebugGroup)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderPushDebugGroup)(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetBindGroup)(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetBlendConstant)(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetIndexBuffer)(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetLabel)(WGPURenderPassEncoder renderPassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetPipeline)(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetScissorRect)(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetStencilReference)(WGPURenderPassEncoder renderPassEncoder, uint32_t reference) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetVertexBuffer)(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderSetViewport)(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderReference)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPassEncoderRelease)(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of RenderPipeline -typedef WGPUBindGroupLayout (*WGPUProcRenderPipelineGetBindGroupLayout)(WGPURenderPipeline renderPipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPipelineSetLabel)(WGPURenderPipeline renderPipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; 
-typedef void (*WGPUProcRenderPipelineReference)(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcRenderPipelineRelease)(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Sampler -typedef void (*WGPUProcSamplerSetLabel)(WGPUSampler sampler, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSamplerReference)(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSamplerRelease)(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of ShaderModule -typedef void (*WGPUProcShaderModuleGetCompilationInfo)(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcShaderModuleSetLabel)(WGPUShaderModule shaderModule, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcShaderModuleReference)(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcShaderModuleRelease)(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Surface -typedef void (*WGPUProcSurfaceConfigure)(WGPUSurface surface, WGPUSurfaceConfiguration const * config) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSurfaceGetCapabilities)(WGPUSurface surface, WGPUAdapter adapter, WGPUSurfaceCapabilities * capabilities) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSurfaceGetCurrentTexture)(WGPUSurface surface, WGPUSurfaceTexture * surfaceTexture) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUTextureFormat (*WGPUProcSurfaceGetPreferredFormat)(WGPUSurface surface, WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSurfacePresent)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSurfaceUnconfigure)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSurfaceReference)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcSurfaceRelease)(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of SurfaceCapabilities -typedef void (*WGPUProcSurfaceCapabilitiesFreeMembers)(WGPUSurfaceCapabilities capabilities) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of Texture -typedef WGPUTextureView (*WGPUProcTextureCreateView)(WGPUTexture texture, WGPU_NULLABLE WGPUTextureViewDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcTextureDestroy)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef uint32_t (*WGPUProcTextureGetDepthOrArrayLayers)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUTextureDimension (*WGPUProcTextureGetDimension)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUTextureFormat (*WGPUProcTextureGetFormat)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef uint32_t (*WGPUProcTextureGetHeight)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef uint32_t (*WGPUProcTextureGetMipLevelCount)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef uint32_t (*WGPUProcTextureGetSampleCount)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef WGPUTextureUsageFlags (*WGPUProcTextureGetUsage)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef uint32_t (*WGPUProcTextureGetWidth)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcTextureSetLabel)(WGPUTexture texture, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcTextureReference)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcTextureRelease)(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; - -// Procs of TextureView -typedef void (*WGPUProcTextureViewSetLabel)(WGPUTextureView 
textureView, char const * label) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcTextureViewReference)(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; -typedef void (*WGPUProcTextureViewRelease)(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; - -#endif // !defined(WGPU_SKIP_PROCS) - -#if !defined(WGPU_SKIP_DECLARATIONS) - -WGPU_EXPORT WGPUInstance wgpuCreateInstance(WGPU_NULLABLE WGPUInstanceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUProc wgpuGetProcAddress(WGPUDevice device, char const * procName) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Adapter -WGPU_EXPORT size_t wgpuAdapterEnumerateFeatures(WGPUAdapter adapter, WGPUFeatureName * features) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBool wgpuAdapterGetLimits(WGPUAdapter adapter, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuAdapterGetProperties(WGPUAdapter adapter, WGPUAdapterProperties * properties) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBool wgpuAdapterHasFeature(WGPUAdapter adapter, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuAdapterRequestDevice(WGPUAdapter adapter, WGPU_NULLABLE WGPUDeviceDescriptor const * descriptor, WGPURequestDeviceCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuAdapterReference(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuAdapterRelease(WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of BindGroup -WGPU_EXPORT void wgpuBindGroupSetLabel(WGPUBindGroup bindGroup, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBindGroupReference(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBindGroupRelease(WGPUBindGroup bindGroup) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of BindGroupLayout -WGPU_EXPORT void wgpuBindGroupLayoutSetLabel(WGPUBindGroupLayout bindGroupLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBindGroupLayoutReference(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBindGroupLayoutRelease(WGPUBindGroupLayout bindGroupLayout) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Buffer -WGPU_EXPORT void wgpuBufferDestroy(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void const * wgpuBufferGetConstMappedRange(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBufferMapState wgpuBufferGetMapState(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void * wgpuBufferGetMappedRange(WGPUBuffer buffer, size_t offset, size_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT uint64_t wgpuBufferGetSize(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBufferUsageFlags wgpuBufferGetUsage(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBufferMapAsync(WGPUBuffer buffer, WGPUMapModeFlags mode, size_t offset, size_t size, WGPUBufferMapCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBufferSetLabel(WGPUBuffer buffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBufferUnmap(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBufferReference(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuBufferRelease(WGPUBuffer buffer) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of CommandBuffer -WGPU_EXPORT void wgpuCommandBufferSetLabel(WGPUCommandBuffer commandBuffer, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandBufferReference(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; 
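The buffer methods above (wgpuBufferMapAsync, wgpuBufferGetMappedRange / wgpuBufferGetConstMappedRange, wgpuBufferUnmap) form the read-back path: mapping is requested asynchronously, the mapped range is only valid after the callback reports success, and the buffer must be unmapped before the GPU can use it again. A rough two-step sketch, assuming a buffer created with map-read usage, the WGPUBufferMapCallback signature (status, userdata) and the WGPUMapMode_Read / WGPUBufferMapAsyncStatus_Success constants defined earlier in this header; how completion is driven (e.g. polling the device) is backend-specific and omitted:

    #include <string.h>
    #include "webgpu.h"

    /* Callback invoked when the map request completes; records the result. */
    static void on_buffer_mapped(WGPUBufferMapAsyncStatus status, void *userdata)
    {
        *(int *)userdata = (status == WGPUBufferMapAsyncStatus_Success) ? 1 : -1;
    }

    /* Step 1: request the mapping; *state becomes non-zero once the callback
       has run (the caller drives completion, which is not shown here). */
    static void request_read_back(WGPUBuffer buffer, size_t size, int *state)
    {
        *state = 0;
        wgpuBufferMapAsync(buffer, WGPUMapMode_Read, 0, size, on_buffer_mapped, state);
    }

    /* Step 2: once mapping succeeded, copy the data out and unmap. */
    static void finish_read_back(WGPUBuffer buffer, void *dst, size_t size)
    {
        const void *src = wgpuBufferGetConstMappedRange(buffer, 0, size);
        memcpy(dst, src, size);
        wgpuBufferUnmap(buffer);
    }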
-WGPU_EXPORT void wgpuCommandBufferRelease(WGPUCommandBuffer commandBuffer) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of CommandEncoder -WGPU_EXPORT WGPUComputePassEncoder wgpuCommandEncoderBeginComputePass(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUComputePassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPURenderPassEncoder wgpuCommandEncoderBeginRenderPass(WGPUCommandEncoder commandEncoder, WGPURenderPassDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderClearBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderCopyBufferToBuffer(WGPUCommandEncoder commandEncoder, WGPUBuffer source, uint64_t sourceOffset, WGPUBuffer destination, uint64_t destinationOffset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderCopyBufferToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyBuffer const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderCopyTextureToBuffer(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyBuffer const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderCopyTextureToTexture(WGPUCommandEncoder commandEncoder, WGPUImageCopyTexture const * source, WGPUImageCopyTexture const * destination, WGPUExtent3D const * copySize) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUCommandBuffer wgpuCommandEncoderFinish(WGPUCommandEncoder commandEncoder, WGPU_NULLABLE WGPUCommandBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderInsertDebugMarker(WGPUCommandEncoder commandEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderPopDebugGroup(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderPushDebugGroup(WGPUCommandEncoder commandEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderResolveQuerySet(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t firstQuery, uint32_t queryCount, WGPUBuffer destination, uint64_t destinationOffset) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderSetLabel(WGPUCommandEncoder commandEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderWriteTimestamp(WGPUCommandEncoder commandEncoder, WGPUQuerySet querySet, uint32_t queryIndex) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderReference(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuCommandEncoderRelease(WGPUCommandEncoder commandEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of ComputePassEncoder -WGPU_EXPORT void wgpuComputePassEncoderDispatchWorkgroups(WGPUComputePassEncoder computePassEncoder, uint32_t workgroupCountX, uint32_t workgroupCountY, uint32_t workgroupCountZ) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderDispatchWorkgroupsIndirect(WGPUComputePassEncoder computePassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderEnd(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderInsertDebugMarker(WGPUComputePassEncoder computePassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void 
wgpuComputePassEncoderPopDebugGroup(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderPushDebugGroup(WGPUComputePassEncoder computePassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderSetBindGroup(WGPUComputePassEncoder computePassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderSetLabel(WGPUComputePassEncoder computePassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderSetPipeline(WGPUComputePassEncoder computePassEncoder, WGPUComputePipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderReference(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePassEncoderRelease(WGPUComputePassEncoder computePassEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of ComputePipeline -WGPU_EXPORT WGPUBindGroupLayout wgpuComputePipelineGetBindGroupLayout(WGPUComputePipeline computePipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePipelineSetLabel(WGPUComputePipeline computePipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePipelineReference(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuComputePipelineRelease(WGPUComputePipeline computePipeline) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Device -WGPU_EXPORT WGPUBindGroup wgpuDeviceCreateBindGroup(WGPUDevice device, WGPUBindGroupDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBindGroupLayout wgpuDeviceCreateBindGroupLayout(WGPUDevice device, WGPUBindGroupLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBuffer wgpuDeviceCreateBuffer(WGPUDevice device, WGPUBufferDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUCommandEncoder wgpuDeviceCreateCommandEncoder(WGPUDevice device, WGPU_NULLABLE WGPUCommandEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUComputePipeline wgpuDeviceCreateComputePipeline(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDeviceCreateComputePipelineAsync(WGPUDevice device, WGPUComputePipelineDescriptor const * descriptor, WGPUCreateComputePipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUPipelineLayout wgpuDeviceCreatePipelineLayout(WGPUDevice device, WGPUPipelineLayoutDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUQuerySet wgpuDeviceCreateQuerySet(WGPUDevice device, WGPUQuerySetDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPURenderBundleEncoder wgpuDeviceCreateRenderBundleEncoder(WGPUDevice device, WGPURenderBundleEncoderDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPURenderPipeline wgpuDeviceCreateRenderPipeline(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDeviceCreateRenderPipelineAsync(WGPUDevice device, WGPURenderPipelineDescriptor const * descriptor, WGPUCreateRenderPipelineAsyncCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUSampler wgpuDeviceCreateSampler(WGPUDevice device, WGPU_NULLABLE WGPUSamplerDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUShaderModule 
wgpuDeviceCreateShaderModule(WGPUDevice device, WGPUShaderModuleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUTexture wgpuDeviceCreateTexture(WGPUDevice device, WGPUTextureDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDeviceDestroy(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT size_t wgpuDeviceEnumerateFeatures(WGPUDevice device, WGPUFeatureName * features) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBool wgpuDeviceGetLimits(WGPUDevice device, WGPUSupportedLimits * limits) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUQueue wgpuDeviceGetQueue(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUBool wgpuDeviceHasFeature(WGPUDevice device, WGPUFeatureName feature) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDevicePopErrorScope(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDevicePushErrorScope(WGPUDevice device, WGPUErrorFilter filter) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDeviceSetLabel(WGPUDevice device, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDeviceSetUncapturedErrorCallback(WGPUDevice device, WGPUErrorCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDeviceReference(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuDeviceRelease(WGPUDevice device) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Instance -WGPU_EXPORT WGPUSurface wgpuInstanceCreateSurface(WGPUInstance instance, WGPUSurfaceDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuInstanceProcessEvents(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuInstanceRequestAdapter(WGPUInstance instance, WGPU_NULLABLE WGPURequestAdapterOptions const * options, WGPURequestAdapterCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuInstanceReference(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuInstanceRelease(WGPUInstance instance) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of PipelineLayout -WGPU_EXPORT void wgpuPipelineLayoutSetLabel(WGPUPipelineLayout pipelineLayout, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuPipelineLayoutReference(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuPipelineLayoutRelease(WGPUPipelineLayout pipelineLayout) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of QuerySet -WGPU_EXPORT void wgpuQuerySetDestroy(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT uint32_t wgpuQuerySetGetCount(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUQueryType wgpuQuerySetGetType(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQuerySetSetLabel(WGPUQuerySet querySet, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQuerySetReference(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQuerySetRelease(WGPUQuerySet querySet) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Queue -WGPU_EXPORT void wgpuQueueOnSubmittedWorkDone(WGPUQueue queue, WGPUQueueWorkDoneCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQueueSetLabel(WGPUQueue queue, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQueueSubmit(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQueueWriteBuffer(WGPUQueue queue, WGPUBuffer buffer, uint64_t bufferOffset, void const * data, size_t size) WGPU_FUNCTION_ATTRIBUTE; 
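Together with wgpuDeviceCreateCommandEncoder and wgpuDeviceGetQueue above, the queue methods are what every frame or compute dispatch ultimately funnels into: commands are recorded on an encoder, finished into a command buffer, and handed to wgpuQueueSubmit, while wgpuQueueWriteBuffer is the simple path for pushing CPU data into a buffer. A compressed sketch, assuming a device obtained via wgpuAdapterRequestDevice and a buffer created elsewhere with copy-destination usage:

    #include "webgpu.h"

    /* Upload four floats, then record and submit an (empty) command buffer. */
    static void upload_and_submit(WGPUDevice device, WGPUBuffer buffer)
    {
        WGPUQueue queue = wgpuDeviceGetQueue(device);

        const float data[4] = {0.0f, 1.0f, 2.0f, 3.0f};
        wgpuQueueWriteBuffer(queue, buffer, 0, data, sizeof(data));

        WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(device, NULL);
        /* ... record render/compute passes here ... */
        WGPUCommandBuffer commands = wgpuCommandEncoderFinish(encoder, NULL);
        wgpuQueueSubmit(queue, 1, &commands);

        wgpuCommandBufferRelease(commands);
        wgpuCommandEncoderRelease(encoder);
    }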
-WGPU_EXPORT void wgpuQueueWriteTexture(WGPUQueue queue, WGPUImageCopyTexture const * destination, void const * data, size_t dataSize, WGPUTextureDataLayout const * dataLayout, WGPUExtent3D const * writeSize) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQueueReference(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuQueueRelease(WGPUQueue queue) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of RenderBundle -WGPU_EXPORT void wgpuRenderBundleSetLabel(WGPURenderBundle renderBundle, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleReference(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleRelease(WGPURenderBundle renderBundle) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of RenderBundleEncoder -WGPU_EXPORT void wgpuRenderBundleEncoderDraw(WGPURenderBundleEncoder renderBundleEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexed(WGPURenderBundleEncoder renderBundleEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndexedIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderDrawIndirect(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPURenderBundle wgpuRenderBundleEncoderFinish(WGPURenderBundleEncoder renderBundleEncoder, WGPU_NULLABLE WGPURenderBundleDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderInsertDebugMarker(WGPURenderBundleEncoder renderBundleEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderPopDebugGroup(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderPushDebugGroup(WGPURenderBundleEncoder renderBundleEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderSetBindGroup(WGPURenderBundleEncoder renderBundleEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderSetIndexBuffer(WGPURenderBundleEncoder renderBundleEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderSetLabel(WGPURenderBundleEncoder renderBundleEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderSetPipeline(WGPURenderBundleEncoder renderBundleEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderSetVertexBuffer(WGPURenderBundleEncoder renderBundleEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderReference(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderBundleEncoderRelease(WGPURenderBundleEncoder renderBundleEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of RenderPassEncoder -WGPU_EXPORT void wgpuRenderPassEncoderBeginOcclusionQuery(WGPURenderPassEncoder renderPassEncoder, uint32_t queryIndex) 
WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderDraw(WGPURenderPassEncoder renderPassEncoder, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexed(WGPURenderPassEncoder renderPassEncoder, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t baseVertex, uint32_t firstInstance) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderDrawIndexedIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderDrawIndirect(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer indirectBuffer, uint64_t indirectOffset) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderEnd(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderEndOcclusionQuery(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderExecuteBundles(WGPURenderPassEncoder renderPassEncoder, size_t bundleCount, WGPURenderBundle const * bundles) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderInsertDebugMarker(WGPURenderPassEncoder renderPassEncoder, char const * markerLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderPopDebugGroup(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderPushDebugGroup(WGPURenderPassEncoder renderPassEncoder, char const * groupLabel) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetBindGroup(WGPURenderPassEncoder renderPassEncoder, uint32_t groupIndex, WGPU_NULLABLE WGPUBindGroup group, size_t dynamicOffsetCount, uint32_t const * dynamicOffsets) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetBlendConstant(WGPURenderPassEncoder renderPassEncoder, WGPUColor const * color) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetIndexBuffer(WGPURenderPassEncoder renderPassEncoder, WGPUBuffer buffer, WGPUIndexFormat format, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetLabel(WGPURenderPassEncoder renderPassEncoder, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetPipeline(WGPURenderPassEncoder renderPassEncoder, WGPURenderPipeline pipeline) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetScissorRect(WGPURenderPassEncoder renderPassEncoder, uint32_t x, uint32_t y, uint32_t width, uint32_t height) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetStencilReference(WGPURenderPassEncoder renderPassEncoder, uint32_t reference) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetVertexBuffer(WGPURenderPassEncoder renderPassEncoder, uint32_t slot, WGPU_NULLABLE WGPUBuffer buffer, uint64_t offset, uint64_t size) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderSetViewport(WGPURenderPassEncoder renderPassEncoder, float x, float y, float width, float height, float minDepth, float maxDepth) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderReference(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPassEncoderRelease(WGPURenderPassEncoder renderPassEncoder) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of RenderPipeline -WGPU_EXPORT WGPUBindGroupLayout wgpuRenderPipelineGetBindGroupLayout(WGPURenderPipeline 
renderPipeline, uint32_t groupIndex) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPipelineSetLabel(WGPURenderPipeline renderPipeline, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPipelineReference(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuRenderPipelineRelease(WGPURenderPipeline renderPipeline) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Sampler -WGPU_EXPORT void wgpuSamplerSetLabel(WGPUSampler sampler, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSamplerReference(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSamplerRelease(WGPUSampler sampler) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of ShaderModule -WGPU_EXPORT void wgpuShaderModuleGetCompilationInfo(WGPUShaderModule shaderModule, WGPUCompilationInfoCallback callback, void * userdata) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuShaderModuleSetLabel(WGPUShaderModule shaderModule, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuShaderModuleReference(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuShaderModuleRelease(WGPUShaderModule shaderModule) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Surface -WGPU_EXPORT void wgpuSurfaceConfigure(WGPUSurface surface, WGPUSurfaceConfiguration const * config) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSurfaceGetCapabilities(WGPUSurface surface, WGPUAdapter adapter, WGPUSurfaceCapabilities * capabilities) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSurfaceGetCurrentTexture(WGPUSurface surface, WGPUSurfaceTexture * surfaceTexture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUTextureFormat wgpuSurfaceGetPreferredFormat(WGPUSurface surface, WGPUAdapter adapter) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSurfacePresent(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSurfaceUnconfigure(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSurfaceReference(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuSurfaceRelease(WGPUSurface surface) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of SurfaceCapabilities -WGPU_EXPORT void wgpuSurfaceCapabilitiesFreeMembers(WGPUSurfaceCapabilities capabilities) WGPU_FUNCTION_ATTRIBUTE; - -// Methods of Texture -WGPU_EXPORT WGPUTextureView wgpuTextureCreateView(WGPUTexture texture, WGPU_NULLABLE WGPUTextureViewDescriptor const * descriptor) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuTextureDestroy(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT uint32_t wgpuTextureGetDepthOrArrayLayers(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUTextureDimension wgpuTextureGetDimension(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUTextureFormat wgpuTextureGetFormat(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT uint32_t wgpuTextureGetHeight(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT uint32_t wgpuTextureGetMipLevelCount(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT uint32_t wgpuTextureGetSampleCount(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT WGPUTextureUsageFlags wgpuTextureGetUsage(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT uint32_t wgpuTextureGetWidth(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuTextureSetLabel(WGPUTexture texture, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuTextureReference(WGPUTexture texture) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuTextureRelease(WGPUTexture texture) 
WGPU_FUNCTION_ATTRIBUTE; - -// Methods of TextureView -WGPU_EXPORT void wgpuTextureViewSetLabel(WGPUTextureView textureView, char const * label) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuTextureViewReference(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; -WGPU_EXPORT void wgpuTextureViewRelease(WGPUTextureView textureView) WGPU_FUNCTION_ATTRIBUTE; - -#endif // !defined(WGPU_SKIP_DECLARATIONS) - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif // WEBGPU_H_ diff --git a/wgpu/resources/webgpu.idl b/wgpu/resources/webgpu.idl deleted file mode 100644 index 448e21f..0000000 --- a/wgpu/resources/webgpu.idl +++ /dev/null @@ -1,1314 +0,0 @@ -// Copyright (C) [2023] World Wide Web Consortium, -// (Massachusetts Institute of Technology, European Research Consortium for -// Informatics and Mathematics, Keio University, Beihang). -// All Rights Reserved. -// -// This work is distributed under the W3C (R) Software License [1] in the hope -// that it will be useful, but WITHOUT ANY WARRANTY; without even the implied -// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// -// [1] http://www.w3.org/Consortium/Legal/copyright-software - -// **** This file is auto-generated. Do not edit. **** - -interface mixin GPUObjectBase { - attribute USVString label; -}; - -dictionary GPUObjectDescriptorBase { - USVString label = ""; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUSupportedLimits { - readonly attribute unsigned long maxTextureDimension1D; - readonly attribute unsigned long maxTextureDimension2D; - readonly attribute unsigned long maxTextureDimension3D; - readonly attribute unsigned long maxTextureArrayLayers; - readonly attribute unsigned long maxBindGroups; - readonly attribute unsigned long maxBindGroupsPlusVertexBuffers; - readonly attribute unsigned long maxBindingsPerBindGroup; - readonly attribute unsigned long maxDynamicUniformBuffersPerPipelineLayout; - readonly attribute unsigned long maxDynamicStorageBuffersPerPipelineLayout; - readonly attribute unsigned long maxSampledTexturesPerShaderStage; - readonly attribute unsigned long maxSamplersPerShaderStage; - readonly attribute unsigned long maxStorageBuffersPerShaderStage; - readonly attribute unsigned long maxStorageTexturesPerShaderStage; - readonly attribute unsigned long maxUniformBuffersPerShaderStage; - readonly attribute unsigned long long maxUniformBufferBindingSize; - readonly attribute unsigned long long maxStorageBufferBindingSize; - readonly attribute unsigned long minUniformBufferOffsetAlignment; - readonly attribute unsigned long minStorageBufferOffsetAlignment; - readonly attribute unsigned long maxVertexBuffers; - readonly attribute unsigned long long maxBufferSize; - readonly attribute unsigned long maxVertexAttributes; - readonly attribute unsigned long maxVertexBufferArrayStride; - readonly attribute unsigned long maxInterStageShaderComponents; - readonly attribute unsigned long maxInterStageShaderVariables; - readonly attribute unsigned long maxColorAttachments; - readonly attribute unsigned long maxColorAttachmentBytesPerSample; - readonly attribute unsigned long maxComputeWorkgroupStorageSize; - readonly attribute unsigned long maxComputeInvocationsPerWorkgroup; - readonly attribute unsigned long maxComputeWorkgroupSizeX; - readonly attribute unsigned long maxComputeWorkgroupSizeY; - readonly attribute unsigned long maxComputeWorkgroupSizeZ; - readonly attribute unsigned long maxComputeWorkgroupsPerDimension; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] 
-interface GPUSupportedFeatures { - readonly setlike; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface WGSLLanguageFeatures { - readonly setlike; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUAdapterInfo { - readonly attribute DOMString vendor; - readonly attribute DOMString architecture; - readonly attribute DOMString device; - readonly attribute DOMString description; -}; - -interface mixin NavigatorGPU { - [SameObject, SecureContext] readonly attribute GPU gpu; -}; -Navigator includes NavigatorGPU; -WorkerNavigator includes NavigatorGPU; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPU { - Promise requestAdapter(optional GPURequestAdapterOptions options = {}); - GPUTextureFormat getPreferredCanvasFormat(); - [SameObject] readonly attribute WGSLLanguageFeatures wgslLanguageFeatures; -}; - -dictionary GPURequestAdapterOptions { - GPUPowerPreference powerPreference; - boolean forceFallbackAdapter = false; -}; - -enum GPUPowerPreference { - "low-power", - "high-performance", -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUAdapter { - [SameObject] readonly attribute GPUSupportedFeatures features; - [SameObject] readonly attribute GPUSupportedLimits limits; - readonly attribute boolean isFallbackAdapter; - - Promise requestDevice(optional GPUDeviceDescriptor descriptor = {}); - Promise requestAdapterInfo(); -}; - -dictionary GPUDeviceDescriptor - : GPUObjectDescriptorBase { - sequence requiredFeatures = []; - record requiredLimits = {}; - GPUQueueDescriptor defaultQueue = {}; -}; - -enum GPUFeatureName { - "depth-clip-control", - "depth32float-stencil8", - "texture-compression-bc", - "texture-compression-etc2", - "texture-compression-astc", - "timestamp-query", - "indirect-first-instance", - "shader-f16", - "rg11b10ufloat-renderable", - "bgra8unorm-storage", - "float32-filterable", -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUDevice : EventTarget { - [SameObject] readonly attribute GPUSupportedFeatures features; - [SameObject] readonly attribute GPUSupportedLimits limits; - - [SameObject] readonly attribute GPUQueue queue; - - undefined destroy(); - - GPUBuffer createBuffer(GPUBufferDescriptor descriptor); - GPUTexture createTexture(GPUTextureDescriptor descriptor); - GPUSampler createSampler(optional GPUSamplerDescriptor descriptor = {}); - GPUExternalTexture importExternalTexture(GPUExternalTextureDescriptor descriptor); - - GPUBindGroupLayout createBindGroupLayout(GPUBindGroupLayoutDescriptor descriptor); - GPUPipelineLayout createPipelineLayout(GPUPipelineLayoutDescriptor descriptor); - GPUBindGroup createBindGroup(GPUBindGroupDescriptor descriptor); - - GPUShaderModule createShaderModule(GPUShaderModuleDescriptor descriptor); - GPUComputePipeline createComputePipeline(GPUComputePipelineDescriptor descriptor); - GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); - Promise createComputePipelineAsync(GPUComputePipelineDescriptor descriptor); - Promise createRenderPipelineAsync(GPURenderPipelineDescriptor descriptor); - - GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); - GPURenderBundleEncoder createRenderBundleEncoder(GPURenderBundleEncoderDescriptor descriptor); - - GPUQuerySet createQuerySet(GPUQuerySetDescriptor descriptor); -}; -GPUDevice includes GPUObjectBase; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUBuffer { - readonly attribute GPUSize64Out size; - readonly attribute 
GPUFlagsConstant usage; - - readonly attribute GPUBufferMapState mapState; - - Promise mapAsync(GPUMapModeFlags mode, optional GPUSize64 offset = 0, optional GPUSize64 size); - ArrayBuffer getMappedRange(optional GPUSize64 offset = 0, optional GPUSize64 size); - undefined unmap(); - - undefined destroy(); -}; -GPUBuffer includes GPUObjectBase; - -enum GPUBufferMapState { - "unmapped", - "pending", - "mapped", -}; - -dictionary GPUBufferDescriptor - : GPUObjectDescriptorBase { - required GPUSize64 size; - required GPUBufferUsageFlags usage; - boolean mappedAtCreation = false; -}; - -typedef [EnforceRange] unsigned long GPUBufferUsageFlags; -[Exposed=(Window, DedicatedWorker), SecureContext] -namespace GPUBufferUsage { - const GPUFlagsConstant MAP_READ = 0x0001; - const GPUFlagsConstant MAP_WRITE = 0x0002; - const GPUFlagsConstant COPY_SRC = 0x0004; - const GPUFlagsConstant COPY_DST = 0x0008; - const GPUFlagsConstant INDEX = 0x0010; - const GPUFlagsConstant VERTEX = 0x0020; - const GPUFlagsConstant UNIFORM = 0x0040; - const GPUFlagsConstant STORAGE = 0x0080; - const GPUFlagsConstant INDIRECT = 0x0100; - const GPUFlagsConstant QUERY_RESOLVE = 0x0200; -}; - -typedef [EnforceRange] unsigned long GPUMapModeFlags; -[Exposed=(Window, DedicatedWorker), SecureContext] -namespace GPUMapMode { - const GPUFlagsConstant READ = 0x0001; - const GPUFlagsConstant WRITE = 0x0002; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUTexture { - GPUTextureView createView(optional GPUTextureViewDescriptor descriptor = {}); - - undefined destroy(); - - readonly attribute GPUIntegerCoordinateOut width; - readonly attribute GPUIntegerCoordinateOut height; - readonly attribute GPUIntegerCoordinateOut depthOrArrayLayers; - readonly attribute GPUIntegerCoordinateOut mipLevelCount; - readonly attribute GPUSize32Out sampleCount; - readonly attribute GPUTextureDimension dimension; - readonly attribute GPUTextureFormat format; - readonly attribute GPUFlagsConstant usage; -}; -GPUTexture includes GPUObjectBase; - -dictionary GPUTextureDescriptor - : GPUObjectDescriptorBase { - required GPUExtent3D size; - GPUIntegerCoordinate mipLevelCount = 1; - GPUSize32 sampleCount = 1; - GPUTextureDimension dimension = "2d"; - required GPUTextureFormat format; - required GPUTextureUsageFlags usage; - sequence viewFormats = []; -}; - -enum GPUTextureDimension { - "1d", - "2d", - "3d", -}; - -typedef [EnforceRange] unsigned long GPUTextureUsageFlags; -[Exposed=(Window, DedicatedWorker), SecureContext] -namespace GPUTextureUsage { - const GPUFlagsConstant COPY_SRC = 0x01; - const GPUFlagsConstant COPY_DST = 0x02; - const GPUFlagsConstant TEXTURE_BINDING = 0x04; - const GPUFlagsConstant STORAGE_BINDING = 0x08; - const GPUFlagsConstant RENDER_ATTACHMENT = 0x10; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUTextureView { -}; -GPUTextureView includes GPUObjectBase; - -dictionary GPUTextureViewDescriptor - : GPUObjectDescriptorBase { - GPUTextureFormat format; - GPUTextureViewDimension dimension; - GPUTextureAspect aspect = "all"; - GPUIntegerCoordinate baseMipLevel = 0; - GPUIntegerCoordinate mipLevelCount; - GPUIntegerCoordinate baseArrayLayer = 0; - GPUIntegerCoordinate arrayLayerCount; -}; - -enum GPUTextureViewDimension { - "1d", - "2d", - "2d-array", - "cube", - "cube-array", - "3d", -}; - -enum GPUTextureAspect { - "all", - "stencil-only", - "depth-only", -}; - -enum GPUTextureFormat { - // 8-bit formats - "r8unorm", - "r8snorm", - "r8uint", - "r8sint", - - // 16-bit formats - "r16uint", - 
"r16sint", - "r16float", - "rg8unorm", - "rg8snorm", - "rg8uint", - "rg8sint", - - // 32-bit formats - "r32uint", - "r32sint", - "r32float", - "rg16uint", - "rg16sint", - "rg16float", - "rgba8unorm", - "rgba8unorm-srgb", - "rgba8snorm", - "rgba8uint", - "rgba8sint", - "bgra8unorm", - "bgra8unorm-srgb", - // Packed 32-bit formats - "rgb9e5ufloat", - "rgb10a2uint", - "rgb10a2unorm", - "rg11b10ufloat", - - // 64-bit formats - "rg32uint", - "rg32sint", - "rg32float", - "rgba16uint", - "rgba16sint", - "rgba16float", - - // 128-bit formats - "rgba32uint", - "rgba32sint", - "rgba32float", - - // Depth/stencil formats - "stencil8", - "depth16unorm", - "depth24plus", - "depth24plus-stencil8", - "depth32float", - - // "depth32float-stencil8" feature - "depth32float-stencil8", - - // BC compressed formats usable if "texture-compression-bc" is both - // supported by the device/user agent and enabled in requestDevice. - "bc1-rgba-unorm", - "bc1-rgba-unorm-srgb", - "bc2-rgba-unorm", - "bc2-rgba-unorm-srgb", - "bc3-rgba-unorm", - "bc3-rgba-unorm-srgb", - "bc4-r-unorm", - "bc4-r-snorm", - "bc5-rg-unorm", - "bc5-rg-snorm", - "bc6h-rgb-ufloat", - "bc6h-rgb-float", - "bc7-rgba-unorm", - "bc7-rgba-unorm-srgb", - - // ETC2 compressed formats usable if "texture-compression-etc2" is both - // supported by the device/user agent and enabled in requestDevice. - "etc2-rgb8unorm", - "etc2-rgb8unorm-srgb", - "etc2-rgb8a1unorm", - "etc2-rgb8a1unorm-srgb", - "etc2-rgba8unorm", - "etc2-rgba8unorm-srgb", - "eac-r11unorm", - "eac-r11snorm", - "eac-rg11unorm", - "eac-rg11snorm", - - // ASTC compressed formats usable if "texture-compression-astc" is both - // supported by the device/user agent and enabled in requestDevice. - "astc-4x4-unorm", - "astc-4x4-unorm-srgb", - "astc-5x4-unorm", - "astc-5x4-unorm-srgb", - "astc-5x5-unorm", - "astc-5x5-unorm-srgb", - "astc-6x5-unorm", - "astc-6x5-unorm-srgb", - "astc-6x6-unorm", - "astc-6x6-unorm-srgb", - "astc-8x5-unorm", - "astc-8x5-unorm-srgb", - "astc-8x6-unorm", - "astc-8x6-unorm-srgb", - "astc-8x8-unorm", - "astc-8x8-unorm-srgb", - "astc-10x5-unorm", - "astc-10x5-unorm-srgb", - "astc-10x6-unorm", - "astc-10x6-unorm-srgb", - "astc-10x8-unorm", - "astc-10x8-unorm-srgb", - "astc-10x10-unorm", - "astc-10x10-unorm-srgb", - "astc-12x10-unorm", - "astc-12x10-unorm-srgb", - "astc-12x12-unorm", - "astc-12x12-unorm-srgb", -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUExternalTexture { -}; -GPUExternalTexture includes GPUObjectBase; - -dictionary GPUExternalTextureDescriptor - : GPUObjectDescriptorBase { - required (HTMLVideoElement or VideoFrame) source; - PredefinedColorSpace colorSpace = "srgb"; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUSampler { -}; -GPUSampler includes GPUObjectBase; - -dictionary GPUSamplerDescriptor - : GPUObjectDescriptorBase { - GPUAddressMode addressModeU = "clamp-to-edge"; - GPUAddressMode addressModeV = "clamp-to-edge"; - GPUAddressMode addressModeW = "clamp-to-edge"; - GPUFilterMode magFilter = "nearest"; - GPUFilterMode minFilter = "nearest"; - GPUMipmapFilterMode mipmapFilter = "nearest"; - float lodMinClamp = 0; - float lodMaxClamp = 32; - GPUCompareFunction compare; - [Clamp] unsigned short maxAnisotropy = 1; -}; - -enum GPUAddressMode { - "clamp-to-edge", - "repeat", - "mirror-repeat", -}; - -enum GPUFilterMode { - "nearest", - "linear", -}; - -enum GPUMipmapFilterMode { - "nearest", - "linear", -}; - -enum GPUCompareFunction { - "never", - "less", - "equal", - "less-equal", - "greater", - 
"not-equal", - "greater-equal", - "always", -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUBindGroupLayout { -}; -GPUBindGroupLayout includes GPUObjectBase; - -dictionary GPUBindGroupLayoutDescriptor - : GPUObjectDescriptorBase { - required sequence entries; -}; - -dictionary GPUBindGroupLayoutEntry { - required GPUIndex32 binding; - required GPUShaderStageFlags visibility; - - GPUBufferBindingLayout buffer; - GPUSamplerBindingLayout sampler; - GPUTextureBindingLayout texture; - GPUStorageTextureBindingLayout storageTexture; - GPUExternalTextureBindingLayout externalTexture; -}; - -typedef [EnforceRange] unsigned long GPUShaderStageFlags; -[Exposed=(Window, DedicatedWorker), SecureContext] -namespace GPUShaderStage { - const GPUFlagsConstant VERTEX = 0x1; - const GPUFlagsConstant FRAGMENT = 0x2; - const GPUFlagsConstant COMPUTE = 0x4; -}; - -enum GPUBufferBindingType { - "uniform", - "storage", - "read-only-storage", -}; - -dictionary GPUBufferBindingLayout { - GPUBufferBindingType type = "uniform"; - boolean hasDynamicOffset = false; - GPUSize64 minBindingSize = 0; -}; - -enum GPUSamplerBindingType { - "filtering", - "non-filtering", - "comparison", -}; - -dictionary GPUSamplerBindingLayout { - GPUSamplerBindingType type = "filtering"; -}; - -enum GPUTextureSampleType { - "float", - "unfilterable-float", - "depth", - "sint", - "uint", -}; - -dictionary GPUTextureBindingLayout { - GPUTextureSampleType sampleType = "float"; - GPUTextureViewDimension viewDimension = "2d"; - boolean multisampled = false; -}; - -enum GPUStorageTextureAccess { - "write-only", - "read-only", - "read-write", -}; - -dictionary GPUStorageTextureBindingLayout { - GPUStorageTextureAccess access = "write-only"; - required GPUTextureFormat format; - GPUTextureViewDimension viewDimension = "2d"; -}; - -dictionary GPUExternalTextureBindingLayout { -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUBindGroup { -}; -GPUBindGroup includes GPUObjectBase; - -dictionary GPUBindGroupDescriptor - : GPUObjectDescriptorBase { - required GPUBindGroupLayout layout; - required sequence entries; -}; - -typedef (GPUSampler or GPUTextureView or GPUBufferBinding or GPUExternalTexture) GPUBindingResource; - -dictionary GPUBindGroupEntry { - required GPUIndex32 binding; - required GPUBindingResource resource; -}; - -dictionary GPUBufferBinding { - required GPUBuffer buffer; - GPUSize64 offset = 0; - GPUSize64 size; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUPipelineLayout { -}; -GPUPipelineLayout includes GPUObjectBase; - -dictionary GPUPipelineLayoutDescriptor - : GPUObjectDescriptorBase { - required sequence bindGroupLayouts; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUShaderModule { - Promise getCompilationInfo(); -}; -GPUShaderModule includes GPUObjectBase; - -dictionary GPUShaderModuleDescriptor - : GPUObjectDescriptorBase { - required USVString code; - object sourceMap; - sequence compilationHints = []; -}; - -dictionary GPUShaderModuleCompilationHint { - required USVString entryPoint; - (GPUPipelineLayout or GPUAutoLayoutMode) layout; -}; - -enum GPUCompilationMessageType { - "error", - "warning", - "info", -}; - -[Exposed=(Window, DedicatedWorker), Serializable, SecureContext] -interface GPUCompilationMessage { - readonly attribute DOMString message; - readonly attribute GPUCompilationMessageType type; - readonly attribute unsigned long long lineNum; - readonly attribute unsigned long long linePos; - readonly attribute unsigned 
long long offset; - readonly attribute unsigned long long length; -}; - -[Exposed=(Window, DedicatedWorker), Serializable, SecureContext] -interface GPUCompilationInfo { - readonly attribute FrozenArray messages; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext, Serializable] -interface GPUPipelineError : DOMException { - constructor(optional DOMString message = "", GPUPipelineErrorInit options); - readonly attribute GPUPipelineErrorReason reason; -}; - -dictionary GPUPipelineErrorInit { - required GPUPipelineErrorReason reason; -}; - -enum GPUPipelineErrorReason { - "validation", - "internal", -}; - -enum GPUAutoLayoutMode { - "auto", -}; - -dictionary GPUPipelineDescriptorBase - : GPUObjectDescriptorBase { - required (GPUPipelineLayout or GPUAutoLayoutMode) layout; -}; - -interface mixin GPUPipelineBase { - [NewObject] GPUBindGroupLayout getBindGroupLayout(unsigned long index); -}; - -dictionary GPUProgrammableStage { - required GPUShaderModule module; - required USVString entryPoint; - record constants; -}; - -typedef double GPUPipelineConstantValue; // May represent WGSL’s bool, f32, i32, u32, and f16 if enabled. - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUComputePipeline { -}; -GPUComputePipeline includes GPUObjectBase; -GPUComputePipeline includes GPUPipelineBase; - -dictionary GPUComputePipelineDescriptor - : GPUPipelineDescriptorBase { - required GPUProgrammableStage compute; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPURenderPipeline { -}; -GPURenderPipeline includes GPUObjectBase; -GPURenderPipeline includes GPUPipelineBase; - -dictionary GPURenderPipelineDescriptor - : GPUPipelineDescriptorBase { - required GPUVertexState vertex; - GPUPrimitiveState primitive = {}; - GPUDepthStencilState depthStencil; - GPUMultisampleState multisample = {}; - GPUFragmentState fragment; -}; - -dictionary GPUPrimitiveState { - GPUPrimitiveTopology topology = "triangle-list"; - GPUIndexFormat stripIndexFormat; - GPUFrontFace frontFace = "ccw"; - GPUCullMode cullMode = "none"; - - // Requires "depth-clip-control" feature. 
- boolean unclippedDepth = false; -}; - -enum GPUPrimitiveTopology { - "point-list", - "line-list", - "line-strip", - "triangle-list", - "triangle-strip", -}; - -enum GPUFrontFace { - "ccw", - "cw", -}; - -enum GPUCullMode { - "none", - "front", - "back", -}; - -dictionary GPUMultisampleState { - GPUSize32 count = 1; - GPUSampleMask mask = 0xFFFFFFFF; - boolean alphaToCoverageEnabled = false; -}; - -dictionary GPUFragmentState - : GPUProgrammableStage { - required sequence targets; -}; - -dictionary GPUColorTargetState { - required GPUTextureFormat format; - - GPUBlendState blend; - GPUColorWriteFlags writeMask = 0xF; // GPUColorWrite.ALL -}; - -dictionary GPUBlendState { - required GPUBlendComponent color; - required GPUBlendComponent alpha; -}; - -typedef [EnforceRange] unsigned long GPUColorWriteFlags; -[Exposed=(Window, DedicatedWorker), SecureContext] -namespace GPUColorWrite { - const GPUFlagsConstant RED = 0x1; - const GPUFlagsConstant GREEN = 0x2; - const GPUFlagsConstant BLUE = 0x4; - const GPUFlagsConstant ALPHA = 0x8; - const GPUFlagsConstant ALL = 0xF; -}; - -dictionary GPUBlendComponent { - GPUBlendOperation operation = "add"; - GPUBlendFactor srcFactor = "one"; - GPUBlendFactor dstFactor = "zero"; -}; - -enum GPUBlendFactor { - "zero", - "one", - "src", - "one-minus-src", - "src-alpha", - "one-minus-src-alpha", - "dst", - "one-minus-dst", - "dst-alpha", - "one-minus-dst-alpha", - "src-alpha-saturated", - "constant", - "one-minus-constant", -}; - -enum GPUBlendOperation { - "add", - "subtract", - "reverse-subtract", - "min", - "max", -}; - -dictionary GPUDepthStencilState { - required GPUTextureFormat format; - - boolean depthWriteEnabled; - GPUCompareFunction depthCompare; - - GPUStencilFaceState stencilFront = {}; - GPUStencilFaceState stencilBack = {}; - - GPUStencilValue stencilReadMask = 0xFFFFFFFF; - GPUStencilValue stencilWriteMask = 0xFFFFFFFF; - - GPUDepthBias depthBias = 0; - float depthBiasSlopeScale = 0; - float depthBiasClamp = 0; -}; - -dictionary GPUStencilFaceState { - GPUCompareFunction compare = "always"; - GPUStencilOperation failOp = "keep"; - GPUStencilOperation depthFailOp = "keep"; - GPUStencilOperation passOp = "keep"; -}; - -enum GPUStencilOperation { - "keep", - "zero", - "replace", - "invert", - "increment-clamp", - "decrement-clamp", - "increment-wrap", - "decrement-wrap", -}; - -enum GPUIndexFormat { - "uint16", - "uint32", -}; - -enum GPUVertexFormat { - "uint8x2", - "uint8x4", - "sint8x2", - "sint8x4", - "unorm8x2", - "unorm8x4", - "snorm8x2", - "snorm8x4", - "uint16x2", - "uint16x4", - "sint16x2", - "sint16x4", - "unorm16x2", - "unorm16x4", - "snorm16x2", - "snorm16x4", - "float16x2", - "float16x4", - "float32", - "float32x2", - "float32x3", - "float32x4", - "uint32", - "uint32x2", - "uint32x3", - "uint32x4", - "sint32", - "sint32x2", - "sint32x3", - "sint32x4", - "unorm10-10-10-2", -}; - -enum GPUVertexStepMode { - "vertex", - "instance", -}; - -dictionary GPUVertexState - : GPUProgrammableStage { - sequence buffers = []; -}; - -dictionary GPUVertexBufferLayout { - required GPUSize64 arrayStride; - GPUVertexStepMode stepMode = "vertex"; - required sequence attributes; -}; - -dictionary GPUVertexAttribute { - required GPUVertexFormat format; - required GPUSize64 offset; - - required GPUIndex32 shaderLocation; -}; - -dictionary GPUImageDataLayout { - GPUSize64 offset = 0; - GPUSize32 bytesPerRow; - GPUSize32 rowsPerImage; -}; - -dictionary GPUImageCopyBuffer - : GPUImageDataLayout { - required GPUBuffer buffer; -}; - -dictionary 
GPUImageCopyTexture { - required GPUTexture texture; - GPUIntegerCoordinate mipLevel = 0; - GPUOrigin3D origin = {}; - GPUTextureAspect aspect = "all"; -}; - -dictionary GPUImageCopyTextureTagged - : GPUImageCopyTexture { - PredefinedColorSpace colorSpace = "srgb"; - boolean premultipliedAlpha = false; -}; - -typedef (ImageBitmap or - ImageData or - HTMLImageElement or - HTMLVideoElement or - VideoFrame or - HTMLCanvasElement or - OffscreenCanvas) GPUImageCopyExternalImageSource; - -dictionary GPUImageCopyExternalImage { - required GPUImageCopyExternalImageSource source; - GPUOrigin2D origin = {}; - boolean flipY = false; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUCommandBuffer { -}; -GPUCommandBuffer includes GPUObjectBase; - -dictionary GPUCommandBufferDescriptor - : GPUObjectDescriptorBase { -}; - -interface mixin GPUCommandsMixin { -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUCommandEncoder { - GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor descriptor); - GPUComputePassEncoder beginComputePass(optional GPUComputePassDescriptor descriptor = {}); - - undefined copyBufferToBuffer( - GPUBuffer source, - GPUSize64 sourceOffset, - GPUBuffer destination, - GPUSize64 destinationOffset, - GPUSize64 size); - - undefined copyBufferToTexture( - GPUImageCopyBuffer source, - GPUImageCopyTexture destination, - GPUExtent3D copySize); - - undefined copyTextureToBuffer( - GPUImageCopyTexture source, - GPUImageCopyBuffer destination, - GPUExtent3D copySize); - - undefined copyTextureToTexture( - GPUImageCopyTexture source, - GPUImageCopyTexture destination, - GPUExtent3D copySize); - - undefined clearBuffer( - GPUBuffer buffer, - optional GPUSize64 offset = 0, - optional GPUSize64 size); - - undefined resolveQuerySet( - GPUQuerySet querySet, - GPUSize32 firstQuery, - GPUSize32 queryCount, - GPUBuffer destination, - GPUSize64 destinationOffset); - - GPUCommandBuffer finish(optional GPUCommandBufferDescriptor descriptor = {}); -}; -GPUCommandEncoder includes GPUObjectBase; -GPUCommandEncoder includes GPUCommandsMixin; -GPUCommandEncoder includes GPUDebugCommandsMixin; - -dictionary GPUCommandEncoderDescriptor - : GPUObjectDescriptorBase { -}; - -interface mixin GPUBindingCommandsMixin { - undefined setBindGroup(GPUIndex32 index, GPUBindGroup? bindGroup, - optional sequence dynamicOffsets = []); - - undefined setBindGroup(GPUIndex32 index, GPUBindGroup? 
bindGroup, - Uint32Array dynamicOffsetsData, - GPUSize64 dynamicOffsetsDataStart, - GPUSize32 dynamicOffsetsDataLength); -}; - -interface mixin GPUDebugCommandsMixin { - undefined pushDebugGroup(USVString groupLabel); - undefined popDebugGroup(); - undefined insertDebugMarker(USVString markerLabel); -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUComputePassEncoder { - undefined setPipeline(GPUComputePipeline pipeline); - undefined dispatchWorkgroups(GPUSize32 workgroupCountX, optional GPUSize32 workgroupCountY = 1, optional GPUSize32 workgroupCountZ = 1); - undefined dispatchWorkgroupsIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); - - undefined end(); -}; -GPUComputePassEncoder includes GPUObjectBase; -GPUComputePassEncoder includes GPUCommandsMixin; -GPUComputePassEncoder includes GPUDebugCommandsMixin; -GPUComputePassEncoder includes GPUBindingCommandsMixin; - -dictionary GPUComputePassTimestampWrites { - required GPUQuerySet querySet; - GPUSize32 beginningOfPassWriteIndex; - GPUSize32 endOfPassWriteIndex; -}; - -dictionary GPUComputePassDescriptor - : GPUObjectDescriptorBase { - GPUComputePassTimestampWrites timestampWrites; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPURenderPassEncoder { - undefined setViewport(float x, float y, - float width, float height, - float minDepth, float maxDepth); - - undefined setScissorRect(GPUIntegerCoordinate x, GPUIntegerCoordinate y, - GPUIntegerCoordinate width, GPUIntegerCoordinate height); - - undefined setBlendConstant(GPUColor color); - undefined setStencilReference(GPUStencilValue reference); - - undefined beginOcclusionQuery(GPUSize32 queryIndex); - undefined endOcclusionQuery(); - - undefined executeBundles(sequence bundles); - undefined end(); -}; -GPURenderPassEncoder includes GPUObjectBase; -GPURenderPassEncoder includes GPUCommandsMixin; -GPURenderPassEncoder includes GPUDebugCommandsMixin; -GPURenderPassEncoder includes GPUBindingCommandsMixin; -GPURenderPassEncoder includes GPURenderCommandsMixin; - -dictionary GPURenderPassTimestampWrites { - required GPUQuerySet querySet; - GPUSize32 beginningOfPassWriteIndex; - GPUSize32 endOfPassWriteIndex; -}; - -dictionary GPURenderPassDescriptor - : GPUObjectDescriptorBase { - required sequence colorAttachments; - GPURenderPassDepthStencilAttachment depthStencilAttachment; - GPUQuerySet occlusionQuerySet; - GPURenderPassTimestampWrites timestampWrites; - GPUSize64 maxDrawCount = 50000000; -}; - -dictionary GPURenderPassColorAttachment { - required GPUTextureView view; - GPUIntegerCoordinate depthSlice; - GPUTextureView resolveTarget; - - GPUColor clearValue; - required GPULoadOp loadOp; - required GPUStoreOp storeOp; -}; - -dictionary GPURenderPassDepthStencilAttachment { - required GPUTextureView view; - - float depthClearValue; - GPULoadOp depthLoadOp; - GPUStoreOp depthStoreOp; - boolean depthReadOnly = false; - - GPUStencilValue stencilClearValue = 0; - GPULoadOp stencilLoadOp; - GPUStoreOp stencilStoreOp; - boolean stencilReadOnly = false; -}; - -enum GPULoadOp { - "load", - "clear", -}; - -enum GPUStoreOp { - "store", - "discard", -}; - -dictionary GPURenderPassLayout - : GPUObjectDescriptorBase { - required sequence colorFormats; - GPUTextureFormat depthStencilFormat; - GPUSize32 sampleCount = 1; -}; - -interface mixin GPURenderCommandsMixin { - undefined setPipeline(GPURenderPipeline pipeline); - - undefined setIndexBuffer(GPUBuffer buffer, GPUIndexFormat indexFormat, optional GPUSize64 offset = 0, optional GPUSize64 size); 
- undefined setVertexBuffer(GPUIndex32 slot, GPUBuffer? buffer, optional GPUSize64 offset = 0, optional GPUSize64 size); - - undefined draw(GPUSize32 vertexCount, optional GPUSize32 instanceCount = 1, - optional GPUSize32 firstVertex = 0, optional GPUSize32 firstInstance = 0); - undefined drawIndexed(GPUSize32 indexCount, optional GPUSize32 instanceCount = 1, - optional GPUSize32 firstIndex = 0, - optional GPUSignedOffset32 baseVertex = 0, - optional GPUSize32 firstInstance = 0); - - undefined drawIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); - undefined drawIndexedIndirect(GPUBuffer indirectBuffer, GPUSize64 indirectOffset); -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPURenderBundle { -}; -GPURenderBundle includes GPUObjectBase; - -dictionary GPURenderBundleDescriptor - : GPUObjectDescriptorBase { -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPURenderBundleEncoder { - GPURenderBundle finish(optional GPURenderBundleDescriptor descriptor = {}); -}; -GPURenderBundleEncoder includes GPUObjectBase; -GPURenderBundleEncoder includes GPUCommandsMixin; -GPURenderBundleEncoder includes GPUDebugCommandsMixin; -GPURenderBundleEncoder includes GPUBindingCommandsMixin; -GPURenderBundleEncoder includes GPURenderCommandsMixin; - -dictionary GPURenderBundleEncoderDescriptor - : GPURenderPassLayout { - boolean depthReadOnly = false; - boolean stencilReadOnly = false; -}; - -dictionary GPUQueueDescriptor - : GPUObjectDescriptorBase { -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUQueue { - undefined submit(sequence commandBuffers); - - Promise onSubmittedWorkDone(); - - undefined writeBuffer( - GPUBuffer buffer, - GPUSize64 bufferOffset, - AllowSharedBufferSource data, - optional GPUSize64 dataOffset = 0, - optional GPUSize64 size); - - undefined writeTexture( - GPUImageCopyTexture destination, - AllowSharedBufferSource data, - GPUImageDataLayout dataLayout, - GPUExtent3D size); - - undefined copyExternalImageToTexture( - GPUImageCopyExternalImage source, - GPUImageCopyTextureTagged destination, - GPUExtent3D copySize); -}; -GPUQueue includes GPUObjectBase; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUQuerySet { - undefined destroy(); - - readonly attribute GPUQueryType type; - readonly attribute GPUSize32Out count; -}; -GPUQuerySet includes GPUObjectBase; - -dictionary GPUQuerySetDescriptor - : GPUObjectDescriptorBase { - required GPUQueryType type; - required GPUSize32 count; -}; - -enum GPUQueryType { - "occlusion", - "timestamp", -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUCanvasContext { - readonly attribute (HTMLCanvasElement or OffscreenCanvas) canvas; - - undefined configure(GPUCanvasConfiguration configuration); - undefined unconfigure(); - - GPUTexture getCurrentTexture(); -}; - -enum GPUCanvasAlphaMode { - "opaque", - "premultiplied", -}; - -dictionary GPUCanvasConfiguration { - required GPUDevice device; - required GPUTextureFormat format; - GPUTextureUsageFlags usage = 0x10; // GPUTextureUsage.RENDER_ATTACHMENT - sequence viewFormats = []; - PredefinedColorSpace colorSpace = "srgb"; - GPUCanvasAlphaMode alphaMode = "opaque"; -}; - -enum GPUDeviceLostReason { - "unknown", - "destroyed", -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUDeviceLostInfo { - readonly attribute GPUDeviceLostReason reason; - readonly attribute DOMString message; -}; - -partial interface GPUDevice { - readonly attribute Promise lost; -}; - -[Exposed=(Window, 
DedicatedWorker), SecureContext] -interface GPUError { - readonly attribute DOMString message; -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUValidationError - : GPUError { - constructor(DOMString message); -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUOutOfMemoryError - : GPUError { - constructor(DOMString message); -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUInternalError - : GPUError { - constructor(DOMString message); -}; - -enum GPUErrorFilter { - "validation", - "out-of-memory", - "internal", -}; - -partial interface GPUDevice { - undefined pushErrorScope(GPUErrorFilter filter); - Promise popErrorScope(); -}; - -[Exposed=(Window, DedicatedWorker), SecureContext] -interface GPUUncapturedErrorEvent : Event { - constructor( - DOMString type, - GPUUncapturedErrorEventInit gpuUncapturedErrorEventInitDict - ); - [SameObject] readonly attribute GPUError error; -}; - -dictionary GPUUncapturedErrorEventInit : EventInit { - required GPUError error; -}; - -partial interface GPUDevice { - [Exposed=(Window, DedicatedWorker)] - attribute EventHandler onuncapturederror; -}; - -typedef [EnforceRange] unsigned long GPUBufferDynamicOffset; -typedef [EnforceRange] unsigned long GPUStencilValue; -typedef [EnforceRange] unsigned long GPUSampleMask; -typedef [EnforceRange] long GPUDepthBias; - -typedef [EnforceRange] unsigned long long GPUSize64; -typedef [EnforceRange] unsigned long GPUIntegerCoordinate; -typedef [EnforceRange] unsigned long GPUIndex32; -typedef [EnforceRange] unsigned long GPUSize32; -typedef [EnforceRange] long GPUSignedOffset32; - -typedef unsigned long long GPUSize64Out; -typedef unsigned long GPUIntegerCoordinateOut; -typedef unsigned long GPUSize32Out; - -typedef unsigned long GPUFlagsConstant; - -dictionary GPUColorDict { - required double r; - required double g; - required double b; - required double a; -}; -typedef (sequence or GPUColorDict) GPUColor; - -dictionary GPUOrigin2DDict { - GPUIntegerCoordinate x = 0; - GPUIntegerCoordinate y = 0; -}; -typedef (sequence or GPUOrigin2DDict) GPUOrigin2D; - -dictionary GPUOrigin3DDict { - GPUIntegerCoordinate x = 0; - GPUIntegerCoordinate y = 0; - GPUIntegerCoordinate z = 0; -}; -typedef (sequence or GPUOrigin3DDict) GPUOrigin3D; - -dictionary GPUExtent3DDict { - required GPUIntegerCoordinate width; - GPUIntegerCoordinate height = 1; - GPUIntegerCoordinate depthOrArrayLayers = 1; -}; -typedef (sequence or GPUExtent3DDict) GPUExtent3D; - diff --git a/wgpu/resources/wgpu.h b/wgpu/resources/wgpu.h deleted file mode 100644 index 76bdb47..0000000 --- a/wgpu/resources/wgpu.h +++ /dev/null @@ -1,256 +0,0 @@ -#ifndef WGPU_H_ -#define WGPU_H_ - -#include "webgpu.h" - -typedef enum WGPUNativeSType { - // Start at 0003 since that's allocated range for wgpu-native - WGPUSType_DeviceExtras = 0x00030001, - WGPUSType_RequiredLimitsExtras = 0x00030002, - WGPUSType_PipelineLayoutExtras = 0x00030003, - WGPUSType_ShaderModuleGLSLDescriptor = 0x00030004, - WGPUSType_SupportedLimitsExtras = 0x00030005, - WGPUSType_InstanceExtras = 0x00030006, - WGPUSType_BindGroupEntryExtras = 0x00030007, - WGPUSType_BindGroupLayoutEntryExtras = 0x00030008, - WGPUSType_QuerySetDescriptorExtras = 0x00030009, - WGPUNativeSType_Force32 = 0x7FFFFFFF -} WGPUNativeSType; - -typedef enum WGPUNativeFeature { - WGPUNativeFeature_PushConstants = 0x00030001, - WGPUNativeFeature_TextureAdapterSpecificFormatFeatures = 0x00030002, - WGPUNativeFeature_MultiDrawIndirect = 0x00030003, - 
WGPUNativeFeature_MultiDrawIndirectCount = 0x00030004, - WGPUNativeFeature_VertexWritableStorage = 0x00030005, - WGPUNativeFeature_TextureBindingArray = 0x00030006, - WGPUNativeFeature_SampledTextureAndStorageBufferArrayNonUniformIndexing = 0x00030007, - WGPUNativeFeature_PipelineStatisticsQuery = 0x00030008, - WGPUNativeFeature_Force32 = 0x7FFFFFFF -} WGPUNativeFeature; - -typedef enum WGPULogLevel { - WGPULogLevel_Off = 0x00000000, - WGPULogLevel_Error = 0x00000001, - WGPULogLevel_Warn = 0x00000002, - WGPULogLevel_Info = 0x00000003, - WGPULogLevel_Debug = 0x00000004, - WGPULogLevel_Trace = 0x00000005, - WGPULogLevel_Force32 = 0x7FFFFFFF -} WGPULogLevel; - -typedef enum WGPUInstanceBackend { - WGPUInstanceBackend_All = 0x00000000, - WGPUInstanceBackend_Vulkan = 1 << 0, - WGPUInstanceBackend_GL = 1 << 1, - WGPUInstanceBackend_Metal = 1 << 2, - WGPUInstanceBackend_DX12 = 1 << 3, - WGPUInstanceBackend_DX11 = 1 << 4, - WGPUInstanceBackend_BrowserWebGPU = 1 << 5, - WGPUInstanceBackend_Primary = WGPUInstanceBackend_Vulkan | WGPUInstanceBackend_Metal | - WGPUInstanceBackend_DX12 | - WGPUInstanceBackend_BrowserWebGPU, - WGPUInstanceBackend_Secondary = WGPUInstanceBackend_GL | WGPUInstanceBackend_DX11, - WGPUInstanceBackend_Force32 = 0x7FFFFFFF -} WGPUInstanceBackend; -typedef WGPUFlags WGPUInstanceBackendFlags; - -typedef enum WGPUInstanceFlag { - WGPUInstanceFlag_Default = 0x00000000, - WGPUInstanceFlag_Debug = 1 << 0, - WGPUInstanceFlag_Validation = 1 << 1, - WGPUInstanceFlag_DiscardHalLabels = 1 << 2, - WGPUInstanceFlag_Force32 = 0x7FFFFFFF -} WGPUInstanceFlag; -typedef WGPUFlags WGPUInstanceFlags; - -typedef enum WGPUDx12Compiler { - WGPUDx12Compiler_Undefined = 0x00000000, - WGPUDx12Compiler_Fxc = 0x00000001, - WGPUDx12Compiler_Dxc = 0x00000002, - WGPUDx12Compiler_Force32 = 0x7FFFFFFF -} WGPUDx12Compiler; - -typedef enum WGPUGles3MinorVersion { - WGPUGles3MinorVersion_Automatic = 0x00000000, - WGPUGles3MinorVersion_Version0 = 0x00000001, - WGPUGles3MinorVersion_Version1 = 0x00000002, - WGPUGles3MinorVersion_Version2 = 0x00000003, - WGPUGles3MinorVersion_Force32 = 0x7FFFFFFF -} WGPUGles3MinorVersion; - -typedef enum WGPUPipelineStatisticName { - WGPUPipelineStatisticName_VertexShaderInvocations = 0x00000000, - WGPUPipelineStatisticName_ClipperInvocations = 0x00000001, - WGPUPipelineStatisticName_ClipperPrimitivesOut = 0x00000002, - WGPUPipelineStatisticName_FragmentShaderInvocations = 0x00000003, - WGPUPipelineStatisticName_ComputeShaderInvocations = 0x00000004, - WGPUPipelineStatisticName_Force32 = 0x7FFFFFFF -} WGPUPipelineStatisticName WGPU_ENUM_ATTRIBUTE; - -typedef enum WGPUNativeQueryType { - WGPUNativeQueryType_PipelineStatistics = 0x00030000, - WGPUNativeQueryType_Force32 = 0x7FFFFFFF -} WGPUNativeQueryType WGPU_ENUM_ATTRIBUTE; - -typedef struct WGPUInstanceExtras { - WGPUChainedStruct chain; - WGPUInstanceBackendFlags backends; - WGPUInstanceFlags flags; - WGPUDx12Compiler dx12ShaderCompiler; - WGPUGles3MinorVersion gles3MinorVersion; - const char * dxilPath; - const char * dxcPath; -} WGPUInstanceExtras; - -typedef struct WGPUDeviceExtras { - WGPUChainedStruct chain; - const char * tracePath; -} WGPUDeviceExtras; - -typedef struct WGPUNativeLimits { - uint32_t maxPushConstantSize; - uint32_t maxNonSamplerBindings; -} WGPUNativeLimits; - -typedef struct WGPURequiredLimitsExtras { - WGPUChainedStruct chain; - WGPUNativeLimits limits; -} WGPURequiredLimitsExtras; - -typedef struct WGPUSupportedLimitsExtras { - WGPUChainedStructOut chain; - WGPUNativeLimits limits; -} 
WGPUSupportedLimitsExtras; - -typedef struct WGPUPushConstantRange { - WGPUShaderStageFlags stages; - uint32_t start; - uint32_t end; -} WGPUPushConstantRange; - -typedef struct WGPUPipelineLayoutExtras { - WGPUChainedStruct chain; - uint32_t pushConstantRangeCount; - WGPUPushConstantRange* pushConstantRanges; -} WGPUPipelineLayoutExtras; - -typedef uint64_t WGPUSubmissionIndex; - -typedef struct WGPUWrappedSubmissionIndex { - WGPUQueue queue; - WGPUSubmissionIndex submissionIndex; -} WGPUWrappedSubmissionIndex; - -typedef struct WGPUShaderDefine { - char const * name; - char const * value; -} WGPUShaderDefine; - -typedef struct WGPUShaderModuleGLSLDescriptor { - WGPUChainedStruct chain; - WGPUShaderStage stage; - char const * code; - uint32_t defineCount; - WGPUShaderDefine * defines; -} WGPUShaderModuleGLSLDescriptor; - -typedef struct WGPUStorageReport { - size_t numOccupied; - size_t numVacant; - size_t numError; - size_t elementSize; -} WGPUStorageReport; - -typedef struct WGPUHubReport { - WGPUStorageReport adapters; - WGPUStorageReport devices; - WGPUStorageReport pipelineLayouts; - WGPUStorageReport shaderModules; - WGPUStorageReport bindGroupLayouts; - WGPUStorageReport bindGroups; - WGPUStorageReport commandBuffers; - WGPUStorageReport renderBundles; - WGPUStorageReport renderPipelines; - WGPUStorageReport computePipelines; - WGPUStorageReport querySets; - WGPUStorageReport buffers; - WGPUStorageReport textures; - WGPUStorageReport textureViews; - WGPUStorageReport samplers; -} WGPUHubReport; - -typedef struct WGPUGlobalReport { - WGPUStorageReport surfaces; - WGPUBackendType backendType; - WGPUHubReport vulkan; - WGPUHubReport metal; - WGPUHubReport dx12; - WGPUHubReport dx11; - WGPUHubReport gl; -} WGPUGlobalReport; - -typedef struct WGPUInstanceEnumerateAdapterOptions { - WGPUChainedStruct const * nextInChain; - WGPUInstanceBackendFlags backends; -} WGPUInstanceEnumerateAdapterOptions; - -typedef struct WGPUBindGroupEntryExtras { - WGPUChainedStruct chain; - WGPUBuffer const * buffers; - size_t bufferCount; - WGPUSampler const * samplers; - size_t samplerCount; - WGPUTextureView const * textureViews; - size_t textureViewCount; -} WGPUBindGroupEntryExtras; - -typedef struct WGPUBindGroupLayoutEntryExtras { - WGPUChainedStruct chain; - uint32_t count; -} WGPUBindGroupLayoutEntryExtras; - -typedef struct WGPUQuerySetDescriptorExtras { - WGPUChainedStruct chain; - WGPUPipelineStatisticName const * pipelineStatistics; - size_t pipelineStatisticCount; -} WGPUQuerySetDescriptorExtras WGPU_STRUCTURE_ATTRIBUTE; - -typedef void (*WGPULogCallback)(WGPULogLevel level, char const * message, void * userdata); - -#ifdef __cplusplus -extern "C" { -#endif - -void wgpuGenerateReport(WGPUInstance instance, WGPUGlobalReport * report); -size_t wgpuInstanceEnumerateAdapters(WGPUInstance instance, WGPUInstanceEnumerateAdapterOptions const * options, WGPUAdapter * adapters); - -WGPUSubmissionIndex wgpuQueueSubmitForIndex(WGPUQueue queue, size_t commandCount, WGPUCommandBuffer const * commands); - -// Returns true if the queue is empty, or false if there are more queue submissions still in flight. 
-WGPUBool wgpuDevicePoll(WGPUDevice device, WGPUBool wait, WGPUWrappedSubmissionIndex const * wrappedSubmissionIndex); - -void wgpuSetLogCallback(WGPULogCallback callback, void * userdata); - -void wgpuSetLogLevel(WGPULogLevel level); - -uint32_t wgpuGetVersion(void); - -void wgpuRenderPassEncoderSetPushConstants(WGPURenderPassEncoder encoder, WGPUShaderStageFlags stages, uint32_t offset, uint32_t sizeBytes, void* const data); - -void wgpuRenderPassEncoderMultiDrawIndirect(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, uint32_t count); -void wgpuRenderPassEncoderMultiDrawIndexedIndirect(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, uint32_t count); - -void wgpuRenderPassEncoderMultiDrawIndirectCount(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, WGPUBuffer count_buffer, uint64_t count_buffer_offset, uint32_t max_count); -void wgpuRenderPassEncoderMultiDrawIndexedIndirectCount(WGPURenderPassEncoder encoder, WGPUBuffer buffer, uint64_t offset, WGPUBuffer count_buffer, uint64_t count_buffer_offset, uint32_t max_count); - -void wgpuComputePassEncoderBeginPipelineStatisticsQuery(WGPUComputePassEncoder computePassEncoder, WGPUQuerySet querySet, uint32_t queryIndex); -void wgpuComputePassEncoderEndPipelineStatisticsQuery(WGPUComputePassEncoder computePassEncoder); -void wgpuRenderPassEncoderBeginPipelineStatisticsQuery(WGPURenderPassEncoder renderPassEncoder, WGPUQuerySet querySet, uint32_t queryIndex); -void wgpuRenderPassEncoderEndPipelineStatisticsQuery(WGPURenderPassEncoder renderPassEncoder); - -#ifdef __cplusplus -} // extern "C" -#endif - -#endif diff --git a/wgpu/structs.py b/wgpu/structs.py deleted file mode 100644 index c225aca..0000000 --- a/wgpu/structs.py +++ /dev/null @@ -1,748 +0,0 @@ -""" -These structs are defined in ``wgpu.structs``. - -The sructs in wgpu-py are represented as Python dictionaries. -Fields that have default values (as indicated below) may be omitted. 
-""" - -_use_sphinx_repr = False - - -class Struct: - def __init__(self, name, **kwargs): - self._name = name - for key, val in kwargs.items(): - setattr(self, key, val) - - def __iter__(self): - return iter([key for key in dir(self) if not key.startswith("_")]) - - def __repr__(self): - if _use_sphinx_repr: # no-cover - return "" - options = ", ".join(f"'{x}'" for x in self) - return f"" - - -# CODE BELOW THIS POINT IS AUTOGENERATED - DO NOT EDIT - - -# There are 59 structs - -__all__ = [ - "RequestAdapterOptions", - "DeviceDescriptor", - "BufferDescriptor", - "TextureDescriptor", - "TextureViewDescriptor", - "ExternalTextureDescriptor", - "SamplerDescriptor", - "BindGroupLayoutDescriptor", - "BindGroupLayoutEntry", - "BufferBindingLayout", - "SamplerBindingLayout", - "TextureBindingLayout", - "StorageTextureBindingLayout", - "ExternalTextureBindingLayout", - "BindGroupDescriptor", - "BindGroupEntry", - "BufferBinding", - "PipelineLayoutDescriptor", - "ShaderModuleDescriptor", - "ShaderModuleCompilationHint", - "PipelineErrorInit", - "ProgrammableStage", - "ComputePipelineDescriptor", - "RenderPipelineDescriptor", - "PrimitiveState", - "MultisampleState", - "FragmentState", - "ColorTargetState", - "BlendState", - "BlendComponent", - "DepthStencilState", - "StencilFaceState", - "VertexState", - "VertexBufferLayout", - "VertexAttribute", - "ImageDataLayout", - "ImageCopyBuffer", - "ImageCopyTexture", - "ImageCopyExternalImage", - "CommandBufferDescriptor", - "CommandEncoderDescriptor", - "ComputePassTimestampWrites", - "ComputePassDescriptor", - "RenderPassTimestampWrites", - "RenderPassDescriptor", - "RenderPassColorAttachment", - "RenderPassDepthStencilAttachment", - "RenderPassLayout", - "RenderBundleDescriptor", - "RenderBundleEncoderDescriptor", - "QueueDescriptor", - "QuerySetDescriptor", - "CanvasConfiguration", - "UncapturedErrorEventInit", - "Color", - "Origin2D", - "Origin3D", - "Extent3D", -] - - -#: * powerPreference :: :obj:`enums.PowerPreference ` = None -#: * forceFallbackAdapter :: bool = false -RequestAdapterOptions = Struct( - "RequestAdapterOptions", - power_preference="enums.PowerPreference", - force_fallback_adapter="bool", -) - -#: * label :: str = "" -#: * requiredFeatures :: List[:obj:`enums.FeatureName `] = [] -#: * requiredLimits :: Dict[str, int] = {} -#: * defaultQueue :: :obj:`structs.QueueDescriptor ` = {} -DeviceDescriptor = Struct( - "DeviceDescriptor", - label="str", - required_features="List[enums.FeatureName]", - required_limits="Dict[str, int]", - default_queue="structs.QueueDescriptor", -) - -#: * label :: str = "" -#: * size :: int -#: * usage :: :obj:`flags.BufferUsage ` -#: * mappedAtCreation :: bool = false -BufferDescriptor = Struct( - "BufferDescriptor", - label="str", - size="int", - usage="flags.BufferUsage", - mapped_at_creation="bool", -) - -#: * label :: str = "" -#: * size :: Union[List[int], :obj:`structs.Extent3D `] -#: * mipLevelCount :: int = 1 -#: * sampleCount :: int = 1 -#: * dimension :: :obj:`enums.TextureDimension ` = "2d" -#: * format :: :obj:`enums.TextureFormat ` -#: * usage :: :obj:`flags.TextureUsage ` -#: * viewFormats :: List[:obj:`enums.TextureFormat `] = [] -TextureDescriptor = Struct( - "TextureDescriptor", - label="str", - size="Union[List[int], structs.Extent3D]", - mip_level_count="int", - sample_count="int", - dimension="enums.TextureDimension", - format="enums.TextureFormat", - usage="flags.TextureUsage", - view_formats="List[enums.TextureFormat]", -) - -#: * label :: str = "" -#: * format :: :obj:`enums.TextureFormat 
` = None -#: * dimension :: :obj:`enums.TextureViewDimension ` = None -#: * aspect :: :obj:`enums.TextureAspect ` = "all" -#: * baseMipLevel :: int = 0 -#: * mipLevelCount :: int = None -#: * baseArrayLayer :: int = 0 -#: * arrayLayerCount :: int = None -TextureViewDescriptor = Struct( - "TextureViewDescriptor", - label="str", - format="enums.TextureFormat", - dimension="enums.TextureViewDimension", - aspect="enums.TextureAspect", - base_mip_level="int", - mip_level_count="int", - base_array_layer="int", - array_layer_count="int", -) - -#: * label :: str = "" -#: * source :: Union[memoryview, object] -#: * colorSpace :: str = "srgb" -ExternalTextureDescriptor = Struct( - "ExternalTextureDescriptor", - label="str", - source="Union[memoryview, object]", - color_space="str", -) - -#: * label :: str = "" -#: * addressModeU :: :obj:`enums.AddressMode ` = "clamp-to-edge" -#: * addressModeV :: :obj:`enums.AddressMode ` = "clamp-to-edge" -#: * addressModeW :: :obj:`enums.AddressMode ` = "clamp-to-edge" -#: * magFilter :: :obj:`enums.FilterMode ` = "nearest" -#: * minFilter :: :obj:`enums.FilterMode ` = "nearest" -#: * mipmapFilter :: :obj:`enums.MipmapFilterMode ` = "nearest" -#: * lodMinClamp :: float = 0 -#: * lodMaxClamp :: float = 32 -#: * compare :: :obj:`enums.CompareFunction ` = None -#: * maxAnisotropy :: int = 1 -SamplerDescriptor = Struct( - "SamplerDescriptor", - label="str", - address_mode_u="enums.AddressMode", - address_mode_v="enums.AddressMode", - address_mode_w="enums.AddressMode", - mag_filter="enums.FilterMode", - min_filter="enums.FilterMode", - mipmap_filter="enums.MipmapFilterMode", - lod_min_clamp="float", - lod_max_clamp="float", - compare="enums.CompareFunction", - max_anisotropy="int", -) - -#: * label :: str = "" -#: * entries :: List[:obj:`structs.BindGroupLayoutEntry `] -BindGroupLayoutDescriptor = Struct( - "BindGroupLayoutDescriptor", - label="str", - entries="List[structs.BindGroupLayoutEntry]", -) - -#: * binding :: int -#: * visibility :: :obj:`flags.ShaderStage ` -#: * buffer :: :obj:`structs.BufferBindingLayout ` = None -#: * sampler :: :obj:`structs.SamplerBindingLayout ` = None -#: * texture :: :obj:`structs.TextureBindingLayout ` = None -#: * storageTexture :: :obj:`structs.StorageTextureBindingLayout ` = None -#: * externalTexture :: :obj:`structs.ExternalTextureBindingLayout ` = None -BindGroupLayoutEntry = Struct( - "BindGroupLayoutEntry", - binding="int", - visibility="flags.ShaderStage", - buffer="structs.BufferBindingLayout", - sampler="structs.SamplerBindingLayout", - texture="structs.TextureBindingLayout", - storage_texture="structs.StorageTextureBindingLayout", - external_texture="structs.ExternalTextureBindingLayout", -) - -#: * type :: :obj:`enums.BufferBindingType ` = "uniform" -#: * hasDynamicOffset :: bool = false -#: * minBindingSize :: int = 0 -BufferBindingLayout = Struct( - "BufferBindingLayout", - type="enums.BufferBindingType", - has_dynamic_offset="bool", - min_binding_size="int", -) - -#: * type :: :obj:`enums.SamplerBindingType ` = "filtering" -SamplerBindingLayout = Struct( - "SamplerBindingLayout", - type="enums.SamplerBindingType", -) - -#: * sampleType :: :obj:`enums.TextureSampleType ` = "float" -#: * viewDimension :: :obj:`enums.TextureViewDimension ` = "2d" -#: * multisampled :: bool = false -TextureBindingLayout = Struct( - "TextureBindingLayout", - sample_type="enums.TextureSampleType", - view_dimension="enums.TextureViewDimension", - multisampled="bool", -) - -#: * access :: :obj:`enums.StorageTextureAccess ` = "write-only" 
-#: * format :: :obj:`enums.TextureFormat ` -#: * viewDimension :: :obj:`enums.TextureViewDimension ` = "2d" -StorageTextureBindingLayout = Struct( - "StorageTextureBindingLayout", - access="enums.StorageTextureAccess", - format="enums.TextureFormat", - view_dimension="enums.TextureViewDimension", -) - -ExternalTextureBindingLayout = Struct( - "ExternalTextureBindingLayout", -) - -#: * label :: str = "" -#: * layout :: :class:`GPUBindGroupLayout ` -#: * entries :: List[:obj:`structs.BindGroupEntry `] -BindGroupDescriptor = Struct( - "BindGroupDescriptor", - label="str", - layout="GPUBindGroupLayout", - entries="List[structs.BindGroupEntry]", -) - -#: * binding :: int -#: * resource :: Union[:class:`GPUSampler `, :class:`GPUTextureView `, object, :obj:`structs.BufferBinding `] -BindGroupEntry = Struct( - "BindGroupEntry", - binding="int", - resource="Union[GPUSampler, GPUTextureView, object, structs.BufferBinding]", -) - -#: * buffer :: :class:`GPUBuffer ` -#: * offset :: int = 0 -#: * size :: int = None -BufferBinding = Struct( - "BufferBinding", - buffer="GPUBuffer", - offset="int", - size="int", -) - -#: * label :: str = "" -#: * bindGroupLayouts :: List[:class:`GPUBindGroupLayout `] -PipelineLayoutDescriptor = Struct( - "PipelineLayoutDescriptor", - label="str", - bind_group_layouts="List[GPUBindGroupLayout]", -) - -#: * label :: str = "" -#: * code :: str -#: * sourceMap :: dict = None -#: * compilationHints :: List[:obj:`structs.ShaderModuleCompilationHint `] = [] -ShaderModuleDescriptor = Struct( - "ShaderModuleDescriptor", - label="str", - code="str", - source_map="dict", - compilation_hints="List[structs.ShaderModuleCompilationHint]", -) - -#: * entryPoint :: str -#: * layout :: Union[:class:`GPUPipelineLayout `, :obj:`enums.AutoLayoutMode `] = None -ShaderModuleCompilationHint = Struct( - "ShaderModuleCompilationHint", - entry_point="str", - layout="Union[GPUPipelineLayout, enums.AutoLayoutMode]", -) - -#: * reason :: :obj:`enums.PipelineErrorReason ` -PipelineErrorInit = Struct( - "PipelineErrorInit", - reason="enums.PipelineErrorReason", -) - -#: * module :: :class:`GPUShaderModule ` -#: * entryPoint :: str -#: * constants :: Dict[str, float] = None -ProgrammableStage = Struct( - "ProgrammableStage", - module="GPUShaderModule", - entry_point="str", - constants="Dict[str, float]", -) - -#: * label :: str = "" -#: * layout :: Union[:class:`GPUPipelineLayout `, :obj:`enums.AutoLayoutMode `] -#: * compute :: :obj:`structs.ProgrammableStage ` -ComputePipelineDescriptor = Struct( - "ComputePipelineDescriptor", - label="str", - layout="Union[GPUPipelineLayout, enums.AutoLayoutMode]", - compute="structs.ProgrammableStage", -) - -#: * label :: str = "" -#: * layout :: Union[:class:`GPUPipelineLayout `, :obj:`enums.AutoLayoutMode `] -#: * vertex :: :obj:`structs.VertexState ` -#: * primitive :: :obj:`structs.PrimitiveState ` = {} -#: * depthStencil :: :obj:`structs.DepthStencilState ` = None -#: * multisample :: :obj:`structs.MultisampleState ` = {} -#: * fragment :: :obj:`structs.FragmentState ` = None -RenderPipelineDescriptor = Struct( - "RenderPipelineDescriptor", - label="str", - layout="Union[GPUPipelineLayout, enums.AutoLayoutMode]", - vertex="structs.VertexState", - primitive="structs.PrimitiveState", - depth_stencil="structs.DepthStencilState", - multisample="structs.MultisampleState", - fragment="structs.FragmentState", -) - -#: * topology :: :obj:`enums.PrimitiveTopology ` = "triangle-list" -#: * stripIndexFormat :: :obj:`enums.IndexFormat ` = None -#: * frontFace :: 
:obj:`enums.FrontFace ` = "ccw" -#: * cullMode :: :obj:`enums.CullMode ` = "none" -#: * unclippedDepth :: bool = false -PrimitiveState = Struct( - "PrimitiveState", - topology="enums.PrimitiveTopology", - strip_index_format="enums.IndexFormat", - front_face="enums.FrontFace", - cull_mode="enums.CullMode", - unclipped_depth="bool", -) - -#: * count :: int = 1 -#: * mask :: int = 0xFFFFFFFF -#: * alphaToCoverageEnabled :: bool = false -MultisampleState = Struct( - "MultisampleState", - count="int", - mask="int", - alpha_to_coverage_enabled="bool", -) - -#: * module :: :class:`GPUShaderModule ` -#: * entryPoint :: str -#: * constants :: Dict[str, float] = None -#: * targets :: List[:obj:`structs.ColorTargetState `] -FragmentState = Struct( - "FragmentState", - module="GPUShaderModule", - entry_point="str", - constants="Dict[str, float]", - targets="List[structs.ColorTargetState]", -) - -#: * format :: :obj:`enums.TextureFormat ` -#: * blend :: :obj:`structs.BlendState ` = None -#: * writeMask :: :obj:`flags.ColorWrite ` = 0xF -ColorTargetState = Struct( - "ColorTargetState", - format="enums.TextureFormat", - blend="structs.BlendState", - write_mask="flags.ColorWrite", -) - -#: * color :: :obj:`structs.BlendComponent ` -#: * alpha :: :obj:`structs.BlendComponent ` -BlendState = Struct( - "BlendState", - color="structs.BlendComponent", - alpha="structs.BlendComponent", -) - -#: * operation :: :obj:`enums.BlendOperation ` = "add" -#: * srcFactor :: :obj:`enums.BlendFactor ` = "one" -#: * dstFactor :: :obj:`enums.BlendFactor ` = "zero" -BlendComponent = Struct( - "BlendComponent", - operation="enums.BlendOperation", - src_factor="enums.BlendFactor", - dst_factor="enums.BlendFactor", -) - -#: * format :: :obj:`enums.TextureFormat ` -#: * depthWriteEnabled :: bool = None -#: * depthCompare :: :obj:`enums.CompareFunction ` = None -#: * stencilFront :: :obj:`structs.StencilFaceState ` = {} -#: * stencilBack :: :obj:`structs.StencilFaceState ` = {} -#: * stencilReadMask :: int = 0xFFFFFFFF -#: * stencilWriteMask :: int = 0xFFFFFFFF -#: * depthBias :: int = 0 -#: * depthBiasSlopeScale :: float = 0 -#: * depthBiasClamp :: float = 0 -DepthStencilState = Struct( - "DepthStencilState", - format="enums.TextureFormat", - depth_write_enabled="bool", - depth_compare="enums.CompareFunction", - stencil_front="structs.StencilFaceState", - stencil_back="structs.StencilFaceState", - stencil_read_mask="int", - stencil_write_mask="int", - depth_bias="int", - depth_bias_slope_scale="float", - depth_bias_clamp="float", -) - -#: * compare :: :obj:`enums.CompareFunction ` = "always" -#: * failOp :: :obj:`enums.StencilOperation ` = "keep" -#: * depthFailOp :: :obj:`enums.StencilOperation ` = "keep" -#: * passOp :: :obj:`enums.StencilOperation ` = "keep" -StencilFaceState = Struct( - "StencilFaceState", - compare="enums.CompareFunction", - fail_op="enums.StencilOperation", - depth_fail_op="enums.StencilOperation", - pass_op="enums.StencilOperation", -) - -#: * module :: :class:`GPUShaderModule ` -#: * entryPoint :: str -#: * constants :: Dict[str, float] = None -#: * buffers :: List[:obj:`structs.VertexBufferLayout `] = [] -VertexState = Struct( - "VertexState", - module="GPUShaderModule", - entry_point="str", - constants="Dict[str, float]", - buffers="List[structs.VertexBufferLayout]", -) - -#: * arrayStride :: int -#: * stepMode :: :obj:`enums.VertexStepMode ` = "vertex" -#: * attributes :: List[:obj:`structs.VertexAttribute `] -VertexBufferLayout = Struct( - "VertexBufferLayout", - array_stride="int", - 
step_mode="enums.VertexStepMode", - attributes="List[structs.VertexAttribute]", -) - -#: * format :: :obj:`enums.VertexFormat ` -#: * offset :: int -#: * shaderLocation :: int -VertexAttribute = Struct( - "VertexAttribute", - format="enums.VertexFormat", - offset="int", - shader_location="int", -) - -#: * offset :: int = 0 -#: * bytesPerRow :: int = None -#: * rowsPerImage :: int = None -ImageDataLayout = Struct( - "ImageDataLayout", - offset="int", - bytes_per_row="int", - rows_per_image="int", -) - -#: * offset :: int = 0 -#: * bytesPerRow :: int = None -#: * rowsPerImage :: int = None -#: * buffer :: :class:`GPUBuffer ` -ImageCopyBuffer = Struct( - "ImageCopyBuffer", - offset="int", - bytes_per_row="int", - rows_per_image="int", - buffer="GPUBuffer", -) - -#: * texture :: :class:`GPUTexture ` -#: * mipLevel :: int = 0 -#: * origin :: Union[List[int], :obj:`structs.Origin3D `] = {} -#: * aspect :: :obj:`enums.TextureAspect ` = "all" -ImageCopyTexture = Struct( - "ImageCopyTexture", - texture="GPUTexture", - mip_level="int", - origin="Union[List[int], structs.Origin3D]", - aspect="enums.TextureAspect", -) - -#: * source :: Union[memoryview, object] -#: * origin :: Union[List[int], :obj:`structs.Origin2D `] = {} -#: * flipY :: bool = false -ImageCopyExternalImage = Struct( - "ImageCopyExternalImage", - source="Union[memoryview, object]", - origin="Union[List[int], structs.Origin2D]", - flip_y="bool", -) - -#: * label :: str = "" -CommandBufferDescriptor = Struct( - "CommandBufferDescriptor", - label="str", -) - -#: * label :: str = "" -CommandEncoderDescriptor = Struct( - "CommandEncoderDescriptor", - label="str", -) - -#: * querySet :: :class:`GPUQuerySet ` -#: * beginningOfPassWriteIndex :: int = None -#: * endOfPassWriteIndex :: int = None -ComputePassTimestampWrites = Struct( - "ComputePassTimestampWrites", - query_set="GPUQuerySet", - beginning_of_pass_write_index="int", - end_of_pass_write_index="int", -) - -#: * label :: str = "" -#: * timestampWrites :: :obj:`structs.ComputePassTimestampWrites ` = None -ComputePassDescriptor = Struct( - "ComputePassDescriptor", - label="str", - timestamp_writes="structs.ComputePassTimestampWrites", -) - -#: * querySet :: :class:`GPUQuerySet ` -#: * beginningOfPassWriteIndex :: int = None -#: * endOfPassWriteIndex :: int = None -RenderPassTimestampWrites = Struct( - "RenderPassTimestampWrites", - query_set="GPUQuerySet", - beginning_of_pass_write_index="int", - end_of_pass_write_index="int", -) - -#: * label :: str = "" -#: * colorAttachments :: List[:obj:`structs.RenderPassColorAttachment `] -#: * depthStencilAttachment :: :obj:`structs.RenderPassDepthStencilAttachment ` = None -#: * occlusionQuerySet :: :class:`GPUQuerySet ` = None -#: * timestampWrites :: :obj:`structs.RenderPassTimestampWrites ` = None -#: * maxDrawCount :: int = 50000000 -RenderPassDescriptor = Struct( - "RenderPassDescriptor", - label="str", - color_attachments="List[structs.RenderPassColorAttachment]", - depth_stencil_attachment="structs.RenderPassDepthStencilAttachment", - occlusion_query_set="GPUQuerySet", - timestamp_writes="structs.RenderPassTimestampWrites", - max_draw_count="int", -) - -#: * view :: :class:`GPUTextureView ` -#: * depthSlice :: int = None -#: * resolveTarget :: :class:`GPUTextureView ` = None -#: * clearValue :: Union[List[float], :obj:`structs.Color `] = None -#: * loadOp :: :obj:`enums.LoadOp ` -#: * storeOp :: :obj:`enums.StoreOp ` -RenderPassColorAttachment = Struct( - "RenderPassColorAttachment", - view="GPUTextureView", - depth_slice="int", - 
resolve_target="GPUTextureView", - clear_value="Union[List[float], structs.Color]", - load_op="enums.LoadOp", - store_op="enums.StoreOp", -) - -#: * view :: :class:`GPUTextureView ` -#: * depthClearValue :: float = None -#: * depthLoadOp :: :obj:`enums.LoadOp ` = None -#: * depthStoreOp :: :obj:`enums.StoreOp ` = None -#: * depthReadOnly :: bool = false -#: * stencilClearValue :: int = 0 -#: * stencilLoadOp :: :obj:`enums.LoadOp ` = None -#: * stencilStoreOp :: :obj:`enums.StoreOp ` = None -#: * stencilReadOnly :: bool = false -RenderPassDepthStencilAttachment = Struct( - "RenderPassDepthStencilAttachment", - view="GPUTextureView", - depth_clear_value="float", - depth_load_op="enums.LoadOp", - depth_store_op="enums.StoreOp", - depth_read_only="bool", - stencil_clear_value="int", - stencil_load_op="enums.LoadOp", - stencil_store_op="enums.StoreOp", - stencil_read_only="bool", -) - -#: * label :: str = "" -#: * colorFormats :: List[:obj:`enums.TextureFormat `] -#: * depthStencilFormat :: :obj:`enums.TextureFormat ` = None -#: * sampleCount :: int = 1 -RenderPassLayout = Struct( - "RenderPassLayout", - label="str", - color_formats="List[enums.TextureFormat]", - depth_stencil_format="enums.TextureFormat", - sample_count="int", -) - -#: * label :: str = "" -RenderBundleDescriptor = Struct( - "RenderBundleDescriptor", - label="str", -) - -#: * label :: str = "" -#: * colorFormats :: List[:obj:`enums.TextureFormat `] -#: * depthStencilFormat :: :obj:`enums.TextureFormat ` = None -#: * sampleCount :: int = 1 -#: * depthReadOnly :: bool = false -#: * stencilReadOnly :: bool = false -RenderBundleEncoderDescriptor = Struct( - "RenderBundleEncoderDescriptor", - label="str", - color_formats="List[enums.TextureFormat]", - depth_stencil_format="enums.TextureFormat", - sample_count="int", - depth_read_only="bool", - stencil_read_only="bool", -) - -#: * label :: str = "" -QueueDescriptor = Struct( - "QueueDescriptor", - label="str", -) - -#: * label :: str = "" -#: * type :: :obj:`enums.QueryType ` -#: * count :: int -QuerySetDescriptor = Struct( - "QuerySetDescriptor", - label="str", - type="enums.QueryType", - count="int", -) - -#: * device :: :class:`GPUDevice ` -#: * format :: :obj:`enums.TextureFormat ` -#: * usage :: :obj:`flags.TextureUsage ` = 0x10 -#: * viewFormats :: List[:obj:`enums.TextureFormat `] = [] -#: * colorSpace :: str = "srgb" -#: * alphaMode :: :obj:`enums.CanvasAlphaMode ` = "opaque" -CanvasConfiguration = Struct( - "CanvasConfiguration", - device="GPUDevice", - format="enums.TextureFormat", - usage="flags.TextureUsage", - view_formats="List[enums.TextureFormat]", - color_space="str", - alpha_mode="enums.CanvasAlphaMode", -) - -#: * error :: :class:`GPUError ` -UncapturedErrorEventInit = Struct( - "UncapturedErrorEventInit", - error="GPUError", -) - -#: * r :: float -#: * g :: float -#: * b :: float -#: * a :: float -Color = Struct( - "Color", - r="float", - g="float", - b="float", - a="float", -) - -#: * x :: int = 0 -#: * y :: int = 0 -Origin2D = Struct( - "Origin2D", - x="int", - y="int", -) - -#: * x :: int = 0 -#: * y :: int = 0 -#: * z :: int = 0 -Origin3D = Struct( - "Origin3D", - x="int", - y="int", - z="int", -) - -#: * width :: int -#: * height :: int = 1 -#: * depthOrArrayLayers :: int = 1 -Extent3D = Struct( - "Extent3D", - width="int", - height="int", - depth_or_array_layers="int", -) From 5857d19f757e575884917cd44065c69ae8ae435a Mon Sep 17 00:00:00 2001 From: Jan Date: Wed, 27 Dec 2023 23:00:32 +0100 Subject: [PATCH 05/20] Remove docs --- docs/Makefile | 20 -- 
docs/_static/style.css | 0 docs/_templates/wgpu_class_layout.rst | 7 - docs/backends.rst | 83 --------- docs/conf.py | 166 ----------------- docs/gui.rst | 172 ----------------- docs/guide.rst | 254 -------------------------- docs/index.rst | 24 --- docs/make.bat | 35 ---- docs/start.rst | 103 ----------- docs/utils.rst | 71 ------- docs/wgpu.rst | 224 ----------------------- docs/wgpu_enums.rst | 7 - docs/wgpu_flags.rst | 7 - docs/wgpu_structs.rst | 7 - 15 files changed, 1180 deletions(-) delete mode 100644 docs/Makefile delete mode 100644 docs/_static/style.css delete mode 100644 docs/_templates/wgpu_class_layout.rst delete mode 100644 docs/backends.rst delete mode 100644 docs/conf.py delete mode 100644 docs/gui.rst delete mode 100644 docs/guide.rst delete mode 100644 docs/index.rst delete mode 100644 docs/make.bat delete mode 100644 docs/start.rst delete mode 100644 docs/utils.rst delete mode 100644 docs/wgpu.rst delete mode 100644 docs/wgpu_enums.rst delete mode 100644 docs/wgpu_flags.rst delete mode 100644 docs/wgpu_structs.rst diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index d4bb2cb..0000000 --- a/docs/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/_static/style.css b/docs/_static/style.css deleted file mode 100644 index e69de29..0000000 diff --git a/docs/_templates/wgpu_class_layout.rst b/docs/_templates/wgpu_class_layout.rst deleted file mode 100644 index 24f62af..0000000 --- a/docs/_templates/wgpu_class_layout.rst +++ /dev/null @@ -1,7 +0,0 @@ -{{ objname | escape | underline}} - -.. currentmodule:: {{ module }} - -.. autoclass:: {{ objname }} - :members: - :show-inheritance: diff --git a/docs/backends.rst b/docs/backends.rst deleted file mode 100644 index 7f91098..0000000 --- a/docs/backends.rst +++ /dev/null @@ -1,83 +0,0 @@ -The wgpu backends -================= - -What do backends do? --------------------- - -The heavy lifting (i.e communication with the hardware) in wgpu is performed by -one of its backends. - -Backends can be selected explicitly by importing them: - -.. code-block:: py - - import wgpu.backends.wgpu_natve - -There is also an `auto` backend to help keep code portable: - -.. code-block:: py - - import wgpu.backends.auto - -In most cases, however, you don't need any of the above imports, because -a backend is automatically selected in the first call to :func:`wgpu.GPU.request_adapter`. - -Each backend can also provide additional (backend-specific) -functionality. To keep the main API clean and portable, this extra -functionality is provided as a functional API that has to be imported -from the specific backend. - - -The wgpu_native backend ------------------------ - -.. code-block:: py - - import wgpu.backends.wgpu_natve - - -This backend wraps `wgpu-native `__, -which is a C-api for `wgpu `__, a Rust library -that wraps Vulkan, Metal, DirectX12 and more. -This is the main backend for wgpu-core. 
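As a rough sketch of how this backend and the extra helpers documented a little further down can be used together (only ``enumerate_adapters()`` and ``request_device()`` are taken from these docs; the variable names and the choice of the first adapter are merely illustrative):

.. code-block:: py

    import wgpu.backends.wgpu_native

    # List every adapter that wgpu-native can find on this system.
    adapters = wgpu.backends.wgpu_native.enumerate_adapters()
    for adapter in adapters:
        print(adapter)

    # Create a device from one of them (assumes at least one adapter was found).
    device = adapters[0].request_device()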
The only working backend, right now, to be precise. -It also works out of the box, because the wgpu-native DLL is shipped with wgpu-py. - -The wgpu_native backend provides a few extra functionalities: - - -.. py:function:: wgpu.backends.wgpu_native.enumerate_adapters() - - Return a list of all available adapters on this system. - - :return: Adapters - :rtype: list - - -.. py:function:: wgpu.backends.wgpu_native.request_device_tracing(adapter, trace_path, *, label="", required_features, required_limits, default_queue) - - An alternative to :func:`wgpu.GPUAdapter.request_adapter`, that streams a trace - of all low level calls to disk, so the visualization can be replayed (also on other systems), - investigated, and debugged. - - :param adapter: The adapter to create a device for. - :param trace_path: The path to an (empty) directory. Is created if it does not exist. - :param label: A human readable label. Optional. - :param required_features: The features (extensions) that you need. Default []. - :param required_limits: the various limits that you need. Default {}. - :param default_queue: Descriptor for the default queue. Optional. - :return: Device - :rtype: wgpu.GPUDevice - - -The js_webgpu backend ---------------------- - -.. code-block:: py - - import wgpu.backends.js_webgpu - - -This backend calls into the JavaScript WebGPU API. For this, the Python code would need -access to JavaScript - this backend is intended for use-cases like `PScript `__ `PyScript `__, and `RustPyhon `__. - -This backend is still a stub, see `issue #407 `__ for details. diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 1e40388..0000000 --- a/docs/conf.py +++ /dev/null @@ -1,166 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. - -import re -import os -import sys -import shutil - - -ROOT_DIR = os.path.abspath(os.path.join(__file__, "..", "..")) -sys.path.insert(0, ROOT_DIR) - -os.environ["WGPU_FORCE_OFFSCREEN"] = "true" - -import wgpu # noqa: E402 - - -# -- Tests ------------------------------------------------------------------- - -# Ensure that all classes are referenced in the alphabetic list, -# and referenced at least one other time as part of the explanatory text. 
-with open(os.path.join(ROOT_DIR, "docs", "wgpu.rst"), "rb") as f: - wgpu_text = f.read().decode() - wgpu_lines = [line.strip() for line in wgpu_text.splitlines()] -for cls_name in wgpu.classes.__all__: - assert ( - f"~{cls_name}" in wgpu_lines - ), f"Class '{cls_name}' not listed in class list in wgpu.rst" - assert ( - f":class:`{cls_name}`" in wgpu_text - ), f"Class '{cls_name}' not referenced in the text in wgpu.rst" - - -# -- Hacks to tweak docstrings ----------------------------------------------- - -# Make flags and enums appear better in docs -wgpu.enums._use_sphinx_repr = True -wgpu.flags._use_sphinx_repr = True -wgpu.structs._use_sphinx_repr = True - -# Build regular expressions to resolve crossrefs -func_ref_pattern = re.compile(r"\ (`\w+?\(\)`)", re.MULTILINE) -ob_ref_pattern = re.compile( - r"\ (`(GPU|gui\.Wgpu|flags\.|enums\.|structs\.)\w+?`)", re.MULTILINE -) -argtype_ref_pattern = re.compile( - r"\(((GPU|gui\.Wgpu|flags\.|enums\.|structs\.)\w+?)\)", re.MULTILINE -) - - -def resolve_crossrefs(text): - text = (text or "").lstrip() - - # Turn references to functions into a crossref. - # E.g. `Foo.bar()` - i2 = 0 - while True: - m = func_ref_pattern.search(text, i2) - if not m: - break - i1, i2 = m.start(1), m.end(1) - ref_indicator = ":func:" - text = text[:i1] + ref_indicator + text[i1:] - - # Turn references to objects (classes, flags, enums, and structs) into a crossref. - # E.g. `GPUDevice` or `flags.BufferUsage` - i2 = 0 - while True: - m = ob_ref_pattern.search(text, i2) - if not m: - break - i1, i2 = m.start(1), m.end(1) - prefix = m.group(2) # e.g. GPU or flags. - ref_indicator = ":obj:" if prefix.lower() == prefix else ":class:" - text = text[:i1] + ref_indicator + text[i1:] - - # Turn function arg types into a crossref. - # E.g. (GPUDevice) or (flags.BufferUsage) - i2 = 0 - while True: - m = argtype_ref_pattern.search(text) - if not m: - break - i1, i2 = m.start(1), m.end(1) - ref_indicator = ":obj:" - text = text[:i1] + ref_indicator + "`" + text[i1:i2] + "`" + text[i2:] - - return text - - -# Tweak docstrings of classes and their methods -for module, hide_class_signature in [(wgpu.classes, True), (wgpu.gui, False)]: - for cls_name in module.__all__: - cls = getattr(module, cls_name) - # Class docstring - docs = resolve_crossrefs(cls.__doc__) - if hide_class_signature: - docs = cls.__name__ + "()\n\n " + docs - cls.__doc__ = docs or None - # Docstring of methods - for method in cls.__dict__.values(): - if callable(method) and hasattr(method, "__code__"): - docs = resolve_crossrefs(method.__doc__) - if ( - method.__code__.co_argcount == 1 - and method.__code__.co_kwonlyargcount > 0 - ): - sig = method.__name__ + "(**parameters)" - docs = sig + "\n\n " + docs - method.__doc__ = docs or None - - -# -- Project information ----------------------------------------------------- - -project = "wgpu-py" -copyright = "2020-2023, Almar Klein, Korijn van Golen" -author = "Almar Klein, Korijn van Golen" -release = wgpu.__version__ - - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.napoleon", - "sphinx.ext.autosummary", -] - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ["_templates"] - -# Just let autosummary produce a new version each time -shutil.rmtree(os.path.join(os.path.dirname(__file__), "generated"), True) - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] - -master_doc = "index" - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. - -if not (os.getenv("READTHEDOCS") or os.getenv("CI")): - html_theme = "sphinx_rtd_theme" - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] diff --git a/docs/gui.rst b/docs/gui.rst deleted file mode 100644 index c052205..0000000 --- a/docs/gui.rst +++ /dev/null @@ -1,172 +0,0 @@ -gui API -======= - -.. currentmodule:: wgpu.gui - -You can use vanilla wgpu for compute tasks and to render offscreen. To -render to a window on screen we need a *canvas*. Since the Python -ecosystem provides many different GUI toolkits, wgpu implements a base -canvas class, and has builtin support for a few GUI toolkits. At the -moment these include GLFW, Jupyter, Qt, and wx. - - -The Canvas base classes ------------------------ - -.. autosummary:: - :nosignatures: - :toctree: generated - :template: wgpu_class_layout.rst - - ~WgpuCanvasInterface - ~WgpuCanvasBase - ~WgpuAutoGui - ~WgpuOffscreenCanvasBase - - -For each supported GUI toolkit there is a module that implements a ``WgpuCanvas`` class, -which inherits from :class:`WgpuCanvasBase`, providing a common API. -The GLFW, Qt, and Jupyter backends also inherit from :class:`WgpuAutoGui` to include -support for events (interactivity). In the next sections we demonstrates the different -canvas classes that you can use. - - -The auto GUI backend --------------------- - -The default approach for examples and small applications is to use -the automatically selected GUI backend. At the moment this selects -either the GLFW, Qt, or Jupyter backend, depending on the environment. - -To implement interaction, the ``canvas`` has a :func:`WgpuAutoGui.handle_event()` method -that can be overloaded. Alternatively you can use it's :func:`WgpuAutoGui.add_event_handler()` -method. See the `event spec `_ -for details about the event objects. - - -Also see the `triangle auto example `_ -and `cube example `_. - -.. code-block:: py - - from wgpu.gui.auto import WgpuCanvas, run, call_later - - canvas = WgpuCanvas(title="Example") - canvas.request_draw(your_draw_function) - - run() - - -Support for GLFW ----------------- - -`GLFW `_ is a lightweight windowing toolkit. -Install it with ``pip install glfw``. The preferred approach is to use the auto backend, -but you can replace ``from wgpu.gui.auto`` with ``from wgpu.gui.glfw`` to force using GLFW. - -.. code-block:: py - - from wgpu.gui.glfw import WgpuCanvas, run, call_later - - canvas = WgpuCanvas(title="Example") - canvas.request_draw(your_draw_function) - - run() - - -Support for Qt --------------- - -There is support for PyQt5, PyQt6, PySide2 and PySide6. The wgpu library detects what -library you are using by looking what module has been imported. 
-For a toplevel widget, the ``gui.qt.WgpuCanvas`` class can be imported. If you want to -embed the canvas as a subwidget, use ``gui.qt.WgpuWidget`` instead. - -Also see the `Qt triangle example `_ -and `Qt triangle embed example `_. - -.. code-block:: py - - # Import any of the Qt libraries before importing the WgpuCanvas. - # This way wgpu knows which Qt library to use. - from PySide6 import QtWidgets - from wgpu.gui.qt import WgpuCanvas - - app = QtWidgets.QApplication([]) - - # Instantiate the canvas - canvas = WgpuCanvas(title="Example") - - # Tell the canvas what drawing function to call - canvas.request_draw(your_draw_function) - - app.exec_() - - -Support for wx --------------- - -There is support for embedding a wgpu visualization in wxPython. -For a toplevel widget, the ``gui.wx.WgpuCanvas`` class can be imported. If you want to -embed the canvas as a subwidget, use ``gui.wx.WgpuWidget`` instead. - -Also see the `wx triangle example `_ -and `wx triangle embed example `_. - -.. code-block:: py - - import wx - from wgpu.gui.wx import WgpuCanvas - - app = wx.App() - - # Instantiate the canvas - canvas = WgpuCanvas(title="Example") - - # Tell the canvas what drawing function to call - canvas.request_draw(your_draw_function) - - app.MainLoop() - - - -Support for offscreen ---------------------- - -You can also use a "fake" canvas to draw offscreen and get the result as a numpy array. -Note that you can render to a texture without using any canvas -object, but in some cases it's convenient to do so with a canvas-like API. - -.. code-block:: py - - from wgpu.gui.offscreen import WgpuCanvas - - # Instantiate the canvas - canvas = WgpuCanvas(size=(500, 400), pixel_ratio=1) - - # ... - - # Tell the canvas what drawing function to call - canvas.request_draw(your_draw_function) - - # Perform a draw - array = canvas.draw() # numpy array with shape (400, 500, 4) - - -Support for Jupyter lab and notebook ------------------------------------- - -WGPU can be used in Jupyter lab and the Jupyter notebook. This canvas -is based on `jupyter_rfb `_, an ipywidget -subclass implementing a remote frame-buffer. There are also some `wgpu examples `_. - -.. code-block:: py - - # from wgpu.gui.jupyter import WgpuCanvas # Direct approach - from wgpu.gui.auto import WgpuCanvas # Approach compatible with desktop usage - - canvas = WgpuCanvas() - - # ... wgpu code - - canvas # Use as cell output diff --git a/docs/guide.rst b/docs/guide.rst deleted file mode 100644 index 22c1a2c..0000000 --- a/docs/guide.rst +++ /dev/null @@ -1,254 +0,0 @@ -Guide -===== - - -This library (``wgpu``) presents a Pythonic API for the `WebGPU spec -`_. It is an API to control graphics -hardware. Like OpenGL but modern. Or like Vulkan but higher level. -GPU programming is a craft that requires knowledge of how GPU's work. - - -Getting started ---------------- - -Creating a canvas -+++++++++++++++++ - -If you want to render to the screen, you need a canvas. Multiple -GUI toolkits are supported, see the :doc:`gui`. In general, it's easiest to let ``wgpu`` select a GUI automatically: - -.. code-block:: py - - from wgpu.gui.auto import WgpuCanvas, run - - canvas = WgpuCanvas(title="a wgpu example") - - -Next, we can setup the render context, which we will need later on. - -.. 
code-block:: py - - present_context = canvas.get_context() - render_texture_format = present_context.get_preferred_format(device.adapter) - present_context.configure(device=device, format=render_texture_format) - - -Obtaining a device -++++++++++++++++++ - -The next step is to obtain an adapter, which represents an abstract render device. -You can pass it the ``canvas`` that you just created, or pass ``None`` for the canvas -if you have none (e.g. for compute or offscreen rendering). From the adapter, -you can obtain a device. - -.. code-block:: py - - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - device = adapter.request_device() - -The ``wgpu.gpu`` object is the API entrypoint (:class:`wgpu.GPU`). It contains just a handful of functions, -including ``request_adapter()``. The device is used to create most other GPU objects. - - -Creating buffers, textures shaders, etc. -++++++++++++++++++++++++++++++++++++++++ - -Using the device, you can create buffers, textures, write shader code, and put -these together into pipeline objects. How to do this depends a lot on what you -want to achieve, and is therefore out of scope for this guide. Have a look at the examples -or some of the tutorials that we link to below. - -Setting up a draw function -++++++++++++++++++++++++++ - -Let's now define a function that will actually draw the stuff we put together in -the previous step. - -.. code-block:: py - - def draw_frame(): - - # We'll record commands that we do on a render pass object - command_encoder = device.create_command_encoder() - current_texture_view = present_context.get_current_texture() - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture_view, - "resolve_target": None, - "clear_value": (1, 1, 1, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - - # Perform commands, something like ... - render_pass.set_pipeline(...) - render_pass.set_index_buffer(...) - render_pass.set_vertex_buffer(...) - render_pass.set_bind_group(...) - render_pass.draw_indexed(...) - - # When done, submit the commands to the device queue. - render_pass.end() - device.queue.submit([command_encoder.finish()]) - - # If you want to draw continuously, request a new draw right now - canvas.request_draw() - - -Starting the event loop -+++++++++++++++++++++++ - - -We can now pass the above render function to the canvas. The canvas will then -call the function whenever it (re)draws the window. And finally, we call ``run()`` to enter the mainloop. - -.. code-block:: py - - canvas.request_draw(draw_frame) - run() - - -Offscreen -+++++++++ - -If you render offscreen, or only do compute, you do not need a canvas. You also won't need a GUI toolkit, draw function or enter the event loop. -Instead, you will obtain a command encoder and submit it's records to the queue directly. - - -Examples and external resources -------------------------------- - -Examples that show wgpu-py in action: - -* https://github.com/pygfx/wgpu-py/tree/main/examples - -.. note:: The examples in the main branch of the repository may not match the pip installable version. Be sure to refer to the examples from the git tag that matches the version of wgpu you have installed. - - -External resources: - -* https://webgpu.rocks/ -* https://sotrh.github.io/learn-wgpu/ -* https://rust-tutorials.github.io/learn-wgpu/ - - -A brief history of WebGPU -------------------------- - -For years, OpenGL has been the only cross-platform API to talk to the GPU. 
-But over time OpenGL has grown into an inconsistent and complex API ... - - *OpenGL is dying* - --- Dzmitry Malyshau at `Fosdem 2020 `_ - -In recent years, modern API's have emerged that solve many of OpenGL's -problems. You may have heard of Vulkan, Metal, and DX12. These -API's are much closer to the hardware, which makes the drivers more -consistent and reliable. Unfortunately, the huge amount of "knobs to -turn" also makes them quite hard to work with for developers. - -Therefore, higher level API's are needed, which use the same concepts, but are much easier to work with. -The most notable one is the `WebGPU specification `_. This is what future devs -will be using to write GPU code for the browser. And for desktop and mobile as well. - -As the WebGPU spec is being developed, a reference implementation is -also built. It's written in Rust and powers the WebGPU implementation in Firefox. -This reference implementation, called `wgpu `__, -also exposes a C-api (via `wgpu-native `__), -so that it can be wrapped in Python. And this is precisely what wgpu-py does. - -So in short, wgpu-py is a Python wrapper of wgpu, which is a desktop -implementation of WebGPU, an API that wraps Vulkan, Metal and DX12, -which talk to the GPU hardware. - - - -Coordinate system ------------------ - -In wgpu, the Y-axis is up in normalized device coordinate (NDC): point(-1.0, -1.0) -in NDC is located at the bottom-left corner of NDC. In addition, x and -y in NDC should be between -1.0 and 1.0 inclusive, while z in NDC should -be between 0.0 and 1.0 inclusive. Vertices out of this range in NDC -will not introduce any errors, but they will be clipped. - - -Array data ----------- - -The wgpu library makes no assumptions about how you store your data. -In places where you provide data to the API, it can consume any data -that supports the buffer protocol, which includes ``bytes``, -``bytearray``, ``memoryview``, ctypes arrays, and numpy arrays. - -In places where data is returned, the API returns a ``memoryview`` -object. These objects provide a quite versatile view on ndarray data: - -.. code-block:: py - - # One could, for instance read the content of a buffer - m = device.queue.read_buffer(buffer) - # Cast it to float32 - m = m.cast("f") - # Index it - m[0] - # Show the content - print(m.tolist()) - -Chances are that you prefer Numpy. Converting the ``memoryview`` to a -numpy array (without copying the data) is easy: - -.. code-block:: py - - array = np.frombuffer(m, np.float32) - - -Debugging ---------- - -If the default wgpu-backend causes issues, or if you want to run on a -different backend for another reason, you can set the -`WGPU_BACKEND_TYPE` environment variable to "Vulkan", "Metal", "D3D12", -"D3D11", or "OpenGL". - -The log messages produced (by Rust) in wgpu-native are captured and -injected into Python's "wgpu" logger. One can set the log level to -"INFO" or even "DEBUG" to get detailed logging information. - -Many GPU objects can be given a string label. This label will be used -in Rust validation errors, and is also used in e.g. RenderDoc to -identify objects. Additionally, you can insert debug markers at the -render/compute pass object, which will then show up in RenderDoc. - -Eventually, wgpu-native will fully validate API input. Until then, it -may be worthwhile to enable the Vulkan validation layers. To do so, run -a debug build of wgpu-native and make sure that the Lunar Vulkan SDK -is installed. 
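To make the backend selection and logging knobs described above concrete, here is a minimal sketch (only the standard library is assumed; the chosen backend and log level are arbitrary examples):

.. code-block:: py

    import os

    # Must be set before the wgpu-native backend is loaded.
    os.environ["WGPU_BACKEND_TYPE"] = "Vulkan"

    import logging
    import wgpu  # noqa: F401

    # Show the log messages that wgpu-native (Rust) injects into the "wgpu" logger.
    logging.basicConfig()
    logging.getLogger("wgpu").setLevel(logging.DEBUG)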
- -You can run your application via RenderDoc, which is able to capture a -frame, including all API calls, objects and the complete pipeline state, -and display all of that information within a nice UI. - -You can use ``adapter.request_device_tracing()`` to provide a directory path -where a trace of all API calls will be written. This trace can then be used -to re-play your use-case elsewhere (it's cross-platform). - -Also see wgpu-core's section on debugging: -https://github.com/gfx-rs/wgpu/wiki/Debugging-wgpu-Applications - - -Freezing apps -------------- - -In wgpu a PyInstaller-hook is provided to help simplify the freezing process -(it e.g. ensures that the wgpu-native DLL is included). This hook requires -PyInstaller version 4+. - -Our hook also includes ``glfw`` when it is available, so code using ``wgpu.gui.auto`` -should Just Work. - -Note that PyInstaller needs ``wgpu`` to be installed in `site-packages` for -the hook to work (i.e. it seems not to work with a ``pip -e .`` dev install). diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index d522b56..0000000 --- a/docs/index.rst +++ /dev/null @@ -1,24 +0,0 @@ -Welcome to the wgpu-py docs! -============================== - -.. automodule:: wgpu - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - start - guide - wgpu - backends - gui - utils - - - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat deleted file mode 100644 index 2119f51..0000000 --- a/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=. -set BUILDDIR=_build - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% - -:end -popd diff --git a/docs/start.rst b/docs/start.rst deleted file mode 100644 index 6c27905..0000000 --- a/docs/start.rst +++ /dev/null @@ -1,103 +0,0 @@ -Installation -============ - -.. note:: Since the API changes with each release,you may want to check the `CHANGELOG.md `_ when you upgrade to a newer version of wgpu. - -Install with pip ----------------- - -You can install ``wgpu-py`` via pip. -Python 3.8 or higher is required. Pypy is supported. Only depends on ``cffi`` (installed automatically by pip). - -.. code-block:: bash - - pip install wgpu - - -Since most users will want to render something to screen, we recommend installing GLFW as well: - -.. code-block:: bash - - pip install wgpu glfw - - -GUI libraries -------------- - -Multiple GUI backends are supported, see :doc:`the GUI API ` for details: - -* `glfw `_: a lightweight GUI for the desktop -* `jupyter_rfb `_: only needed if you plan on using wgpu in Jupyter -* qt (PySide6, PyQt6, PySide2, PyQt5) -* wx - - -The wgpu-native library ------------------------ - -The wheels that pip installs include the prebuilt binaries of `wgpu-native `_, so on most systems everything Just Works. 
-
-On Linux you need at least **pip >= 20.3**, and a recent Linux distribution; otherwise the binaries will not be available. See below for details.
-
-If you need/want, you can also `build wgpu-native yourself `_.
-You will then need to set the environment variable ``WGPU_LIB_PATH`` to let wgpu-py know where the DLL is located.
-
-
-Platform requirements
----------------------
-
-Under the hood, wgpu runs on Vulkan, Metal, or DX12. The wgpu-backend
-is selected automatically, but can be overridden by setting the
-``WGPU_BACKEND_TYPE`` environment variable to "Vulkan", "Metal", "D3D12",
-"D3D11", or "OpenGL".
-
-Windows
-+++++++
-
-On Windows 10+, things should just work. If your machine has a dedicated GPU,
-you may want to update to the latest (Nvidia or AMD) drivers.
-
-MacOS
-+++++
-
-On MacOS you need at least 10.13 (High Sierra) to have Metal/Vulkan support.
-
-Linux
-+++++
-
-On Linux, it's advisable to install the proprietary drivers of your GPU
-(if you have a dedicated GPU). You may need to ``apt install
-mesa-vulkan-drivers``. Wayland support is currently broken (we could use
-a hand to fix this).
-
-Binary wheels for Linux are only available for **manylinux_2_24**.
-This means that the installation requires ``pip >= 20.3``, and you need
-a recent Linux distribution, listed `here `_.
-
-If you wish to work with an older distribution, you will have to build
-wgpu-native yourself; see "The wgpu-native library" above. Note that wgpu-native
-still needs Vulkan support and may not compile / work on older
-distributions.
-
-Installing LavaPipe on Linux
-++++++++++++++++++++++++++++
-
-To run wgpu on systems that do not have a GPU (e.g. CI) you need a software renderer.
-On Windows this (probably) just works via DX12. On Linux you can use LavaPipe:
-
-.. code-block:: bash
-
-    sudo apt update -y -qq
-    sudo apt install --no-install-recommends -y libegl1-mesa libgl1-mesa-dri libxcb-xfixes0-dev mesa-vulkan-drivers
-
-The distribution's version of LavaPipe may be a bit outdated. To get a more recent version, you can use this PPA:
-
-.. code-block:: bash
-
-    sudo add-apt-repository ppa:oibaf/graphics-drivers -y
-
-.. note::
-
-    The precise visual output may differ between different implementations of Vulkan/Metal/DX12.
-    Therefore you should probably avoid per-pixel comparisons when multiple different systems are
-    involved. In wgpu-py and pygfx we have solved this by generating all reference images on CI (with LavaPipe).
diff --git a/docs/utils.rst b/docs/utils.rst
deleted file mode 100644
index 6ed4557..0000000
--- a/docs/utils.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-Utils
-=====
-
-The wgpu library provides a few utilities. Note that most functions below need to be explicitly imported.
-
-Logger
-------
-
-Errors, warnings, and info messages (including messages generated by
-wgpu-native) are logged using Python's default logging mechanics. The
-wgpu logger instance is in ``wgpu.logger``, but can also be obtained
-via:
-
-.. code-block:: py
-
-    import logging
-    logger = logging.getLogger("wgpu")
-
-
-Diagnostics
------------
-
-To print a full diagnostic report:
-
-.. code-block:: py
-
-    wgpu.diagnostics.print_report()
-
-To inspect (for example) the total buffer usage:
-
-.. code-block:: py
-
-    >>> counts = wgpu.diagnostics.object_counts.get_dict()
-    >>> print(counts["Buffer"])
-    {'count': 3, 'resource_mem': 784}
-
-
-.. autoclass:: wgpu._diagnostics.DiagnosticsRoot
-    :members:
-
-
-.. autoclass:: wgpu._diagnostics.Diagnostics
-    :members:
-
-
-Get default device
-------------------
-
-.. autofunction:: wgpu.utils.get_default_device
-
-
-Compute with buffers
---------------------
-
-.. code-block:: py
-
-    from wgpu.utils.compute import compute_with_buffers
-
-.. autofunction:: wgpu.utils.compute_with_buffers
-
-
-
-Shadertoy
----------
-
-.. code-block:: py
-
-    from wgpu.utils.shadertoy import Shadertoy
-
-.. autoclass:: wgpu.utils.shadertoy.Shadertoy
-    :members:
diff --git a/docs/wgpu.rst b/docs/wgpu.rst
deleted file mode 100644
index 372f15e..0000000
--- a/docs/wgpu.rst
+++ /dev/null
@@ -1,224 +0,0 @@
-wgpu API
-========
-
-.. currentmodule:: wgpu
-
-
-This document describes the wgpu API, which essentially is a Pythonic version of the
-`WebGPU API `_. It exposes an API
-for performing operations, such as rendering and computation, on a
-Graphics Processing Unit.
-
-.. note::
-    The WebGPU API is still being developed and occasionally there are backwards
-    incompatible changes. Since we mostly follow the WebGPU API, there may be
-    backwards incompatible changes to wgpu-py too. This will be so until
-    the WebGPU API settles as a standard. In the meantime, keep an eye on the
-    `CHANGELOG.md `_.
-
-
-How to read this API
---------------------
-
-The classes in this API all have a name starting with "GPU"; this helps
-discern them from flags and enums. These classes are never instantiated
-directly; new objects are returned by special methods (mostly from the device).
-
-Most methods in this API have no positional arguments; each argument
-must be referenced by name. Some argument values must be a :doc:`dict `; these
-can be thought of as "nested" arguments. Many arguments (and dict fields) must be a
-:doc:`flag ` or :doc:`enum `.
-Some arguments have a default value. Most do not.
-
-
-Differences from WebGPU
------------------------
-
-This API is derived from the WebGPU spec, but differs in a few ways.
-For example, methods that in WebGPU accept a descriptor/struct/dict,
-here accept the fields in that struct as keyword arguments.
-
-
-.. autodata:: wgpu._classes.apidiff
-    :annotation: Differences of base API:
-
-
-Each backend may implement extra functionality on top of the base API.
-This is listed in :doc:`backends `.
-
-
-Overview
---------
-
-This overview attempts to describe how all classes fit together. Scroll down for a list of all flags, enums, structs, and GPU classes.
-
-
-Adapter, device and canvas
-++++++++++++++++++++++++++
-
-The :class:`GPU` class represents the API root/entrypoint. An instance is available at ``wgpu.gpu``. This instance is loaded from one of the :doc:`backends `.
-
-The :class:`GPUAdapter` represents a hardware or software device, with specific
-features, limits and properties. To actually start using that hardware for computations or rendering, a :class:`GPUDevice` object must be requested from the adapter. This is a logical unit
-to control your hardware (or software).
-The device is the central object; most other GPU objects are created from it.
-Also see the convenience function :func:`wgpu.utils.get_default_device`.
-Information on the adapter can be obtained using :func:`wgpu.GPUAdapter.request_adapter_info` in the form of a :class:`GPUAdapterInfo`.
-
-A device is controlled with a specific backend API. By default one is selected automatically.
-This can be overridden by setting the
-`WGPU_BACKEND_TYPE` environment variable to "Vulkan", "Metal", "D3D12", "D3D11", or "OpenGL".
-
-The device and all objects created from it inherit from :class:`GPUObjectBase` - they represent something on the GPU.
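As a minimal sketch of that flow, mirroring the snippets used in the guide (the power preference is optional):

.. code-block:: py

    import wgpu

    # Request an adapter, and from it the device that creates all other GPU objects.
    adapter = wgpu.gpu.request_adapter(power_preference="high-performance")
    device = adapter.request_device()

    # Inspect the adapter that backs this device.
    print(adapter.request_adapter_info())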
-
-In most render use-cases you want the result to be presented to a canvas on the screen.
-The :class:`GPUCanvasContext` is the bridge between wgpu and the underlying GUI backend.
-
-Buffers and textures
-++++++++++++++++++++
-
-A :class:`GPUBuffer` can be created from a device. It is used to hold data that can
-be uploaded using its API. From the shader's point of view, the buffer can be accessed
-as a typed array.
-
-A :class:`GPUTexture` is similar to a buffer, but has some image-specific features.
-A texture can be 1D, 2D or 3D, and can have multiple levels of detail (i.e. lod or mipmaps).
-The texture itself represents the raw data; you can create one or more :class:`GPUTextureView` objects
-for it, which can be attached to a shader.
-
-To let a shader sample from a texture, you also need a :class:`GPUSampler` that
-defines the filtering and sampling behavior beyond the edges.
-
-Bind groups
-+++++++++++
-
-Shaders need access to resources like buffers, texture views, and samplers.
-The access to these resources occurs via so-called bindings. These are
-integer slots, which must be specified both via the API and in the shader.
-
-Bindings are organized into :class:`GPUBindGroup` s, which are essentially a list
-of :class:`GPUBinding` s.
-
-Further, in wgpu you need to specify a :class:`GPUBindGroupLayout`, providing
-meta-information about the binding (type, texture dimension, etc.).
-
-Multiple bind group layouts are collected in a :class:`GPUPipelineLayout`,
-which represents a complete layout description for a pipeline.
-
-Shaders and pipelines
-+++++++++++++++++++++
-
-The wgpu API knows three kinds of shaders: compute, vertex and fragment.
-Pipelines define how the shader is run, and with what resources.
-
-Shaders are represented by a :class:`GPUShaderModule`.
-
-Compute shaders are combined with a pipeline layout into a :class:`GPUComputePipeline`.
-Similarly, a vertex and (optional) fragment shader are combined with a pipeline layout
-into a :class:`GPURenderPipeline`. Both of these inherit from :class:`GPUPipelineBase`.
-
-Command buffers and encoders
-++++++++++++++++++++++++++++
-
-The actual rendering occurs by recording a series of commands and then submitting these commands.
-
-The root object to generate commands with is the :class:`GPUCommandEncoder`.
-This class inherits from :class:`GPUCommandsMixin` (because it generates commands),
-and :class:`GPUDebugCommandsMixin` (because it supports debugging).
-
-Commands specific to compute and rendering are generated with a :class:`GPUComputePassEncoder` and :class:`GPURenderPassEncoder` respectively. You get these from the command encoder by the
-corresponding ``begin_x_pass()`` method. These pass encoders inherit from
-:class:`GPUBindingCommandsMixin` (because you associate a pipeline)
-and the latter also from :class:`GPURenderCommandsMixin`.
-
-When you're done generating commands, you call ``finish()`` and get the list of
-commands as an opaque object: the :class:`GPUCommandBuffer`. You don't really use this object
-except for submitting it to the :class:`GPUQueue`.
-
-The command buffers are one-time use. The :class:`GPURenderBundle` and :class:`GPURenderBundleEncoder` can
-be used to record commands to be used multiple times, but this is not yet
-implemented in wgpu-py.
-
-Error handling
-++++++++++++++
-
-Errors in wgpu-native are raised as Python errors where possible. Uncaught errors
-and warnings are logged using the ``wgpu`` logger.
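As a hypothetical sketch, one might catch the generic :class:`GPUError` base class around a call that can fail validation (a shader failure typically surfaces as a :class:`GPUValidationError`, one of the classes listed below):

.. code-block:: py

    import wgpu
    import wgpu.utils

    device = wgpu.utils.get_default_device()

    try:
        # An invalid WGSL module is expected to fail validation.
        device.create_shader_module(code="this is not valid WGSL")
    except wgpu.GPUError as err:
        print("Shader rejected:", err)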
- -There are specific exceptions that can be raised: -* :class:`GPUError` is the generic (base) error class. -* :class:`GPUValidationError` is for wgpu validation errors. Shader errors also fall into this category. -* :class:`GPUOutOfMemoryError` is a wgpu `MemoryError`. -* :class:`GPUInternalError` when wgpu reaches a internal error state. -* :class:`GPUPipelineError` for errors related to the pipeline. -* :class:`GPUDeviceLostInfo` when the device is lost. - -TODO -++++ - -These classes are not supported and/or documented yet. -:class:`GPUCompilationMessage` -:class:`GPUCompilationInfo` -:class:`GPUQuerySet` - - -List of flags, enums, and structs ---------------------------------- - -.. toctree:: - :maxdepth: 2 - - wgpu_flags - wgpu_enums - wgpu_structs - - -List of GPU classes -------------------- - -.. automodule:: wgpu.classes - -.. currentmodule:: wgpu - -.. autosummary:: - :nosignatures: - :toctree: generated - :template: wgpu_class_layout.rst - - ~GPU - ~GPUAdapterInfo - ~GPUAdapter - ~GPUBindGroup - ~GPUBindGroupLayout - ~GPUBindingCommandsMixin - ~GPUBuffer - ~GPUCanvasContext - ~GPUCommandBuffer - ~GPUCommandEncoder - ~GPUCommandsMixin - ~GPUCompilationInfo - ~GPUCompilationMessage - ~GPUComputePassEncoder - ~GPUComputePipeline - ~GPUDebugCommandsMixin - ~GPUDevice - ~GPUDeviceLostInfo - ~GPUError - ~GPUInternalError - ~GPUObjectBase - ~GPUOutOfMemoryError - ~GPUPipelineBase - ~GPUPipelineError - ~GPUPipelineLayout - ~GPUQuerySet - ~GPUQueue - ~GPURenderBundle - ~GPURenderBundleEncoder - ~GPURenderCommandsMixin - ~GPURenderPassEncoder - ~GPURenderPipeline - ~GPUSampler - ~GPUShaderModule - ~GPUTexture - ~GPUTextureView - ~GPUValidationError diff --git a/docs/wgpu_enums.rst b/docs/wgpu_enums.rst deleted file mode 100644 index b4532ff..0000000 --- a/docs/wgpu_enums.rst +++ /dev/null @@ -1,7 +0,0 @@ -Enums -===== - -.. automodule:: wgpu.enums - :members: - :undoc-members: - :exclude-members: Enum diff --git a/docs/wgpu_flags.rst b/docs/wgpu_flags.rst deleted file mode 100644 index 8c41c6c..0000000 --- a/docs/wgpu_flags.rst +++ /dev/null @@ -1,7 +0,0 @@ -Flags -===== - -.. automodule:: wgpu.flags - :members: - :undoc-members: - :exclude-members: Flags diff --git a/docs/wgpu_structs.rst b/docs/wgpu_structs.rst deleted file mode 100644 index 3feb606..0000000 --- a/docs/wgpu_structs.rst +++ /dev/null @@ -1,7 +0,0 @@ -Structs -======= - -.. automodule:: wgpu.structs - :members: - :undoc-members: - :exclude-members: Struct From c8d4d9e33c83695fd33d5a33e2e35c6e025159f9 Mon Sep 17 00:00:00 2001 From: Jan Date: Wed, 27 Dec 2023 23:34:40 +0100 Subject: [PATCH 06/20] Remove other utils --- wgpu/utils/compute.py | 198 ------------------------------------------ wgpu/utils/device.py | 17 ---- 2 files changed, 215 deletions(-) delete mode 100644 wgpu/utils/compute.py delete mode 100644 wgpu/utils/device.py diff --git a/wgpu/utils/compute.py b/wgpu/utils/compute.py deleted file mode 100644 index 9970dae..0000000 --- a/wgpu/utils/compute.py +++ /dev/null @@ -1,198 +0,0 @@ -""" -Simple high-level utilities for doing compute on the GPU. -""" - -import ctypes - -import wgpu.utils - - -def compute_with_buffers(input_arrays, output_arrays, shader, n=None): - """Apply the given compute shader to the given input_arrays and return - output arrays. Both input and output arrays are represented on the GPU - using storage buffer objects. - - Parameters: - input_arrays (dict): A dict mapping int bindings to arrays. 
The array - can be anything that supports the buffer protocol, including - bytes, memoryviews, ctypes arrays and numpy arrays. The - type and shape of the array does not need to match the type - with which the shader will interpret the buffer data (though - it probably makes your code easier to follow). - output_arrays (dict): A dict mapping int bindings to output shapes. - If the value is int, it represents the size (in bytes) of - the buffer. If the value is a tuple, its last element - specifies the format (see below), and the preceding elements - specify the shape. These are used to ``cast()`` the - memoryview object before it is returned. If the value is a - ctypes array type, the result will be cast to that instead - of a memoryview. Note that any buffer that is NOT in the - output arrays dict will be considered readonly in the shader. - shader (str or bytes): The shader as a string of WGSL code or SpirV bytes. - n (int, tuple, optional): The dispatch counts. Can be an int - or a 3-tuple of ints to specify (x, y, z). If not given or None, - the length of the first output array type is used. - - Returns: - output (dict): A dict mapping int bindings to memoryviews. - - The format characters to cast a ``memoryview`` are hard to remember, so - here's a refresher: - - * "b" and "B" are signed and unsiged 8-bit ints. - * "h" and "H" are signed and unsiged 16-bit ints. - * "i" and "I" are signed and unsiged 32-bit ints. - * "e" and "f" are 16-bit and 32-bit floats. - """ - - # Check input arrays - if not isinstance(input_arrays, dict): # empty is ok - raise TypeError("input_arrays must be a dict.") - for key, array in input_arrays.items(): - if not isinstance(key, int): - raise TypeError("keys of input_arrays must be int.") - # Simply wrapping in a memoryview ensures that it supports the buffer protocol - memoryview(array) - - # Check output arrays - output_infos = {} - if not isinstance(output_arrays, dict) or not output_arrays: - raise TypeError("output_arrays must be a nonempty dict.") - for key, array_descr in output_arrays.items(): - if not isinstance(key, int): - raise TypeError("keys of output_arrays must be int.") - if isinstance(array_descr, str) and "x" in array_descr: - array_descr = tuple(array_descr.split("x")) - if isinstance(array_descr, int): - output_infos[key] = { - "length": array_descr, - "nbytes": array_descr, - "format": "B", - "shape": (array_descr,), - } - elif isinstance(array_descr, tuple): - format = array_descr[-1] - try: - format_size = FORMAT_SIZES[format] - except KeyError: - raise ValueError(f"Invalid format for output array {key}: {format}") - shape = tuple(int(i) for i in array_descr[:-1]) - if not (shape and all(i > 0 for i in shape)): - raise ValueError(f"Invalid shape for output array {key}: {shape}") - nbytes = format_size - for i in shape: - nbytes *= i - output_infos[key] = { - "length": shape[0], - "nbytes": nbytes, - "format": format, - "shape": shape, - } - elif isinstance(array_descr, type) and issubclass(array_descr, ctypes.Array): - output_infos[key] = { - "length": array_descr._length_, - "nbytes": ctypes.sizeof(array_descr), - "ctypes_array_type": array_descr, - } - else: - raise TypeError( - f"Invalid value for output array description: {array_descr}" - ) - - # Get nx, ny, nz from n - if n is None: - output_info = list(output_infos.values())[0] - nx, ny, nz = output_info["length"], 1, 1 - elif isinstance(n, int): - nx, ny, nz = int(n), 1, 1 - elif isinstance(n, tuple) and len(n) == 3: - nx, ny, nz = int(n[0]), int(n[1]), int(n[2]) - else: - 
raise TypeError("compute_with_buffers: n must be None, an int, or 3-int tuple.") - if not (nx >= 1 and ny >= 1 and nz >= 1): - raise ValueError("compute_with_buffers: n value(s) must be >= 1.") - - # Create a device and compile the shader - device = wgpu.utils.get_default_device() - cshader = device.create_shader_module(code=shader) - - # Create buffers for input and output arrays - buffers = {} - for index, array in input_arrays.items(): - usage = wgpu.BufferUsage.STORAGE - if index in output_arrays: - usage |= wgpu.BufferUsage.COPY_SRC - buffer = device.create_buffer_with_data(data=array, usage=usage) - buffers[index] = buffer - for index, info in output_infos.items(): - if index in input_arrays: - continue # We already have this buffer - usage = wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC - buffers[index] = device.create_buffer(size=info["nbytes"], usage=usage) - - # Create bindings and binding layouts - bindings = [] - binding_layouts = [] - for index, buffer in buffers.items(): - bindings.append( - { - "binding": index, - "resource": {"buffer": buffer, "offset": 0, "size": buffer.size}, - } - ) - storage_types = ( - wgpu.BufferBindingType.read_only_storage, - wgpu.BufferBindingType.storage, - ) - binding_layouts.append( - { - "binding": index, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": storage_types[index in output_infos], - "has_dynamic_offset": False, - }, - } - ) - - # Put buffers together - bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) - pipeline_layout = device.create_pipeline_layout( - bind_group_layouts=[bind_group_layout] - ) - bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - - # Create a pipeline and "run it" - compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": cshader, "entry_point": "main"}, - ) - command_encoder = device.create_command_encoder() - compute_pass = command_encoder.begin_compute_pass() - compute_pass.set_pipeline(compute_pipeline) - compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 args not used - compute_pass.dispatch_workgroups(nx, ny, nz) - compute_pass.end() - device.queue.submit([command_encoder.finish()]) - - # Read the current data of the output buffers - output = {} - for index, info in output_infos.items(): - buffer = buffers[index] - # m = buffer.read_data() # old API - m = device.queue.read_buffer(buffer) # slow, can also be done async - if "ctypes_array_type" in info: - output[index] = info["ctypes_array_type"].from_buffer(m) - else: - output[index] = m.cast(info["format"], shape=info["shape"]) - - return output - - -FORMAT_SIZES = {"b": 1, "B": 1, "h": 2, "H": 2, "i": 4, "I": 4, "e": 2, "f": 4} - -# It's tempting to allow for other formats, like "int32" and "f4", but -# users who like numpy will simply specify the number of bytes and -# convert the result. Users who will work with the memoryview directly -# should not be confused with other formats than memoryview.cast() -# normally supports. diff --git a/wgpu/utils/device.py b/wgpu/utils/device.py deleted file mode 100644 index 1a42076..0000000 --- a/wgpu/utils/device.py +++ /dev/null @@ -1,17 +0,0 @@ -_default_device = None - - -def get_default_device(): - """Get a wgpu device object. If this succeeds, it's likely that - the WGPU lib is usable on this system. If not, this call will - probably exit (Rust panic). When called multiple times, - returns the same global device object (useful for e.g. unit tests). 
- """ - global _default_device - - if _default_device is None: - import wgpu.backends.auto # noqa - - adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - _default_device = adapter.request_device() - return _default_device From 32d672b92622d03100e06d49bbb105c8e70b5d4c Mon Sep 17 00:00:00 2001 From: Jan Date: Wed, 27 Dec 2023 23:03:50 +0100 Subject: [PATCH 07/20] Remove non shadertoy examples --- .readthedocs.yaml | 26 -- download-wgpu-native.py | 178 ------------ examples/compute_noop.py | 136 --------- examples/compute_timestamps.py | 166 ----------- examples/cube.py | 388 ------------------------- examples/events.py | 21 -- examples/screenshots/cube.png | Bin 4265 -> 0 bytes examples/screenshots/triangle_auto.png | Bin 22181 -> 0 bytes examples/triangle.py | 155 ---------- examples/triangle_auto.py | 21 -- examples/triangle_glfw.py | 22 -- examples/triangle_glsl.py | 143 --------- examples/triangle_qt.py | 41 --- examples/triangle_qt_embed.py | 53 ---- examples/triangle_subprocess.py | 84 ------ examples/triangle_wx.py | 16 - examples/triangle_wx_embed.py | 40 --- examples/wgpu-examples.ipynb | 117 -------- 18 files changed, 1607 deletions(-) delete mode 100644 .readthedocs.yaml delete mode 100644 download-wgpu-native.py delete mode 100644 examples/compute_noop.py delete mode 100644 examples/compute_timestamps.py delete mode 100644 examples/cube.py delete mode 100644 examples/events.py delete mode 100644 examples/screenshots/cube.png delete mode 100644 examples/screenshots/triangle_auto.png delete mode 100644 examples/triangle.py delete mode 100644 examples/triangle_auto.py delete mode 100644 examples/triangle_glfw.py delete mode 100644 examples/triangle_glsl.py delete mode 100644 examples/triangle_qt.py delete mode 100644 examples/triangle_qt_embed.py delete mode 100644 examples/triangle_subprocess.py delete mode 100644 examples/triangle_wx.py delete mode 100644 examples/triangle_wx_embed.py delete mode 100644 examples/wgpu-examples.ipynb diff --git a/.readthedocs.yaml b/.readthedocs.yaml deleted file mode 100644 index 7c3e63c..0000000 --- a/.readthedocs.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -version: 2 - -build: - os: ubuntu-22.04 - tools: - python: "3.11" -# If we ever want to run wgpu stuff in the doc build -# apt_packages: -# - libegl1-mesa -# - libgl1-mesa-dri -# - libxcb-xfixes0-dev -# - mesa-vulkan-drivers - -sphinx: - configuration: docs/conf.py - fail_on_warning: true - -python: - install: - - method: pip - path: . 
- extra_requirements: - - docs diff --git a/download-wgpu-native.py b/download-wgpu-native.py deleted file mode 100644 index 659240a..0000000 --- a/download-wgpu-native.py +++ /dev/null @@ -1,178 +0,0 @@ -import os -import re -import sys -import argparse -import tempfile -import platform -from zipfile import ZipFile - -import requests - - -# The directory containing non-python resources that are included in packaging -RESOURCE_DIR = os.path.join("wgpu", "resources") -# The version installed through this script is tracked in the backend module -VERSION_FILE = os.path.join("wgpu", "backends", "wgpu_native", "__init__.py") - -# Whether to ensure we export \n instead of \r\n -FORCE_SIMPLE_NEWLINES = False -if sys.platform.startswith("win"): - sample = open(os.path.join(RESOURCE_DIR, "codegen_report.md"), "rb").read() - if sample.count(b"\r\n") == 0: - FORCE_SIMPLE_NEWLINES = True - - -def get_current_version(): - with open(VERSION_FILE) as fh: - return re.search(r"__version__ = \"(.*?)\"", fh.read()).group(1) - - -def write_current_version(version, commit_sha): - with open(VERSION_FILE, "rb") as fh: - file_content = fh.read().decode() - file_content = re.sub( - r"__version__ = \".*?\"", - f'__version__ = "{version}"', - file_content, - ) - file_content = re.sub( - r"__commit_sha__ = \".*?\"", - f'__commit_sha__ = "{commit_sha}"', - file_content, - ) - with open(VERSION_FILE, mode="wb") as fh: - fh.write(file_content.encode()) - - -def download_file(url, filename): - resp = requests.get(url, stream=True) - with open(filename, mode="wb") as fh: - for chunk in resp.iter_content(chunk_size=1024 * 128): - fh.write(chunk) - - -def extract_file(zip_filename, member, path): - z = ZipFile(zip_filename) - os.makedirs(path, exist_ok=True) - z.extract(member, path=path) - if member.endswith(".h") and FORCE_SIMPLE_NEWLINES: - filename = os.path.join(path, member) - bb = open(filename, "rb").read() - with open(filename, "wb") as f: - f.write(bb.replace(b"\r\n", b"\n")) - - -def get_os_string(): - if sys.platform.startswith("win"): - return "windows" - elif sys.platform.startswith("darwin"): - return "macos" - elif sys.platform.startswith("linux"): - return "linux" - else: - # We do not provide binaries for this platform. Note that we can - # have false positives, e.g. on ARM Linux. We assume that users on - # such platforms are aware and arrange for the wgpu lib themselves. - raise RuntimeError(f"Platform '{sys.platform}' not supported") - - -def get_arch(): - # See e.g.: https://stackoverflow.com/questions/45124888 - is_64_bit = sys.maxsize > 2**32 - machine = platform.machine() - - # See if this is run by cibuildwheel and check to see if ARCHFLAGS is - # specified (only done on macOS). This allows to select the proper binaries. - # For specifics of CIBUILDWHEEL and macOS build envs, see: - # https://github.com/pypa/cibuildwheel/blob/4307b52ff28b631519d38bfa0dd09d6a9b39a81e/cibuildwheel/macos.py#L277 - if os.environ.get("CIBUILDWHEEL") == "1" and "ARCHFLAGS" in os.environ: - archflags = os.environ["ARCHFLAGS"] - return "aarch64" if "arm64" in archflags else "x86_64" - - if machine == "armv7l": - # Raspberry pi - return "armv7" - elif is_64_bit and machine.startswith(("arm", "aarch64")): - # Includes MacOS M1, arm linux, ... 
- return "aarch64" - elif is_64_bit: - return "x86_64" - else: - return "i686" - - -def main(version, os_string, arch, upstream): - for build in ["release"]: # ["release", "debug"] - filename = f"wgpu-{os_string}-{arch}-{build}.zip" - url = f"https://github.com/{upstream}/releases/download/v{version}/{filename}" - tmp = tempfile.gettempdir() - zip_filename = os.path.join(tmp, filename) - print(f"Downloading {url} to {zip_filename}") - download_file(url, zip_filename) - headerfile1 = "webgpu.h" - headerfile2 = "wgpu.h" - binaryfile = None - if os_string == "linux": - binaryfile = "libwgpu_native.so" - elif os_string == "macos": - binaryfile = "libwgpu_native.dylib" - elif os_string == "windows": - binaryfile = "wgpu_native.dll" - else: - raise RuntimeError(f"Platform '{os_string}' not supported") - root, ext = os.path.splitext(binaryfile) - binaryfile_name = root + "-" + build + ext - print(f"Extracting {headerfile1} to {RESOURCE_DIR}") - extract_file(zip_filename, headerfile1, RESOURCE_DIR) - print(f"Extracting {headerfile2} to {RESOURCE_DIR}") - extract_file(zip_filename, headerfile2, RESOURCE_DIR) - print(f"Extracting {binaryfile} to {RESOURCE_DIR}") - extract_file(zip_filename, binaryfile, RESOURCE_DIR) - os.replace( - os.path.join(RESOURCE_DIR, binaryfile), - os.path.join(RESOURCE_DIR, binaryfile_name), - ) - current_version = get_current_version() - if version != current_version: - print(f"Version changed, updating {VERSION_FILE}") - filename = "commit-sha" - url = f"https://github.com/{upstream}/releases/download/v{version}/{filename}" - commit_sha_filename = os.path.join(tmp, filename) - print(f"Downloading {url} to {commit_sha_filename}") - download_file(url, commit_sha_filename) - with open(commit_sha_filename) as fh: - commit_sha = fh.read().strip() - write_current_version(version, commit_sha) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Download wgpu-native binaries and headers from github releases" - ) - version = get_current_version() - parser.add_argument( - "--version", help=f"Version to download (default: {version})", default=version - ) - os_string = get_os_string() - parser.add_argument( - "--os", - help=f"Platform to download for (default: {os_string})", - default=os_string, - choices=("linux", "macos", "windows"), - ) - arch_string = get_arch() - parser.add_argument( - "--arch", - help=f"Architecture to download for (default: {arch_string})", - default=arch_string, - choices=("x86_64", "i686", "aarch64"), - ) - upstream = "gfx-rs/wgpu-native" - parser.add_argument( - "--upstream", - help=f"Upstream repository to download release from (default: {upstream})", - default=upstream, - ) - args = parser.parse_args() - - main(args.version, args.os, args.arch, args.upstream) diff --git a/examples/compute_noop.py b/examples/compute_noop.py deleted file mode 100644 index 2c19e2a..0000000 --- a/examples/compute_noop.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -Example compute shader that does ... nothing but copy a value from one -buffer into another. 
-""" - -import wgpu -from wgpu.utils.compute import compute_with_buffers # Convenience function - - -# %% Shader and data - -shader_source = """ - -@group(0) @binding(0) -var data1: array; - -@group(0) @binding(1) -var data2: array; - -@compute -@workgroup_size(1) -fn main(@builtin(global_invocation_id) index: vec3) { - let i: u32 = index.x; - data2[i] = data1[i]; -} -""" - -# Create input data as a memoryview -n = 20 -data = memoryview(bytearray(n * 4)).cast("i") -for i in range(n): - data[i] = i - - -# %% The short version, using memoryview - -# The first arg is the input data, per binding -# The second arg are the ouput types, per binding -out = compute_with_buffers({0: data}, {1: (n, "i")}, shader_source, n=n) - -# The result is a dict matching the output types -# Select data from buffer at binding 1 -result = out[1].tolist() -print(result) -assert result == list(range(20)) - - -# %% The short version, using numpy - -# import numpy as np -# -# numpy_data = np.frombuffer(data, np.int32) -# out = compute_with_buffers({0: numpy_data}, {1: numpy_data.nbytes}, shader_source, n=n) -# result = np.frombuffer(out[1], dtype=np.int32) -# print(result.tolist()) - - -# %% The long version using the wgpu API - -# %% Create device -# Create device and shader object -device = wgpu.utils.get_default_device() - -# Or, you can select GPU by requesting all available adapters -# adapters = wgpu.backends.wgpu_native.enumerate_adapters() -# adapter = None -# for adap in adapters: -# adapter_info = adap.request_adapter_info() -# print(adapter_info) -# if "NVIDIA" in adapter_info["device"]: -# adapter = adap -# break -# assert adapter is not None -# device = adapter.request_device() - -# %% -cshader = device.create_shader_module(code=shader_source) - -# Create buffer objects, input buffer is mapped. 
-buffer1 = device.create_buffer_with_data(data=data, usage=wgpu.BufferUsage.STORAGE) -buffer2 = device.create_buffer( - size=data.nbytes, usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC -) - -# Setup layout and bindings -binding_layouts = [ - { - "binding": 0, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - { - "binding": 1, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.storage, - }, - }, -] -bindings = [ - { - "binding": 0, - "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, - }, - { - "binding": 1, - "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, - }, -] - -# Put everything together -bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) -pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[bind_group_layout]) -bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - -# Create and run the pipeline -compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": cshader, "entry_point": "main"}, -) -command_encoder = device.create_command_encoder() -compute_pass = command_encoder.begin_compute_pass() -compute_pass.set_pipeline(compute_pipeline) -compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 elements not used -compute_pass.dispatch_workgroups(n, 1, 1) # x y z -compute_pass.end() -device.queue.submit([command_encoder.finish()]) - -# Read result -# result = buffer2.read_data().cast("i") -out = device.queue.read_buffer(buffer2).cast("i") -result = out.tolist() -print(result) -assert result == list(range(20)) diff --git a/examples/compute_timestamps.py b/examples/compute_timestamps.py deleted file mode 100644 index 051e7d9..0000000 --- a/examples/compute_timestamps.py +++ /dev/null @@ -1,166 +0,0 @@ -""" -A simple example to profile a compute pass using ComputePassTimestampWrites. -""" - -import wgpu - -""" -Define the number of elements, global and local sizes. -Change these and see how it affects performance. -""" -n = 512 * 512 -local_size = [32, 1, 1] -global_size = [n // local_size[0], 1, 1] - -shader_source = f""" -@group(0) @binding(0) -var data1: array; - -@group(0) @binding(1) -var data2: array; - -@group(0) @binding(2) -var data3: array; - -@compute -@workgroup_size({','.join(map(str, local_size))}) -fn main(@builtin(global_invocation_id) index: vec3) {{ - let i: u32 = index.x; - data3[i] = data1[i] + data2[i]; -}} -""" - -# Define two arrays -data1 = memoryview(bytearray(n * 4)).cast("i") -data2 = memoryview(bytearray(n * 4)).cast("i") - -# Initialize the arrays -for i in range(n): - data1[i] = i - -for i in range(n): - data2[i] = i * 2 - -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") - -# Request a device with the timestamp_query feature, so we can profile our computation -device = adapter.request_device(required_features=[wgpu.FeatureName.timestamp_query]) -cshader = device.create_shader_module(code=shader_source) - -# Create buffer objects, input buffer is mapped. 
-buffer1 = device.create_buffer_with_data(data=data1, usage=wgpu.BufferUsage.STORAGE) -buffer2 = device.create_buffer_with_data(data=data2, usage=wgpu.BufferUsage.STORAGE) -buffer3 = device.create_buffer( - size=data1.nbytes, usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC -) - -# Setup layout and bindings -binding_layouts = [ - { - "binding": 0, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - { - "binding": 1, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - { - "binding": 2, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.storage, - }, - }, -] -bindings = [ - { - "binding": 0, - "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, - }, - { - "binding": 1, - "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, - }, - { - "binding": 2, - "resource": {"buffer": buffer3, "offset": 0, "size": buffer3.size}, - }, -] - -# Put everything together -bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) -pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[bind_group_layout]) -bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - -# Create and run the pipeline -compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": cshader, "entry_point": "main"}, -) - -""" -Create a QuerySet to store the 'beginning_of_pass' and 'end_of_pass' timestamps. -Set the 'count' parameter to 2, as this set will contain 2 timestamps. -""" -query_set = device.create_query_set(type=wgpu.QueryType.timestamp, count=2) -command_encoder = device.create_command_encoder() - -# Pass our QuerySet and the indices into it, where the timestamps will be written. -compute_pass = command_encoder.begin_compute_pass( - timestamp_writes={ - "query_set": query_set, - "beginning_of_pass_write_index": 0, - "end_of_pass_write_index": 1, - } -) - -""" -Create the buffer to store our query results. -Each timestamp is 8 bytes. We mark the buffer usage to be QUERY_RESOLVE, -as we will use this buffer in a resolve_query_set call later. -""" -query_buf = device.create_buffer( - size=8 * query_set.count, - usage=wgpu.BufferUsage.QUERY_RESOLVE - | wgpu.BufferUsage.STORAGE - | wgpu.BufferUsage.COPY_SRC - | wgpu.BufferUsage.COPY_DST, -) -compute_pass.set_pipeline(compute_pipeline) -compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 elements not used -compute_pass.dispatch_workgroups(*global_size) # x y z -compute_pass.end() - -# Resolve our queries, and store the results in the destination buffer we created above. -command_encoder.resolve_query_set( - query_set=query_set, - first_query=0, - query_count=2, - destination=query_buf, - destination_offset=0, -) -device.queue.submit([command_encoder.finish()]) - -""" -Read the query buffer to get the timestamps. 
-Index 0: beginning timestamp -Index 1: end timestamp -""" -timestamps = device.queue.read_buffer(query_buf).cast("Q").tolist() -print(f"Adding two {n} sized arrays took {(timestamps[1]-timestamps[0])/1000} us") - -# Read result -out = device.queue.read_buffer(buffer3).cast("i") -result = out.tolist() - -# Calculate the result on the CPU for comparison -result_cpu = [a + b for a, b in zip(data1, data2)] - -# Ensure results are the same -assert result == result_cpu diff --git a/examples/cube.py b/examples/cube.py deleted file mode 100644 index 18dc4e1..0000000 --- a/examples/cube.py +++ /dev/null @@ -1,388 +0,0 @@ -""" -This example renders a simple textured rotating cube. -""" -# test_example = true - -import time - -from wgpu.gui.auto import WgpuCanvas, run -import wgpu -import numpy as np - - -# %% Create canvas and device - -# Create a canvas to render to -canvas = WgpuCanvas(title="wgpu cube") - -# Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() - -# Prepare present context -present_context = canvas.get_context() -render_texture_format = present_context.get_preferred_format(device.adapter) -present_context.configure(device=device, format=render_texture_format) - - -# %% Generate data - -# pos texcoord -# x, y, z, w, u, v -vertex_data = np.array( - [ - # top (0, 0, 1) - [-1, -1, 1, 1, 0, 0], - [1, -1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1], - [-1, 1, 1, 1, 0, 1], - # bottom (0, 0, -1) - [-1, 1, -1, 1, 1, 0], - [1, 1, -1, 1, 0, 0], - [1, -1, -1, 1, 0, 1], - [-1, -1, -1, 1, 1, 1], - # right (1, 0, 0) - [1, -1, -1, 1, 0, 0], - [1, 1, -1, 1, 1, 0], - [1, 1, 1, 1, 1, 1], - [1, -1, 1, 1, 0, 1], - # left (-1, 0, 0) - [-1, -1, 1, 1, 1, 0], - [-1, 1, 1, 1, 0, 0], - [-1, 1, -1, 1, 0, 1], - [-1, -1, -1, 1, 1, 1], - # front (0, 1, 0) - [1, 1, -1, 1, 1, 0], - [-1, 1, -1, 1, 0, 0], - [-1, 1, 1, 1, 0, 1], - [1, 1, 1, 1, 1, 1], - # back (0, -1, 0) - [1, -1, 1, 1, 0, 0], - [-1, -1, 1, 1, 1, 0], - [-1, -1, -1, 1, 1, 1], - [1, -1, -1, 1, 0, 1], - ], - dtype=np.float32, -) - -index_data = np.array( - [ - [0, 1, 2, 2, 3, 0], # top - [4, 5, 6, 6, 7, 4], # bottom - [8, 9, 10, 10, 11, 8], # right - [12, 13, 14, 14, 15, 12], # left - [16, 17, 18, 18, 19, 16], # front - [20, 21, 22, 22, 23, 20], # back - ], - dtype=np.uint32, -).flatten() - - -# Create texture data (srgb gray values) -texture_data = np.array( - [ - [50, 100, 150, 200], - [100, 150, 200, 50], - [150, 200, 50, 100], - [200, 50, 100, 150], - ], - dtype=np.uint8, -) -texture_data = np.repeat(texture_data, 64, 0) -texture_data = np.repeat(texture_data, 64, 1) -texture_size = texture_data.shape[1], texture_data.shape[0], 1 - -# Use numpy to create a struct for the uniform -uniform_dtype = [("transform", "float32", (4, 4))] -uniform_data = np.zeros((), dtype=uniform_dtype) - - -# %% Create resource objects (buffers, textures, samplers) - -# Create vertex buffer, and upload data -vertex_buffer = device.create_buffer_with_data( - data=vertex_data, usage=wgpu.BufferUsage.VERTEX -) - -# Create index buffer, and upload data -index_buffer = device.create_buffer_with_data( - data=index_data, usage=wgpu.BufferUsage.INDEX -) - -# Create uniform buffer - data is uploaded each frame -uniform_buffer = device.create_buffer( - size=uniform_data.nbytes, usage=wgpu.BufferUsage.UNIFORM | wgpu.BufferUsage.COPY_DST -) - -# Create texture, and upload data -texture = device.create_texture( - size=texture_size, - usage=wgpu.TextureUsage.COPY_DST | wgpu.TextureUsage.TEXTURE_BINDING, - 
dimension=wgpu.TextureDimension.d2, - format=wgpu.TextureFormat.r8unorm, - mip_level_count=1, - sample_count=1, -) -texture_view = texture.create_view() - -device.queue.write_texture( - { - "texture": texture, - "mip_level": 0, - "origin": (0, 0, 0), - }, - texture_data, - { - "offset": 0, - "bytes_per_row": texture_data.strides[0], - }, - texture_size, -) - -# Create a sampler -sampler = device.create_sampler() - - -# %% The shaders - - -shader_source = """ -struct Locals { - transform: mat4x4, -}; -@group(0) @binding(0) -var r_locals: Locals; - -struct VertexInput { - @location(0) pos : vec4, - @location(1) texcoord: vec2, -}; -struct VertexOutput { - @location(0) texcoord: vec2, - @builtin(position) pos: vec4, -}; -struct FragmentOutput { - @location(0) color : vec4, -}; - - -@vertex -fn vs_main(in: VertexInput) -> VertexOutput { - let ndc: vec4 = r_locals.transform * in.pos; - var out: VertexOutput; - out.pos = vec4(ndc.x, ndc.y, 0.0, 1.0); - out.texcoord = in.texcoord; - return out; -} - -@group(0) @binding(1) -var r_tex: texture_2d; - -@group(0) @binding(2) -var r_sampler: sampler; - -@fragment -fn fs_main(in: VertexOutput) -> FragmentOutput { - let value = textureSample(r_tex, r_sampler, in.texcoord).r; - let physical_color = vec3(pow(value, 2.2)); // gamma correct - var out: FragmentOutput; - out.color = vec4(physical_color.rgb, 1.0); - return out; -} -""" - -shader = device.create_shader_module(code=shader_source) - - -# %% The bind groups - -# We always have two bind groups, so we can play distributing our -# resources over these two groups in different configurations. -bind_groups_entries = [[]] -bind_groups_layout_entries = [[]] - -bind_groups_entries[0].append( - { - "binding": 0, - "resource": { - "buffer": uniform_buffer, - "offset": 0, - "size": uniform_buffer.size, - }, - } -) -bind_groups_layout_entries[0].append( - { - "binding": 0, - "visibility": wgpu.ShaderStage.VERTEX | wgpu.ShaderStage.FRAGMENT, - "buffer": {"type": wgpu.BufferBindingType.uniform}, - } -) - -bind_groups_entries[0].append({"binding": 1, "resource": texture_view}) -bind_groups_layout_entries[0].append( - { - "binding": 1, - "visibility": wgpu.ShaderStage.FRAGMENT, - "texture": { - "sample_type": wgpu.TextureSampleType.float, - "view_dimension": wgpu.TextureViewDimension.d2, - }, - } -) - -bind_groups_entries[0].append({"binding": 2, "resource": sampler}) -bind_groups_layout_entries[0].append( - { - "binding": 2, - "visibility": wgpu.ShaderStage.FRAGMENT, - "sampler": {"type": wgpu.SamplerBindingType.filtering}, - } -) - - -# Create the wgou binding objects -bind_group_layouts = [] -bind_groups = [] - -for entries, layout_entries in zip(bind_groups_entries, bind_groups_layout_entries): - bind_group_layout = device.create_bind_group_layout(entries=layout_entries) - bind_group_layouts.append(bind_group_layout) - bind_groups.append( - device.create_bind_group(layout=bind_group_layout, entries=entries) - ) - -pipeline_layout = device.create_pipeline_layout(bind_group_layouts=bind_group_layouts) - - -# %% The render pipeline - -render_pipeline = device.create_render_pipeline( - layout=pipeline_layout, - vertex={ - "module": shader, - "entry_point": "vs_main", - "buffers": [ - { - "array_stride": 4 * 6, - "step_mode": wgpu.VertexStepMode.vertex, - "attributes": [ - { - "format": wgpu.VertexFormat.float32x4, - "offset": 0, - "shader_location": 0, - }, - { - "format": wgpu.VertexFormat.float32x2, - "offset": 4 * 4, - "shader_location": 1, - }, - ], - }, - ], - }, - primitive={ - "topology": 
wgpu.PrimitiveTopology.triangle_list, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.back, - }, - depth_stencil=None, - multisample=None, - fragment={ - "module": shader, - "entry_point": "fs_main", - "targets": [ - { - "format": render_texture_format, - "blend": { - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - } - ], - }, -) - - -# %% Setup the render function - - -def draw_frame(): - # Update uniform transform - a1 = -0.3 - a2 = time.time() - s = 0.6 - ortho = np.array( - [ - [s, 0, 0, 0], - [0, s, 0, 0], - [0, 0, s, 0], - [0, 0, 0, 1], - ], - ) - rot1 = np.array( - [ - [1, 0, 0, 0], - [0, np.cos(a1), -np.sin(a1), 0], - [0, np.sin(a1), +np.cos(a1), 0], - [0, 0, 0, 1], - ], - ) - rot2 = np.array( - [ - [np.cos(a2), 0, np.sin(a2), 0], - [0, 1, 0, 0], - [-np.sin(a2), 0, np.cos(a2), 0], - [0, 0, 0, 1], - ], - ) - uniform_data["transform"] = rot2 @ rot1 @ ortho - - # Upload the uniform struct - tmp_buffer = device.create_buffer_with_data( - data=uniform_data, usage=wgpu.BufferUsage.COPY_SRC - ) - - command_encoder = device.create_command_encoder() - command_encoder.copy_buffer_to_buffer( - tmp_buffer, 0, uniform_buffer, 0, uniform_data.nbytes - ) - - current_texture_view = present_context.get_current_texture().create_view() - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture_view, - "resolve_target": None, - "clear_value": (1, 1, 1, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - - render_pass.set_pipeline(render_pipeline) - render_pass.set_index_buffer(index_buffer, wgpu.IndexFormat.uint32) - render_pass.set_vertex_buffer(0, vertex_buffer) - for bind_group_id, bind_group in enumerate(bind_groups): - render_pass.set_bind_group(bind_group_id, bind_group, [], 0, 99) - render_pass.draw_indexed(index_data.size, 1, 0, 0, 0) - render_pass.end() - - device.queue.submit([command_encoder.finish()]) - - canvas.request_draw() - - -canvas.request_draw(draw_frame) - -if __name__ == "__main__": - run() diff --git a/examples/events.py b/examples/events.py deleted file mode 100644 index 17ba923..0000000 --- a/examples/events.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -A simple example to demonstrate events. -""" -from wgpu.gui.auto import WgpuCanvas, run, call_later - - -class MyCanvas(WgpuCanvas): - def handle_event(self, event): - if event["event_type"] != "pointer_move": - print(event) - - -if __name__ == "__main__": - canvas = MyCanvas(size=(640, 480), title="wgpu events") - - def send_message(message): - print(f"Message: {message}") - - call_later(2, send_message, "hello") - - run() diff --git a/examples/screenshots/cube.png b/examples/screenshots/cube.png deleted file mode 100644 index 9003daefcd8541e50f5024bc1cb4260be703adab..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4265 zcmcgveNJHG5( zRyQ}8Y0)z>8B>_RWO(K(W0Ha>GNN#%F^Q>6yxH^+um9((hDzbWiU}fH)qDB!<kMsv2bY)WI&*H_m}+7g0LPkq+Y)6;rx@thDFtbe(YvXhicmfgj2q|@!) zj-_U1$d5B19aHV^n}X6LWnmWLX4olhNpZqJEVAdcd^@u|!buibWtC079gS!03CPX! 
[... binary patch data for the deleted examples/screenshots/cube.png omitted ...]
diff --git a/examples/screenshots/triangle_auto.png b/examples/screenshots/triangle_auto.png
deleted file mode 100644
index 5c9bb7ee723e992879622eced3d6b43da9c9494a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 22181
[... binary patch data for the deleted examples/screenshots/triangle_auto.png omitted ...]
zWkJ~KNLxy0wP9R4eS9SPI!WG}1gTKVjqvyo2W8Jd9H*G?s{o|0zkS?qD`j;fN8 zI<87uQe?u&ga~APNeJ|=U&}C6POm0k_Xh~i239dXnJHvLEOm4x($nLgIXFz01l{Wk z8urhW{D_ieLcNt>Bw-zg=GNCkm5Z_6aw^H|Sn5eW@L;0@0bIWkyQtd`qyE8qrykp3 zKE3H7Qv&G2S%_SlpfYlvXl)17QH8{BiJ>w0kvf}z^Ae_bYkP9Y{4=E;L!Tg=S|ea| z`G$K}@z(xAPCJtCG_m4k#eu#890vJ@PrVCIC!KHmxfeC20N3DwCkd|N1?@RE5n#IA z_YXK}MMyb1IA6q>H(EI$7E2R_K0nB!G, - @builtin(position) pos: vec4, -}; - -@vertex -fn vs_main(in: VertexInput) -> VertexOutput { - var positions = array, 3>( - vec2(0.0, -0.5), - vec2(0.5, 0.5), - vec2(-0.5, 0.75), - ); - var colors = array, 3>( // srgb colors - vec3(1.0, 1.0, 0.0), - vec3(1.0, 0.0, 1.0), - vec3(0.0, 1.0, 1.0), - ); - let index = i32(in.vertex_index); - var out: VertexOutput; - out.pos = vec4(positions[index], 0.0, 1.0); - out.color = vec4(colors[index], 1.0); - return out; -} - -@fragment -fn fs_main(in: VertexOutput) -> @location(0) vec4 { - let physical_color = pow(in.color.rgb, vec3(2.2)); // gamma correct - return vec4(physical_color, in.color.a); -} -""" - - -# %% The wgpu calls - - -def main(canvas, power_preference="high-performance", limits=None): - """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = adapter.request_device(required_limits=limits) - return _main(canvas, device) - - -async def main_async(canvas): - """Async function to setup a viz on the given canvas.""" - adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance") - device = await adapter.request_device_async(required_limits={}) - return _main(canvas, device) - - -def _main(canvas, device): - shader = device.create_shader_module(code=shader_source) - - # No bind group and layout, we should not create empty ones. - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - present_context = canvas.get_context() - render_texture_format = present_context.get_preferred_format(device.adapter) - present_context.configure(device=device, format=render_texture_format) - - render_pipeline = device.create_render_pipeline( - layout=pipeline_layout, - vertex={ - "module": shader, - "entry_point": "vs_main", - "buffers": [], - }, - primitive={ - "topology": wgpu.PrimitiveTopology.triangle_list, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.none, - }, - depth_stencil=None, - multisample=None, - fragment={ - "module": shader, - "entry_point": "fs_main", - "targets": [ - { - "format": render_texture_format, - "blend": { - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - }, - ], - }, - ) - - def draw_frame(): - current_texture = present_context.get_current_texture() - command_encoder = device.create_command_encoder() - - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture.create_view(), - "resolve_target": None, - "clear_value": (0, 0, 0, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - - render_pass.set_pipeline(render_pipeline) - # render_pass.set_bind_group(0, no_bind_group, [], 0, 1) - render_pass.draw(3, 1, 0, 0) - render_pass.end() - device.queue.submit([command_encoder.finish()]) - - canvas.request_draw(draw_frame) - return device - - -if __name__ == "__main__": - from wgpu.gui.auto import WgpuCanvas, run - - canvas = WgpuCanvas(size=(640, 480), title="wgpu triangle") - main(canvas) - run() diff --git 
a/examples/triangle_auto.py b/examples/triangle_auto.py deleted file mode 100644 index 542e7ca..0000000 --- a/examples/triangle_auto.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Import the viz from triangle.py and run it using the auto-gui. -""" -# test_example = true - -import sys -from pathlib import Path - -from wgpu.gui.auto import WgpuCanvas, run - -sys.path.insert(0, str(Path(__file__).parent)) - -from triangle import main # noqa: E402, The function to call to run the visualization - - -canvas = WgpuCanvas() -device = main(canvas) - - -if __name__ == "__main__": - run() diff --git a/examples/triangle_glfw.py b/examples/triangle_glfw.py deleted file mode 100644 index b2b34b7..0000000 --- a/examples/triangle_glfw.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Import the viz from triangle.py and run it using glfw (which uses asyncio for the event loop). - -# run_example = false -""" - -import sys -from pathlib import Path - -from wgpu.gui.glfw import WgpuCanvas, run - -sys.path.insert(0, str(Path(__file__).parent)) - -from triangle import main # noqa: E402, The function to call to run the visualization - - -canvas = WgpuCanvas() -device = main(canvas) - - -if __name__ == "__main__": - run() diff --git a/examples/triangle_glsl.py b/examples/triangle_glsl.py deleted file mode 100644 index 702d3a8..0000000 --- a/examples/triangle_glsl.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -The triangle example, using GLSL shaders. - -""" - -import wgpu - - -# %% Shaders - - -vertex_shader = """ -#version 450 core -layout(location = 0) out vec4 color; -void main() -{ - vec2 positions[3] = vec2[3]( - vec2(0.0, -0.5), - vec2(0.5, 0.5), - vec2(-0.5, 0.75) - ); - vec3 colors[3] = vec3[3]( // srgb colors - vec3(1.0, 1.0, 0.0), - vec3(1.0, 0.0, 1.0), - vec3(0.0, 1.0, 1.0) - ); - int index = int(gl_VertexID); - gl_Position = vec4(positions[index], 0.0, 1.0); - color = vec4(colors[index], 1.0); -} -""" - -fragment_shader = """ -#version 450 core -out vec4 FragColor; -layout(location = 0) in vec4 color; -void main() -{ - vec3 physical_color = pow(color.rgb, vec3(2.2)); // gamma correct - FragColor = vec4(physical_color, color.a); -} -""" - - -# %% The wgpu calls - - -def main(canvas, power_preference="high-performance", limits=None): - """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = adapter.request_device(required_limits=limits) - return _main(canvas, device) - - -async def main_async(canvas): - """Async function to setup a viz on the given canvas.""" - adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance") - device = await adapter.request_device_async(required_limits={}) - return _main(canvas, device) - - -def _main(canvas, device): - vert_shader = device.create_shader_module(label="triangle_vert", code=vertex_shader) - frag_shader = device.create_shader_module( - label="triangle_frag", code=fragment_shader - ) - - # No bind group and layout, we should not create empty ones. 
- pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - present_context = canvas.get_context() - render_texture_format = present_context.get_preferred_format(device.adapter) - present_context.configure(device=device, format=render_texture_format) - - render_pipeline = device.create_render_pipeline( - layout=pipeline_layout, - vertex={ - "module": vert_shader, - "entry_point": "main", - "buffers": [], - }, - primitive={ - "topology": wgpu.PrimitiveTopology.triangle_list, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.none, - }, - depth_stencil=None, - multisample=None, - fragment={ - "module": frag_shader, - "entry_point": "main", - "targets": [ - { - "format": render_texture_format, - "blend": { - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - }, - ], - }, - ) - - def draw_frame(): - current_texture = present_context.get_current_texture() - command_encoder = device.create_command_encoder() - - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture.create_view(), - "resolve_target": None, - "clear_value": (0, 0, 0, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - - render_pass.set_pipeline(render_pipeline) - # render_pass.set_bind_group(0, no_bind_group, [], 0, 1) - render_pass.draw(3, 1, 0, 0) - render_pass.end() - device.queue.submit([command_encoder.finish()]) - - canvas.request_draw(draw_frame) - return device - - -if __name__ == "__main__": - from wgpu.gui.auto import WgpuCanvas, run - - canvas = WgpuCanvas(size=(640, 480), title="wgpu triangle") - main(canvas) - run() diff --git a/examples/triangle_qt.py b/examples/triangle_qt.py deleted file mode 100644 index 033d7b9..0000000 --- a/examples/triangle_qt.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Import the viz from triangle.py and run it in a Qt window. -Works with either PySide6, PyQt6, PyQt5 or PySide2. - -# run_example = false -""" -import importlib - -# For the sake of making this example Just Work, we try multiple QT libs -for lib in ("PySide6", "PyQt6", "PySide2", "PyQt5"): - try: - QtWidgets = importlib.import_module(".QtWidgets", lib) - break - except ModuleNotFoundError: - pass - - -from wgpu.gui.qt import WgpuCanvas # WgpuCanvas is a QWidget subclass - -from triangle import main # The function to call to run the visualization - - -app = QtWidgets.QApplication([]) -canvas = WgpuCanvas() - -device = main(canvas) - -# Enter Qt event loop (compatible with qt5/qt6) -app.exec() if hasattr(app, "exec") else app.exec_() - - -# For those interested, this is a simple way to integrate Qt's event -# loop with asyncio, but for real apps you probably want to use -# something like the qasync library. -# async def mainloop(): -# await main_async(canvas) -# while not canvas.is_closed(): -# await asyncio.sleep(0.001) -# app.flush() -# app.processEvents() -# loop.stop() diff --git a/examples/triangle_qt_embed.py b/examples/triangle_qt_embed.py deleted file mode 100644 index 42c9864..0000000 --- a/examples/triangle_qt_embed.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -An example demonstrating a qt app with a wgpu viz inside. -If needed, change the PySide6 import to e.g. PyQt6, PyQt5, or PySide2. 
- -# run_example = false -""" -import importlib - -# For the sake of making this example Just Work, we try multiple QT libs -for lib in ("PySide6", "PyQt6", "PySide2", "PyQt5"): - try: - QtWidgets = importlib.import_module(".QtWidgets", lib) - break - except ModuleNotFoundError: - pass - - -from wgpu.gui.qt import WgpuWidget - -from triangle import main - - -class ExampleWidget(QtWidgets.QWidget): - def __init__(self): - super().__init__() - self.resize(640, 480) - self.setWindowTitle("wgpu triangle embedded in a qt app") - - splitter = QtWidgets.QSplitter() - - self.button = QtWidgets.QPushButton("Hello world", self) - self.canvas1 = WgpuWidget(splitter) - self.canvas2 = WgpuWidget(splitter) - - splitter.addWidget(self.canvas1) - splitter.addWidget(self.canvas2) - - layout = QtWidgets.QHBoxLayout() - layout.addWidget(self.button, 0) - layout.addWidget(splitter, 1) - self.setLayout(layout) - - self.show() - - -app = QtWidgets.QApplication([]) -example = ExampleWidget() - -main(example.canvas1) -main(example.canvas2) - -# Enter Qt event loop (compatible with qt5/qt6) -app.exec() if hasattr(app, "exec") else app.exec_() diff --git a/examples/triangle_subprocess.py b/examples/triangle_subprocess.py deleted file mode 100644 index e1c2e64..0000000 --- a/examples/triangle_subprocess.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -An example showing that with WGPU you can draw to the window of another -process. Just a proof of concept, this is far from perfect yet: - -* It works if I run it in Pyzo, but not if I run it from the terminal. -* I only tried it on Windows. -* You'll want to let the proxy know about size changes. -* The request_draw should invoke a draw (in asyncio?), not draw directly. -* Properly handling closing the figure (from both ends). - -# run_example = false -""" - -import sys -import time -import subprocess - -from wgpu.gui import WgpuCanvasBase - -# Import the (async) function that we must call to run the visualization -from triangle import main - - -code = """ -import sys -from PySide6 import QtWidgets # Use either PySide6 or PyQt6 -from wgpu.gui.qt import WgpuCanvas - -app = QtWidgets.QApplication([]) -canvas = WgpuCanvas(title="wgpu triangle in Qt subprocess") - -print(canvas.get_window_id()) -#print(canvas.get_display_id()) -print(canvas.get_physical_size()) -sys.stdout.flush() - -app.exec_() -""" - - -class ProxyCanvas(WgpuCanvasBase): - def __init__(self): - super().__init__() - self._window_id = int(p.stdout.readline().decode()) - self._psize = tuple( - int(x) for x in p.stdout.readline().decode().strip().strip("()").split(",") - ) - print(self._psize) - time.sleep(0.2) - - def get_window_id(self): - return self._window_id - - def get_physical_size(self): - return self._psize - - def get_pixel_ratio(self): - return 1 - - def get_logical_size(self): - return self._psize - - def set_logical_size(self, width, height): - pass - - def close(self): - p.kill() - - def is_closed(self): - raise NotImplementedError() - - def _request_draw(self): - self.draw_frame() - - -# Create subprocess -p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE) - -# Create a canvas that maps to the window of that subprocess -canvas = ProxyCanvas() - -# Go! -main(canvas) -time.sleep(3) diff --git a/examples/triangle_wx.py b/examples/triangle_wx.py deleted file mode 100644 index 22c9002..0000000 --- a/examples/triangle_wx.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -Import the viz from triangle.py and run it in a wxPython window. 
-""" -# run_example = false - -import wx -from wgpu.gui.wx import WgpuCanvas - -from examples.triangle import main # The function to call to run the visualization - - -app = wx.App() -canvas = WgpuCanvas() - -main(canvas) -app.MainLoop() diff --git a/examples/triangle_wx_embed.py b/examples/triangle_wx_embed.py deleted file mode 100644 index e45c13d..0000000 --- a/examples/triangle_wx_embed.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -An example demonstrating a wx app with a wgpu viz inside. -""" -# run_example = false - -import wx -from wgpu.gui.wx import WgpuWidget - -from examples.triangle import main - - -class Example(wx.Frame): - def __init__(self): - super().__init__(None, title="wgpu triangle embedded in a wx app") - self.SetSize(640, 480) - - splitter = wx.SplitterWindow(self) - - self.button = wx.Button(self, -1, "Hello world") - self.canvas1 = WgpuWidget(splitter) - self.canvas2 = WgpuWidget(splitter) - - splitter.SplitVertically(self.canvas1, self.canvas2) - splitter.SetSashGravity(0.5) - - sizer = wx.BoxSizer(wx.HORIZONTAL) - sizer.Add(self.button, 0, wx.EXPAND) - sizer.Add(splitter, 1, wx.EXPAND) - self.SetSizer(sizer) - - self.Show() - - -app = wx.App() -example = Example() - -main(example.canvas1) -main(example.canvas2) - -app.MainLoop() diff --git a/examples/wgpu-examples.ipynb b/examples/wgpu-examples.ipynb deleted file mode 100644 index 3e67105..0000000 --- a/examples/wgpu-examples.ipynb +++ /dev/null @@ -1,117 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "afd9b3fd", - "metadata": {}, - "source": [ - "# WGPU notebook examples" - ] - }, - { - "cell_type": "markdown", - "id": "2e610ab9", - "metadata": {}, - "source": [ - "## Triangle example\n", - "\n", - "We import the triangle example and show it in the notebook." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c6e4ffe0", - "metadata": {}, - "outputs": [], - "source": [ - "from wgpu.gui.auto import WgpuCanvas, run\n", - "import triangle\n", - "\n", - "canvas = WgpuCanvas(size=(640, 480), title=\"wgpu triangle with GLFW\")\n", - "\n", - "triangle.main(canvas)\n", - "canvas" - ] - }, - { - "cell_type": "markdown", - "id": "e120b752", - "metadata": {}, - "source": [ - "## Cube example\n", - "\n", - "An interactive example this time." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4f9f67d", - "metadata": {}, - "outputs": [], - "source": [ - "from cube import canvas\n", - "\n", - "canvas" - ] - }, - { - "cell_type": "markdown", - "id": "749ffb40", - "metadata": {}, - "source": [ - "## Event example\n", - "\n", - "The code below is a copy from `show_events.py`. It is just to show how events are handled. These events are the same accross all auto-backends." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c858215a", - "metadata": {}, - "outputs": [], - "source": [ - "from wgpu.gui.auto import WgpuCanvas, run\n", - "\n", - "class MyCanvas(WgpuCanvas):\n", - " def handle_event(self, event):\n", - " if event[\"event_type\"] != \"pointer_move\":\n", - " print(event)\n", - "\n", - "canvas = MyCanvas(size=(640, 480), title=\"wgpu triangle with GLFW\")\n", - "canvas" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6b92d13b", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} From eec52e462fef1f0024e1b5264c599957c38d1215 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Jan 2024 20:54:04 +0100 Subject: [PATCH 08/20] Remove CODEOWNERS --- .github/CODEOWNERS | 1 - 1 file changed, 1 deletion(-) delete mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index df51a9a..0000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ -* @Korijn From 7d845a56580c623c22d598b94f62127ab80d9f46 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Jan 2024 20:59:36 +0100 Subject: [PATCH 09/20] Remove unneeded CI jobs --- .github/workflows/ci.yml | 218 --------------------------------------- 1 file changed, 218 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5292b1b..55e67b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -32,55 +32,6 @@ jobs: run: | flake8 . - test-codegen-build: - name: Test Codegen - timeout-minutes: 5 - runs-on: ubuntu-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -U pytest numpy black cffi - - name: Test codegen - run: | - pytest -v codegen - - test-minimal-import-build: - name: Test Imports - timeout-minutes: 5 - runs-on: ubuntu-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install requests - python download-wgpu-native.py - pip uninstall -q -y requests - pip install -e . 
- - name: Test imports - env: - WGPU_FORCE_OFFSCREEN: true - run: | - python -c "print('wgpu'); import wgpu; print(wgpu)" - python -c "print('wgpu.backends.wgpu_native'); import wgpu.backends.wgpu_native" - python -c "print('wgpu.gui.offscreen'); import wgpu.gui.offscreen" - python -c "print('wgpu.utils'); import wgpu.utils" - python -c "print('wgpu.utils.shadertoy'); import wgpu.utils.shadertoy" - docs-build: name: Test Docs timeout-minutes: 5 @@ -130,30 +81,6 @@ jobs: run: | pytest -v examples - test-pyinstaller-build: - name: Test PyInstaller - timeout-minutes: 5 - runs-on: ubuntu-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v3 - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: '3.11' - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install -U requests numpy pytest - python download-wgpu-native.py - pip install -e . - pip install psutil glfw pyinstaller>=4.9 - - name: Test PyInstaller - run: | - pyinstaller --version - pytest -v wgpu/__pyinstaller - test-builds: name: ${{ matrix.name }} timeout-minutes: 5 @@ -203,148 +130,3 @@ jobs: - name: Memory tests run: | pytest -v tests_mem - - # The release builds are done for the platforms that we want to build wheels for. - # We build wheels, test them, and then upload the wheel as an artifact. - release-builds: - name: Build wheels on ${{ matrix.os }} - timeout-minutes: 10 - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest, windows-latest, macos-latest] - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.9 - uses: actions/setup-python@v4 - with: - python-version: '3.9' - - name: Install dev dependencies - run: | - python -m pip install --upgrade pip wheel setuptools twine - - name: Build wheels - uses: pypa/cibuildwheel@v2.16.2 - env: - CIBW_MANYLINUX_X86_64_IMAGE: quay.io/pypa/manylinux_2_28_x86_64 - CIBW_ARCHS_LINUX: x86_64 - CIBW_SKIP: cp39-musllinux_x86_64 - with: - output-dir: dist - - name: Twine check - run: | - twine check dist/* - - name: Upload distributions - uses: actions/upload-artifact@v2 - with: - path: dist - name: dist - - # Thees release builds uses QEMU so that we can build wheels for arm64. - # We build wheels and upload the wheel as an artifact, but we don't test them here. 
- qemu-release-builds: - name: Build wheels on ubuntu-latest with QEMU - timeout-minutes: 10 - runs-on: ubuntu-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v3 - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - with: - platforms: arm64 - - name: Build wheels - uses: pypa/cibuildwheel@v2.16.2 - env: - CIBW_MANYLINUX_AARCH64_IMAGE: quay.io/pypa/manylinux_2_28_aarch64 - CIBW_ARCHS_LINUX: aarch64 - CIBW_SKIP: cp39-musllinux_aarch64 - with: - output-dir: dist - - name: Upload distributions - uses: actions/upload-artifact@v2 - with: - path: dist - name: dist - - sdist-build: - name: Build sdist - timeout-minutes: 5 - runs-on: ubuntu-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.9 - uses: actions/setup-python@v4 - with: - python-version: '3.9' - - name: Install dev dependencies - run: | - python -m pip install --upgrade pip - pip install -U -r dev-requirements.txt - - name: Create source distribution - run: | - python setup.py sdist - - name: Test sdist - shell: bash - run: | - rm -rf ./wgpu - pushd $HOME - pip install $GITHUB_WORKSPACE/dist/*.tar.gz - popd - # don't run tests, we just want to know if the sdist can be installed - pip uninstall -y wgpu - git reset --hard HEAD - - name: Twine check - run: | - twine check dist/* - - name: Upload distributions - uses: actions/upload-artifact@v2 - with: - path: dist - name: dist - - publish: - name: Publish to Github and Pypi - runs-on: ubuntu-latest - needs: [test-builds, release-builds, qemu-release-builds, sdist-build] - if: success() && startsWith(github.ref, 'refs/tags/v') - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.9 - uses: actions/setup-python@v4 - with: - python-version: '3.9' - - name: Download assets - uses: actions/download-artifact@v1.0.0 - with: - name: dist - - name: Get version from git ref - id: get_version - run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//} - - name: Create GH release - uses: actions/create-release@v1 - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - tag_name: ${{ steps.get_version.outputs.VERSION }} - release_name: Release ${{ steps.get_version.outputs.VERSION }} - body: | - Autogenerated binary wheels that include wgpu-native. - See [the changelog](https://github.com/pygfx/wgpu-py/blob/main/CHANGELOG.md) for details. - draft: false - prerelease: false - - name: Upload release assets - # Move back to official action after fix https://github.com/actions/upload-release-asset/issues/4 - uses: AButler/upload-release-assets@v2.0 - with: - release-tag: ${{ steps.get_version.outputs.VERSION }} - files: 'dist/*.tar.gz;dist/*.whl' - repo-token: ${{ secrets.GITHUB_TOKEN }} - - name: Publish to PyPI - uses: pypa/gh-action-pypi-publish@master - with: - user: __token__ - password: ${{ secrets.PYPI_PASSWORD }} From 04c73290c3173a6cfdebc3da36557f5eff0b7e44 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Jan 2024 21:05:50 +0100 Subject: [PATCH 10/20] Fix missing script step --- .github/workflows/ci.yml | 2 -- .github/workflows/screenshots.yml | 1 - pyproject.toml | 4 ---- 3 files changed, 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 55e67b3..d305bc2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -73,7 +73,6 @@ jobs: run: | python -m pip install --upgrade pip pip install -U -r dev-requirements.txt - python download-wgpu-native.py pip install -e . 
- name: Test examples env: @@ -122,7 +121,6 @@ jobs: run: | python -m pip install --upgrade pip pip install -U -r dev-requirements.txt - python download-wgpu-native.py pip install -e . - name: Unit tests run: | diff --git a/.github/workflows/screenshots.yml b/.github/workflows/screenshots.yml index 276d53b..7e0b7b8 100644 --- a/.github/workflows/screenshots.yml +++ b/.github/workflows/screenshots.yml @@ -27,7 +27,6 @@ jobs: run: | python -m pip install --upgrade pip pip install -U -r dev-requirements.txt - python download-wgpu-native.py pip install -e . - name: Regenerate screenshots run: | diff --git a/pyproject.toml b/pyproject.toml index 7041990..732fe5a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,10 +9,6 @@ build-backend = "setuptools.build_meta" # we only build on one python version since the wheels are not bound to it build = "cp39-*" -# we can't list requests under build-system.requires because -# that step happens _after_ the before-build command -before-build = "pip install requests && python download-wgpu-native.py" - # this is sufficient to trigger an install of the built wheel test-command = "echo Wheel installed" From a391515d232b1386bb3a660a122214444fa749f9 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Jan 2024 21:30:47 +0100 Subject: [PATCH 11/20] Remove reference to deleted files --- setup.cfg | 7 ------- 1 file changed, 7 deletions(-) diff --git a/setup.cfg b/setup.cfg index 4cbd3ac..2fb0efc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,14 +14,7 @@ exclude = build,dist,*.egg-info,.venv extend-ignore = E501, E203, B006, B007, D per-file-ignores = - tests/test_compute.py: F821,F722 - tests/test_gui_glfw.py: F821,F722 - tests/test_wgpu_native_basics.py: F821,F722 - tests/test_wgpu_native_render.py: F821,F722 - tests/test_wgpu_native_render_tex.py: F821,F722 - tests/test_wgpu_native_compute_tex.py : F821,F722 examples/*.py: F821,F722 - examples/triangle_qt*.py: E402 [coverage:report] From a4dd55546bf310eb77067175cce08342c82cf938 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Jan 2024 21:39:59 +0100 Subject: [PATCH 12/20] Cleanup setup.py --- setup.py | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/setup.py b/setup.py index b50647a..02fd049 100644 --- a/setup.py +++ b/setup.py @@ -12,39 +12,14 @@ VERSION = re.search(r"__version__ = \"(.*?)\"", fh.read()).group(1) -class bdist_wheel(_bdist_wheel): # noqa: N801 - def finalize_options(self): - self.plat_name = get_platform(None) # force a platform tag - _bdist_wheel.finalize_options(self) - - -resources_globs = ["*.h", "*.idl"] -if platform.system() == "Linux": - resources_globs.append("*-release.so") -elif platform.system() == "Darwin": - resources_globs.append("*-release.dylib") -elif platform.system() == "Windows": - resources_globs.append("*-release.dll") -else: - pass # don't include binaries; user will have to arrange for the lib - -runtime_deps = ["cffi>=1.15.0", "rubicon-objc>=0.4.1; sys_platform == 'darwin'"] -extra_deps = { - "jupyter": ["jupyter_rfb>=0.4.2"], - "glfw": ["glfw>=1.9"], - "docs": ["sphinx>7.2", "sphinx_rtd_theme"], -} setup( name=NAME, version=VERSION, packages=find_packages( - exclude=["codegen", "codegen.*", "tests", "tests.*", "examples", "examples.*"] + exclude=["tests", "tests.*", "examples", "examples.*"] ), - package_data={f"{NAME}.resources": resources_globs}, python_requires=">=3.8.0", - install_requires=runtime_deps, - extras_require=extra_deps, license="BSD 2-Clause", description=SUMMARY, 
long_description=open("README.md").read(), @@ -52,12 +27,4 @@ def finalize_options(self): author="Almar Klein", author_email="almar.klein@gmail.com", url="https://github.com/pygfx/wgpu-py", - cmdclass={"bdist_wheel": bdist_wheel}, - data_files=[("", ["LICENSE"])], - entry_points={ - "pyinstaller40": [ - "hook-dirs = wgpu.__pyinstaller:get_hook_dirs", - "tests = wgpu.__pyinstaller:get_test_dirs", - ], - }, ) From 5cc459f170e2965625074a8734d349ad81d139bc Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Jan 2024 21:47:37 +0100 Subject: [PATCH 13/20] Cleanup __init__.py files --- wgpu/__init__.py | 20 -------------------- wgpu/utils/__init__.py | 42 ------------------------------------------ 2 files changed, 62 deletions(-) diff --git a/wgpu/__init__.py b/wgpu/__init__.py index f13d909..3e1ca68 100644 --- a/wgpu/__init__.py +++ b/wgpu/__init__.py @@ -2,28 +2,8 @@ The wgpu library is a Python implementation of WebGPU. """ -from ._coreutils import logger # noqa: F401,F403 -from ._diagnostics import diagnostics # noqa: F401,F403 -from .flags import * # noqa: F401,F403 -from .enums import * # noqa: F401,F403 -from .classes import * # noqa: F401,F403 -from .gui import WgpuCanvasInterface # noqa: F401,F403 from . import utils # noqa: F401,F403 -from . import backends # noqa: F401,F403 -from . import resources # noqa: F401,F403 __version__ = "0.13.2" version_info = tuple(map(int, __version__.split("."))) - - -# The API entrypoint, from wgpu.classes - gets replaced when a backend loads. -gpu = GPU() # noqa: F405 - - -# Temporary stub to help transitioning -def request_adapter(*args, **kwargs): - """Deprecated!""" - raise DeprecationWarning( - "wgpu.request_adapter() is deprecated! Use wgpu.gpu.request_adapter() instead." - ) diff --git a/wgpu/utils/__init__.py b/wgpu/utils/__init__.py index faeddbe..e69de29 100644 --- a/wgpu/utils/__init__.py +++ b/wgpu/utils/__init__.py @@ -1,42 +0,0 @@ -""" -Higher level utilities. Must be explicitly imported from ``wgpu.utils.xx``. -""" - -# The purpose of wgpu-py is to provide a Pythonic wrapper around -# wgpu-native. In principal, a higher-level API is not within the scope -# of the project. However, by providing a few utility functions, other -# projects can use wgpu without having to keep track of changes in wgpu -# itself. -# -# We should be conservative here: functionality added here should have -# an unopinionated API, providing tools that are still low-level (follow -# GPU/wgpu semantics), but without using low level details of the wgpu -# API itself. - -# The get_default_device() is so small and generally convenient that we import it by default. 
-from .device import get_default_device # noqa: F401 - - -class _StubModule: - def __init__(self, module): - self._module = module - self.must_be_explicitly_imported = True - - def __getattr__(self, *args, **kwargs): - raise RuntimeError(f"wgpu.utils.{self._module} must be explicitly imported.") - - def __repr__(self): - return f"" - - -# Create stubs - - -def compute_with_buffers(*args, **kwargs): - raise DeprecationWarning( - "wgpu.utils.compute_with_buffers() must now be imported from wgpu.utils.compute" - ) - - -compute = _StubModule("compute") -shadertoy = _StubModule("shadertoy") From 3986477b3997daa82d3f7ed17796461a0fd2b430 Mon Sep 17 00:00:00 2001 From: Jan Date: Thu, 4 Jan 2024 21:57:44 +0100 Subject: [PATCH 14/20] Change name, version and attribution --- CHANGELOG.md | 642 +---------------------------------------------- LICENSE | 2 +- setup.py | 10 +- wgpu/__init__.py | 2 +- 4 files changed, 8 insertions(+), 648 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 215facf..d9576b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,644 +17,4 @@ Possible sections in each release: * Security: in case of vulnerabilities. -### [v0.13.2] - 21-12-2023 - -Added: - -* Implement support for timestamp QuerySet. -* Add texture input and iFrameRate builtin to Shadertoy util https://github.com/pygfx/wgpu-py/pull/453 - - -### [v0.13.1] - 08-12-2023 - -Fixed: - -* Prevent access violation errors with GLFW on Windows. -* Prevent a segfault when deleting a `GPUPipelineLayout` (observed in a very specific use-case on LavaPipe). -* Fix `triangle_glsl.py` example. -* Fix that when logger is set to debug, errors are produced when Python exits. - -Added: - -* Support for linux-aarch64 (binary wheels available)! This includes Raspberry Pi's with a 64-bit OS, and adds support for building linux docker images on Apple Silicon devices without having to emulate x86 (no need for `--platform linux/amd64`). - - -### [v0.13.0] - 24-11-2023 - -Added: - -* Add `iDate` builtin to Shadertoy utility. -* Allow "auto" layout args for `create_compute_pipeline()`. -* Official support for Python 3.12 and pypy. - -Changed: - -* Update to wgpu-native 0.18.1.2. -* `CanvasContext.get_current_texture()` now returns a `GPUTexture` instead of a `GPUTextureView`. -* `OffscreenCanvasBase.present()` now receives a `GPUTexture` instead of a `GPUTextureView`, - and this is a new texture on each draw (no re-use). -* Renamed ``wgpu.gui.WgpuOffscreenCanvas` to `WgpuOffscreenCanvasBase`. -* The `wgpu.base` submodule that defined the GPU classes is renamed to be a private - module. The new `wgpu.classes` namespace contains all GPU classes (and nothing else). -* The `__repr__` of the GPU classes shows a shorter canonical class name. -* Flags and Enums have a more useful `__repr__`. - -Fixed: - -* Dragging a window between windows with different scale factor (with Qt on Windows) - no longer puts the window in an invalid state. A warning is still produced though. -* `GPUCommandEncoder.begin_render_pass()` binds the lifetime of passed texture views to - the returned render pass object to prevent premature destruction when no reference to - a texture view is kept. - - -### [v0.12.0] - 15-11-2023 - -This is a big release that contains many improvements, but also multiple API changes. - -Most backward incompatible changes are due to two things: the backend -system has been refactored, making it simpler and future-proof. 
And we -have revised the buffer mapping API, making it more similar to the -WebGPU spec, and providing more flexible and performant ways to set -buffer data. - -A summary to help you update your code: -```py -# X import wgpu.backends.rs -import wgpu - - -# X wgpu.request_adapter(canvas=None, power_preference="high-performance") -wgpu.gpu.request_adapter(power_preference="high-performance") - -# X buffer.map_read() -buffer.map("READ") -buffer.read_mapped(...) -buffer.read_mapped(...) -buffer.unmap() - -# X buffer.map_write() -buffer.map("WRITE") -buffer.write_mapped(data1, ...) -buffer.write_mapped(data2, ...) -buffer.unmap() -``` - -Added: - -* The `wgpu.gpu` object, which represents the API entrypoint. This makes the API more clear and more similar to the WebGPU API. -* A convenience `auto` backend, and a stub `js_webgpu` backend. -* New function `enumerate_adapters()` in the `wgpu_native` backend. -* Warning about pip when wgpu-native binary is missing on Linux -* The `GPUBuffer` has new methods `map()`, `map_async()`, `unmap()`. These have been - part of the WebGPU spec for a long time, but we had an alternative API, until now. -* The `GPUBuffer` has new methods `read_mapped()` and `write_mapped()`. These are not - present in the WebGPU spec; they are the Pythonic alternative to `getMappedRange()`. -* Flags can now be passed as strings, and can even be combined using "MAP_READ|COPY_DIST". -* GUI events have an extra "timestamp" field, and wheel events an additional "buttons" field. -* A diagnostics subsystem that amongst other things counts GPU objects. Try e.g. `wgpu.diagnostics.print_report()`. -* Several improvements to the shadertoy util: offscreen support and a snapshot method. - -Changed: - -* Can create a buffer that is initially mapped: `device.create_buffer(..., mapped_at_creation=True)` is enabled again. -* The `wgpu.request_adapter()` function is moved to `wgpu.gpu.request_adapter()`. Same for the async version. -* The `canvas` argument of the `request_adapter()` function is now optional. -* The `rs` backend is renamed to `wgpu_native`. -* It is no longer necessary to explicitly import the backend. -* The `GPUDevice.request_device_tracing()` method is now a function in the `wgpu_native` backend. -* We no longer force using Vulkan on Windows. For now wgpu-native still prefers Vulkan over D3D12. -* The `wgpu.utils` subpackage is imported by default, but most submodules are not. This means that `compute_with_buffers` must be explicitly imported from `wgpu.utils.compute`. - -Deprecated: - -* `wgpu.request_adapter()` and its async version. Use `wgpu.gpu.request_adapter()` instead. -* The `GPUBuffer` methods `map_read()`and `map_write()` are deprecated, in favor of `map()`, `unmap()`, `read_mapped()` and `write_mapped()`. - -To be clear, these are not changed: - -* The convenient `device.create_buffer_with_data()` (not part of the WebGPU spec) is still available. -* The `GPUQueue.read_buffer()` and `GPUQueue.write_buffer()` methods are unchanged. - -Fixed: - -* The shaderutil now re-uses the default device, avoiding memoryleaks when running multiple consecutively. -* The GUI backend selection takes into account whether a backend module is already imported. -* The offscreen GUI backend no longer uses asyncio (it does not need an event loop). -* Prevent a few classes of memoryleaks. Mind that creating many `GPUDevice` objects still leaks. - - -### [v0.11.0] - 11-10-2023 - -Changed: - -* Update to wgpu-native 0.17.2.1. No changes are needed in downstream code. 
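For readers applying the v0.12.0 migration summarized above, a slightly fuller round-trip sketch of the new mapping API (`map()`, `write_mapped()`, `read_mapped()`, `unmap()`) may help. This is a minimal illustration based on the calls named in the entry above, not code from this repository; the buffer size and payload are arbitrary, and it assumes the upstream `wgpu.gpu` entry point (this patch series trims the package, so the exact import surface may differ here).

```py
import wgpu

adapter = wgpu.gpu.request_adapter(power_preference="high-performance")
device = adapter.request_device()

data = bytes(range(64))  # arbitrary example payload

# Upload path: map the buffer for writing, write, then unmap.
src = device.create_buffer(
    size=len(data),
    usage=wgpu.BufferUsage.MAP_WRITE | wgpu.BufferUsage.COPY_SRC,
)
src.map("WRITE")
src.write_mapped(data)
src.unmap()

# Copy into a second buffer that is allowed to be mapped for reading.
dst = device.create_buffer(
    size=len(data),
    usage=wgpu.BufferUsage.MAP_READ | wgpu.BufferUsage.COPY_DST,
)
encoder = device.create_command_encoder()
encoder.copy_buffer_to_buffer(src, 0, dst, 0, len(data))
device.queue.submit([encoder.finish()])

# Download path: map for reading, copy the data out, then unmap.
dst.map("READ")
readback = bytes(dst.read_mapped())
dst.unmap()
assert readback == data
```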
- - -### [v0.10.0] - 09-10-2023 - -In this release the API is aligned with the latest webgpu.idl, and -we updated to wgpu-native (v0.17.0.2). - -Added: - -* New `wgpu.wgsl_language_features` property, which for now always returns an empty set. -* The `GPUShaderModule.compilation_info` property (and its async version) are replaced with a `get_compilation_info()` method. -* The WebGPU features "bgra8unorm-storage" and "float32-filterable" are now available. - -Changed: - -* The binary wheels are now based on manylinux 2.28, and the 32bit Linux wheels are no longer built. -* In WGSL: toplevel constants must be defined using `const`, using `let` will now fail. -* In WGSL: it is no longer possible to re-declare an existing variable name. -* Error messages may look a bit different, since wgpu-native now produces nice messages replacing our custom ones. -* Errors produced by a call into a wgpu-native function now produce a Python exception (no more async logging of errors). - - -### [v0.9.5] - 02-10-2023 - -Fixed: - -* Fixed setting the dpi awareness in the Qt backend, by correctly looking up the Qt version. - -Changed: - -* Links to readthedocs now point to *stable* instead of *latest*, so that people - reading the docs see these that reflect the latest release. -* Don't enable any features by default (previously WGPUNativeFeature_TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES was enabled). - - -### [v0.9.4] - 23-02-2023 - -Fixed: - -* Fixed issue related to winid (native widgets) on embedded Qt widgets on Windows (#348). -* Fixed our example screenshot tests. - - -### [v0.9.3] - 20-02-2023 - -Changed: - -* The offscreen `WgpuCanvas.draw()` method now returns a `memoryview` instead of a numpy array. -* The shadertoy util changed internally from using numpy to using a memoryview. - - -### [v0.9.2] - 17-02-2023 - -Fixed: - -* Fixed that `get_preferred_format()` could crash (in `wgpuSurfaceGetSupportedFormats`) due to an upstream bug in wgpu-native (#342) - -Added: - -* The shadertoy util now supports GLSL, so code from the shadertoy website can be direcly copied and run with wgpu (#343) - - -### [v0.9.1] - 13-02-2023 - -Changed: - -* Improved documentation. - -Added: - -* Added `print_report()` to get a report on the internals of wgpu. -* Added `command_encoder.clear_buffer()` -* Added support for GLSL. - - -### [v0.9.0] - 25-01-2023 - -In this release the API is aligned with the latest webgpu.idl, and -we updated to the latest release of wgpu-native (v0.14.2.3). - -Changed: - -* To use the default `min_binding_size` in `create_bind_group_layout`, it should be `None` instead of zero. -* If the depth-stencil texture has not room for stencil data, the `stencil_read_mask` and `stencil_write_mask` fields in the `DepthStencilState` struct passed to `create_render_pipeline()` must be set to 0. -* In WGSL, `@stage(compute)` must now be `@compute`. Same for `vertex` and `fragment`. -* In WGSL, the list of reserved words has been extended, including e.g. `mod`, `matrix` and `ref`. -* In WGSL, `smoothStep` is now `smoothstep`. - -Added: - -* New IDL: texture has new props `weight`, `height`, `depth_or_array_layers`. -* New IDL: Buffer has new prop `map_state`. 
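To make the WGSL syntax notes in the entries above concrete (top-level constants now use `const` rather than `let`, and the `@compute` attribute replaces the old `@stage(compute)`), here is a small hedged sketch that only compiles a compute shader module. The shader body is a made-up placeholder, not a shader from this repository.

```py
import wgpu

shader_source = """
// Top-level constants must use `const`; `let` is no longer accepted here.
const SCALE: f32 = 2.0;

@group(0) @binding(0)
var<storage, read_write> data: array<f32>;

// The entry-point attribute is now `@compute` (previously `@stage(compute)`).
@compute @workgroup_size(64)
fn main(@builtin(global_invocation_id) gid: vec3<u32>) {
    data[gid.x] = data[gid.x] * SCALE;
}
"""

adapter = wgpu.gpu.request_adapter(power_preference="high-performance")
device = adapter.request_device()
module = device.create_shader_module(code=shader_source)  # compiled here, never dispatched
```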
- - -### [v0.8.4] - 10-01-2023 - -Fixed: - -* The offscreen canvas's mainloop prevents leaking callbacks better (#322) -* Prevent error messages when Qt examples/apps are closed (#326) - - -### [v0.8.3] - 06-01-2023 - -Fixed: - -* Prevent Qt warning about setting dpi awareness (#320) -* Make canvases close when they get deleted (#319) -* Fix qt canvas in ipython (#315) -* Make offscreen canvas closable (#309) -* Fix that the offscreen canvas had it size hardcoded, ignoring the given size (#317) -* Fixed renaming of `queue` in docs (#308) -* Fix using `.draw_frame` on qt canvas (#304) -* Add missing dev dependencies (#295) - -Added: - -* A shadertoy utility, plus examples (#312) - -Changed: - -* Improve the error prompt when wgsl code is multi line error (#311, #316) -* Tests: execute examples in the test process (#310) -* Package only the release binary (not the debug build) (#299) -* Codegen: uses in-memory file system during code generation (#303) -* Improve readme (#290, #302, #314) - - -### [v0.8.2] - 06-10-2022 - -Fixed: - -* Fixed imports for PyQt6. -* Keyboard events work again for Qt 6.3. -* Fixed that overloading ``handle_event()`` did not work for a canvas based on a Qt or wx main widget/window. - -Added: - -* Can now add a wildcard ("*") to ``add_event_handler`` to handle all events. -* Shader error messages show more context, making shader debugging much easier. -* VSync can now be turned off to raise the frame rate when needed. Note that FPS measurements are still a poor performance benchmark! - -Changed: - -* GLFW canvas does not draw when minimized anymore. -* The offscreen and Jupyter canvas now use the srgb format for consistency with normal canvases. -* The examples have been adjusted for srgb colors. - - -### [v0.8.1] - 29-04-2022 - -Fixed: - -* Fixed regression that `canvas.handle_event()` could no longer be overloaded to handle move and wheel events. - -Changed: - -* Added a note in the docs to explain that the version of the examples must match the runtime version. - - -### [v0.8.0] - 20-04-2022 - -Changed: - -* Now targeting wgpu-native 0.12.0.1. -* Updated API to the latest WebGPU spec. -* Better error logging using the new callbacks in wgpu-native. -* All destructors (drop methods) are now working as they should. - -To update, you need to adjust to the following API changes: - -* The encoder's `end_pass()` are renamed to `end()`. -* The compute encoder's `dispatch()` is renamed `dispatch_workgroups`, and `dispatch_indirect` to `dispatch_workgroups_indirect`. -* The `load_value` is replaced with `clear_value` and `load_op`. -* Same for `depth_load_value` and `stencil_load_value`. -* The `device.create_sampler()` method for mipmap filtering now uses the `MipmapFilterMode` enum instead of the `FilterMode` enum. Since the fields of these enums are the same, you probably don't need to change anything. - - -To update, your shaders need the following changes: - -* The decorators have changed from `[[...]]` to `@...`. - * E.g. change `[[location(0)]]` to `@location(0)`. - * E.g. change `[[group(0), binding(0)]]` to `@group(0) @binding(0)`. -* Structs now use `,` to separate fields instead of `;`. -* The `elseif` keyword is now `else if`. -* Buffers bound as arrays don't need to be defined via a struct anymore. - - -### [v0.7.7] - 12-04-2022 - -Fixed: - -* Fixed that event handlers could not be added while in an event handler. -* Prevent swap chain errors when minimizing a window. - -Added: - -* The `QWgpuWidget` now also supports the autogui events. 
-* Our CI now tests the examples (including image comparisons). - - -### [v0.7.6] - 28-03-2022 - -Changed: - -* Pointer move and wheel events are now rate-limited, leading to better performance if e.g. picking is done at each event. - -Added: - -* Added `wgpu.gui.base.log_exception`, a context-manager to catch and log exceptions, e.g. in event callbacks. - - -### [v0.7.5] - 17-03-2022 - -Fixed: - -* Mouse down events were not emitted during double clicks in the Qt canvas. -* Mouse move events were not emitted no button is pressed in the Qt canvas. - - -### [v0.7.4] - 04-02-2022 - -Fixed: - -* Position of glfw pointer events on MacOS. - - -### [v0.7.3] - 25-01-2022 - -Added: - -* Expanded the `auto` gui backend, which can now also select qt framework if available. -* The qt gui backend (like the glfw gui backend) supports user events in the same manner as - the jupyter gui backend. -* Expanded the `auto` gui backend to also support an offscreen canvas intended for automated tests. - -Fixed: - -* Size of glfw windows on MacOS. - - -### [v0.7.2] - 24-12-2021 - -Fixed: - -* Exceptions in user-interaction callbacks don't break the glfw loop anymore. -* Pointer events in glfw have the correct key modifiers now. - - -### [v0.7.1] - 22-12-2021 - -Added: - -* #224 - Added `add_event_handler` and `remove_event_handler` to GLFW and Jupyter GUI canvases. - - -### [v0.7.0] - 21-12-2021 - -Changed: - -* Now targeting wgpu-native v0.11.0.1, containing many upstream fixes and improvements. -* The `[[block]]` syntax in shaders has been dropped. -* Renamed `ProgrammableStage.clamp_depth` -> `unclipped_depth`. - - -### [v0.6.0] - 16-12-2021 - -Added: - -* Official support for Windows 3.10. -* The `max_fps` argument can be provided to a canvas. -* The glfw gui backend supports user events in the same manner as the jupyter gui backend, - using the [jupyter_rfb event specification](https://jupyter-rfb.readthedocs.io/en/stable/events.html). -* Introduce the `auto` gui backend, which selects either glfw or jupyter. - -Fixed: - -* The wx gui backend is now fully functional. - -Changed: - -* The qt and wx gui backend now contain `WgpuCanvas` for a toplevel window, - and `WgpuWidget` for an embeddable widget. -* All gui backends (can) now limit the FPS. -* No changes to the wgpu API. - - -### [v0.5.9] - 11-10-2021 - -Fixed: - -* Include the correct binaries in macOS arm64 wheels -* Options for arch argument of download-wgpu-native.py script - - -### [v0.5.8] - 09-10-2021 - -Added: - -* Experimental support for macos_arm64 (M1). - -Changed: - -* The Qt examples use PySide6 instead of PyQt5. - - -### [v0.5.7] - 07-10-2021 - -Changed: - -* Update to the latest wgpu-native (including latest Naga). -* The power-preference is actually taken into account. -* The adapter actually reports its limits. -* The limits in `request_device` are actually used. -* The `Adapter.is_software` property is renamed to `Adapter.is_fallback_adapter`. - - -### [v0.5.6] - 30-08-2021 - -Added: - -* An offscreen canvas to take snapshots without needing a window. - -Changed: - -* On Windows, the Vulkan backend is now forced unless `WGPU_BACKEND_TYPE` is set. - -Fixed: - -* Better support for multiple canvases by fixing a specific Qt issue. -* Fixed that canvas was not passed to low level function of `request_adapter`. -* Support calling `get_current_texture()` multiple times during a draw. - - -### [v0.5.5] - 09-08-2021 - -Added: - -* The wgpu backend can be forced using the `WGPU_BACKEND_TYPE` env variable. - Values can be e.g. 
"D3D12", "Metal", "Vulkan". -* Initial support for off-screen canvases. -* Adds `adapter.is_software` property. - -Changed: - -* The `GPUPresentationContext` class has been renamed to `GPUCanvasContext`. -* The functionality of the swap-chain has moved to the `GPUCanvasContext`. -* The now removed `GPUSwapChain` was used as a context manager. Instead, - the frame is presented (ala GL swapbuffers) automatically at the end of a draw. -* The `canvas.configure_swap_chain()` method has been removed. Instead, - `canvas.get_context()` should be used, to obtain a present/canvas context. -* The `adapter.request_device()` method has its arguments `non_guaranteed_features` - and `non_guaranteed_limits` replaced with `required_features` and `required_limits`. -* The enum field `StoreOp.clear` is now `StoreOp.discard`. -* The flag field `TextureUsage.SAMPLED ` is now `TextureUsage.TEXTURE_BINDING `. -* The flag field `TextureUsage.STORAGE ` is now `TextureUsage.STORAGE_BINDING `. -* The enum `InputStepMode` is now `VertexStepMode`. -* WGSL: `arrays` must be declared as `var` (not `let`) in order to allow dynamic indexing. -* WGSL: storage classes are written differently. - - -### [v0.5.4] - 11-06-2021 - -Changed: - -* The backend selection is automatic by default. To force a backend, the `WGPU_BACKEND_TYPE` evironment variable can be set to e.g. "Vulkan". It could be good to do this on Windows to prevent selection of DX12 for now. - - -### [v0.5.3] - 04-06-2021 - -Added: - -* `adapter.properties` now has actual values, allowing inspeciton of the selected - GPU and backend. -* Added back support for filtering float32 textures by enabling a certain wgpu feature - by default. - -Fixed: - -* An error in the docs of `create_render_pipeline`. -* Vulkan backend is now forced to prevent DX12 being select and causing probems - because it's less mature. - - -### [v0.5.2] - 23-05-2021 - -This release uses a new version of wgpu-native which has changed quite a bit internally. There -is more validation (thus more restrictions). There are only a few changes to the API. -However, one big change is that shaders can now be provided as both SpirV and WGSL. Due to -the strict validation, most shaders compiled by PyShader are not usable anymore. We -recommend using WGSL instead. - -Added: - -* Added `GPUAdaper.properties` (the amount of information it contains will increase in the future). -* Added proper support for WGSL. - -Changed: - -* Renamed `renderpass.set_blend_color` -> `set_blend_constant`. -* Stricter validation of SpirV shaders. -* Float32 texture formats must now use a non-filtering sampler and texture-sample-type. -* Integer texture formats can no longer use a texture (use `textureLoad` instead). -* ... and more tighter restrictions. - -Removed: - -* The API concerning debug markers and groups is temporarily removed. -* Adapter and device features is temporarily removed. -* Adapter and device limits is temporarily removed. - - -### [v0.4] - 21-05-2021 - -This release represents about half a year of progress on the WebGPU API, so the API -has changed quite a bit. The wgpu-py API more closely reflects the webgpu API - wgpu-native does -not affect the API except for a few additional features. - -Added: - -* Added `GPUQueue.read_buffer` as extra API (next to `write_buffer` which is original WebGPU API). -* Added `GPUQueue.read_texture` as extra API. - -y -Removed: - -* Removed `GPUBuffer.read_data()`. Use `device.queue.read_buffer()` instead. Note that `usage` `MAP_READ` should be replaced with `COPY_SRC`. 
-* Removed `GPUBuffer.write_data()`. Use `device.queue.write_buffer()` instead. Note that `usage` `MAP_WRITE` should be replaced with `COPY_DST`.
-
-Changed:
-
-* `GPUCanvasContext.get_swap_chain_preferred_format()`: now takes an `adapter` instead of a `device`.
-* `GPUAdapter.extensions`: is now called `features`.
-* `GPUAdapter.request_device()`: the `extensions` and `limit` args are now `non_guaranteed_features` and `non_guaranteed_limits`.
-* `GPUDevice.default_queue`: is now called `queue`.
-* `GPUDevice.create_compute_pipeline()`: the `compute_stage` arg is now called `compute`.
-* `GPUDevice.create_bind_group_layout()` has changed the required structure of the layout entry dicts.
-* `GPUDevice.create_render_pipeline()` has changed *a lot* in terms of shape of input dicts. See new docs.
-* `GPUTexture.create_view()`: args `mip_level_count` and `array_layer_count` are default `None` instead of `0`.
-* `GPUCommandEncoder.begin_render_pass()`: the `color_attachments` and `depth_stencil_attachment` arguments have their `attachment` field renamed to `view`.
-* `GPURenderEncoderBase.set_index_buffer()` has an extra argument (after the buffer) to specify the format. The index format is no longer specified in `device.create_render_pipeline()`.
-* Flag `TextureUsage` has field OUTPUT_ATTACHMENT renamed to RENDER_ATTACHMENT.
-* Enum `BindingType` is split up into different enums for buffer, sampler, sampled texture and storage texture.
-* Enum `BlendFactor` has some of its field names changed.
-* Enum `VertexFormat` has its field names changed, e.g. ushort2 -> uint16x2.
-* The API is more restrictive in the use of buffer/texture usage combinations.
-* The API is more restrictive in formats for storage buffers/textures.
-* When copying from/to textures, the `bytes_per_row` must now be a multiple of 256.
-
-
-### [v0.3.0] - 2020-07-05
-
-With this update we're using a later release of wgpu-native, and follow changes
-in the WebGPU spec. Further, we've removed the need for ctypes to communicate
-data arrays. Instead, wgpu-py can consume any object that supports the buffer
-protocol, and it returns `memoryview` objects.
-
-Added:
-
-* The texture object has more properties to query the parameters that it was created with.
-* The texture view object has a `texture` property.
-* The render and compute pipeline objects have a property `layout` and a method `get_bind_group_layout()`.
-* The shader object got a `compilation_info` method, but this does not do anything yet.
-* The `create_shader_module()` has a `source_map` attribute, but this is yet unused.
-* Log messages from wgpu-native (Rust) are now injected into Python's logger.
-* The `queue` object got two new methods `write_buffer` and `write_texture`.
-* The buffer has `read_data()` and `write_data()` methods. Note: the latter may be removed later.
-* The device `create_buffer_with_data` is added as a convenience function. This will likely stay.
-
-Changed:
-
-* Targets wgpu-native v.0.5.2. The first release build from the wgpu-native repo itself.
-* The `array_layer` in copy operations involving a texture is removed.
-* The `utils.compute_with_buffers` function now accepts *any* data that supports
- the buffer protocol (not just ctypes arrays). The outputs are `memoryview` objects,
- whose shape and format can be specified. When a ctypes array type is specified,
- the output will be an instance of that type. This means that these changes are
- fully backwards compatible.
-
-Removed:
-
-* The buffer (for now) no longer exposes a data mapping API.
Instead use `read_data()` and `write_data()`. -* The device `create_buffer_mapped` method is similarly removed. Use `create_buffer_with_data` instead. - - -### [v0.2.0] - 2020-04-16 - -Added: - -* The canvase now has a `request_draw` method. -* More and better docs. -* The canvas can be passed to `request_adapter` so that the created surface - can be selected on it. - * Support for debug markers. - -Changed: - -* Targets wgpu-native v0.5.1. This is the last release when wgpu-native was still part of wgpu-core. -* The `bindings` in bind groups and bind group layouts are now called `entries`. -* There is no more generic storage texture, only a readonly and a writeonly one. -* The `set_index_buffer` and `set_vertex_buffer` methods got a `size` argument. -* The `row_pitch` and `image_height` args in copy operations involving a texture - are renamed to `bytes_per_row` and `rows_per_image`. -* Rendering is now done under the swap_chain's context: `with swap_chain as current_texture_view` - - -### [v0.1.6] - 2020-04-01 - -This release is the first moderately mature version of wgpu-py. +### [v0.1.0] - unreleased \ No newline at end of file diff --git a/LICENSE b/LICENSE index eef4b5a..5f90e52 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 2-Clause License -Copyright (c) 2019-2023, Almar Klein, Korijn van Golen +Copyright (c) 2024, Jan Kels All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/setup.py b/setup.py index 02fd049..1aa1d3b 100644 --- a/setup.py +++ b/setup.py @@ -5,8 +5,8 @@ from wheel.bdist_wheel import get_platform, bdist_wheel as _bdist_wheel -NAME = "wgpu" -SUMMARY = "Next generation GPU API for Python" +NAME = "shadertoy" +SUMMARY = "Shadertoy implementation based on wgpu-py" with open(f"{NAME}/__init__.py") as fh: VERSION = re.search(r"__version__ = \"(.*?)\"", fh.read()).group(1) @@ -24,7 +24,7 @@ description=SUMMARY, long_description=open("README.md").read(), long_description_content_type="text/markdown", - author="Almar Klein", - author_email="almar.klein@gmail.com", - url="https://github.com/pygfx/wgpu-py", + author="Jan Kels", + author_email="Jan.Kels@hhu.de", + url="https://github.com/pygfx/shadertoy", ) diff --git a/wgpu/__init__.py b/wgpu/__init__.py index 3e1ca68..5fa53e3 100644 --- a/wgpu/__init__.py +++ b/wgpu/__init__.py @@ -5,5 +5,5 @@ from . 
import utils # noqa: F401,F403 -__version__ = "0.13.2" +__version__ = "0.1.0" version_info = tuple(map(int, __version__.split("."))) From 3546490da8e0c5b62847a0f9b4331da3ebd5ab66 Mon Sep 17 00:00:00 2001 From: Korijn van Golen Date: Fri, 5 Jan 2024 10:56:42 +0100 Subject: [PATCH 15/20] clean up packaging setup, readme and imports --- .gitignore | 4 - README.md | 116 +++---------------------- dev-requirements.txt | 19 ---- examples/shadertoy_blink.py | 2 +- examples/shadertoy_circuits.py | 2 +- examples/shadertoy_flyby.py | 2 +- examples/shadertoy_gen_art.py | 2 +- examples/shadertoy_glsl_clock.py | 2 +- examples/shadertoy_glsl_flame.py | 2 +- examples/shadertoy_glsl_fuji.py | 2 +- examples/shadertoy_glsl_inercia.py | 2 +- examples/shadertoy_glsl_mouse_event.py | 2 +- examples/shadertoy_glsl_sdf.py | 2 +- examples/shadertoy_glsl_sea.py | 2 +- examples/shadertoy_glsl_stone.py | 2 +- examples/shadertoy_glsl_textures.py | 2 +- examples/shadertoy_liberation.py | 2 +- examples/shadertoy_matrix.py | 2 +- examples/shadertoy_riders.py | 2 +- examples/shadertoy_sea.py | 2 +- examples/shadertoy_star.py | 2 +- examples/shadertoy_textures.py | 2 +- examples/tests/test_examples.py | 2 +- pyproject.toml | 52 ++++++----- setup.py | 30 ------- shadertoy/__init__.py | 5 ++ {wgpu/utils => shadertoy}/shadertoy.py | 0 tests/test_util_shadertoy.py | 8 +- wgpu/__init__.py | 9 -- wgpu/utils/__init__.py | 0 30 files changed, 71 insertions(+), 212 deletions(-) delete mode 100644 dev-requirements.txt delete mode 100644 setup.py create mode 100644 shadertoy/__init__.py rename {wgpu/utils => shadertoy}/shadertoy.py (100%) delete mode 100644 wgpu/__init__.py delete mode 100644 wgpu/utils/__init__.py diff --git a/.gitignore b/.gitignore index 526929d..6cb2193 100644 --- a/.gitignore +++ b/.gitignore @@ -1,9 +1,5 @@ # Special for this repo nogit/ -wgpu/resources/*.dll -wgpu/resources/*.so -wgpu/resources/*.dylib -wgpu/resources/commit-sha examples/screenshots/diffs # Byte-compiled / optimized / DLL files diff --git a/README.md b/README.md index 2b51e38..a2d626d 100644 --- a/README.md +++ b/README.md @@ -1,117 +1,24 @@ -[![CI](https://github.com/pygfx/wgpu-py/workflows/CI/badge.svg)](https://github.com/pygfx/wgpu-py/actions) -[![Documentation Status](https://readthedocs.org/projects/wgpu-py/badge/?version=stable)](https://wgpu-py.readthedocs.io) -[![PyPI version](https://badge.fury.io/py/wgpu.svg)](https://badge.fury.io/py/wgpu) +[![CI](https://github.com/pygfx/shadertoy/workflows/CI/badge.svg)](https://github.com/pygfx/shadertoy/actions) -# wgpu-py - -A Python implementation of WebGPU - the next generation GPU API. - - - - - -## Introduction - -In short, this is a Python lib wrapping -[wgpu-native](https://github.com/gfx-rs/wgpu) and exposing it with a Pythonic -API similar to the [WebGPU spec](https://gpuweb.github.io/gpuweb/). - -The OpenGL API is old and showing it's cracks. New API's like Vulkan, Metal and -DX12 provide a modern way to control the GPU, but these API's are too low-level -for general use. The WebGPU API follows the same concepts, but with a simpler -(higher level) spelling. The Python `wgpu` library brings the WebGPU API to -Python. - -To get an idea of what this API looks like have a look at -[triangle.py](https://github.com/pygfx/wgpu-py/blob/main/examples/triangle.py) -and the other [examples](https://github.com/pygfx/wgpu-py/blob/main/examples/). - - -## Status - -> **Note** -> -> The wgpu-API has not settled yet, use with care! - -* Coverage of the WebGPU spec is complete enough to build e.g. 
- [pygfx](https://github.com/pygfx/pygfx). -* Test coverage of the API is close to 100%. -* Support for Windows, Linux, and MacOS (Intel and M1). -* Until WebGPU settles as a standard, its specification may change, and with - that our API will probably too. Check the [changelog](CHANGELOG.md) when you - upgrade! - - -## Installation - - -``` -pip install wgpu glfw -``` - -Linux users should make sure that **pip >= 20.3**. That should do the -trick on most systems. See [getting started](https://wgpu-py.readthedocs.io/en/stable/start.html) -for details. - - -## Usage - -Also see the [online documentation](https://wgpu-py.readthedocs.io) and the [examples](https://github.com/pygfx/wgpu-py/tree/main/examples). - -The full API is accessable via the main namespace: -```py -import wgpu -``` - -To render to the screen you can use a variety of GUI toolkits: - -```py -# The auto backend selects either the glfw, qt or jupyter backend -from wgpu.gui.auto import WgpuCanvas, run, call_later - -# Visualizations can be embedded as a widget in a Qt application. -# Import PySide6, PyQt6, PySide2 or PyQt5 before running the line below. -# The code will detect and use the library that is imported. -from wgpu.gui.qt import WgpuCanvas - -# Visualizations can be embedded as a widget in a wx application. -from wgpu.gui.wx import WgpuCanvas -``` - -Some functions in the original `wgpu-native` API are async. In the Python API, -the default functions are all sync (blocking), making things easy for general use. -Async versions of these functions are available, so wgpu can also work -well with Asyncio or Trio. +# shadertoy +Shadertoy implementation based on wgpu-py. ## License This code is distributed under the 2-clause BSD license. - ## Developers * Clone the repo. -* Install devtools using `pip install -r dev-requirements.txt` (you can replace - `pip` with `pipenv` to install to a virtualenv). -* Install wgpu-py in editable mode by running `pip install -e .`, this will also - install runtime dependencies as needed. -* Run `python download-wgpu-native.py` to download the upstream wgpu-native - binaries. - * Or alternatively point the `WGPU_LIB_PATH` environment variable to a custom - build. -* Use `black .` to apply autoformatting. -* Use `flake8 .` to check for flake errors. -* Use `pytest .` to run the tests. -* Use `pip wheel --no-deps .` to build a wheel. - - -### Updating to a later version of WebGPU or wgpu-native - -To update to upstream changes, we use a combination of automatic code -generation and manual updating. See [the codegen utility](codegen/README.md) -for more information. +* Create a virtual environment using `python -m venv .venv` +* Install using `.venv/bin/pip install -e .[dev]` +* Use `.venv/bin/black .` to apply autoformatting. +* Use `.venv/bin/flake8 .` to check for flake errors. +* Use `.venv/bin/pytest .` to run the tests. +* Use `.venv/bin/pip wheel -w dist --no-deps .` to build a wheel. +*Note*: Replace `/bin/` with `/Scripts/` on Windows. ## Testing @@ -119,9 +26,6 @@ The test suite is divided into multiple parts: * `pytest -v tests` runs the core unit tests. * `pytest -v examples` tests the examples. -* `pytest -v wgpu/__pyinstaller` tests if wgpu is properly supported by - pyinstaller. -* `pytest -v codegen` lints the generated binding code. There are two types of tests for examples included: diff --git a/dev-requirements.txt b/dev-requirements.txt deleted file mode 100644 index 2b1492c..0000000 --- a/dev-requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -# For unit tests, linting, etc. 
-requests -numpy -pytest -black -flake8 -flake8-black -pep8-naming -sphinx -imageio -pyinstaller -psutil - -# Building wheels -wheel -setuptools -twine -auditwheel; sys_platform == 'linux' -cibuildwheel diff --git a/examples/shadertoy_blink.py b/examples/shadertoy_blink.py index 6c1877d..8e04f28 100644 --- a/examples/shadertoy_blink.py +++ b/examples/shadertoy_blink.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_circuits.py b/examples/shadertoy_circuits.py index e8d6be6..1c49ab8 100644 --- a/examples/shadertoy_circuits.py +++ b/examples/shadertoy_circuits.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_flyby.py b/examples/shadertoy_flyby.py index 73a015f..62f5348 100644 --- a/examples/shadertoy_flyby.py +++ b/examples/shadertoy_flyby.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_gen_art.py b/examples/shadertoy_gen_art.py index 4e858f5..c48c1b9 100644 --- a/examples/shadertoy_gen_art.py +++ b/examples/shadertoy_gen_art.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_clock.py b/examples/shadertoy_glsl_clock.py index 8ebb5c2..21ee515 100644 --- a/examples/shadertoy_glsl_clock.py +++ b/examples/shadertoy_glsl_clock.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ // source: https://www.shadertoy.com/view/MdVcRd diff --git a/examples/shadertoy_glsl_flame.py b/examples/shadertoy_glsl_flame.py index a81172e..b734f33 100644 --- a/examples/shadertoy_glsl_flame.py +++ b/examples/shadertoy_glsl_flame.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_fuji.py b/examples/shadertoy_glsl_fuji.py index 8cf3c62..ffae0f8 100644 --- a/examples/shadertoy_glsl_fuji.py +++ b/examples/shadertoy_glsl_fuji.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_inercia.py b/examples/shadertoy_glsl_inercia.py index 0ecef49..f4a2867 100644 --- a/examples/shadertoy_glsl_inercia.py +++ b/examples/shadertoy_glsl_inercia.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_mouse_event.py b/examples/shadertoy_glsl_mouse_event.py index 7fb748f..99f72f5 100644 --- a/examples/shadertoy_glsl_mouse_event.py +++ b/examples/shadertoy_glsl_mouse_event.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_sdf.py b/examples/shadertoy_glsl_sdf.py index 10b5b18..b83f8b4 100644 --- a/examples/shadertoy_glsl_sdf.py +++ b/examples/shadertoy_glsl_sdf.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_sea.py b/examples/shadertoy_glsl_sea.py index 8276797..3af145f 100644 --- a/examples/shadertoy_glsl_sea.py +++ b/examples/shadertoy_glsl_sea.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_stone.py 
b/examples/shadertoy_glsl_stone.py index 57181f5..b63da6a 100644 --- a/examples/shadertoy_glsl_stone.py +++ b/examples/shadertoy_glsl_stone.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_glsl_textures.py b/examples/shadertoy_glsl_textures.py index 8d9e6bf..f7c1a2a 100644 --- a/examples/shadertoy_glsl_textures.py +++ b/examples/shadertoy_glsl_textures.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy, ShadertoyChannel +from shadertoy import Shadertoy, ShadertoyChannel shader_code = """ void mainImage( out vec4 fragColor, in vec2 fragCoord ) diff --git a/examples/shadertoy_liberation.py b/examples/shadertoy_liberation.py index 0914057..ab2add7 100644 --- a/examples/shadertoy_liberation.py +++ b/examples/shadertoy_liberation.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_matrix.py b/examples/shadertoy_matrix.py index 1687df1..1437666 100644 --- a/examples/shadertoy_matrix.py +++ b/examples/shadertoy_matrix.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_riders.py b/examples/shadertoy_riders.py index ce040b6..f710def 100644 --- a/examples/shadertoy_riders.py +++ b/examples/shadertoy_riders.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_sea.py b/examples/shadertoy_sea.py index fa02caa..df54f7d 100644 --- a/examples/shadertoy_sea.py +++ b/examples/shadertoy_sea.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_star.py b/examples/shadertoy_star.py index 48e95ab..780e6f9 100644 --- a/examples/shadertoy_star.py +++ b/examples/shadertoy_star.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy +from shadertoy import Shadertoy shader_code = """ diff --git a/examples/shadertoy_textures.py b/examples/shadertoy_textures.py index a8b1a49..8b1a279 100644 --- a/examples/shadertoy_textures.py +++ b/examples/shadertoy_textures.py @@ -1,4 +1,4 @@ -from wgpu.utils.shadertoy import Shadertoy, ShadertoyChannel +from shadertoy import Shadertoy, ShadertoyChannel shader_code_wgsl = """ fn shader_main(frag_coord: vec2) -> vec4{ diff --git a/examples/tests/test_examples.py b/examples/tests/test_examples.py index 5e80e04..7633295 100644 --- a/examples/tests/test_examples.py +++ b/examples/tests/test_examples.py @@ -13,7 +13,7 @@ import pytest -from testutils import ( +from tests.testutils import ( can_use_wgpu_lib, wgpu_backend, is_lavapipe, diff --git a/pyproject.toml b/pyproject.toml index 732fe5a..1549f6a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,26 +5,38 @@ requires = [ ] build-backend = "setuptools.build_meta" -[tool.cibuildwheel] -# we only build on one python version since the wheels are not bound to it -build = "cp39-*" - -# this is sufficient to trigger an install of the built wheel -test-command = "echo Wheel installed" +[project] +name = "shadertoy" +dynamic = ["version", "readme"] +dependencies = [ + "wgpu>=0.13.2,<0.14.0", +] +description = "Shadertoy implementation based on wgpu-py" +license = {file = "LICENSE"} +requires-python = ">=3.8.0" +authors = [ + {name = "Jan Kels", email = "Jan.Kels@hhu.de"}, +] -# this is the minimum supported manylinux version -manylinux-x86_64-image = 
"manylinux_2_24" -manylinux-i686-image = "manylinux_2_24" -manylinux-aarch64-image = "manylinux_2_24" -manylinux-ppc64le-image = "manylinux_2_24" -manylinux-s390x-image = "manylinux_2_24" -manylinux-pypy_x86_64-image = "manylinux_2_24" -manylinux-pypy_i686-image = "manylinux_2_24" -manylinux-pypy_aarch64-image = "manylinux_2_24" +[project.urls] +Repository = "https://github.com/pygfx/shadertoy" -[tool.cibuildwheel.macos] -# also create apple silicon wheels -archs = ["x86_64", "arm64"] +[project.optional-dependencies] +dev = [ + "requests", + "numpy", + "pytest", + "black", + "flake8", + "flake8-black", + "pep8-naming", + "sphinx", + "imageio", + "wheel", + "setuptools", + "twine", +] -# the upstream binaries are not universal yet -# archs = ["x86_64", "universal2", "arm64"] +[tool.setuptools.dynamic] +version = {attr = "shadertoy.__version__"} +readme = {file = ["README.md"]} diff --git a/setup.py b/setup.py deleted file mode 100644 index 1aa1d3b..0000000 --- a/setup.py +++ /dev/null @@ -1,30 +0,0 @@ -import re -import platform - -from setuptools import find_packages, setup -from wheel.bdist_wheel import get_platform, bdist_wheel as _bdist_wheel - - -NAME = "shadertoy" -SUMMARY = "Shadertoy implementation based on wgpu-py" - -with open(f"{NAME}/__init__.py") as fh: - VERSION = re.search(r"__version__ = \"(.*?)\"", fh.read()).group(1) - - - -setup( - name=NAME, - version=VERSION, - packages=find_packages( - exclude=["tests", "tests.*", "examples", "examples.*"] - ), - python_requires=">=3.8.0", - license="BSD 2-Clause", - description=SUMMARY, - long_description=open("README.md").read(), - long_description_content_type="text/markdown", - author="Jan Kels", - author_email="Jan.Kels@hhu.de", - url="https://github.com/pygfx/shadertoy", -) diff --git a/shadertoy/__init__.py b/shadertoy/__init__.py new file mode 100644 index 0000000..8b3c589 --- /dev/null +++ b/shadertoy/__init__.py @@ -0,0 +1,5 @@ +from .shadertoy import Shadertoy # noqa: F401,F403 + + +__version__ = "0.1.0" +version_info = tuple(map(int, __version__.split("."))) diff --git a/wgpu/utils/shadertoy.py b/shadertoy/shadertoy.py similarity index 100% rename from wgpu/utils/shadertoy.py rename to shadertoy/shadertoy.py diff --git a/tests/test_util_shadertoy.py b/tests/test_util_shadertoy.py index 5223243..bf99040 100644 --- a/tests/test_util_shadertoy.py +++ b/tests/test_util_shadertoy.py @@ -19,7 +19,7 @@ def force_offscreen(): def test_shadertoy_wgsl(): # Import here, because it imports the wgpu.gui.auto - from wgpu.utils.shadertoy import Shadertoy # noqa + from shadertoy import Shadertoy # noqa shader_code = """ fn shader_main(frag_coord: vec2) -> vec4 { @@ -44,7 +44,7 @@ def test_shadertoy_wgsl(): def test_shadertoy_glsl(): # Import here, because it imports the wgpu.gui.auto - from wgpu.utils.shadertoy import Shadertoy # noqa + from shadertoy import Shadertoy # noqa shader_code = """ void shader_main(out vec4 fragColor, vec2 frag_coord) { @@ -69,7 +69,7 @@ def test_shadertoy_glsl(): def test_shadertoy_offscreen(): # Import here, because it imports the wgpu.gui.auto - from wgpu.utils.shadertoy import Shadertoy # noqa + from shadertoy import Shadertoy # noqa shader_code = """ void shader_main(out vec4 fragColor, vec2 frag_coord) { @@ -93,7 +93,7 @@ def test_shadertoy_offscreen(): def test_shadertoy_snapshot(): # Import here, because it imports the wgpu.gui.auto - from wgpu.utils.shadertoy import Shadertoy # noqa + from shadertoy import Shadertoy # noqa shader_code = """ void shader_main(out vec4 fragColor, vec2 frag_coord) { diff 
--git a/wgpu/__init__.py b/wgpu/__init__.py deleted file mode 100644 index 5fa53e3..0000000 --- a/wgpu/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -The wgpu library is a Python implementation of WebGPU. -""" - -from . import utils # noqa: F401,F403 - - -__version__ = "0.1.0" -version_info = tuple(map(int, __version__.split("."))) diff --git a/wgpu/utils/__init__.py b/wgpu/utils/__init__.py deleted file mode 100644 index e69de29..0000000 From 6be6810e163ae49fd50eb0037752248f95ccfd97 Mon Sep 17 00:00:00 2001 From: Korijn van Golen Date: Fri, 5 Jan 2024 10:58:11 +0100 Subject: [PATCH 16/20] clean up ci pipelines accordingly --- .github/workflows/ci.yml | 32 ++----------------------------- .github/workflows/screenshots.yml | 3 +-- 2 files changed, 3 insertions(+), 32 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d305bc2..8114545 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,8 +4,6 @@ on: push: branches: - main - tags: - - 'v*' pull_request: branches: - main @@ -32,27 +30,6 @@ jobs: run: | flake8 . - docs-build: - name: Test Docs - timeout-minutes: 5 - runs-on: ubuntu-latest - strategy: - fail-fast: false - steps: - - uses: actions/checkout@v3 - - name: Set up Python 3.9 - uses: actions/setup-python@v4 - with: - python-version: 3.9 - - name: Install dev dependencies - run: | - python -m pip install --upgrade pip - pip install -U -r dev-requirements.txt - - name: Build docs - run: | - cd docs - make html SPHINXOPTS="-W --keep-going" - test-examples-build: name: Test Examples timeout-minutes: 10 @@ -72,8 +49,7 @@ jobs: - name: Install dev dependencies run: | python -m pip install --upgrade pip - pip install -U -r dev-requirements.txt - pip install -e . + pip install -e .[dev] - name: Test examples env: EXPECT_LAVAPIPE: true @@ -120,11 +96,7 @@ jobs: - name: Install dev dependencies run: | python -m pip install --upgrade pip - pip install -U -r dev-requirements.txt - pip install -e . + pip install -e .[dev] - name: Unit tests run: | pytest -v tests - - name: Memory tests - run: | - pytest -v tests_mem diff --git a/.github/workflows/screenshots.yml b/.github/workflows/screenshots.yml index 7e0b7b8..e624a4d 100644 --- a/.github/workflows/screenshots.yml +++ b/.github/workflows/screenshots.yml @@ -26,8 +26,7 @@ jobs: - name: Install dev dependencies run: | python -m pip install --upgrade pip - pip install -U -r dev-requirements.txt - pip install -e . 
+ pip install -e .[dev] - name: Regenerate screenshots run: | pytest -v --regenerate-screenshots -k test_examples_screenshots examples From 6fc694230073b01e81019f582f565bd45a2f78f6 Mon Sep 17 00:00:00 2001 From: Korijn van Golen Date: Fri, 5 Jan 2024 11:07:19 +0100 Subject: [PATCH 17/20] make example testing compatible with shaderoty --- examples/shadertoy_star.py | 1 + examples/tests/test_examples.py | 2 +- pyproject.toml | 1 + setup.cfg | 1 + shadertoy/__init__.py | 2 +- shadertoy/shadertoy.py | 7 ++++++- 6 files changed, 11 insertions(+), 3 deletions(-) diff --git a/examples/shadertoy_star.py b/examples/shadertoy_star.py index 780e6f9..bcd1556 100644 --- a/examples/shadertoy_star.py +++ b/examples/shadertoy_star.py @@ -1,3 +1,4 @@ +# test_example = true from shadertoy import Shadertoy shader_code = """ diff --git a/examples/tests/test_examples.py b/examples/tests/test_examples.py index 7633295..3a9c586 100644 --- a/examples/tests/test_examples.py +++ b/examples/tests/test_examples.py @@ -87,7 +87,7 @@ def unload_module(): request.addfinalizer(unload_module) # render a frame - img = np.asarray(example.canvas.draw()) + img = np.asarray(example.shader.snapshot()) # check if _something_ was rendered assert img is not None and img.size > 0 diff --git a/pyproject.toml b/pyproject.toml index 1549f6a..214e367 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,7 @@ dev = [ "wheel", "setuptools", "twine", + "glfw", ] [tool.setuptools.dynamic] diff --git a/setup.cfg b/setup.cfg index 2fb0efc..9a45c97 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,6 +15,7 @@ extend-ignore = E501, E203, B006, B007, D per-file-ignores = examples/*.py: F821,F722 + shadertoy/__init__.py: F401 [coverage:report] diff --git a/shadertoy/__init__.py b/shadertoy/__init__.py index 8b3c589..9a7b3ea 100644 --- a/shadertoy/__init__.py +++ b/shadertoy/__init__.py @@ -1,4 +1,4 @@ -from .shadertoy import Shadertoy # noqa: F401,F403 +from .shadertoy import Shadertoy, ShadertoyChannel __version__ = "0.1.0" diff --git a/shadertoy/shadertoy.py b/shadertoy/shadertoy.py index d475c35..590f012 100644 --- a/shadertoy/shadertoy.py +++ b/shadertoy/shadertoy.py @@ -1,6 +1,7 @@ import time import ctypes import collections +import os import wgpu from wgpu.gui.auto import WgpuCanvas, run @@ -341,7 +342,7 @@ class Shadertoy: # todo: support multiple render passes (`i_channel0`, `i_channel1`, etc.) 
def __init__( - self, shader_code, resolution=(800, 450), offscreen=False, inputs=[] + self, shader_code, resolution=(800, 450), offscreen=None, inputs=[] ) -> None: self._uniform_data = UniformArray( ("mouse", "f", 4), @@ -356,6 +357,10 @@ def __init__( self._shader_code = shader_code self._uniform_data["resolution"] = resolution + (1,) + # if no explicit offscreen option was given + # inherit wgpu-py force offscreen option + if offscreen is None and os.environ.get("WGPU_FORCE_OFFSCREEN") == "true": + offscreen = True self._offscreen = offscreen if len(inputs) > 4: From d379089e49934c106db846e9d039735cb444f6b2 Mon Sep 17 00:00:00 2001 From: Korijn van Golen Date: Fri, 5 Jan 2024 11:12:25 +0100 Subject: [PATCH 18/20] mock all time invocations in test suite --- examples/tests/test_examples.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/examples/tests/test_examples.py b/examples/tests/test_examples.py index 3a9c586..c292824 100644 --- a/examples/tests/test_examples.py +++ b/examples/tests/test_examples.py @@ -6,6 +6,7 @@ import importlib import runpy import sys +import time from unittest.mock import patch import imageio.v2 as imageio @@ -59,8 +60,12 @@ def force_offscreen(): def mock_time(): """Some examples use time to animate. Fix the return value for repeatable output.""" - with patch("time.time") as time_mock: - time_mock.return_value = 1.23456 + with patch("time.time") as time_mock, patch( + "time.perf_counter" + ) as perf_counter_mock, patch("time.localtime") as localtime_mock: + time_mock.return_value = 1704449357.71442 + perf_counter_mock.return_value = 6036.9424436 + localtime_mock.return_value = time.struct_time((2024, 1, 5, 11, 9, 25, 4, 5, 0)) yield From 6558b269a1928b802ed52afa8efcfc868881d028 Mon Sep 17 00:00:00 2001 From: Korijn van Golen Date: Fri, 5 Jan 2024 11:14:31 +0100 Subject: [PATCH 19/20] add reference screenshot for test --- examples/screenshots/shadertoy_star.png | Bin 0 -> 525969 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 examples/screenshots/shadertoy_star.png diff --git a/examples/screenshots/shadertoy_star.png b/examples/screenshots/shadertoy_star.png new file mode 100644 index 0000000000000000000000000000000000000000..2a822e01945fc50a6680c66b37253dfd401e71e7 GIT binary patch literal 525969 zcmV)2K+M01P)0f{es z;xmyzM3Hzni4X`&MhOOoIB^j~q)?C)k?q(nyI%KJ)qS0N&tpH;m3aZ=R~9v2oM5+z-caAmX+EXhyX%K2_YZ| ztu+9;{Tl)xh(HJsq$48w=Md;3q!uOIT!8TG znd>XWNQ#jd0?nRdx91&1*jgn#d!Dx?^L|^?|uAzyPy4M1;+D^A?iG_dyX*@?0qE_dORZ1AP8MLKDzn{nhm!;I~|K) zV+_IWz1y{h03}7%+6Y1PGkpETNbjBAx(i195CbJ;LeM!f-WQVX0XBC-KtdqItiM8LzWX}z^i7uKN#7R(DNmeUe~IblC7*xm zSCQUmz3cef^Ys119w6Pubj%a!fRI9@+4ZED2vP4_=epiIzG{L^?-1;>!dSmux_&M? zPsP@>uKi$hsS`t>HeLJ4)}W4sz83-^MncTQRG@E2uLOkFI$b(RCyzdxkaR5*ZC%;@ z+nmz%U34AyHrA~_soKT%gQDv_C0^X#u+@#r}i7tZlZl4^9 zvDkjwK$_jFkb~ZD%EiW7uhrL^Ut93!l61Z07EF zS#B1J-HV&(?64U@ z%GvfE(e+Iy=>eb-ZJpM<3IJ_xNhokO+`lkHt1otz>i zWdOR(b)CPAGr4zR&Y5M~Y!A`%T7VRjo>Az&dB|D!#u%A%=EE0v+?|emcK^Wpr!xq& zF4W$2pR@fj+4JpP_tq{N6V=uzDY4Z`ijlkHfz~@)ZFKYu_x)8wxH}%1r-{ewg(*e* z4vlYgEkcZeLn#(-Ve`GS*2bI@Q_j4+dqLZ-beq#D286(}RS1z%yi%7gLNR zoiePg(*%xLpIsMp-wuhJDKRJABbM6OdMAg#lr@eCG13GMIWgzVY0^2fG<}Z#*(F78 zN@gmBwdvV=-8z*BF(Y;5nBaEKOi}mwWveW;@!@geajm+RM4$P~xga8Z;qFASXZX?S zz_;JsbAMg9nKHMBnJ$4BB~W7I?sQ_FW=;uy^b4=x!xwOO2U2w)whCYTNDuRMgI*y< zKLQ~qIL`3;7N!iBMgLs{4zq2OQQvbq0C&I@T7~oqm~G~sfCI1q71)3V6#Jsb2`)h4VtaEacNc=lFHu-DM$& z&h^Jt z!G0H^S|A%k)N2hIP&NW(n#cS6`ZYk*`_e;KgOOAU03{b4pgdW! 
zvyyH>-PZJw1t=+zb0)?#!UG}D1e#q}OnM!`=0-}{5(V8%V}O(r9o?`9nhb>)d8!)3 zMGIy%hzdwZ8t~cYrKFqToHc-L7DP#~pf*Ih1^k4-;V|oW*klS82wDnkxK@zJ{|~ch%`zm zq&!i|fndQ^h{@(j1az%63m7#ivEgp`h|f&nvQy3J~u7UMLI|& z1P!+Ge9*uxSYjrY0N8z`X!mIeaP5to<2(}g5CXYOI-VAg>c3fFSU1{s;ri}ZS=TEu zMq-MToS|2)4{un{@Aq{eVnMOzz%qha4^VxN2mz*&b#B_63Ng_wK=aQFq5+N|e0VyL zEV#v=a9{)n(UKP(V_mzRTY&Bft&0WFNfTrdThA77p#enjfWr12(78U6?+`7Rwj{7M zq}P!Y_$!ELkkA{oZUiiucsBoXN*rt*#gr+f@X!5o|4Y95r7s!y;Ir26RmW#*o>1uZ zGy#bFI*k!Rr}wIfj}L_iTh~N-+bXShy7)cobwyj3ga|QGy9PPUvr2o&V$WF50>*bl z+dET=oNVoybc?k!{;X z(&veFj)5r!rWnXc0T}T$8#G~3U<7iW=p;hSl$>~ZoM=s-Yl@om1xt3jow03O=+fD$ zo;S5_^j0kq)N4#ZlZ#R&1>IsE`&Ul-=O%&6=82%~ds&mvce%3E#&Mc-p2f(EtyPXCA*~S+dTV^=;lg>{IOfEQ zsc1qbol_|+t+Ce5At%0id(-cMGrYENKwfHdt4f${x6jKuK^jLu;_Cx_6}r zuWsQq!ACFQ!`FsJX8Yy99BA@!KogLHCK?^M0bZa9%F+HU+t1|vmo&IJz?VOQFTIAB zN4HTa~lM&l*w#&-n(?adSsbmf%^YVD$W|}ljnlvGr zbK=Ee;_fi>l~=dAJ}urnua&2@+WDbzQ5PWxsGEY%=e6>*RMy%oHqo;^p)tm3p7m#9 zh)B|4z1FJ1fCaUVxM0Dq2PiSvf;MDf1cAVm6I*Mf5UBDDeGH1GhYb78M`U%|z)+QdF48bi)1c@Pg0#kRIn#1x zSr+R1w>pRe31lp~-WpLb#HX>1Kp~7^#lmO{5W60VcDPr|DiO!N3@!G6*Mq{g(dwoM z)6JKu=TCLx8wa~>A{ID@n4TSSaRk3%Af1kW>i22Pq5t;$b9+~W%%4ZV0z2FIL^KEy zQEbe^nh=6+!~{x!wZYbOoHZZ^kv4t;+t9l-dUfR74?#_CLP&(5Nxt4wptWk%EU~JK z-9K9S2*E1>!Nx%N!pqmZeR$Bh8WW@gAyi7qtk(7BYRc8}IdX?TAngfrif0+7w+6F%p^v zB0d@#IaI18e@{yt&-#!e=iW!9Md+3=C%x+KNf=x$un1`7#nEaZAQ7gVts)l3nEj9b znSX}svRI|q?$%IY&jBDPI_?LF=+TIXRj=$G=mZL?{x7|_;mbMkz57Q*;ASe^9cH##`PnbM=Ha?ltoIm634tvXn>i=X@h z@Y%QZ|67A(RgrQ8UIA~k3W5MP6yb8!1gdwK4-h-NX`1VZCReRCrkr7#DIxL6n|B0+V+tITCYWmz ze*A-%^e#MY3bxHz3j~Lfd3$-bKU;kMg}WO*Jzu$saA~@ib%7ot>$(}JAhzZbQ%WpN zL4QIZ4~#S>_7b~+R2G0dKbVr0U7IB}RXottjbv@QNWFxkn?NGj=Ik4+qHKMjE+K@0 zRwgTZJJBEpVck|k0km3!p(oJE5-?Cx%k^k5z0)u(Fcm-5wS@0MWDJ38T{UP!=`;}y zh!hp?mdD1Uw6|ZFZf#Arl$3rPA>i!}J?k^YWGej$dM2el16on~S zQA-RB4zbxrVh6QtLOy>KeTk4VsT`>NGRECe7Y{7g*63q!bRxwR26`#LR3;J)X^62wI4~rOefeRYw2cwhdS};0+7=5KToa$vM2oUL(9t8_jViaEH z%u{VFUH>)O;fV#`0PDJra5(0I)Qv&Mu>lhiQciEqakDjx1u7y2_<$ya&&Dv= zJ^R3VwQr~HDe}Bkqet1AZjSK9y%M8Tr$)3=s0S#%GjxGp`|E#o1Yl0J#DXaQAAiP5 zBlHW^{Xi*E`hK0~X`uaD6^S%^#(sd;?&$4iOdKJJQY)o9;mv{mXM^($usClOf71wX zM2u26mh4!=de{2NDW?GvIhq}V(R6ZQ+ZNuet34~RCHCxD#=UylYDIL~D`;f&D@B7f zG4UkX14*wIA^ZO7pRH>kT9AtExsH+<9V(^~{QBBimy4buHdnGyYh%)C<~$p1b=yYZ zn{rWfUplErLu3Oz6eM=KYy=?YnUDj$t{{~zMy(lVozCm(6=fTDrRxNmfmoX*ArWkj zMgus%K$msh#hQ7^Gwg*u4l1P zsiUu-m@E(z=1{q>RqxOCgZuCO8bP#r(ya2J`#NN!_x9eYmxmD>ZA~aK5`%(F&{?-i z@4_ODt!q+0=gi$aF_p}=Z7j9xSugO?g814ybBsLIy>gH3C7M7Ak4q&7*xH#upd2Qu z2(L~je(><9)eWnt)h--!W}XA@mW^xg9EySfuTKYylM|<$IoD3?!nUofT`L(M-QMu_ z;gR#I==o(?Ii&-~tQCPvZT$UT`H)}#{2iY?o>^O?Bn8I0&Bd*CCNxQ1Yv-5~Kl0*+ zFW(*^N394Xt7KfR@Gd|pN?ST#p>CRpT&`LbIZXOq5%~5uv@#MBSszz0HrsgNWMb{vGR7gZ1L}JYJwvGeXsR}77njO(YvNfka?c?12FbjIrIog0|z!}(s zSw$+eWZtmDC??&QIwR`>MTs2==|BoHs7Y9W*=(cg(a}Drwjqnaah_=+Y|Fy0yuJS+ z#Cs}*Rx9&VShkH6qoMO+d`qk1*e2gmlovy@efY5Jx||%W7ccS4ss`0f${Wtzwx#7c?1$=F0e4`nzUg{1+RXf&^#OD~=n8rs+OCz0N`Y>QRJngz;$h2jdP8T#@JjSB&z z8MKi!xK_XbIBJs-Ob5H?XbFNn3`OdKZfL+jokIv(IY5k}qVzdWDByWj2As}9&_h&O zA87MP$Wf|^hz#9JF!VVF-DratWrt>K!IPRk){FE`^Lxfh@frFLaRgH_MLXm*Akze2G-&fA!LG?sO?aj&Fgl;_ zLjen}b@`A|Fz&Y^_+r)5WP$#3up@%bVf^tbavwv0luVnmqj-vN_7SA|poz^tt4(WJswo~ zE8BDEmShN`V=yuYLk|N;uLHyhG1z?BD;vpK3N{qI%MWQU6bBG`=p-s)^r#dAAtj1^ zx8Jkt<8;m_NFlXx$TDaz0Y!i%_}mLf+eQKzVkD%4o)hh?S_@QD6JlXH+(KJ40p1p3 z)V;>XDp(Q_qt2t%Dq4uJ$*u#M=2>+gF%b}2NTzXUmV7BSs2N>#UoSc~$@ukNor)<@ zO3}I8gYMIIT}!RHcF}}oT~|GiW7dQ_CWxKT6@Aw=)a>w2`t!m#r(J6qLg34{N8Vi5 z0S)06hTo`Gt0dQS}x>cjDbruH({aQjG1=ag#@}Yb2 zx@{0N(QjSo_K8OJ_n)Fo8OJz2c-+x}Zj)zER%89i$)?PVIMdwGfJjB5DFhmw 
zn|V@-p6zd4;9=c3NqDwpd{TpN>aMr()H}v20dS^hXEmD2UoWsKsVViHV6e7f^KvM z&8f%^K+AV2M?=&qAthocNUtR6lv4QMgAaN4?yUtMQK>g}m|~lh2cM4k`DUi{kUhxJ z>(Pf%pP`HEwi4LI;*PF0D+~GOu1!Upx@pRwc!_kPBYPoQvZF%Z>$;FqwoTU%B2;`P zZ6hFDFAoDLE%QXL8$bIKKhAG_=et~2qn0EiF7=9|xFL)?RrG5F+npw!aAEf^JD(2a z-%H)-T^C}sgJdt|yI>dPqW5O;WMDJIMNIq^fEWwCSEGe!z}4Gkg3M0ukPH=6*wlmn zIFM4Y&AGL1No$Ah2(G>y!8(=1rD}4PQlXTIV3YYYCEl%0kMK&sJ{G?5da|nlPao(a zy$v+~Z$hDiR-1ARtm~qwk5lDZgOCU`O{NIXXfpcQ8jyAdp}`hov}8P)2$TFVA zdCw9cU`GHw0JdP)*Qj!MlhU0mpxkQZxp@0I%J2V8Wu~pyK{4>}ETtfu&R;iK=;Gu= z7j(;5SB6g5n6oD$!UzC%eaD_$#J=b3rY8Cv_0ZBa zmUAXrl_p?Sm)6NC8DOVcw-7XOSk?`qRVKQ^6DbuKe;@O<83$V|*`*SLkE$lun6?VSv**5>tted# zy(V#Cxjv_o6PpQnol2SwP>O*;X;BK~p2T$1I(cbWm+dXeL1JgRbX^OY3?*+?CAdOC41df-~@C=s$h`jzNJtXFRmFvGqnYz;Vh+ zlY$VD0CC&LZfQy>3%w5jVXKYFK%-+xBbYxN+4{4pecF{bn|)pik;|%y ziB>$CCK;&^d#A)S_UY@Y09X#H#rUIN{(xWl^npXkT&qx9r9{=%q?|b!$fuQ3)#A)K zb9c;KwhCNTz%M%gBf>{FN8UVL%zOx?ONlTSrIh6qDLHYRGEYn6Qk7!!(P84;y3uzs zuZ{;E*UFpAN{)#`3M4wqrU}hjg|%+1tuw{Ihx5!KB@QKVR|+o}gE@ab8+&^j(d3%IBYn3Eo zNlAv;Dj`EWzM<8P*XZFA?axPk_gDT~{C)pj-sh211rBhC`sdfL(Bx;w1}&GuyWD?&*mfmC*tZ-`y!gV`h%qCg2;JLv?`W+vB`@pi&<>#}J2mNaIA|GD6k)!(eK7=Vph03z znOBEtlu27_>S`6#bwtI)JL02gmgxt?OO2STkcq!yG8Yp68Pikr4${=c1O_bultW*8 zrMAsBO+AR$rIIp8{yRBpySrno<&TxMb_ zBRBz11~h=WRO9OlmUwv*4DX&UF7qc?5>;|Gs$amxcrRanKsKi|fgL%vL`|y-dLVex z63}N~t5s^EYfhrk;xs`{W&p5@#Px@p(cNN77R2cxcARHCL3L4~qH5-b6rTyyLL`{r zx1>A}#MWi-n3JJ~-O$wDjJ9KQW~tTCO1<~qg=MY8>E?$v2iLiTLAQ(JS?OGq9o6^G zGNf<_@-2Aspj1SLXjbR$Gbrv^KpsM%+1f$$9<;g<28FeR5k0nZb(R2rfzDNDUXrJL#>}2!8=U9$$tSJ$#qLp0uAO87Y*0qr$Q_^`j=d5CO z+goGGgj9Inb*?yw(utfmokJ;v@jV~DddcZ{RB=3!tqbmEmUCh*<(VOZg37xqVyA8o zde3zubaj4o4GDmoeR!gCt&PvGi;2<|Jude7*KOmnZ7Le?O~=zJ?#IK4xoD+fGm&z$ z7~;BBF4rroIa*Dnm;;wqppH`+d*Dfm>`29D4aDRScS)b^=a^5PvoVdTPT-VJs zH85pOWZzvD1IjWD)u>G7=vQ0k;kxj}7q`qg@xf{4Sh9|2uEOLW=Og@c==S?}mZ~Q%=Kyt+vLe@89$J z!xL+*>Mn~W+*P1TAeNcA=o(p#o_Tvbaxk-jD(ZrjqwY<|qVp_@R^oE+GAaVmN}-zF zC<^R}MeyLw0*w#?dA=bX?^xdbfWBT4I&EFJ|NQ&0HiFJE9DB-}-wFfeq0O+fgcM zqZ;IarY8vqq;#N%iN4(vQ{+-j-(m?A#$iW{yVx5c1*=}^1kYtwfvpATDMq>x;~h;D z3Al@hF8&?0R(e|_vT8!rg>88-wA0aZ<;IGrePIic5Z&o4jNrr*Ck4k8!PQCAEXXh; z!-2-5)e_>Mf;`>7Gv2x(ubN%x{$fd+^ge=sT_5wTS~G%c4chR;us7W9^|>%KS|uyO zutMGtuI7@V6_G^iLd=?|q%@fWkS8rlD*$0@J_oh3V(C4_1ar-V_bOW4gUV7eeO+;6 z(RCobF0`s{SHK7^yWM|EQ4{K*RWG;q4alxVuvgA%5vmScIxpRQD8)D`U2c);JbuCF zgCUZ#TR=x3<%wQ*QMTapL8tiyyLJ~J_hDiY?a4h4oRS5#ZB*ytGi{5m zY2DkV!-=pi+nZ-4J&Vo>Yn7$Ye}UL0rMpRfAdLNMu0ptp^ZGyrvLTCemV!e&8sv}flp z5v$PHHA?S8uXBF9N6@69t(UP+R0E)L$_PT+RzeJ_3kza^m(CM+S#s?_NRjWIujB|f zha>MU3o#$;J!+f0?|r`i1OsRE8iP~Wf>n!Brq|WxvR0;E%`>0AyVt+VYNio`(S`k7 z6~?t~RcVGH1P&!BSOmD{1T<#CE9aq`A&CqsUkb3Ao0lg_4&tSh_~-wt|A}Y1Os_QB zq`{Kg5Seo3a=q$)9=vj<$rnMz=DljQ_xFGGtGs=G&t>!yj@1^*DpAu<^_+=KD^ zdnCrroHdx=`mPI#an~YUw}mdkb=}5Z)uk&9D5yT~vg%$I4S1>BM$Uz)B(_aWX|~qQ zEJClTih>4+kaM;w3ByrrO1lcMi4a>Srbv!SlO1z{%TX&yAD#|;_OuX<-LO2p<4{x! zna}|K)#=2$r!%!}tf8t$Q%a~oNT+uF{6k4RT^C9zY)yD|IuL-TW#zmoZEvlOn^O4v z>C8=vd~(@%xGvnzGmqDWH=CNFXmz}^hCm2n)g3tINb8vv14nbtI_AtxDLk%~^VayW z4_>gf&cm`WMaVR2h+J#scFO$Jmp=0(;;RjDMMP zVXMso3JB%EkN>Cs34Z1O_CFsF))7wZ!FSuT1apBh?L&B0`N%OML4U6zs!gyMeCR@! 
zr@b1J4%F?k^SuLRhu2u{zcm7$5TaEJOzets&|^>(>9(jn-lZy{Q_)TSx;1|KzwwKF zw3pORQe=?9Mo%Td9{^hv+vr&pcXB4PErP%(|%=jZA}B4Pb)V zc!n5g?TW}1z$z|Q{xXEh^(Gql>n57bohn8Zb=GYi0lA74cTuN4ZxiF9x9rPFfdd)( zi*6j>fF`AaR>|CFK3WAy`Q3>qAheMjU|N~MNv6@dBlF-6F9QwNNlKn?bS#$(^g4Xh zpO3Sb`R%AcgM=`?Yl^V;hW1~`78C?(T{VFBMPZc?NAg{C7A>1sUw9coYsz_Wi3LyG z6EPM-D&*d29(?tVvaXZ{@Ws>2_aC1K=DiGhn$F*)0olUGz8LR*p(j9 z#~fj=5TTrG7XyneG`FC9PCHV>u32verBSqIu5w*aQ-CmnSp_IuN5T(1hY)u938KMb z8Yrjrb+JuK1>U5t9d3PFl5LwQEYjg~^`ai_IU*wN0IEYOS_^>ezTMk7SNxF=#^B$J~J+`h4D* zBT7zQZGA=;9f6|q_fu9R*b@ggc93WD(_da$Y=+pn$w!EhAbC(^o84cw{usL7HfqDb zmCgrq;caa!n^v?+&blA9XL`fdd0ezdN3T~CTW3m1tlJflW*SBZE;LwvG+z>vnDn`-^?tZ?%U-DZBl%0ZZfAgYA79$mlDXMq>fQG*WTwf!>SEH51LM3S!A!5O+3H z%H6VvKI2qqOt+Q;EIlGs68QYQ5Clqq)1>Zs*SbN_ePha*h;Yb>ch{9{YxMED5&|5jiQYRu@!?DEuM6L~ zf8tOQYtTyAWm)*a{R1IHS`%`fI8HCPoIhjEVvb&c7gOOwZt$O6$V=>&h>G_Xo^5aVna??QI4!r&$37y11y|WcbM2}gPi!$*Dv_BPu>ti;IdRsQ{?SM6U>`QD}Xms=2S9| z%c=$iTjhg8;f=+@4=r$*W|AmScsEac@_43+Sf$Yna$4hQSF6TUkF6yrkU;q4TYO^^dAEla*01l-xyy z-Rv7*oQ{gB;!OgL^PLJImk~s1zZDhvbx`4&1uy|I{(pjy326dzx{4_qlB!I9JRFOM*?Dpt787+KoYwKPcfA) zu+zwD@2p*LT3C!(k++n%Io*t&Fu^Ed!N#`PW<6+6Ha|nm{QN)sPxA-=^Zz)xXxaBq z|LH%CctGs05C+Qh9A$N%_0@yd;8l!*#0=v)3vP{^%+)HHEvYB!LZEf0x#+cupth!# z@PwEK6)goFyOavV0&${~Ar?X`e6}<~%%e95I3m5@lOv@g_*O`N^amjXdRLlO$%%QI z^yj+!;&*eIi`sZn&?=dIrq3g9ww5fgK1@oV2<+CJJ0&d)bXdRthkKaFc(D4N-Wvc^ zhtZ^u|9|z>pU`bR>}x~E)M+ppSP#o!UG!rv>Zf?3reoDw8^Z2neH4^#7bq#>?n9a| zYD3dBXm=ju4D}KLnC4l7`4q?B5!^o7DQ)U@W{wl2`Ngx(voUd`-}@ePO*F5YBCReK zANMcX|E3i4<_~dTXG06AclrpJIYK1GR8ByM`j_2pg zs*UkbDi+e%tY3r>_NK1A4T_euh1M>t^=f0u5M}!ubZul5A=+5=R<-Id&YQ?qo7rD_ z|DtNtgTg!?aC&1-y5^Fd2_uNbs#KV8^VxWg$}sv}F($3%A{HcDVr~g!YwCWqwMMIz zT0Ozv&uFKzhMfZpq2Qo6G=6PBCiu%t{$S*c9h#$tM|-qp7AFwKOhm8xy4ESpzJ z?c9qFaQa!_Xmv&U%3KPi6te;K#570NUt;`1&J2hzzXZLaCMRCTOEI=4CL9-7qh)SDpYZ1g%{ zPP)#nTOHNYl9IZcqR;DT-8dDkEZt1Ss8aQ>R@&*E3Gr(edd$>x;QNmk?OQPwjyZD3iPy)O zL($6TM>hvPx;gUdICE1nFAo#1PDgT{c-k78?sX4HK}GeT=gq5I-CIrv zOSbHsIL^eo6O@T^b40erV)Xg1!^zX3ui&2?Eu!Gl7PK*5!gDaK(69z3Q8u~q@ z#L_#0qX{l(wL`>$_Y{@y>VIBp8|Z(GL4~rRBYj0`H9o5B*MlF9dN@2St0kb;ye}n6 z?=)$&CWhDwu@p*-){r4|Jwik|ES~8HgZ1kC;#N190<81Ln-V6 zX6J8%B8+(iiTeLr?}X%exQfI>4C+>;pFlKM6*0S0P)kwpKn~SBnWs(|$z>#@4Cz2? z+emOk0*BLq<$6J+A)=-PDI4WZptTjJd^sgUJ@lg@p#j89AbiWy2D{jb}NUaM_q>3j?1sdd)#P zPv8HhJ!}2Vf8L4}nxuLFEiRbXIUZs%k9-9w+`3mpDJvmHq%|sr)_3E{r!<_-v>Dts zj3E4#jLIdwBXri~I@aTu1D*)TF3VGTUL?hiY!5`~q^zzs1RaCc`v5_j-?Kd{PlWcQ zS1V4{IPZJG!IQon>5MTB@ub)~@N0BK1+T*B^WU*;Mc4PLnBQX#PUH%EWyB7v@hrM| z;14uP_s-VgPy9Fk+x%bt4}XevlRcTYu}c!S~Cr>)%h!@$XXRZ>4v0g;+o7DyK~)iM3RZuV+=!! 
zBQUy^mlO%X?6RfPne#O9rjB5?Cbrfn zDSJdPoEin!NI;1o(p3M`yE#UweRdG90$BA&oL093aZw=D&HA?lV^x^erfpUk(Mm&- zy~?K5EIr2+knF6FAM^cx?|%(#9n^tWeRWag+p^cKg zy16!ibt8mG2|_~AW3S8*6b!E7y-I{8f%|0};CB@vmO!@kcBu{Qd*-Okc&zX=C6}b|HQ3!pmc^3W%?Ly``r`*%O=XJH+uh@&0ejA>rFeCx z6^7Yet*nn5x9CK_+RCi#PF7wa`MdpU_I+l_i`n(Z1TSuM@7tQ9`Rk@SkjthOoDj`^ zce5>CpeYsZyeQZe2z>3;EuTGJ2`tnOce6?%)@`HpiQ~%~(!&`M>!s9%jYtZeNav6u zOAlPOjc)OF43X26d3Afko2RqVW4i*zPd0Z|gq$MNRFun$RVrGgx6M!`83(g86~Xv| z@*8mEIp@sjbVG_Nl5$#ymi%|o^z(x&mxCe-D0aWUCk`aW4^fc@#?kdd?4nV|UZNKa zLH2;#g3}n4KWY46Z@wqSaSot;8rCb)RnQv61ZbF88r^7`=@dN3)tEn90P5WIXGkFX z=Hyvv3^D|a#LSDY|09H0pzqDpl2ur|vLki@F5o+#AFXq4Qyb*I+2H_fyU<%zD?bm) z>{{^6as=pIMWiXwt8<7M&&&fz>B{Y#Z@*#!?mZ|J8`IVm5!LFJcZ$^{ub5EpIkM`N zpa9l+HX3>?#x>?R<9P2s)#k^S@(rJ6VpUW6D~}jZ$Chi ziuSN)o{XBKNyol+q|2~;cFLpcfJSxA=MFyJItHFBctv7xpY5}PbV8misjScRI(i?( z-PnK-MquMqDHjhWLp{7Y)~zx}%p-|&v}`j(I;Q-rVP+T!$Pc0Z?BDJ7sU93_g7KW% z(k)qc(^VbUO0Z7l4se)GiG95xjCJV$|9sxKkRJnUJS$Mwo<>Tl)fwV$VTe<`GxV#H zxXaP_@}9UB4mU5AIvEss_wLW#9b7DFm`u2?$-RoGcf)~TowDt|?LM*n!V%(qAJD2v z6B!lVSSDgjeC;QHM!)4*rCCrq*FHz1{me`$^G=oNoj?1(|KHm4^0i|@x&@Cp1`b*0 z#AFtZPJdPUivlQRny6jRleR7Nx-yvn_%sy`Q(-de)QF0MBRYOqua0hAwa1uXeL0Y! z(F&e8%U-eXqOLt#Yi7wTW3D@r=m}7a!Q9J=o%;$Jb*q?oHQV?2wKmpPhmClOL50Ei z)KW*sQz-Q%-0bV&>0o#pxTodgMGdl611+>j*fLRE@f!%T(98s zvgE8iR>YIdjz~06Ck|~#7r3rfd%^gcBsh*VXj24DxExl{03U8Xo3W7)-mi#e;DoH&iFkJFQhdIwrpQ@|IIcqyRtr(MwKK;q7H*Duu7SxZ&ktCJ?xtbRYcw z!>ZbF<8fKJtSgV#m919RT6uT5!gXOPk=vrc&&}b)XM=qfjkg~G|gPj=W+Pzp_xYDQA!!*@K#}SB$3@pGUu5& z2j&#$>qd<#4C~FR8y%LuSpcHwcL%VtbGEL-JPVlhdXyrutyeT4+$|z8mzqwuPNK@U zRaD9=M-Zg){+oY>wynenttmpXSrFW0uR;YwwH;wok*-k@Y*balx+7P&#{=ifg-w;# zn)u~x&B#=JkaJPjtD)r?fV2e~)*mJ(+VVMqKAY-ji$U!k?B}ScW^0uk71i6UQ+IVe z2-==*2v-5GL}>3XTR^>z14ENxM-By~8tCekB1H&tWFR!rf;ivlJ%N+$s>v%_c~i^A zKyMnz_PVMyV@wugYoe!f(*xRlj@52Sr6uIfn~y;eixg*KKGOR}+s;HH+or+Mc7Csw z2>nzGJ~Y5p`$&&<`r?Oke5{Hf|$H<%$_jMNzPh~c`Ml@F>`Jt+Aijl3URr5AD;;!LB^(<(BD89!A zL&7bP*Ey>cET*jNE4ESPpDp5OkBU$rY2@H|jd_ZPh}h`su@g3Yg(Oyd%g*^KHB zZ*CT0;1oblS;0v=H|#0cJkVsYuBx#}Ay`#P*Of?T8wmdHkhs{MQEgq76#3z%yL%UR z-@j^Aokcdfk6RRb<>ZG<6`m)=MAo_yLtrWyH^C^=OQb$9g$7YSf7{lv|G8Pf0DZ*v zst`xQ7i~X{LBSaTuw>DJiq<>-?_c|tofo^9da`*`&9SMP$T^q7Wm(304I-o2W;S=0 z7!$QtHq%u3-X09_@#JXeLPXJcy&vCe(S8%MjeTf1S4Q2}s&rD<^PzQhdhF;I_!w7v z4i&7wkB-+;wf|I#k=vt+pmPj-`84zPy6QZ#0N?dowR@i|wVxIH{Muct+{gWb(m6{C zR2bA)uNXDk_@K8a=PI%5vE@Zc65(8>I#=t+Co? 
z%_UQejo|dx6e2hCfyZT4I-yiDdiE>^T$HpMS8UZ9eRg48S8k6J_m`EWHg4xxgVvz7 z!lzQGmgv8{Ir91A6SY>p{=o~r|ESi$AD#|$f%DcmC77qekG{C!2agvBk)QhTC6CL> z=jSV*K3@2!H{!QHQBiiust1t*rwI;6Jx@x~gyXE5FxC|-l>;9F-vvHpPdYp?8IbPp z@hkDe{YU)wrrHl{X#fK69#xP&A7C}X{aKj{zxnnNkDp(k4xFpn9M4&eDLyzIczeDo zUE6@$^Sbisy;9@mZ0x9hVM>7}YC7_?Zd_|sh%qHvtt?w*ZE#-K0nWsj$Whq3M$PIp zQ7cd9vzA+0ADj4S0@v-AI8x{+lG)PQp{JCPz`Cv0Bft-Po^h$YGx9E0^h4GE-5qQ& z1SzJ>&;6(WArlftoTFCe!$9A*&5{3IbUe7Ch6HZ4(L&@}+YqdIx9UC+a@~l0aXRM} zBDF&Zins3{@AeB8k#~j?=^!U%%k>-3_H=)xE@k)GI-%SylFD z2)t$jyO@|UhI=I_Fx|c$s4*~*&}0E|6B%4}cQF#<(2EG$y4vRI9NjpO=NN<>^R5#x z!IvMBgEE61O1+PUM&9LKOh`MbL3p1H4Hc6m1=;749WW(DQqfJPwyKTsWEXKpn`2^g zGg)B08q(!T6_VOVEovlE6LjmDn0Q!iU< z>_SY{6B$8oJky3#e1sm7c9@Qef+?lSDxGGdIYhRGVE0~Cqtd%2A7bOZS1Mf~tP~(e z?Jc1(t7h96G~nDe+)_Gta$&%OuYYy*QCBSI%e!vIs|0~PLD4CqRT|fH_&gX+Trum| z^{ztTu`hbE8|?iSp>^9!hvr1N{Z-&?gO4wpA=4OEQdXOx^7BRrdg0^4)k?Uu) zn&=$%Ovwo+b47MwpmjZmu)RZd3*Ps{)-|H@pjS4t2fw>Hs@T}}R;_UM(eQRLOV(;@ zV_DZBwjJl3%^%nMxz@>PhK`max|b`(N6)NzSUvCi(G*o|`P*(&ePR}H#F_^Y43*M^c-4X}d=BPVNr>ZpKZ ziMpbi^)Ausm(c}r){Z8z*(!G4Gi_^nz8?*w=ppm7|Hc0n4O^=rGL?yrSaPS&dCHk4 z!s5VAN_vJk9Ts6ukyB1gF_4q$MSAaifzc|jkyIfT#*3?;PGK$>fv=t>uICFcr_9fO;T6C2 z_JQ}y%4Ms(zbxENg(+w?f7vSExj*wC``Mr1Pzv9D|H%Dy<#E|~dtLebvS`Qi-r)yt zRFqz_RzGe}N@GfsRt`$mN=a+*$y@lrx8dzCYZE{NO5hHqV%-5R`Qa+b@5QU5RhKg= zc7L)z>&Z-QO0ilu?P2oKOQn=0;55T!p>OI+_4U_xgYKS!YIYvi%4~D7i*Pe#q<0>c zjr(=u-F4vy4^MpW{*iBd{+`RaFxzu=Ba+qj@XZ(lp2){%cW}IWg``3g;ks4gXhP_L zi;NI*l%fY+B_xDAxWD?+V4!zV#9m*mt>()uqW|vH z7#FyJ>9;- z=`5fJq+J->s6}{!D(WFn2PpNv4`x4kE72iM!x*POMdAu|3&_>|J)zrWDR7r z-$ol_KN{c-q|QWcWg3_JT!C$aqb!EI6`j zZ_Ts{dy}(Z6&4wl1Lt&W;Gi_HL`X;Nr(pDo%T{rtgCL3qUzSxVb(ke-&^`dL_o*N} z!#$*PKA(*~rq52_zX$q}PHU_7npiKi^-SFsqs_JPtaFZr?78;Gx&=qQPru)kC+&?A z_drmsa*gg4GkJc^-v7cZUAv6uu{jmR6C`%C2!9U2?!gb_l8S!zbRgvkf(hrXDsVa+ zpYf_S5bRxy9_}fL{aJJk52vfJ>$V1Gdc3y}k-JjnED1Hx!-a0+XV?Jrs(7M};4m4apLwlvPv4(W9#&LaMlqZ4)jB#Hx zSg=x!tBr_NJFv=vSv(JFst34+>`JGNUM+%HVB+uhic$06*PgFe(+-mvQHTcRImOXK z!;|VgxmJyZ)9t)zoc2r6>ZCXMJ1;Biy0KMFdY7##t;9hVODYvd?5%Ml-eOisztb0z}pSwM{Y}yjHU=Eg8`n&_}!Bf(6=RKXTt!m}? 
zylIEv^YyAta-|vV(W(z6kA2-M$1%nM!e#3P6bSTfHDivXaV`kLX`Z+}Djg|AojV}}(;8$-DW=`>o>`siwz1WX>$>t+ z|MmZ!QvXDqxN6sPW!Y4Ods!Cf8a%sZakc92Vpjj22)TXuRvXuKLbYN|ryPFeNL$9}5d4FA~tug1s)3Oo+ zoVUhwc&S<%`_)P6_7x*b6TbCu<-Artf4cDgx^k_JcUN<;TC}rv3TSh|E=C{Iri9YF zR%A{yU4-vGoLOq+VcE3%cJI8L3tziC@%lJ(zi984|Ic6hEp4b6)h7DoVd8emO0Qft z9}Mj#p3>gD?>NJ36_CTMLfFSMyuF9VC#^6n8``<~6??~XuSon}ztq`skBNT7{`?m1 zKZpA#Yp^I<`H&7D-ofh|NFDAD#Fr=1VK!Uk6F>36D?a?_Lv0S&J4+KTRd_K^3Z5rh zKLQWS#+&QL{n~h}o%=;y(N1&WI4KzPoMgi3G!eZzyJU%^A>q?$as(-{#0K@_X zVNCqzKHZu~xS-5J-jEatX73v8p3~mhIhbpb^UZzubi%q#Y@+oI4@e1CE;sQzbkz-X zMPGAJp=IwKwyV0YU)DzK^nROa5_wkd6Y|=Cm|+Gln9~i&BUC3 z<_F+tI7JwN&OrZzBWgR!ogCF?X9w$e7@}>olqS1HX8^YIX9Ul7L@eQn#HfPJMkJ#B z9<(AB5#tbhAYbjEElHE8<}?G5(F>(ZGpbV?i10`#qJknGV|o^D;J=BxGwNBd^czAN z;?S)(2pOW==2}qgl@#>;*RE9+QC8nT;9+-=aw?wm!0Y3I`|CQ~dI*u2bv`cFcZzlg z9bW|r4CyruONgrL*+B)bP8mYmmEI@46Z1q**(wG~XA5aU+F~>u6>>jMT(R|7nM|Fp3ZpFApv4Usm1ol;D7*$o!zAR#m6^D~j40di= zbTRuoTj3Uc$@I%-1uMpJAWP5+r@m)YG>W3Vtq}fnFE2= zE3Im>S->4D9mLXks)0Jz98h&DTdf)(d37ddwzg7I zQfQ+sTnV8;{_#S9R1-)u@52W$KY%eo95v#jt+8e0{9=3&mHm3CFHs?&WV14VtO)1)36d9ua+B=Wy#!b;`g>>yY zAJJ#L)JFG8atNH}ne(#nu&hk;iKp|0IVWnX`ZGqMyuU0gRcUciV7or51#cI=bbG_+ zPZy+{eult#Sykr}G@fswRkq7kO-OGJWLxD_3a4peSsPogKq9orvRt{;#>282ZOwa8 z=st6rGS?2XX})d`g=5ZUA3O1IU1(i+eLAq!N-0r$X3Um&%+?d;@v6zlG-&`|)Z~Ff zR#&OJqiQIEu#3*EG2nkBuY$T?C6spc1O!crE~_RW0^Huho3~m8m?yI@?YNHH&xB)+ z9Bz(~HO?<7^7+%56eFLVRcp9ZxS0!E>%5$2I)R6+LyRn|0<>TLnLo_;e)De-?w^!; zWk+#}k;_(j+NuI1Iupre%5ZtQ=a>@N&OF?4N>P|(m4oM-qW{v>uk@cj9Atdsccc!q z$(~!``9Z3`nfpu18tAz>qzF}12p2^w?aEP3?3Se3T~YTF8Hc<8!BF220-dTZGD-Qx zQJUJVw$(aHn?+v_^2 zS|ZIjq z-LoYg0)vu<&D)&SPPg^|LG=aB&9yxowM*2`Dn?6dwq7JemTk3)k_sTP%8CZ@h8D|C zWAPwbgLe<1UH75amaI|LYR}QvpC=m*7NneQP6oseo)o!z@q&kU?{M0ZqTrUq`&ve| zKOx4w30~cBY9B=H4lUJOfq*B|1oFtDWI$l#*4Pbkw|8h|>>)A#*y6Hwu=}LG@Nzv~q!ozhL=SEIe zX*UpHT{jK(Vqoq29(R}zMCn|YE0=AS}Xc ztP8vV|I}aj|FYDkE}CL4YcZ-7Z&z#HDJKvOnxi#A96d(}+#F^C@Nl^*O-qEup7k*$ zN;WIfsCo$39UZ5ly!jBhIUG2zD>3ZXTMaO3y)#W}3Q~I~hltQPubN1er~&wS-T3I{ z#KW>ud*hI^I?pCOcjpvX30zivsB2R}|EVY~M(WC?ZJLzl!XbrWE^yg2mN^#fBk{rQ zk*8Jn>dzi8Mm5vTuJ%p|fjjHDu{Pmg^W{)fr}J>WuvD$!oJygi&n*RYUW6wOIdTk{ zLryG4$IBt}{re|wr@|p8PC2qng;$3Ioxst01Lc^wn`hM?wN80?3r}aA%sFY|@$S*+ zS39*==~rF<{`RQa5U-rX2^&d%Z$;<7^A%Cb*BMPhR^R~h2XNQ5T2njpD}?3?NV~dU z7rm$J6*hGv%PFy4u6+Lah0C__Qy;$IH$Hn0K`S5UCcHk*fSy|~j}wPd2oD!NeYzM3 zmiX4+_)9#kE6cXA_MJ){OJ>f(yX(r<;q_rst1|&d$I023>kEwvh?z%QpF{)sT_t;x?kX$-3QP zv*2`GKe{2sRL}-S^%>t*5s2PY2o(eLsMHqMuv9}W{lpB$OYQdmC8_;g55WRNy<(-X zXk@*tt2)e>OBfjC-L5%hZ2*>H0c=Nyx<9a0;;`xj+lY(u6=(sB477^R5 z-Cc{es^7WwJ;_n;!=IZzS4D;2pD((}<7cJ@#vZs4{Q23-s!SdfHS1wAfZaPdB3020jWq{TgMj81nCuBe z3>p||)j|^=anhFv+tvp)Vu+|6nl`GONY~EXFoI|Sq&4o}y?s_i5D3w#7=|#8K&&IJ zn?189Cqiowy+dkBgs{7db;J^1n{mPz^AQ;C(@U$R79a?U)U>8ysIXT)yf1=333)b) z0@}Z%HrHV>z!XOveSN2_99IeQ!-;lK#IOh3+d`ySqq%N-$PfO?FQ6t9x}&|AGmo1d zU>+z6VF-J>JufWzSKC-8qG`ZH)lW+)b1W092wwS8>t-v#`X{h$B-QG5hL~Q5x7c=Z2Xu_9bvc_Dyowxt~ zW~r7mGdk5fEz9%EF3^_;?XT3Ve~i2T#XyWuEivx|rfH&X+ql-8)q1xRNx5iHA~JxW zWJzhWB!9Cil1KP;TgDmd)vCsB6APM%u5W*vV3bSODc0UicclX1U;qARtgY?(lAvY| zYV^@85y*z9D%e)L_9U2d;=EiTXacRhKp0)W4>^x9_C6%;BzG&a}bYzFeAs36OW<4kLfr*-2X!m(s>2s~V`^sdiiDoMYu z>HW_^*F=tii>R(^5%|*G4Ud~r*6(s=wH^n2|}$6 ziGfcauUu9YpnvK1Fk--UTe&+O`NLm+&0qZPr!*0M{>vZoo%=JFZR71l!M0DHE^Mvy z?y~TQ|GIY;xpI3KiKyl;B*1`bl0uio#6eW{@F|Jy{px*EX1yDBCcB< zG5WXOJu;Wfo2LsUMgG2@`4N8OJD+mh8aH$1ZYrkn3G`NZS~pUPyuB>C7Y$vWTFkel zTg5K%u&lf}U-`n#%u?a9ZVFHeWJeHS55PYB!k0K-t}M$!-L`$R7qxi94v7%7gk8OB z?Vu33SG7`lMj12Zf%2-=ZZnFNqNsJd;_f8M1vSt6;AoCA*f-uxpZ2V z3z0o|Er%Oe&eZzc_E{IDcZeO7p%{>EK`WlP#c`nyQ 
[GIT binary patch: base85-encoded data for the added screenshot PNGs — no editable text content]
z!)PGcdvX&NfA>pHq+3Wt$_vVVfp)XKheE8FY!|7!aJAk&rZJAVi&tGs9>x>eLWdDs^lHyKSb3OJ6{nal2qB;}>UDBT1W;SE&I@DPb{sT!g0UQgjt$TdGtLbylr%mNr{8@rhTjo z0k3x_#*6}cDFSk}A%c__gp}~{%NIn~>~tibmB)zMLD8XX2OCG!59kz}7wm}z?Dr{F zBid&UOLpL252Qe~fE!<^*$84fz#bzzgCk}#6U5FzmffAD|sO(=Zki;1PhIpjRB zuElSVd4LXD%E-}J7IvkO+7W~xFvJUE9zqaOgIwfYO{C~z$2tV{#;@&OHt-N5MqtP) z+$Nb_3K8HR{L=e&p2S&1+B1s45dV(^TfNg`t2<0lp4iWe@I9`}!p~O4*(S>dagI5Q zmuiW=C-P#+Qw#wiuC$XKRy@$Ku7hj5r-az+Nnfi zhKP1PJ6NYx{ao!GSh z5^$53aj$ZeajO*pgxxT?Km`Mx|2Vc!acaMMb4ahOe8? zKl^N)wb=kqUrSzOQg=aMX0t(c?|=LiKq77}SK#w@I$R@l%?i-J@NfOEdB$~1B8|p& ziou377DNH=G86yX91#L=T34LUH>B7A)O_`h%YNX+x?;5=(S_5c_c41#TSK#$cqw(z zb1Vr(ZQZ7UH$>gBl?|mg9JOFO4(z4kVWS?k`4Gb1@xt^TISM{|dd5*Jb_F&Se0DT^ z=kbD1pEt{08!pyqbYqk>a#f?b8}+``ilbH>jRPRPpyWBw@slsl_~iZ(-~9YFzJLFS z&z|XE_12uz9*+aBFFT$~9ahmt=Rjo*5mmuLDg+#@P2or3m)A3i*x zD>Wd_5V#!GfN#d;!)(gE*knhNok@!Q8}FkOY{!n>IvA}U*pCB8EqK~E*?r733K1~s zSyx4=1;_q~(x~C;sx|33lv=UvJD&G~Qr*yqc}i8F3-7lnAYb|FPpAi;=`1v- zTn>Ov=AjUU0U`-i?(ZT;Qz$1cB)0)(JnSc(%bhTS2VlT79Ypq8-Ld9~y_75TS_X73@V(unDB}%x@^Q*<1)Fu}5<|z&H{RdjumvTCx{93isCc zqzG`-_DX=xtHBuJrtaL~0cv(sX5yp}i6i@T(Vi!cDMrpO@$0k1muR)yXTqKrEHQD% zCY^y7lO_;ss7-C=ZsXN-H>#Iimt7r^$^0QgJfUshrTdr&L^34a0PPXImr*GByoezV z7rb-dZ0GYg2X#-q_`yi_-B}8DaCZfMOp%GjTu?K-bJX;;7ez)i^}T)WQ361MRKWKvt~zIA*A*Rm&hzVCv? zpzG)1g}GiO^+Q(`(B4k$F^)v5?2Buk?E=q9Nh z9s*K2J?72562>|WW=|et=DZuO476s2K1jbzBDk2{qQ@&oiq{m`MeEco`GZuQZ^+lN zXUUFStMa*KD|kZSWb6jUp_mpN`*!u7>@y%&XMpd09czM!b4INN-UrDkvEYVT*^VN? zW=&XfmSaR!HO*?OPFH?7iAwOH44_5ReFM z#{i1_9p{wHNF@vrY7oKQ?Jb_3FT-Uro0zrRc{jRQu+K1K6+c5lkU>p?c=vS1Ahj3U z3<$}_W;mVsy}sAHAc{e-1P-J-FUy$Q0fDugruN1QYT&BN2QTo#SRYWu5`e9GG3DKU zYA>QuHXKw)HrQ4HXo&dfum6Mim;UU(ihugQ`j6xO{;d^djJxcpiFMT4pfl1wmfimp zBMzJPJ)H}$_C&A=&mpqwp)@zx&EUY#&V2P}XXi=A^#V@8y#hlz*o`>bkWQzST}s8vyH_}B!)1Fy5Nb$n zmIcR{C*wdtG|Dv#syK949Y-nHOGQ=1P0piWKP|KdmQF__4-3)u-q9Lon#73nvfyUP zHmRL&G(qvW@0?R(6T)9C86V%B@$E0};YJ`KM8uR)%7G|E&tIM>px+KA$4|$BTNBSm zdv3SOf}<<$PA4omS-xHVz- z+if5FHe1&-MHYvrOzkhl=p7XVkyROwgT1FHg137`(2gaN_WIGw7x>|ux9li|fF%XI zcRq1m1_WPwb%z`yzW?xq5CT4Zc*0H2qsZ7AuX)Xxw64xrPz)U7`Dog|5Dc*5d)sS6 zitG%h5b*wK!9!_~yf9|BI;fMyn$JmQ(S4LM0AL44TBlwsrUdlKlnN4XblW2zfB8q% z4{@jRFxlbmBq=_cEyURpzJP`>o1KNp( zVJ6?A&T%$MlT0g65uujA0`?HgMzi3IiLEtoNRwM(b`!{YeD!NdmWVX5HAsNbUsBAPrdNuKuFLKP} zaEkM}c4(pNiE*&PekNZ6+GYY6C=NuRh^Be+LnQ(sBA{6!=A))?k9K7+( zN=|TxbLQRUq#@Wry&x?opzh{G!%4Hyjx|Nxofdq0|2*Pa^TNpvO$E#@lq==EobdYa z>BxZ3MPl(alWheCKoboNDa|^ZGP@ZgR@mnUE6$Qk?S&`lu>YP@vSF}o&?J2x6t2u2 zy29vAe6LGnSLvvIxZ26pDhLLGCHK6CTorgmp|V)hL7?qVeC-4LU%&I2#Ra2*0w}gu zhuyAGoYH1=62T>oGd6PHj5nWL$9DFJ_&~A%pB-tJtgnx2zuc(kxI% z!LL(eB?7#+X1v`Gqx1w6GvRP1#ohVL4r-zRw(U=7hKjeY<7dYVC=oLSusCFU$oH`) zgB?x)sz6o0U4sFMj7K@eA=>Y~xPG7w3uRk>aB5scYaKW1 zic&i+$AP1Dqp~`hU62ElylQTkK4h?4!he^|wjb0BJnRRS<%H5Z_erM>>^XBkIUVxu zbV9MDGlYQidctb9zxz?}yzf|Y!sE8%W?hg%!2RWd)vhncfZKJ& zh8Ua9Ib&HC6P(Y2nbXW^Rm_|)%zKOxck6{*^2gn+8a`ccwrOHy95+O?8lBZ@);kM#pie4K3V6TApU9w0zuhnKP5Vc zJ(|PWpZin)82-<{_;>L)zVj*euK4`n(P9!z|Hw19DK(c%Oc0^%^HG}(GURa0+P(V} zaM*isS{z{Xx)8CfCq6IJD|swQk?(9ti9o99JDfPC=L zSJmk{ty5UY{Fq5_ywYLBl#$PObcrd4@s8cUdInCh1(??pnu*$ni1jM*a4G;iF?8xm zVB(TeWfqtlUGsI=aFfSV706wjahmbqb7L}6>H_cq0r zcCb!vyl}Fp9orCdSFvgqKDZ^V z==Crp>?*kgM$rPy#59;d)%nU@uPixr(p|g=aV~7If8R|w<-el6*I^@RVU=P`y-J?= zwBF^AJ6i4C7L4n4^w&EueqA%+ks%WGB8%K%j6~0qXQG}{&@3V1}CV zctYrfT1<)VZ?*EkJ$4+mLL@R_wqm3h?>dEuWm(|%(1-D#YbnNUbbW~z+gFK)B_pNQ zp<#k`%t8o2ykQ~Nk6|GVOSmV`(+6Afzl~z6v)Co~R!0JD+Zpa-AI+zpR*aSuOB|>> zgxp}a?9{7u8mwhG08yO)fYY3EW;Z07Uci%QUpM}F{yzQQ)3O2~!;@qVs5_=G+}Dx% zy3%oTF=Zwr+C~S)ztGcB9%4+`h}zFVX-3=|AV^FRu={O3P%fXLbsM5@*DlP0+arQl 
zC&a06YDsi}#KpQy)xaLJBfUa|ecy}(=L(>RM&FVU5%bA}o^8YhcNha46q?rRe*f@f zNvVN@J#igt)%nFN7@a2Pd$-y94E_!*266tA51{3GXr@K#>Tyy>kvah>9SLdh!q7f1 znlsOsd(fKA{}9^~0TF`WmeTf@S}gT>O=_QgyDm(KEQ&_Aa)Z-@#nyn)x8U8PNx=&- z;h)(r$GxqIiP}*cQc6Y@;?JiThpxh7UGJ{3=|}*)kLV=CZV9#s zsTeFieEsOy{|dA71<~fF0df5@!txQyPWWvErJ=h0me1&I=I8y^{+W>)6NG7&PyKpgleMVV(UbJcjk3C zi{rXL^C|H;FC(#QeTu-l6)sE8SLd|@KW5lr1$1lF>B#sTBFnIMl0t-p1*h{Z-rm2) zB%#J*0Gi;n-z1(rUw?VVufBOivU{z)VKu#yhuc%mxLr>;TF1@0 z;1mPiUUrm5!S{1x!g^y=y*1g8z233rj3~f!sdzlNN24|tVx2QFs?m_w81doV*}6#l z*=*geHD}ze3+Y&;qL@}81+j#f^K!Pz{hR}V-5kdm;vW$bkht$zSnbmT$0w_jK z2w#>39fJMxJW%6mZQ7VmSe6Cov;x&WEk*rc{jrbV$9F#YV$f(*ft#E-R8C~@d|P)! z!a3IrI_pZoQ_c%6mu<9yW1zL3c1P7Q4kdSN5l;;C8sT;!C9T}_>QHyY6jq>+a^Mfy zlC!Yb`>W>kVI1@rhmOY+h&Wtw>>9KilJ6auh%qf&z4V#lU3XTA;sQQT@?FSm6|f;r zfu}eFzfF?%%?%Ml3hKl!by5kun02IC;RvIZfZ_3G8fTSkCl zV(Rw9*Gb&`buCEtIRpUA5>+5+#rob)qVHc|xp|55^crP*gtmeypq|AK{0Un!3e}Nh z&yJz*D@)k~fcm9PQ?_V(VW*LP%$b05>_{ zX+Op_@_L&k+eA`bw=^hUpdm(%$*eK+c%D(ngANw(pN;eG*X<{W0%K69w#GR(1~RGf z%-v;&vC(H3bSv2S89q(i-$JR)_|(CuPq=#izJ{mX#n)g?5&OYGalz320f3n;JnT6; zU8b~Vz>t5>EP^?^MaLTRl`OuLcCCx#y+)7Uz(~$HTV~SZ_=nuS zmA_Ft*QNtf0MHS5PDnyrZh_;Aapvb3@#^-B=WQPaUAGQKx3d_SbFbVAnKZ;`u`>XH zL)4xRf-~G-z!HMjdV|A0&IU}=mJ#=j5g-N}gyn>OY@7T$k`*w4X=)QC0G?)qWdZ69QG zWhYm(VF`-uIE*sPF5{XqVqBn-kz)qXa5;9gxB%?or8d$2CQBV8YPgW<&02C2o#>%I;=sjr+<9oYI+} zXy^B~RcAKpKX|*S`#PIFNyA}Z?}C6OoW{s3O7&Tb3EmYtyC<_0w1FwLMq!SFBazt{6u`Q zW;~RJQVz=%+W>`@7_fKhsc)7AF+~LGG^QXd1{jjmp_Ujr%0iK4MXemD!fN_Tlsu5H zt7OmM-btKc?r zmW;aP;>TpZ;Htns`@4T~ya&PT;JD{JpE3XZ$i9T#iWGR9KrrS5+d!7Lo>t?tP z#vMkwp~X&Ta_-*fASs<+v^SgrI}v_Bsq@!?p+9jxswW}A62p9cfsob_*xhBz*9f~4 zp2#wp8P_Rvq=3zr@t^hgG3j>GE1d~WLQ1y?k__eIn)7)Hsu&iqju0QMSi;xG8gsOb z>oktU5*DDlWk9DTSpvm1wMHxCp;Ht;lH$37o#w^+s1P)0ucQ0m%|$1@d7|aQLD*+| zQtVWtak|4?pZs%__Kn@2t^H=}6#>cU(x7ha~Pvvd+VJv7(0x`69Aco z$8h!%nT`jT1p~C8T8B56U&3-8Om-|ux40{)i?63273Vl9Zzp0HSB_wS$C#zV0u z?8QB1ypq5HS0SdW0+8!_+G@){?`QGThrKd*A86D}`>1j@6@3p@M&5ClK= z(?2)(+OXK9P&^-%)D`>XV9VHh3RI)4^bYJ*@pLpx%55EmgyrsQh|3F3cD{N(^Gw7@ zgq!bWkk�DgO`+Pl_cn0EBy=LT15vT@A2MLn2*0Btz^SZQnL4(5|~xzBf|F8*g;`IJI|S7y!)nHx2<#L#ZlSOa%;+ZL z$ROQj1OZ+cx6(Q48K~2aZ0^+%KM3tX;MX9ybpBJ6a!W{b(I;FFW2| zHmC{N6`UFOR4Sry(*46_L+Oh1>5Ol`dBRg^W8aP3F`sag6J9LYtfmVBWOVPMy`xiR zQQM3PS$Z=KkIhOkN3X3m?4yh9dS|g1KlQ2s1RH9lk-r`MiTsEJzxlX0=OdrBE8uZVd;NEwJ3!I0|VKxC*VtO{hcnTJNhPgGCNCW6C` z%5dGmY~6FR$SNT^Hm` zyR#J25VDaAnkY`Tv+#faE8jrx4Qr-j*zn7bbgBsNT+vfJn2!@F~>xezTbUxMm!0g!AWFxzJk$2Q~~scwmkv8VL9K9 zWJG&68c-kWga`O?HM0$>QWBfVrPzQXPG|U3HssQ;*=tyu4(@6Lr$cY`0 zATpebj^h~O>}Uv$7i(h0o}-63C0jS#8<+iHVy8NiULOXfz*tWmv)~SCfrJbfHj8i= znh#hr=iNa~>&pE(EMqZ+T`F@BArt&)i0mxp-C~;0UkCCR1n?>8=OhTBql=)aYq|LB zd|!AH-vRsl2nigVmScJ+j?j^)4{FhMA?8Un6JFSGGKH3BTt5E}E{|`ZtpR-&2e8C# z3}^F_;i@2kb_94llQ9csIz3rdc9oEMFrEd3#kQD1n`E;Pv;=QSiOID)DUC#4tiaWQ z{M#4sP&WZ?>y4U~-ru2MzN2YU4*VkO>UYndb85{w&tapbb-XPNdxs^CVYs|_rw(Ak z+-+dr01a#(d9tiBJ8F!3VXV_AuBUldZwF!VB*D!JFaRE3W3EBiDn+Y4$$J!rnD3Sp z7?M)Gv*G7TX?;9fKRbMnk**?Rtx%HzJbdq)gX#uL%GZ>K6IbiApsq%L>_($gxV|C) zlHl${zOI%Ok@N!S91QqVFHYk={CxENy5@}2>CBF4@JKtKmB(gZtM2C4!EOQ1%tYOz z?dml}q!977+moHe7}OuvFggI_i9P>phxR#v*+D3bBBDXM14d@?dNnwAI`i>;ta-6B zicUVh@BI4#;JZ&7ww)qix4l)6O8MT)msoPf&1q#Z&|Pc|n`;n^65Gca?t7SnXCqFh z6I6usSlc&2d=O)EbCVOGT)R*EF`N>mH*EWX=l#H%5?*iYf`8|WH~7Kh1rIyvXV0Z!??OGy z)6@9<`>kLr74P5P;GFqPveT^^wH~__3Z*uzDd29+lvya_Cc8`A8|QV!k_==DwD4DO zFzDqVjjGf-iuULfzZh|J78q3lNh@A`|g*Ah0(S1+-NY6cHJ7Bt6(<#x6FbCPav7)k%G_19toa#xJLu z#gx%%8TXT8#DC|H{~`R@|Jz@o+lM5iNMG?7Bi?&?hY#L+4`2QGBmC%Bzk<7)8@#=L z!1q4+6yN;r5Aem?2Q0Z@KMs_l@6gtAjHpo5IxpmM6gx!C5Wwg{B0x+Fdfh-_CIftF 
zJ7(gd4(8H%;CO@!ry(|DpjYH&L9>cf{P%4Q0Hl0bK0(cb;iJRh#}3L5Z_oJr`7(+? zS15Z6wn-m{gF>O!s^^E#!`9cIM5h2LkvVasGZ+=a*4sez`FGbBcrwByqCqd;!mw$q z6+MJ8*^>nuljiv6ys7PimcZX3qvfp1Hka?K45^R_G}FBHq;mC8JM`xkNe(8=pAtv7&G~+kAGu zsD*_if6nwdAz2>DqE8%R^|cPM18R!BeP0}uny)m0kPqP>Deg+dWiLT~lz zz0)Q&2#rN{b5Uxon`@D4>A=B&^${6BRnEKm=95nB{%&bcvDSHdIxy zE+<;?dXj7Np!kHmfXATf2!Jn2vm_1HY3B1+)iJy_K%oX8AtnSHg&Wj>I-@440gu(_ z#5_|wMqgfLXN!Yht)Q^3-m{_UOvYw62vBMl%A}4dNOgKL;qczwHl1tC2iv*(WeD~R zE#{?^Oq&WcyHB^d_T)I9Zh&?mn;|z6)v; z6_UeP?}PHx?g4Fh-nY>;eE;qiXc4jPklygH)2(fd)av9EhfTaEouzfGiEAmFMi!v% z1oat9jMRmQSl39Y!LVXuuhcEPJfGO{sw^-lu43sE==uW5h=~5y8WDK!heKH#y-_Dmyz0 zv&ijOO(fqs>Cqj6OXWPWqjdwbnd@xFLGWh8V7{jprw6Uw71*qi$37Psd`@T{t))-Mu9^l}@ z5@|b}gVhhg3GoukvuL#JYMa<@b=<$A|3-pbiI|C za3~+(UfU4ex9+nD_$!47iNOTfL{z97h-Sk|^x%BH#g{&OA7B5;AH$#cqyGs0*q{DW z02Tln{@s85-^73Tm;V~R^}Qe9X*=-z{DghqQEQ>6xb|`IxT~1Y3J@7};W#`cM1<2T zpcVA8L3Iu~QbU>HiFJzeP#Pdt8(F$%L3Z$FgA8%^bcmY91XEk&$(<#b>T? zBl4aoyRL?v$7xIHUj74_4wKJ3l0!Re{7@8$SWY(xy>cqC37>)K)CJ5!gmWY!4nvqf-&1yFZrJ0=y**64K#>Zr5}`Qb&e zCG5=-Mv-}CW2IgdP6Isgb){Z}U_1ka01EPb_*WX}W$O)Ut?Lki*?r4!P;wAMVKgA{ zV0DXBg$(Tz9iXo4eWCB%cQmwbtj8;a)v2kFXx(0R+pY!AejV;0rnX0ON`@v!Xx-7x zi3=9_lObjCs$=5AMCZQb9!E@H?%wia_1o^$Bx#SoS$Pr?WE8;7oAgiS;&VFvE*#-EMmhD z`_KmZndW-gxsXS$rGa6vd#Q?B*8Z!d^Q6#4$}> zOk3o;A)1Z_AtHxhARjxA*EHu8BYx)YjKBTG1E}fX8m+CG)`4q??sMk;rV7!Hy;dwC z;N{9G;Z7fcy$h#ZiF5MpT?GI|{pDH*6-+$l9@XJe)y*j^*mZ=+p?*O~jSRL-@q{xu|oF@3rknCEfCovsFjP<3b%Nb}*J&*u80RzN{yB;A+V zAz{IJUGTi`P&0&JG1%w4FzpC1I-#wh6e#4*gE=7h@t^!zeE&P&0(8>iQ($N7N8W!A z-+L?&&~-|*BBG*e#d*!R?3+1-&hz}Rq9z6tXP?iI^#ww`;OTNfZJb&ThzZbbjW;V) z?QBbS&O{R*7t>5FDY5$(BX07-3Gu--M^k4-TX3K>HN6PW#nS4aTzkhv>|ASRII&oF zLxg$ac|CFVc3SYbZO~S5vm~4{X-4n8xWn7$3+FT`J1dKIka7&;`wR~u{ZbWA)JFbiZHE4HFyh;7r}o4;tn9dw`EtMQ0L-%2e#WW( zZnEK);*1YGqt5F}lb~ZmmxylqNRNw4&Wx36lxSG-Y=F`lsgv8=90uy{rlUirZkRC( zePEh4sO8!@vRK_qv6vHBob)++&2`YUN)PsY35FyOh77na$P_3@9hvSL{26E`Fd||W zm^x@D9b)|4NXmpqUuD6eQ1s>rfCU~uM=i@ez4~dqjBijO(7>tdF+|kDmN?I>HZ2@2 zsM&w8!_5y#caI1Nl4+gSD~^4)smwZ(bO`}s_W=t|>#{)7LMJPGMiEvlDA1MF=(1$| z%#Zyj{@6eDPlCk<0G#kA{~P~p{NU3s@Y(CvczV9rXPbKsB3j!GbvFz&P{bXa(jeRj zAKslQ7HR<60qxb$-kGo{?6cXfK&=xU_he^O5>^sOb1PCW9&{8auz4;DAd1sYuDJSR zwu>c0o#OTn?r!ku{o`o&(ai-cI{MYLX3Rp!QE3(wwpp!(s{llsT{>sYH2WN2qV}|6 zGg@y3lZJkK^-0O!Ukz zJClMC5c3UsE6`p^4?38vsf5Al9UYKa8CbI20Bv&;djv|g;zT?NqsO`!%>%uoZ4Uzx z?X4MI%hxpv>0+&Iz<>&~zjj9l=1wvY(?Erem z?YiJ$+qgfx;MX~z625i?)@9)5kzVQa2>TX)mWt8s6A%y~w|#bl2G!6sM%t!5u3Ca; zEF;FF&KN*52n=AMDNPti4U6dE1i?O@mB+h)D=LZYD?vBI5CQu9eZXyo?fYe(9}Z@e z-C~|4L_R6qJ2%1JTJIeX5BK(2R$N>B^L5-z3pW?N7of8P8N}#~s^)GZBe8Q-*lmD= zJ#X*+IaR~IgRj8A1mFtw#c)M}7lo#i*`o+)K)3xw~mDG1Xa$k_`9#~El%>Ai)m?CJaUfDQ5z5#gQKvZk3tH| z({YUb1CU2vu1p)CrY&*M2pC9sSD`~-Q%GD8l|@CcE_MilpcrRDLx7|kXgzS$Z2&Q% zZZB-s%dH`pSi4I^tp|ptktpO9)zXT#XdP%={k~DBypUvv$F!s1{UI>Yfm{toeTInOqdUPGfokuy*_WG!+jW5|@OV_*EE!uVTxW1?Sw&Ew%XC$Xmf&aUcs$DJ>;w@U&CGhhg#LanIHimp zB7y+toKd^t#c9F)UJ(Pu<=2BOd#BUd`{xy(K0M*MRDAT}CALyg%8nq4hi$`6CY}HG z<_=%H`GOjfOwdpaXsL)e3Qb0q7_sC9&-;N|y(7g~yqG{=#0Lv|o~7ohhEYVFV#IxI zIGQ3NKvEnrYYD)m9FQ1rlap!WrduMNQm1y*9sTUE3+75fQL>LqzYdhJ12qzzodPvf*k?!#bkG94 zCn=>C)2Ia8oNw^+KmT?7^B6o(TmH0!v*Cd2u8UmPopA9jb$N;qQEMo1Ma<<3RM}Z`?AEXs8uMuJ0k0&%CgO z4rKzLwrtO>^ZmFJSNASo@95pAh_=34tB5g^mL=qq#$Zk>DPheKeX9)DBBNLuNst{F z*LIq|W+2?yUKsnI@Iz)4ww6RnprtIqI|0zqtIuEIyVr8D1ks-z6IxZDAAwO|zz${+ zB1g$k9PUv;RkeYBmYejB{jh>tnIu9$dX&W$CLw+Ye3qqpM`8RJMa%HnE4Bt~O^kUu zBNAZvkk?Y+&Q|VB1Yx&_+Oq|!4bnT%iv>x=>&HhHP%&}fmd+&7lXwX({I16R!bng@ z@c}n}(DCln-iHE+DFC%XVe}q5kYY?I_G1eV)o$FxTJ;wG)^@}kLv2;p_2`#3>mgU`Otz*d0#ReyY7>ryPUZ-&uJeh>wu7gg^;rZpkOg$XMlH{yDEAugN6xE1A`EOZu$x@EJ4&a 
z2FL8oQoxbaXw}N)*SI%8bq1gaT#;2L#YKzPB@z4 z<;@x8*asuvs1+(0Iu@Uq!}T0c$|Jh?&`1`Uub&-mJ<4s5j`#EjUQ z38C3}3h)jfJ8mhEs`TId*Zu?ium0KpBDyNCU)V*}O1(k_li?dJcw;d)IfxJf&MCv` zG(HQYT3%LrN7*hwinQC+DdsPob3FE9vq{Y1saxmmCMN{t&wN(Ou=w3-M+k}_G*AgC zqbmpbsWK)WU7>ry<6bPTaoq=H7vQ-}JKowm&P&FUB96-LRyImiURFrwJ$-in00};T z(w1t7IAsU^IIaBBI%-$ktt(;(bRF%?70-!aQibBV0jKxBil-lZ8(q0(Z!&e%DTqyc z-*H+pF16$SvW+g%5+nB3pb~Jm1Vk;9vS|fGGt(Ru9UAUr`K&;3o71$zRd$C)w?x92 z<>m3CdDjoXr7;#m0=t#HBL_+wJj*(28=5We6lWq}2r>&|?Ldkt!Y3+w8lzhv{;IeD2j;Pt=PxW%B;X{SyLiK=KrBbBqMWmstVpFBXMb=>)2BgsW;5`}1tQvuyh*YrQGme%L z0dANFbAIswwzr=^`W%+%Iuko-$HAxI%Vsyj7`f`mq9@kIEo_r_FUC&{AWEyjiit4F z6U1}|`VOC3e{CsDJ~6MP8g*zYQ9N02v=~+dbt|cznuvi?jwAx414v8*^;1^MkVs@d z2z4)AJC+b|bj}%p@s>G3*b6?j!_x1|_}k*^>S|9YlL}uewbrqh)e6m!B5Iq416}7d zY)@Rf7Z3>0048Z^<(@LC6;x4<9WGoR;HZWX9ILgZiwVko>bzV1);_6A0BJRDd%Wbu zY+AjTVdvD*J$4TP*ZNp0YsrVAsNahvv2f99Fh-eh@M#FpYRI!08i;t8MTdJHjM^pu zk$>;@2GUBW5Z((7!(GJ_JLd|EU+IXe5`qC5BPE1(b|Jg~^(pJug{dcfK?E_~pp}9p zN8H_<@#z<@%|els#J2r#fEU3M#_kQr9rEmD7UbxN;*>#t z=5^sQsf51Z3~;cN#=#GUiYqR@0Um&uNVIZDoINYS9PnBP)yup5ZpTeJMs12=6{}Md zI0^wvOg-sx3ZM)2J=uP7I^oUpGep486hX4JBNi8&3O4#}A~+|azNMBSS|43s5Ha)t zu1-VSuic^)C}p?ggUROa{R4j#zxe<9%OeIfo2TJ=$>xOz*m3Eq)WMiVDFpB->TY`A zGc&{rIf#yX>!4UwwUa?0Km=^Maz$&F#5CZ&SGRcHeCoRChs3LJOMWZ@sKw-z@;D#7 zyL`$E)^#=0k2;D`C>@4cDOMx%eB<04o2srIK`B@c0a%0B08NbbMewP?=X(Ia+si&; zt#^u?rqjapydO>mPbtv$7K-P6M~V^K_RKhAH3~W~ZjL zW@aA|y$9s5;9;v!Vd3O+L!gQ!Q_mVhzz27?*h;04Tm=96fA^o@eHyjb2M9hjr`hwF zG`ruTJO>UYj*xsp0Nqkc?~qz)iX%W4fd-x}PbOl&S+W78vukk92~8E}Q^IFY8%`d6gDU>r5Q@!u?n!&I4Gkhn;~*J3QjR%FAZ*sezc0MRL6yI7_dNt0j{;9 z&2a+lc*2r0(3zYkO#0NCNa5<;bS_3Ud)zq7rGfW-ULr1~*Z|6er^5FZdPn{Adz30z zu^CdrIVVJP+~kBemmN6-eC6(pH+!MEM9lc`X2s`E&k!h%)=-XvXTC6Z*-J$O@U-nv z0dAKSA(6henI`YPC{oTiio3O02S!N$6cJ?G3(4qaP8Tf7f=WZ2`D-Px;dERFj@)k9 zbz#ZYWelV_!@*xQ>I%f&gm}Pw?Z zS(+6Nen_}AV6*mEZ57%YdaWq?jiHoy`fAm_f@L81ys$o_QD?6piTJ{ZJ$2eh#ufzoET)j=&`OMexkAN~ahno&&v ztvjR_pie-c*|Yayq4bUCRQcK}#tZ{XmZH&w#1hm&kpo8e$SqQjat!w%<{3H$m37dV zK?0k(ZfWeFCV|t!?ccze2Qr=mo#D8KVeV9OJqskd&8t|EW~F|hGnR~4ZHPX>1Jpma&H!U%CMnuGzWTCzxWs?lGYssQc##LQ2TzZ{8YBl^o)r z4cZH&lWN!BaRQt)q^TLf07Qlk#efe|TW!7xY=)D}&gdAx${jWmdg@@(74R|l|IqM3 z@ba|6o%MXMrzaveOJ*Tv0csT07L!`%pwH6UI0!|UY> z|2xRT0`RTz=x1R#jR~R+mU9SE6gIy^vG?i9=8jUym|XV{iEOr=OvN2~u!3aUF3?us z^jrU)&66q>qtDTyC%Sn|Iw6`~EeMf!0cVo%xe*0l68CR%TCDi<#2BvDd&A|}P|7ye zQ4WG61Q=_+h^rC5CAK3>e>&i9X>Lq59-!h)`KCggi%-2LG8j3mJQ(-Y}p z*R3FrTH4LsNKhm}1Lv_k?Hlgz-ykR{^L&pThjnOF6*)w6OzH$L1B-gYa-OZy8g{Q0FK=!Ep;qds4WB>lep=$W(#6kXuBy= zM(Z7WQyK^a;Y4|}KlZbrE8}Rj;#&dBe-dfl;5t8AhnN*F-K07mw+&xBUn~yV)v1ni z`kWIsvviJ@*A7jXvg_{JCIpJGQ%vk+!Zfc2Xur6<#m8Uz3YNSe#fTT@6T*7N=i5Hk z+}o|-x8FQsZ;Gu>0sG6FGZu4LeYCkAhiPY?j~$nN$NgnP>x$=6aXAW}FBcr8Vy_jY zSsvOI-8A4;ft#Gi{F<)H?vkCUDMrLN87;0`aU+motdSfeUag*6V#HZ(6#+o%t)Yix z;!!)eEv|p}n}3hViJ`Hc%=$n)9){$HbwdnVZwRd->IKPY3C_#&gNh4_E>&~}a@vd_ zXs{Noq3Vt3R0;vlmLS9+=(VD%;=E)$?`0MqX+b|O)JZtB&^ub$At@sO6j}jrKB4$# z#nmr6C`0@Zy+1?10Yo`x)N(+yLk%&wYzMyg$*1_Mf8&=?|CN6Q@4bA9Z+_={`0fwC zz_-5p16(fKNOnZ#fIH~q+m>!zUppwCkvs5)@>TPIO5_+s9TM~!n@N$#0Jg@ zp1=Q>D2DFl+!0s&slWcW@PGOJ{{Xh_7|F0W7Ns(&Q9_-HKPR94(Jg6#!o+Wibb25C zc!NHM39|Btt}6k;RisvDC>u3|%f~VHLhO8J2+zem(x&I74`^~=v9Q*jWNj{L# z>ETnF+5uIvRr$T7=1f~!8Lm%03qvizZq?DQX2nezi7(X55bXJJTDF%>{NC24GZ?@A zsz6g}9P(WqmP`mFbYOV;yB=&F;7}d<9A8o1d1NU|^hyF_&!n501D_Qk z0>B-isI=<1f4*?7_kvMOkLP30+Byv#-W{@JRlFRxZpe7XYH^r5xYjmB&(Nu%v+Gs` z%_`a)kSAf98Te4ZL2trDp%i=uPJweuCf`-`=wscgtLUoiu6l2Rb0Dg%L3XZ_6?9M4 zySH(4x_teR+B3Bl3P4R%=^WU?D#tLMKLl8oGiupw>b3$B5R+3fNx^J=(ph2*=GFw9mIX)Q!R(^o=T^q`A8y0j z8v5JM37P~k#2oOho_EmzUguGpoiSJ16aj#l@K%uSSjCDFd-o2qL@SQ24NmBoL^_*- 
zR5O~W^jYLtk>a2V-E@Kh7cApz#%nT=`CjSlQCUrQj$oRqT{q69R+5Nam&uOA*r151&X z*%A<8UDu{#ZpLm&c{+BY*p_t0z|T|z4cv7x`F9CvX;Da{yiF00+ri{BE%@p``p@C- z{^nmnbl|}>Gyrgl0aX>JoN%c-N>e=S7rb0EF16w5psUrxUhvU*#hc4M=$;PJI_Th3 zsjiBgZxE#coyBLV1tCQgTfaSXpd*WaYA>H`(svY$IC8qVL*JiZ^c{Cp)rEL!9M9o)V>38T>lxe5_MBQJ&p+1z zt-}Hp@S?_DtisT3IMpKAaJ`fg;^`I-pMS#n0NU9Zh}lG}9O&b%YsMc-5*)Fyh0a9H zk(g$5quI=-aGu9&5b`12?Z)R&g7x1NObS%@{Gr)XB_*6r{`zv z$ByGTaEvLD4A5mzuR3x{czHhI)B8sVjH9ankPz2b(7vOVC!#GW*raUW<$1+}7Z75| zui3RR(X@ou({q2f5ahfvN$Ye+SyCSU*1+Us6HyL!kVM?9EdH8t{k+gnI|zBkySLO# zt{fpD#Y#s}GY4M5)|IDy-pO$h`%EY#M1+``D7w(w5t!fw+$>=H0IjVLPIUJq$!Rs} z^e)6ke$D1|yKg#zZ-<8(adu68)uZ-{@_5nn7!j{?LUahwM}x&mq)3=6HV&L`Y&`zSV( zIrAw5IVE)1uuP*hKw;ur9keMXH9DTDACOKH0!WSVUyew(tK^udyt%MhF=}Ck2)jj@ zF~b6rMnu}n9PX*M_jvD<&d7J-TW9WdcWiP|`n)M$vvbgBjB&&hU$a*d4d0j}Cb(U3 zur69_4RLuN+7F;TfvZjoZFg`bjKb94BP`B&fiPnZDPgTsn;|iwm&3YL=CEUHfO-qx ze;KL`wfgVbJf4^dP}Rc>RES7=LMgUC9R2TCAW?Ijf*s(K5{8ig2iu$e-#O$eBf{veTmB4y)(ZS81sw1iVju_- z0^=KS)%o+Y-HHZPl~xDi%442Wo=c@( z&Oyv2FOXibWX^&J051E1+B==UbkdQlo!x3h!IBd8>Ne*Y2n){V_mJ0DfOf?8h=2;! z0;va}>_nIDZ%%Pa?BtXyijH?aTsNb=TMjedAhi%_XeVzn4O|X$G;=4$LHDu(5uFsc zt=3_rBbZ(J*7bN`6FR4dqduFhe@ zV((S(!R;Aa*&M9H91f!{AVX;KYb;t9*5Bjvz@)SHvmK3eCxnNz-Xq5#^YYhUf zAAeU#D1I-JQI3u7S=Ko@t;?YMGKXddsJbM!q3OmN98B+xrzOz`1nym@~@9lh>ohQ35KKE6!kUhHri2vZ>1^0b%fFg4ImFFYzZ z8N<x5Yy@esE}4A2mvW(OH%qP$*gK%Ax}=8X?49KuNGMl$7zm=!v~6A&vLAl!eR ze8&)x0!U#Jb7u-xh{rgb;G9Q1l018u*{trG9eVzb0@ym0K3n8&ZR_1ECiH3@x`Af{ zw%(hRr-aws>rHIQmrdAW~Dfo+o|C0iABBZU~cd7nOs$4li)8=$d{8x)YmQw957S$90k7h6lWE z2Yd*saE+`vL*oiaKy5pmM#$vb|$_ZJ(?hd%}(lJ378Y=G%>1fytG zFA~AzHJTwn5W(%~grjxDkf6PCdb!&7fw9JEy>5^p03I$I-@O-4N{Z3Z_t8;tQT*r( zqnlP#uP9$USE6=Q@H%T_>}=vv%toAZtKxLH(7iLB8K9#E3o8*qBM_KV+ZQap4XUwF zQnJ0zIB0rd>|I$@qFX1TVzG`*EgM7|N~?Hk4ZURKz?>u|e!y1P8QbjHo^ry|!QwPE zGdi+6c0l*-h1BudIDLDKEY|kcY11sE_blfH{W$RW{lCL5ko{aU>eM+WoYEXjn$rTH z8+9mRHEjxY7&6Y3$BkN_6e3gtnj|Fa*Sh1CcCO(ik|tg)27YjJ#-%otQjk-^Ubu!z zw?4ZAY%pgy5t~Cp`(wJ&DQ+?gazu7AFQtq<8zL>GmlREt0USL-@`pH%@4{zZ)z)#h zWK<<}3Q`AvQOy1CsBIL?RXMns27b)kSoEnGfLOQPYl6##MXgb<|Ks2IrsYgBfd$hg zs?o9{8t#@8?)NgZ#wn#lgwjMMo>cja0V1LuwKIrCu|aM|IT)xk0ClI9xIoJ>kRTPh zuKBBD1#R(c;vmx7{RCLC%npNUZ|`#{$n9Yr9wv8h=uUIp`+ zIP)N>YJoH8Kth>t8}exgPz(WE>!W)Uj5k_aL(-1>$497g0&30~r_*UnTlJSQ&kdQ$ zeRq*7>@cQV!EG>otv*U`OVL;0kcq<8;nKdx_dQ)mNuQ|AkhFeI&DVf;6&IN+8`rdMoR>(G`Aw!s8iaIq(RyRZ71^DTcM$b zb#^(oA`EIHJF-0Ya}-EQKs}IhLhoV#Xfxvl2NVfnCQFh}Dd09IMb06E3M)tMcYH&bO3X}VM+;#fLiuZc>UF1`6c|}-}eup)=KJ)0!_@( zQo_i&0OkfU2Eb_Y9NzD9{b08wCqyWY#{93=f_-O<>%HT06oTH_=ibn?NC7+? 
z6{#t@MuhN)bB<`8&S14QTxw-5a`OUhdnTpbJ6{&dTW74{yq<^v8RRSfr|vx<1*$U_%EXR zSJV;csOr>)4s|`4%v_-S|Extx$_}JEK9~^Kg`mR1DL!uN{14DU)^@3+OQpL zQVcQ z-cNy3eZ4R+S`Z?%>7dAw8pI8|{l|BpHv>V5cC0M7zr;r#;_aj z)WEpjBk8$n1zU>7=WdB4tZM-{n(I~l)oSn&)PZh2-emx~QIOct$uQ(w`bf4yT3{Yt zXdxh&b6bFN0;nD65(0u1#xArgc1TB?%zM;AG)RaJ!l;QVGbsv!fdcuUAAwO74E1~Y z@;zLhAJL0-RSYRQYQK68cFSV-ZU62xzjsU-;5NT%`_;R0;hiEw4W(w06p(Yq<+6=^ z?;V7ZK+4o-a2*==TRRjTS>m8^!Hav?ATR~K`^8&Z3lRp>54s5~<_u)k`AaY~88Q5;F?9$1+f39fj zxEkmoA_I`<@UGX4wY7J|a*SC=p8Q8f*YW$syTq!rO10-aYF2+m_C8<;@U(7q(BnE* z8|Y)tq1!VVbVZo1$~9qLaHvA7yHfGfXm8Eu7=zOs{VeIokvf6_sEAmH0^>iEPZt>A6bZVQ0-6TGPWJ2rPb-`uZpin$+vls}06ci?lbcI0&&gV0p zo}V!;)6P#XKALt&D-faRxEX~6R+xP6^#koduL}4eVj3BmowRzO7UyD%X$3L1zj^nMsa8ibAjvzNOI{E{ACt5p0XYTiM}eCQATSXVC~lX8 z=i{I&q3!#Abq>2deD59maS#l#qO|A)Ia(vlxfvi4q8QtPc}eZ0%qF38(!*X@)C3xX z_&r1kSTYNp(i(y&L@RBk55ko>3Zx5~u^m3$$~a5CBdn?jDWM((t!)4lwE$HGDcCSQ zi+x8kXCP%^eO@y@xH;i>eEc5%%{FQHfAAjNReHWjoIi+>ljaOrlw2oo59706h zcN z!O;~jZqCrB=Me*4TL%&AEU>Gt!lbu$0@LS(W+PF+GouSiZx95WqbJ!8iV8H6=H~_6 zQE3RXhKMhoHY5W&g3SRtW#(dgZ7d9|3-RKd@#1{K<3VGYs(?zvtJ_-~R@gk5`OLOm zaMW&d=zxG|BNa&V+AF8O->%87snU(h_ORQA(?zgC6*y->49|FRoAK#$#q&Wxpms$H zz}?Lap6iAP;HlG;fMR{qAg6$JxrNG#pZnkzzwyNvX!U{+5`b=Vf9Cq}oIfa^rV}tK zVEZ+&AQg`*WRz4@9Ic{QyE4WEmQ^wj1)^Ae2wAxp%@$RzTJ~yG2umoh-DkAdQ3>fF z;*g3(p=5z`#+_#1hm}`ej;5$GaFt*pVDC1Lvy+oB};f6?XtsEI;JC* z?K_dtAym~Xa|wTE#w+iNZQDnJ=!JzRI9-68GVM@B#(Nv<6CG8(JJ8|9zpslBOnV?e zOvz$I9drs`b0NqOrKcEh6h1RAXou)kEZMHxmG!iYpHVAVc)h`9>tJ+pJXSce>9ddQ zr32!LpxE;jKm$Y;NLZnWrZErzWe8a$$6{P|Lc073$pq9hpnU4M8#fMxK z^pPI|*Npf*H+6l3M>dNsSFy+`fbVpa~h-gdC1>?w&KHMlgQL@mFh@i8?IUnIyyf&PR_b(4;|=h%6;(i*SjCxZpG~ejT6VwPV-S zNq`s>N1>ZutxOVAAVvA@Ww$sAh(u&z45IM5ql`IH)M7<|8b>hp1{G4PAjru&IK7iI zA4#2ELcmR6!Mb+>LKjQyU^Eji&{B+?VBPzev5{lM*IvEA*Z-&g8+`p=`=8-2{^s|Q zV#3pLV6Q%u*1F$;F|(N+uu2%ZtTZ!#K`&wt4=;%FU7NQv958$`#ahPO5}`jn}WeB28VfUPp|Rx6TeBEdn2 z`y(ky=M=HTfcyQR^=za?@MS-6N(s_wwX90pUoZ66$YQcog6~g9#TqC!_kuWOu9Ne+ zK%0Zv&5A|A{o$Qn>Z;uQ;mzkiq$!4JvL1I1?)>QXg!`l6=5&j+e1x9AjNg3x98GDe zl0w5#7z=X@kdV;Yfe6JZ2#(fqTy@ogQRKb&NHJiqiq+z*fN|ETtuIdt(sYgyJInz! z+fYZ!)e;KyjxZbsSR8ar+RzlOdpL;($m&R5UEbm8rz5%@7zrv9NIysorEP5-Xz##! 
zcF=YJZmkq;*pQ3hJP`{aK+*~=mthge2RAvKjzD@cGhCR|CU9F|Ls#aZs>9mRuEyHj zMw7rq9&-$+jYwVV9p&*JWqTg;4w^kSPxKfJWYD}UZez6l4M7!0_Ie-f@F*14I*O#K z=p&)BBuf=)6L+1*7-K>_zd%1ODEnsXq0{w8T&seCkUB5={rBF-!^8cc0gMM~e`a>A z{QvB|*<#T7+kD2RS%9*b3jONZl{S*ja|Qrymyta2K>7ywpIq^jwR3`R6f}wBI=a#(ndrL0(SK2{ zd+nX{4MSD?aoCy-2nptH;onD*vUUJ6dBxr-kYICqy3fGzL{Z;a!^2%hWEKK;-~ukl zcE>4UAteb23t~J0TG8qS&^8=VswbL?`7RA)@V5Rn4QC{z&eQie1VyWT3|$&vhJ6+j z3DdBc*mfj9R-hJ}LD7wtG};{SYnuRvT5R#P)vV~gxqX4l^An&zL_`Q@Nc;dXzCddm zL^682M{i#ci4&(Scu_=j&ENUhGPW5G#>polls$ z#L})hw!oMzvZM&LW@CyGhw?H)#bj_HU5b*Hn4~!O- zktE~(LJErnCRs6aNbUK>`}ncn^M~-sul^$Lf57N-IR6JkAEUAW`u1=AI^1b3 z*dRd8n22yi49|jy(htp8MM?pMUJ@o~H9eCd1tDN`axUi3RgEza^DC6gYqUNK z-WcO3u8zW)73}(SGesac;^8S6ZJ8mybKV1B9oY``#IGx2WSr{FyKoIYde$wLiwS(; z1-AgbSG>vrUp#SQb!iQ2rlx{hX`Z32HSDTbQp71|tU049@V9^XIsSKl`cL9FK6`_W z+{xj1rM5RZs1Hej6c~|!o8^rA$9t5kpFJ-GW}Xg$w;@^_s~xAsjC%qQItR*iFlp>D zpd;dx=(eVjGnpX4sJB*x!@DdUx649GzS5Srv^IwIU5|Pw7*t!z@k+6>)HpN*GjW~Ca@`)01UM}T`%V|3&X)ssh7kz^^%N1jdHWVF>Q{BK^hA!GV!)Tm zzYiX|T?p?^hn1h5YJ4|=e%UNgxdkcA)^R}1JNEmW;yo6!(=}mN}px=A&LjejkB*^);0G)Z3S3ipV{`+WU8;*DU1|83;>+CTy z0lji|vghGzl+TgiMO3_uw~Tg&nXX!#4ony4&OT;qOvGbhg>Jaw4wL)~o{BaCv$# z%2z^=gcxrT@>h_SA3^L-Nckh|&%c4=asluJ)oZf^#*Xd@F6R1`tADtDHohOiuyS>C zgX?AA$NRRZXdN<}(*p0Xc)Xn!)!h!9d9cockVz$2&5oHMqGLbE*Jo#lokrO8 z_!{hL-7R7A7h0okQ{6n~N)u>tiZfYek)k?vGy)inwF3G8k${vFq_c`?M&WfTA?*C; z`ocKtoi@p5J9CS#tO?(JK5Tu3Q5^6rcc)FR`)Yz(dvvI6x@g4^u-v^*SHR~t`1ZpE zhfU}<_{QFO9~A5m0@l+ldfjbrP=p+`;n#lnh~N6`ExH6Gn7ykVJRt-ewW3(z6(!)P z1sxqv&x{jEWH{^{P4Uq${15T^m;WWiWP8KbR_Prm9LPu}MnzYO+e3;tRPlTiWTA5% zKa=l~^F^j<1s%Q!l=R+^Q^eE80;8*9S!`$_Shy7fFoTfy|9H?=911)i2ewiNX)z~`gur=E`kZ)!78MDS*> z!vRp5nP0Su0L7&%aui$&oulggGw)3zaw2F|Iy*TduTvcW9S5AgMEg309ltKr*5Zw?b;(T|D`=g+mOPE^~H+S2=p4cuYsX*tyT3Iip3{WK=-^xlSmYV#-$la#r=fE%x#f08Z;Yc|U$KI1pdMBf#gKWB2SL%o zMAx(x0pu-;`0+iH3g#meU~$tiuzHc`_wC&$(takThPjK5cjyJ1Cv1RNpdcr>>TInM ziFP6XY|}cB;2Y7AOW1X=rV$5tZ7mr;e%zmREkYjm(X~U=TJ!43Q3oMM9iIt1G@MQ{ z1)&p3^q+(Mo?4;Bq-Ao*yAmO^g53rwvQ7A~9av(3D8+bNZ4|`rZ%J45&+cUu-%Mz{ zaN)f;t<1%|#rd}Of?8{M`|t=gcR1~wD6zzVo1F0T@4vur-CyunZ31ayvNsQBShs5G zKNK9Y)$G94-mty>?kH@$TQrl{aMi9=eEEa-@X6<|aZQ`DO2O8}^%@HZ`3ArLzrP@^ ze;&W~=YKDHyIk#wiF$GnT>^T;nzJ1$%1%wNIMV0(nCQG0`Cbg!Q4|8waz@!6;fV

&Cicq8GbOogI--Mcg4z8)30-9E+20&Io@>OEjM}qA4 zqB7ml+~vwanqgV$@x*@4EmW^*Ro5uJAAtNv7Pw5Ftmx_8iureuyZL%2hg+dWzjSJ= z;z`RW;d=)RVHLfadyxZXF3=bjsmz zfW?+~A=Bu**z>qb*qsLDF_f%a`#$T``@0%yBRgo>Am_8Mo%$6oUUr-3;nY;$S7J8B zVg-bHfggM;DaEjPMUajZ5}&WR6Y>JNzdD=_&GoVGJ33zDxI7|-fDiA^__2@Q$BWZ~ z-~7(^@ZHb8K&c&RIib~Kh)-43W)X~(9x!?eoo_lIw1scI(c#a!KS%4nHODg!K@cS( zyD%HJ2CP_71=h%9UsaJYX?MW@1Rsi6Y%Ku*0Da2LPYfgpWM zV2229mWAhgotcw&F}{ZfUQmeHpI<42$e*OgQYfnUY`+S zLK0wahZP%*R5VObv32tCSF>cbw*Uz=`%wkXIS%)w{cr!vHlKy-+vA0YlN_16m&*Ap zmu*81)_ET>55ooNZq2q91<&Oe4r{eF{N#IQ{Pz8m>7J$zwf7E4lKyj@~=~jMrZFR-f0Mt+NrLi^BDth zsne#vqC-WnLZQMA+5nqu^%AcrZ$$Sl1)j znK4{eA*4}8Aa+1o!E>Pog++nc`Nm*&^cB*dkWRtU9*ANMfSi7y|zA zFZ@oM9LfWV$qg$l7d_SE!U0NG8_)ww3=we%)14aShcckZ<>~2WK7NFpD~0bMsy*skAmNcqztZeUIv2cJY;_%*E$cselM0_&$;yippUr& zez-XzDj@&Y1F#x`AA~LF5Wh4+lN84ZH|M{o? zNBGWP_%qOc%w$?7`U?owXo@6N>{p5F*v)+w4xUI1-I6dFgF4Cy)6XzgkykDw;0{}1 zD^9%_hVKiANjrFtPQ#4Bp0Lcq9@-&dT{r(N2own_CusaC^6Bfyr=LMwejFmpNW@1+ zR(0&j?(=IbN$|mef|)otlJD;<*dFocGN0YM$&G_WbXn`8xCiDT;setBee(4LGgJ_K zd|qck-wB*VpUD)iseAfvtQgnwZV?_r#CrQ7;&O&-^?GYK_6yo^fm>Yqdv~g!>+^h_ z5AzLspqZth^sXcCfIA}N=VgJcchr4=U}`h5 zZtaTwIPkn5*p7ndZD+T`gz-nMbfNOro6yNF8KCKLF9`6o&tZtWP8~-uhfja@e)dNR zIOl{v^0PlS=0Ug)igPE}S;_WJVBfbU&RdC4oMXVZ-aaD5fc137nv=~TVNWivH`B7! za*q@fVu;8wnq97Uj3QEsXu-N5LJb2#z;mg1E)`7`Yv!B{wYXZ%K&DE-i<>i!4qTej z1R#-mc(X1L(_zeR2+yr^rrTw)?)Q?iiMfNtfiyagi-8C}+o%dH+`skafi%tLE`~60 zo9pkiENGo{u$%QX_VZ~u0XOfFmf1P`BuCB#$uR)E;O)~Rwtd514!CwE2R^R`YQ|oi zRsxl^gPKpOlKT!&=f@|V8!Rns@J z-im3(?N@#mo)Q6ObjLwKRZDSCemKB$Mqs5n5ga(-{W=}H5fI1W5lGNM~ zQ?%qn$Ardt>Uj zeTaa)e5C*}u^5(sUJs~uePA67OK8;JWyt6@@zhs>L!WeSW3g~rR~*Lybl9OVDI{u# znp_EKc^Cj|40y35{M4&EeCMg+sZ>0c3g@mv8!m;2v=`C}V9gxN*}Ap;g-IC?YOeKW zyxXznZB1WON4f(7+Q&89&a$5;|9EmyS7QJ6{2Mqfzm8r%M{Q3~n|z*98iTCfiBsZsn_@MR z$D2vDn#pT4*E{MjociEhTkRe1uNn8HjXmJHI1!9@B7oVXb8pkCnrNRVl^(|lIyv2G zuJ~>pGaf9i1$Z|(a;QSw|6lnX?84h%#gL(Gzly7q_A`>_!1dOz-m8rN8-E-3!!_fD zrObN9Hk`f1q5{#nuBKe>{Rilpa42?04 z*u_1>Q>Qt_D#j5DnsyHSb3wTPS>`n37$es8ge5O{|8~WXfB9qdR`ILf{5Bq*sf}wE z3+Fi&Mj_$d4E1w51f;aE5EVYLS~+h?oV8%_*gp?`hG@sjC7~$)j4FSo^bYSb`b70% zv)pz3vx6MoK}m7yQtK$FoRJW1YWuWt&C&-gF2n^Pt&m;}*wMkFpnj|m-AxFLvk%4y zxgLTU?d+gI3ykhFY0OK?wC5f)oj%U+oP2#Hjw%q3EIwrOs*6B$xVo5v)j%u!A zA>n+!h4ea+dlA!e2x_m`4WZvky^7gkbTug+=ljkn;qJ7e+OWOHqhhi1SC!|OPaxl_ zVhKPp{aX~es#aAxR#`V>i4k4JC~stT+?>v+z2PRZQ`zh~Xv9E0N(zG87kBubKlgR~ z@-P3gtv4X0h?lpw_~gy~5O>oSnR{@nX44NA0Lyv;v}4~cM%7DLQ#5KB1@K%qHuTTl zMUa54F%~DGp+S<4Y6BaW6!6~dEov<&z2o^{*DM9W@BiwD_{HyjioLSC<#~Xr;&L2F zAz-g$R@|&BEwpVKdl-OwLPv(AGrA^-7Gx;^Dyq{ZYcZ1t#TqA-ImKC2RHwQrhlVB* zyno7M*tCwvQmGvSC_;Dq(v$w}^KV$yXKq*kd>)4<*Df~h3i^p#!g*R3q#(m;%n?}$ zK$HZPh?rkM(9!m{=vpl~*Fh5zkwMde2&8TBq)v@X&V4>zmEZ_nWP4xRoJ5^dV$vye z8hHDyUm1rR4^0#P4gTmvW4#D*5AtSa^9q66A~TWn>9M?r+Pig+0$S+D!+sNTGCLoh!f7A|XTM)e~Ee&yX=vk0-YM2)v&ZYLL!hTXa z4Iu=!>3eVHz+;F{^CW?kiFfoTfbsA9xKChG2h85tqk@QN?$jV3X1z6pr~U z6h~`4L2+%(^_1>t!B2vq?i+k2iTb*?1C-oIzGu}s-a#<5ac?@1SV)4_iV*rNevGy? 
z^)7DgU}8|CDk`RX5f8f+s%0X&@FI|6uP-hK9?M%aaV|Z-%c&e5z1LVW>% zl)^~TMi)^0`mW#*Wf9!e3UDEUt6EU&Tj=FCaO~ef2n#~U==BS<{)Fj(W!DrJXe;j| zp-`il8NXdDDIUd#C8;Ci0r(yN#Bbp5{Ci)AQ;5WpLyzfiOGOHCbPZ$_0y?a9Juw_b zN~>5idE!ZI{iFp)IiTXxs}nnsI_;AA*mT?zDSoao^6w8oBY4nyN8M&(9YR77BK?Y~ zCmXdOd{}FRchiE!F8}5Fm1tI<&F;_bo?U<8pQR9lL}P|!J)u0^qt-HtlFKpCL&h>b zGqq{L@4b43%i|-~^BI5QpZaI;|M^S*5B%UezfCmUx&f+gUmOrqvKc3SZutH&`d#lp zOb8)a!O#q7U_$aD1-v~fLNZHkOM>?88ND8O-FB=w;r{UnPaDsfVjzMW3pk|2&P_Cs zzyUnQ0Tsr92)b4VncZ;43wyuUo6G+3vGQ6=>&6H70(S29YwD}o-^Fzp!|aOL zuC+Dpg*k`X9vx&~fO`~S!O#Eczkpx=xBl}ydqsz?O{^P+-iZpgj{EJf{m@MF#v39{ z#jZlls}_5m+7)BMkACuZ4gm^SQU)QEd)X?G3wWww8r+de#{5u-B{NmFu0~8Sz{>cP5IU z9CP30z@K|~e8MmN%72PAahPMR4Ikd!;(KrIsrwSfl#g%Ec>DZpVtd724x_*|w9At} zc`LL#wyM_6j)UPKmRCw(cO;3RYsGF|@bi*+_DaK=Bi=u+fHoY)S_rlwLJ0VkPu>pZ zg43~M5S;R2ObWrLj~AOn@8@C%hlql6Jp$*~H3d2%0tklR8VztS1%f^X6ibNgFk;SN zIxh*2I{|}WXZg!D95{rLzq}LDjWeA_ zuw|k;XL?WzLz&A{cBmB@BamBS z%}KfNDuk$&zh6-M^8Wkp3E<^^TZ#OX$1yS=d_xK86u7@hD0?$cc@m= zDRBSxy4n5ObJl5F>;D`DV{V1tCuAo4kd9yd_kPxrWt(2>*A7GYED0tx$?tiVRHJzH z0>$>4e-}C}7oRJ;Y(7X-ZQT#*mwlEGG^=J35$&M!Ss*_DMw^TnJR^ngt3K}+dUa0% zAC9E}hkf>GLk<2(3d(VopNJMC7PC01k`LR-{i_Er$S$B}RP+)Wte%F$kKqOWX?G&*5P4Ljdfh zpxGW5G*sXB)|IaI4hMxRPWxx&hC@gTwC+RfE)vW%F&^C@3i7_ z5NHwQn*ZMGx3PG5)}XQRxqr~0;pmy-HO8vKp&fl z$n)&FIPZ_|nV;z$GH>sTzk8#b`Tu+~iOlO5XsiJ42F8SUQj0>2V~XxfeCz;>MC|sy zkHXD68zO=aUcJJb`v<mfHkp0QCdUJdFW_DjJVVeZ5I=S z^W6lq=}n3GSeg=G+$e9J&MqiDNHR z%VlB^9LG-fMVa%bL}|DXL$FRC>5w@lY^7keoG`CvY?mjbz?01q2YluBgf4>5o;SLq zHFxH0*pG%76Lk_c?<9!g<%wOU+Boxs^Q8p0rv*iU?WpKx0ue0!MiEG)0Yovwf)pc` zoDeMz`0s122$^&!cHEQ&n~CRpD@Zw`v;#LeqVt^`F++wWFOV;P`A5_T;9vxDKO49?rr-R#0&i}*JOsZ$ zM=cdGC7g4{R+-=v#h3w+Jj`gro#;hw*&sr%mehH0H*T^)6V4rVlvnM#bo^c)(Uga7 z8p)#<8^K&0h|KvM0pa&2QX`=5mcmC9ms|8+(JOhHBGv)1!zhU0=Csn8p*3*WUxa{% zMlhagvX*UaTx5=Bc0>(@8g($=bj#EomU08>#}gTj>3{LD;OZN{IA#56Tl3x|^_=1E%o5^=bP^`kMyI#>!MRU?QTlf!}q)*;*cvIG!l{x z{A}-NJUp6tFrNZw)2UgqdtIVL34K)X_;}r?0nWvp?L*an3zq z;lt}#EO9t$y?Q270IjxfV@yy?D`kpI6-yLOH6D?~_o)>dG7;!7Vj0H4p&nk;yQTIo zcDVZUPCp2otDQGy(&Zo z4HMK_C|Ka;yudlNVHmbm-7a=>#r zop9_2Vi2OFGT@Oz?g9)ov*TC4mX zA#l3pWK>l?r^ALht~r^R0s)MiGERxKm#xxq(OtP5?LTMEmI&kbIm|E{4QE}?Sl<6K zF0VhKAU{zrakPdI0!}HBMbauQslmN>lM^1cZO|pP^$}lBONP6Y-NcBZz|&C>W5V6} zge9@)kIRY_6W%`Dk zy`poVszR`YfcH-sr7PZC_EF$hOV*!lpGJ|P#aYdY{3sP`j$?1;NOP5FafWnEuB{Mc zbiZor!{H4Q5b{E8aH-g8!NYdos1N5a%_#5lUIx=5lTo1 zI1i#o2nmtIeE2A#-}jA$lo zf%OeB<$F*%0{XK_L7OS0=rejxjs!~!AkS_SjUp1uC~!*(FMNJLcfw9f&f*fQAnlyu zyX2&2!64NE>M{^W$fO;(JHGyn-^4YT4Zr%&|55zJKl;z%o^BZTP>cfDoNXkBX0dOz zc6}f3!1e5538f@J=o^z*Lw3ilyRL)xkaOnr;GlT`cX}`)?$cISiGK*|M-q>*-nft? z+6J9P#-w$j_)0IwyNg5x;AH8z7FV~)ox`5)fsvx*inIy>7-r6dzup&JljdgzoJ+zs zXk8N5rC75es+QxKimWibNXcQ|(Aqu)wI2H@3aoigu3~be@fH-ZS&R{c2$JKZkSz-f zTC@0G(n71&$pnaOBq6j6=V7-;_Yc#?H-3K zMTvf_&1n;?xMGHrEh3bP_nRI&A3SIv-rc^8i%zcL@kv{qCL4V2RYS z`c9mP0@K-bb;g3D9<_&MZWz)8qFw(G=omgLbkMY2~^)%K|UmF^e7VIG&$T@`C*! zt6Eb_Tv}@i#w_6N2lXZXg;Ysq>C3)!g{)3%NhG|0L?VdB%>qaY1?r! 
zf@^Ufr{%qr5~Js5|e|$h-&>rK5K&qEoW#!|qgf z$06s$k~5w@{TASz)L|oy4xWdtx~sE!G}=C!Ii9=GD8LNwpI7|w>4w+ahFU9%^80qX z;pOEl2TdirmI`uN6=Yojeja9lKxbJCP8-LH+77hlY+>Wx4Z_Fw5yjPHQ&Yr2B(e!+ zQ-o;}?sAbOM6#Vz!gk2dJC{2YFCj8v+LO+Mb$phQ)?mgYt92p@(HhMu4gN9@OtsEI zq^Y%vE)*#cJ$^tM)T2P&!>I&!=w`g&`$RI(BBE<)EF=~hvyz{p?n#@71h;=u0&(dLG)W#EZEqvfOIY?vuG2t&qC?mMe6RF0di@V(>y(Zu}*CKe_6K*F$>qR zSRy%1$F2F+m@rz@aXO@m^g)pf=JgRj{dfOs2%FeIkK^5?#G-W^w0FYvxZa_4=v7Gz z2W+;=F4$>V;7*=z4bG>BxRi*)a5%Gh&sNSjx?|fm9ikZ56qbeSvJ3~Esf}pZkBT4r zNB>!VhEV_TnU8gm4e0IAAi|)UWGqEO-s3Z%-*ixSwx5@Wk;wTkDr-Ja=u z6BI4&8{jTkjWvAtbFn_m3?F{*-d*Bm>Vhz7&!u2Jod!CkEvjTZA>7W`4OnL3>KFo0 z+^y-@54B_1Zcp^x0jHhopi61P82Np8KI$xR+(r8uWj>P)VAzOfGui$owWJjzyn)}@ zsQqab!PR$!4Ke0Ayi03ic%{SjUc(l4(!TcN7{zgH#6yF%qVHfSVY^!sd@fEm96dKu z*5sb$A(WnbFi`&a-Oyf|cFsZ~v?ito=srdz1E`u)ffU`dNc2+@bphjd!(>O<+w9tU z6po~cn?I8!TNS?L4Rj4}*9T^?H?r5!IuLp`5&TGMW#u)`U4;QX&yG_7G7o2mR$+O?-fYe1AAYTv7 zS#(wT42uEQE0#oCaMS%nQAs;q-p?KsaD!sEiuV{y zhvq(Juej5y^l@Fr864~9XdSoXz*b4oy&ZK7@Qb+nv>!OS{<(CfzfbaT~Ev#5~reX zwPDS9=%qX4nwi*52}|dAh9)40126erwnitaU9{6f)Z|@;6-$CZbDp@vbr#wGWWJ5X1qEBq7vW~g%bACf0w=|+hD|RA z#^HvJgl~NC5+Tb*TIVtBJYdr}Ag9nRf>Lc2NedMz!!1k9>S%YIbLg;+1lnDRs|qDc z5kOV23?$MA!Vwg)`w8qdgm2{$$d4nLdsmo=k;Db%z(;Vv5IzrzRSdb&XT>DWeEKTP z3X?!Yk_ZJ=7C<>Kh@pslO!t`IOfsS5Fs6gPe7GRVMlPQ1K#~>2qxg!M7aTB|@c!j| zm-Nj0jUvd#onj20qEOq>_P6-;f9Jmd%gdOOj=RLp9ZIqcuw|fS_s=%yeCEL{zI)g1 zLb{+L(QsK8#4MU9UYfh?93us!$T(ZjaWD=ym&n^B`0kbwvn+i4^b~gw${{cg1$Y+< zSw~mGR6sAwD$YCZo*WU|x<4_I(P7wae#$^LQpKYk9>R%1757X;Olh1~Q*wwhCrKn3tDw zx0=r`icWiXS0gM*gDw-F@3LluWt8QFvaIjKKBSDYUa&3&@q%NIuIIC!e-s$J#}9wY z-}EC6akIJ(3Kd3tpe^~V?FF=;%pykd)K&bz`JP&>!~BEpLPd#M8AZKGbQ+qB0lK7& zlvgb4MKlq1QN0I^kk{sVugQ#aZW8BSL;)3AgK%<+b$uKMV}LmL-CY{QJJ&4Elc=l+RU=f+ohRJ|w7eW`UvZ|qY3)Q9imbmIDo0c??a z#+se8v8L9pJLyy@XT=k8y~DOS1-;2b5I)zIMKL^L3in4)G^-3Bnsg?$Rc!kq%|sGL z#C4wyxGaUmf)f$$?pW20B2AFGgLqB>Og`PNFoa#YNi))s*E6E~8_8P9npqW_LUHOC z10YGYI}|tQMn&Lh$w|cm=LN0HLc8m{jAF>)*jpb15n&j5f&iUbr>J*V2P8B(ODV%) z=qiRrZI!cHdWVNyx!>()?XTD?OU{GNcRHQ$cwX_aEO_tXg3xuAQn1LWk>_xR-_g7O zAODU2Jd$N}%Q&py(C3dB(;LtmK3vl{Uyr6ih5bNp6-RBj$+DG+tdDssMTv0l4Oy{k zT^20s%6AMX86}P9&_2EH6>qjfT^$?G@>C4<_62G^q{pCMWqWjOXnN^oFPHMlBUOvNa?lvtg@s=$XT8=v)#$d^q9# z%Yuich$^MLtFgE{q%&(y``=d&57f~(=gveiwG`66s2%FKtemswpw)iL1H8L!@Zg_IJ$^Vu8zoq>W-!=>9r_mpI-7(d6Ojo1C%-*tOF&QO;VDlPcp^A}4}^~RBI#ST6J_Bg*+5eC zpF>O0bohl|e5^4W-^YVLgZsFYOcAr5WKfl2`uPx_1Gs9ccB~Uiv}UB1M>GqIjjMl@ zd<^s>Wu)bS2dWC#m}>$RAAH6X)`qK&0fQnK6oPm6Pa5kaXjU-@%=CUNz82d6fY&R` zqJx1!nH&2PLjB_RB||C8xb{Hi&7fqmk83C?jd;ml=m}_Z-H)mk9s9rr0 zPIbhpc<%;2i}kLv83rsNOcpXS5``Z;-SFaa83|>|g<|=xPBaw{`OFYo*<(naQoPh}x&b zOkLUQwjsfhGlw-&Xqw8+TiliML04!e}e8%*<>0>$53$95dp_l>hFoH@s3AvLP|)Dun} zP{(CiI83iBgVstggx4M#6$fX9?DfEY>|_233|NHrAFXLvWgcD1lng)p+kYOH(|G`Q zNnOxg<}h(`nA0LSqvLw)qBC}sH{OuelR02{6lL(K zcPS6=;dp*Mseg&jEk@wQG@(#(K+}Qac@C$vct*0M4hl?>Q&v|}LqT62SD14ednv<} z+^S1vlfpA;UKyON(#Br6=LpABNGaoVz5pqs)j9^PHFbahF!s2s1EONh?oe^j5^%T1 zhh41JDdHM2O+Di8s4iVb zaZkpyW4^0+Mt7YjpH}IvL^I+WC#9$HySyDB?Tv@|Aw+Ntq#frBV3Fv_woKl+?kLB-=Vp*1r;$*=(Lo9s^51$vUC7h$$BJS z?0B=_UrWrDSrqSs3IPHq5s&kNbv@6RgpN(~BBBc!gHK^Z9ZMzFnW3%@0WhDaLT9<m~BZS+s1co{` zut^E?JC{R)v_WB9p`Xu%PJu?r+etx=!yczM8a z>?0vxa@J^r1I0+M^e9U5f^3|xc3C(N?6jWX-tm0f(7em_igfdmG7h&v;jY^a=WGKSPAOqkF_&O?vmJ1=F_e*=y@XO0P7&973~H=_liq_eU7a1whlRSJ za}56j|30ULx3_I{{z6l?=7g8$6^k(+d|V2C@N|U(*c&@#O@POe4G-&rtA;5Ou!>XF zaV{ATrv+yff7eQV*X6yhqdxs`6y3Lj5(Z18Ugg9jKPZ1ynvI+Y3Y27nSpwyaah}A_ zkPar7Dm~-q$o6=Dvh=TxL9Ht)lp=qLyt;ygVE_n63j-XZHrqRn4qmf)W*6aPtGbb63OG0qb{sWuJcWC5bAdLw$18BSe z_>#NV!JmwSr$rU1V<;JL>^naFMR 
zk*?M_6Q`>1;WXWbpZV>-6TkK!{{)nCGYfUMQ`4AMbCAOgL#p)FB@H4M2_*E70p((L04*y)>gR(@83T4h|Bzi!ozOu$1O?s65tEMGk`fUIEJ^&XL=}L`PBUQ^RRg%v?h9UjP!fIt0)nd5r1bqGKcj9qHr!s8DGQr_+jd94bKPEEz{knJ%cE zGZkpmK;svTqD=cqakNftGs3w3=pEZZvGO|!M@oSB;QLXhz9$*16j-e=FqsW72q__z z6Ob|z5>h)jKfofnljcp&q_&2SPAhuu_})#0cW1Y)$PutAX$=><=rr{%2S5)&E0@_Q zPRYCDK93W?JIr+orb(MLP_*1T3!zAKlGzwk2j{TS49Df+0ncB4hN7ZwJDTDw!MT)V z^r1CP2gT^d^92MD6E?d0z9E%mfEFDF7q7l#I!z%jSPQTn8!Sn0(%U-&g443#e7T^t zH|V|Lq**>4jwOXK-G`Y8+JVdYg5De6-Zt#gb`*mq(Q{8%J@-W#m^CrsJV~%mBs_-9 z`J4`Oca|1)dT#rEmsBn|Y6r~m{qOv`C{zoM+na%!djg(!PDf7$eDL+3#uwlJbr?8P zBt=#%*2>W$PHJXS>v+5EC@Ett1fomcaVm6*%Voj&;ROKT_WV{B&Z3CAV>PIQ;n)sN zPDf{VCkpT0J6@a?I%z4a>nJf;>+h7*SPMBTW{t8)e<wtaq@lSdLJlq{lU9>2N24VptnSg@ghmRM0`gEJV+(Ov`WefpwaKZs1y^q=O zcH73oe%IY5{xDnz+H#K;22v9Yk97G6?fR)60PDQ4O&y1qIM2*g96%z6J^~+z43%UO z$(gQQiGH6$n#kcV$mPV}Yc6_A8-fzAzgF&&mYiXxAsN0x+9lvu3iyF@~To83L)HUz3m(7v(|G>m*T{Q!>K*Q#?s^jqGv#spp;hv+ z7kun7t^p=GQFvsF;*NVShI$UeiNs-6mdYrArVgWz=MIbOZzBPGN79_aJOLP%*817$ z0!X)|CPY4xdhUD1HF$KM9AR5d&V8Zwr-x(UC`Cu7F!t%XuBf%nd)4|#cg1FNNJ~QC z#f5;OPg`eL0W?@M!-u0!6IC+0X8|IZrJ1w|5FnTo0Es|=l(DQQwAzr%3FY(xr-!e? z&fh><-pBUlo7mrcQ?}DiBLXY%{SEC#4I>sY`Wr0q-lO;rgMzrWB1%on{0Zu43=uO- zt8=>DxNKb0DE=bmx!wy0!)eNRIiVt>b-+8jb>6Fz2chsTngtB&dq0lj%WuQr`07vm z4Bq_k+j#TzG-ma%2$@rfPes(}9#k=|$A_2i;rZzeTIFE3Dp0JP9`V)R{PV~U@8Pq* z{AaK|zeaESptEwwT4=-Ad0B)^SA9mfLzz>b7!+2(2bw>qkIM=D*hbt5(JVVmdBh%S zP9n)-T=D=-fzct4q9clMv*~)3lMPB8041*p=Vd{2U~8IGjgFL~Xr=}xrHs?7uc5vD z;fQe&Mb8T@l4pmRR2zqeP8vhP&wua||H11U*5!oMNm=jSab6Z|tz%slI5fkiH`L<> zk-AC$(*1^kPEi52a+uwQt*6m)>4d37)%OF z9QfE8)`isU*4Xt)px_-^KRKz$(lFFm+x(@a^q7itnIkJ@32p6+9n%3(XYuOetjz zePq|{demV`bJUK<)5NFEdQKzx)`V2W*{%Mdnm?vBr&uVaI!L~&P&7P68pOBtz<1KNdrKDA18 zEh#~W!Cf}ne>|*`g2_bZcwlBBp5%1m?e|BbPa0R!p%tAS+K)0pVKQ(ki36Kl$WyOy zbkrW5ABW|FvYb$lO@}sy^&fgeKW<2+NMOdGw>M4Fy+aZ1F+u16%w-t@Ml*GYFh%?x zHv}FiEE?oig`kr}VdtY;Fxteu(@Ju5uP}tf&4XSdt%GPQj*RAQXb;RV@5O`vJd3F* z9M<3-aDj47_$YcDK+J7`3cX0AMzs?EV&w{e2k+iG*Qb8QRi!(#>{0L}&~lGRo8w1k zdc?%?KnTWqf%Fl5igriB%uIYdS8qf{GJu@dfgDGXI|i&plAVb{Wo3VC7BlcFKuK zkP5wA3R5V zF({9`8v)T^6u`+aC^fNX0{u4?0ETdznshCXof!$b_bHH%L|OM3-Mc{_jlx@9&W5{q zKDL|o<|r(IS{p2_FyBWPjXyJ0W)Wu$K=lKu6lBk|X17REkKwZB+G`U;hz?AOi)6>a zLubal+~YakU9USQT_g6b18!%${K?;sul?>nh~M}V{|xHwQzUFaKLG2f+Z%#HO5(3| z{AQf9;o2w7ko%6*q|o{VGH;@JIi_wdFcVU-)IsS1;&deK@oX{uT=7omX?F)|gXINz zc|hwa-ueL90+6FK4N$-Q7C+N~&%XVuEcA881KpHPi-*f(tP#V93Tv+uK1(+E~95epi859L7cvDCHtB4AN@xWi@wLqpt0! z704O8_E_kgT63gSh_)|=+qS_0)VH6Ej%#&k$a2Ddv{C3pr|M|oGRQ(KrHo(vavMcC zb)~cz?$U(xw6be)Y@FHRj-G4`Xk1FBGuJ^|`>@)zWO%+Z4?8b~@u9^$ExJm){;k3izwt;1CTg3B9YQra>r>|sJ)WlJi~F@4|EWathXBg z*%(84jyidlrQmvK@IVT2`_yASt}G6}+zvRJz%|XG>5iP(De&6x`tu*+lryf!f#&pa z3#JP|?S?0l8z;>mUSF@n)W*Hz>2@#y7bx*mt8{^m>$anHcD7z$Z=yWNSANQT{AeB7DlRK$ zjX>h|(0OGu{O5k#PvK8~>l0vE@xwQ7sq3j>3N)R|_7)5g#)JezO$F^Z;3>n=5QZ}_ zduM=Z=&G7`Y?U4C0A9=Gg4`Oe+diqf@G+3l-BDOvWQn#jyzQff70Fdlda@3|u3&cD z@%_!1$mN{TYlWH&6lE`ubCBZ$5$TH*(Z_8)$G|R#}Uxg%$~e+E88d z3wRMzAci|MEe@?fjaf#46p!HfDM9K{9E#(2jr)!3@-f(IuX9o_BE5Bt z-^1M8rg&B8X0+Ex&YJhjA7vuyU@=r{9lzfJP3cv7E!|$u8A-h2sRc2tr!$W2Iu4sC zteS>8Z8~_F$IK1^3oH^v0_l%IdoUtU9JZxoZdM*MsjzV1neI+zc~JD2@Ir+yhQN-8 zU+8yCD3mli=t4&Uq%(TG4n(dbQ9FY2p;WYkl(YRVNsaxcWey1CFo{zWXnzd_^zEoB zN<_OtB)9ye_dfh8-oE|<-2nH5WI9+>09bb*$#@_UZOdiR+QwWDy^rFDVt49>JkcQ} z_HHEUrXUlAiYYey;ORP^c_8=k8Sz|`B?*70=+jmB4*+2N7VTMq4iF8|D>VVxNH>52 zZt%@}5A$nLo7C|dT{*C*<={b#B(8ghb;r;#1Qjj{!-$hADj{m7!)A4&qB#E8_&;d? zFexv%eE1W1{^75UE;PG$(4E_`fF6`H#e~p6bdNxli1>^9e_As4nHE^<9T^8s>uFe? 
z2LFqCoCV#R;ygQXy?0rg8!RW791lkqQELEBI>D>+318gyk*s=mRLsL@>T`%#*D^M>AFsdpZJ=E_r=W)&yzG`$06ibh9?pUOgF%Sgk7V{vYzjQYjMZbGZ30rH3;e}fz>YN?ue&sB!rtZDTw?7$@Et+-1 z>xiy0yK80GXNEAIfJszCK}U9-5xmaZwlN%H+Gzto+n({S{+Ip%{OjulPoIDP9kYl* z`5nb3hP&D5P%1V@@w}E5-W%TD_GyfQ0F(Nl^wnXG!H!1Io=Y~IPb)d);Ctj?TAqQfRU|#QlKZ#vMr80;D*h4o%9cgnPn$Sqx9l*A8@;6MbWPNJ=Sg2b3dK`Msi;At& zX`#T}ZNyl>(0d)!>uiRzkndaTD9g&iA~PPAg?iOB@vJR5j^Ynz{IL6u3 zTaA^jq*QEyPUpGprgq`ME@h9T^PIgoFwojQi&V28JHhD_@`)6Ob1US7`J&&K%@ zDRUo`w4#8sJ=O)Is~81+=wBl?bWls-px$D1e-)S4_r8YDzW4#UYnnTv6PjQGIO(RR zys1@e?Z8nx8mIw8DoDmbg`Ntm9dN_287`R}gzeZ!Q8IgnM&<6PIFOfRxW9Fzp@FHb z!P}f6^d!K5&R-4hUrxBzcE{M@866$=x7$tudhPh&a>kdNTn%AzqblfNmy1@?%oPBF z0uF)`>{QfokgR2a!}0(6=JyE>?i;*!+zx8JTW8@~lrJBB$}hY(9NytiZ;`_lYh*;q z@67K&hpw&}U9${HgBu;6fA%TdHLoSpzWcUsgVN>HzVL=prV>96f+Fj@aIF}EXz(IK zS1>AlfZ3AI1r}}A)2om1H~bg>82-oq-oJ{U{yl#LFW>tJ>*WJ1>j|mI5g=!nw$i;d zoxujeL+ZyAG)LQ$Ueqzr&H(RI{GBLvrL}4dn@V>Nkk^}YoZu zv|}5e6Aw72yJ3YY++4^(yql%OJc2v#e0{x-P7BoL)DXZSx{PNx9hTgoXEX)5QMg&+ zLI2+S?~OzlaJ66Md;)Ro#R;S^_69?E^*e@SSg7lufhl0cANLR-V;rPB@H@~CSi8ZT zt|}pXo=E$2pg{zOR`F}U^2?|StN@1$#h|#b@k|CyaOc$7oEL=9c_265nY8LThA`7~ zu(EgqAc|pcP2ACpTo%0e$~V{zP1AldSsHwHs343a1mcv=?xBypABgXmITV_fyN*)V zLDRb5CHq`MV_!pbExq|c8I)tky>5w?!#37g6a5H21?a;e30kuz$EbG)MmSKVsznl8Q05lG?<0fQ0iR#A0vx5bEk)|c6(NIi_^%ik3 zrHqsm+bu-k7@!Macv&(5IY{&!*KyqN`rH38KKa%E5Zm?5Ox9Bp#RlWE4fpy+{_d2g zRVjKG=UdM@=TYc4oqMh|7c;^lsa|p!0^4ohch( zxme&)iJ3BgBXUD+-YPEGFC&&NLc@6y>fN?J=lm4_U1?Q6S zSQ0Ml%JU*&%$G1Yt{bgNtGXKK6|K?*EL*}y@4dpq`Gk^K4FB=}*&oC2{hNP5xfT)q zY%DsC-k6X&@JE09PjFv$4y7!K<`1{Z*xa1c<$Jkc5Uo{!kUj&sr%gbhY9OY8_wd1~ z;NSTVei7%DXT*73u}J5WENGM5!|`)n@yiw4Q%Weyf`=FHt5eNuJDl<0^(WsZ4X$&r zY1Leou*=UDB5yv^+i~Ew)0ObC7|zxRtmg%{+OdlQcHPv8G82H{gKihb3Sn)#WW&Qk zT}U#zQ>}WB*?gY6y<^E4Yvx&hXyW$|-aMl_?RfXvv6hS%rxU8n@wIh)`Fz9kb;CEm zcnb&|dwlTC$!ScD@15Pj^W~zv1$eU^c(XS&H~fyTzNd4%W2@Be zv<@6da1*$%b?{JF>>MLEWzqhy7F^cCqS)zZY2FcoX-%MIvb&iuVDWJ(a&^1Y=5Ro+ z!@l~}pLA%L6XF*`*tZ9ss-Po)E%?51;FYonLE>@6EBU8UjtC#C#Gt zWn>M)*>6{&0~~DQT6oju)84rkb|DPE>lV$)i`k^eMN$|^ZHVvPF`j?E zqi&3D+(<_66ANSC64^CK4bOsX|$|OCHmC|Be z>Al7~cNJEo38Zm=gjjw&Hw&SwkM-|n8ooH~uQ@!56oNpf28ui0r43|sh@t71N%Sfu zH1Bx1F8J)akNa6pXPh4%aeaQqaU7Z_F}c?vAWNCU4og{KnZ^?49qqVj7@rbUAysK; zCZddBH#2BMW?9L%5*`&M%`t!w*qmiBC*J#5*JZt+?awO8q9lg)S9D0@zh^*W*Href zJ&>Lsp~jgFC#6^;W8W%T8UjuBfyBn|>WG9MV~xgj!FWj8h`T-#Rd$9P_wI3K_z@i# zCbFz}mDI&DnDi={nzfVxZ-e@IU$l?so?^yEtWQ#8zVhM`pT2qGvmddrA#@_s9AVDN z$U^Jlde(JCw~Tu1jKd=qzk3h&Lgx^rlQzQiZ7mDBWmr1F@~W>@BtPK&KTh5>Cf`}FebN;NwKL2drjDTGjN{k`M;(Z+#GQlJ0Kx)j3g@=@y+O~ceOlti zz_y}!2^<70J5Ee0GK>jf@`>P^uJg2Tig)wQ_^p^lieXn=W6=;C_P9<{0_F|@IxK)K zuSyiSU|YShREfB)GdJmcNtohy-t*L;O*?pl^+tUN!eUwWyFPjA^9(esDvT#9z>qAD zfof)VgEEc6<)vgCtY(qC4s8*@^Pi536dnfvqTQ@694(Aichr&VfI7ig7sqo@EsEqQcdQ!>2w@&#IVeD?Y^ z%;~HIcTT0&{SYkmA()&5P&odQO4XVJBm1+_OE9k^|~=B7z6w=4x_q;&KcJg9&}@8U9C zR3gC5LtDs*9&jMWDHr5Yke3CQ(+aE4_&5Ih{|o$~|M{QAU;4NHH>mrw4j@+kaWX^@ zO2)QZEUrM6&>MQII@lQW2YN4L<|{%;iVn5aj& zeS>=J>}GgJE@zBtTWctU^Lf1hc^xxGA^}b4vPX8KL5NX^GP&lo_xl7KXzdsUL`>&}_WRi9ci+zhED{Uttu&{24}8t;=fD{3{y6}n z^I$6K4dlSbMDU1BW>b7SV+#fSd3hhO3}o)#Acna|83oK@LDVl@uM4Ds5&RgPW5b|F zL7f%O&7iK$5)n>sO#X!|N5j?Fh|B_m2PQ*S{l!fw&dYGw)-; zSSqmxMq+ECmttsKO0rQ9Mu#R6Q}Xl+mUTt*J`m2MQ}lZDX_Fa6-vDHk82O++3lPYp z(ikuX*mVyVF5?vASgRJ`RT^MM)?UepT8$8Nn_~S0q|&&qR(F_pO$VQQX3oM%<2quF zNr?4-^f^`~ESsit2UysYEaiyp0CHK!+UcE~a#B{a^bhPjrA%5%(2iiU*xxIk=^_-D z-DY)AjZ?5q7kRG{_0Dc-3=B2{uC0ydxTd5cD0A>Ae3%;S0)mdNY0?1K9~_6)b0qhN zW{`L%O|0ZaU35Ckwc0fEiJ;708=h_(-d=C`_76Y7_dop<^&oJw)rw7@9Ysexit>a7W|!zf}FQOrtou5-q+toWV3?Pu}d`JetX zc=7V33Z07m*s*4l0}{p1OQEImsT90AEqJ*uI2G!xj_n!y?ahek0BJ>$*miXv-#ab% 
z@YM^v_u^5x3plf*(K?-$)nr*bF4U8}$PV-a*%_-Imaq=CF?k;P|3a&V9<0`>8?4>& z=6b{9x*&9sTqgmzkOuvHyW#zZ3*LY6NQz*`&`r2$?pGAe0FK63-aNxl@a1#GmromZ zbq;U)K86_*SWr%s)1de{OqReDts4w`ZFpSi9(k(`Py0RxXBu!`78Y>WQ9wF&RvtvV z=jqDEqR;65r9F74c)L|uW@|6R^JI<#bfboMt8~L_+EYdAX(h!MIpfW?kIqvhc!3NCZ7beyBp>cAf?_ImB=FVs5jEw+W{O4Q`N~2%4(l%d zxvni}1TmL``AAVYz{8N<;VW9b>fOa*r+F-<=XztboYQr5 z{Wnq`1yLyrayc=9G7Wv(H$6wvKO(V>I2B1IyCTz?cytU>6lIduzq4*J-a&W3vXHQd z!S6g@-9@bFoCgiW^~`IJZj-6IUUb%^;W8HVD}Wu~a6V$e5c}C@b!BvNiln(ayUFee zFMsMkjW2)g-$TsXz&Ja2Jw<41bb-tarL1^(c*N^BuN9B`ILu?s#(IsxX|cxD8yItr zvT^4IU!8HWs^IfM2^(v904dU}n2lkZ4Ny`u<8SDf;^)Wtrq>n)cJFo2Q=2pzmJ9B? zKODfF;D6!I}AaQB>>l38xyxr>#9V% zVnIhK?3yI~KoogVl*c^;_{rEGmi%1n0i2!I^XRbBy@-Vr=U~jPGccz_$DS@`*qg(Q z0~A>hG5Npkw^;zXKm>HGDXi$b3IWZAat@BG0tLejX;q$-;V@B`i0m^--7ELUT1r$U zc$VHJT=)WnAfKN()w-;JYZeK4a&%LBe84w8{utl+Kffe8xt&VJMK-_BM;*F~y)jmumkhTeO%Ttw+Nl@WTf>@Z zIh#@<9Zd1Glyzv9YOSIevp`wO3Dp;r@~Am48)~~D8DqMcmf__=E3I*6clI_#0uMW4Mxa>*o;h&s1(2vE(ToZ@7Lf$8Zg}*n++E*HiRFq_-S$wz; zG|7_TvQXdC5Ds6uevfwmn3eI&;`j94QI8Ggj?2SE#Rm_-^H+Y)IQ&9p8iiE|1IL>( zQlv^Q65TQ;Y{!mXp9bDIsX>!s5IoxX+U?zc5D8Km1xJ^LEGMFA=cTZ9oYUyO0n+f~ zf^uP;iLM142n*mW#3qte{trJl=`fg<)~3Cz!mMk!q#MD5B%Q?QL|_(sO8(=Ji{BLm z)$GU^y3GmP_wgq9(bwBGE)?(v_d58rp+kxUNoCx4wxgqiSrA31CB7%AI9@N0gPvK0 zNWS;*pfyR|9oL6CumV=1SPB~0w0E2umPtd>`U$F$_LAMb`&f5}_|nwR$l9?}%amj9 zImUhs;Nq^HRPxVAzFsb9os=BF#}KV7%GZ=una%;oIZpei$He#mrQ-WL-u}j)5)dU9 zpy)&@gRjnmwJfKxCKDV_Pj3arh5f9(dmUPzL352wVR1ZofDkk@DH`-k_c1=-pL+kr z_`O|;Aq-g76(y1P9|MHD*${27rAS9v1fpe_=+~r;#rr9-o1|7h0 zP^UGzb@~%$@?*&(hnO)gCY9eg26e}A>{ycFvJ@8508YzFPQTW9f@xlY;k2w+mIamz zDiV+~NjLWhxhf3^Hyxw+H7oAoy|5Qk=-D*Q?0d!_r zT({AQst#O_hU;DtI!+eljWcmtmG&sHLjXhdjApNp9{+ZnKllT%@-Z9>vbqvsqy3mo z6UJ|Nv$43zIl;<`o(hl`EX#s*S@3u|At!b=v(5|}_COAgklioyzV4jc<7RT-;<>ft zjJr)#}0nl;6fWrXr{KRfVw7Em+4<$yu)*-O##|?FRM%}OI^?>&(w6v?zUhWQe9x(15 zzyA4aJa0RmueUKE51^i-wW=f&1EmZEIdGVfm|zB}aEK|13~l#@UU&3*=pHh@@>~8^ zl+z>1vH(W3&7ICPW+s7p@O7hr;@^eiM6}lt2lB{U0=eQne)rG)!@3UFJPNsPShz#* zo_i&=hA%b_@{TI{%V_OeP(pkh+`)$p?ZLfJcBTrzAN*T>r}QChwDhySq2z*7Df%7n ziufn2h>^%ZC|1xOicq$&8a8lH*zNUaa74m$XC|Qgh&Hn$j_z2ELeTW_9SM6MEA>Yb z;kz{qN3Syz4M4@8>vJx0a zBo0Z)1cvatTllOsGyS3)a&GL4!&vdQFLRSU3k7cu$$+!Qb>VoW7m% zg0igqZ>@pB>vxF`5ktu^4t5SCO9ROZ*OHO6B4{^rzZu@`D8QmKBy<@>pqA6aK&Zp1 zjEPvnvMl6!N0%{*UKsYC~}70G0Gxt|6)(*4^>wq7aQDvX7$3d!Ob5VT)o>fW#jDktpMiUUwMCJI^_zs;fOZ z)HXGFk$mRpChB)JQ72vhpp%Z=>c)89ILpS|<)%0ziDSBu0fxP@(~`>xr}G2BxReGJ zHfDyDl;A?aQ%Yk##VKxY|$_8Z+G=5;WGAM2m zgQtR=SGm|RJ~1YzK`{N5}Pc;{@i;7`9*EqSYIErJZ!P*n6aJ2l@;r zh;?m!(1jvtZg*?_WufgbwRfZkZ9z{p2A&_T|K?QgKE?nXoi3G0*HRRhOXyNH z?<2aKMXq4Cy!&Mjd&To@!}Gr5dc9(oSbVPy)k#sS zjWookz2Z0CJma(5F(_Z*4A~r{5(%7Ja>gNQ+ujML9!DKCx6soDg_7Fej_r_Wy;D^G zypzST2K^2Ms;dJ}+fM3S?YPyZZcN2f%q+>&tpwFEx}>LdK`N&a7souM0DnA0_eA~8 z42u!eKQB>m#{0>5dHE2(`FH&>{9XU>KZ2INjwLPZSc&3ope^sDo_3&BR3xk){R}lU zM$o?rxWC9W2*&PKZH`s%nByxd&+I6)An+42UfpU(>r+&|?FT-)9X#&^Vl9#Yn4}t^ z_bWN0!Qd$qe8}uFef(2Di&JI{x$T>vwSnZRLK2#ulIbuH_j+D@4V{I$5iA6_!P~*% zY~&MGCiU$=tGis5D$oz1#(;sX>}b;t9=Xs)m{F-O+cDU*R~?-NeQ!~4HKX7$oI$e2 zz9$8UWRr&C9{6I2A-R}upMDdDaN(Kge)1}m4KR}^7Bhhlag$j_UeAbBgNaWX$*@_V zgdOHLxF7r+Ck>#K1wf-f5EJna3)>Ln8bUxB9`%~)J+_+yn=DD8LkL61V`Bs7l<|N6 zbAL&RClAxV?|1*U!CTyp9Z%anL`IQVMR(`EonKUBWEFjD&iE65u#ByGoer75({#n$VgQm^yCTJ_3w+ z%&$?Q+c-qcEX&cvMjJMjMHH=f6W7=ef#N0=Z(sRPT`8@vF7!9(E~79rqOnJ<*tgr< zf0zZL$BP?u3jxmRv_Ub91NiCy2jD?3V(U1_lmsC{1EBbLw+Dl+Q+JB`V8Gw|*ZuS; z9^!f3>m`Qn#T0>U(w*R>(;VBCg=tC%^9YBYS;~VN8?JEdtP#}=dSG|Z^1JQuRYgNc=w#W@!Z&cN|ds zbO5h!J56ZJ@Mfdc@Z*d3s8vk!ys;E>U!0DLE_$KXSZGb2_YM28!$3s#qlXjDnZT7h z&%xF^^NU`Qj73L;Insss{!3Z-{Eaa*1}nyt>(Qx-wdjt|_NqB#1W6yyXRQMWXvvD< 
zoV8I>xOO^o1?dC~>`3r4uK4_WTqkasvzWC1TMzr_K87WhoUwJSGhny`R?Qn}?zkOQ zRvJHaj3|7-Va>&14pGkD~e(>!)-rgfHI90)|~ME z!$q2=9jMpQnOc?=>#`uau7^O4z1ESZRe?05bw$D5k(0yw8+`Fs{@3`^|I+^f*1nJC zlj3D6`aXYt#`AWF=I&!3w#MS=qu>1p@c7Yhf?2{=Y4}jPYi19-flmhw3!3Pxa9S-} zO1QNSw}fRq2`uF3guG1M%b`NoWpnHfbf;xF<4{(Podl#3unr2iq2uYhe+`$lNVK0| zAAI-?=Jg<_-aR;{a-PTwydAQdn+tO|+H@aCOjWBl((>Tr4|2jtfP!u%6gN24_YicX z^_F1ygyB}wDq>pfy^xnEU>=7?1Y);9Y{gMlk{^SmB3RFkh4|Adx}eVqG?>W^4~?8D zz`Na{7YO9x@C+mj=y{V(0ebL!mhvmqxbO_KY2Zn z(=iJ{_Z4mpN2AbAxfZ$`JYVp4{4>9dKl8_aAGX(jO+mZM;%;7hpqhb@cGwu2Hxdzq zYYyLY&KXn49o>UT?--^4en-_4s=>TR2j*Cs^Eujho^p#2g~x(=(dWRP(A&&;22)qW zkC~dDD2`3kkYdoFN7*47Mr$>6zLuisCLIYrlc@N;AuNQ=;t%?F@VK{HN0(%*Cu4WQ z-GN@mKDjS6Jqj=v1=65yOdjSDwa#Y|oz5V4opKt}$D@5sdt*Z4@eGE*T6;EJn-an* zHTEq8Qs}+KI~ea?-=+J{Lg7*_FsEzIcGOX{8zx03*1#}K7=JhVuA>+@Vlp&re9yZE z1OS9O4_i$Ai6Y-p!sBVdQQMvH7{h6%b`HdX660Lxtc1)VR`7X!K*YPSg~fOuwGmut zqQ^~|)IGg&4(kiIuXq+=b!}z?Jq!dr*%VK^_95Yy`78h$VuXe50EoJDiCb?KiXj>q~1=+u;e;VL)Yr~WCWz+0Pp#f;_^>QZGhWVjMt z7KAyMu@4NgCvI&d@W~R=`hY`)YIok(`QZg_+YY3nGiq1SIK{^IRJd5zNx)W4NyRf0 zVv{&LA^iJA=`$jjmRH7*-tm=l#%IsB0X~M7YKm?mYq_ZAlZ?e!!1L%ovTmU+*b?PaGy3Xm&=VZ9Hpt#C{Iz{1; z06PQueV8pc*=uauYoPBiuNpY1PNHe1iaSoH6HX5=@bu;9BbHy5WyIg-?LaZ+WDXyK z@pWOMo&e2O&(FNP;4i<9Vb_jga?^5lq?VLWvf=G^aQz#f zb8QVvNrP^+B*RT~woQW+PlW>MloAUW6A&RFz}-dfqMk1SqN&@+EduB{;@nW-SQSg& zJ1sao3@?Iq$pXPZH^0a81D=lzqzh^{EVcpZ0A#4qoo5`2;I6@;(gK5LWOtyPU%=Y| zJD!nM93Qe?j-2i2++QbkLbyND7;d;M3mz{Qn$UO$GJ&XVyTKjU_Cxfo6&Ve;dhi`A zC+ypk&Q`?)t@-HUnf6jx6?e5TD)a()SSN4>onVms@R6p%~4W{#9kVP#+w*@H3A z1~NX9u$1X|;CDIzxNOOQuf6vYKYY!WcdeTBp@agKaJ}6|iyQ7kr|`UQBaLv7zVzY8 zA7kHcc>CoSa5pUH7wGjF=oL-rM{6C)8;~aQw#jlW8N?M%a9iMhjK>?=h5_Pq4Hpa= z2xf6;N_rrwS4^?*?)d(fuLsf}h%a<#hc#?Y8O^%5jXcg0q;b8aY%0F#w3XL0>h=^1SA}{LYAIAZ746UXrulw`)(hmpmgGY`2UmkMEwJ?w zeq6zs7xPf>ogVkyIpw$Bb?Ay2lCC54KSKx$3`z$;foANM1RGthKuMeK&1ig%u4Js% zNzV&jdVm!M3hZJ}%>5mP6~H0-;z&+a88>86%Wobk(_Jq?F8c0@#X0gFI5*!k#A z`^B7*st_$bogRge;^r9M$&IH00B;yIX)J=MG8*q@|*AewCM_zR!C z!L3bmhUiw0M60)9LX%ASgmcpO(K_5R*207oihmrUO18muKgL>!eaM2t{u2}%gN?!@ zsDMwc<8*lm->-E3=?*tqj$TeD+>VagKF3lLyjPqS;OSbKG-ek6u@P!(ERN{! ziw@WtzT9{2&HXJqXszOCJPdP6SQIO}67lHTkowdH-LN&ssbq9#LR{VPt8X_1BO>A< zXJ*YVXosE|I@TqLa<<@U+koEj>T*IkU(jEFiRb-bhefux0meiw7{x~bB{?T5evLrt zrDW`#!)bYbIIgXslm*F1v+{_a0Is#&85DHhpJMzeCoClarQmEGz!h(<8;}9(z|kv8 zVhl2q^Hn7neQOOlK{_WESliPVAhU$Ap~wuTl&mg{Ks*>cB^2$Ct+JRT=+5<@?jWLV z-*MD7%v;u!(AqI^b9yR;%oNao=xioqStfSL-T)ywIEeY=i8$3&^2Zf^m1 zl&rkjj7T{cWO7+3NGwGyP&qQefF2g19jDU;`*s`2S?^6#h8b{k@aT<))DOOVqwkwq z57j*6lo|u=dU?e4?dw@=d3>Lb19rR3ISKBL#cyy^w*u@OxDNhudZa!uWqm+vJCYec&sZp-?>MeM(1E}}87=us&Uky&QCtL4ZAl1ydpEI+>o6nd zGKN@9#FT%MQJ2u7P|YhKD$&8r2rEp**yJe391(J#>nK7ZSw8AM63(C&IHn5|JPvh? zwG_y!$KC+xG2!QCI%ttHEXkOla^{8Y&+jH?U7|j%iP-QX3AUK^ARqCZm7oxn;qbOJ zgnCV_`Cf1Eb|?pCa@DoR0RH!`1ZpJn;fQ4u{Q@W==A>r@$4CHs*c93*W@q9jiWm$v zKm^PJFrH=zT(uN<|D>c8;PJcSYf>dOHj$0m*2;dEH`2pOSGzXKO}=t}P#l zLr!IA=6Y{%WEd*oU0{#RYm2pL9Ei2oHr(*y{kZl%0K7Ua`1y~&g1`9rm5G*3;d)ZB z!1v|S`BB?-1fBD)=h6eg2l#K}l<018ud%-vM`L1qGTLdw_c3i8?gJU-_wjQA38Vpm zx){-&v}uoPokGo&lBQG75)a@W>^%^DM=^)@1ATvn8L5b;^8=3Ud90pDA`K8l1$XbA z@2xDWx+c-dNh8?1$KXPq@TYa9p@(;L(cOYNH-NTuH}DpaYZ8Fo$M2;Z5>e`Y?Ce}P zlOM|oj*MD;(!4BlmPM>nk4~HoJ=d3Sp9J6#6)%bVAtDM9bSviGPi*Qp8FNYKRi3JiC-J6KHE4 z-+#V|URDNmE?Hs{g~`)rJO!-pEWqs!iO!P>*}bD3J4#A;-gXV$T>yY~=1`&+@hfR? zs&gHcTwtZ39tTQU34m?S#6+0i7mCW#i? 
[GIT binary patch literal data (base85-encoded), elided]
zsn3aHbiCRn^*b57{oQ}tpTvjnzsJg6iM}UDi4X{ZiC49xU=H+M&teleTw*2k+f)eT zYK*&tuWeUS;h>6uY)yb1VH;o0o_B6q#k9Ne1W*;dM31oXJnJ*g0LvUpqSn{^sBD^! zy$$-T86H~2-}g)36yV9YnGnNToM9C>;_|bQN1>g7`zn|Ik_zC`>{}gb0`AvAvaahT~3clzW-3KsU zFz1p+3r;K1GG30Y?YJ}!+;OK|UXuz2@e^GgZDbgNr6I2bXQuCDV5k6O%3wvGK+uz# zxYPs8)F@*+;M0Ki0P`USOnS}m3V@}`Z+zn$`0x^rpp0Z%o_*+Nk{NKXYQKzPMl>{qZW-Wg5@xNQE71nT@l~~ zf*S!a6EQvCT$=ktjDfDZ(&t30z`MPlYDmc(lJ1EjIk=G#SCxWyvbw(?pXn%BG8PRLc65o^()`}9r*G0zmsz&R&cF^ z&l*a*V)mPoeQ2#&fr(!e00dr*J4#zL>b`Cp4zSYWb7FYt;)kNmhAvuU+bGHrYH&V# z-i6Y3Y#^^aW(gqIK%xO!cf`79z(i4rU;9t~ec2{MR?N$mL%5W76Y#Mc?+${-$@A59 z|0g2xG5j^(_!xiTw|;0jL=#zX$ay5`BxIlgP7EBQcY|g)6X^Dc#$K zqo0U*Q!B^=V&H7d?m(%U?IT(KNJ4DGL_=*CeC?NhKfd&{z~wb?-hfZP^Ygg=)i0ye z4gEY}&Y49x4@7^%>|!-chq>drRnlajehjS@5Y;^ny^J$ARehXuVsAy$n)&ZOi)=vj zOUm{daNQct2@PA*`h>j%L}gVnp$?4GoU}>%a1EWTkQ*JclMj6$9kN=?XrCNI2Ri7&)88Ge=O6+- z=9*(-W;l-+px$1&KK z;j(>e48lr`Az-E!f&se$bHor}?&Y->Joka;+qmmFVPey4jZq*Q#nI1G(VS&5z*P}y zL8n6a_lkYn(0kA16*|X|?UcbmF~jSv;rTe>U}dhSJ&qnm-LKZr#!#ZXB0HI1NUjyF z){3n#>xMB4`{1K5;YZ*54g$GyH{5zSssi_ckH7XUZDs|Y|1$o{zx&4kf5Ds#{^SSa zzRPQ?QQCrox#AQ$VI2=&`rbzm7cY)@P<&u;@4$3yt>J2hFAf=X80y`u3BVJ%k8+;& z3@lC0T<964+-*ZE8%F0)nMHG`L>xw!(E!{}W%m=Gefnu~)(yY_FkDW#hI5ENd%hr>C-wKK z87}T$I5q`I2BEBu*9bp`5 zp5&hyF14UL`HcNMbbf$u|9Agx{LDZ3&m&*nRml_2^f+z`sv@|a=u_E$vU*hJzNa%d z60a!gDqtgKy@O%z@|Lrrr|Ku3fz8hoB=HeQYQQO%joYC>%rdQ3KVSB277{^WQ2>%jMZ>o>U% z+B*ZVwW`5r%)eWyIEDJn8P7M<_9f_)wgLGk5$4~pq3$1H+e~LTjAo1S4n_{@5Mx+E7Y#XoY=`L@^Gc2xaE*3H3-QX`f-|=5m^I@NVyq*dJcu zc=H(y6FyFO+&`ZfvnyyNuwVeDcbEac@`IluW-sy2V^vlFvXEw-CH1*PC4?$`drqo1 z5Zt(@9RnSh&#>0$dne}!=gW^# z>WQ!Z!e5Ku@z(<1{22H$fCIpfegXLP|M(X%`ibM^37_->ZF>XX4<;i%aj6BT`Ugs_ z>BFP<;IS4w4XT8tJ@yqm#C^FoHcvYi1-g_<-g}WqhyXgDv=>!P)U_C1ZyP?no!N&F zs*n*}IdC_$Rk1=b%(riK@3D|)CvfvJW>$M?GkEe-Jq&7jNIFY9Tug zejZBnSp}6+s#yw(lP(K~&8f2{PW~h%X=^%Honk-J)LS+B>cg@?s6*tSz)Gl(7A41? 
zNklYc_c>Tau!3q6qcDM=>Yt(T4ODUB{X3OFOlLpe^KmLE0XimLr24g&fW5E91-=plZo##&5;oR12z3e4R}7yIxl-^AxX`940o z9oY9PKKcWH7k>B`{v5{nLe*2*p;EYyAp^%*j$Hd34AJTXzEEOLw8oXGBicFU%VX!*^_<(D` zUIw8S>IRVN4i%N&NDWHIYaxCxaG;;i312iHqJN@*C^HKsj?p_X&%5_s$P*pgLT(~h zGypFzFH}NMvoa4_Rns)l{4v+is0hBJ2f_^G+)p9LGcsq8dSg|NPku_(19CWPCLm2y zR*@^hzjKa`m$TnZwPzI}0-Zp5V~EZo4lTq42zpw@w|?elQ0gX`LVm~mZ3oSjc*bSm zdonOO6oAJsI%BD=_2d330pLKvaLnPXj=>>_tX7f2Fy_QJ|5yK9_JI@xx;y#b$AHZN z_~5`ppGgVeg5EXUXF}mRul)X>`{04)%k6?~zcBGFMJg(!ZJnZ@Ws`kbQ?Q%Lo|v2k za+4%608L0$6Mu_=ltFk|l&RJ>4gr+PjQpPhS;OD+XTOc5)JE=V1hF&bk0jrGB6+(I z1>#RA_Ut>+NSpg&C5JEg@DYOxFTQo2LId^4@TXNR>=y1tl#(KiZ0e^V!eGXC+ z-*WPis8Kl^(TeIgka89wQZR=bAP0kwNBL?Y7z7()3Z5| zNsr2gD6Sq;#Fq#{(Ba#`JivTeCnM?u()Dxpq4!E{$B`$H-6m=V>D91gk*GE5dk8i!>&C^q=Cz%mREp5jX zon-bCqu+3QeuLxfr#PQJ!Q+Qt!k53rwLZp}bFIDiIk&#|)vPH};!_k#qA1FcZAC~F+e&0eQ6wO7eiAq^;3WPD z0^}zKl&?5&5adHZ5F>#QD2@RGmLVvy1r=80P@+gtBo$ID^!O^i_ilYpvG-nU&M`*M zM{jM6dFUfu-uuqkXRSF$(TmnwZ~e*B*O8YK%5tI&%fW3=IhtEB?}92pFSk7aN%n?S z4Q(57LjYva^A_jaXd^@3RI@x(p`5-m`Wqv_d(L@7-RD43*{2b04I4GjkLZd05a4OX z)WUNp*<0uSh?>G*#U&Dvyow)s^(2xvl}KNEjipLX`L$8`aIsfVy9B-D4(&BIp5J(0A%&bT`0q z4DQP(yx0v(S&{P+Dybe--$YfmQWl)n+pyp2?c{Tfp_Z1B${D5HgvzRt7qghXCEH57 z(lP0H*L_Z8*I0$G|9}3EY@`nrG5NsQiLj5SWeFh&+O?=k+OQR2GBXOB;SCYLRmPJ8 ztTJPt<($n?a*jdREtDC1*IjDE<~0J$dZ&^MH%q~q6JCDw9VG8KuPa*b_~h6B)`$&T zQw|aL<;sDFwY4EWQaxzH(N1l{_S#$Te9?`~sZi{d^FR(&3cPMR6uB6=ftp}D|6&?6 z17NBy#eb*CGll}qc=%{OT%$DPP8AU#xXnJwcSC^%LXsR~z?}{`l<2SuKt~Wmbwpnh z`VxPphD~wg6%S_);Hi{TQ*T4a!ySy3YZgHo2VI=>(Rfc(r0)9&45StyUN^w^F&p#( z$WbH(HinziipT49Ts_h!0uoOcvs2b21WoB4ALgq@Rzq zh+YI$0#qpx4M&dCY&wZob=1jSRboTKFBIMoYpMZ_ugE}?*9f2IN$Bk`SZur<6Q#8i zZeu{35L@*|ELk%%q&hJZL#W!ovvRX6a0_(_1x-^4jfoTuFdMGIDK$++6;-K^B)5^$ zm8jpJ%-G{&BSB82ZY`%9t$Ger3cs|I@O{!1|sZ)|W6XQb1 zA%uA#-T-MyJl|aoE{*4(r+T6?b6^z{ZT^x>Z}hAberAOR2`)KiDV(3YiH8@TqNiK< z_H2B1obgFHOFVClw#E6ZnfJ>f%7OEY%w|rVB?Vhqz>fNFoIGnHx2guubt7$1@6EN% zEaS_LXn?1Z-Z5vz`zvdv1Vc(<%|lhHIz|F1o8M{s)|=N*FE5xNhOKR_hEM-i3eYFKZ5`S;+9U;ShFz5n7{_=E2NUj=Z> zcla)Vp9AnKKM#E4=YIk3|K=~^+4p`M`~5TAzkH5%eL&qF(e@3ANk!w(K@Bc#yGcr>j#D-~5`|xukO($!Be)*^p8vOG z^`kH_rR`V_NdIIx9BF_Ud#yD&R@@~+JrZVDPwY$rQ(l4=N?PGwugK;&myE}K!&(YX z%Q~v&wX+Y{Ye)nm`m0QFs%4zl6HX=L+2bR?xTB=lqfyag#d{3@M1aDD&DxFh(R zUot+akO-a5rx!uLs;aU{k*smH?~@DQJtp!_bhC$at|MhUc?XBtCQ z42bhM6jTWk8N#pLEE$FC|8k3%^w~AUQc78Hy*^@RGkwWXh19bH0myFbISTqMg05M2 z_J14VV2=%rOR16c?xEvv01*rTkj}J9VhR{o72zdxMCSR%&3PcX<O&%$ep=Vi zUji6 zl5IUUEJ-3X8Sv(7PhvjED~8s`cfk)7EC8_B|8jl{dAS{EAbh@$oLDX={MdizS5V4I z^|UkwVoeljCIcGi1c=f_B+x>IOx2C|PG!6gxUOT!5>e6-)aeWmuFq?t)V4kRt-K)Q2@vr?J7F{WUddET8$rzAvA4SirR z0VpZ60T6%*IMPH8%euyQ0wXDeF%YMnljBUP9CEEK#a%-x1GR0WwA`H55kSW0bhPVz ze9!(O?y=2;W8D25pC#m|$Knp~0J#91MjbOjR8@)O&0x4)q8bSIsD5}LVqs^Zgs$L6 zO`oocDl4DjAq!=zeF{yCgbn&V8-Pw3ltiT`HhO;?KG4unGabnbKMO#;yc`u?RpPCj zu%4dATEpm&ckK5cW4qjAyWF!Wc`73gnb7NF{GLUyT}lLJ>*;)$R~12{s(A|RO?L~k z347&mGVhMMU(xm}4Mj%Er_pHLJ)|q6e?nE~326PuTTf$tis%s@&RD$`IRyZanLumK zgOGaB=@g79$ZbAnhyawsZB;0%b0Rv+Ql7uYZNsMkmA*GFuZd$j8V z&z^BgGH`|boK^zEzIG;!q7T95JynIi2raTg`1zw%C+ z17c<7Uz{hIVl>W*L6io1zUIQJ%Tz*1u3sc z>l38q7RYBxorjcs?`?p+m+Q!3}2c|tpq^fm2R|^U%Wd-oJCtJ0OIzv z;*-76uQBWb!qzhZth5V`sHnSh=&6!tCeE^@nV?O|h4iU{>)z-OI^KM8i|#Sy{_^-R zSU1fbr=q6wQ2I;=ez-YpIKRYbh~+$cgYXsS1TG zC36_TUCJ3{IV0r-DHSLQ8#E>w>(TgahUYKu4>X`7C1uhKv^r+2{G3gEAH=hbvpRwvKCg*RlnTmvgHoiH z7bbVm6phXC5a*e9JS9XMR7NT*lL98ZV71OdrZ#3NgnXt0NFD?0iYP9IL5q?~$IZGD zJp>bhFzjVHW4(F8;8n?I2YQ9qYw+cTteT;>j?3i{ee?v#<{0NlECX>;La+5#3MdCXZw9HmS4^h~`SDHp~Naz!V2cX$3 z09+0Kv7RaCK?MVYY2EutFKq(=Fxrfq%eZ679NuDZW7{{hx=qR`GYADI$+8Ry&*B^a 
zmlEk2NffU<)P_0?xvVJbc_dhk)t$Y@cV{*u2T)W{Zr;JuANVtHJB_nHz_byt+qlcW z@zJvhX2^F&q;v?XF(xcmD%l7)h}W8u>!OYbEtVZmv=xK0IsWfdii>2gvI?=I?-%Ts zdpth8K(AM{eS_Bxb-MuGVcvso_wXE#p8_8N_!fX?06qcm4FI2Rz>80ITpymHZWnC( z9LNfr3EkcSI3s&K?qT>{An2H*wm@{;a)IZCAV|ig0ZxUcK(kquwFQ=LkWX(Tt*-$oWLNj5 znOSirYYUXVvntqoL$4d^_7Wf6KVYNZ?LDf6ms(>OA!CY_1M0*S7&Jo$;$`?<-fMym z0z`1Ii9h;B|5aF?3Q$9pmG&kQ(>N1*b6i^j(u$a?5! z+G&7r6L=50ew_WvGd|$7o>%2{M36FWILQ*Yj%+XH1qfm4qqG(@7#g2aQ2UNj z5_)4sX$&%RLERo_5Xa|0JuXM?X( zJ%jRmKpz3Xu?nO!Gw2yKNHLH~&V`7>;aY?OML{wL+T17glf*Pza%NdYlZ2_9zk)|g z3(O8fnsNqQ&F6uxe)i2@LP2)BH z%d%qkK&^Y{&^*NOqfu(_9p{_iUSgjJ(&LLyalL;|C6bI}4X~I6lad;uF^K8i0drL< zqC{+hqHJ6QXd1mU8IMwFsMEoK; zJc2Y(lAL^$iDO-(YTy=$fp=$vX$TdFzLkWov6P zaUE0S^@Q6u-bMZBJ3}NwCsJTVP~sA5^?Bz_4cVP{^VPl#=MYrcl0q~qmj%^wG&%P+ zK4Y)!HA|GgW#5Mln>?$5noz4iN6F!7S#aHVN^~A^Y(A4$f-UiPrJy1uu4tx|Vx86@ zzB|#$FRbjm_2dqZ*DLNHFJllF5Wvc{s%jBJFg4279m;83ju<$){(s7v~u=CN@#7zetkfFc!q~h-^XX){xZJ(MnV4q@H>dW z;{yQi{|4~wk?|b-c6OAO+AcIsnof)68 zhwO?w2l4Wp*4FWj@4O%Dn9Hc{m=IS?`jhE1Q%py1b;E2D##n#%DaauToRbRd{JF9O zf*xlk>_AZISh?YIFJ-}A_W_bcH7-wp4L$;OeLt8W8xlORLXZ;HRB-p?DL(t`GbZCv zIgIw2f^H|+>0O+jd|wPXe)twLS^y?JdMXm_kk|;K9p9}5+o7t|ndHTJjK$1maEsV& zrwOS}I-Hmy^xFxOZr174HMJYPqNIW~0bl$1{~S{1Qu3idw&a4n?!($}HwpSg5)jhx z4Tg~7y}Q zs^n48jpzBa6f7y>wbKFsc-ZS`e{h6y<5g`3z2aAB532*~vLdC1$LrC5fPg_CXIMih zwWk8Zy92OG<$WoMhEKLdu-5!27y=IaDjOIxPIu3##47wd0(J@d` z;I99eWy`71Bu6sVz=9IC2mz)u(?nAxZGh0SNY3-_x64 z!QcDkzm0tR_IS`zB0y;M3Jp>m?*>s!KvX6wUZpI74md*3-qa%i+`o7>uKQ>x=XVW6 zi7Tu_J*ZM9%88@*4WfuBB2dyGW2L1woBvDnzCdi`d;#e291sOiL(frI~(d}*DY3cwRr-;*rCogyX;wE=E}<7sqoSl6^?2K3&cc30-LFs|+D_S(H=aIWZ$=vf%Jy0Z_2@|5GxD#? zvd}K@s4-xy+ZFepegJQxHFf;KFTRar!25see~(j6aCEfV2Gsy2TOle*Gy^v_J4SQy zq>~uJFgFLb+5?3i4fr$R1Vn4<<9v+)a;#2?iUv|V^I<=(wT{HYV1v4+n-IM$K|{Gc zpARTqnw4q(qq8YsL7$q=Z(_ZB7hih!E!>_?0D@*{kidl|*-x8TYXs1>?m(*)5Kxk* zIVen;yrW(p!r70Y9swvADK=%r*hoyi{WwWV&LAGR_QPNK2fzT}a>;mliZvyCbX!1nkOk1syO^AEm>55M*+ z_~t);iC_Ok;8(r|{PM2@U;hWdw}0j9c=ny&!pl$I$Numv=AY2_`Je*JHaO6pLw!>U zObe0dIKzSLindsC#)&i?NgF#N@O@>@0F%vK2?NiZ=5)mob<76$p#Kf~go6(oVkdEj zi_Ks!C-ytk8q)YYLwnsQC*5f47<9`PAMD$8+pu#oxwD@_RTtg+&T5|~W=k&m+4JX& z6$&Xlj4cx9i!FCpZr{Utehuq#f-`w>gv@YIZQ{EWP#5~1g%v!rkTE_lF;KD|=vy&w zBT+e(lwo(=oR(p;!74zlb=;*GOq_AE#I*Y8G3)4C_;jtSDhr+WbHq*Fky6;Flp?V% z773KWS-Fg74ZEP(F$Gr6@U#%v14_0*Qw)J1O7V9GEa-OM*gM4{@39A%(v0 zs?Pa%=htpdqt^_Kxt1kBewY-!8iC8W^9E!5IQsw*eDUcmvPBHp*juu#q60s)c3U)CGcUf}J5WO51^4=XFN6i$Qe3RKgmPQXJ+N{yGs zei5@Y^s8fNkQyTmPj626^yLFa5J~){9pY~5wM$;$8ZHqHOR3E_2>4b;NC7iIasZ^kX5q)+HezA~a zH9rSJrNmk2|D;Bm8BwO$Q=$Mdr9hd~G#FJ0Ic^L)F=c%*664V4=lk%zOQw#zt1l-Z zr3ID{)ej2YBu^Jhi*OB2PZ5u>uZwhHyAZj|DF+2e6Xgf*m?&z@%sGHTLQQS}451KJu$*tPUoQ;c5&;|rc_XnWa>{$IBO8tIEh+;F9!@t{PiI`V2Fr1Wdfk9V zTQtsMBH$HIyYF?{KX)vJ^o|}*IQly@G!r=IjEXt41W?s(CLrovC?$`I7Tl3a!M<(t ztSER-vu{k5byb$fiAfo^rxU6htOV$l%8Jv?9rEc8w@=@|lQ-YP?OUJ6?Hljm=CwDm zobOQJxZFR(Cm;L{?mzi1UVQvLZ1>NwUoU9;6*o)9!;8Y==f! 
zVAz?3be+atg^pLnRbsh~$u?16CexDWm0lWdo7O1||P>-{{98>5hC1b_ALtQ-rcU75> zr=yK^@Lf9Xw33gIy>`nKAY!i@#Qa%tvt-;~CuK>darE2T^e@>ANWrp**<_Q#8}Zkx z;7x5o`OXxUT1t){Y6G6G1;v1uTf<{*qv{c0WFl?ZMtm2RKPvTm-Lcooifim~rEIiW z!kQDd7HhZh>QIa4+!BRubsiCDoLV(RnGZnt>S}9^(^uv#RYZKD( zrQF&=W;Bp1AR(a*5xyw8_+7L+1#<}M!R7e}NT!NQpg-AS4QBNYBk{@> zb1{;8G$z(M)>B+nq!>`CQw$R)cvcr115R)AKB;bWa>2DmLH$$=TMyLPTTtRcd5)6JOtjPCtZEor zr;!6}02`gkV3CBz87MH){!#>$U8u332&w>9?Yn4W1{OJDkWHw9(ub+?JuITlN4+RZ~+Du?v7UFaAKPy_=#QCoFp@;VRbB7q6uvPC<7#M589j| z6x+rbqZPCavo_FZ-8WT)TW=!?3tvOKr$$y)KRv4HJJ(W;sqq*@U~|FlVapO6#-9mC zZ)h##u-BZhv4UXB;_~bx%V=bz&G@-tjtKErl>40-cj@$%(E1U)$vHkVRDYPENqQo=_2pA*=m z-Ld4%3YhFCMgr>|&oL8G9=icZ%t3iH5s{EH5o@HIC$p&f^g}Yhv#+0fyap|ARQL%# zB(pK#FO)TTH&Viq4A<5Hk8?EdKNz#vxIdhHGm zt>Y!9jY}aoF|QhYD0w zyn@otibfArO~^-6{KF6mPr!FtbUak%eGE>K0|D274eGds$XAsOgo zhGsGIDq&8D521)XhG2uwYlLokA2d%fJnpZ1_;>a@8e<{{RX`I4EzC)k-D?a5SHD_R z$2y*#R@CO$YePzLS8|HEE~E0R^Nx<~@&JRO1VYKCFLCNv=IIQ6x{Q5JsI}u^+c4B{ z0G3=MP#9%I09$83>>O;@Dw?zG+*`=yS(KN|g9E3D(wED180v`_^Dbk=Ah8&AhvgGq ze(UeY0;ZS{EJxZMIN#jj^7uHiO73sSU@{r-UV&t=ZtUf7qONUxw}Lt$agEfMloHWV zA^n)h6M%I+qXXEtozkE|cMCLIGQNTCE+w+%cK-FQZ34UC1+&zgSaOoub3+y{Lf0X_58-?aQWp@%KInhaBFS`$?&07p8$xwo6FJET>v3u-X6iAV8O|ZX6$%pLtK3QT+SIgC$Yxe!~vZ-RE$X7+OLlgYLi5Fs_hjDpo6_SbQ0Ns z>OF>?b*jz@)BpxxFp#~JaDMVS9$!9#s1Tht#MsNS^#(`CU5`d(1bsx*EQ60elR?EZ z$uxsTG`P4SnlO!r_s&u_(<1Lf8d>PL9E7S#iPXNhbHGM)TZ7Srh;?`DHK+oHm<3w}RoeFIHp)zp|MIFJ)5Np!kaYk(eu)_5@bVF8Aeb=a^- zT3(UU_K1>H(KdK{Lao^>5u zbkc4Hc!QC6G*>b&5QTJJ-yw=`-2E83SaK#psAOaY^r97KvlvR2Y=A0>!Lhd(Z0Q6k zoIeYn0e$0HdGgMe@$7qFKLq)ScPfwMT&FS{`^7+6dNbr@4ZrH~fQ|NgoClxh9sq-m z=zU0LLpg8XcepE%>Cx|D)7HT-`p=MZBp;TLJ)`a$7QW+V4zD|~-J`?swNJi-*Vck( z4-a_p_<()8qV5||NssI;o}W~t#EM$UT>G}ey0d4iW8rgbb*flp2K$azDXcu5Fr8D6 zYUR#gD11RA^Mz~*RrX4@F;G*0*&s%ljfBDY8KeIM9MCHt*!ln%9U-&2F;JdNigtFj9(0o>Fe(=qw_)mZCAK*{_kw1#>^3K*Cm1%*$<`uS_V(nT0J;@TV z#EpB{c9c@kYxKEvvSZ+2QaF>j&a*3;cxC?^5MV$yLtYB@y^aav%qPN0(h^0KeIpd?}_s|z-)2Wg0PEgs_w6H*sLh9HIuRVPOFJHdE z_2D_Kf7q4xP6h8@_Sjzz{H$ENg}7sKY|lz{Gd zxv7GKv1hvkgkZF>0+E9u5PdF#UkXU|Dfp0DB$0(2l@~0gT1G+U&-|6IbHZkaTOI)i zj|cMU4rb91T*;BGZj)x?G08Ln7bYwms$#anUFCL}r76d%V7K+4fLuQLP5Ayub0Py9 zy(DMxU`NIph*=$0K{9~+E)$aKv$Sr8#0H{L7O3n$=n*ssr4%en!3||U!?3kMkuu9j zmIqILHbX@-^0sZ*nSmz;vuZF>P_&TOI;k8g?-TV&Ip(QAlo6pl3@jBao57u>Yf~RU z3|Y%;C{1)`GO9!8yX|Oghmp@KV{St5m>@oW`rT1x|Ap^-$i}}4zI_=qr$71qpNFUcN<5u=xwkflaJC&~InDEDQ;jNmBBWI0 z!>H;kBV|}wkV@fzx{PHxg#uH`*ji)tM62pVa{EwMs(n&r#Cts=tjUBVGXVk6_H6{P zIZ>9?H6I|9#Lzcr`Zbi;CmWTHWH#4sR1W&MyKyJy9!a)a$QZZ}!BgHal`b%2Im}f z5Tmk*dWh1ng}Q?n_W%eQYVBcXZk8pUK}Zrrn@iAOvhYi<_CWy{_fyI+)Bg4;1$^vv zN$NT5uYAv||4eyXRjB>&y`xW~Hx85SaYekG>5aK*YXnko+!aFLH_AwFpWZ?UL4Q z+O-%4NSajL;C08oUGRAS0@s(H;gb))gKvE2oA~6D4{^DF0k0cQOF=@Y0k-`b>SdCL zN5XQx!`<6oz;gQd`u=YS$dtS}0nb*A)$s zh-?J4c2+y3xD=IbAIagBy)cdY8`YUKl-_#Xq8Do4V}2bRH|)WP06sw6-W&Z+!+-J3 z_oLU!HBU-Fln!A~kWcF}26so`AB+(1byUbQ6J@Q@^?HSS$CcIe^SZ`)=bAM3UNy#| z+g=BaQ@290yG8WK+c%Jw8>F%VlpA&cU-*4LgLi)D_Xn6D`F0q-ciBfpNIpQrqtELK z_l|Q?1<*-zHbC(%6&YW=coE63&hPYE0Hz{lfSx|8>JrhA;%GLq55D*Vzt8LA^TCmo z)M#c&dzAQya}7B^_Yo|acib!)FD}-G|^`T_3_ym z$-+4Iu8FP-GD~=^#jJ-wkpsb7N*<0y$hupGQ!>weh8)R&H33`iz@U(z{6(T{LZknLOs-b8kX2JDMtVHNp-46jCFS-oCTel~C@^BrFPEIiAgr{$R4$nUP zZv1>MC^^(z%5oDT6!j6UZqU?so>Vr+1tCqySr}0UD?W|(prMKGCCAG>&N&S=7V(`O zM1x70QP6m-SPuKb=UB=@L`sdDoz*Qw^S#Q;09vm&tt&2<3;%AxwZ=qr_0RzE9V1m7 zdlMC<)ACnuE?Sv>Wv~`6;PZuUI1Cew)*a;iX z5Eg5&EF69{Xu^OJGE<`DGr?vK1fhIVcCoZT^R6_jp|g5XH8+8K;SO{JzrE0}o=zvM zcdz65rym_0Irko6XTEqU_~yfv%RU2b^=V})EtO=rT?+26+ek{)$0H>6ye^0PV}l0M 
z8Ni9|g%oElvF89L$>~us0WdkX+MCqWMptMwi95sH2TqaJrM=`P*wm*$fgs|!AR1W)S{$q)r+j0%=pjSAtqEJ$!%wr%>a0o)o=IR!|i zekH_bX#Q7%j|xk&0MTQ%P1qx2A|NrPu{Uudn6!kC5%^Mj#0obhqambrm$D!&C!Fuz z#LH(Npw@ke11zP$azWiUwCyoiBa~Q{ow-?;P$lb~vr}x?yUspBftX8fZa@mmFq9TYO}?xhzmKQaG-J)eP6V4`fk| zsVdhiz|sQ3LUCcrvnfbpL#t(2x85 zYLM!}LfJ-5I0ojht)}-I4%Y^sHZEBn#!yXwrK}-mxNRe;NqIq_wUdAEh{QzGK~>?g zTq3a>!Du+<*;swVgXDlzuG0`L8pR$LYFuu!!L5#f-N;>>LAMO49B5Q3!;xEFSo&wv z4<3Rct1~LqJsxo$I8AHA(KODizLH zI{h}@VW@n$BuZy+9BLb}SDi_8R3C_5GU3Xc2hv%c0viF@5kBZR3OHxbF;V4_2=d&3 zw6m=?25uR~V7MI+%O!`JPq#D_Omy~C%~9eu67iflSWIAq(Hybj&>Nfng`N`eR{{#^ z{O62L5eYlk5*%wTLd4n;%$Vo)*cVZ#Umxod$%rafqjy3J^9ASr3{1=?#N|{UK1b&*U6S1dJJ_I zhH2w7X6}*BYwHfA5=n??UBkv<(0k29KOiWjq*(zOzd=c@o?Sg6Y4jb=|D7%2_kZPc z_$Ocg9jJn=1XvVY_2P_ugPu{>A-}hs9@xkxl;j<60U+&SiD|!nRu27|#gEaN7 zF}&{(ThKRWDL@fi1Y?3&1B}30vnskH=8p|vfr;|mD#m*kabtyrKV$i1{zp0eCdkhGd%pe%5Fzk?+Ov*WY z7<|MuHwn0O$IZInvR5jfrUAl%=k{jFIC;Z8seO-|WRuhApFENF6O+-WtxsmOd#oN= zG$i|B{Y#DtV77Vt6$fdy%V|BK?%P1)ErlNnf1bvj;vMu@?(EnEjb&n&0D>2ZphF042!sewrK9#1Aj`gy$_g}(UNG=3C8@!=V_lZ8 zzbRp3GP)3OE;(k}oR^Hdb;Z^yuC=3_Zg8y?mxl-JWIb&=`({%fxxxftF)I16In4GZ z0ne^MlT8X7XPI`E*Jf}PneNtk8Ra;6dE^i>N3loKm{d?`+z01+2ALsWc@KEdE`-{& z)+rSihj!aH_DamGHZ~~+&cwt_DxstT^`|%&yY5#?qbl$rr&-24vLU4? zRhjYG-Z_Y_59GvPF$p?zNFN8dX)1l|KAKZo-NShquJ#^;)%LS*x84(^F*EMtANuxLe4DLoW0|zRcqdx!6pjawUjl0%yy*}Xa;W_rp z3-n&mdc%@t@R=~zLNTMd5gK~+mIU6L=;pAk)Cd9pKBXJ*e(1W8>vLibkRF|L#eBsOAKX|-} zZ^7J#M^zrJ2C=yk*6 zr|;wC^G|TSe+J(kv0pC%9C)ap^;YBO?w{eKul))x&pzb5mxfcupiH&i2V>#1EF)Nw z?~z4uTHUarY%JHHHm(o2S9^^(4VP`hSN_W1glLg5JTTU5 zt92%;2#N4;E*%$6iyzf=2cGS1RxJeF^^UslLmg`lL?%79M?6wF4@^ma3__iLZvqRm z64k_f0Kw9nQQd|O4UnQ?-#7H$u$WOz%#ja3Qx#1S6HMAD8QeUSZL3EmB8FiXu{E4srD1PAGw{d%O#-IE%e;&W^PyPXx(+R)v&wq8;{6c^aj5Wd7HJkj) zVXxisvPOTCC4;^D-WOcRWbkp-q-4-E@hJc0p$rfxjRru`fRZ-!YS5-@>BCXTNMduRry*EmJKEU}?P znL4xaW5cQr_AFUO6lIo)-e4jfGrAhIFFypV%d+75_y9@u@@)-kbMfSW*h!e)SnlrxXdTqodZ4 zB#nNhKrL(AUrCO`X3Rh$^XK3pn+w~j0237Zw1Pv^lG zM{j^j`jCySIe5jjM^Gf7pyc5BW>S7Mqh_y_>L;{cAexyBfY5s9c?2}cKADkD!4$sF zYFd|rJlY`I=s;#870SzsecPfTJH@o|c6}JV3rpePNC4hjD7vLr=@f8BGRKLsjp{Fo z0Ml_kt?1;2i&`{#cv#_$0Fu?zln?tm&u&zyBT#K_&{Xu+wwQw@sxZL;d7+VbLct~j zrV@_&vQ;=NY~lI@*N0D|(sr!kV}ea{Ju&QRtpvFo!0<0=4A0948wT1v^^62i80X=< zE_l3LFdCD4f5q-8kjw0e3%QH{*5X#k4#%A;Wr15Jy(*6(dnIAa8WH=Uk(whP8mRwN z+k>S*e^byt@_K{oi;v;;3V21UG5#1p_6p2I&)sn2a9JMYhTD@`&Id>%|z%guvXqiFtN#i_-o)bP9W zAIZEc@$B@MN(2GW2H3E3y5D6v!P|zLrQl!seLsYM@BjH9;s5c!`gibu{d>QF`|9YH z@#4jET(=F4HX!;c^Q-rrJEXof(NuLFQi_dn!-Szf|kWZi6yY!S~RZFKuYDs@(KC$HroBS zp_vK^9uvgt4re?&__&xjz8R1lhmihY>fB*w;9Nr@QqPgJ!E|zZE*S*Q)9}6x zV;cXSfvKFpe!Uz+wZor9GgWIl(z4=o`vjL4pN*tJP6NPDxDi4g2`WYetfWE!TxgoCm4X1g3Wwe#yItaCHdxJI`AwL5=84Y=@mE^);TT2V($J?4Y~j;c|uMF)cZx z*`az!`(6-C1@#iend+or@ecL~92)^R30Tw`0nK%97!#!3@$~kD%YMbBYlaYw#sQd2 zF!Z{^6YmGBxWwK@a^_S&nkr7tgg>ErRP$<|l2k6dSnTRE;5`B0tzq-anAZ?TjETzu1l3*c9+KRdcP!?Ukz!_t zzCKJgg%bWYl=OU1qh~(|a4OLw(mlYNMbtD{2;%T82pkjuI!^30WiOJf6|en!{_219RX#D!Z@sg8lwGp=)ji zw@7`=GY!@Z&R#_r*2p|md>&ss8xCVASMaiHjYd;7*YdP1GiZbH;sWj!y>AFr zvKa{L9gn>tNXv|l^?MPCKnO`V)Ji873(M4mW%X$E(`SyMn0jU*iRy9EevgU*2HMxi zfu1N4lk47L-hl2zSHWh_kfxTbZnh9osH6>e?_spT2FK$$O&L+uDkwP=%My{zOsUlK zX+?L(T4MU_oW{%s!jy*;Xzw!Gn)=GU=YCapR3?wwYwywL)2{cp?mO<@`g|Bf^-Gy% z&BQs3>BiF$ZjEyjwEhtx9|96pJ13IT95I8)9Y6bvzryz|yhqMwp7-bG?H}geF+lE( z0bzsp2!h(670uu?lsatm*O(=I;VVDH3J&ioD=;|R;kW~_PR--F|I+{XFCgcFx?SUZ zFi;D-Hc=>ryIY$QlmJGcDEiH@^ga0dL{Ae@^^`L2ei@WfHIEBbm3r8^tWhoEnF3L2 z?Sqo8Qi9i=Vgn{XZ*r8z$-{6f5g znkf}EsceL{kp;y$pq1Fx0p1%kxr1$70Y;zt0_Q6cmh)N__mkMzGuUxjgUi zh+i_R6Y}@*j-mrai-3HE1K8@0+MOz=;gi)js49p6ZpAj&eGj0-*nkd1vP~azT2u6g 
zwTiZ1saO_wqh!Sz@!5(U;v7VOorA&!WOWy)QZt<>|&DBclbJav9C8nsUi2i;jb)a~3rQgA-d)in1-De$R!Yj)>oyFx>(C&Yh<@^a@^$>;fAB*@m>Un3anUUhSFcb*TpEFa z4<(M#_{+r<=Ut^k=VigcI7>N04Vp4^GMp)AEG(_61WIh127NYWt2aRbP@rr!)*+sc z7XsuaW74CJGy@$vtx-5co6p8ISGiU}R5;C0%4n5MbTs0V(i8h52L{JLZiN8Aj0l{K za;sp&l0BS4>*`bNxF}+9TafC>~T*;W<2_c}jv*E-!fw<}))` zS+QRq2LhWIxI3r-k_t4dArhQG#g)`bRyodisC|?q%8endH(}4)9%t`B4}&9`O)aDC zmqQ?EV;yC9hE9HuJH2E&M>^`n%FIC3c^4(EI@DmutG_K7mG2n{ZyE^7%^UCG^8BN@ zpO#>z5;=#9v5|VtcQ7m|1$BXECRCqhQavath6lvcEcH(L`PK7Wd8EE}yAgN0*O1s$(h8{GC>p6tDd^p?RU-Vk z02WlST-NX>`(vm!wLOq>IM&k{^?He5Qa}J-axDdw;7?*Qm`bR~t?L=vHD*nzsXs4k zP+9gp?&zStu<KS0NO>po%*Z^0EM~D!CC}>?|{M>CDNngYxZ6rt&e2PERydZ!R(lrNRfM1ik8s;caW7&TN zfCHEUFl3!oz4n8|u1$KegRF_X;QadAxIFvlkZ6UKP^C=%J*R|aU1Po2Lt|`GR?mvq zDT(icgRj{4ZQRGQEJ(nh2&Otl}Ao{Btg+N-IB};7Pu97vL%9*km zck6=7K|M2Q(UJJ{N=an+=nXlxxft)5dI80jQRO^Z8DdpLtJd=`xu9bvwR&H@(`j8% zyNAkJF35Qq;K!+ixPUvWW{`rYO|Y#C#G3mVwv@@eo(t2y7CzsaIXE=u*h|JV*BT6n z?m>f-FisS(j-XRUvye3QwsUTs61_P4YD_OeMp~;-wZZ7z!|Zjy3YId1?=HtRZh4

    =K6_7a~vDcj}*C3n37UP{J1>(VGIj)^|2uaCnYYbn@W4e<0NL!UReQXc2&@hLu=cLg3;f|*mvOf;&v z2Ucn@LzH|OLZF6cXLtZ6!zJl$*!Cl*a!Ni{9h^^MCkjS=Ahh7nTJ8g@*cp?fvHodY4kO zk3r4zK?htgQ1If_E8LEOW4pq-!H)op%tAWaT@t!s=XD2zvg8O*6M)5Bw%jiZVC;#5 z78Q5wPjB)*(deMs9K?JEsv<6-I%uDu+Za8l)TEfI;v4^Mq&=w?AcXpCKqXi`&-5)~ zS|Hca=wlj_dHd+2PdILZjW))?cwS)bg0VeiW^-wcTJvhc?W8izW?1kro+U{uIiB4u z8~gyLRG~9la$J@L`!Nv15v5hnWJp=E2|WlzXrbKMaj;az_Xq@vh)TP_{m5)sfqXQ6 zjokQFYf9Rlt2nW!6`GPAgBWAY%px09Cakjg!1HFLcwaU&n#I7gwxF8h*bf}XhD$AY z+K)*kP}OC4q6gxPDi{FwBel3Q^DHR=8c0hxq{cyKA=|cu?*4W=_3qG_sW-t#4&`wN zn;vIr-{FZe)f!E~&NBoDh80-7L*E}E^^+Omv+_g-}%F6%eNi2$7Gt(+k@DN3ecHA^l+AuGsdCLmNp0IR(z8 z1S+}pK%%3%6pmEXD)D(0tS)V#40T~eCKP@KxX&m4cxemT%sA8}@C(ICkv&MviocWEJC@MsX1hErWyBX^mV z$5RCWkm}P2UU4BM$+UkKi7W$O4e1hyQpvYbfTZNEwdf`2M8|?sAfbg2^r%(R>yvts z)DhZ7-I8jCwFK{4_T0>pemF*t0c_f1S7W5*^Y)YEu$<&bXBLS=yvPsKrt$2HQ z_(`t&-O^(CCzI&3udr!9NOzJSk`LDBKxcup&q_J#Xk^wL2F~n}H7uGOykK+h1X1QFj z-)^j!cM8cE)@8-cu>(7idYGZMm4gV`e`r(TbFCG(<3Mc<$9}^`fVDBmKKD>LV$iGr z8hv-z&1gLs9&DQBau2ft^euXhw_DJUm{_zLeMM!t$HOPzz~#fQ;_Yw#C2a4%KtDG0 zW6S7AeT(jqj0Y`Cl}4MOb**_Gqqmea(Bnv^1)wV2dB0=dB0g0G@f0ha;sYf*#Vn<{ zk7G~JCjnE-8aH5$;?duD{v`17E~oDWTeMxcA; zgqpZV+1fk8#G+sZe^%eDHviH))oE4G_U}@+&Pe zWq6eM6-*k`92x`Ow3w+%;6e20Os2%mm$;bt%)zl^4~B)qw6dW1lzyvWRj||s8=cZ` zI!==(=^n^NM;IRXz;#!k&ERqvt0{v)^5m|H@e&saaB;5V!64e1(0(f_eKE0s$9xJI zbp<1I%2M>;Hezrfjg^AM4Y#o~F`NnqJ1Gz12g5?I|GGbAf+m`q`&8nRfp^z46nNh! ziV91^2Y%<7Qlt+Lpjn8JoJE`$W3bk$Wl|Sn1VX0*C5(yQkqq3e3si0DBhbmNfZdRW zeqpc##02o4{PLffh!~$KB~1B7nmUaUNgfkbI2rDK3S0ZI;Oh#?w*?j>)N)?EROJFp!+ov_ZF9A(%)03P%Q z4BaoocCft7cha8x=YH?+!XN*0zcdl13`8QBi94hT!IF&}F=$4Rw@`(X$$XLQH3 zvqz<9@jSZwYw&U@uEM_GuZ3M7Be2D(|MKnnFo@lWFL3QRm zd%Dd*ld<;$eQXmA*E5o08tqQJ=d1vIwuSoM@DbpkSx8Ess%H+z`x_GivutjTp_@pX z0Wk5P4=G≶Mvo{m5qTd_s3vU7-QPj<8RW)btsXlnJmH*I4u~@ZHCDe)rgmg?oMO zi@mHDynnhTfK+Qe0iDK*#p&Lg$LA>9;yqnk!`6?Wz(Vm9cm@NhcE%FPhVcMwK$E`! z#cGY|<()Gkd>mL>B$P!!r4V)c;t;zLkTP()UGcB|_x?Hnl-uncx9d|jgiG{7iiolU zVuEz~re(RrpQ%d#L%pLFU>iLseSE({&`ABmq5*+Ed5{kms)PYLcz0AMJYEWl57fRz zqr3}r9O>!YgK}SLD0;QaJ(l~IdA=-@3;J2B(Z4mVSo+CEATfGnNONM<{IV=~y2T*d zvpQSnxl)A!rks&LgOG8se-}Yn^_1uh>K*!#&FaTszbs?O#@TtA&vxjpU{+B>;=94& zl*BIV|KR;3fDwMt(R5JGs#8`B#VhLFmoRRh!7$i-8%AXBGwZ9xdD+-6q2QT63)H$N zT2`2ew-N?I+V2OQsg-EvDeH^akmD)P-V(v#(fcWIGi*^M50v3J(l`)+hdZK-Ym>_! 
z8`9uqMejFC)SVDIUOLF=jtH#dGB|2m=qPwavnh5a3dO?cDiDT;F@TSl_t6g=I~5a@ zU6zsqz|MU*Q3j9c#|x)CyW-b$3V{Tx?0RTW4oacXY9HDDS7H0 zQp0fusRhWu^~{GA3Z*Q?p`aP~_~C-bZO1L9kCDG9DnP}hg+ki>{au!2w|$QiwnxP% z)(iPOLbvw)fE5ncqtP}R_Mu@|d>AD++M-fpUH1J54lt&wiu**@;Yz@q32hiGCH|!} zTt4^|e(dn;Bkv`98HUubJdIp5q!Ha<;4@tOP6c>94J2s93r0>v?>z`ET~#+HopLxv z87SVPG62ixZbaZjSCEq~CD3D#$`FblF`&vVdd8NM=6gDIQ)zwxeFsK^cfieI=(z4j z*ae*tE9l!(^mg=)QWyO6FZ?I)>%aJK1GZ(79|33BoXYQ0RWSLo=Q~Cx(5Q!h4xzQ@ z15U&+?1q35*^daM#jQ60760;|`-=bys}F|lcFi-c#IpEA&GnsO6Fd6x6n}=cF@y^u zxTzNCxY-OY3nGBhtN`a(Q2@91fnKO~g~{zLr4qHA3HP&9!bRP&OgxJH7ODyp@`LmQ z*#M-is%%`RvXC)g?jx&}QCUX@&jDNv?48L2k^f=g^vw&(`U3yNzw;;X$NuWS8OIjX zpJAThitJ>mLDx0$njL7%g6+1YxQUN}r*~hV7Q<4E6^aO$`xvaEh)HmMf6ofDlG&Mz zOlq$LDoCPg!i7$i3F z`ul$o0N6JgHexB5NyquVN!)9>8rf>oS=*2KG$mY%)w6q8)nlt zAXFY%L6EKSG4OP~qS&O8r@v$GR(x(C%VCqE8W2=m-9yWBFBXMEePAQcnjXL8aKeJR zUSd9vBY^2z(2pyw`<6CK6lcq|L@to=y*lTmBpr%}A!iZsoSgdyqG}to|5jsovRi<0 zt*)?AF|cFM&uHDHp2?FOiOPyZx3E(I>UKQcB0gKz3vRa?tN$Ql zZ7OCN?RSTiV`t2;w0fGeLc4j;zZ`c62={oZeR#)bX;0gD0i}v z4km?CQfob(F;zrPs71eh28;um6y8c?`I}F=j$0Ug;+7#1>dp>$P2Y@VP2{^U5IrFt z3Rz?S3)WX(!Pwqodw-40eRLu*5%?X0O>X4fDpQS1ykbGeRdTE370&@gybwO6)0U;d zs9q&%QIt)|)MNlxN;^?|G`|9we1V^@0MsGA?!dMms1d{DO&w7?os7aJGXEWSk;e zlJatPD#V~+X87Q8!L2(?-oRuG4Yhff?x*RuWRU~Lwys*N@2nF z=0`~jSeR%C1#^$8O;>Z|X}Wu8zQ-Oeb-@vkATV|!=b|FToo$E;SglLmXE!l%EvO1s zLcy(XL@bGzTcKkl6^`L@QBcKT%uGrmp%7K;lEH!QTJm3MINhT``O+FLmkX}9E5^RZ z%$5tR0XCNVI?pbx4QmO9jV*uw)3`bG9 z!u)hzPZOFE1XN8mhP41@P*AO79Fd?tPeOSX-RU$+t|u6X``EKWRZGSFWyRyRlgB)f zLD{n59*(KdhEb5D6w4%TaL$0}Wa9P~Q^;q~&{YRc(!-v86$Q`jPp%~tcvAw`5e{uc zB4K>z!F6^8%a|0xr5ZlGzu+hD9(f1j%pZyT2Zb~^>K(Yf!yo&-e=Ek=Gbl(9#0u)t zn20qvJ=ZZpIq%pR1fMI79$?qDp9h#mg_%;Roalamxh18m% zxw&YyWq<{O3xns-c?WfKj)6q4?-vfh1QP)EblqJpF~oEPS}|!ZjBRMkrbI6j{xR(W zo(0PRRwWQ{ip^AUQ+%SkDu703VR$qZSK1~vIY#B&XEUPa0}QW1Ywvq%Ea$|)%f{QEdDOfVBU&>x6yf3Bv$f}K(eW#=h9ThVd! zpw1dR)$4$WDM!+%X@hu9YM~8RGL7ivEH%=NV$na#hn zzbt5@<96%`=76G05r-BOQi_#?7e2?|_VthP|NOyg90c^Tft+Vz-?j;^xWJrQMO#Y9 zfY07b$-QeJD&r4;luFOUSKvWN`#y-Dpna}gmhtJ z^L0$l4GNfxO3;C)&wnE_t*%)jaXbe)#!w^7tiA}vkBuDIz>+D4wKN!Bt_|<@j>D%m zaa48+_PysO?8kH#MhB!zZRu13-3_VS4)2yok}j>nm`EHXk7n5RE7aU&0HSj;c#f^D zfn1M4Bs2KU>KPbG42!^MFnU{S#jVStMgmaNd60@UoIwhRbn1N~xI;G76P1DJP6OE9 z1HBA+-P}-3=a2baJL!=0acVt=1zeylh;yLm>;vsWR}&nEK8Cqr%F=(@&Q`mEUVc=w`*Tu6Ve=!^;;hQQ95M47b}YXi>#* zclQdfU%zHrgADjI*s>$gr&1gC0Tit8}kX?6wDN$0L2Ys94K|6 z@)7WR|Ipu!Kl88r3pn(21*2SGWyR=Mzy|J`;b}WhytND-+|*nh zzi+if!e(*SqN$OT0Z~}>zAm~HGgSroq-k^_tOA^3`uXu}|B{01nD^g8(Pk-OWrvbC zI!4J`qm)V0qWu{>1FfnW#Afn(5BCUG2OJf_MUUVn{{AsKe!Sn(Y(#*`lW z6m2lBEt1ML>|GP(XJVZ6nYIGQMizvj`xL5BPh?1ltNGTbkR%pG(Bbg~ge1(^Vb z-W~ticYeewf}yMr82ug8{}JbFvUOUm37nHB&4&9pd#yyjnP#$8j$v?%8E@Tt^y(Vd zS+var34k|`S5`veoJuwtR?uuF6%Q{z!28#q!{Joy@{

    q=m`8H!C#;`S_e@*%_RB z7Z2LekJ!L|%J^geDoe_u`}F;qGI31dp&E%35!m}QAPz%q_Y{T*H~v(z4v9b)-q4X$|wK0YG8gL>pjv zt|Oa}tHtzMX@nb_xYV!?>RcC_Q#57hiy7{git85ZcfDP+qOq*^=so6vly(O{ZrP|> z7^nyRtFReJd~?IHT*5H3lG6J?M<`Qa4CZQd6bdrPA)S;724ILL?2w7o^9w%#DwquR z5s6P-(C%MFVs>oVccI~ct^{Os48%J%MjInEQl|4eIlBm$nIu?DJhntp0=2N>&n|kL zn@}yQG(66(hE^MDUD6;lHlFbtW6mbENd=hfBrlBPuOwWRzb{2D2TF0EEU>boJ^VE8KlvMQd-ERw`+oql8$i;6no0*Ea0i1*De}nJhg58Q zS0y}FY$n9vxFcv>sH~;w=Awrh@9EMiDRHxL5)8hj9Oq!Avv>4TaC>^fpZu5p`Pk7B zNZB~{M32WoXFwis#F#R|qINlc{%c>tfB3y0vhQY`sEeW?@jg(l*-3Fce4>j0X;0lb z^%|6Ojl{#)Q0r81stkOK89upxz~@J$^W5>cUBf|{X8>5)tSySk&Y+|_CnM7wE3r8k z;3<%PerK2*)<&v{Xm?24w!Zur*S9}GG0I^Qt*dEIjeSx~hCFB?a0WI!{G#ef=;GX$ zAR{z*=0XvN=WE>PRd}uy)L{mt5pWoT9OKzyJ#-A1hG-4aUijzz6tqYHBvrEzNV}xV z&zMLi`iQgcoyj%|#@I-^)6mG^`FEc-wYHYNA=H+Zc*k0!PpFivOu$1Npc?S__#Qy) zv!9Sp1Jc$g0fhzaqW6Q6s+H^nRq;j+?^C*=q{Cem3p>Ab+$*1mz0&t;H3Az-@>b5q zk-V*0*tE)E2Btm+o;Nb@hC77G$6G@46fAu;P$%pD2GpBMe~Gm`ntXRzql`!TTV5c&Ptz>lH*|HKO7F;Z!=OBS=FQv!6urv03HK=?x((jU;h4&Pj)x#>3#$V9lo}~ z>H?GX{o;=EtOr;?{ZNq4;RK90B(+hZgeh2jvk;5-m`<7^<$wTka7 zryd&hT?`99@k}tN#`}BTaiF#sstSQw04~d#4fCOb$&@Iol4a@6QvoOb7(N7FAo6w_7sXD z2xl8}EI9m*!HI$m4Z2cPyl*#_9OFBV;iTkffEqFxoU|e2f737H8ijo7L{}XO+8f`? zm=MQazWmB>!*Y3G^3#bPM72mK3s8bI@KYbYzz3HF#YP%yHI`ZlWf>t2g;Sq_pj31! z7ag3jP?rnZazS10P}+*xE@6QAt-X&_?9uz9Hmo0fJ%ZQK@$~o}Z{EDck3at$pMCxX zzWn7+aU4BSx=u6%c@K=>EPqBCP8g$scTbN1gHj0{7=yJ`zfl0?;z2vU|Jmy-(Q5j< z@qc}A5E+B!*V_@nA1hsMR61jVE<^(QE~p?oDKnCohE=q?pTal(z(0j`WUCK0e{=Ym z1`a}NMOy``iA6-UFZbCqyqThXZ{=U zJhfpk$?8l>DrHtf6vA>{Qev9+QFiLgo0kfi?vc_#B~=Pg@0ON{hx#bC{g{D)JBOSd zi56<|IS>@0KEf}F?%<~b>#kWm$ZEnEM^F!s*tfHS9n8>{I~1<1>0snugzsprp&v(p zA$+$B2l6c^n=dL{!O<;$ai_2t^~&J+eT8XVX2{&79=e9f{?h37Fqys#C~Unlum z-H92?1V9+)=5Af0GN$T8R{bZq;)4v0(KDg?nU7xb+%s^Noq(e*>+EMb{hIv}aR-Wp zTx(3+u6!?0(J_*gNR|wjb-}tWK^d|DTGTt|rx+}ouXB&Xx5qHgxPO1)yFbAFx**lq z#vC9TASb1Wrk{&ly~yS{1;N z<5Y!n=ir2N(zHijvA1_kF%Po;i2xp)rK~PT~A7eT*o9YaP#enP4vtOiHoQto0 z^Jnn=?|d7Q$rTO3te}-hP%f>7hBKTFsF=h1SHERm8~wLm66v2Nq+R`QkvRfF7PD{x!Tk7#;T zU=@2gFKA=p=wyEI8bKORttH`o$`!wt}#8dae{6Z z`=jTgttjn+x?F(LfMdhHJ;M6Nfj@EC@2g>VkKsE+0Z>*f*t18W)rPiSBFQ`sJY8?0 zIy0VE?<6xDmAcu$R*MuVWPlsH3u;+m#jq|F`-sZy zpyFHxkbH-uN8+`%n!&)qdwT|E1_d~>`f;LH1m$xEM8?F*5I~EMBb2`~kvpO10yzXM zl!|2yoq8k6B;(6EKuCwTKsK!yF6#wlSpkng>!2iOEcKYdsm0{$2o&O+effhI_`&OU z8LY=~E13XBg)WAniJB6@%%sxjUI?htKgY>D7ArB+rnJ=4kIg(0aQ%2G~Gd|5~=!K zPCh~XXVqA-BE2*U+%sWF+eC0~#sQH;X2ivWs1}R}}Mu?oL4D|~Ydz?q_wm2}J?S7`yD9*Y@-Mj@m`ADknH-oitTEP6PNS8~$LYM~02 zf>in|(YobpGp^A&<2yG}+O+mb3&vFT%>SntrBIZCYnFD_3?|@}>%G((6&R1C+FtfDRVu;n9s?f3k%_{abFAHYBJSO4en{>@*4_bZ(8+s<=Sn0N??3p)|j zI^;RG^zk}3(PATDmsEF;^!Sj^Qyfth`yg$FL^Wh%BVHMqh^tZCEGW{k7o?%dckp}{ z3vjS<5ahZv08tG(hU5OSrX23kqbd*;l(5&wew#_vIRRu+su-b|VPoHu=Ai|v!XPEP zhyyF`?(cEAd%*j*Uj$9rMQ0tE3wa?h(}LSp$#9SV?XvtKnf3zgb!wbCwOFW(}7QzlG!Vucv`(tzsc6xl#A|Kd$UCIFLxiUSb|&X3#nH z+@JIP&&C@%Ampm*IkmycVwYBf4g^)6IHj5V^lU`+c>(1#a^iTsL*JjmFr4;9rbmMg z(_qs%6g%~ZSR^?;&w(mQ3JwdJO(es8Uh?3{>aUed9{Lge3$?8gT#W;V_?3)qT3rH- z1E=BliBFJPBfxclI%uy(=r?lWxfegtgqF^(HPXa!$fuUJ3)297u1ff=x@muy&z zpsqz@b%{VN26T25?|8S(#=4nt?_M1&X~|YmLO*Vur##DoT41DgM}DzM=Om z9gRM#@a}_wNvYB77lEWDP@o$zIQdqzCB6F$+L5+L0ZOZl$oLE}qPj#HV>vle1O=`U zVF^);fo8z!AdATC=8DKsUZ`2MkUp)1bHIq>fcJFuFOhzQiLcN{N71K_aVYos=Bo9u@{JW}!w@Y${MGpzf;7LBY6XU@~bpajzFP+#j+xuBvp) zy_M~TH3VUX%3-{#M!Vxe(skEu4mhj%Tohg*C^7mGwqA5Ci2pv)hKhC-3Yv2+58;|X z*ttG1QOtBN&wiKHbq#|gzz|7hW2N*Y*9~ECQYZAZvAy@$1I8TBL^9y1{8kx2>sb_- znyLE4XQkp>vFP={`VBfxgc?^3*uFISwcDdY9%KC_L3iPq+KNK}eJ*3qv zXD10Uufcc4!G>l@;hRnRJrFU~rN-Qs#-@BF?Lk3eOh%vDN&)=b3rU;OVg^|Nma}r8 
zq{k4{xQOpaxF(oXYB?oY12a6XDUK5rGxlSc!F6UZ`$7jn1$zHD%Zx!|kDTH!Wb2&f zZ2-^31VqbMAKTi_0~+J+UDgHHZD#@k_CghyHPp7EwH5pJm=xoss=veX&l?ktrz5IU ziutOkr>$lTV;yMzgXo=_W#t49TqeE107UhlZxqT%gQCt4;;* zz73{g5P~#DG&xG)$n;Ezg9b!T$lcuqYm3jlD+Mo>IuF%|&kE#20~$+GOJqESoX+8J zHItXY1ZwZ63y;WvD=$Y3O^k|yP)=QhxJV3Oj;BN$`Y8y}aGNMM&c>wCV*uPiLtHPf zAY?W#ut#7Ltqf{ZBRat6?Y+Y@h;vzTuIpeg(g?~yy+oyl7gBvU+p*`_av*}lw!qsR z?mzw}zWjUsF8ucY^#1@~{`o(I%d4-z>jlFazSyE|?CFc|WQlO!ws0;Z-tV^U7(Fh~ z<1GdmZO4xHOg^IOVSs20JRccMb4{aBdSsyBh_yUOB~T;35`Bf>R%YTC#?A8%Cdy&6 zM=8tJR)TKU7Wjx{K{wgBzV4zx(rFv;VyRTCAw{Ym@Ufw9uhF;9fa5KE+yHd6rJ=S^ z0IDUDUrE$EK? z-XFhvNgMmmzTjJ~#2FJOgV-91K#Yq~f2dcVM2`b=(_VI6c z@RXJKSzYmW{;_`rKm9lU{UNF0s!EJ!^eD$P5Aq?V&*0N3LGJ{G`iS5QC8`Tnml#sV zLAdASJN&}D8}JDj08t?gd#JuJHaH{s0=}nnULl9{E7A4#U9&Y@|=r_YIv}rIGHWv5r)H)L9u&fGMRTCi`%_{NSTV ziu!<**A%#e$!{Us0Zd9m0<}Q-r!xUqNy&o}wD^yBM&TO~^Js*CRwXH>0ny;DGHvpa zdy!Ndu95D=v%$VROk#O}3FU>A*rnE_TXl+Mv`dJhh%OVc&A{W^H}Rbe&RYV);b*GW zg>TsTJh$G%OE3wD!MX8I#9)ep4vgaw+xuU~;}`z{#_=Ya!8Lr*Vq$s(yb%lDu8)y? zFqV_7uUWxTR{|cQo1OudiqclJ1ICku#U#}Y=xB;O_(&O5$uda2!_C}l}h zOMW9NFCA(|ip#+lAC8Cn`!vLFZ(ACH&@wJrinn4j*eOJ;J%&~V)nG)Ua*OPBQnw-# zzO;st0eBpeZQC&s4K-cHA@#uVBcRKgMo)J$(p_U4_3X~1K=!l?-D5@oH_g$bqMJZGto0Y7f|SO1Z}9pC;p|4%WEzEf6s zO!`(S1s}e6NS~z3qJ3#Klw*_}6b3y~odM~PLB(7`Y=YG$`JB$=O`sHgbxv!3SeD>d z%TbqkCPh;bz@R>qYNy^Ilg+QsK!Ql_X0Q=W&}_P;4Gt`aXO)5}qFk8)f9(JDX9>K7 z{|(wssEop`@VNb);^^%eI@;DD@le-;1&Kl&5@hk1?(Dk)Ja0Djxg{X#q& zZMuf-#pm=)>Utk|dVGY{M&B`2y0(48{fVv}1NC-5c9!S$e6J02eQR6u=X)Rd=LIZ} zaiAX?yk9YnC-~SAJ)4u}Ep;_r*r<9c&T^8|btBN1Ryk_w-V2d)T>{D>S9388|SZ zCg4fibkOe@nMx6lG8yBH55=dKOZ3dPw!qpN>(e*(Qbn$C&=#o&MZkH||NMDBT+BS8 zP2;*a3`IOJ1<4AOVU5Ng`QH>6MBp|~8#9L{g+HuBK!l6~W^sZ05kZQm6o#q7pv{}d z7eFbI$+2I7!2Oqg8j{~D5TB#_~Z@dHA0bp;0heP31e#>GC}+koa)?L18kB zgieXhEH@7#ErS)d*}S+~k%|v^A2fiX^&Hc;desqlE zMntONdw=dvU{{m01t@X1#EgMUYf#VItZ>eK@cj8iqI4z|%!-R|K&l2s^R0$dlFPiV zP|tzRYN$RHthHcWTG}k==F&zCM>yrWptL)nUQm`hl)9$96+I4yS+%1*XrhaO^Z^DG zw%$*rw=t;f!*?79_BX$Y>zf~;9~%JfF@DBd*Ack1&A_*X7oL0TYZrBB1V8S zF`s?3vl8;fg5d$l!vbKnSCY>PIHgRgp0A*46F}yA9_(dp%aSp~7;zSqaNf2z zSlBm#5!FP|+>TDQ!Xu`Im#Tz0Dv#Uk2{>**-(e>qfUMS9D5$OMfe>BW>dL;Wc&4Q- zdCtn+i`*aiMETE!pr4wxukXIVetpFD`Uk*Z1%v)v6@4Wk<^$jP|NcMlgTMIaf|e

    lsjd*rZDF-AmtH!Jw&U-7dOwC1y_Z%U#as*0N#%Ccg)dx2pgwiI_J z-c*!kxMNvX#E646*hQlQ2x_4#J_B$Z9oxR)=pEa!B`B4wAo|DS*g5wpY;Gw9%Ti$< ze)O@sPwtuM9){5N596a|sW`MRmV)g#YQLj_Hn`dA?2&R_jFAiBfa797Jl)%O4(=j_?AuIDm~Ns>#eXfHm& z)7zhf;Us-a#z!2zby=fackHa9AP;407Zl%^1US4zSxu$mIN+YtwxNS+DyuhGSpaJ& zh-P3VE}?a;(wosp&cHH|2yT%s3e8q2vzaT-I-7to|7@T;(ngO&uVn?P0Ht6Yfp%zq z1+*&`&0gjXNwK9gC(+EdtS|#J^=YJL6v%*s!K3IuOIui~UE$WS+`q!ZmwpBx{}q1# z?c?8u*T4Q}@$P&70dB9qkNxQ##&Lr|+}sGBTZ!jvE#|R=YR5TvXOLE+2@Pezer%BL z_qpuZDY#!mCdUl$mDt@c4NpgBxjO=o3eQHxY>NIZcSUlK3RIQwipFOgn3sr%s9*>r zcxmiP<)ifwW<1<3~8l z(D=_|k$2Y3@apb@FNjo5v}~fkS>f`jo@Q2XSym$Z)8;F!5N+s8WsB=rGLk54gbigF zPakqwmN@?|nk?11N?~Fg`u?vz`4mrYUt>E_uDI=qIw}D)BCAn#Z#V`guyeYxx$_yP zDpNh7k@{u%|7Jgd0^@8RMZ9Mu0h9I#_fUJ}--!TYu);gfuc{NX8cPsB6fOFu6mJly z!$eAw%1eu;?dV78X=gJLEwkBPd#j3%5a#F$2&siyD=JtGJO#Za_jJY)0RlvoHP$BV zg8bN=s}Xjj`&1mtSmYF62(4Azpn;_7qi}*rvauP{7--|>bLsQ+t^$?RJEEj`q2d)z z&(vCrN$(0GdCv9OnY{A;#qe9w#xtq+>}?D-!4w`4dt)YA^cf`rZUIcjlUDfo#X5L-pq?P)0PKl~W>^n}~{H>^B{qY0`*=fJ!`@v`xrDgqI>0#+6tZqmT? z!T{U@*>gUhyBg^NIlpd`&IU&wWWX@}@Z^93fQCcKs1FYZDN|%}3cQqR=N+K1Q4&IMt4*5f0J3Vt$1t89VQz22{lvHag>; z0a}~@a$aSf5ptPwLP9E(SDZQFnf|9y;m{)Pu`yv0&oq!z!!bkw6rvM*+g)ci(3n)+ z0245t6S&88XHrR`4Pv5=Ruu3BD)-|44(}cx>5KrUYFcY8x%PvDQ8oB;j2L(~#=u%z z%)KFke*m#YW>~6zKav~ID3oACh+@QmO^ZrTIV0T>m`1-s{N(6#P@_kX?K7}G<#!^-rVU9)Z4cuaC6=0RR9=L_t&q2fFvPxz#2hrN;1y6GZcLH;R*tjt*mzMV>Uz z+UxgE>Ba|;L5*yZro62K1QJ9WlRDjy!KR&;FPl`$povIq z!DD_1khGzoc2vY5#DXcoJ1YlDnA4h? z=FdTZvufl^$8ao_d+MGQu`$AqZAZ^@ z8JnaF8UW_I&wq%=*FQl&_Mi~DdN_UloqojgSPTz_IsVL#zDVz%lz$6GjPrc#+ckls zu=fPxV5wZE3ZLZ9OD$E=MjReKER4h6ghbQSNhwH%1!gfh*rvRy(YT%mkEraMoY+zl zg*2NNznI}TdTyl3r;b^|SLQB+6t2>h(XbR0t8i2fTr0S|{3xb)E_W#H0)P{TsdnTD zhoN}vyeUyG?ofkdXEV?&Cn62R)4Mm=uI~d~AEbng5fgxUaoj38&eep8kN_5!-GZk( zTzt8p8;pKL6r_0ry~o7!G4RBsZBScU7egEWtg(>ms((~M1>`unCuhUMcb2H3zJJ?y zq~7ysAEybNf(6^bS+3$B>v9ljIip!>W)m+uodc6)oz>{oI0C&mZrI*`j>pfxi|vc= z;&}gK?ANzAwku-VJOjYq@w9jDQ|$FS_V{Q|L@NR>(#y+2N#>YzeR=sYF0a0Xx?EtT zo@iZ_^GC!x*IFn@k&I+;hmXODzw#Ppuo}t};%gp#z|@yTL@udM3be*a3BxC)Y-y3y z$8v{bT#8Y-YpSUv5^_#pMO(mZh>L!u;2=Al_8@w7u(#pNxmFXO%!p9&b05lYrTb}K zf@Ytb(K9eu8|p>if7`Nh7H3Hm9=46gu`|ROHqr7T0e7e&Ic&mnL`svIXc&%G#Zip~b_#@<6#SI8XFF7n8D^qxd=Kyt_Mq|{M-H~Db-}XS zqqI9%X`u=S1F=Jh0vGSio=u`y7^K^FWNlaB3LZG?+LU27_6wajO-Vy9ZpaPuTaJ2}0#J81BFH zH{kY@Kb@Iz>Vuo%t`dS@3%>caui#gI<2%o)e#+cKl6673dxhWkU;1C*_U<$M;=laQ z;P(CtiY~QU|GI+N(r*rI6%c6)4uIkC{x1ngLZ*No$DzIwrznGuhSq z&ORK6<|h;;OWKP{C6z&7K+Zdp?Gw#URTUw>QK>mOXl6xG=~9I#8tY!%Kj7`-)4V?p zNWrMp$}<#B>_)p+IW0IodrD7>S`xGfRceG5O)2HcGnReD%+&m`{Y|#FE4TZ;@f!q{JS`=Z{f#=!k&so%4wVjB_Hv5 z?!Xv(?C)^2R4q96C{RJaLRs!KloOceTW1fN#N2XjgeVz>z*yKcCe7@N zGE94W96PJ+q2|`dV3j(~{@wk3^v6*U_UK1Y^q_!x+E__xS5rb4&%L&aZ9gJ-NUCMY z1S?SPipv@^pw9NV)@V9a1>?RJygNv(F^~@D$+w=##rzEC{TZjGU;&D?M1m}uMg#>F z3sumL-qE)!M5{A|LY?z09vp0@eGk(e;7DPR0@}0aYal(90jN0do_8Sb6-ve38_9TqdMKH$|>G{K@}I}PZ!xSj?QKb$#;}7kQ@CPb+%c)PqU%gs zn#nnROW`ajSVLJJ(3Tg$sPGNdH;lgF=zD@v`mFs3X>pxt-P2kd(2tW6r~oX+gisYh zEdrC0=|06BhKEe;(``%JU8zncbwk~wyJZPbv3H1CucSyq4*<}NRct2}kX2TALd3CO zbnA=cc6GM^-Y>(9y9zGr%AwD3@3x~OD#l@d1(ZfbCcdkWpn|$pN>+C)HAYUfWd%6k z5eQb%Y_~uTLEJmpG2Qb$R0Wg+a>h_JQZNAR%w;?q1ps+4rLHs-t#OGok3-zx=`?kb z7?_rrCYs#NKFiMsMyRh8!``D(uqhxFb*G3@68u>hX)vZzrR9e4RmeLO+C_tB47@2N ze0FlMuw4PD2$hL|35bDE!*|k0w5D-*6-cXrXf{Gw8e0IUt;HZF%#yRi@bvipWQ+}= z7Pi9s4cDLiTNpmDTEX2?@qRmKaHzMO=mojhX>4agVKT~0#G)xw`)Qe|#ANCzp_sRJejqKin~x3=gMN?8!(gdF;dS?eG5(C>2EsANMoXsi5P4 z9~*xC-})EH=bXJZ@)~|}1iGxX*t5p+l1Lw*L7C1VdJrX5uru)7I1VK+I*BeNjo?}9 zs*cgIw1y7=l0a?0)`p+Fe`HV*&S$9=8}C9Sw;FM0Nt=v*l@sl-#IVPHYz)$bGRL2f z5_TXjt%S15zFlGNxGa@C_=2aKF42^PgK=KKF&t7&YQ=Et 
zJGQ4cD4+cr%Cewe-(&38m=w=`f(Upg>RABn&V&*|+9yhTR>Mgd_YtTPA3n_m=lqf6saV1*O5MtT=pN+mCFZK9ZVi=`5EPiA^mP z)naI^`tnq%N_*s@sZ0QfrZ!c3ih@1brA!`~D7#ev`!m$iVX)e8Y*$n>yx9)gr~x0y zU@06ueT#)HHVff_8$}t^>&NlOPe4g-$USvTaCnN+yGaf(ZIHm@(RT!QMKz-_EF*~`efX-ib4Bn zF!otBR-v3ROofkvKDMyioPJ+`HJc?Hgq?QWgF5rF zRlHTz7F@?#fYM+!8vd;xQT>LI@&Z(!GtKn8qVcKHXo}MqY^!4CroEa&>0EJut8yK^ zX5atrZ-gpdX{e>6+dNN`B{4X&rbWNQzUP`82{MI6Z?)p+8m!8_R|0AQ#>KnequqZ6 z_aFXt9QzZF_rHO$eHN5&D?ukF-g!{T4+IaucWR}u@0ZnP1Lbw+9w_cpLNw=a#lJ6& zqAoq`#^3b!{KNQDfBb)&Nu9tX@0~!=z-`|`I=L1cJ?MQwiJYwhKv1iy)lBNcmKQkG z2jjdi6R6^Ki#Wz-Z=$?sE1bJ!(Qm1}51PKAl?C@7{|x%mkKxA?n(@tLF^eWZeh455sk8PEzFy&d==XA(#uBYJzJGBuMYGcaff;~R=J z<+OHcNSzdmgT^yC$3v(nr33qsv>bO2Y7^yNlZ{NHBju*2+l?q`$G}v0)7crLqi;|6 z!GHAs#*csLPhr2khlc&R4@^1RSf9E)@bekGX=oo&QJv)|`%dXst@ZrHVBbXr8CW9o zPX#ceW2mK|w0pF=pwx=j*IQCK2GMUXEhY%_v!_gFqau*u*42a_LIJhLRM?`^;BhX< zI?04WRgRBdJz%2O1=V8OZPlPamT!|HW^t>PXvF^Lu960(+gMattyw`k&liD(a?!FZ zxU@jOm&*fU=;cC6n}YIzqaQiBzOMIhS4lkX-0+Hl3#`6^m6vc^VYcAS69+iyFoSbX zY)&&)7$ozf85*-HIPFR+6+iQ>Z^nddUgSJ~X$RvR9><=jt&)Y2cR^I2k#~2vXC*=O zN$*EAoIkzB)4R{G-yR9ljFit7)j;39kIBbA(^lkk1_SS?erpzzDpWNQ^SBQajWlRh z5vdflQ0kafHgh!D>@tN8WFuk;Mb$NUSuhZlAq~i!9C-#VwEvC(!8E|^+!$I)A=)Pi zPnulJ|Bb3)Y?4s?cT(`;XDJ#JJB45|#2I#+=`d&e&MIlG zST1*vM6tN_?gO?yptS|Vsh*SScuYohSB5HV-1kz`C-u;=#fJ~~u`Z>;>OxfcJUkNt z_Ax;Y`W$HUqJkU>Vudss$HP)H9x@>BmtiW?B@dowTE73h{%`gv8LR-@c^{U$2cWKz zkXfwPhhP64SS}B!JcmmyoY_!v8z$&Tz?WZ^L9U$=MZROK`}yA;-aD?3@A3Ba=NKb= zp}|k*IsU%pGD#L$6(A)caA*8BZ69dgU*4@SSo-6LuPW;D5)WVdoA7u1i~k&c{=fLo z5dNeQ2vXmZsN#+gCdmNb2mbW` z^?%2+ExKt~)pel^E$47(plvO)VPCUnnDbz*4PX1x#~Jg1)t}Z@l-gqa1O1qo8j;Ax z8mSwJRc!HFZ-uPfaveE0*w8+PA?BpH0)Y7L|I*uXReFV+LAi&^AdR? zIm3a!@Av;)oVn4l9}(moEF-JJva_tKghwWM_~4YzFwA2d(X?7-N&grI)H{H40`AD) zb`QjPXBO*34y;5e8_I(D>*d{=h7b%O(=GM6;YBw%86V8pZ~1Dln5Ro0Q~ZONY~2Y zt2xY0pDBl_{v8>WXFS|7(0ibu-}-}pPpk{&Kw}S)7f<~3!K76KdmL=k-)}f5>vr@b zf`D|`C#;=G6wET1H&_fxJdW8IE*cm-27~QS*f@Z$)|SnV`n%HF4+>q!adaHqTSxRx zI08*MuJ3T%9x=v2sZdIZ!cV1nhm9=}!kB>(!MIyNX?Lh~B?nZTXa)92bSPv4Ctau(@1tO9l9zg)3MG0l_=~5$=;_^9|eAzU7QH()O?TtJLt zCh}6?F|4g7C2S@cO6ldqXYyi*=lqTDelO``8K9@@4D{DP`Nc7EhE2*{r=dp%AhJtR z_)-s%s=~*1#qAmc8_&{>;Y9j$f(**_`-2K%@&QVQq1ABp=+VxuO9q{@Su~RNV!-eI z9Y32ve`QdqD&>4HPkLgi{Shn>kfb10L1H>lQW%${(ZyNj*5n$FlbwedHcIan6X+Ed zNde5WN$a%PU3ZB0GvsxdI03j@7c@$Ig2AfRDXODk`HmA{10V%pP922l@cS6p_Xrx@ zqyLHyD6^dCFNx$s?~wyN$E;pGqp32O61@ZpbZbLdzl^$mjGQmx5p;{bqQQt`cs$#& zW8b$_35&ZOGp@XEVQbmAs-m&xX6U}jAIM%R@mwz}fZ)o1@%|}&bkg-A7{T3AVHU$H zP4|z0gyu^%&XF0~gYAkh-anp{8V8fCdG|p->Zym(!e1KHHz>0Rn zJD_Fa{k0gd>55CuU>|?^r`)^BuF-j4i!Qy9jzofkNySPTX7D1%=vZq>X+xprRvN4> zakyK*_5z5gbLVH zAc2P_dkm9XCKAwD1wo!gBySXwSf;e-DLC^HPNF-qilQK(%D4kGRt$HskPZ&XSd2VWcPPXYk9f`9a%{=eaW^FRO3qW7u2Ey|Wq5m-!v z9#rj8Pg2}XB9GpwO@eHsRknY{)F`%!8s_~3HE;}LW&gJl&!0igKl zOEnyP@5bJroha++t9ai$gFW5l=yAEjx~@33YuG9nZr3ZK*F?>P@%guX&ni@_1rL`M zFD@%Sd-Im_0F-#m5_qdE=!&)i^&@=rwf`D^@!$Th;y?NK0>Aq2{1NQ8Z^Qfs$3V$) zxvEf1km2NCi|&BDXU^b6{VRn@pmUvcXEMAo!@6FuJba9&H$R4Z1eZ~&pR_FA`q*oz z@s(VACR)+-TWE79{mjg8x!hs9zQ?}bB3Yw7nUxsQ5cu2%&uYbf9HjpUf#mE3I4;W) zvr|foC-WHSHC(6|-&vba`;`sU#jq|5uG^ldaxKw7tqM&B!4^q=_#4kpt)!x5WnO%I zo2>opv*NIc!RJtdkoYGnOhw-Z^|lajvCztw{VJ7J0tH=li5|pb$I*jM61@lU>r8u7 zz*8`3O?rm;Y>s?LOd*(@f02Do^Br^?-I0Km{+#@8++%_q0tB_UC|K{mhFXDRdmTR5 zr+ozG8b524W&oRjK|S?nurk&A0nJ+yTOk^ZE4)tvso~%2`z?Vr zt+75^>(@w?tZfOKs7m@6`0Xz)_)Bl!=U$xeMZTBTpZ@C8`wM<@yw^FQ@<{N*3N#V8M0KKz~d@VEbcxV`?% zc=w%u9mo6cVH}U}aiE%55UTig6jD1EHe@RX5OczSOjPdf?f?MS+l@ZGg+Ev-d^ol~ z&=C{7hbvi>!sYBmRxcEM0=tpOgXM&}f z6R|{8Y4Jg6*_pKTexQ~bSv{RQ%(6`EjyVkOM745ve6%hMlq`exk6%9E%MX{lVuSnX 
zW6a729Y2(0oYvAE88}vu0%lWvB>BbM09CG}eiTies-&IOsyq!W5S5YMr)tf}*%Co# zThu?mGiK~{ijPtih?RxDqwhEDThP*e;rIQuxWB)TcjV{6rP^bV;!akeEE*C2*Z=I_ z8M@?%hB!4P%E@G{$KU!V&F903JR9McVh{S&+FbYdZGe)rKsilA+;%d5;+ZX86N@_z&Zw4_-m# z=wrk`y^l~cajCV$8T{b>4!`SLU*|T>=bO9f*pD6C7%}*4>=@g3@!$Tx0FR&jyHLF` zWHBx6Boaj}$+uqWaUT%KKs?4j02XE^KT26HqvOiqn z1R+>*99vA7&f&ACnO;0Qdd5&O677$7_GH_XUJXRTH2Q)4*m1i)p_Wkp`S{@ums$uM zD0$F544ZZA8~U*)fCV`I;@kHODpi$ z+)6?55&!Vn!SmY+VD3~l0)T?%aC1 zKDmonMg4rDn5oK8Yvr)pc@e`FO>37)HeZ5Rk^S z4usVe?eYP}{s@c>V^CeI$8f8I6Nr(EX=!kBs2GqFo!LB{T7EEze=q@Y0$u=gX#rRKmiJ@tvQiC(YffYwc>3ee-cjMl>?w*O%C-&5&P!->sb!XdqqS+ zhNE5XH~}_{>QWGLj-*R;mtx3SRmIYv6q}zcDpt~$s0b#>Xaxn4zZI0SV)Pp%Ah6I# z&$7Bf&FyDJFkr6nOzr_mR2E7Hi}9hgIdj2vHkJYmHp~gt71|6$d3aqH98w#JBg)E{ zr4%Ac!I!@D3EsbZi}#ODq>0fnfa}NPccb%J*A*bTy7!6vg0@e=awPu?Q0}lSUxu}> zV)Qrg<3~7-w}2l(=_PZ(um0*Fr%uW>s*hC1xY=e(G-PMYHmw^K6Eq-aJlbKl z^j)Y@QnD#KW_HB>wpwv_x!@Sio`*?UYc=H2<%{$VKneO<5gN{f-1`I?5dJ{>t2-}* zJSloM>4S#{Qr&9Hy9-2ceQ7bTD$dgx#9~szB;(v{ZjF`ZGm_yXaYDy z((`9()UU-RMVcrn6fh0PvNUY_&bVYs^v`4y837vk@n*B?GAl7*xAkUst>(R-zSxo; zqCIS?C}%U&RD?&H|%t_6+1+z z6tQG$E!zN=TJh?#;^Vd9Td(f$D_^|FkDspChNqB#Ds`>30A2?ZAo?r;#!}Q$v$=qV z03s?{ThNwJs#*xB?NntQAUHol_I5uGQsx|cpNf}r7%+gkoCfknU#==FfarG(cXuxW zyciyKVeFCYGu~O(6-#Tl-EPQM2-@z0af3u*Drea#aXnY3@75JhTg052>(~2~4Q_2g zO4OGcGQ2rx>V(+DB~dVe%rs{JMubF_4VUiyqyXJrE_m9gIOR^+!wT~)kjqn0aSkrU z!2}l|6A%NHSv(DDO7fN9%Z?Em$43`(3|#K+quEsN?9L!o1S>RdOf_`A52UY2G zK^&}{_@wb96`(ds+74dO8H97?wM!6HVCZZbQtc!X(7-I#76H^YfKOjM;D_(tAr1Gb za*@H4>GNzNn-Jd|xfk0(UZd4~m*A?>S@H}kaTXoOY@S1*e$gl!P1FuGpL!aeNxRc( z#5vmcEzNtQq~dJcCw~?+bf?5v4!xSO(UViFg)q5PhbARZ-<(4|OGR7m(MmYjk6VFvbRG=8m2Ag5R^QYb5M_Bt2DEY_r*axK!)NUWkKEE4_CEW#285cKEXXtPBj{@yVmB*` zolVwDYXp*x1TuixJLCj?WbgVwqLUdoo~X7GJ2fBd9SS?9W^e@wW&v76FltoUF`hvq z$ink%u?}*A3htu9Q7OlJ(C%O+?(FS=&%PdqVO`doqo6a^6^t@LyCXf;Xs0Aus>GqN zsxpyL(IrJsm{bizfJvjr9FCrP^`M^<1w;2&!KCxN8Ur%t{#F8UB@!?YM)vSsohTCF%w3uS3e>~=m(pg z!6dUzMX50>Ibv?eehq|(3Ci;(artX7KvmEsTc}~_J(MM<|EH=Pm7#@WYhUASnglJ#-2cSBwl^S%wml2EG^4|8)vhf!MReS;^X{`F{TW6+_NL&8e6}T;MC#488u-o^Iu^)rh8Z#Oc zfE6MIOAEydgJ?=K6AXtrFLcYm_?*V5+1=+)MkGOMa*T@2AnHV3JpXXtH&i8LG-xr% zrRGKz(yXNJz_AlhwMyA>@BNuXv4Xnf6Z^>IBSR+{I0o6>=fEpb@l+*B`OF^9uHC)D z^5RP~*(&Ws`-<=F#~zdz2b;35|DeD0^XbVnPJ)c?p_1m`d>}RTk zJlQE5 zGsCc&Hre6$dw>7W!CTLZiN_7^qo<8lZ>iR3P8j#J4YG@-^1r0&8oeGp|9J@mU>>m6`Fv|3AN%l^u8l#83}rpzI6n>`Z{k3KnaeZd|aP%Npg_w`3QyvK-`z?!4^~uWT?8EU;Lv=1FY)U``Nct)M2vf2j zb2LJn(}Usw(IdN7V@yQ)FEf>OSs91TDksyAjG-bCinFo5`^^+;(D~R{0e`n0cslyC znPmcvN-_BUl;AQknLzX%**mHp=InO@C@v$lMpCjI*|VC-5Z6Wi+!58nR;IF}BNCTZ z3)Us(2!+a6_?MDOuw-wHLAx3DYulp_d!DP9-LdZnhjs@jr!x}sjV$ktC}iqGnRkr- zvIr0=S?v<=Z;a8DI~V}_(wDyCIcXKoFL{*fV}T1gW`jBMqSWPrzF!k{f)=|5N3fI_ z{N$(LD{hhuO*sQQ{C7jC3uzW{kV*-PNK8A{0JS-=ueAHv=>C*VjY5?GqImAi)`aZG zDXJ{rCw*uLw?$_58E|nUK!j;{1TO~-Yc_t-a0!*D#TX1B69ElXN~H>;l8=SME6~2G z%1}s`s6<}}lc6$I64-P%jsrr0<~$8j*NW&Klb`Lml6>~|66jGG0E3rUPa53O+1wqM zuxovM1Rbv*Gg$?)qSD>*rTYtB?*p~np}zbEw%5N7oY5Bc1oI&M6Ro+-c++1F~6- zF`Ih!YNBtP;jry{Iu5;8$dNXf58U>h2~X@8i7s-S;RUHBE#7~=Mx?68Gvb>f*!FfD;$pf z5DlfIol~Ifd@tFZ%0$w*=AGv`P)5F2DLKhGXmd-xOFbohifI+71^o&Ms>lWkgbJ1c zfWa9EAaY(R*4A)ipzq)e0|$FBV$ElREHVHZ9)YWDX%&r_Ho4lQt0|a&b$`M8TTn47 zt1hm!hla||eb)D@;@p7;Ihe5V^a0X!!})(6$2L4Q;exVI7SLcj4_jmS`5GAYhT8%{@};h zj!*|nYPea<1{xZ^7w;yZY#)$7pU zPrv$2ck-Y|26GBLW?z6ZPX+05P+3jzo~5r@60cY<7XSz<5I<+QA?R9R)atU3pYEwv zq^6-W|IrvYmQt{9TQ(7W^GhG%H@|p`eE@2p*5E+}Q}Dr=4X2#teAm+&~99)h>ee#6ovCb0s;ZGZ2R;O2I%WEu8Y$ zvqzzphNJKKWePH)9K1wuoB@4&W~&9Yu5cFzHl8t*GYUEENFH=dF&?LjV07#uwW)RS z(*+GQJ*9*?kM;wnGaC`N&h>Nd`lMeF z{YztLr)k$g0TKth4-E`;zRv^|k{^#izJ&-yQWi$MwF=E1C>XROVSC!LVBfYx4`s&+ 
zZAc#>L#)I;c>4!t@H5|?R*}hSeHIAMSc!M2cUF}gW;1xx`Kz_TEx6x(6j;3kpV>E7 zKsn*w!j4f&)As+grsN`)qi83%CDH~8P{ z3Rp!gVVg%kqCd(996{k33{2xYrzGoHvF}WAkjqP&S!=i~jn#-blS63b1jHn)Q1JDa z_jtR9n$ER9?-$a9;Y=05xOB##_tMMBx}@&un;~cM&L9FLzP3;-06H3}Q<1Mx?4c08;K$&1=PiB9b5z1JqJNRc~n8c|Rpd zvs#(^;`5!&rax2wE+&BbG?feM14>(fld>KV3HUQglG*w4I$PSeY2zj(HQ%L#kb@CC z+*8@Dpe`X{zf*>Nf)U|6Ma>OShKQBcg*L?s6A(2bhmcJK?D5>qH1|f7;&`vi8Y+ts zV?}iW+&igq?y78#3B0Q3LEx93>tLnPm8j#?&nbX_fNLu;kWzHI+fEu!iOOU#oT>+5 z|JPPiPyq;=EgPpng*rXOfXlk#wg+8xr7%zTXr7*@Eat}<&wMG-FCn$Cq{uTUKG+u( z$p@nu-pA|>D>cuH&YBVvQM&~U0braenQfphD_UFO$2DRm#a@sceE7(NDI3fp#t zd&t4p)^NGI$M*O(#t@7tUg2S364ZV+^#`D6!{24;3rsl2}MrF-*mdC|RlG z+0ork3Il2!pFGT@+i>01w%~4EL(Qj5o$(ojGDIG8m>4kCIHI)KaqN#cZm;1Y8hGr1 zveQbms1@>y%T&aQ3C>7|7i$~Op~EgDDCiiTfq>K_K!hsLMV76PXoBigSOC|}*fUVr zs3=r;o++Y;>9pC;QdiWrpw*V@J5tSv_3qINbSmdN{F(YpR=;?D3TM?g&+_n*i9|I^ z3d7b1Zr#zX;f967`hr(HFoyzViB?FgBc*KDWnnUWr15WUHfO`sl!$qjO>(HxzCQ~L zgz2@#7DvEtk&l3iPE?WIEb}WlVvT2Pd<1Vlpx;A3#AnSM?bFcehVs3 zo?RMyfQWhgYB>N-;gBUVYd)}Vx9s%^dO*u6L9LCp`9!_3lF6l#-@?1h-$IlY(e^YO z3~BxWC|w0ofIn105*e8-aGZX(Zpwfqh7aSQW1PAA(VR)S+7Z5} z7RzCAt{xSFFS%}Io`p^JUbgYs?479e{qEoXvnNF`u=q&VUB?}Z{HwiTgZQ~niasiA={VrWHNi*uN5wiuo4eFdITQUI0M=UZ zH~qf93VnMD6-<*o19W173Ku=GclR&w@bVSb^+H=0p!GO*6!TyqSPb4=?;faDTJYr$ zKTJkMPytJf449*R(<7P1+6kPwtScVw@8hh?ZaVP6-6eXv__N!#GcG<5V|jC)-}Y738d9fm~i9}rq&krHPqAjr-7*&IOL=`APv=d2;dyRb`IDSR2K%3Gf~kw z5QjQ=?lFK*#x?(!0Z}Q2%iTRb{rWd%QUcwl*s8rp5Uq))Ziesu=(7lRmrtOkgGiRc zz)0Rz0e<{@&AoQ_RPcH>RFr|C5(;tR&rW@9mCemDbg(LTr7>5@( zlJdyAvXLS%dmjwjwq@d1^FE~MVsN540t(pU&x>0=9otkmI(oML0D!&B$mjl?2x21(`Rw*=1L=Nf>4oGLm;HqlGSX$u~we9 z1U__6Kwlu8EIujEGv*27(JTV^?d3BlZ!4E z6zPK4!&N=I+)n`;)j+U#$8p?Ha8PXIz@PrJ|1QR{L&N=)yi_JIGaN9icX#;OPyIA% zTjHJ%DR+h>JrE->>T*G;E395%b;W1z9y1OG)NeRgk)Kr?QR2qvxa~ViDYz^ZAH96Q z!)0Z>7VG%-y3;>5)b+)*1Ek$8wPt^oW40h1!|}=5U>|<`WjCv+=1FI88mv-kq7Xxz zR!meyrMQu^3c|s_3hJ^(0CyZ1{T2=l$ZAIK$80>a_`WE!O=MNg=^+~zGL5OO!r&%L z{ZhV=!Ukq(Jq|aX{YomC0zf^7Rq`PV>iQ|#@_^g>ZzJeHNXMW7H$tG?Rl=MBjhdfo ztuZ~aHrP0Dy=~-mI&Q~6QIXYRAYm=B9@0(kni@}MQj6r(N|sdX64Z&I=1m?ZXHacE zh_WUuMFSm&LWqSpy?_QSDnO5QtPCL4Smo~c_^#n;kETZ65mYlgy!;Ti>n){TBS#4e z4iIf(W`^2oydx$bx7LznRPJylFXpqMTW3cX9cHL?!E$+l)ec;$Ve1Fcr%5Ty#%>55 z9a&w7A%&IP<6|Z`RaFmC)5gH%!>{1+iyuS)Q6t#sy<;FKYvQ-VW*?6#i6BCd)SY-V ze0+bw7f;u$=*R)7f)YWz&>+WDn9>q(jlg{ zyXb;WK<1oCqNsVbGkKtvKB-agkqRwJ*lShA35Y85q(dTauh91bLqTIxcqC+=6}hnO zLHpQyD8ZFdQ(QqkF|~&L=9F#?v@qn@<7}rIApxzW7Q|6tb?jNCEAEByUT7a9pk*S* z_XdNyt_!8Wv4}ob;e&@eEC&44S3kwkJO1K#f0I6LR%`?`6eBH-&)bKiUG6cqr#z>UOOG`NDgPy6 zR4(LjR_^4B6#U!l-PQW)th8mtZM&iP!2P=95aThXx|@1=6C47t_tQ88{v(#k<^CQI zFJ9rz>(6oQn%5LDSswsO!?Hxu(JjPI`u+$%&GSk%JjU$;vm@D_s%FIuFD@&-@#!b{ z!++qf!Jqv1{ycv9+uz655fG3y-n~d7$2iiCn=)BbADksUP3(bU0Dk)C{u&&8;Cj8y zQs;<9NoCpR@~zO42y8S^iKQ+eP_(vWFw}%-A`IHTZ*wrx$+*Or`;t8m?!mPVPUA%E z04HTx$b)fskyHns|1y+kXvZ_O`*ipEDVV#g4=^)qTa*Ni7ZWniCxb3D7z|)z2$Zp8 z9@*q}MDy!)@7O((jJ>P;C+bM7SE82gIRvTj_Y)y=qPsx}>BkYzZ*l3ClX4a?$bCc_ zC0*3s=ios#R}UK0W6Wl#5Cw?jv9y@pi^2`W2lqGTC8!sN=w2~={nIZeW#CMCnJ94tewt1WItx*a*oKQ498GSlDjH)9_VQFF zxk?!4gz!fBLt_%@7D?%{ENncF9TsH=dVe}Ap~tPQoaj2$%)mstn2xb&)+Q7NCIkTU5853SUv(8l+{&VNxUmO1RZu;~6xU z60;9P7aY9ng=eu+n!A#B9*yBl_+9o@Hj>E>&sQs~z_@NuR0X{{o;naVlA;XjfV!>tO6Lksj5W>lqXY<@04sd#f3LRMOdU3`V? 
z)(k!ayb%4+xz8D-7JGJ=)GRJaSx|gqZThie>`xJ#^1Y6I4>@OokNb#|DgP>aBAU@u z_=)>-oWGZ7Za?;CG!e-IYihS47z0t!UNZ6Y(L>c~4BS1uz*m0ecR+I)bgSJbjmO7G z$zb<6^ikDy%e9TY8;CwPDSyS8nf;lKYqt0KeG)xT6)|D}RWN&~GA?V(r|`H>Kl;t@ zW-veRqGA@~eF`Cmj_G$~%e3yA^-xao=*D33(iBet?g36c>zU)TD=C~bZ$vc#kr;ja z;y#|wl%Slz2s2jN9CvpgoYvFu{^^F#AFufHzwteM`$wO{J(7r@`siiaUw8-A&RH6 zfh7m;Rt}L)20+sD!qzOsFpeE>KK}{OLttTT3#RHFVG@E#wY2ENz>F_b)z8D;ma~si z@`UMkZpVQy-e2*5|95^7-~a3j`k?R^gR`6>HfykQ!AJM^K+&L9CZJ$LJCnR-j=Ef6 zAAj}c(xkrk`$R9Dqb6Tp1SY~@NhauqHWBbrV&kq zjx+%#36+SWf~b7YQlR$zP)9);NA)Ub-OWPv$>3Sd8w@TUE-Rk)p3fTdJ>(#SRH-b^ z{*V&u0Y9*`&~iW2I|Ao?9S!-KXuKg1ukJB`ZaaG3%geID$tBJ`8Xk?XbR?Q_GoXQR zJ$r8yC`+wM{Nqqt^=xJZ=gsJzgr5G9Yu)Z%VSjoPYI{C8{MqN?Q`22RyQ(N>>c+QT z-Q#=jZ;>3-1@-a~e&;{%C-C}L{|$WqPyJ6Yws&F6G5}*G)t$}G$f>$@S+TU`G$$k8 zX&6NmoKz}7?+ZNVLyVyY=SjU2f++fe%wOJF1~CrLYP1qya2=hsSPY%p zIL8&?IHxKOQ7ujN=9jg?843hsRCQA&J3xfWfmSof1(5bYNsI2C)+BleH5-7H4=4$( z8H7h=F6awkE~9DQ0C)+l{>6eGmsJ`wTrPJQ$DY;7NY+~p2#ei2!>Nwbm9)gjEpT~TcYwc+zrJ&_i>hIq#~>%q`{%$E3n>Mqtw0wqJ?`Loxepol+f%ChiT-;3@B&sBTp!<}9|yjA zY52|SMhC$2tEDM(?oPxXq&z9nH}(Lza{yp0PWmh^sVcV#7Glzh6>A-x1MIlZWx1fN zci6A*BPq{&lnylfo;w*61;?@H^E<+31EOXQGB0XWe0#{_o+bSqQQd=0WLtL+Iz~*c zTP^wX*iWEou%%9s=%|SMqT!H^5D1DHUWPI@6pO)qjlCL;J(s<%@a++x+`eL*p!)^L zs5v&lwum!nZH0}__tcp#$u7x?D;YNOsmA-=d0Pw~-!0}49HlO3mlxQs@6a%!A%7s% z0=udwwP7hUUIk2WoEvz8DG|H%G3bK~ZHC=( zdG^7Hur8!;JVVVCs0XrAq2Sz6Q0of-*SvHQko1%lJVn|Jeoiqw|4#P-)0)825uMAf zDkFz;cZg!87tID#Ev~GV{>2Q)#haO5&=P>#^(i*9luT@Z$<3DTwqx=&4MDM3YJFya z&r?ogc87*47Qh^afm&i%mkezIn~%nHz{7x68hPH6XD!DQL6uN9U^@;}GwjDUje|S9 zR79h`0x|%5K!m@~0b{vLe4k=do->?o=!VB#Gyn%218$W)DPh2l4x%}sWN~AVA1Jaz z%}Fp3NivLyCcJtIplr@U=uFy_RERSy(->!o3Bhs8z$EAeaRFLs=nR_2LGA9Mo(w7V zR9>QrjlrYKZ~iY2Db%< zrPWC(U?A3t0wp%|5y8~GhmA5#YxGv<<)Zul~b0_Auf~fHeqeDTYy3_;F*fY$!%0Fb{2eAJpF0 z`7qB)qva+b?c#GZd)gV-zcA?38FR!mV9B@Yo#P{!oLHz=ZDW7-*}8{;4n|)<9|K;% zqzc%N9fwn!{wM_TWD|%gA9g55!j4DR9yC4@rpy(Z&q*fT@Y0Wr;P^_ zqEg^p3Es$OdwBHw1sv`UDMBbC)*9GW4Au5B6oyfhpTVR3>VbdKWM}kVYY~U`aRA98=@Ef`oYpYFiUERG^j9 zO-X|jmrxX|biUkpJ*R={MMH*>B}N**GhHJqA~p}042OnA5<8fcsuG5|5EYFzH1bY` zbVoz6hLQC?=eN)^p|ut8&{`jwl+Lx0+D#|_R00G+9V(PP_EW>Jq+AV8tP16GIG{#1 zw+bJ05`BZ8s5{xzC_0`v=!iR1iljuHG_$y@UKaG*+b}6*^3y{?xxkkfz;T6pPnl)~ zr#d^u3LYLFIKeq)hGZi#rZd*_RiY}Ae^ziH^en1;`g2E?mo;F`%o2@R)=O4vw*4Tl zb|j7DJOMQ*ih%?zqU$k168h6>Ns3D;r)ErYhIfJsAv7?^=Z$1Wjh8}hszpUaJp~FX zwSLC$PITL)GN(Ia0H%T(D357%i8}}9z}1e55E+00Yl;36Ahpy$jUY0V+wUnycZg=Ojm;b>AwUj-LJ^ zg#&E%1)SG#tu=$=qjx4P5mGEQ(R=W^0myNV}n45vErUHsu{^0E!+d>AXu|NT{zHG$aryW zVTWpsWbN7fqS8PtnaZ$X{j=IH-!t~OOSGerbbD4Y#yIlK6o_UyZEQ|;=Q)+kvifno zG_oD2cIZw9(0NXthl-b^v4oUFrfr!O>T)R%wKBQHGx5+r3Yz0${@|DXY`mxo9UAD{ z7tfk&!*0X^E$o&L{;Ow@zUPG5$(bJ{@t#$gmW2~QWu{ELV+D`8LLnZ z&GNaFXy`IYzR%6=f;BbFLuZiYP@Hx}~CIeBEKB0aAGt0@Qee_hsQFh#Y8o@*F zJTyf(rbdH~bw7fd*M;t7@+f0o@CUy3Da4tL2b>bc&d=~P(D6^G&4|JgRIy_W?ES!g z?6~e*HrmI2A(LOBBS41_((hmx?3Ix;Wu6PGli#Y+H4H(;TjG569esNr0g6j@a5k0i z!_oI!$U7_fz&wBFevQ5dl~EJW=l*%}%H@xz63a}EQ!%F03`Pu z1!Z}Gx;#L%zMTQI_9T*iPREUdq;!b(i_dR5)dk=gtILu}Ui`16t)sb)z-tJd3n%?} z4KauS2rPRljDIO;fZ*MWghkJjvZ=LUC2#)r_?E!~WrP{D7b%&w1!flSPJ40Pg5q`t zSI%h`G-ty-ZO@HCQe`ihK%@vvCh(!8bnk;Q={+e~vf}&jTr>6UID=;S6G!~rr|VNH zj&1u66vN=nS3ZMdj7q4ESP*lN*ZqrEct{f-?2NbqV`Q20S#b81I9%p5W5$LY9 z8cKa%`}seJyN^E2b=AF6)!ONUf_|~jA)0=wE|>y8KyyTN4zkHP{kYBu?T$F)wBdun z%AV4OhIY%KT2`&zLpamwj?Mg``q%JmFyYGSB3!A1*I;C*%GRZThf6CVs1?< zQcDcK{^GJ?Z7q6>p;*Tw87;YKoGy48~gxE_L?GSNR zYOxdy$fQ|dbH1(9I&bjtjF!M5))hTL6&dP$_F z(3z?!FQs*%{s-y$o|>@v!S0+-F#2qy%>+d#Mg+!7nT?{oA1FRIKaC>GR)Gev1TEvh z#3ANIw9tun%$GI=qL$hD7z5Yql}$kg_(0!pX(%;~GiCY8%uXHQyqi#O8Ulx-wmMhX 
zJui5t!FM^^F@xs89ad7Io`SX2nW^(VB;BW8kX8yFT5wCFvY_N+CU;h%gcmN*^BMA> zost;>HR+)5Kl&85+{c0E{`Qkfiuk(u{VsY50L-CmlPw#8cU$ixKw*Qjlge`rk`;vg zf4=vFM9s~ZC~z-=#WBBEPG~z$s#mt)7=V;k3_*jb#ZMWz8s93{jy*-<*%!+>B59mx zJPP@^RvIh~WjIUsBNM<}vs~+G%ph4ust0k#SAoUy^n$Qa)*?0&XCFHzxhC8;x5DIqhsH0IQoug#vL?> z432!D`W$j8r5DM=)@oV(aCnrP`M{E}7Sbc-K4btldkoA9mh}$H`U16HV$GGrSq!q0 z!x--P=!2KsLk>3mE$4fXx*`;D+5X>`MiC~Bdc1g_cJyMhpqSgZ^NDQWvFzMiM-O5i9YKb>Fq<^ zXi5Yx%Zi1*D+DoUPfGL_1_(5PfSgQU7u4l060YqDmcUC#_kqXv?=#Sf1ScMDxx2^G z3YKd4(#!jxQOJp(s=kGe-0gYOu^NtniF_+CEVbh0m%ff=xzH{V)m@gE%+SYS%m7vP z?NyuN7Qm$1sUIr?4z5wHCx1MNI=NgGW(Ad&UWp$QAu$BJQrpkoQsX zP)%RR3NokS=`0SHr-_9~^)46zF4?o7zSUCVUb`pC-X*Bv&U;JA>MkmqPw++T5KQ`% zL?k4*qj*3*8C*kwb)Zk7#usg$&+|7P<2X!Ub!35OQYh7`p^ znx2W}7&C2AGQg&jQz_Z!cZr9bKWApD@_ft-i0oD+oaFwqpP|%-QZMkazoW|pN|NoBemCYy_N@N2F3J>U2We(^UyOX^7cm1x`wzOKsNmIfh^gFODc zU?=*3%?9Wk;N|o83Qpx1&lIq;EN#AVW}VY$70l*1%jcL4=ul~`7F_OMVBhzcggo>a z3|zG53{;;rGtnr$xLe5qM53a-@5v_w(TK!D_`E`&^gTgkA(zyUKhw*rFCojWMk&s6 z(wFyvn0D`2?mxuw^ak!u!37`&jh&LP{6~CZIb}T?9|SCIs)XuUH-x;B_Dj^Chb2&5 z{TWqH+%ITjXFmJ1SyWJ3FQlzjwB;3! z;|Uc~hB{7S9p}o4RALy=Q1Q-S_-!A*!f(8J%o01~@DT}t_BQV=NUu!wr`QAmKcYq| zm{y>m!Jz4g%u+-`!QtKy#_yJ+*I5NhfC!+yN%N~I#Z;>@ReNF%%zls(DC%38;sJ9n zL+XD1==FYS&i2~e{yrhh1f=X#0iELHn@0*DP( z3;fv8`<}t2^IQzz6kv115PKGWkha$+4Kz38^bC!t{C0$W6=hF0%iyOqi1Yqe{q6q+ z{Cj`me@`lv`p05F3KO)fbO_{dJOir06A*zu{i9B<&vST|Q&vA=&`aZaN1w4mA#KSl zDjZ6>L;myux9bxJzm5zRtBKx1pFw*Vy)AK{7ghqaw@R$#LmkHVoq&qyH#30{_+#^q zL3AA^G-+of)y_(vMc>HNp&k%{p7U9>nxYa)q-LHBV9w`I;7WRJ+Q!&}rG)%_#$bwL z;4@y(=W;D0kdF0h%Sx)zg3|8cW;nL@(X)Y|{wm0q;$LkG)@8+U98{>%S&YQ##({=P zm4|zZB>9;3NKBWA4JUO>e+PLcrKVOV@XIqHyDbW4n46OHrMd2;3YQvtH4b#rdhah6 z9Q_D-^S+~&*>k)f6O>hNiq2P~uu`y82uk979+9*)foHuBtm`F{hM;A4ggAi{lsPeT zcs}ozC2S-Dl)oJv12%;OG69}`PHLzjQe%e8(y-QsOKW+dyN_H<9Tb)A;>RVY?mjbN zaysnPH=Bq^~M)}fEmvC%P8c9e7$oVdw}{t~@Wz$0|b$APjupf2}GZ&5HDf98yi z1b4EJfe+V)v&3T3$y8=j)`c0F{^Iw3MD>lDXvuIk3o~%O++89wBrQuE#7>7E)TEJA zfgA*8MnnnE3{LV=4M|;5P#FWrVyBcy%%;9V<2zAxz%a|~)8PY?8{fAp7IT)GPyFo6 zoNXF@X1U{JY#yl^Qrm*ddVvE(uymf{v+eXAy2AyQRSh-m#?NP;h5nx8|C~ng?!&KT z!{Qi6^7r*?dwiXAC#g#%MQn`hwQ)&kVyavL;grWva?Oj`MMu+_;jK~b!7~EHK&Zk8 z)|zU5&Qvpte^RoTdss=w0+aluRV=OG%lB(0Z3<$vx2Yy2qfevY!p5!O{_dW=EAhU; z|Bb(+<~13oP_;Wyi5SWczr114%{22DT?Ic&{@iHYz3KeqlfX18ri4}+dD=UnT3zi;n-`rF^? 
zQ>iLD<=Cz?vQwy%(1nB~BnYqs#)%b0AR&l?6dDQf4+&(^M?wPGL3EKI1k>0SC|F@MApBm}{5WRp*|4_Wr*2TWdC>dB!uI@oQf^ z#l6n}tD*=gqk}e9404V_1_8W7#Q>XBBu!g)C73`Y0<=Rp0Cr3TnV^_ZBGd=={m4Y4 zmRTLopdTkX6egX+2o#t{uzIPJ?qP7;@3&bIv#EK1I=1=WndFknbgqhus<&|Xr~*Nt zz)yV($zv`h1rcVV6YCunM+=pjWr4Axr{v%uO06VJfw`Xoudwe&Qe%%Usp7DAl1o>m z2tZcv(pD2hko|6ACD1R8V}p%@!F==tF3WE$6tXOkljPN_AmeG|7iw?k%U_Y#G2(LT2s9~=UPtm zR_7q9(>3kLSpiHwt)IX@(Qaba5GPgBkDZl1RZ+CG(~kSo8^9ee-g}|9hbng$QabfH zr7c-4P*N-lXIAjh^T!NyeRw3br>dT8q$(#hd&d|&^WU9o7(9FidJwc|N$(u697MPX zys{YVHzB|YHf5rOx!wSh-mR~XDp5vA>0R1jNOFfq`T69Nt+NM!*Q<|I!_0bRP`~4KOPkxwmo|RLJvh=m**Ialx5hHCN`F{Y^CTArEflZ`8oM8OJ3!B+w zY~VOHjqu(TzNEFD5tmiT>g4yACSeTl%W zEo)YliUn^nl1=B=t8c^s4<71$-$UM10R`7omryToMp3}z!7Pq{T`qt%I10Am=}2Wr zHQW$TWn&~APDrOK7&C)gMI{CB!ucYVrOiGDZl8ZEnt#(lj=Vq-xSTE@07)c^THt+y z`AF&0ay~CL)Hf8UCRK(;JM$=4Ff$;jPz^U4?(gt>4X3_`Qh`!KUa_ty^@7%}QKBy` z6vynGd_@D;_ir1_z+ilK!whr_0h3}Al+e$2^15>v7!`G5X2cy}xfQ#Py>gIfg0noS^C%~)&8fE!GboDp>yZX7Z!J2Q-j~)GZi!nOh>>M%}IZ{gcs;^)o-Qe96 zDjA=pI}b!Ps4|GxSuc!|&49ugMI|euAqx%Cjk}BEu6>FCsgWADT%SW8kf$CGZBmu` z=x97&2gNPI>r7QI`5aLOL$Qw3Dh33d$%6MI0#b@HR5sQMpl}Nt0VY(-dda)JEDaA2 z54b!&$JlSt^D&Mb-U^JMI!h|vL-u;9QT2w4p%8ZE^Ve^fD9!}>{7VT^f#xa2`n5VG z;lKFacf#J1+U&WS6NTsh^OP32H5dWT{p}Yt2lLLhI)NBCqQVL#zwCX|{@VmTYi-rU#Y^x8stj}Y7;JS1)>82F<_+ZMYKd7!9Tt0HC5O$~={^SKrX|hr?U)Hq(Ap-5 zqr^RXO5xcDAU`8=S)u)IRJ~eSWkueyf+3%$-kjg~*Z&x5YcU69S@Qj7^-J;g4cn8uzQi z&@sjij2*>7F|-tJZz&x58-73Ls&$Nh$KDUTx!-ZPC&=U;J!;?h+5a+qex=YsD8v1U zfB5%C)k}WV!;6h+82W3Z!-9A4Y0K&xcFnx)WBf&{};aOyaM?IN2$JgA&Uk77dh zxiBUu0NhB=9V7Zf2Zar4$pNJkG>tx0uyWRFh5gVse$Zb$KH$|;Oy}OootG+9A{+<= zIO#t^s7$%G(5X{-32|5=`#Lo|a>0TQP=L}P$y?%LFc{W#i9Q1qY}<_u$g^Q89IJ(35c!^@zQ&!1>&IhEhxxo9X2`+m=4YB{NNsX@Q*dpJ2ZxNgV1 zq=5Lw9ByWm3+!sfR?0{8qb+EcE57{WzaPibYkc)`l_3{G8)8}b80Ru{j zV6`-eo#{Sskn0|_oMSo?OcY9oQ__n{GWBdZ-4>QXkPtJyVgyuWe%vxJO=YPxpkUdReISSdU`kjeRN(nsCd?&*?aaLSi2RR-7w_ZktCz@TOAskz zuqYKM_p?MS*Vs-IqTwesvWfIq#{Q(dAR1bOE^{iwmxgs&;H9E>hqV=OHGoEtl1T*z z%>9>s&kx~`{gq#Z24xB{n-iWhzs*F-pyp#bV+Mf_mlaR9TfCDQ?)#49c@u>bI1#05 zp7So5(O4sC$QSm(3d1yiDxEpteaA94U8P`0`@KGUI`F5CryQv zDMT>8w2Ez?GZ>WgD6o`lxY8VfN!=ZnWu213n!m&uOw}A^bR0h7?^+Anb?lU?pMZ|} z49at^jBM`dnWd;VtJ6eBO*SC*p|zHGPk>18gUUh!1(T_O(l{$>`a%H!#pttKZ$Hmr zVGx!9a@tgiqKug!&0%AyQ82BxNKR=}5-f44Cvlxih1)WRfvq<>eHMl2htf+`VMT2QR^V~46w4#fkGZ98BX zIakCHf;H=U!SIfcKKT+p`|h_fBHPDPtimgm;d_k zfDoFTLl1@MC^?XFh10O9l$-miK$Z!gF`ySEKnGEc(oqgK8ccCmkKiENR1D*X(zwql z#|oIo=@P&vg5Eu{cKr-JzYxw@e3o;I^LC8c+`hJG0D~FceDN}prG5@DRDPVz=z6x+ zY6jniDM=XUI}Kndxn>G_+H!%Fik${X$y)?5@oszX7)*Hje#dd#A}Q;Uu(YM&Ohx!_ z{^eguhX~9j-r0N^zoJpBrE$JAHvhXQX25;l;Rakdg&b$21H6NHm0Iwh{xAGjaJgKO zo}b3W6_gqW9kL`VT9y*RP!v1x@#8frs-$<#(tiYbtt!Y}003Xy?wMQ(k&!Thnn`0a zvZEh6-tHZ+29#y4yQw$Aur6`Ohwhp&uqGO+ zBB=?x5dq?nGRhbpJtI9D7h9Qqa8U`ZR9Y(#|N2Z{R4_Xegm~U!hLvdR^JU3BbA;O9 z(Ra>q8SjWpBsB~t)fWaXa=He`4}R%G3Tp%+m{c_&?TFS;;boln(6N_OUrA{YB}xc! 
zHqD;`W%W_Ofk&#{R4UL72l{cxxd~p+P)or#9^>c8d8)rq4m4Y~02l_YvROPE>MyBv zKI2T-z-QE&DQJoNpu}d<$uvbhSOxE_8IeT*Ieal5ERa+iQ85`Krdp3a(9F>Z@D!U# zu+ix_A}HyuN!)`n=uPI+#g>Fd&97L{7JihbB=2r`@ab6<4{9PSYe$KUbU zYJ=5?brk=AKi6TnAJMm<{t?mkqEgs1dFd3SNNz;Yu;PeA<+pF!J$jP_1VsoVWe)+;4Oe+KMri9J&6XY7|-2d%L8pY6Ol3Fm%MxO=d`u3m>B{W z9W%Yi1Q3`DBtU0zUypsuv!GZ+=l5XJ&1j8&niaHKVV`{RRac_H^+XQKlQCcr7Z?zpe!mhYXpCira1${Q+ULxsY-)=f{^rVHP*sqlxC2L$;D1 zYeKc+z)8VEjkT7R2S5mdkuDQ~UZZ^5wv$R>v++sXIDJU)_CZ8L=oV2)=&z>nC`{It z0^Iw7%d$p5CFACE#!0O$6o}Rb^y5J3JC43%-w!&Y&NXENCgQ+6TvqgfC_{Fs*f_t2 zI_0I|*l#n)p7A7usY3K@((&fRb!0M8&S#XZ8Gy@2fyR~H^5bmTxg0oF@zEs$jQ5DxR(E3j4V{ly^ zhM)P}KZ#%bwO_~Uw@*~w0aWD^6)2@FPe!xYq><@N)LOBwmq=O$*Z|DLkBgy%UON8b zXeR)`HLvyEiL;_r$g!MznR*7wvu~l)n)gbGm~4^r?r8Ro`D`eM8O#T&O_2dj$}J@l z#$&r>x!er+sc(E0zx>yuNmI(*^)|7msUoN6&AT<} zzRv2o*=!7Vk3>I&JBS{V)+Bk^#z0rePBq}!Wx<>KhP|I8R<$xp40$Fa5t`~Yx*E&E ziX#l5l4S(=v&UlaU1~9`mn)8)>!U<~_U#=}0V8P0O5|D29QpH^Eb_e+{8TFaj4|-M z7QE^5->N1F&6W4ETwf(EhOl|_nJi-f*FQ23{L}O75Y0`38PAe@0cXi1{czIUGGP;S zu05diJC&3IeZSO#+qR|eJpco2vyrmj2t_-ZYw4j1&J zl9iWpe^Mb+#!)d9HAUs79K0DW*9ZLYkN*_@-hbkc;LrWZ{~3PsZ~j$`?S|g_WNesa zwak^!Fs^#=a=Z19|3 zmlcl>SA6l71H5?_z;mWJFOt{*Nw1d+ymu5CzGu|_kEd5jAGvsGWcz`>zs?I9iIG7F zijyuf%W+Oerocl;?UcU@F6zMA3Y>$Qrrf1Z7MR?itOgSXA02JIBBg{~8s9<=Ed$tt za~u>j>VsOVSe6U>6#}U|jy;t!rfxQJQ>GdRv-q;k#7aq0W{!IFXkPkf{@~vYv*6iR z8k&KEQq~Nhzzbz`-s9LY`W{W7JY+yPvZbAxD+?!t-ZyMa%w*hD?k~kyO*rzvkiin~ z$4qv>P^_kowVXa}KP1zc#W(lKA*3VP_C3~~lz>WZqoi7E0hVQn*$i~L#-Kq)NuRIg zCmE!D41DqC&HTDkpbWmJ!aMEv(M7EaLa8pae$81QOw{aM?z!%T<+*U~u_5}I6zEj6 zy2Spf3OD>6Pj{vc4NBIA8dC7UL&>9KKQ@5;Q9WNG*6#T7M<3*~%eiK+3xL1;^M5mc z_E2EYU?yZAiIR~cD$-0USn$F)6P(-a7PC=cXv+hZ^*S9f&w-H0OPk3lECQ+87WPjV zn;{u=m73Q(?if+cenit>yTmz>BxYNp+R>@)jya>Dl%NL1y9`zk7PRG&6(Kv_;hE?L zIyL1a!-fIB`0ek8Bj>$TGqCQs84^vl5{YCa&7SL_Jy+ricq032YvHI6!H24>qTkJ` zuc%bcXX{MLOYpQMp318W@+JZzeN{LxnXYir%z9tQe zD8JYXt<69i>CVDlh5T&6wrv^IGHs5e{Jhtvo`R6RwgetR1Z1))oOTvWkkw2qY1cOc z(?{}uW&8EBmsYcqTUeDC2t-x}W)O_?w1*V;D6`_F3blHQp!4SM0#tqUccjO|o~x4R z*ch0PzTx2Ws&aAPj~NS@1{m^`@xDr;y|YK|V5KyAOo_UWkWs!qJP#TYkoMwt{xS#@ zlBEr_yg)sf^u65r;T>xQ;2oEyo=wl7SN2!ETViJ@k~Ayu+VJq~Mf9^0C=l&gplMwn z(3UHVwES~&C8}8}NAbL+M7k3oU`4vABJN5Q5sAmPF)5pIg7f#O5;%@LW2)fbbms%S zS>U>td8`Od49^4%OkO1G>rzcM1C}fP8~^-2g7-fD3KRH|e$PFsyzoSB%lxi}A=wI@)-9vDQb~Y+q^zj0FWD81w zY4>fzb{w!Ted+6NrSRZzGjyThGY~VwkA3_e{>FDdM<_JK172Fq;X5qyq~pifv!r#- zOv5P={jdhZoM4`TiYxdKB05X^sYaw~3Cp6TXo{t#x+*v}ESC$m`;CWgW^qD}Ypq%N zm<~0bSdJwFc@y_tmw^-;IF93(2)N3fEW7pXj&KN3!aOUtgmfa6zmx@l!P>HNBD3Mk z&o21Z%P*q&vBzL59ZI2mjtE-QSs|L(3`Qj!J`^oLEp^o)q?BO39S z2+S#uEHomxqC}Gu`JBy%nb6pzoCQ)at0KwAEP;Rl(R5qdLR2sijHr#sAsy*7S59V@ z^N9g*6P?n1&|1oA$1D?C4+9?71$!T{7Or&*bk*~m69p^{qdbH<*tn;$(^(v@^R5&$ znj7!i&OR{qebkE5uK1UJ?dS2||6Tt8?yn+1ciwHODCrD!(#t~kKGy0OlcEDirgSEo zu+Vrv`bgD1z_D$2JQ(oi2xE@n*oOx%x~wQIRM%Q9*tc7}!?Iv_*pp@w(TKBnSr>fy zn?Hna{jFb4>D8SSDRJnFg=od$RE9I5+wpV2J2ouV#k`~U9YGP5s*k4)Ys(eKc1znQ z(4~}$_pTQV7@oExW-|e-1hETKs%<)dS}z6Gw3&e8*zZB_IR}!Mzn^C-&iElYRh>ci z3@Rd+tf=cX&NLgQ$0>L`_r^V3F7XZw9NbjlRgl8K&j&Og20p8Z(bVo4;5rcR&bN-F zm9#rd-=O;n_n?au*+%u?R8sGPOK+5s?sK=( zZYM3vCzaG-*^>kQet5y?8!7~9&Ymq*k~OE~oIF&5wu8xrq<_T{Sz0Cds;UrujqEGm z6Qm~r^F7q;4yz%gF+`g!**Di(gQ(bt$4nBbOE%7iNRSpYwkq_8rLCxK!Le^KS)4Mj z4ug;AuN&4-*DLn>4d@%B;I>p_v$SH!ajMj^qEEZ106g~F@>z7Y+>!lH&b?h)#ZHN0 zB^?k8gCK#aZ_}At*^Fn6XWKwuz=T$j@!A3qQjc|Q3!XndL%lxW*#{rv^Y4Ebw^uKt z+5*6tTpHg)F^Q3pvKY`2K+Ds%Cv8p@zGAABhP`W)+Lb_qzV@=T953L!vqp+-?>%GU z>jz)P?bY`&j-5Tt2jpvtoke28=fD4O#hZPE9C_@)FgGN->f&B=h3PYltqGahGsf3)9AVy(Ok4YE;@lR*z z_t-Za$B_n$(Fm8RXDPerV(L)2Q}V21^aI{E6rzdM!svyv&W7GMeERA&6e?b7VX_c= 
zQ7sZKHHJQ}k)f+;vYONJ$HQNim@XPaB~NH+qC#MjG0|IHP}C({fGa>AS(3#mxu{HT zp{yT7!eDs3tSH5BC(iaP0XtrlFW<@yess$SUuUTHvRpu0-C$mC;g?JSMz z*L7)>YGQt>NL}F%c{~8M)>1h=B;=gcJfJ*QMAD^jfPK#Oj?4Q!z&z`#^^R zp*YKRc2YOsj@z~YLsT0fa8gJc`A?jzhEIO-zm9tOQVeLz`*~8~;New+<+dN0*eXFW z02l}UyMO231K(anDPC(53xgc4=DH7#;hl6Ope~P4HOAdJ5HKA0^EHg$eG8?meY;~E z8^*E2-0{?B(wOR7@nf;Hip%v8WqpRaK19H2>|q*vOlsvluMAY)eEu0^4`sXNr1`K> znJ_^E=!BSwe;xsa_j{n?LMMIN1D(TTdxF$PGI1w8bRPrvZO4UGVO2uZQ|AtRbiJUq z6>YhoE^AgkN?l>?8i|YMzgS72NNcoGm1p7iIzfl<2W3u+H`+^q4hZR2cV`3o420A( z(n!_$M(14jP`2EOn#+EL6QAIQ z0?55p%_F-#aeLn)etGWr*s33=~Dxn=wY*n&{t~KuIK)g>)@t$(FX; zN(Z3tN?Z4J1Y5~ z0lB7!!$NU1sL&Kvv~iYj&Im+Z{Wv05I=e zujjzA@9Ed%JCY{J)1hIdZ?_$=Nh^H*_y{o@j7hNrj4>xIt1&6qcAdqZNlw^_K|34g z?#=hV1N0Dl(x{2B8B;kf1V-WveDQQ=FZS%SQe|Wj?b;0X<*)vr!=2f53|>()XkqDR zq6(e{COC6v1rkdqqfOKhu5>C4kkvm?D|zCs04q?c!3~uvQ>Wu7bc(etL}fzVKzLus zo>|f+6sQeH48fukm&D?5;Mni!pyN4-)Qvf8NX?Fc5O8=E9oWzPhyEk@zx|p2eFnX2 zAw@`H!e)6fTdh>}0wj&5MxH(-t4Zf;(|Egh@HE^1#ed)5k3aT*`F~;bgJ=tbQy%(b zcseRIDajZPut&aVx^^hSgi_d93trTxw7+?nm5Em>@;G_z#%uQu^}=UaP#LhCTR7d4 z|CXVRiSBfRx)Dlz6!`Mb%#^LOU&u;Gz@2(}w1;zeVIlNGEw^Wp(MwC9DDh7MT@}{T)Q9a$Y_|iFqN_42T#=YD3gNVHp*pN;M zvLmpR*o8+kv?MgA46cG61*Zt5F`sl7II;)D%;2TM%7V-F8GL;PSViA&IG$d`l=cy* zTMk*JYKV}mY$Sc`xl;Gg`1KZ8H{|M&}# z=!foR%mbj0ab!Ye04|pcwrxi#p>XDinFjl*A0WXf0PC_4VP-PVo(mn!hwBATPdC5} zYg@37K?-IBBg=Y0#UOw%CWxXS3K4|%;C-VAMfGW;&eaA<_O3*xQ5Nnl%6&VC?+!zY+)v{g-UhR;69*`Fx&L@HDvsFPC}QlJSkcnoQ60VUywdOva~ zsj6-YN;fe*WVlXyqcd47L_BiXvj7EGiW@7@D9yUeA_2Ov2u-04Kab|iK#w6-rQ4bveEUhFn z;d)uA@EB*REf*}cBq*r3S9|U*J7r|pD6yS>UguFgB&uR3z4I$y`yscy^p<6s>}{CG z`eX!nQCh>XZ>Y;kCs$@w!ab6YQ}8<%Y4UXyeAc$G=^XsIQzKjfx`IGw#;-vs%2pL< zfm!~bF#u*`W-SHFvf{En;=V_g29Gi@T>TOy556vQAPlp*Fp~3m4(216x;DeTccOGu zreZ=R1fr|4$xgJWBJN&X$X07Mo|dv;HXqX%ct<($!7oe0+r85n_0t6cFZSeA8l=-4 z$&C*sGZCm{3GZ}>0!@qs6@O4iUMNtj7P|g}8rnQNLW!@fVCxeR*6=deq>gA72oN-m z%evt8(+$XQb{f5MQYT5B@w|{F(Ty|W2^i60PSxJ`cM$KC^i`ltBPAMyGb=MX&z^Us zoZdt5OaxceqC|da6;DUJTO{Ir8gJ9jsu}3X80np*!cyx@R@9xNqI$uu5adtXtO71tME!oyd766OP6eCy}2zkP|~nupM{SyI%Y zYpdA$oCxo^_Ug3>esuKgoak3{u%i4(Vpfyw%?fN;bBGz}v{f<8dvLm=QZWnrRcN@Q z^5C>@r8KOU%R50Q5Y6a=XQpD{u^GO2dcs+#!YrsZZUt>wu`c0w_Tzx}j=?!NmY{~g zZGrg#a|VNExG*>b0|h1TK{KOFs8x7bfe{V2PFrkLW$Q;&e9v@GV?tSpoIkdT$ED%b z-tlJJvj;5g*@&jeLbUh{P?SoUU{=EAjA7~vFqj2jIe@1SPBC~6#C0E33Q}_}uT|gC znZUwFPALX733^t)bZV7g99?v~l6PDRfeQ50WNYzqXnaP|m_SfYP4~xfu$njWj$9s} zqi=W8n8Geymxib99_Q3DBn@B8HB@j|Yb0R~U>pa^azWp22`mks@t6i3_D2aw4B?H+ zn~#Bea_{5s_XIji244!IMDMCLB}f6_+0yXEw!Z^XD4^Z@r06JF6BwY+lRZf!8Nl`W zfNk4(WC9rJ#}303Cx9pE?;R0|TE(2eJAvz@s+GL=qI#u09?5RWd{VzS_$g0knp9oNPv?Xc53YAdRE$0`30k|tF_?8 zvuF73XDB21zmdz0L`LDcuS=< z#X%*NQxkACiQiN#mj|FWjN`z*Zzzb~*kuXIb|gm8_fah-tV9EM_!w2-Q*z&DH9{2* zNn*o8fXE=AD&k?OhsiFPy-Qp7F=9U0a)FVyEcMoZxG4fLuRw z1!ciGEC#G6Q*$VeEvveT|K5*YSY1o8VkNhmrT(a5& zQEKBSsiFS7kWp$1n#%2nz7$QUOc_MPMRS{(VoG^t25f1JhjU2GV6tJNnp7AX3_+c` z)SAIS7??_8jYvm`lt6J~&HU^BfH3b*E|V^9)?P!7O-u8EZz z+WLs3Z(Wv&)EJwuY6Qp;yik>7%!F5tI?i5{Y^Cra9=YTY#~8RDfsC!o3Q5^zg5kXU z8dNkS;1EIIoG?0*0vRJci*s4W>2ApJY9O2g_}yRo7={s(QQ`Sd9CM*cBl}mtL^~2u zn*l)4of!DVZ-0tyKjx%X8cPig+oc?bDT@qvye|O_RT2KS-XA*SO!!Tl(8gjk`5%}0d5T)(|IG!k!0QaQ&7{H*%w|Ji~=|qZn{~ zcorfNI3<&qjo5lO{r+IXq0z8wioAjq-8~;2m!;wP<0Gr9aRYMtHGsF90?#wGF#_P; zJI20Y-|k^^Jw8*x*16ZIkXn{@eT)nk^iE2~REZPya_mnSOkCDlLe8_t&@1!VtXoJ? 
z6}6>U*qU16^Y$^4HlfN`Ay}hIvnZ}gbVff=MeQ{t#V#uSk&tRVTVa)w>r_Euh<%49 zg-0bT&eAbPO4Bx0k#rx9OpNE9JBBA+%k%FnX(GpPN)u1t0|*GAQ(rD<>m_WC%^)`b zm>Blz!am%w-QE(xpW+}-+cu*uD0c8nTrKw?0+z~sjLJw*@lsWg>m|D*vmUSD_ z67KuP;gO1W=w~x<<^2?eX|8+pgvd_AW;6e}F*wn5keXKv-Ho%u)K3`|3@cIbI|r_% zg+Pm_Uoi)yvI1S+dFFM!;Mw~hVtssp(w4MMru{5NdSt)>3Pw|!)`+xQO}ibUEj>Z6 zmdRbucZ-3UHP4O`7Kh<>90b1NuJ_>#j1?$H?3w=Od^fb$1e>E`8Q>vG4Z3q5_~P}O z95yMo%A~D^2kABm><0rrTAu5N0Ds~-wM|ceL+r~%{gaj?zu{6%jV>G9~FXbf5 z4$D{@Hd_b;pg8Y+&IjTJbXJwzJ%*UNvoO))^J+t}r~;MD5qw@=$g~I`h~GCg;g>10 zQ4A=} zt|}1 z0g47gjieuWCT;EtT$r?(0i|L1DS73ghXKFuN4~-7+?K(3&8kIQx@~9k_$f%MrQ)M6 zeKi`~>k5>XiI$8&B);vPvT8ag7u9H9*1>;=QD>#CE?CxUqAx;sPB2nd<3zcNCet}A zPQgP~pNs?aY6aS}yf>knE(+Vospa~6t=ZHvpTVC3k)%47@=mfAi5e4{W5-|l&F>(B zjv43+b#7F!&>CbfgN&gb#ZKpa4D9=nYF|#L<^cBN2uIB%&PcIQbxAZIB^RP!WRg=W zI^Pc^^y5>f&n&BwAAb4k5rB`5{q_o<|LXr6ufO%z(DxfgKk%%T8Q2O%=9x7URWASr z5Rynb74O7c+c+mR?VKmo$=xZ85O(wRXWu0S2}v_^pJ=9n8lC@HxC&dT`w&|3gH(=i zv`ee_b zfzBitfNPyfzKiHS4BiKuH)+!%(=*aT&ejM?S9-M{`0Tb}>jNkarLFUhIt3+E**MuE z0UJAMqM{8?iSPiU;Ku}Ej6GURIFEh9=sSu}DrIF)SJVA+ znrfNo**pGV+U87PJc7l(Jz?xyu2hCR?tm8zFYkWNc@7WH3{;s;TQB87q)k^v2wXq& z=pkHvgZd;F(oGDyQ<0QCUzfbs@DcrEL&NJlDU5@RhfH*tI4ToZvaezGhU(Rp!k~sh zWq?$A>#z%z2}W#tP%qDdHv%`nMCfsicV6oR@MJqnDX7bmJolvYz@TRE-g~I8o%Zas zp=lS2#Xwr^uY6A7InhfREE~!<35@V>4hG|mLqH4F%=Tl)?aiy01v4h6Ki`FSv~Ydo zqGihqqAK$2{SVMlfD(ORE#;siAqxLsNaaM*Fg3hU{@Y5$v~N5w>^mu>Q3`C8a|vKb zXXTu1(N@+4UN2bJ2WUb&z!_a7n(2qdpw702q83WYc~gJ!ul;-`1n!R4zxD4xV;Z93 z-15GnRA^j@dWc0O9DW87!e?F9=!4p}J5^MB_6CP9?AY&j{MFz5t+>ww(u0mjyND?s zA)ll8a5%6X2OG?lV1)WqumUPH#CO0FST)+Ab%~xdC2O@*$_sO-E0A<5Sopv}`yo*V z=h^JT(R%=;k^EcIrkhmQcK$kH2ml^!~ zU~`%NJQ|Aw!HC1NUM|tq&rC5l%RKd*WkA&3uV0~(6HP;3fA0~$_W4`f?i*^N86X#! zTvf2VJR1uY#i5PzNy-xvz%8bd%>Y4I_l}wz^#@HE% z!vf#!ZsaD{>?Kmtbtwg-RJ0Nk?M=Zy??m@mneo#(iJv*wQ}l}Jj(v}x?fnf36B0Qr zDa1)}#r*W_45GGUR#p@QIq#Dt98ln8(5`1xd&-?G*E*ZHt>(}1+PL2!FLWZ6wH6e9 zKiuIKiX7Xi%2tbE9CH#i%uYtNz{)A{@PQ6c?hqM_hq?+)UYW_F&jrEG$=jd)*5ASy zj^*Jw_U#^x=A?|+IAY?gJ2nn#L!K%mx|z!a@_qZHv*b)1_et;5I%d^3p7C*Q*m}>| z5awWlS0|7I%btkmOT()}Rh5y!bHPCGtSR&mHVE!>M?t}Uzu`OI{ub`{4F{)Q??V#N zAV5)KiZ3_Mv+~1z9aH&4m4d;bT75t>sBmZ?<&YoXvmQKJTFb$ER%%5pq*;sz_L8X! 
zpe`#uxHi0gx-r0r0Pq;$sM^xPE?a@M3(AOLvr<43&pw_5nYe3yhALGC1S;HTGBJFh zIM6$16V-~gKEOQWIuEh}dKyWp0(ndUK1id~-4=gZlouHCPKs)qdz0r}{*Ttq0!2Pf zG-cV9!Jf>ewycb1zoWAsvef7gr?&m6?m5Rc^Fe_WMQTrHa_+;B;d-fjk?J$Ky@jb21 zUP(RBk-=T`w2Z+XCMF9cI%5Vm^~10)wIAqv%!Zi5b#*>0&r_V?fnqhQ zOageU6x`mviF;EU;0?#I1$u3Ukv)U6QWsL-mfp@iBxDO!Jq5lkYE(AMMtxCUWnYdS zdm5l`P<2a*srIEbpgo|r1^4X+D-B2AbB0{ZvY?c^HGK0Ye=olE%fEozo6l**=lK#y zBFfQyKTz8e)F|4NiUA)5tpzB(AJJnH09obTAvJ6Ckh$gFwx!YMP2a92ekp~j!qbm~ zeOIEFi6$6#QoukXbrGgHWTc*2LtQ*7EIe0Ajs;Y?gZ&;+v4u%gg!al$eI&KQJt&yP zquLd+;hy*67*1dxIF8vXUTuP-)dU!qjO_|S!npSi`{c_%;FqOglc7~v(+qWeKARYw zv=A$(r9qXBL}3glP{;j|*9Tuv z7+`jqD67CuPQnz}hEuDB5`8|VgX4O{9?m2)54S6$j$jIYn#@TD5&GCQd9G>-Q&2uD zUBS<-HjPyV9ntyrj$|NY}p}WB#MKq3Tlp@9qS~9IMNsl5Ys_QeO4<+p6jR;!~Xv`dw zYmcew7598{gdL8xwQMQ{A~;pAPMmLn35Z?-?p><|UKaMkNv@LAD83)uA@PQ`tXLnP z<8pn(+wXq|w|h*rw-VKxR;VA(s_#KCD7p0r+J*aI@1Bx;Y1Px77WK>k-uJY9Q7Kzu zd(BR3%zIp;i5)mmxESOY?JxIGy#NP=2F#Bf;0E0V*+xENsV#s9IXy69*8@1%SPirS zN_Ha8lmgorQWok#ooDl0^$1FvC!(2)gn18*$)f|Tma|c~HvGs(AL7^E80vV<H0+%!;;0ZXd%cvK_{FrSyZFi~bznh{Ast#Ll=L}kPb z?j_ctzXL3TY*kuHZFu(Hhj{b(XSqbtFA{*r=u&QsZMxKoyQ;ZBRwp$G8B8qCeJrZ@ z3c%2y(?PW(K=UGMBJzNGEaQBb*43x<>vEz6E17zD_8d=d-(cG|XlUc;`zZ<0P`WxR z*B+2#44DIJTd-UnfqvxBV8wotRqvyp&Jps@je8AuYy^!I914&V5b$GP{RF@M{m-%Q zM*<#eEx7Nq7iT7N0F3s$Ua>xW2wR_HY_G7r{XWKVLq$i$z@2tXRcclJF{b!J*h4^| zn5f)37slkItt%=5ssHs~{RMpU2Y)<6hw-9-jfdEmCK4NI151zTOt7<%V_mHM<&+ze2{9}0aOMe>M{Y~r-p3 zwyA0wv!%>9C!`e0ax1R!r23(hd3HSPeP?W->dHW&NPK*6Naml53t|5k%6qGadtH{W z?PDBxx;^DAglHBdrNlz7T9e6jfWV+byf_6B$LRU;KmNM(h68!&JS-}Og;I{5XC%!EOc=z?6v7t9K*^e=1CVh)#+Wo4KOy~$ogJ0~ zsAk|F&N=zCCj+B>$Y+of%DhJejPiXkHO1xhqS zL}10sw;Q~!D0KnEb=LFL(@-w1V+3{NRh|Nl^Yfc=n4v|`sHEqj?kpNTeP%ev1#UGH zjO@RHCxy{`CRCv+(cq=d@@#D=9L5DRxM`o_yyCQ8t_&Jc5z?A#Fxfe1Q((yt&i73I zGmU%q0=EV$q;oMa?kv|EjZ_T%YZPlB^==Ft!|`U@vKi2d0#FC~bl#x`G6#khI2Ln^ zzGE?9wV0%Fu!ly16oFz*>d=5up1VQAmxOfHr1{jedveA(wN^nMl=z=|MaUBldDW$mqM%>3i-z@PFRWr_ta-uq&MLisz0G9mHJm({W zw@C2tJI)RR!a|UQHnXcrOcMm9aSANAK7Vu^8k#nS2lcGz`4{)?bLqKEmcNH7MzQyXBPl zS_&Rtd_Wc1c)r>eECk;)cvoWrRH2vsQ16lQ%9MI;Q|5UmO|MGEnWsheeT)D@@Ao^l zeMjUYoJ_3@(Z?SA;kVz3aI65;u1|{I|AVN`u*s*?>&(R(Y;y`qBD)+c>*aA z1AE*NO=3DKCeGx{DuvLb8j*qAUO5a;+YT7^0iXn*bGckIcv2AdbN|x6o}APSC^4xj zy?6Xy|LmV-2^urlBv(9?a4Zjvi~yL7&>!Lc@;6~t@aBupq2MV?bruQS@VR*=SOB_7^0`ROC7C6}dT50WI7<7^lmpePEW$$<%F$sj|Cwd|NGTPmBUm_#}^<9_ky zndR3(G&fK=k9!N9Pavk3lRvD=+sHFybovuq&^=p7PbpS#S(dCsY3&wP>qhoBuv)j_HW>?{JB2`xD8_mj<*-vV$7i09tOA8a1@qwc@c&yO3u9 zNd7!dpKsc9Xxe(oN+tMi`*tU=U~rcYoV}Z}k&cDuNIceBVQ~D`%U2WtAsEDW6a}-j7GPN^^NnoMmB45Ffsa3k1g18*m$YeCDlW?f ztzBSc#c)digi!3XcL+5p6NrbfVS~@;2*tr#Ltvp;C??vtqYtD|P9Gzc!_=>=;m862 z1<2$a4;WC|ySB>MbJ#=qP8CbZOS=c~5Ho5z{D?ua<^rX_iRF{Bdh`IPCWROVE@cKoLN((e^Uvw5i-KDwGtbrqr8Ttm0n6k2u=?Ue z!zuYQ40EP}VbT-SZ0SxKk2VgX@yPKzZtHVv=> z8qr6Dw9t;sf6KW6l%0*|e&=g=I&v7%m}sFA2D7Q}o$6<@Zn9teNls5Js=67lEG^5s zgGdZcYm^D8-o+g+o;~0{|405%I_Sbgq7qR-DyA7x*dqd58q{C=rN0qRR4PUv7(}|1 zbmxkB&=>&2zU_%#O3f&fb3W>!HBx|=VY}s81)4pBj_sJ}v*npmLO10Bh1w4p!66!l z5}Y`f?|tP*uw1TKn|P``a~`Od35f6>=ZG{d1v)h;L2~>!B{P8Gz~~#`C%IdMBNHD~ zB*Q`w!L*(hsF(QM&Yv@m4H&1ukwN5f99Y>Px=^`ftpyLQ1!~@p=x2yqo1UkWwS70fWNP$F5+9oLTPD12)XOv88_QpF^GBsjzU&Wg~NELI; zDYx!ZK}aaRaTQgO9%J5X^)|`zzrAm#zzox_a6dOX&Dfs_2y}N=vm-DV9_Z=3xAU&0 zDvU?f8SX*lBTB0^aChwcmNqoo7nx+JfxWW=B>C(3Y*v1JVDv3!z8pKovBmJ#F{t)4 zi2U+%d3Ji=aU478g+ZIGh`smGaesQt-T@8I^|Udngh|{&&ekkuZpaSk&)`VaEcq1$ zL;4z@);1H9IY4smvjNf5#Nbi3NPF|y%a?iIaJE&vTa_S8ZPCOmItl>vV*{L0*+aCY zns(HkA`5)C2v{SCKF>1fd_M;=IhYCAw4vuDO@^#gHP=!AWkg`tkJ+~(izyj)Rca&F zDVoF;0WED`Nt+Aa^AUV>cb>@zu)p@~@(%4$bE-t&7NT5=dZ&=oMzf%0X{ewXvU=2E 
z*oOg(g+k)klCBo-C(5U1B9MTb8D~lq(y!GoB?VFql*^0Fif=)vcg7(bz2m+g85dQX z!S6AGwIC*ohW5oQwfNKD{`^&(pY=tqll-v}2<_hB7U%Iu^qlsA_aOT-PxL6QVWBdZ zn#>J=j|0ck_weSo|2no;zX9|R5}JyFKn@?}pJU9vQrXe@^))ME8Q%@nLlh6uHrw0> zNlj;v&C@v9Bc6Z-;COS}qKAcs;QsU`03BAy_C6>Caf&U{56rkQ5(CmM5tYDxY{1yj z%z<&>Kl~HlghJnAbR2fUvR*O9!0pZF*thL8(`x|B1JAf6*mG7M%cK`4N`EQ%`Im1{ zTMPBdw&F}@RliUA14{6o`@u`8(LrVwgJoX6c}jz*Lvi#yGt4u6NCBm)29mA}WOUM8 z;w3`D>-zXSP!LX*ZM89jjX>~eL}TBVzWKxWul_Ip@33+s`T?qxcp9iyCq+U*;KWW4 z1Lo*|6%NkKK-K4jeSp<(n9;IhLY$1K90cC~RLdj#6Kp7~n=Q$Epl7#Of zJB(tW;6w%jW_Vne6Wv2jbvj|apNydzS!-J%uAwfzK0DSRl8nDi!g!1f%ma-b7{>w4 z|2V5E31NBp#cTX`{`mjm9Yq>=peD60C~aYbEV-Z-!MCV#ki&FGOVyQvwatqqN4lMy zZy=}>0g?Rfw(q!ackFcVLUc1gk;#*>94A^}rVmgWF%8uSWV$lJ)Nn{RgJ2pLm59f1 zy#DUrLO%}nXy_-?`6y^LK-E!kEli-Hwks1Xdk012wa%cE))i(*3QnzbglT)@e9FZBt~@BkV>AFa$_((c!xRzIsq$V9FeTB zA1da8OpsxM0s%5eHW|>orx{2{rgr1H=$23DPQ5n<$<6C88fSA@DJc4PQNLYO!ZnBH z7&GBs)(dJ|gT}&uLB3q)ddkms+_w!uOq|p;-D|kxqsL45_o3$0+3+lpl71YC#yi2b zgN^i}WUZv>BxTsx=R$v{9>@SMwOG2xGgy-?SXx6}R#;t5{)v8tf+gCg?$Im_B}lv< z-@WZXiJ4&$d`qr>Qdieyq3;bFacMK@9=+p{mGHD{W>}XC0XS9###CO5_{r(h=6y4F z{BwWwpUpW$W|nf(QYZt}7iCuj9Dr-r`#}JB_P~1lKrA&XQ?)Lc&}5=(qzf(=JUl$h zq-${9`t}9S>a3=^hdrwn2}3c>3_4E1J)*~g?Hf->PI@AlFFr6((pHGZIC|KUjRS{~U``}8XEjW~DQO$P!vuK-df!nE_&@#iUuM&I zOss$V*|%}*p+Gx0D<|k+9sAL7d-FvCQ*o~7r+6Pk6_p1ziykI3Bp7sb$I{jSD%kVX zE)Qe|m82Oe$r89y)VO%`LA8YoL=AwPM;~bfWEA7#OBtu*y(_6JR$yg8sVjg$#g85x z{(tm*n;z8u~ZUe!$SETwy>afblF5G;DjQ)SPKHGCZvI%wI+HFjjLDu7fzF`Y<uk&zwU&bhFLWHiy8wD#QS#N5M1g&51oA6NvUwo>rmno5zgKxn4#bUOkiqJ z<-)Na_e9uYnCNtBG2=p&79#5X$ortaD%myDfdw0y&q$QdYK;c?@jLQYv4SzAgCFZT z=^*p2n)tqEb4G zfWovX$UCpkeE%}-TRan>q(1*4MMih1_egFI0tU5MCMrkYVeZ(rdkAeXX|9FuD>Tvp z&8jGv)FI1foytf+fKF#ho_vV`u)5@(m+dSJxSftVb&)=|URepjfZghSDBTYsKTk0~S&Wdyiy%I54cC)Fows zm3UoR#2>2ei|&3tXMh7^XAkEo2)7w`={e8T|2f2(JvU^+G{&?6hySjhKwwltLXfZ8rWxwC@%(_6l}=hy3(;=y7j z9dc%Wz6Iq1f)_+d5NZ#AB~QIntrf4gBb%M!2Y}hMtxNNV|L`Bd{ZOExfX2ho^D>Tc zV7tA=+gG1sKekw~TJiRXa#gU23Jy1*R6IU=c0$M)AUWWr!bPt^@RhTau(F~SC1T`P z*CjVB0_2+6b6P5^+1Q-q1rJBj@88AfZ{e8bec()=igmv%Ee*R4eN_;zV1w@#s7vQz zJe9fmNTD;$hu{TMQdJ9xh6&x9CA)L5ljn@jA_P!D;N-VQK(Ox{b`G|RWRihw25NzV z?7WORxHSB$|H=OVo?k8z43r8lVI18Wq=_X|MCVbfz8^=@ zNrZZxD@6+2^c_iQ%o{Y{TO4jA5HJ&7IR#bMoXK86aASpf&5u=(%r!Os?9p*-H@K=b zJ`?LKS(D2-(Z!yMNd1UGa|aPnXJEb^9l?LD*u4Vf0`~wOAXHgSDyywgO_cML`dp$a zq`Sa*X?*Yv0N6dT(4gD#h*hhp5 zg^9@UXxtXo^qoTz&KYbrO&NrJjjerw=hJcwz!IP;k9I;Jx=hfT|e^F5L|asgnXp zbcXM5KF5B0ohIOXu0*j(r9wmyYj7rPaackv``yZ9Io%T-sp!QknXrWr#{}b|Um`xC zQV~*9o6j~Tl`Cnhl%J0bwC4S7HPrN?N)>CBA|YvIWmDw`V^1aiGDh?O+-`4E@lnZ< zm4cUdO8X)zRH__na9n(SteavCt?N=P*HcNh|>q@@Ch(j^HO54MfpsNfFCZ*@ApuJFOs0xy z-@`V-0>HMqqAVAb($MUHdGtn9Gg!T1R^JL@2qLLuj_e)UhkT=-dY2e@BY?Lb{_2Q=fxFkIG0)a%DUeMoVOKHj~vQ#BLm6YZ0azv4;=%y`KFB8MXbJ_p;S zOqrwQ2A103V-6vU%udQM!ELMS6|jbZ1$}=a$55Xy#8HN>zY~~7^ARHPIrcc{LtJPz zlg*Nu6f#0tr=KpcO7@fPB$b7AUGez*ee`3;)7#gH4y*JJ{u~)EtyebJ(`on^d_Fqq z3@{iNP@`R&tH9H0n+OFRQWC)!pt^-rof5{&dci&-F>&rm4&6I8C<%0aQgCID{24;z zWbe)ir{sy|KYLDMlSzw%OG93;5?ITHne!7wHO|jil=?xc698+0jscb zLEqmJ*`SJ$4BzR(juZMI)gaN!&W)Ekm^yQ(SBb!e=R`_Q4ohzP6nao1N>o09*$*cu)5&wo7WJ+7nF|~^qy&S`gb<`L0t8qTnu;Ac0pa< zLtS3rxP2GL{s!=#^q)X&X3}$>DZ7v^T~eQ4*CX}e=`2*?(jC#coq_$?#>@oOz^1`I z-botuW(xFSv-#WI@w`<$ao?2~j?t5|J}@7)wHm;Hfz&(Wa4J3Nls28uKZPg4XE1=G zQkYT2&DeSx(|E^}XDv}_&)`sjB=3AU*;DTm&!cX(_C^(>pr;1)BPOVat&l35K40>! 
z=}c`Vb9&o_f+FXtHII4*Z;oiJ2B}(!Xd;?z)rX~MT)~khOyktptFx*J$7aa8rNNU1 zWHkg+-f0hI!3gUb!c{$&e_*yX8iV9A#NwJbYXvMOzIo3GD2ZD(b{ULw_;?IpR1bxnY z;82eRB1jIYHufVxJU$tt+|hQc2rK(I0U z>zKNc$`*ICl!AS`!vt(u*uin^xh62YL!Hz-tF$o=(n0}M;pY%n%RQc8v(B|;+}TRO z(IZg`3a_6=F`U0QDk1{Z%5sHM^)?wVjCDY}(HLhA#_z-9-k#;Q%_;_hy0@;6D9dy7 z{Vm4v7VtePH)goBkSpIw|4Js20|nGZx1y|OZkV!W{khs@Lv2WnPTaudGN}f1*i)u& zVBdFq{EZ(D?ryzCxvSJXxXFj3#`2j9n;9rkwS%&=#byv^W>~AufZsy{J41|jE;ur< zi;EDBvvE#^zV7islQwIPYS) zu&U(iYh6@UyYB~83Oe52?|k15&DD)Nbl@No;*eog5OP*$5zn7%JZ8XAYeB6`A_-1D zvkZ#%L>Jh7CZ=+vFq=-z0+rKbTG({5a>fxrSYC2BsRUIbfx+Sb%10mK@IdvIVANBC zoSyeA#ki^Ai*%C^f6?nEh99pF=vWU^r(Y8Iq6fP(V}O`A6?~3`c8A{<)o=MM2qF zhE%~r1R9uU(LHU=`~=Y`XzQ-e__LJIvImC=L@5PNr)|;vq**o>EvrJ4n5{%5FDA`L zgJly(+cE9j>D(qb;u?%OgX*a0u!kkGFeW&25-%SeDFnhqEt{FSAL0?qVT%f-L{ro| zJzLo2|f zdODS$49l`+bw?_8wO01Xah_Wya29b~3(D5f2fJPXhrdtZ3Ds9C$b`(Pf3>r@5e5l%LWZZ2D)vD_w z?yo&VZprZwSf?D+|mI6yLEQ8(N8%(f5LzBqL-c0?#7(Uq7WdLs5787_&WNzWa<}45$^0Y~qWyMsC z$*+ijLS^NkEA;&Nh0h3=n#!pJZso+!C4xvi!F}LiU9k`6C0N+d9}PlxPJ3?41!a9e zzr6+&3=}3_Ln>8;6cIle*-5q0p$Cn)_EV3OEpIvFWGi;S^oELtli0!EE37Uu`X;0i=h~hMZtxVi#=$FdjQe0Qvv5;UaN!6I>DrXB6PQ?`$D6+e4fAJO+)_WAUqXW)`K_-X${KUGpZlgoM66%gwl4I;8q6I|Ld zgK1d+pFwjbIEK@d?LjJ3RAnW^qO*9ex|s=?p65u3qQoK2KVQLa07y1t1x~;2T}*)4 zKct&s(Ej<{5BFICLs+thTJSd2gw(sqCT;))o)n=F@rY+{RaH65bzEA-81qbvwJ_IB z)icXL@w`q7;FgMzJ@nm1{1_E!{WoeFLnfRQue!sC-q%McEYXNKpz^fYnE#Al^NC61vZZ`+l%0sAo|H z2QwLuEAP4XQ0!m@rC!jM2iWx^Jp0=3L%Y6@x4-poWBcMaaBOcd`j(aGJfCD?7!NL& zkN3>ty_qZ?hh+T2H>r})Q=VIAVfF>X?v&!Nv$ zvL~N!r*%qRhfh#VveA_`!wv(a}sZG5XGkmtHy;rAOH1LKH+CY={H zrtVh2JceU4dn`nhM?Y|X^F%X~DNM15D_@en#;0<@n*`Xll_svOXUN zG?7(TCdyc0bYqe-4U~)vV%Jm#!`mDO5O2`a) zL{L31d<)Iw&#-7ph7~ktAVz~HN=2r1j(8w*ST^c`eBs!YP>&fwuc$Ey zs*eaV+w}$fxP{SBa82$^RGcggP~866$d}Jd=r)j_S$&c*qWJ&{gV<8CLAJRK33wboGA zhvbZ>V~t2R*G;sauwT?%XFwBoqtjur8mRL`N4mC7=z7lg$jgOCfL)4XLdbf(9`>xXM42f_1%c zj>4AvLi3Hw5&*_A2F5`4h_u>5;fYDCW<1@c>}Mr{Z2FI7SyG`)K;wO9pN4#T+NAJ( zhonz2&!hXS>>Ubb}QaAPnFC;&u88H&s^&Z87;+)-|S}ld`SEdpPCX z9SMLeoQMrRw|ftTv)-A+Apr)hg{ld)HA=mTY7{o1nBnjdQ@-idL~9SZ)p*uQzpOg~ zA1JkEl3y*ThB+riHcWP_HA<*cRga%>rWPIV&}@}7ID3o+%y*32XL$1)|5kuI{T9A^ z98C6+amRj)j8~;RraRYa*i6)gdnVvUKf6FG!AotpZ}+qp0#kG!@_sN;R&~`~UmpN* zcSqs-20cH-5|(8}KQ;_km7M!FMpnp+%_?Il1>R%-mRdQaFcO1kltuOZnRp z^U!Yhm_yUyurZU>N?!_B&vTsn!9T8TML%|~LBuZ2LPVi06^CEK()o7KxN{CS>%89tl$hT3}BnMU61y1e#+kW8tPxlBw-81kyUjG6QeIUH<;)WZp zmkS5!M0xNy4($7mwdm(!2A(CPgI&f13~RsZ5&3g(+ZL4+QkSY3j{8%7 z<`{#{+JRIV3+c7SvaC#kGqBRvef#C-_}0tMNll3f@S%x+i#@3emgUMII*dpv-@u{r zX`&hsqUgC-(d-w4?xfXTub;rmk_nlTMeuUMfEOQsnHP4V=NUZfYhgq_mQ+hKn@ye* z0Z@iM(PUBYTARqMm6F5PG-3hvaiCJ*f?o{RvN35Y$mozDUtkjPEN+` zc@Uj20wd}0$G++NwcnzrxI2mmItnLs$nlPYKlh8MQE)iGl?o+;q+!&~tmVR;;aZT75-XR@m|k zu!i+=%{dW+fYVgplj0ptJ!iU?6oTkC`2Gd+rlzwRdaedoj0k>wi4&6)CI#ku1iZB zpK+6gxJ+$J^inC_^{C7?(&TF8T>;4IplaOkh|AiVHVYPX*P}by<$1DX6fYDjxrURz z9Y0izpXkj5?Xv)sNK=k}VBc>T{VfJn&&=mrm0}!(4Ln~WcbL7I4*T*~e!vR{vmM)8 zV03)^^&iA{f8$qD@=@i|9CAj~D;i?YI?HqOU5<|%s1jHy4Rv`2!$3doh^A=I2Z{w_ z32(^d)u3#Q9sRh$oyZ7{rKm6Q5H-=BsH#vo3kB|zR&hQs$~FpSi8(UK=urW=bnuRG z%#vdSI)>q4NCKHu^&^ndxTiv&Jkey)twhC1>EGH?0?;tkc(g)rKhZ}bSc8vj2AyT( zK*1*sE*y+eQAy*3U^7x$sL{PIU1;)_?WPz|^)3oe%{-gLu$ ze?mVtyt&_lLoOqX(=b8?FeIm;MHwITdwHaTa3O# z6RtxNG%8-D)Y(`G^*tDNqQ~c7`$@6Id4D7`2t(?UUs}U^4_8=ecyj=yU2wm>!M5L1 z!A$)b4nPWk0WeGolb0!r4OI3~&_?-8bd><5Z~+t2tRMQcZy+GUTR>{rpq->Dni*-P zCGVCfl*V%q%6koHkMR9FMsQ3HC3hTSn$+p;+ovnm5C?*LA)pwb>k48W{)B8**B3GE*k zpn#uU7TzBwdm1?D(Z3kyv@o%Y{xKmqklTHF%|XHdXhRysCI*ACW8cE2mx5{Y)%0&@%YuF1U_e%u7W%tMofO4UXK&gN z%T5aea8ivG4Po*9sp4`@W_8w0Zd78Rx{>g&!!WZN;|B 
zUdG_Oq6FU$9DP4gz~mERHk*Kj%02OMu-SX|lNf-9%ZlSre83@?Hv8U=(b)nJl`=C2 z6rTZEH9+4nX9lT?!lAUilX?{ZNYJUH5gY`e)L5?m+Z5d)Sm&hA$7e*mvRs~FzrEoZ zuX!J%KO=1KrPdT@8jRmMDTeYF4htFN^$}x#g8Pwd2Iqa$JflGXN6)hNBhPonCyp4D zN#Iqnxs`%ty}}XT#&PQ4Y24%h)XVY6pSs6kBmzAu?0(w39xCFo#jvl#@&$v-sIHG3Q(^Y$6Gq&N#}BR zJT41voxw2T!nE2__P4tRU*R3JMM$EO5IN@=cq(wFsX8S(;T)0KuN<0m;DyA29CBTf zK<|Abh7fmM#!QJ$pj5R+#bdBQScBl$e%`LBDgvzj)JKW<$Q*b8T*-! zti%Bb)T-7(!!&un#(h&TrUV2Tq|G2*@2O^cC2{(@aB_h_dB1UmmCaMDo7PN$O>KeV zMsGy?gCQ!JutT4T2Xv>Vvx`2a3S$|x?|X^~Wd8!45{iapVV1;+azH&We4m0)OaV*r ztwwWQwcyQ{Qn1rl-tSN0bo$8Zn40hcnGNTjLzhm0Z6Z)zfsgL3=ergq(%d&#jpX~d zXN57Hc#WyLYR*%z*E6ZL4373bt9Pv}n9{reJk;v6j@MgK|x!o|vfr6+= z8W0@Y9P&8QIZ35Qh_ixWs|$6}+5D&qnUYQnDv+J@+dWMz2JM&C7_8=_qx!(cfT;om z5lRNuQYw-2l_E}|}jV(T7MZ!drT6uncNBLNYg;9H{tQu;hjyeHVz$H1kvP&kBuJ5|401l`55 z&q;R4c~13vJ#VcXrZ;FSMo^eV%d92np5Z41&Zw7V_S$8YBY`T=j|3teiJ>aNv47{8 z3yjRr2>@uYXizUnL-jZ#vhR|8UY3Ua=NP<6p{)lhC zdW!Y!wD}AU%lZM1`%4&l0?CbbPHVL;Aw~VwPd>({pS{HWe#5pO(`GWUlOR#m`7#5X zl??~3>xvx>+rGzBI|ny1;p-kF1O^ibAVd{Pi7KOG&iVmDu%*-%{l)|&H8K9D|K2}< zfA`<|b8vVB=RD)d%&_S3*^kaUTM`h^T`}5_;XMO;TGAQcy5j9J&s1Fqwy5XE$W$>7 zJS2T{ILrcc@v(<*!i=plu@>;;@ocrN7`?|pQ^p$tFM?hxyK=5!o?<)#OcA>k*e9QS z)n({aLKa-7!p4EoO7a!uy>o+sQ8T@eJR8{q~*gw8DW>aqL^5 zMk+r82&v|v8wjPBF$wML*|ej8x0QzVqo2jo%fEtt{}c`-DK*zrl?C9mPCST_T+9J7 zJr80@gCqPUXfQR=3ZW~mz^t}BT!7x$0G}#Xlkbf*{0fwX#AXu`54Cp%PeQwDe&#p@ z5jxxoPK(Wnn2}V(;RFM!v!1sC%SMz-37oMiRgBbye#F4MA?gld|1r6&GeDfkTsp&e z5)_-F1zIonLJ2>I`G$w<1+U+{4#Vpcy^b8@gj`!lAA@&F!^<+!{T;ue8JvhPoz(d( z963#PRzx#GAo*Mnfv4xrio4T{onMo5- zqPFDJXqSMc%_<)<0F;ccr*chHIh+B65>O_wKJSL}>vB@?X(8k8RW?J$c@%oB;7G|z zB;s*{0&QCma@HYfeTZ6gnjE{X3yyuufDSzK?lS?BLolm>%94&1mAZ}sD#{^?Y>nzXq zbnGQR&oHnqb)I1&wxVuI${YkttTZgkin?4-t}pQHOFxA5;W=J^`Wv`?@hPl*6StRt zH4?}Y17Q;-PtYv{C0YTVwmU+MCH!1r(3N+FZ!P6LiE4xmFalGoIOh4bOhg9vuZg&x zfQp(t3w#`GdLC?e1@F4{P@Xob;#!`)%kBLec-2rNL+=z}^FC#~kW7D0oZeP2dO-t$ksjyK=`P9VSx z_~$cmZQ2r_FoHU)-igyEPg{@K6JT1~5_<`w9DWW{R-&yW5huGb6YQd@MDHPPqDs7- z1rai>j~GDj$-5AY09N0*Z@mu!8U@$O1%p9)X$_jxtpTZYDz(_v@ zsDDM2uArDrDo;tU9JSPjJ88QT zLKt3P%QN)*tGOre+`DyY5a9msaK#s|!@rAN;xN>4+veykL z3w+$+U4tT-$Z!vm-NZXqYa|X%TXt605}-Zl&nHT)_6ZhXD48gwP!2AKhMYH)4NJ;i z(&;C1jLeQiX*wffm&f<=dljJn{y+G0{G5|sb)n=eFX+Kc%^4WQ0^Klthh|#@vY>=W z_eBVNNvA+XWSpro2H>;TPk|nlDcySv_CR=`z1tCk!W0C-NAlZ&2=0Xh%m7N<+%N{W z&;D)r@kRCp=umcM_=!)@;0AI%=UrseTPdf#IH{Lp@+M=6G#*f>smhauTGn!TNW^7X z7NT85SfITo?Zb$&Xded=vC+XA2ozjLCcMue)I;u4hY}_u96?F)Yx!fdDgM+%96^tQ z4N?GHj%xNtj6QJhoyt^PhZ&<~z&VFAd7=-NK~?;795d-V_aS({WAq!w=y9d zD2AX+0H;Jtsz?lUmSqKsre`BBs%)t{kry*u+JfE(wr$6;Z#i7b`G050()_o0n+&vC znQZL3Tw%tljQgxyDQK_=Cf{xwj()&Bq&mkKM4gb1^IR$y`ixV83k-S3RBiFoop&Y? zPNy$$pjuE?K)d7&%3EvUOlnMjtyKft#L-8d&+Uk^e$0Cy9m*q68gvvg8sg(?xMm*% zdzXqw+#i$`ZMkMJtV(UZsf%Lcc>m$TGae4t#}T~}eM8Pl!rY&5O!58E==2_ae)J8l>VYXpPY;`CAOmiTz+|Z$4(Fb;OeB%5^QI(W%EVgU`=0%1 z0(+uL@ulaF1R`ctLcJ1FKwFHxpYFIH2ku9Q*}3X5lczvkOUjcfSnd4?It&9YRabON zTjii$fe|#&=7W{enS3bel*qw2_WXBKS59698m)rwj* zpi+|LGa)jB?N}D|SRm!vKl#a@J@x&RJbN1jAqJ4L%t4(Mn1J*zvjR6kfazaA03?PC z7lf^^CQ4rj=F|*ZJSbE{H1+{A$I%bGef4?LP?cP{hpj!1m_&Xsu6SG*oO_%iR&B6$ zrG1Ej#u0}_b)X*yM&EJVKF5^zp1#1rp@5ub-+O1Ji2V{ENZ!}@N|%rPeYba!X+H!^9j zrQqRu#mfHfOKWJx;n3!+evC|v1#CV0;Nt{WWOF2`ojE@DOZ(YJPdaELL&8dpxg!K? 
zCS{CFB?bPvQ1#)%tYR1fX^{4~zQ#!dN=E}`pJnP;o5jRcZCr4acl`N3^Cxj^p~~cv zqb)I!dYZRzT;4Yv`xE+cL+>%{O(j(L0K^NXanum6!dV*cP@+yoLP&ha9Ht}m;20fd z(!=+e;f9xmB3q%lnw8+^i$nCZj|1M{0>04zP3My(*V@v^DdSvg!L2js8}Iftn2U^;;o{PZcBC9t&0X1H(w1w^|%6b!cBS)Rl^ zLwt8>$e**}2>2_XOOs2G!N+-!WhN`7Kzi5bz7Qp+X1s{x{RS#qPs6^HTuk&K)|xXB zhGlij;E`PWl$=#1Au3Fk22z2fk|td|?QzhB0-c;Cb|tK&^tn!DoZqNRaJoF#T%r!W zpB-Nu8KKRCR0GZPD5N7u{xaeTPf8_lM$QbP6O~%A!*IBvV@}Qn1MI^7i&7(qS=xf7 zHC)z(oL)AE7wOtpmiT{%EU(R;iBb%GyF-Jyq-Y`qG%jZLMUC@WO`GBQcY}939(!Od zz*@tqsCv}?89aRD51@}g41?+ze}72(zDJPt`CE!{ zs8WG=h63vAbJkEg^b9JeZF)yDT{buGku%{vBd{?nQuI3wJJnrOK$nu0qhlwPFXpU7 zBkHfee3ijSR;QWNO7$sp;0+H8mF-yl-H(oK+bESx0H{Qusrw*0VDAIl+c!bk>+BC< zn!-Ge&X)odHmP5O$%FccR1M9cmjr0crti{lE-QtIXgo`!0D*dhBrP7QGY6hN7cDo? z(NaQfNw%w$5{<`w1IC7mfzANSTog6S&jj~tqQ|=9O1|UIH}(jWQt!6|_J*}A*M47F8!_Ozib3)ag6mgT~kApuOOYP{A6ST*0^+$g-%ifS_mDy87E z1U2{40=aLdWHp2%d>p7Lrb57{lnhp4meEdhmOg_CwW>EWA&KO2sk7fmy&RRvq=2SY z3f6HTCAJaXIV*(%ff%F}Qj$CYim)rHD1g34)l;}wNr5J(e3L8NK z2G6r#78B^t_@>nuP}mxiG|0d1cvxfnz_K)ewo0*J_|K6x^o!5F2UrZdF1#P-JD@nH z7El~%Ca3_CiDEu*Y)>hxuWFw7Y$2e zCmm-tX1N(Omqh`dF`$Q>KAVX^!;#2_>*~fi${%dAza<6>0_q#!i^u0$a7mTe$}xfO zA|;4(MI)YDm9)sfcb*~1?!_~ZPF7-_;=!XC_}qU{_}&q4*Wlgq{VuJtH^65CLTXXc z)k@HL3?5E)41Dn4=FjIl8`a#5hPhSae2kufwkT=}kP}bwaO{#B zjyXLA*Xsj5|Kbatw}Q4_0TlSyNYC(m=SF3$jtD@ueaGl~CI$t7w>3E8M8zuukl~>C z!U$Cxo)6JYz)IMZp0?8&^h_+9O5HwZu;`a)b0(fQ@1c^`m9(`qY^4I@z@-&zLbb-^ z(h6NVf4&@t&S0pK7}gJdH-;OYzW0~#SUO(ao**w-FoR3S>@P6HnmTQvs(nfbvzm-a zIV)zkT-LBzytjp@rYIkxR0?d+^(?g_1Tmnu2*kA&+;++WXL8Q~ycAfw0N!DwGl!msh@UFnS=5uOB zh-e)S$4c<$7!%w;X{0Sp1bkf`tTm! ze(yK&^zu9K{Vn!wOPj#u3|m|%axfyGQ?T4DDpI8s3?|pTQ*xd*F8iT?dCwhu#j1ndR47v|< z-LgK8tf~_THJhN289)}xJ2Y|m82|=} zKl}<_efq8RX`*t<=PV`Ye*=hja8U9aNuQ1c(=t$wpco-XPa2gT{>4XM!WZBFZSI35 zICQ-}!?xWqD2-oh^at*ee;+66ke<(DoWRaGU7~8|eUhMu0csK#|3^PJdUmGJh*}Z$ z{a_^k31Z4$sp6{mqo@I#N#MSR46c=e3mk_96@6W=aBD#q9b1TK3{gFiES>Gx^?GsE z+coZ~S^z-bV+e{=4H$Bbt$MV=mJ@ufi7`u zsQM>5t0qGkKLg1Egaf2ggH!~@M1R%fq~HudPKC^pEr43ryos+~zrnum^Sx{i=bFFh zkjn{uFUHdRY%qkYXvP#rf0i+lg62sNQLjNvVV!+4TI53h?mzmE{0AXx>Q2Lu>j_-H+pZ>K!73+W#;!Xoy%rTAwtro1;XL$C(mr$1r*E$@GyOeE4UM@fG0B3V7t)aG9 zLsbAWP_%jY^7qd5x9E?E120aur7_L~s2Cd-Bcwe>%-@KED+H9y#ayvTZJNo4<5UHy zlJnEgibU?Rp3xB+9{RFs)GBAf1vw?3RRkH(!ODoiU>y7vGNC1%=eeA&XYRc-h$P}h z>B5(9?{GMN{g?k{u9I1V8ZhC}FuQHr-!=>{5b;6UjE24OQ|NR)`shP^^zn!2ec*Px z%>d0TQBFCgnV1H=aJa#0L#OJ9oWDEptVM5BA3c$|%CGghpstrtNV4-k5yXcgRAO-% zxDA35P71`70(7W%?2N`q!7c2A=bd$T>};q^Syoc0?#HB7#r|hRf+>o?u;gT8%tK<_ zyyNN9zl__be-Yj{yrEhVpfo_sYq^$&NKF2*uYG_OA@Fb<2a$LMNu)(UAVts%Z}&US zXt8tej7hxHF346Ci@npJt5T5k%sxiGKb)L)_q3f3z{mAFxSB&>C_5 z;{Nfjly>D)Ex_FP7 zM#mQY1pNJx_nk=x&##h80Ho01oo8BDZO~nVA!$K6VX`%vw(b10RtwhF*mt9|H_x#q zRUbzu!XK2*eLr%JjO?A6pyIdpT(DU;7kE0`}jKj0)qW+Mjv>Z-q8Q(*Rb|>XY!<_H? zj#sbW;Q#wezZ&3Xk-YaDs28jg#v5d>Tq$FZQo*r>q2Sa4F&9l8+^4+{+>VZGjYQ+k zXTKfaRVXgfC8RPkNsh23&ep~|t{^;fXtO-u>*XQWUiQ{K(EQIo{dNStIy(%m_WM&P zi+Mo8y?4fSf!>eNnS@VkY&O-%6yHjnt-vV=a-vwpgVNT@f5c}UgA}XYvF$sqm#Ffq z(+sqe&NI>o3Z#fot@Qo#4v#VLG-9H>Dn$4DTa5iFlR{PJI*fC207Ka3by;HV`W?kP zim8V)VvqM9E@)&Zw7O)qc#aqd>SAHF^B~A6TLmjvuFqfw{Md_!j6aqo=DLWMcJ!W! 
zpw*cA?zGL7Ghjfrz-3uyC6}tZ9R&q681BcZ`3op@#nWfMfrl~!NSI|qk&{NCK;dL7 zGwAF@%eo6C?x|NiK^{6s?-18N1JS(DVSxHc6~pT#je!tyj42`3d0~=!%0xy+NSR%t zPp*T{?D1q&v@t@I1{eRV(9zVy>VT)Qnithiht^6cF^w_sfBaK_8aYHphrnhN=@=a- zOE{73DH^dxOkEb|Q))a8&*s~Q_ddq5)XauqHs>(N005yh)c3fQ9?eTC!#Sn{Fa-ga zfvl<$(NJE{uFo?e5*-dqEM%kyLVho}yTPLlJ~gWrEFC7|^iGmF%i~G6$+CH2P!`F; z+8Sl@qMxu7!{f6T{N5RSI*pN>WCrCt(;+#y2Ni>jVn8_J+3$e-eg#j(d?GWLcgT58 zGkA`tNQ#q7QAAvSCPqQGnn?DUl4A@kNAGy~@+Dq=_8E?Siv(nx&NwDTO~Hr^ubMEu z@96st`+leTTF^1x^fMaIz2rF!zVDGs5wVjv);t@_%0L>4{>ureF9iH7$Z;EuXj>9Z z%H)RjBl*xLgeGUzl$vzJQf9+$_DgXMa}I}1T8o_SzTe^Fj?wQh?`W05vnUjP1~;Zk z5e)b@zxxvR)9e5R3Z)dZW#Q9Xo*OwQRfSfBOZF`x}4*@usAw0&A+xFd;F1SR#J}Ml#qsRS{|p#VuzQR#K}l0S*z+Tw2AY zML<*Sq?A>d$h*PYC4zxHD9WwOz6pi63Jya)lryUM^J*sM4maSZ|DL}eYYQP6fgp|d z$Cw;xe$;Rd4}@mPhymewJEyM~syY)IXJ(*E4m);I?z6SZvk{;3TW{}hM>gjV7v(5E zuaAyz{l;%#X-n842IXt3SWmLd(@oaaqI zovhNfAEZNT7E{RzP)up}xgN5ky#vGXl}|oG&gD3*X9m?uv{`u{9 zUs0vwuKb>Fei^kCJSlOaAC;2QyrWV*iyqlSEEVqQfaO`1v@*zgd3t&qNrZb=Ig1$< zQ*s#ALG(T|$-|z2XcI^~fjP}wQ%{J%ils%|6G<`wMFbc=+;QKw0LG0i2iYv%2_`7f zE}5A0@(?zkcJ}i49PRp;$%T@#>~FTp2Kaf_`nFb8^ZHFU7Q2@_1f&uIG0dKcp2rbo7d_(t<_F*Sr9AX3NUo7l%o{9Q#e~Y7c z{Eg3FGludQ?1_-l6-9Vm7HFo;OyVLwSeOKtwnQhk1@LAlL7}TF?4yr9@!+*h11L$s zejLG#?4cWcX${*5dQGV+2gk}b4yHT$tYFBw>r%NRC!H(cpQvdePg1mvF!~4SQ!)MX zh*ISkNZDUDg&8Evid;kmDCOi&!hJ!2B55{I8gbHM?VFKh$-H+ zHWUlV+~YV-3Kv)wgwmHZT;~$F%6_G`EW-v0Qb{+6 zXFg@4Vem(v@0 zqrw3nOnBye7c+ZjimTpXQqO`iCW%L#p8*3(NR9dkYL$-;(QW1c#7aGN2KdFYscupn zWZa|t+`DEY0FJ_$DXF3w8=XnAsLhj(Cl4@X8iOT1G1gfSA<*bY` z_^qstGFg#bnsbF5=4zgv4i*rC6;zSOhb!*)jiv30ILdLyX`o~2p%Qqm7$y75PAWIF z$C-HNsfwU9s!t;+nW|Kn6G_qg7Ml0$!RQ_Df9*%{`L}*KgO^r932LdN2PdLBD-l{Z zRk}i&xTJ(JOjGO`4D+1IRyv5*v*Hnhyh5R-6~n%7N$0H$n9OFsN~)Q`Gk|Ig+B+s~ zV$PMSOr$-5stl+NsI{RzJjeajXEB$-Na1J|57#TU`whn~AqPO0WFXMxR!UUcj586I zHU(RFg$M7le3SZa)T1CTQrm)3T2!FM0q>o(;|adNBX}&B$~?ioC~3Fd8I+S!7lZbu z507=@-7ghg&q|ssm{cbaC1cL#Pc=WDRZDx|xi_K%1G8eAca8G(f7ds^jKBJ8zm=6i z^;L##ns;7vu4D%!I#H|6g236C^i&J}eFl?o#!Qm}YsJWb5VhQJtrho!RZ!ZK!JZH$ z(`&2P_T%04!R&*PpOjm21how9f^FN7uoLN?O4&s3bAek(^PKy$w3guN=o<=w6s_$9 zh{bd;cnMl6`o=XlgMz#Vg(4-S@kuRCVnw<$;EBzagXJOmCkS|8P#MwUE=K=n4a%W| ze*b`mFow-Em>)3}eazWGOKU(~@k4*`kKnt1{r`;nt55O##RvH8d*8ws`z#c|Pv>LC znE{Bmq|8QO5x)26Bj)4KcPdUfIIBql6as;?!;R`!Qg-}1fA>%07ysH{p6wK;L60%~ za>iCp5VD$KSVe6s1w0m%M1f?Scr5yH#PKk05Q2lL><%asH3gUj4WL|rvSwC1cWl$Yuvx!^^ zUDfF~8ZL}IN^wL4@{&NwiUZ!E;Wg2tV2SK`Zl;vIrh1l)$JA+$&x)TP=$=pvF~FB+ zAI48VA+bUMybo-*C-#;csbnU9S0=8C^^mGWCGj$ z4nKC%9A|>0UxssFOZ{_9gCahAQj4H~ia`qh)DlHvP{1Td$a+fdvf3k?s%KH-adJRU zbQU?AhwGK9UM+((cbo0?DZ>VUhE92L35#z_st^hGMQq%xxHW|M8mD$KFRO zbsgMi?X?1I=Q)j7M>5MwFxc?@4*zy0BgND-3*7GaaCF=+^&Opo4mlzrpN-S%OF5xb z+>a^MfwXNIZd&k^uMdy2>gW;lsM^r`9*7|_BHT=BJ^_!(SUNy~z9WE}Vun}W`jr^O2CB#Oc&ClL^k)y( z={v;#cU5HMPrxM4@@Yj~lf4udUx`j3on<2(PfCFY11VKJ&NRka8y>IM4D#0X!UWl{ zkErrj18Q5@49qo>2!Ri@YH7dLV)*KN&*0v%K0HfXBekky-|_nOD{TA0Bx%M6O5%*5 zkNP-ZLGAG|C|)-uaEHhFQDeBySnjXI{D*#QkPN>nZjvt5Jr}jm_YhgfEgC<=|L17?ihM;{r=K}F8icERF4Vp5p%4+53NtlD7IFdObL|u1=_vWwt z#}MhP0853+PMZP?`f?w@Pk;Cv?_E}`oc>-&Yf&|%W(64e zE~OM0lUL_E84SC7#(!x8bhj*IX$96glw2#t(VFGq$nWcjcqFA*T{|xp0}?D;FIPM~ zTr){lz;DJR)EYDfVOsyN%kGGtSNdNo1xo>(v)>$!m9)aJ1qB+|nKWu99btnkV?+T! 
zaYriwq@dHz2K3pZr8DTGr}|buj*Ka?`Zq!ma4LqXcP8x3Z~fcqxlr{`;@J_~*L3tkZU8zy%s>n__51VhElhqfHb9ODaK_lv zF=Q$XmMzPY7dkEgf9DSOj#kvLyJrxtp=S!hWE=*fGNMZl0GG94Q6#)n% zOipdA=d%xgACSgJ$t5XOCD4mTnTRO)QcAYJ!5H^MkrdoG*HF!Wk@(GE8djmfhUdUm zDD|7$1~~9)pW*`%C(Pg1TJVkM5B#%nLKSdI1eFAk(>;`^04jCIG-nI>K0b$f7P=7O z5q$j4i|%y#@IkbNQmtd;-)W6!zrjR|G;d}G;<~r-?~8_O$9461?u3?fMLzQIQ+p;oH7?1&RTTrjx#N$VQ0PXrUSiRDCpAux(Q7M`9&qV3n zq-hLie+WSwk9X-uKN<7*yje|&K)x0Wj_IjV$3bm&G)QY@TZcWk2_o_wrnfMCFuDBCC2zLL|8xA6w>n0!e{VU}Z+< zyELq|ChD$9$bn{&(nCr~-X8!5=LNtFzwg864B&vV-Q(%Z@Nij!a#|YgI-SkvcwQ}} zlp$Hmf$Ors_8Wftum3-z(Gi}{q+Xzyu6u!-WyMK>Q_K*N#AVO3-G_4kUCG4K3ad6A z=eaa0vmpWlq>v_RsBe#(Y?@#+@8_u~i}lp(22VPfi}=g=43?i+xhl}O=fKO%q!qVfMyWD6;z(h0O6WS3vQc>_N- zeDUpHrO%DsuZHLa+Bq*^NDYn^J)UEwdk(y@X1*eP(nZVE9}T*EQDQGf8nH)?8nG6XXnpWLcyn; zcV?;r$_-^Ou0;BDuiYJf?6F^5&o}oSm5JtWe)?H{zwCMUN$oKv&C3$8z+ZX!7H@md zt_KWW7J@kyOIuJ|1olf?lJZ!rou28Som$`jM%Zmg1f!x@4HgvRG6D6~q`cPO8L!bA8AB==N0vPp=z&hf zgKkyWG-=XYR>Ry30P=$QkrzFy4i+3bD@$JHIkQ9Duvq_4@i?peMBuR>BM-dpXF6(T z`;{sM((yNoEVJ@ilmv`1I2(hA3d>4%G9Wrwk!YwV0;iyOK(h*9m1{OB5K@^cH9G&> z$=S!DpAB1)Rf}KRpk~Z?NA_;`$V{`)F|EtWDnkSVZCUZJ{hNO&`zeyj#H6NOp5^ns z^NS8a(zRheNL^wj>Rb&F=iOMoefs?0O~Y@w=W(g_KCth1%BT`q_W_`zZ{LIOF9F|~ zl*mA_Avw9dfe>%oMwFN(O9jt-CUKMv1JdP=fRz%3nEevh*6{78jR_qx3BphqF#`y9 z$FXYwAYG*`DD}bsN3<8NJCpaBJVZ6lGvJ(S*}GK7JQD>uE&Xh%orI$lb?ijVFvR_& zoZ_4NmUpw3hN}YObi(mV==|?=*m=g)8$(0e8ORt0zWQT7l?IHf2cTlPejOkF@E^tU z?1$0T7XdivzL#=JC9mH^ej<5 zi#1skF!Otbs<#%+#{w}7YGe%uf8TbzrP>ML`5DFlA9*$z>F)H*MGcIsR9G0sEF-x=1q4d-gAtB zcDc-8g-NY?gdCAP%O0I&$#t$(jUFB%3YC(@tH3rC9^hEU0!%F784Vw`Xynv5r zl)ZQ7bIJ}m6%IQvj-WHTdH}*-7C3>2GfM%PTo(qfNd!0non8nt!+-pr{}%|hfc+eH@~`hE zXZj_Ui-OhsK7`7h)ny7TV1fklZ^w3@6j9o#(RVasPi~1ANQcMAS!Q>FZ)RZPRCsQc zz&R=MmQ_-rR#j{O0RR9=L_t*9zgdql zIdkW{wCLq1b%OF@A8c>FkKT`{Mi3kX@9Qx-)xZdNAS$c9_e{RzQ)3V%0Tjme-H{4{ zvU#d!qaxj3`;W5*Z2{+B`_69!PltJIe7E!y9U$4639H7uI_k%;FQ!(bnN z_+@t&4^aqCIHMWxgp658zXgvi^sZ+Us{vSRXf#U>QN129_-RF3-iu6bJn`bs09+ZU zIERB-z10}`-uHXT#KMASHhFAO`e|*!pn*-54o)_e!a`^{Gsxx+4jQo`BvL6i6D}P; z1#h}|t*TrqN=z*B<63b$I85$xfz=gHw~%rCoijtCBGagtG?AjoWfby(cbr`WT8QMW zgepR*3t$UGF9ICigVGkUl5rqeynr~oV?;SRc)T+ZwR+(^hPXt7NnKoIt!c^26=y-_ zC4%GZU4S7cU|Njj zKaTdHq`^yCBaOK}C(afV1)gPRLYj%Q>%7pEn$+9^i9AkD@7&LzKuiG=%uYRF6Y;lF z&`L-z)*AfM;RB;b5{~kSb3>(?=km+<2yj^hd5Z-8&8#R2cww_B{Ik9=Nf%O{;m z6-p*tqaT?Nr4uX@ZPhswpEx$ZQt>H_Y=x)1rz&bXBSr&xQ1C!O#ux+5)bu(}kaXP@ zRM!@0ox}-b$D{;h2*>!|^XEAFct_qegHrcN8Ktp~sqPkYjS$o$ImLO;8cMxj98Xbo z={?s{!XCBN_;X3ca^aU6m7G*IHodJ@|)pK`9HBqecda1IT?;;-~;P4E4yK!wG^|0u5j#XzvF;{q`^8OJDmj zs4qs%=YT5IVE|LWW5keI__VV_&RJjMLNR^zl#sU3U4hvIM0CC@_dw0LN=#MpR?ygL z3mt#1ZBSt&5Fez|YHgvr>q4A)#|Q5$&uOa#`yr`#B_+t-F4-A@9r8_@iy|s#DUtAQ zM~`#CdmEJ)q>@f8(R_O2pw^zBDStNQ*LAm}m*&WXNtM-<^yIom^)l8kWQQfOKIm&K z&%7(pH1IB}5`zi;j42ta8&ygK+B-S%qEfZCuu=CUK2ta(t~QK=6l4$kQOvT}1w0Fe z>4Qq1Yx#2d3;7<)=H+U0wujCpPQRbB_H!0a2(T0;1|i@h!If%&mFS76T>C=0YF6_E zR=KB`i`hi}oz$a#vWq%%L92>p`yz@w03SOkM{^d7sLF*Ey*TgjdB?HCVJ91-;eyCB z1q(Uae%$g*>RFtZO%YWjLMt8~9`N~RpXNTyHDxTycx8~DT~E|1eeLv9l~g2Eb(L%k zXuN>#R-QE35-V`4Sg(&b?r&*B$H{IR)>hx?6#_rYN(q`FK}*>~2P;!9dN1(r|EK;C z{>=aOe>Lrn>=}C)trUzN)V5GV+)cnqdpUk8IbdM}w35K|G8SeWzFwN*5rA&ipryv=p zuQLRSdJc?KhBk}9CLV3z*0oxWecy4XvcXI{m(-h(NyI0Y4IDibw7wMIZ7SuK;b|>E@y{~K)hdE7MQ9%j&z1O*MSm)qGT0j zxkCL6z-0-|Xk)N~vB$yYsu&$bt@N&9f z`Hzgz$n7~*k}&zUN1tV18N)<@V7Z2t6-7r(qK=_MkfVcVP#i~X>hPZ{5uOiA#D_F8 z_=s}wvCl+m3>4q7-@b$U>t8@WUc%P*vAp6axHqfuV+!a-kQ8&0!k5R!&Z`;mh;CuM_of&VpCfg*@Lc&1`#Uj!u|fB0br zRPG_!yOTa3RNI*)sv6q+z?-*E!TCR(hiFQSD&`z46?>s73rfqRNy(C8biG~8p`@pI z4XperF&kWKxx+pjjX=xga!FhM!DYc(E37uWe)E*QX4;!jvT5*gL6ll4SvkV)e)8h^ 
zsShd6qJWd_=+VDG8+YW_An%;q&?dBf2S?lAfy;aN={Ug?c3oAWg|wVKrQqI1e7#W!RirQ7A3miC{;? z(b7&$zP%+9EgrnVl47Vt4gmIjBlT(~V{~0M^E{DU z|5i10oSyC-Q4lBvl>JfS(`dg04(J&yZ4*e#Dx_P+9xXU*0a{3-3)}?X8CFT3Sum`j zE)j=Sszzy}1i~mbQty@$9#vAO@}EFG#wZ^Cr`f<#{6sNkza)K1nlT0IBYGo@KWm64 z#j0`ktI`$pC$k#cv+!rrLL1yusM(>3%239)EW9gNjxgm|y2F*?0~8lkNSp zfAW8gMp24jjx@CEvwV+3{oXL1+1OVJz(}QZ+m8f#bfJS0A)6GE^OAaxaR-yB)LCi8 zIB*`uJ-ugd+})jzMIl=7EOnSvF+QNi^1&K~*}XK;)L>)dkv8;bfSKA1Vl#=Jl@B>WHAI$LNc8}| z_w8TCCtv-MJYVs1Nh7Qa>hcT^U-~IL|Hj{k@BQ_E4f~t#!26a>!qEgCNoA;vX%&(8 zDx7d|L?=Qxt6o_p%Rtrh&qWb1<5^|}Q3@%c(8SpbGzSesm~qe(tu^QT|1n1IjfeJqt&e%K>|JN?wdVT& zG5V!lzV@}R;q7ny-+=uy+@8O{=sN>aq@ys|8;fbvN0+3koe-s2X^VAKQ=8=)sJkwz%AYl@R@LaEym&ELa)0`3AjYQRS7x>gKngz#9_5g#`7O2z- zl~y99L@~Zol&aakS1Gk^pxV`mZYM-ICSLbhZL|~3kBQRS8up~t6}0=;sCTb1-hPJO z_p}XdSuwo8J*Ju8Efr6s{sBxPO94P`G2%Ox)}X}k(n57g6&dwM=y{B1uM>s*=}Fac zwj+}Oo#oiGTBb5usTHMN;C&+v$Z%;hNFHF1ky+Y^!HQ1|c%{s*&7fM>tGjGJ^3H0i zIk+Dw;VL`Ae{zNZ!cn(Y@!osyVY^=ObiKxaHzqW#7*)?^btd<&6s*fiHi2lJ@pm;R zV=#%ifA1r_{qiR;Hvjh9GvI^lfVkJXd%EXQ*;wFiiQb9==FQSu-)8 z;E&s+NDo%(6%4~Oga|*Ff)w)NDIl5_ZEI~f_AONfwfD!#20JYM zc07AYq}`arZv=yslq)#_fU>^AaeE8RH?z_*hA2!nB~BAn9?td=uw`V8w1)M)Z)1D> z41Qc=y-yY3eyV0iVqY_UEH>$FwZ_nBRZpCNNXht=41_ot=%b^Rf{k%vR6GdI(bh@f zs+OqW9s5R~rf6jP!kJVNSZCtF!xz?ZoO)iEs6^Ahw&*}s>LHQ7+cI#=sVH& z8@uq;s5B3X$@WKN3pirV4rB913+C7vY(y@2Uf7M z0ns7H9F7uP10@{`s|ya6EtSFAEagE4PYWB_0RWp$f2bKrP0q;m3#tn;NS-A{a^y*e zlk&*Rz;-4Ej;S02C%4fjjjreOM-K~54ncH^^Z7_c6}>B0B7@^N9%u~uGz}R%6GF>0 z?b+b?_#5A4+1^h9B(wZ+U>r|)`t(1*r+@7)V1N2ioC6Cbn4o&hfL>@sa`=mAeFpV9 z#cRezpZFF7e;G+c!oRd7GwG0`F03?>hdV~fc`DfuN*I+o-GWnclFQ69@>2!`09sqv z{6@NCc*~q}t!h~$LgA#>4}Jyz;y(vGeDbH@Rx?nF@ARZx#JZ@OI0Mb&IG}-<1v9Qt z3gqr{NR<*^<9R79t6k@2J0THD{MFPwfK>I7p%%3$ZIKyJLdC;vL1`B(OAKOF)l>axWVV4>MpO<&o zc#n=DI~kQFKX&ZTZ{j}kv#XHT!bGoB+zyA81+`wnu`G9}ONbD_~0O^$)FAw^t7S0TF!OlebGM6O2AnFrNf4h)4||ADj#Z6<~GDxrwvtKlmj{JQI#0o zGp%P?F8SS}j4PP0rKYl;d}&r>=~E0-h4TgdhBiUK7!XwRs6aH*=L(g-;!M5!aF52J zd`g=1-4aMpk7m#@!ymL#n4re`o*5E+;RvD$e)!n2?;CuOV&jvxy6224`dwKPtI@<> zg1S{p#ofA~lsU&ups_%W1Y#^^Q!GVP;zFu4`v$fRKl<_wY22PH2{k&GGKWMTdk7do zfC7_YgW-N%^KL5OK0!|q8)iRtzzmprGh=Q-}0OqY>rSBimD>6ITeD_GhT zhEZRhv)4<^0>y($j6H3G=X1%LYL-yvpxSI7@M#y!l=u+5C=HE@c+Se7ECir&5OMc0 zI+}ZwX~U=>GCzoysti$?%*a183>*$vF)Zr^$99b@+a24!;p1Qa=kepe^Di=I(_sM? z1FoG7Ig-5(eNdIwk6!)O;^qv>Ij}cBK>7PWG8N9&cq0$_ z?m6ty*c56Od}TFHuFHaL+a?dzr0>pmY8A(EBx;kr0sIc}@?|H8A#(kN$Rh)5?WqEw zb7CSctqi%r;O6)<-}(^$-f#anexG+-K|%lw9Ar3RCQy8)LgJ3&xS`++{P=JEZ}4CF zbAJWTKmKd*?F;nd3N@&%TH? zMx4=U>Os^wIQzpuH5ueuRVDydLPj&dijIDxjc|u%#!Q*vl074}&LMWCHXLJPbvq{aqXvLyIdXs_n6*3Ku{_gsUu=?CJ!AIu_j{-;MjrMufBaL|>k9|*o#0WeT0yw*1qbAxw5j<}S9H=q9~ z?zv5B@Sr`2fFS6^3NTbH)=VtZJ%{=d0=SdyYR1FKI~eN}KIz9_y^S-SAP8-o1wwtM zOcevn9cwE%9N7;tFcEhv1#2VTTZ7)HKPv!-;CeQ6tNGasT+gTQku=|t)Dsj;)f1xX(3Z*xFqeXZ$=HGF5999_D1|mce5u zAQ0c@+=z2UH7XM`8E(a96@!Vk&i({ym{3Y)8)5&9_5)*JAN?d&G3}-Nk(pLAo@*yP z*sP=ymOJ29ab>T9^PGfKw~^&QisIlsVFE9zZwU5)NhTPRf^j8qe}9k1rzf7R=wFb$ z_m7@8n6cNTE=Nh_7K*zgl!<*aAc(!D+rX(cn^h~ny_lqijA?K|mG$g-O%@4`)u>BPUz@Zc4(bEa{ z2|U(dKx`YqKl)4ornR+5-dpqx`v?jbM)<@*TS5~9u!qNGZ<(|V4A}c0eAC@#W|Rmr zjbv?0e0ASp4D@0UjvTWF^c*>uE9g9$&q3Ko(mM8I4pMQ#8@iC^hToHG&Hx|+)J`K1 zFOhoOB?h2na^ZnkH&Vq!83Qx(P!H0qi?~L$E-1?bme(I+V8! 
zg1X#s?+%O+1AM&ikeFT=Sy8$~b)oSml5;RybtYg+GQ<<-Lj9-p3LZA~D>Y@knCiB2p3PxLkg61wj< zEUjgAvJ@kIkq@aPofS3przIF7w2FaK22{L15~Ij>DA7=9MRb4x*d({T)?(Ter`=lD726R3 z(`vxhXV7fCd(qTgv5%+}S+1x0IS8bgG2lH1i`-AzLiJ^+A5&7Yl7v-{WoJb_rnJ+Z zArc-{sGqhi>=qpR(bLuit#eiygHl%$y$C=|w8&UvH49Hb%<}ntgwA1WYcLl}sl-7b)M2U07ELr&VZ2vX;=aiCM_)snp9}k3FAuNr^j4x5Qz!sdQ13rPzr6uQ$WhBS$PZ<*$#?;@ z2c=f{5dumv_>VxIu{oXh2{x4)gRMcDeGxNYCN8~qEUjf<-LZ>;sw|uD%s4pKXaGa8 z>l^~B;jBuKl!zPY?P8u(Gu-w)ft1Vr18&zC4qa=ok>)v`_B4b@T1cQ}v{UKrku>3YtnyX9RNYcf&hh`#spN zZ(|?|57Ov#Sf%ljQty~fI1XQ2(pqA8jFp1AUXo8Y)P(5ToA{UD?;Qw4bvk_MHtRI% zqRWZCTbSTj@`5!ZuC}_(Cn^}T7xhCBB_{v$KnREsRzy#|Q zU?!@;&j5p{EhQ@}4}!ihzOvE(H_p)mT~|M0hdhZ2jP=P2YnxgH3fcn6y_Plj^_IF zp`c=JX@;vc~t0v1KolSHzuN1Et;GgweNrMxBn8p z{RjRu21>{vdY?T+Lw9-4dsK5zR7tc9D72@gM6Zb>9O3mAC3yt|ql*G}S{s`*s={kh zSZO*A-q~$OD|7!u#d7}L;AU$;V|@P5y^*b|4LA;R=ffUN8B?1{hC50jx(}j$r@pb# zCp9hJf8g^^e=rk;?r7}}_3mdd#t!UHtRlvF=?<8amSG0(0}bra;C)pBt@AIWw-iGe z5tuE@f_>j0BwMo}7Bg(U(}vFD5>-7U-KCF^IPUS17{`GRU7XD^Vj!?tRM|wcFaxPzNPlXqYAC_>J{8^+gjq2hhi3&);Gcqb1+5Xlf#BmQ zDNJyNNv}_ELA?I+&xx6nc8pQ~;BzV&kR^M2+rxj9P`0{I=@CFmfI~vQrLuZ56VYOZ z?RG`6K*l%M(Afz*sd^SvQJxvZS&ym`6D5nDALDyDZD(r>=ePjioztK|y{lw-<7~_x z+PGgm&#VGnJ%dAifrc@dz__EJ#Jrvojq~0+e)lJz(^m$Ns$PVH`%%nO;`r!PKs?3T zf$Yaa57&7Le^*Jf8V-2xnGj{-g%|`1GaPTe%vh;0E^xC~FW1cn9*QDwx z;S8xd4Rj2jNj9vYwH52qa=5Ev)AVUsK_jI#-&Z}e(Q~p>>|s)Y<+o0>E$40*!LT@>oee#Zp#c+PAdL)XF#rm!1eR|b9cvD z$&=)QX{e9XWA4`_zh6oSZ*F@8Q+MyC5!dAnDhWII`;J;2+vAT=`jx@3Ce$J`#>kv4 zGsSQS#?3n*+F2YlyjT}G^@8B;8a$@(0=Sg;Z|C1PGpuWrycbbKm>`TX@c;fh{}58y zfGhc`xPRw8{Nf+^)A+;x+5aMb{tx{zyngo`EK4KmSmtG=!Pxsm5}n2~rhd<+)uw<} z)F%g04Q#}^?2rGD2QZAj_7AxB}zB=u;Q` z7$bum<1HJ>b)UTxW6WSwpDD+m=N^V%{@VKh=)gSAcdFB{mm{f7HWABy8pOjVo(2ox zuE#pwcA`+H;N-&6=R%YuxV)kh&VHB#oj?4afz<_f53gY@`0SyI75l8o!^#c@gO?m+dg_glijL4}k2im@ z&zDv+a4%-KAcUz!0Z*jgPRjH%A{yGwMV{BbZ^uo&hbOATZ8ltqr9u z`Dc-chZ>xyBxb5g#Jy=hg|w@ww(rmBu+NF8D2}EzfC2);WnHnf=xe!KmOS&?Pf65{ z;c>=@Fh}rLQ83Z;pOzjSBY-d|iD?Pq-kenjk z&ni6n4b?yo#6Urefl}A(RZ-<~jDc;tvEN3OgE-wYNp=+W!$ogiJa-?e>Pd;Mp!hiX z#tiZ<07~xB-MYr! zjZOiBK=`$1trpzh-JwxxQIw1Q=xBHE$1uqp1ZDFgDnQFKKzmjin@D;8hn%`a4-sZ! z4LEN5k@T8A99JS;*L_mYYTn!F9MyOk$!SPtDv;Iss`4S4htR~N?{I(itQ?D0mKP(= zFwgbqJ%fZm*d!tnGb*(6y>}`^&4z9tJI3(_`}6lOZr=y|N+fyq4moKdai%->;fP7S z3>-AgU5(cTmeiom#7F99Rw`;;;AS~kOU?Hsdy`aQZ7O{stRdcqyhsfkD zgSav9xa}0xU=@|~AZEXo;f{0{`JtfVVV-{`CY6CXJQ~&EbD*L)`jdhKlx2;(fdF=J zem>cJGu(gpjR@2S6P%y6x3#71lFE_hAQdw#r6j^{Ol(maP(xN$s!|H7YAJa4;SR;3 zsru=*;~3LcJI}gkb-`E=%{cs7clk*s6gbK8`}C_*gR09+3`0$bULpE1ax5KW5_=c*;P?ag;DfYSXJSb6H z2@)&$C}tc47&JuJJdNBB%X2Tm7(xq7OkzY#TUN)*JgO`{;PJu9>{a2g&bCpeT~dg5`3DWxb%55aZ}0s!N?zK>-R%u>a;? z{!6$Wd-`@CF~14)J?E+{@Up4_fO5W$Z+`q?zK?r|;}pfNHbIA{=jXIry1Nl;MZdOv z^!5LQ*V=&6axqK5Q!r?Tz`z)Ll!Q5ZVJY#jGLm~gGIY^u!)=dAmoZpNiu>H? z63P%7CZ_ryzT(e zTwUmDLfIozju`<3cOQHc4PmAn#Kb=Yz|FA3xvn&`O3+3LpbB zsW1TI;f18+ft(NuPsG}2INtmC8~Elg{C@oSd;b7WKluUn+cmVD5$ZuNWI#Q7dk$u% z4rH%QURMj_YTN@kvl(Q@JJi;wKTj=nLnJZI*#Sht9bs(5w=AO3SfOlSytUPZ0q3lq zs8Q>z%RhrIA{m8}yCz<(5+r-!w86E7#YJnBu}f||-%G)vdY#}NL+V^|lVMm(IzJ7* z)c0wr6-4R|5P72UkBW&!X`b^>^nkTQLz-t|)6Y8uhX(?fiCWDwq+}0#4;g=}3)Xjk z7Pl{d3uAwdH7PZND^CAZ6M@u%WnJ-mf8|&4xBmVAA3}9wqPZiYnV>!B9wfaZWV&I2cn;kw@ol}M@7Gf@ zH~Hl{_k#|qEek~g;`7ym;}TsUZNW+?x@iw~P9>KLPV&rsCc!wJNBwHFM}35vQ#Pmj z8E~MJ@CcOQo_$P8hzdANk#}7Q`b>O)_a5%>`P(Nb-~rDiLmgIY;+eBaS;_N209U0k zpahQnhEkliAZ7vm@n8Ho{H^c(F1~zvPO6nS*Ws8$fTNa>nRF%Pq&cczFzDKJ((X`? 
zO|ek*6TmRfdfdlLt9a%l_qgYtQ-$|L^B9J>q?OB*%B1R>f-QUDGnC8)@TSiiRxq|} zoC}I3z-owv)Ea$xeNWUedea0D1xS@PfqimE50Mg8nBd61FGr~MxHSM3qu*Ff4K!E7 z6L+fH%!;hUI21h3N{m54#jMh3??jnQ<+ULSnWxOIs%;`G0pX|R`tKoa$`Uw{J)*30Ss z=~wnk<>zn>c!#pDX{cvWpkQ1t#X{;+LYWFpvKKSfnN{RcV;%bO4DT`Rer*l6W5?1K z9R1*VA@jismdJvz)P`Fh(J+iiKw`jY!8mTH9y2KJ*O;^PbiKy9a(`5r(E3JfIH|Zg zGs#q8q6G%v0;J_>vM+}L`%YV`KZ`z4a6M;IqU0GmzdYB~COypj#F#mqN7LTWHe~+} z&EB%KsFpypqCkJc5d$7J?C2d<44tu-;w0H3n&26`n$1eKJBR#E312;%)fGm+cl14( zEQ+R$fWXSW6!#duE4!b)(?H()1f#9N8SBQ`JTXRSW94rvllD>y)^~mu*Ehct1Fu;@ z6Eh}yV=C_M?t)=MyS5!cW2|k3ef*7IaicLzXN?#X2LuWp*$O2wO-(3(^sWsv0WoGl z#e(V)^p6#fk6+<9w#=B$hNxcK#G_ce#X8L_d{D5zcb( zfT=nH8k%hS;~HyXWy-1|=riJiOWLlmJh!a(!8wJajHp0uhbRw#wLA3viqQj^bafJ}^+n0bJc=;=gQ?+SebYA2HMH?#TL*G{PKCoQw zqVz8W-+Pv7&zD-CR}hW*Tj%ovp?tz1|13XTY`&$Vq+t_UA~a6H&dW*V(^03)?oTBe znQWXKX?`Qu6r-b5p&X~dK^ZI}kwTc%PDU6QSAvNAdEgY}>lu~25s}Eu%PI_=sBQ%Q zojhG}ogv4BjtVt8tKvWekw&#n-r=bcQ)egwg2H>Rc@~4z1X0T(c#i&#s>IGBP#FC{ zAV-b4kplRM)Cm1?1e)hQt6n-|L;J~pL?0}9E&(Z(X@3n-VKM`5+nxap031}AdwsX! zxoZtMp|KvG`+0V}q9VP&F8E^G)5#xHEz%u6SBuRIcgqr7YOtY^=X<2{ z5#7rP2I(YD+f%G$;Hc+v-Vvw09{pet!$qRgw-Uk{Nog#G3#a+strxui>K>oJed05+ zc^=4V-IZ&7Sr+U^NY1K%Lg%XUY&WXQMI00Fs2-8V`wiZ8lnU-u7YYLmtbB+iQJ!?%f6-Fc&N|swKxNUnhr&7)B{`EWPM>O89mwVK=8;{+6inJeIQhNy%w zL8!93s$KyTYytn?h&wo7AqU-7?ENIu4fRnexYpkWnG#g$>^p6`Q14hb_2Z<7hMk2? z;!HU}+YpL}0k}Hu)&={1Ft`rNnIfergKNmHu$kce^e=F5rm)`%1nImB3{xYz-v8ipO>iZqq*q+Gop%>6{BJem zaM_p#1*>Q_uQfZ?(>^y9##Eje!|+Mpty z>iaVXBFC9nYfzuIJ~E!jVSuHCohY-HDH_D%mfWoiuDkp~1I8ZB_iw%o5EC=;(R$RP zp_M?P-`6`Q8;AB_NMNZS@S&>q%fwn$qNqSmTY^%9R?C9K!)3v3pW-0(6bqHwAPVr{ zJ;>^9X#_t5SO$M?G2ljky2L;@PAGQ0WXLfJ(TZ{qgJDEgSr|+nq%4oy7R=gn*u6{g0$%Y2&r1@7TGA1`wi0n7teHH6k4B!3kcQYY7_CRxv z?UsROOh@;OYo*eZ$Sn=_c63}`y%Xm)4=1B}2sxRQC?GO(ogGziz`$)@K%E&T#@r8w zr(7@NnFPC@m4F=$;MX zXgQ2*(ltW5Jj62M?+v#cAPNKxbkgkb!mb1zwI@HlkAa1ahHy`3+1>Hg-hsjd3NIx$ z<#6=xr4-oR`%v?xW)#g|hP$!vC8Fp{3Y)p@oP=DuWFIkfaoRc3%cK}JRG~*I7>1gWW~t16JA?e7VI`HO z&^$)bl^}u5DwzUdF8d73$?-p=^oR*c2^c9<=$X~U$Y_=54Ie3V6Cl@Ya#pexJs^h8 zKQ|xnzUOQ)fikL$0R$Ncl0?p2vH}1QL3hA&??}zE7|511D-bvlB?u*xZv9-`t%`bi zh@^n_Q@sJ&x5jf`OjJt(0*5Nvmc!fLyT3zLdsvx>s-0n}5$OAIi+_3m$Er9eh}B*# ztp?SI8>qdEu!xDbGqGFW!}{?bhucD`Tt(*;^#q2C+U{rngVR4m^LxpeFwUPZwc*HS z>=~=YUV&$Qq#9=Q51y3O5WwO)4|mKyJkMY)CEl@Atm_@>dXMFDpGgFGmSxW}F(w67 z6|Gte?$#A|mrD$R?!lJP)pta!whvYIFyrGgXO+b5}FtM_qW>{nnM!IXigpOeo}RWD98RWz-Tc83JwwqvAf zp@6`k2%S#ToqHs{}F47<>4qoL*gK?rvf@iLFdUS$Vofssr&369Pw@6 zqV(AZ_CBy5;-ZP)Kh;gdw>L8w8~gfjtc6)B4}p6uQd^e1yaoj@vAtsq{QP^bU|?U! 
z`dYpLI%=_LYJ80V#4vjyB1Ez1tLRFQT)Rj7& zi%<~DLtwdaIcr4~5b(3)c-&b!cgA#T>;aO4Jk+~E^oBIHq?r^AIXpX& zliV7meVZOoUVu07%(oUgoM~IXrgH}(&=6O=l#16MeJjeqIQ??Rr*EGjg_Ym@&EGsB z9HQApVij@ipq@DC72I?YM&vXA;QHnZzL$b%&+m+j7R_cti;@FhX4A1KGG>75J9&|v zfk|hu{nO4yrFR7u=YQ9_Ih4TAakBY8lPa^E_AKWr$1X>1JnZou^BF#S`xptw5|eV5 zyH{A&yWmRK2Fn3h3J%D<<#($`#SB@R?i$MJ4j(&SUm7aUwyIY%X%RA0@}IopxdKxl z0H@R;ey>wXnzD>D{AN&iWS;@@`$`_{VxH(6Aq<^(+^-FNH8P)^DXat{1d!JpiAgIJ))tiI6^vYd{n8qDQAinR z_v;r>PgE$U#oE^z&-Sz(v^lI?A2Weak3b~&4j6gp7S)EzUKz>JDs&8o8CA2TRg`uI zD<-|cOqvG_$My_KHpXcmOj?2UazR}mP}hg}*-Y#u7d`jhfmG6|ZHebwE_wd*R5@Vm z&B;M;7S_F(`jfa^K15yb<19JpmLWdW-sO)FSYD>TE{!_gqh}jw7oi7Z~eN39Rz?i|L(3w1?-yH+}*ii@)oX^3T zt|)o@LH~G0+9w4wHs_~lJqA&&k%@x9){r)?1ZC#1;u*;D_oDkNDPV)XX}>|*=rJ5U z`o2U5Q>)wJKBJF;%l$(FJPA&f0xb7<=NrF-cfS5zwDm5B zV<&}MHmI~2<1t}fYr#?r+InH&J()Y}%9zyy>@X%A5};7*FlnrEWZV{=Q76@yF^%FE zXHu-b=0cW9@29(B1t<;ODmn`GE*}}fL6z|q*sC(<*T_YN{SSZ%JmZnf?$>9m%Nl#j zXX;Pv0zC-(%Ai1hb9`-+Ln zfFF-x*!l@o)TJU6ki8qWz{)`J1F(wa;TN$z{v-Ify*QW-gO82Ilk_tf5fdTBTxdke z7}hy_&+vL(aP0%dsBnY%D=8R0(R_CWP(}p`B)S@njtuY^aMn}LLO42BN{ZpPe(N`3 zRp0s;7@2F}y6OKLJS-~TpbCiX_@WHPur@+xpYkSJBUG*CVV(NzhV58&hkA8dd zV~Hl65i7tsQ5_skMzn_KqPP(HS~nwn_Z_DVLbvKL$cDhG_HY z+$b?S2f61^%p!tCF@)a#DbbKTXjP@6h%OjwSnI5o!8QB`QO4{uNc_eJ@8Q!gzsdwW z&YqkIl8@R?QnqVr*hj2&Z2>5tHo*2U)D9Ywei|S-(y7ESrP$Zw*b*cNNz(`--adB* z@Z>9x2#h6Bt3*na8zF7Rq#_(5;KNSmMGlBbn2Q?0ATe)}_FMsK@S%O|XjX6|-Dg=i zw2;p}hU015QHtkhm?41#wIh25v_S^2TL#0#a8ft3+;(&ws5Pbp}*t?`%nRFAihf{RJ0QWr; zW7!GuuLBs0_0Sex?!S%ar+_A-4(l z5H)Z(E~Vf$22eOVi8ErV5l{7jyVmf$DG-r@(o7(vN?5Gmx{KCIp^#E>zpgkC_p0|J z(Cz_Z$gMLGSnSxNqOkzaw8Vt1H4y$vH($S|1ndrX8 zURC*N_#A3y4E!7pCyL%h^EAa)0i4918haTXemo&1`F-}+0Kg(5k3$Jf5~E&TBN--mIg3p^;f@c6Fx?^pcf@fqKI zeUBeLUUBsJO!-J>qAz9z-RAVq62uq+iQ}Gku>^D%xvbms;|n@!#`^AP_wS%@&uFD! zbM3RGjg4d{)?=wPjiiIhgV$#!w~mK(!PCBDKRT8r1~m^-#KJC>P?$8NpX%=E>xT6)( z4=`S5XytImq7r(Uyq6GvMr7(CE@;0BSmf%#zm}g=z+$|OR z7+Bkiy?u=B?e75SXl)JWVKIln*f2DyB6_!IjsrXz!l1K#qE*3WL%uQV=cPcQDwT)S zV``@pbwQ?IVZ2^lR=m01PHPtjCN!gs89ceWK(4DOLkh5q%>!M`(C$7C-t+BqYI#@A zE}0HTL1!xToDeFViIV54WMC;oL2DxfS*j<2Kp~x_)<+I`sf|JfLdYf`QhdbTHTY6J zGM0_+gS3tbP6TSSkmq`74F`jh;fQ@0Gf;4VlfvVS2hGV&2}%Tp$-{T`iU>7~XsVP+ zw@}IQOea$Ew4AC&O6){ujH6Mote45pHA_T4=vUM|zq`kzZ3@AipKVNxE|*clEZC~0lIL!h#3tmL01RYQ)ZclNb})4E$%(w$y#{$~8H9N2?sRDKt-IdC*a^+7VR44wwi3mVz3C5pk^z zJJEfR)}`j;(v}F``+@7eBN~vG_^#m$EaueNS{V2fZE`fA6!>w4_YLkHQu!}!nZcE7 zU>1R%bQT6HYXsmO5;xHI z$@%uqN}2BP$%6!{>xu9wh!D8o4pNrch;M+|lum_vR=7n?DrVVlqN-h>#F_%lfTS%% zIL8T%HQZ5JeJArkeH*1%$km&n^4t#2Ul0w%m<$`gZ@xF}PL0Y^@kxhFaSon|?v6_} zTsMj_n4zr~)a8QX`E4dAT1!<_71U)<#c5y9G}CY%5f@0UY~S}6&*}7^vXeoFvQr;` zl8DOb@IjLrNha6N@V)uG0#|hgtwhgACndNTvid57ROepLQ%qHS@E*=;!Wajs><42a zRY2oA2Y_{1Q&=N>M$ezqAX`-l1RAF^TXgTe4o;eTEbR@&k4$ZX^o0j zx*t3yC8XKoOh+%)sL@9hbn_#9hu%{?6!I5R$+|3yo*Zpi@%a24G++XncMq?yJwIb@ z6`L2J@32Dq76&HPztV~*ZeY@(XEe^yI5dL7Ec}tHhiZyG2>eSsD8Eelzd#o-SsNN4 z$)uq$wmgmvua}CiZqbJ$V9;qhqVI|fr1hTjP)dp~v{Go3j#KyTg z8dMAV5G7;(*w^31zxU~vr{^!QkH7KD9)V~>sh7~kA3N&$0P`KypYZth3xG}NK+ncc zKLy-6ohj+OP8gKE-wBWM>0S3oFG;cYi0O2`hQ&bwEIB}6? 
zmBuWTSPn{j3h7EJ(Gb&Bw$QMycbwkN3WAUmo2Y+j7xeu(-&0N|;>(%o>SBQhshV*@ zvxX-{fEB^w1*P4?k0Pw|#Zi+L%XGJvyOJ>r7r z<%!=fycjRkKu(t-odQuGArxxpbC=HgEd483a3+Wgr%;2zrQYRC7n?Mm;1sqR!@#t* zdKbeq^zr#M_Jgmv_c;`+oG!NpCINut1ywzgg3KI}$ajDm@`c!ACOb0xL`rqmggzy@ zfN4}f0~sZ0tvC0T$$O$6Ox7;z8j3rbo~?#1sI)EspFYzWdH)HxC9koaKd_2L_GJVAnt55`h`@X?oA>= zRh!uqBiMA80n!$6_C*+MuciqPrcMHVx#@fOkkJ zylj}}G>Fn@a4e;=3ZP!3k!5{DBs(9xeuX#J1N(l>Gc@PQ#MxR(G-?*aow&4yK1L*C zw9h32plI$qyu!UFdbm_^|3P)Qu(ij&rTsI^(!bu+YU4RR@=mp4xGW2nTJU!33|1>F z=|DmT`581ARWWh~JWPhilv9tu8418yu{fUvxa|i4CkBBD9GI*orR*zOH9TK$*!#dz zOTM3+zXqQPU^1|BmlQ!(>c@dr;@P)-kMFm*Z=t|bUQlS2_?`RToWY?edYTJzbgC@f zZZFjOtS(sEf^ECbO6tjG6f-QfVjrE9XI4B<+<6daNZ)0ut|J3$r@yNL?I8bHGNH`g zEt4W2h^$B7mSB&h=~=-sqUW>wq|SvsuUIb^+^$bCU7q`>`*8NjMT0#3bg#IcS)J&6 z##Kv;BtC2#W@E7IwL6uIMkfAN3&s&65MtW>$iJ6DFJ|+;7+@AuIB<_^YtfMJQ%y|+ zF%6!5Ljcffq2P$D2otJO*(Y)2lyMgb1~AFu>=;wUcEwIRtAwoU9fqh)0n!XVqKZ0V z9@TnxkK@>}ZMRdu)@&14))g6|z(S}-XV@k{A&@B5Urls4!8=TPJbZVT00@ud(MJH_ zh#rw*o}xoR7oPE2t(BrKp4Gm(tbikDeH?6Ai2j~Xt5elP35Wp%41(gAKKTrMgYi@* zTgY{f8EKX=Y6|VpW@Jn`RCS&SdZB8d(FUx`ipscmbD&UZjkT!&&;w1%WG=Q`z^b7{ zUqnZ-4)_l5w{Xt;Blhc;czSx93!rJ1(m@YRb_T;k$wrjGfB&EQb0M?UEAFfHN~0l>%`%{@P|0;TaNYJqhq)T)11Er!SE^si`REkLlbMnq+H5NkjY-%~av0TJrRxEIURt zz~w9ppM@asL19<)6Cs^)Yr=gsIE|WF}_L4abbi)~R2O8R1 zW0M^@sB*?;Y^YMN22Rfx|GiTc%xx;GIM?YcksQP5*yKh8L(ifh$^8H@)hqzV(pI$A z7-)=l;#TV^HJ=!A7Y&Y*%$o#5o>wJ3xf9}*0`ETD0}#Kz!RjT0sKS8SEfjT1 zJ@pBg;eNf$#8TN!eiO2(`xqWdKQvNF=^7)Dc8{O!y}zK6=~pzlop#J-@;MR8pnQlX zAOt$sueFdlC9()>DD?qlc^8-Uj&mCd)}>(%$buEj*ZINKlpLh2t)sHO!i5VD96&h6$!83Z@58Q~%&0rUK9wW#ZfB}t7 z;0`A2Rxl9rSbpZNA$yce)eO(z!+AbGe(yEyR|Lq8P+{4+|X!`)f~=*K;eOLNitxT^Fi3JKk)5OGH4LZLK5(zjLx15=ZqjW z_SfLti|V`|2Q~&bAu6%JYM{k=!?_00EILtmvjA2q)d^eG*^6hpZBgBicjuq6l3E~t zaZVH{4suf2DGVOxAWlzkLt_fNh?PK0T1zt9}{R-Z!Hq;hE7p26^gzh2KprAGUPtP?g zv_WDMI2E*__ii#5(l7DngWf72Z7Lfk2-<^P(oYo&^O3zu8jh+MDMTw`P;N*_Z#ecF ztDy0o1L}1Y@HP|B0bo0J6nH4mxvFm?`y5NVMC`~*a^v~!qvLjc4k>ghk18H0mGg9t z19vs-t{4y{Z&%*FtilNtNA~~W#5CXulz$_Gup+qtywBw%jWP_-}X^c^7)j64w z!oG)igYE}pV-rjuJAy!idP9qI4i78ewU+cps(^?^635YVPn6I!+O%_ZQ$V8DNHOJ$ z`6XlEc74Ka4BR|=&-UX$?;{8C9(_NxN+5l0#6fXa+ZYTMhNCotd8j`eJti0jWkQEF zmd#7epj=JcfSlXFQVRZq|JT2D+UQ71hE3#H4va~91T%CH0JAYn1XE~lE+D3&fQ)Rh zfxA|*DS)(4iMY8jK%RlFkq#7x&C#rY`aMKW=@9x3;aLE1sWA`@{lKfsipSfHbP%BN zuy#ydpR(z;EZFxw3<-pf6&lT1Jx~?L`wiPOXL5{z-NjAjHS#E_ONBy#+akdLM5gJ8 zgB$_@oBL$szatWiA)gRzjBraQ|(#@XZqBJ+eJ4OEmkHge2;29jgS z#&&Y4#pjJAQUTX2$!EohNWgLINhitgJkeLac>4rVR4|TAN(TUNIwp|)1Wez15)HQjJt$p=f^*vV(Rcjl^Bdo3 zCd`0EUtZrJ{(WV#YnJ;TDh0Ab2Jn7z1wM?8lgjiFL5jL|<7An7W5bNbSX( zsyHhHzy2G)gJTSQ{eH!dw@{qfyQs{QQY)LOJ`=t7zHJP!gQ}uLZz-B7GvdC$5xtCY z?IQR$tgT`n^sDI5L}mH9gG1e3*j_W-uZCw0e^Wv!N|Mhr5kO4*zuqTmKb3lrXXm}U z6>qkFN=3K_u&e}iR+LqJT$hGpAS*S)J?tzHN$gNXU3Y3kgE``EA3bbjSuU(Hh#{fm z7BFYxY=sqDJzGIFV5dDxN-+m>W)Gu<%|ddeBz2QZPybq$VX_<_M)J3wo~A1&)xrHB4|tN7jdWe5M+w5~Ki&=9wH^B%OOb zVp<>2bOg~Kl-Uk8cAF=L#YJ^1O2Q-^nt^V_#9S9N@b6Y zA^bzXk=}$ zESdqS?j4K)JU^2`LnI#%*f9S*Smox32Dt-1Za8kApqU&ec;RQ z{{w7KUx5*HaDXez*(b+gJlv|xR8~JqDY%pa%{=!tDGjH~3mBGVP2RWYPryt}v7{yRs=hIqu4=`Fjec&lBh2j&0kp?Kf6b*q6efOhX8xnsA=r?3pmlH;@rUD8R*o zb5G-bgp-@pE};nq%9rw)MqCI0j%8_SVD*_ozoh=A`2d3#+w!yLrMJ3JNyrt)BIQ;+ zlC#0H^5wQew1#X_@Fj;zZCua<^~%N^Fs1M1~94i5xtZ3`}UuYy}^Gr1U0;NZFL zuitw=0^HH@t3UH~PW4>^NgBav9@#@QZJrX>L!5fF`+Z zw%kE;Qo#PWq(pLU@7P;(C|LJ-j>Z`HgCD)aV9EjdljNk24gpSqRu?>8Z|J0itX1=H zzzTRE*|javzhZ&B0jl89mN@zJw3lH!X64qwW_Lv6XUryOdB1@GKhD)YGbCL%?n>P8 z%4ZMxSf%9!!;C62@!W^*s1cmte;i|Q*q)~yQgHvn=Z*G6^rM-84xjc-z(Z7O1Cb4t zIuHz~C@|cPgNY156t0B>(}sF9yMxuYKv|8IDCD_}pj^)sXX|;7Gjg_-(5l$9k@_=L zgaV9-^Sg2%1_RPJTLhvINX4}{ir 
zf=4cY91$;y8UDm~zYW9b9M#k0Z&G@01RJ#$W=Q$kTluU54+n&Z_AgP^`@O=xqhYUnEM>;9EeE zk48VJ;pnX_Z-$YeLs6RY7MnK(hVc!a1PlF|GOVMjvkcSxW}RdMz4l3CNM-Y zcaSC@fC^%+i#rE;IyAd0KsuYC6s4qU-QJnhl0h(dXt=DGNTk6zZLAt?eI}#8WcsKNvy=jBE6jkl-oa|9O4h=?|N19C z@34YlOMF==P(zb+{lQ>B2M>r~TO928flF`;U4siVp5!eU87zvvpwg=vKWot-y(kfj zG7%Ra;VQ<}YvG6P1ToxyQ$Q95QR z&J<{?@4+M&7s1ThqPbj&N-@K-uIRl7ng&mUt7h<88%E!u%7@B!qC07Tl$vJcki@rUDq94zK zIWh^YZDC>;_rKN^Wm!Y&6AVllIM(F>`~C!sJp=EUw-aY492Jq}Q(~wl;!CTz?tKOx zKG9rbu<9Ota(c z(>O-Z-K@b&i7MOB4UE90B2af#SI+wIl2tSd`btn(HMH@-yZ0A7Ubjr#yMtV5eTL`$ zCA|s!8IabS6ePoeu%qfxdVN{&blb4Ag7f}+@8YXBZ*vX$==j-pU*R{uc$;?6X{(x~ zmCaHe%q&#N46K$wyy{9jAxd3{Q2>@UQ~n~85(Nk)l;$*JBvh#w^dN$u$>$Dni`y~n zNq9EPpix!+Gu3T!*Bzp>3Pf|aq@%(o*aA2nxPI!JbY{p3=xZs_@%eRA@377604yv%h_uqCO}!FMwHvPuf; z@R?-B->pfzG%6Q`yNL4*O78N?@y}vE?k*QRUuU&%FfIaZO~J^@Yn+#U+Bcj)GN4uV zfDqAdq~+|#9{&6Uc#Kor6!BJ^-{a7XB=CNzXHV4=;IdTQ_9LivwPB~|hyX3c8^sDx z8?3Ief3AM1v*>_$8?+l9bUyXPp8IKgfG>Kj+CL0XwbJob{ zc=zK^@a88!gw>XQ2Ov7Zb*h@X0m6*|&}!_*Qh+V05*TCP)vH%{dcK{2 zueiIdXcmfPRS4cuB2YVEzM-ubtoN^xoZ}OJJ~GQG+oge6g;|e{4Hc4~tJwr9n$0v& zo|`KAi&>p~T8lu?H7eew1Jq!)?oo0c5Cuhk`YbCp1HY`?m{fskC`H&T87kX!9}L_a z#{ebz=1d7)Kt3qML2k8$yqHZoL?#$Muu1w-2k`WBa0S(sPJcEMjWmR;5~<|Mb=HB+ zqiaH{gYT*DH#=#yJJP_J0sqcF|KCZY0bZ!3Rs7;F`~n{C?@wiMHPm_-;l5{?Qbtc_ zMST*Yjdkv2XXYNLM>HY33EimaTote#WAaax^t2j{b0`IpO19Hf?c@P#os*AV(3bm& zh7UPAqb&$DNIY37Rw=$*)0bh@*&MiALn$qaWs|lCCh5-i z@9sIR_pC;AR^&Q6jD2IdmTN6!RVp4K(`)1(4-Ur@mmjBO5c7Un3GVW8$Kix{VaW9i z@%(hc!m(UNg&#qj;NPuTm0V|zmJFv^D#JpKI) z;*fi%CSR>jA2B^Rq=+}{+an;_m1Zd*62Ml~LxsG*vMrajp%`%8kNjR$4$l&i8rD>6 zi@t*r&F?XjBZjSQ!!y`$7`}XbLh_TD<@H02utcnRzV<$CZ19RhJ{PCtBc7Y#cL_Ao zBcgtBhjb3yEF@9+-s<;yyhhS<2n31?0I3_bV%bD}GI$Ik<8Bl){P%zF&xEo{Yf)Wm zfe63djtm&gfH!a6ke|M@`s9&(Fu|S;j>yV%42(6XN*_(*$a@<;;Fv=$xp#dy*2{(a z&%1pNK5X85m}S4tL4}@tTc3S;?w+#Hn)^{o!FvBb(cqu}s^2jJ-094h+A@(?Qn_aO zQm5}W+VmlT8F#D^Sq<>4i1Y$g2+tKe(Fv8r#XAG3_@xRH`RePs0`fQXVM-vj zEtCXBO$_utJtfZ;c&_+ z?mC`t8?3Y!2}`Op^2yh#dn1a=w1+~iW8m_x4!Oy&kl(9O$z1B0$}&M!m|^se+LlZ@ z*EV}-^p1uY1Puf=W*l46eU!i{h#fwn8nZp4?>AOxW@T4B1%?Q0HE%_;M5;cJ&ubY3=SJq ztwdfp?7r4s^eFvaS5=2TQ)~+Pxk4_w=dq=4$4u6N6Z?SYQiof~eCJT=Vo2pR#o4t0 zN8fQAJ6Zug|LijiAFPnEx{P2P{efTkm9R%(&x^+#I0>qVw&_r>aXqnqO7<~{NwL0q zKUqydCp5CQ4aK4#QmT6!!BQa3>)W@F3CzeJg+M^TO5u-?mgY!`kH7Y_F4R{|ILa)A zWM`1$NX{KduV`g90vkLyz#|%JmQrRw5Gi0bV!{In5@$G@v`d-#`&2zBrJCBhxqkOx?lM~%{B zV*n-ejooL7IFgQdx}?g`2r51m3VnJ1_u=}*-$SvH!=>Pk57&k-kJ)JJh(PKXXEZ~X zL=L}XRt`EAaHX`jGQOHjc^o|ts1nFSoJ$L3noz?z8Ek|DXsx|Kyu=e$IXUH1EsTwn zCX$+TN}iV1h;Ta2}uUirve;kxI(&CC?^sv5kf3yXYdSq z^g9FMx6i7J<#Q@gK%NN&3bi$C+ZF@87&y5*8%iC=zC$WZRvM)_?=be~kiG3O*wbC} zXI>^HhG_bhy2sL5BGTCd!~M}+TS_HeMRlNgWm=5}(aU^KL_(*W0mMOx z(cmHnB_-BjN{MT&#PhadF+XMBpdxQO(eU~kGW6F=3m7I*e)R34TnEdaMCmyIg zpIUzqNJ_Dh(TS|ap*VWS(ki+~6%5YmZc;vxiiH~T`R+%b;L9KXZp?7#lXjBHXRe3k zKmFt{{FLB06Vn7XV3`Op1qjNV70j7K2A7%>W#%PctHdEWpceHv#>bTm8Zg-(h4&QkcatHIHa)(#6~lHMkjfaG!a)rB zTQRI09(~;ppj3?GG@}P`Hsk4-=(kX^40@nta_ES}zO)O}u?UL?Rg5(;na7Jz&tDz4yV#`0}$KqqY`%XYrnmy+Fkf zGNt0Bd*9G3Hq30QsOaLk6|~DcXzRlXsbS-%hXEHdA3YrnWqH8z>g&O;PINywu`#%% zOj~W{KvWCE=-vvWC>HmAvdiPNFP4(E#v3UqAmlG-${*DaG)GjABH!kwFj5 zMwuC>C@U(dxj2eUbR34()|~3>oVa~Zl|4CD$=uhryI zA`y6zn9TLeAjnVXS?GqO!iEqVz$ry7Ll(~PEPp6vir|RHp4YbE5B}Oek>zkbR4EmA zclWrze+A1x3-}lQYyUV341K@iI5u1~#8OE#X0jv09A{|;#QPBnNZo;9B@>dQaw1fd zqFFf^;wW{AGe{e=?Hit+pD~V|)X#7p@iA2GQ^HYd{QD5f~h0{5?HC=l#tlex9$Tmi&HECfui@O|3Nsi&E4Y?R!$U;@s~^Q#9jC z`IvXKOXwgHY4sfyo0KoOXk+qI^Q;Kjm+}=5doG2eL4PbMWdRMM5{k<`1rDMS+aOvm zfwznW49DnDGp{@F_9s7#N$*t0jH95$ko3MfnFmr`f?3ia-@9A0y67HF%L%#+i5FPj zF;RO{m9NZxxKc_=&Fj77b2SEFa4Niyfqr|2^B%%yB_R~-ge1!^#|)MxI}K#NO=}Hb 
z-u7I_u1d9M%?%>4x%TbYi44c@s{dui6s)SN(VHm!6!Li4r&tJlmGu=$sWX{ja_nPP zpo{@@%H=Cr9jXY9QxAnF0Qs#C-%sD9idN;_{1dpy!-lf`t3JA`azuA!u~`iN9V*pg5UsF+$!wu z!|+4gUvNFsH`$C(1&dAK!x(n}u&Ew+d;5j*Zu-?Jcdq;VVoU>+s8ARLQ@}#!cKDRF zRziEG=;zt53rf2{F`+4r2NIO3wc>8QL_Zy(x!)8|Fo`M_$+<(+S{4gz0?^gKd0Zv? zAS1CheEIp0;NGJGM28pE7T>v#Fw6>8<|1?LY+eq@I6Dlr-9>p_tHtD^lohBCa5uaiE37>LK37uv zsZNx97jiGpR51NgupA#zWV;xLj+sy?3CTc|NTUY=B+6o-t%X#$iZvo_Hu15dF{bc< z5}h$Ro}b?0um0cvHSAjqA5_o_7;e`at_%P}*{tAS_*egJjDADkufXWI^+0<->DcV) zI2rm<@8g>jbuJ8GqUS&nMA%9J%7S6xs7hO7 zW`;xsc>nc$a*zs4tT3@+AD?WPTl5olknfvxmg&Tsof;qMyp%j=bGrdnjI$4Di^sgz zORa1$o#)O0P6Us&IDFpJV{jTe#Q_fPtHOo-IA$dbQ|RGL1y!h7nB3+&j}e~-R$4p# z1#Lq34B9}<#_OTKH#FNJfUTgcDWRwIErSmy<>5sItcKs)+2f=DLwlc8QcOE30S;#Z zqR)$W*9?tVjr6;`gITp?RR|VAnJUcu|FQ1b6pQr9$2ciBagD)u`fAhsgjsD$d*Yt! zQdqIiB!=gpccII17H|8sgOMZ%gy2+o`puv^Qb`F@^492o8#i3`+>DBXx@VZQzn`{) zsw%-`V`zU)!DlhYTIDYk^iCQ}ZFju8o|WzwK8|ZWf5r2R{W0l)O5o$~DcGfw+WEug z9}$S)8FI?zBC4*RJ?|N)MzR~e_qHGSD}U$j$!TyX$)R% z-m^UEOv)N3mKRojN?mZ<4lJdFG%cTF3=aU@j6F$0`s2h%K*TU$$8#oeV)SS!jGG<_ zV8kb)dL`hjJ2#(p?u6;QGVr&8hs$~bD(5-ZHb{^|ds)j=?tIC@h~#};8i6!bUV9{O zs-!tG0X*FCOYgsiDpcOL6t1W?l>!AkXzo-DsiyREs{w=+&UI5o$V!MMj1Kf8X>xsp zh|{vJxPN%S-K*EQf9GA;#~**o&+`^ka-0b7{H4-nsh2<+`i6eYsi0;B-5QSVDVhu0 zf)Bp^i}+Xn>i-je{`dc<@b>c`qVG3oB4%80HktaV*{Up%RA~g=Eq)IGJRFEBGiKpT zB^g^)Zr}IWj7P_3c|0tG(zR6_JvetNMMk&3(305frIbW^OQE#gh=xC-bZ&e!5kv=v z^9=0S&myNQ{`tVtD!H0GxYqFS(a+=h#c!eb9`uouaWFdS^F>mbS;LOp>fA$riFkp* z{gkJj6scTq3*;0u=tc*lYEL#~o8_ehVtQyTp}YUi!ySI}cYlnh+ZN60G=c{M#GY&W zQkljbc=hTPZrdJHR_hvP!J|2^mVzPUYm}oKo(EmdR6$cbqfi$PD-ApZznqn5j8=mc#W+;uk0jXS`7TkT+Glosc2Q+Et4a4c;7LOTPW_Z8lwH5vP z7S4OuVKLA)qMuBFwT6aC9q{3l_|*Wx0EJ3h@bKQpc>d}$PPZqbOZq^ukdA)4A8D(W zwgh;^^8kP{m8{M)i4zKGI%`axd0S%b2XdxBwU`xhyTwpFXK-#Fl)Nh`Av#^EV1=TV zrSAcv`kJa~vfUQ*o~%^t6gx2hms)WffYpkJ7Q;oKb_v3eHawgteo@8d@=2TE+8zKmr${U;rm!4l8L3J3`1n@}IH|%d$}QsG(v6z1Jdn+zz3w z?1RzTnW1YPtWr?nB;AimN$o!sfCF(x;%=V+CEB4dPf_LC4+f+blfc_@3A(8>@hdUl zQ9fyDjUbzn2-)38$);VWUvmL`0hwy<)TJ0Y-+NganFpe~GuVYEU8#=&%l^0$8%Uqo zj}4vl%(mRYJ5gr$e9ri^Q-YyDH)+|i{ZTEbQ03vgv}ZNJP^DzxOxmc^e<@)DlXO)| z`zeNr`Cha`qtc@ZEG#6*;<}rw7-`2OOwNU?~E0hSUG(a_eJ+vlj|h_5WlZvIP8r8RC}>c)2Ak3 znF0qIz^R1zEE#U4VvHmDbD2;TGr)1N_1-|3UoY|J8p61x5u9Ii^xhTdk(96Ya!1Fm4!!M%x1_2k*V7 zDw2L(-+YFD=Fj|j+@8J+j$;M}Vf;dS`V>&RM>b|Nz&1F{W6lSFY($IrutphNyKo91 zqz*SgIOr%}NP>ptMU4 z=vvysikK)RFFB=0kHK%Be*Pu?{_p${*L|Oj@1!0XKRP5&n|3KLoSEVI`5MpRh>B9X zk2N126|>&a3fRAcOkj$Jxz#{sqf!(DiVCT8rOshPl75AEnD41Aud<`%Wslw?Wa5RS z5u8%PcxJPt$;COFr%(SIiT=nyxw4|wTFnI7=CCreVbNh*`UmQ>y%d0{e9D+*##x@R&SPg}r2e?pY#;6c<*{cf!oG=-d( z0RYn3X2YydJIujq<_KAE-ak#nE*PZHdEUX+S|%^I+l>w5do=xvHlt)CtHVtAf+n(HxjbNf{Snr8KS6u=5M{Z4VULBtFO@XY zA{q3k8o>v~_RMC=P@6FyFpuGcO0;zr!lp$v!I%(tH&|`(B3nSOBvxQVK@C@NZq7%C>KK6?=)-)p6z56AU(3macb_L3bw@JxB-A&Qs0 zG9t+d$|KhVNcu$v%6ZnoJ|E+nIG#Bh=okO=pT}MnJRLKjl_a~ac}|?R8Ayrh6*{nFCbG>f5mN&>p;T|9%E;HNjPW2}#So|&PQiigVu z#cIkyd)K*+#9jVVP2GCwpmT|4JQLiol}Kut26zJDpN=DBv2~=@76H?~-KPDdeJE@a zbOJB#gFQi>!Q^aIM%uPSvrjf#vB)+^i*_pB9YJ$Ci|~MnFeYTez4TL%Iji9E-=ab5 zuAFH)z}X&`yy4)PaVWmaZ{$BoO3JgR>P1xDr>KfP2YPe&7YcIDgeTYs^7{qL63T|9 zMP*slf(hnLg-qjnx*-Gef|0&il)aFVi;D@J$aG2q!Be9fU)p)eLn)>}NGK2i;fw5EMv!YE&SYofddwIx zl(J+nFGS55s3(m|Phthjva*qp35AEzhDB2B`d=7oTZ78Lz8<|>?+1C^h$ITZ{l0D2 z)>ChA~3Bf%r zIxLM8Fo!G|jMc9(4m?~IynVi6y?+O7S#f*(GJ}+LS@67V+>eq8xQu@sL1&^k13O9d zMgSgxbm6`0jzm)cp(UYswX_9oiGHY^v6?V3;S`P>iD zRKh4q1S5^M#a@YG8q?L4%##N`JlAMSGEaq>00b(c$$2Wlf#Ka|*cQZPu(Ol-N7ke%mR7ZWny^F9Q6YU$rqt)5Nc zthBvQC1Ubk59wtYuRz%qHQwW2mwbo&O3BP;Kj2$;bK#U@TM z!lhP}CFFGhB^wNj6vG3g*|m4(lUucwZkw-(l-8VPx3nv}Y!v61j%yO$r 
z>iBs%I5gKwifcoeH^c;qD&UMMRe3+N6<}=&jp#YkheUEIT2nDl8k`KCp>x2VB@00x z@EvNg_-`cKKG&HsTh3WARUW4*TdC2=FQ$inPJ5iSEXR3A#jRLimi@adsq z)k>WBogj*PR46aYg4@vpEP#66I+i8+3;XCe_K3HNDrXMbA^|*x+aAD5EsxF zg5Blr9(NC~(ZBkP^A8wwNxD%1G8=TAhGTF@TKta#puA^UfDx0Sb*ay#x||W>G7xi- zoPkC<9A@T82eE=yr|Lv04RN0x#$gVjzC+qVy98~BN`8fn$ru{ife9DJ!B#Qq3fDLLi60;e+V$tj5SOyUF%BMvrQXkW0~&X)o?p@Z2J~A+m!UP5tG4?z^zbd^7-C+cUK*p zK{PA5i>M)akELoA@3L>Vq;n`a5#7VgaDQ3wbam{cWNS9?5uOAx*&%Y>C5-nJYVK0WGs}c1?$1zIB8i6Ml1v%DF z-(19dcLr@lx2qL&Q*01tWq34K>)^Uu%6SF=MAbI=d!kU{yDz2Soy&?ZuUq;V?O)Pt z_^yRc?Q(aA?RL$EdmxgqQjrNDQYN;{_q71pvH~(Dcqu<3+kM-D>ggF@2uL!#`{BoU z{rVNY{~LcFd*4xM)6F9BhZXeg$nP;Ty#I555MTV(-@tn+B4tWNefo5(;pDQoEr5WB`7tE(cX2Tk2re^cKdm_7m z?pCZ1@8t6*kJr>E5&=kMiF{dCNDW78SxKphb)nwBl&?xbTb8_Z53gR~{{9|d1x*NK z%1m-m95g;`FfVn5mxa!fR4NP<>$0DKEy(#2E~OV>sqy~D@Kh@sP*WV0PCS~@wta)t zWyDpEGkNe!B0=dR8jT&KAC^FBgwP_5j-hb9&I(Q{g^K4LhMPlpY?i1CkMY^LjMM%B zKxu1)=<3BNVu42DEi_fl{lSsnGN8(55FLSkRo@{5E}V`)0ifBQKd&L5e(IAbOnSj2 zIds!whQ>Iqs5qVN5ehuVaP0emZ9Bryk>;qtOVy)9;du88=-e&n3%UnutDFVoGw~(T zVpg!$mS=4+DfBT?MMyS4Nmc}{vq@2dO9w!}WT7#QdoT1!dnGE@ICfkLuol`U*_IIk z8{1CFV|V=a7hiGkVjxx#L=D`xlC&K-l5adl2ZpEejsjfW?ZZJTT*-YszmF3+FJ`g+ z`!o9EPq2OV8`z&e$G%;0^!>$DdYsPV@NjHLpRnZ5&V|x~j>wG_>{r z4TuG%-kE{|J-3kg-Unqg>C8${5`wZzw4S!T)QaVDpEsoxqr9i6KT$yweWsXUl@>l6 zg%zJbsoBHgBLR|HK)U9nJ&oabyl!c`<%?hzy)uZP`?l}VM`a9p%YsrD-oZfD!{N_B zDJZ`k)Tl@*q9YgzPTzg-&MJ9mMV2D|dA@Df4^fC10K>7C;Jw%N4wuV?u_mFxlkdtP z_e6I!3c)CGNFW2G3F;)cPv1^E*ToF*GnEz7N1NddK@{E9J|d}(KE{#_^POvSdwxQw zF_DX246RynUZ~S$@2j1NF1aO~^1Cr>zyR2B3hIbzBlWtVVP!L2g6ZlC<4wD!O zb5M8!g|4lUiaDu1`sYHGvQk%ASy8N|jc%N79q;Cu+)Bp&ESk|rOeB|IflZp9g6KXx zdf;I2zM~?9M3x$Zn_DeO!JX9Npk_VXUGU%hr~XU$z2Et{P`jfQk@8hgt6JK0RnBOs z@|l3>bI@!jGK`>J4N;Yj+gp6~!@mna68Q0V-~Z;%qcMIpGmL{1;%TQ{u>`4k>er3$ z9ZVbqzZj@$Dyp5A_i7DWe3N9%NA zV+e(xM$IztiY(MQ^aT8ZZQmF?3XN1Zeb8Y-Wx|2u7Qv!NX-h)~Gx$a@P^YY=GC5TM z5Qf_i92~l(1Ex}+B-Z}Jzwnan(r{FkeB$Keje*fr-ix7u_F&e zRD}v`80UxdTxVFxBs>QEt++co`;{JtesFIcVNFg zX98$+W||Hh$Q-cmd(2Ad16B}g!GNTYB4xZws&o!Bm`fSM;awpLU^cJ~T490=k#HmV zq%vwT1T>sZcSj{Oc6d|*TCH@Fo(?AxnMee)InYH5Y^Y1nkZ>HN`UTOvv!YT;P3NYy z2Si~}s3<-XFzG#dJ%&3f49n$$?RLfa)b&1Q#gi0)XgGI(b1`bE=$%3h z3}*T`@OXR9q+?dC!l4BvfoH(a^p=XX1(F>IYr;3#&!8$}tn0yG)TU}7Q{!R=)EHX# zyob8kmg-QEFg7!6*S9$Of&KYQ7#w|ji#iT$qobM<_=#R2Ey=7L6i|r6)a+=0?=Skw ztZ49L1q!PvzBBlv$-_V)pu=Nyvcr~ke=tzYJ`Fi<#NDr@VyQtBHJ(c!vbpggzx2t6 z`1_xJ&b2D+4~a85kTF1w|HW&3eVn?mHYSs=&- zsv3o;i-z_(j1#kG5}|bv5TKs5pjm{yY2?Sd5A+^%ietncU_?R{_JenSI1V4MF<{mq zwMgV1I2-dt^N_OHJPY!r8N};O(`Kn6mHy4;&%@Ua{+D_>2BY_KdWK1ru}rQDKw9+# z0wTc-s^)B#6-rAgi01#KFvDmi004YrppAc&Axyuc7|);~)Gs zKKtQs$N9r4`9KJ2#Am%6QVEbKQ3eWTuhkd>9dS-tDY)LY1cY+7R#DHx?Ehol8$my* zjQ4jJym@@YWHf{?aEx3dm_hB|)fU`;{A+mn(O;x!h==c>56lL03*ijKBRT`J`Wd$s z(R_7ZB@7a3h=$JDQvv4O_UN%dDWt^);L;D-?ty>mzwl@Am;dJ9&P2hBO@V__Llw;C zvt*jQ0UE)cAar-^{>_cLu?#Ccr^s1`dRZjWtBsv4_Qs4K0O~6CUcKJ{`=m zpo#xTWhkQ7!DUo4wgE)<;8HQ6G-IEAAI8LNoY%I7a>y7xvhXFESC8X}66Corq1532 z`#>2Tw#K(e?o?CPT$VU!UV)% zdFF)7MS$HC=^R8HGNJIiW41capwA` z3KT36z)}iYTT|k8@N*?%%F_=Gd@}|ir53d1g7-fD1Ydpr8IJA7ev>euYik)8)EY^Y z0(>P>fiBG+5Cv(&*@tzy(|Wr;=Je}gz*-7!Ja-Xfa#)!KcXo^|Q~*5y6$nr;n{Ve} zvm&WCT3_DTT1)m#D2Q&9HSDAi!GiYLYRksYk_J*$zSvWp%T^~B{AXgB%q^D47omL{v9oi9$KbN>FB&%3d&!zKK41{2m>1tDz*;`+))i zSa58664JPbIE%P z8lN}rb)_Bt@Y~}& zyVKv4mdOz^D6IT!&dl);I5~K(V z0Gm}|L#YdDyO{+eXBF+vpiq=RSZ~{hKKuy5Wm=WjPBSSm_3luXfB#kum|E< z1b(OgV7wH`HK1N))7@Av$o!db<;P{0WCe+|9wVQaIAmYVKpcd+#p%K8d*eF%z-Ex2|7 zl+t{B^Hn$~23r*!DzhFc0U3GmG*B}WbValo9LwlV-uDdrwXR1$Fpe9xn-qEuSXmOe zIvY8Y;ddnjA!BKniXPx*Yulp9ofI;g{8(LB{p>M90Sg{#U7}OS7%iosw9wm+^3=RI 
z!9y(RoSc?NfB&3?!z$|XP9#}AoAn${Rh&>vn268}_VxM4u>+)3j6RYs5pt_D5lgBA zLPArz-=fjUaLK5@zN`S8LxRGf?%RfA-dKb$DL*I?5Tm)umYu_))K~M zj5JDvvV(ydvH5cxbG^pEtA|&7S339ph`xUXRj8tr{8P#rP~ydijb~Fssm{Zo@_rjK zh=_X~31l_H*DebkU{Kx$b@|5$VedT}+&};H2iR}V(S%5Qu&kE=EZU0JR|cm85T7pz$CO~?-78jbpz!R5t(Y>6^ZrZUe|X>>T8o3~S@3BmWWjZo za*{i3XUs1tNt->dNvnmGGJ-h-F&Z0Qosz?YXIr*BDjz*%dyQO-ps3E77*thu{7D3&0pJ3{NqY zc+D8LTrlTe`5>hUG2=kKIA)a)VDSOCD8Z!Xv9GC?*lo=s>Z4Pdwx@43;4}^UMRb$N zhwT1u|L&(4KCqtF2K=|@ZBLM~h|&bZcfRw7Q(3CyTGg~gLaS8)=v_<=QMC#hnG6#Y z?l8YWP4+(GpP&B|{{+_i*Lkk?>m$DU^m`dYWQBvljPrYruHl1hMy?H|gaCjhzxRIN zy4~>JM;~XfJV9trUlbHl$J6r_x7&u>e&ircC9d$vVhA9t=qj7-WY z?KOw!87Y&omT{hrgB5-6fbS^gxLf7RyNO!`Uva82UsunXv<3Jtr`uk z)|f2lazr!Y$b%9=-&A!{z#MBGP0(y)8tQe3S1F~UEw4@uy-Gqn7oZQxx($ru3PZ__ zbdN?%Q^F8lm|5Cj^9%zoF5F57b7|2JF>Ha#gL!EUY_Ol@QaRaHG6aRhyi zy!Ak=I?O7@aSd*62AY#<1pfQuwkP5R_Y5YThul5r7R9KZMhEj31V;bmA+M*^LM}=baj(xjDFe9W5i1+7Ex~-2u zDQMiC201AUr~U;wId{kHdY#Yjs=Tp#!U0&t2k*R#T2~qqq3crHm?)8yWU^N!z7UD} zlDjUN*~1cYf1h5rEK|c3o?)JM8uT$b`f()R+RuAm$lG2oc=i6r_|6~vtMpz9mZip7KbU2vwzHwgT(_IuA z)LKR3yoU&|BlxUL^c8x~z|pa7QlbL>^w-}HJEWg4bBeqispuKv)(S+r- zaw@pT(8XKt`3?rkik6ZS#V3Eha!4HS$%w>9{Rk5stnkuepL@r?@7Vi+(FbNVF?u#M z+r`R~(w5PyHmf>`h>lQ%I?IsC@l^);22GLiN=l z%I1e{D9N!u*dG(D1i1=?{$D^#vm680BMWpd*UvsiV%M8 z-~aW$9`l(_zAQluP%y$p2hpCD)6YmCgGqa-Y>2J=ebG)Whkin5U5PjS@KR=AH&8O@ zs-<8lF%&X|R}l2h?nH8p>!k!siA6uK?*V=euz@~;t28Z_5P+Lm`SC4!t47x_L@;h!5xN>nLWI~EfkIv z$8{!V3jEDG{?@plj)`0=N3cqX51x$y5IE3XlFt2zL3K#jjg+sh#Mph8$e0X#=5`@%*)(DyB419ouR;}d~#<_CQ2_~642r)@G&#ymdj%j*pf_xJe5x4xb4 zcTTu{J~ajm%iRM8pTE?K%`s!i5|usCGu^NfqmC>!nwp`qfl@z?yi9U<&IR3jhmFC^%9EFxrT!s)Yp!N|k41SYzPpDS z35@eMqWO6ABZdaK%HiBLp+KZog%j6ML-GY-EdNPiAhPA*$b@E=+WF!mk_%&?s$L9t z&(dHh6a;lkCH-T6o{2=RbNs#Fou`LLej(_jx&dYb#pBP1hY^~=@&LS$3)=DdS6`tm zci6Wp9AUVQ>s$8D^bExG>@o^Yba1%Cj{~q9o`3QiA(gvtILOnChmVWfN(8$L4f;|G zo+-`}YvS1Vjht)+m2|?jQW{Y~Xe1`QZ&6(WaO^6v>u-bdv~(z)M#_wh@}Mrc(bPmz zK1-=S6C4>zD=?_~Fq2OoJH|dTVNFit3 z_Z-M1Lw6_M;Iy9V)v$uL-lJcive#;`5v^F1e;;of`K|?JoT!QLQvw-xrIw1)7QFZM zZ{y$mKmJSjSAKoN|Mq|IPvW>fqm)30rYvlKVJO}Gw6^h1ja*$oR4q@64G^XJ%iSGt z+i-|-WtIW902tAJ1{{jl^k?uHBxtQ9g((AW?$NVUH0U#elf4gAkDiVH?~lJ^BW+3a z#}UbrXnAf(r8k379fc0KU0{7jGsoS!;m#<`{red>Nka2b z&lG@t-y%t~LauetH~P_|f_b__PuFYs53tVwv**KUo5DW2OdY`efCC@iU+}aa@FRT1 zx-9tMy?621=bteE9eHn!_u&VheG#AU5wPr;Kr2*Z&`X#k%~R&odz`WR%Yx@^2S&_%7(>G+=Uoq5%eCY)z}e7G8!ZBm0n_$F+h z3oxOdZKY&S4a~5XkWVd4xN%Y(RZtvBdED1G&$qNW@R-3|lFERJ!MPr~PN9V;slHXIZQX2bUJ!!C`YgiuQU8MCG@HpV~k&H!LExCQ8Uy7dZo@!rlpGb!u9*i6?abE6m+#XXb zCDzTEu(OA+wpoGI@Dca``W552#Z=7^fiu$J?TBpEu~a%!1D>`W!voEbDvttc_do@* zBP@%VhhwsG=2SYa7T&N#W6O=L>COh$#yZ5mgKFO-m>edq2H zXsqej7oITk9!Du7U~V}t@n z?;W={pW}b{zy3Gy7yrV)4Bxj+^5sEUxPNLqk`hMjaEsSe4u{pzp& zA>6G|O{@i2*T%a!??cb?iL^t0TEkEjQ_sLW)B5vcOs;-sH9_cE>@f#cnUqtU37U>X z*py3Kg5nTg&Uq`r-CiEv#j>mz!?Eo!R=A{%iQ5k)|YjTuP|*Xphd#ueC(trSp@$8vU9G zDNteUg2Rof9{jK8)FQ7`PCZ? 
zH|){zOgUC$#RBvqgTX?vQ1DvTJ1ngMHj|*nXNv?h{0Pbd^#UzzVdX6BVf0}{FH7%^ z;RciyR$2@zB&rK0KyX#&56-v>M0)4oPV<55wq+2O;L7PP_HjC!>67FyWgDUzLhqaR z(hzKmeY^6WPxeK4eiNqvoc0cg%uA$3lPB`_;l#TrU|zRVG2qj``hTYFiDX=k%VWIJLAztAFB$y zKI^*e_}wqQ3V(4Lz$*XXOwNy^na!qlk9}3&uX?=(e69`R=dCW;IuQ43 zO8*N}*F6~$mtrN=*-w{>WQcbu?E=L@gH=TMK)=0Z!ZGq*b^7U~)5 zUZJk9gD*Eaj@w)G{h2}Ck&Q4Zo##De*Z{+F_YTG$^9K5e%nBK_877@0988pJY7F4& zTb3wAjp%)c%8b)saI@$%q6tV7*#Y${NRt>)V|@G*jyI|hLh;VYNla-~X?O;iQTq4b zA@jDWdA#c&l?#fEK;em!T3z6sYCajKlgsSU6j(~d_4z5PN&o}I$>EI3L&*fGv<2hX zNjo4y&TO-qvDwR$DmEBIPXo&aqCC9X)7aPAmVB1ViUmAM+tN(862WtA>m2MSxmtI; zx-5947I@*HtVnbt30;>3w{1H){s@H346iORN8!5dsWq;3D|LZ}J85dDBsq@}9|;7!}1(NCyGg3?npO_iNXp{!xWqYE$6#bT-Kn0#qcnq;=H@YKy_VL zl(yn>|BA-$DF!qls2TCxPJX%Qi_si<>^o_D;}p2gpd*}>Ww0#j%rWM)V=!qr4{pp# z6#ra9HXTABf2vQ7fPoTM@N>TbR^G zwTg);*FpBbmZ)ejj)Ul_smNsk6s_w8b-hDfRvi0=wKRP8_HCT0YN!Bbx{Qo5Wcy*$7oV%v`QA{1r+11o(Rdd+>Adny-A}V-NPjVa z6@p0W6u?Jty}-&HYPm-r8!Dc$9as7#_P(7!BTz~Nn6cBKtu1DFRSa*AIfxUb;BHx> zFG}BArjnr<;~Rp8trT<=z&i$+42}uv#KJWIvk$V?hO#{1Yp?I|>E}P0L9cw51u!gN zHP2c57Jf-@|@;3qNkC z7PRy^TTbz;sA}{4i}9eCF5{(AaCceo`1Aw~e9c(N=03ar-DltFU{y({QB~;6!z(8C0Kx#{k)EMWEP@`^ZYq7>)vEJY$FNSKJ;y z$NuyMj_X&jvC%lw=_aolr>3+4O+P+vk4dvoMk~dIv!P8yvaJzd^c#v%rH78y$4*3N zmMvSI3N9i4+IXia3Caal3}K*Z$Z2C#kvKSa!vNe>1q?!z<>Y+QGUQwp-1p$CgVlt7 zY#7HA`w%pqegJ(-9daQf`JAN%B6T3Nq)SV50>Gp_0Z=cCqflikjF}N}k)q7r)u1e` z^#LmV%bCoP%>Fa0}PrEa=3V?fh@IJU}L9lzE2MS=y<$s45Wi&I>?RH z;GN@`l?pRt;&N#f|I2^&&*S%e?L$Z!S({l57=1^@M3^+wAd?dAUYt)F6T}69HEg4x zyR^Y$U5)fMhoiJ5lV&w^N1!CsYqm3stEnx@gm45$_ED@bL?;WGd#C3+T`s(@xcWxyxOBvesohkGQEIiZ^du<_Y6 zFs;@KbI(LuZdJ{+V+{2DcB+I8l)^b03Yw0C^caBvpge9lJ^O6R2Y}!A;cFcGj=o)S zee*f4Z@wcBxua)IPW@xU`1#?jd?D+5<+p z3*msepw*TQ*y;s~UX+TZ7TjM}eEXyKakn(M#~ofnb>n? zNYIRSPQ;F*6bSVH=->Wzv}HjnoHxMa2l|^ng%yT7rHp5FOLb@UJ5;F))`s=|0r&5I zfXm&(sn<>FVC-EO(@)gK3TGm~7QAtYQWF&4>-CC#-$HG$_f)`g_mFYsbAJD;M?CH` z2rY=@R$!LSZ7mJ8%rhe@vx0Ms;DGPPfy@0X{LHWXNeosd&WVhgw88HS)347h8B78d zrCzYU^Rrmr`4+5Rur5m^jRM_x=Zg|Ck2za8;|UTwFj`_`ULW%ec(BkBaYiFDEwO<_ zV8>(_M8CwyxFHfLfpP;OpfT{l!(B2aqK84U+8z78;ftq7#MprNd;t|YQ%d9+mIYA6 z^CN)h49-L%WiS4A^i=H-&8p|Wv)V}2CGaz1E<>lEngCWbPTw%bSMc$O;bDKLpvUZ& zEk$!cYWi0N?rQpe+fQI9*C?tq1nlJ3=!blqrM}s>OAs+x6^e1R4`XAz+8N&!EUn;+ z>&ACF&x}JfLSs_n4D8{e(2bIB%@R^|%*?P{?y;^Hy!SJI49lxevQfJWh%TXCXchxS z1xRU1{lQ+^cwG&qor#O+aXKF`&1d6Tz^rs+za-zw5&afV?C~@vE#*`Zoxn&?^fZ$r z+X*m*36u~&z5NQBdo)M^i}M<5wOya{Y?f>nATR@?&yP6a=$f|{0yF#VienEtubTJm z%nHVA0_HhC%v?q;?g|f|=|9vPzQ5DY2dmb|R5dQZ2i0EZqV~Nk1QedGav9a}a@P^bi#| zHkMmemC(Lsyf@76s-RBy0RU_S*x=*rC}%>jw31Z%6Oo-+>ED-pCv|Gm~4 z;tEV?_C5L@juAF!KSDuht}&A#@UnUE%*_njzR&6e1E8n~M#WIhU1(v?od zP^U9ZzOU{9*G>t6_QbhfL+z+VA_%`!PQCwd zho`-xv<0so9`N-1gzcDjq#sAzN1g8yj#@Km+<{gtm4Nhfg+PS{?1sc|*x zs>hgBc34nY3)D9tfriQbe?f89`EfP&mkfqv2g`{{(m4xd^aE=(4$2GQXCE^$0)pDC zp30~m#N9lOgET$Opy4`H(xqiJN(Dy8x-7U}uW$!0mwUig`0^eM1N-euVBEmYIJ{cI zKJHAC-GRGx!F4|%3IEEy?_2=chD%$pjbJ3GN~nZF$2VqJA$$STZYWqIXsUZUEpGfB zse0ypC>4^oP5D`$iM*AXi2@S5n)e0ptk%ee7WRaadlBFUgW_-8ulT`pC?@(ivN|Xl zky!z#Dir{q&QBy@Lk&Mr1tb#CV0W;u#Ugcu({ZCrH77;db-yBH%rIQJ?0;n-_ph_yRIdVYE_GY+{Tn-){Pp&QS?%QitQ-gVSPiz5lu-hU2;uBkLEk?Tk;E= zxVw6Qu*@gJt?a$9ZyOW3)=D;t*!YzJXAqDD~4JB zm^@2e^r$KxN$=3l37t{kA7^`ztKVaQ5~(Y9mrKq?SWeP;tu{`OzYE=QM*;)jbEr=U zP~mnaotYpyQVy5qk4Oa#XNkUmYMM42f(muHWPjE?OKe6x&2a&o^Bs`<_T?IrAB{Az00X|VBb-&~YWR#(x(zH$`W-pZJn+nyX_`IbC&;EW1 z+EOHmoX;S77AwXu%j6@zKUwFzPwFk3v_gmDb{r|9kkZ^`&TpzFL^G10Z3Iu*n2H`> z{SF$0Muz=T^FK~L;_Y^D_!;nYy`r*}n@D5= zw&HAU&yPVR8WJstV4-DI>MY;MD>KL;#-w ztZ2=8Tsb8}3QiKhQgyM9IgQ>uKR@<^IE?}bzIhC+B!bFowRKWE9N3uP>14P&wnON2 zeBN>FkYsI2yD}*-V~k+*H=}b1E;tmG!Kioa#})nd8T$4m-l?Ym&|<#UZJ)lOww8dO 
z0-f$ex0#GJf~WVZWcg>sF5cC$x+8!ujUb}8D}WU#-i1$CsHiErWanI|&T42sB7VsX z!X2=nB&Fmv6Eo_7!3DO={@NeS zI}`RP?5(P{eVk{e6}*30b7`TNg*JG2+#Pq%Y9J8Rze=0wKH$eK&L$JSh*1K-QqQg< zK)XOiFUbl;8cvxBZKa#)nQe5l^U@rY2Idw8j#XxV=;+v2q$lR-@ujLeWLT zz??6s>FG)^N?`~6v8Pk+GhL_QkE+R>$w5`5A*nCnuSkkWsdr&`_b2Q$hRf1ej*imc z+DcA}Eg)Y{XcPnTb2ex&{P2grjjw(EThpl^j8|vY3e1MM+2}jDzs`$Pj8cQttCzDd z%vorq;rkSy17X$_Z448lBmPZ`LA{Z6g5L?tgvP_UkEThhqnES1F6 zLTz;yqDZ8R&TBYT?qCEL{Dl)h368x!KjuA<@?#|iY!LzTq>6E##ppBmHz_*NHrfmt zjlt>CT2j0M{R?Fu5Xn;ig5rg<*Du?S1 zkM#4=upj6Azx7Ms!oTt#eIM$JQCzT?l%(SRRK!H=?6rhvvMV5D#yTZ?8md`Hrkd=~ zner4ivMzTqL0rHfqZ!R7X?(w>Ex2n9y)<0+Db1>Nvr^Hi_!HF*n@j;F2`DWCM8zeY zQ|47os01)n8CS5C>Ww8nt1TO(GfA&`3Ce#`ar~(mdSK(zr zX?L)?0vPb)8tc#2f$+z)Up(7|`@Qq?8Y}-1zanj8EdTWAI_H&@eB-ApdNEe{HDkqS zE3_srKD(63_Aq3;+-k-BvfydoGj>-MzL=qWagz?Sr(yDRl7SFxM<`r40uik1JxVE9 zmlY59_kkRMl@J=MEMuA_Vlhifn*DJ`Yeu4Jy~mW=IAkgbg$H(1$kl+XnZ3DM5-{N)_n4fpM(1h+8iEor}D(SsK|>Q zzo$|gxE8%1fzHyo&Wg(_N2mZ=U$tr4l7h4vmwcq*>umP3tb|WIEBLB` zCSTCY0fvB>BlIjJTdeHJ37xmH4iBIt3MdI2|bsSe}2&v!}I~ z32g+Nr-jNywJ}iB6l=*%9t@mshh!Q#1dPG8&du$1&EavAW+tOM&_|$A4{M`ppUdE6 z0C1jXQA)-AdLinAOy(s&y|xyLIh=gldG>VeW#nauOKEA-Huk6$OqD4xcu@cv{a^oo zw*EC%w{6P`!&)C>%=vw5?Y+H(2 zEhT!TvsWsXwq{`(0k-G)_S5wb7uVO}KBXE>&#Nr_jDp4_Op07!uu^vbh{4i*dkaIL z#Vb+8?v8a`@y@en*||}$+K*T$wsD?k-}eL*G$5{jq%fnTV0h5wqRYtQ+e0-^?}^i1 zV5PukI4J>2*o9!L<1b1`BELM(QZ!OvRi|G5aj>hPB06j=u79~)^1Gv^rSB%qC-xjU z6TV4}nZ5&f_TruJ3w-9(s>^E#b*e}R+R|{jtdwk3w_*yL1ZXy9Qc?^4*w6e5kE}T3 zkz5ZevGap;t{az#${YMW-~Z*j&Umi$5_*56CwIm*`RrnnS`QGalnNYMb}u3ZBY~hO zS)yp~4%|q;TDj8;Jdb@umvU*9G((A8fXO;3OMTComXN!ieoK_835?N~CC5LKSr*P0&W^A?8|g1Z8DoO&9irE{OG^{b%+a&UHHtN=p@DYg z`H-2LF^EUuD*X$8N3%!a}lQxyjX)m{kgj}l{NG4dYrX|1KF9&`t0gvq@>MTX(;WAQZF!T z!0BCIm+3Pd4maaYl&OMtoH}5t4ds89b%}U!%2gDrH8Y|gC}z>A5rq)5h=rZv1Av@e zwi6iXQ8eFD{6oiPWFAm*$rF%?S^QU_B*6vszQbUsbwvz}FM%fY9-Qh%4!%Y5RXKoJ zFIR{!FIT4)mk{6rA{;1E1^^#f<>s~eiK7fBVmJ#C?>9OuR+;?Xwq7A6CrPXx z#}O~T6kO`0LMdQbX?XVK--L2`kN0SKz46rhNm6fi3TDu(_|l8JJX%WjQb~b^tt1s^ zLnoiyN=b@FAjLG|(r~>#huUV#eJbZzX!YBazk^v0Ze~Tx_wexe zkcpGzFXJ{(lBaTV@*fPIFfQ&Y6<_+?2XO$0rvjLUxTivcXOO&b$yrq43~jFC8!15{ zjV+Tw3)#w81Qa%f(gML{G4W&}m#oM=zpIa+-JOfbPR?~K+5rnlp+a#zgL6G$wjE>a ziH^vrRR&)rBH`E^505wO+m7uRSgLW6r;MERyb>LsB3P7(tmidM$0C%rJanl?G&aL4GklLJzEp^jwOyoUdAc7R#r^UQh zEl0;f|M|ew=iVNX8ZCW`|$b z{=xen;0M0>4JL#q6xXK6gTzY!#CZays6o|CTO_}RSmNt2RPF%j#}4y>zw-0H#7*Ax zYD|1JJGYV|6LTpscM7ZYHwb$Yt-F+oxa%`;j;k}y9z3H15o<$SqNJ^F$5fe&V!d-F zNc1QEZ`#d#-_bcT6O}Q(s8tIpk3G#p`o3X%^>HlvJsvSpd=_8)OjCCa;6g&oB+nuz znX#1blFw6$jB=HLa*-*hgDgmpe(y;}%FmVprWO`g=5S&W)KapziUp|4j61YB2SI`& zdZKprge{AvUk$my4*-sV*oJNuvC68Ff*1vgXh%A~zx2sVyxopu5dgsd{Xh6??0kbZ zV@U&VsdhI1kyuWO3_y8Yw-rhER`1)<0(d+e+ zdFp`UfBJ`hA1=$96xv`TOyP%hU7<0C$YDFX{-k9xPk1~u<4S;O4UrGg2WbiMxm2Ia z!GCw`+lF!Mm>t@LC{OQ_adf$(&G7e6U=x4@$sL}vw>n?)xnd=p8AJi(M+~$NKKDg$ zb;%clG~9T31D?ZYS`TkN@L&9|{WtI@{%`*$ynT4U(Zz?SNx1l7A*bs@2VZS`CP1a2 zE-U)>2>r^L2|88f_JS+KEd#&-AES}yNkeUOXeM~n3$`QB{%VHHy5Jar7PVmTPc)N> zsbRKPb(r6|yTe0$7sumE^xMm*;7l2|5E53h7?W<|D!Q1AW(7#<2?vOrPC+lRc;emw z3{R6_4oZsWyvdQH>Rr>(oa~L$~CZ}Ksp9p+t_Xqm2 z_Z~}bdaiiN=^6{E&{OI6p6&O(_}!PR?8`MiytY)Ceb}^C2KX?S46AB_pLy)KaKq&w#lIbB+f&e)i=fN+jSa?h7iV71_Ji)E>EODz+-e3sEka4YcRhB1!xH4&efSSx{J z!KI|6mV#wnv28H`+Da5Qck4p#^eIV6v4ld^vG5ypeU92L7~3oQP0#tVV}vhmqzXx( zLFoAx?=Sf7+Zzs2S!0&RaInLrxq(tElXOy{#`NC+lJX~XS2UNi45HT=OVAf2E);3+~&3w|kehBWTlmbh@Fz zd&Y@kSl3I&grLM2+=33|V}BC!ov5p91r`I}N_4^(w2VJ-&MTiyoR!X*&}VDI)Uod*i z2@cL^2qdiGXc@Bt^inb79LcF-rwu4z-SSmr1SrDp3y3wE4@ zZV)z*bEv$}I7W1+j{WgX0&YA(DaPaQT&}Bv1Ia=u5ubRj>~o*{lB=>ETEa$W zo(T;T=qyCCr8Kbb!LRG%fOY{4kAvVIfp5_8!|)o`0)Pq;I+PQYu`FH`K-4|SgLrCZ 
zmNGXgO4mIH&6uD`#uAuYX;sB7T)?$=)_p!Vx9}2S5OU%~u!GK2*cq9DX`)16qIBw@m;n&# z#K1=buSygU$<{m^Qx9ABX#D*{6d2lvStJ45e$1r8q&ZGiEN;ooq}u1EPtDBCBNl_C zJ|z8Ve2+D??V}rWWS~z(&j&88;*7pb4 z=-JJiHlSiYsB(c;0#@tf;9Egk8kJnaXGw(MkjMegC>ThjoF$(JAm=V3sUhXc2V(K! zV0S`iLS7-B%X+=Xc6)$fCJSkYNgENhkpVOlU{<4&U!$Aa6 zfjbhtoUdU(_Mrqwrxwu0`EUMxzYD+kAOCyU9$qu4Q@4BM`A!agDH-&$=vD#ez^39< zAOI%CBW5uoNXKHrNQ#%Lq0O-2K%M!F<1KRk(V%Y;eJpINwI$EoIZ*;IX@yL}9Ejz( zOwtbd&hwd7C#)FA0Oy?3p#4V2xUkdJDG{8>+X$pG=Jhm`8Gsd zs<0;-1|T)0fdbS8=<(u4KVYU|OrW$Sxa@Rl?lPzt^a2n6`5Iy5bXyoe4A#W`=pYSV zi?sAxtwaMIprV!eAoWN|r?XXNJU}B~pir;~a3EjKX0mC8#S$f3i_n(S{01!d3zXXC z0ax5M~OQ3Fp%s@h1jQNx~| z3wc6%H5M^;&LAks*7wPA)*^@HnHZMR!YP==&@gSGm5PICL3SaE;j&&Z%$R6~BTY%X zw2WBFmfZj)w>-=P@%)ArA_zx1{Br>TK7-6X`Pmc6n(s1@dp?h4!M@${Fm@LBfC}pw z?A7t<4CdlfwzfpKhVq@GOL+{Wl|>>W^g+B#Q;B*i^mrpdsnmVcMs$V6W^K5?T(Mp* z`1IA=D2i;_eMn1om~&v+B~B5AV5n_`^G~!EkqrI0Kl^9#JAUWyz(DK(8l3l01l4y1 z)l#}KbGL|Wrl`m^U?B}@z21k^>AsOOErgi`Rw)T=TrOAG7~Cfi$??v)utBmH^Iw?#p;)`Bj-+8Kw4nkp8QGL`!r zc=E0?LRctNwG!PK(SB2^n^|7?Dkl6Y@Qm-sjP#CQ_1Qf0vB0Z9V3O!mhERNREl737 zv({1?)bf)&lNuq5S`4Ncx#%Lz`wz5O9PM~E8mJCGLi$=MY2S@=NQFv4F{cGM7qUql zu=j)LvL~pMvcH2dpe=A~aI09BE6^@L3GRP9=ZqUcn4nuOg2kC~GtwRva{v#K3w4JD7M7BqrH*f_3*1)p{S7drW;uPZ{EOakA#F!9 zg)tkX`NFU>j2>KkCZJvC+yVfpKmtcOV9lbU81Vj!=lJCH8^)0ET~eN0Y79lwq!ZAx z^qZotg)M1jO$GPaZ@U@3{K0$p&c~lZ9iRCBbDAq#pPe}6FNT=O5irqv$Fn8C!vnEf zESL@RXY}5xsZ%Tftze3uL`+U8=+m=Gx*N|dqf5;7KK~NNJ|bxQfJc+8h207z*ual*v%)v-(PA0U0AYh0*vsK?{^rx+zUHX2l_bB%&^HP z(g(IOV|!^!cD`XzEUGwxM_%q8>R;>Sg0~M3*t^d-zz&xbDzy-?7_hDvJU%?Y;aHa? z6ig*%!DQC+uxZv~4so57*my8Bo1vCoB82dHDEaU&OT+!;f+pI{ zm~HrKqTfWKspLKbk4c)+l27^0Wyy+o@_r5V?hDENn_DuY3nLVG4069+tj!sS=h-={ zsepwJsQ(>BM%;75Z~8<35)NzG4FahlacHIFH9%iMA7{AM!evTvAh+X4x=40VPR7cigQleGwS<2fK_C46vd>=t;v%+1u%D02n$7tUbg1``?G_=YB0NpZ`&; z@Bdn?FTRX=eHZQW9HqqO`h9pLN+>w`9)7}S5fMH$-gjdRRkuM!y(H4Xx2Tz#O>tH9KXQ&s1)$rkP`VHia27xjSsGrvgsNjj@uwtRKHC%#2@~tyDbSKS}cl(`BKFiL2 z0J7>7ai`R;DjeechEm_Mu&d+2(pakD5B$2X;UE6W`zU@8ESh2?gWcPLaq12jWyqC( z%sAbyGwwJ$*zCTXiNuX+sIf!qF{T=am4WGSzq~9HYRpxpT zPcKW$PHxDzGe0%M4?MfhBF{aSqdtA!aeqr|+%seOPfgz)KokI^LM%a4M}S%C1jn*M zDj5y{wg9HiO2m|h+m^*eQtL<1tekORvNjCcwgHrTCXW^7yosJ9^#g)}-7B2-;nl=9R+(9FIQP(i-fGy8b z=cd_nVF`c=OXsAal=-NKvr;jrxrSpn z9=9DR74`Zao_+4u;QIbI(Js%E9;GC5?4QCclHNWBs!ioO`GAERnqm6MxVNf}!FjcF zrexARjR>x>5D3ZU!Y=4I4jlV7H=&=-23T{|))Eu|z+EkKz)(nK(7Y%_A%&z&?1>}` z7-0e;$y>A8as82h`1f&#OF=4&@XjRw@V%e>^Ev0Du4mY`XJROy_3Zu`(d~ibh@?i; zlHpO%NuA92GCS>m_}s;$YmWlaQ`xR9*GR4y;QI)Hg`$GItQkxMDD>kI0Vc$PN!S#j zkV!AKLJQOSao~gN0%t+rmIYt`4Szo_mlYu)*>PbfIg}J9O>H=7k%J-~Bd9fNjbZXu zEp1%IPY!*9%lE{K=ASt~t440PV6J3CYwk%1v*pEGfIUf$(4NIvEmprR}vT7o< z_to;ub?$4>%#@R6lxLmS;e-lDt*YpjzttqBhLzKO6BG(8ttSY-f!;* zn)%$x1kG7Y&;&m4>g`+fV@ESbE$lYi^etzKUTlYMFhg!p*85D?S{mwlkL8{3$LD^- z@5eWO&%b~#{C)o&+y!y)>oB!Hbm%RJxQxo~tn)45!h)`GD=qH)QmiU9qE z!yR>9@$H}a%Q+;BlYoq#cUNbU_ae}ts=Mt<1|8-+*S%xk52zT_-&fH%MMQv%PPBE= zpPlC{=CC4XD0%kz17P<6b5L0Yel^3t|H~iYCqH?Wg<3>e{kNK5HHH-lTHKC$P|2`LKh9A!>Tlr0^4Oh5 z4h37teo(Dy(ruK?iN?c@Wz3!w4U|Pix%as>T*J4;4F84i{{sH)pZ{(iYUe#U3W6F` z^7k~9tTvO?1;xYh6ex+n`50$nDHBgeLdWfy^cF2)#?Cfc7ErL9^^nsDdX)p*jJ|jA zNA2{CcqKO5isvdfiYQA2r`4Lq9MvvWN+{16gGqq~EJY46zO#>kwN@O~aQC?%#@mm6 z5~5Za4BeHmaQN0eiU@a@kvGlxlviGpR4gIFDw{QFW0c5mp%Bh343 zH}fr+)%XdIz^nrt9ghn;Gogr~mpLe%oKDHoccFlZ03B3vD-CGNf?5oZw=EsI5+hM` zrpzs!o(gUlxxIRBLTe&XX)O>#q0Wi;ml?DqegG4NlJ1u0rBE3tcI-4*yTj%A7jgZ< zZ-9Hl>tFth=no&EA2-DO$4EdxMNFYRBw@rQn>*iGF~d>}osL(zgcjUcX>e<3=&03j z?AsGtS(ecBmz1R}ytanR<%%%|8e-vPF~hDlJ5DG*>=a06VXH#9JK!7uR0$!33l44? 
z*8D;IeB-9}>*W#!0t{onF=65Sn0Wm333|_l=#Z#W=Z;kIQNb)6!~k$WkH2_c=f0JA zpDNC@3Ei1wNTOEgn--O+h%R+u01N7mvy0%QVM>ZI1`7*mC*dy!b?r}$WdxgK z;uhTV+VYwU4DY}D0{gz>)0Z#NyQ5ZhXd_664DTF@7LB8<@5B2m2E?$tiYd|{L=#Y= zQh5$kaL@kyQcJ2M#XTW6H6OiaLND!mm^vfKWG!dpd@lCvU2WcxPo*UdFLT}OM#_%> zmitf)p45(oOF?ga(rA}rIF1f(r^$|26!(_fv>P0NSjTo}`pkmY-{P|K>pk8pje-G{XSMcnc{|J8UfBs*^zxHc@e-psJ z{7?UNeD5#+AMyCf&%(D?aPAyZ(h~}45ff3=^`Tfv34g6cr2|waQy#+eUDS=xGt*2R zY0^^X5~6;lBCCLlyvXxGE+r_=$~$H>2;VE3-54>b9QjRLd#7&#iy93@E3RwjQpN&{ zb0>gOR8+XAWRat3PE`d;Wy0Ra+{+gJBE&7^Uz3*2JIGx(0yi+#)3O^`=VG}8Yft%( zsD8?|7ACd_ui-c`Pp+-v)>VW9R%8BiZH>ND9d!u`sE~{Td*62*B&c(ghfP2EQ}yupZV#Z#=dXZ_8r&Diid|sK%H5}mvvdN z?Ghs4Wg_T~!-D!~W1~+d$XE-uOpf3R>xf+5U@2WNh5F5)%L-soQBD;>ID1=lzi9ebdMbc9lh^tt;c|xd#GD1Tp}h~fGXtT2es94YC*Fz z;E@<=CkZk2Or$%>cS`(}fJx%*PyFIXfxwmW#DFWDvn%Rmcv2ILXq3nq1vWQ1pQtSg zdTRHIBN+z+$oi*L1C!)^w{LQb;DPG`~MKI*t9x}!+5_(KQQ`+K@NJS z0~V4h#L|7B_&Cih&|#H=^>UXs=wR26XG!)X`Mq#>`O@g- z;Uu(BDhE#)d~(|^jN7Z7pB*-$y+I15om7}>HJC>NzwJ9w#pqleqhsfCIw4=`?!c!M zXwW9&Eft+r1y)zm)w#L0=ayq2-my`UM<|$*f=b%KvaBJU$>*8U;-$urt(Bmfnf3=o z5Y~(kAfUoYZ?cp9BRRN1Wmu*n4;UC_BQ9ErhK?z?5qLu=!C}g3Mwe)j*Np4n_GO7# z>K%Yf zOK?NTBvK_R-XCBk7TCEF4aA8ysB_0%Gbyf;{&G@f^lsz1vWQ|qu@YQ(B$6Hr^kVDt zpwIr{FTRgUYnccJAe2E0tu0uWh3|~dB%B~5Ij&iW>-|06egAzd%Yy5=#$FA^ijuyG zb0+k0qMoVS6MruGUipJr%(d3AUhnaxUlY&r9|f?y_*y(m1oF$Q43hHH;I06th$)40 zTtRI%P;PpJ0Ev=C?WutU!aEUZpNT4hIiVEk0L?L7oO%D5z*a;=? zO=1)g2YT0nTu=Y!eV`v3_MOF-%_O%FoQc;q0YWpwvaC^LyHuj0xK>B;jJlsCY_}tJ zj$H|!=&XrZAp1=Up^{PBY+SczY}4N;yhh~(oxbGg9XC?(`#AX*-p{QM0xaQQMvug} zJ4kbuPvp)o0AO({8j^aGDtN+2&Fej@oGHN;h!usZXF+sUmpaDm9>61hxoN3l#T{oW zG;&aD#dg~=4(sQ94BWOYVr5-p9+L6VVF|3rkLW&s$2Y&s4j-Q_7p%^mY*E0r+D`sh z55wTBMwL~IDbRELp`czr0Ll{iw2Rdt;API>b+TI4w#2TRx=@5@vM&k?W95EsmfhMo ziv?xdZ#Y~v?_BfVs7pN`1dXd`{(!Wns!ichsTCT;lQt;~Kk&gjnH)!#V}m5ZYOQFO zI{@MAg`DIDa>_;?U8)$4ZP%dbOc2aU(3=1(W*HRZ>>sxn(rLBv#Z2147#^HZIm_B+ zt4%?d2BAd45CeF9-^c@(Axx*HB(JNLRu)#C?5^Jm@04w zV8<)`)b~E+K%Nr4p3eDf6k&^V4+`i=)QVMnQWwig!SDaE-x$RL5AIyHMU=2sDiP5^ zur)133fa+8%Ly$}0vt(^+H-OJqzD{a>cQ_PuKU&p-fU4YNcAQF-mnGnC4sQBU-)QjtVv(9sPK~ICc(8bBi^DBEY26fqlEpq{HVwa@88pI*uba-cH!X z0u6Q|R*vZ~1UxKtQV>-j$_}Mg5$2)aE9rCYL|vh zx)PJ)RLy~Gg748vBE9nQ?&RUyoM}q?uiQlJqKWCV3;D{kGp2-FMU-Lg7f`oBXG4HQ ztu1Is03MDW6wZB5`lY0jMcWm^ImW>L7`pvlq=?2Dq`JWB5=p_SP*-9%JI5g!@T330 z@51HodB&SOReT0oeyfve0MBD#a>pVTGhR90xx;YVV-};(#^b0DRNj+5mkUsx*rp7kVnpE{?2>vW4*hlGF12(%|#3cYFluB|BQ<>HRK;s=Dqiz zBDN?vySmh4$MN_IuYTok;Lrbyz#n-8{=okSc=fG6hjBb4;Irof$dN@-$Z=D8SVaRO z9>z+50FlO+9Z)B@Quy9;kI+d`P&tl6>W3r$w;aCa=_;Rv=w9*xFaxOeSWc=@K8^q* z;H23~R0Y%5(%+nSMMZ}2G?CQMx)v!4+|)c^h~*4+9e}$f;l$36DZv8h{D zl!JtA#q~$$CJHRl-7Mse_3P*o^_au^GX`SXC4%y{yOT*nppM0=!I<#RgxstWz$Q?q zLZ9(ROQt<@l$ZyancD=p=B^DESLHmEZ<#AO9ZeMRIQhW|l0I&c_l*%?&4(X-gl~WA z+qi8Tmy|~A3I9EkHIeB_S@G@<{pV-5R7IbA0{L+8>^c6_Pkk$k{{X|8U(v?{K^NYR z_}O4;9LyU7Jk!1=0Rw>H%kMtVJok9`2pC)13HUw>RV~#`%@qL-zr5mp|w@mSHJrGo>S{)&@!4<3U2#8U()FXtJ+^LD|Vr?W5lM>F=r0U!VnY$ z1DPv1TXAr8e67?h_QC%y$3T8(s~Hr98fI++scTIHP!yc4Px;R=9BpYh=#(JiCBjfl zRdzsjwZ+hJ44Lky7JM8`4Z_XJ*)xF`jYn;28Mrz{N?rz?es@Q07aI8h+_GJJPzp}e zk8F?H{9ZX$r)P2QHkloha8%lBP)fz};`6w@`Uqgx;%;epyY2DbO2sMMXH~ruWr$~& zV^p}iTyQ(WX^QeOky}s2-Di%;pNo44wUUmt*4!Onau5pMY?r%- zB;sVy zu}du5n(c4TKo|u%5|~(svi4)+f;QKa!K63@cmLKqG`g>O1zlUSB0@TY@}vt|epXz; zM2Z`(>w>Sn`y4;_(PxoN!Loo^TE({aC&^?{1tWh@Dx{7g^rx6qsDfr?d|yd5V@(>* zZh^Hm-bs%{_&8!~y_RhjE8Kb<>Tt$@EeUaF0vNkAltk(|8s-*b&pEhSE1o@jhR25o zO2Nh+4)+6w9v!9ZfCiQCy=V7$vmNpG#=)X%`Z!Uwk}e2751m(NHY0q`(qi+w{50~6 z#4`^16+1}l1%2N+bgKP6@);nVBm!5Xuk3v!lB|u{GZ7_?Ey0mU!n9EyIzvM;kTT8q zJ8NsP$j*W$)Nij){D^l)pPTapT$(2UtF0GcL`UxS@MfOn>`(!NpK)pRi8`Zv$DBWl 
z-;0gY?4OgCC52+nI*KpULh~&YL*?jeqvPjX&_u|EuJ}(|${#VwK_U z?hZfwH~%C2uHXBI@Du;>Uk#vzV9^+Z=Y55>1>568stB%)TdBv0)it(|OhEf)q^T4$NZ})>SG5o6m z*tiZLy75TRQ3Vh%MFwb07vpVX& z#l6h#=gb#X?DKtFbfwkNp7Xe#wk^=nz2_3($R8`E{6`jiZM{YT|CO&q0Rkq)2bDsU zG#-6GQj@Cs=YpO%T*FCyutMi+8K_F3zP~#p{Tkm@)1k*qLC>R9OCWzCr5o3x;nv;< zgLh%TWLL)Yu#wf*`GG3IaVCd3@EhqK`XE14f4@|CSy1X7`t}N=$3Qo8S4g(cF)2Og z0wg;iATp?n=opVKG+m`m6aq$PD)qx{ zdz|zW&41*-$(e7V(sLXg7(M1MO{n zPyY-55|G_2$Dl0dQdF?gCs@Hg=qE~md$!!6F3(WfB>=!<1Nsg7_7*rE;Qa;+Q6xL% z!$Vo_+}*Ej0T8>%#)#OlACo}<+M~KD6TLJTcUJ*Y)gx^VR5*g-xe#ARa_Jc#Cbf8^ zU(F$ECqQFJwaK$BxI1@*DQS*BUzQd3@4k;$pL{Qs;dE|$@7ZNpO3dIm0;IgG3$`PK za5OCJ?#Vn5$ODkJRhN|8wz^Fk`MGe@W_i!2>QBxj zNd2iTq?8NUHis9imv^x}d;+6drj-S?E?8T|ZNFhDF(5w3&;WDSzms`D-w8i0>SQtv zoX#>5NRd^jLn|?dsh~JP&Y*9lJIo?Vdw^nAmr7prTLn)Q&x}A+0yQR(nFHV_z{o%v z)0o9w_}GEZfAt&q zpNx=(;0%x~46KAh3+0&|OF_ywCRk-95o@6%GpTlH>!}jtXx~x_m{n*Dr)qQ>8R$i+ zP!Ov&5s#!e0l2?h@%;WCAAa&NQ7PlInXS+R4P$#tBxTa4W)U+J1|@j|s+KFP-aQ#) z!jMRxyA0Xo9XO+uWJZo)1TSQ>W1fDmoYUEZqGf zOk!5*XY7cMpfsH4c2d^Y3X=wD{8!q$mi%}Yd_d5E0+E^e;LEy_s)tj{T%D2=Ki%{x zye2r8eaT%KcgBE$V(|ST&qX3Pq+AUJh+3IIpB9(#yVJp*zPpO%!+Tq5?94E}A8mez zg%~l79G6l{GGoS!Ddnk^z)iGX|>|{^@7)r zTi9;lt`_%9TqDWuvmmDr$a0t9L|xV>8Z7+YvR4-9y=Ua)!PvkZC3!eT4l zgNpP9YnR|zt4MNS3<{kbXrB^#aJZko+DfIQFukI&3(`V9{5ba5 z?o42*V0OaHCbev$&%BQGPbHPv3S#-2>`iF*xqCTho}%-o#4Y@#_Ru)Y*_kF)LCQJd zc37a3Ypuj8I;T^eNbyjLKv2if*ICjAr;w@4B2YMWm#}L z4sP9^^Ejo}+^N-SEEVj>Ezf1ieIX-bLA=XI3`=dm2x?F#0vdLpxW^fcyvkpU!SZ6Fo}g1O7lcg}c)M6v^vv1{z}Iwp-T3gpYu zLZ(%TyA>?!irek+DH!H6ZvkzDcN27Yi=~mZXlF+FI>kJl9iSu-Fo2+C7K;6F&r9dL zt-1_%wcvxh3%>jIhFd@T<#=yvjlCYC@I5UCW+y;9X_oa8&w^&}R;c8b9T0bGhvBEO=7 z>+D;?FRKeRvYVbOCHV82eTlAcMO!X_h46+7mDq6-pXuXZ@f4l(YLj-CF^dIOt1X|o z3eZ|BP#dmy_d#QS`#MGkn9Jo1tr2MB{VezIVSjj)wp97&8{?<^AV?XDjz9A-ex7K3 zwXN8<$CUbRwW2wzuToAAKHlk5&MR*u_?J%Otf_&o$|kCc1!e;U$E=vbwE1#kqorXc z2QnP8{@7}i3o&-09j#P!1AK6K5aWruA}S&}=Q$!-DaejYW}KYm;Gr|Hz0EBLPBbI< zn4QWD^Myu0aGK|-Jx*f~DyTHP5aI3z2UDR+T!tG+tdNu8;2I4kpqi@zMF8m59`*zM zaihTu_%IAJl)@lkfQ=iJXeqzdxj6?twdlK3PoLM0aRb0X+P-n_Ck);Zy!#wzI(4#O z5nxp?X((Q_sodDj-&0u=9UMD3I60O0j$=e|>`YWpq8JEuojKqhPH==oa(3GN7H?{bP*TUBOeOlZcspCYAJV z7Dk(n*v!y*W|Dm?5W&?Aq!OJA!SS?*qfaq`yS3qV^i0HB5vVX_FO`p~gIP-ycAvd{ zWWu_mfg5-|RF<4bZRHSLC>?2OT4LK_<9*NK*8!j|vFwn~;Bgng5VmvMZjr?DkX4)X zjZ&y?ccNvirC{>`j0kQ~BnOgyX~6B6i!q&Nw>w9{$3R;y0g5pAgx~3Sb-Tq|q_9K* z{eYDe1&QRVFwyICMoo#Ds0_S6y(`)OoO#nMRK$k6rQz}DxyvbRrQ*62JhC|O(J{Jw z!kDvn1b^~#&2nj`6oNDd)<{vQEZTNHtE5S>JJJ1!c*bHbIz*vNq>a&$Fgb_KEdxIk z&sZoAr(Z`r%aK&gS=g$ltTU3pTAB17CHJFuBA^BEryJdek4UI}pi>oS;WDxG+u+=j zS)kqn`F_N=x7?n?qMJ0S041h)fKJk+wAvFVAwA$9bwk8o<_e zdz`Z%86&{iA~6>%h-{?DQX#l8dih^+lW*7^XrD5wGA3kiH zu_PnG9R*Qf-;TK?P{Iqa*zGfEY$d^k=-$nlGWfvb>yMf8DOnEyV z%cgq9dr{}U@$Mad?$ty137)G8;CTBo{V|x6CS6vH4qdP7T-WqrK0I#RL)4}y&U&ms zXvGY+JVULH{q`2wGCB4L(8o5Dq^#Q2iRj6PGbn%$xCiCo-~(2WE=&}`*4B|#U~Ey& zRwQp$`Aw~CI~T`3_BpW3g8(JzAc~lkFrWf~m{@ifMC*tk=Yh&t#qbz-2gqZ?Jh)0U zMR`znWvZDNsaaHXVqzO@Fd5}+2Rp)16r5$uG{o6aPVGzSzJr0&74W=c-yV{ZqGE4& z@b7UQwoy;6t{nYLR*&t5WaudYWpEZE zdh|Gdhp}Dj`~lLqq9AoHj0^`+YeheHXk)xHF?T-KLFGCHsA@seI7{idDpw|5LLVa` z+t?9g$C5h+l*l^IANbtQ#YdvH3<3{Zoby)83=(6+?J;7Bq7{^*nMVb8HJ-y+wWwLa z9TVZ0Q{pqTLPejcPUL%hSj@FW(ck;aO!hl0hnxFwbS2kt96mMG3j&3Xv!wyrcplpj z`ZrKeiS~u!(Sd?-oVr`2g!LZa07?jMsACuQ#tt-q7i+=GTm1Y0X-tDZYg~r6-ykcO zLW`xHA@&tajz*HHSt@1c#KKA1MpR_wQjOgJ?lS!+Q*IxwaxTnu8**cPlJSfz>f{?hJ65+>jkwR*v6preI%;dj%?f!W$u}L&zZXkG!A~}LG;_Xx9J>Pj?weE z7SW=%w^{VSpMrx?bjj4{D-nacq9ew$iT{XQ8&Q0Y9`EVnH;<&CXqJsaBJ^_@{p8n> zMQ7wkN{}o+J7dpqz$j(`PewydIC>NhhMHjEWm$0B25PH0%b_AoG&cG2v@23EQA!l& 
zc=~I-UU7STm~n^HBIH>NAEAxb>V_*e&F=Y}(@jgGK7|1-RSC;2y&NvD@7KqU-W^ila{L@L8#AyfH5lQhZ1kbriNqeD-!bpRX zJzCoE;W7IeyQrd5^v>Gw&F5ECGkp8aBW@IfXtk2jgPFrI5m5djVD55RL-0rg@Z(5T zRAqw5GZk^7wgCE$BMOyLS9sqdmcr)uGQMNPJFrrMI>+KB=)MFE7$@508Rz*XC3Ltf z9eCi5If2$Hl#RBuZ@2k=d8Y5b`0Mbk4}ThfQ8Ig=wiU;A0~95#V1Ubk^YkFWIm^7L zWWmy`VDk{3sI>}D%@}|517(Z>?+>Ttd13s8iE}0Yf2kGMWyuO%>FenkLdkM58BS(* zVVG=1A42U&Td2zwYpcl>PK1CrDZOVcdy|}PE>{YR352^g%r)`_W-Cg4hFT^)Lan%B zM0+6#QbVoJfwIy`MSEQV;i-(BlGdOZfGuSO8UUv`521+scPUv=7y{MQ;By?{Q@dG} zazAKDk!-qN{_u1jgUO;hk*VHK`rZQ)4^ZOZ;J@9|iO4w*C8zE)IpiR6&>NV5p6Fob z}Y%=owK7Q;-VT*=m-H?95bv92ons3Xt6~O9@Il+ab zLQR#cP9Mq)wZ4NI_ycy+NACW1eKs4 zN_@?Is`Ku@pL8|EBbI~pZ zvx2vKKXtQN)SeJds}}+mQS>S(mlDDYP1}@NQlVu3GUp4e#Af74omwMi@;;P!#dhO(_PvMQ$8Bq$#H8TB1JvvN2+9--<@v6)0Bxso zq2#VR7b5O~qM~O44EhfMi5O1?LnPP^%d=?E-WKEXRskAee;Vj*wP2;Jth3k=ExGXg z8WaOm^aAh^GmJjxOU^}v;#MDnb4;NyHu{c&5uHY#-6pr`+W}y^Jwh{Wigo!6M60nl z6(}Lo->5vM7?p$h@Z6j71mtlCJZ$ol$BnZe2l{?X-|~dMXC>)-^L*$QLep+6>9_k$*{Nu$@vb6)gZ40GXYZvGIaD*mw&!X;-$%%?}(SlFB2E zmj7ENu>bUhlX7cpD_-Y2WII!FnX8{P-SMY4S!%9ZxCIai3F&`JG%J( zrvq&Us|)IKhw=Cl`*yK#8VXSXAyr^;%M4xTH9EV-7<;1tf4@%?V)yLr3U?NdqjbxS0nhzmL(S8;W!YgG339Vp3&Y9yn54NB`65P z3to06$P8MoErH&5NrIA+2cY9rP`D_V$U-=B{~k82fIRA-~}U(8IDbzk1?X_ zO)jRJOGz!!cnxE!Ky?HmP_rK?-OZdZJG&^~jYf*D$AIBd>+fl%4)h{4fHJFKK>9qhP7 zBFeoaWDQ9E7$|7#dl>s?D85e$R?@Mi8lXCO$>+x`;-!EWwpb&nl`jVJ47$ADlgyRi z;)qT58*M9NDP-G`=o|D5J&Xs5%aT%+d;3q);LFh z_3eRw{U?3`+W?l@unE~?9ul(nx&U=pOZp0>xU;Z}`>Sv~Y+K|7dGS=(Bt`zbpl?zq z3F?$6LN533VtaT6AC#nb58;*++F@>HN_8W*x?5K`0ief=?(|(lW&~?V0Z==HmHnD?J%3$F<=>zAk z-cn;e#gtgWff3L0=z!+oK7RccQa@CVsp4NV2|9o3=!+JWXPtJTo~IO^S_$;?{<7j> z549?%F`TKB>KZ^vG%A9m9)I3S{QEq|6UF_W2r^tnym5(T#V8k;?L<#$U$?gqCB*@% zgq2W?WYTnaaK|sp1?JHu?#B(_oDceI#yob_`iy6-YF@-q%EA{^VSd1UPa9Ep69D4? z;N!^7l+I!Jhp7}*t)$%d@PSRn=bl~1WoaB@jrU#5&|1NII;LcXXWkjD6|HS-kj6!eNu%U5epz@4mny$_+v0c@Ek%lP1K6G`KA6U`c&L; zv~7tLIg^ReH^3*=LIX6D_I0X;A<9?D>x`~I!H16rWgabBj{6N?zl5CM7_s5HV4xN~ zl-Z__1ZZj%2WlMOt!>Xdn?z;8;RMo;yl*)c{i9?>%2)cniDb>giF%z$t&ooqgU~)S z=ww+L2l_P9fjc~3x)3%4Wi+L{0}cph!7<-A(_Cl=2OkPzkWJh_p{o3x0>E!@k3`l; zwKFD0^Ow(`G2`;b_?+wIk`=dFfm(<)+mI7U9Iij70~$iiW+JO=K;Fwt0#(2{LZ(s* zFj^EQTgxO^PT!1e#zF_oyE2QG^|%2(dHT#QkVBMN=&_%~;--&|rEqAgkKEk-w0PBA zM27~?Rg`2Rlz-2&(6ZC-%W=5H8H}z69S*lUOE+K1RL~`YAKO}s0{654bYud=CptQA z+igCV9$0B@As<>>i zBa*rE`yCOCZ%0pG)Jo+LDXo^pn=_Hp&y6ms(8wXiwK1{hyH~N2>iVu0{G4y;l93)% z7NW25ex!6}(pPQH%FJgzlLjf1$i@{+q7~mzmj#WAg$hz3CmekRnVSSOip9>Ivutg2 z4M)nLp3kOsGey$QPDH?B)5ql*CS|G^?pwo)bqO%TWvjZrimfX_n`a?9MlBcjDG46# zF^GvNr>qkYb4V1oaNtXKm#}kz3H)5Ak(5%<)=T`%a9mr#?dXu&mX(5Ky+X3&dhbDh zh(s2CBo(66pys+2v}NJX@Vz-zd}ue_Lt)NQ&@La~8$bT1ae3!QQ0gVsDCK|jJooNY zP6{6o$K>knn94@pnp)0{b_k+JbchAIgWNe4S zN6a(Qy<{HH8U;d}Gko%iijfFFM!O6|yPKW7fgwPHKDu*=wsUXJF@gpyIxC<=I+8WG zqnhKGNV}#00RR9=L_t(i4I35ua`J@5K6nn5%pwtgCDw$>L^;wu@{JVp?7XyPMQO2b z0%rK;*S->q{A$JBv*!>%Wo9UCg&BfrqO)uQ{o>z^g_O?zzHNB*D?g3x@lC8h$Ujw- z2#_v-u6UwLYe88DaNCcdLb02p9H7)Cl4<86;?d!@pu2J|miSqG%-IsrYjl?6j~}-K zzx3h9@ccP;(M#BRo>mXL(-K>^C@|n!u4vi$1<$=UbH`j{?AOua;^9%0KJFdIi1T%L z#Pf(p5!bsTC8%4i`1bcci8d?Gz;KUFApP*cXZX+u9tlX5Sp@eV**zCXe=w6V+>qdNGD#_ zOUT*zz!-sEg!8S5;*RVd^gO5v814}a*H-bkZRvO-XjP(CQi~ETN&|>EB>~aqNeTy( zLI*fx8!yAm@Ux%3%!*kj!T^lA0Q~@PQcrM$lfIPFzBESVl7i4GIw>KX9Wm`l;WkKi zA>=?~V2&w+{^RswtrYa*k%^=dUZhrdeE&Ehe01Dw3+Klf5S&#Fy2r0TJtU_lbR>dC zZ56KJv=Z(ss5nTO*|`%%!6p)NA5;Z7b_^CBg`XX5KTkuAcUl-MdvXdNZ?_CGdRP02 zGmKnL7@d|#?&A4q$S_LMne3ANP+)Z;n^Kw3 zy?7?Z=di$ph)*okO?a+!jjlp1hOAX|oEYaZH{=t^m%hH#RkR<14~X}qmMVyH%f69+ zR}_}DU9fKt4E(1gOvVHS7oG_^i(TDKE2r-Vhav}HLED2hz(aw_iU$9+&6{$i1hHH*HL(G 
z4pK~%01qEfcx|m=sSSG%+UR<@V&5Lao=Tyr%Sc2W?ja2tFfd-M3tsPg?Bg&ab+ura zBRXhNP)Xe++>f1fB1%?p={f;cSGx;_{ zK?yn?q*~>fEJx>c?y4E?xYmlD@A*3qHyqtDJnYdpe*?Y;Hv@dCaSe|h8dEJ1I0G{Y zGTWMO*GLD1>lb44X>|lpqx2Pxzx62?b8JqOv9Xqdo0p{J0zkot0)Vkg zsOs?Wr%oT-*+I$$t@Jgb&4K5)lP@on)&U&*4j%*0u2(#6Tgn}ev4^h#K=G%TD`7`| zcL8wm!ft&o%9VW>pMNaY6xcU=%l#NA!y_T>1BFBE&)#_#Z$A43+R!e5u=|d}qWE&T zL*E|ov%m05X!JpE-@YOE7WUJ*`TUW4w}6AlvwqFQFFG5B?Kt3E{43ff5Gs+iEs$iZ zX0gl+w`~s_X2Pj`ITED^`&r;{G=hQsxS{$^S8oY0hF^@L`yicH$w(%oC4Q!RkL;hb zg26(GBC#zCj(wjAF5_gY(Z#KB93CtPsg917woWcWYIpxk)UlK@%hCJ9geqeACMG2eG~;F z4p)2yib+;I;$COo3;K6WdF+&GSL`XqnH3-KBQ9DU6sLM(el4GQHA~O{?kTnH1XToX zN*QqUBMX`k7brnThVnP9e{jHpt~&_0brnxx9QcF(_QIu zTn1ZG0L=NonTSs+jpuA4?JHRK6~7;){5PYd_hZ80~opVC#f*3wDp<#tk{+u$V%WX1GoL)b%gW!)g3zx zS(Ub+t=FKPa8S~_mbEd7kX#{ca9P?D4fuElp0))QB3UVT=e-Z``qe8O zJLQ6zIQJf1JIPAwI}A#kaQbY%5AHdDHGJVmejC2`6aQZ(7W4gRn`_KS zofn-qq5C-)FWQERd? zN}R_S5k|SnGq9Zfs3CNAXrB-Oy&*7f?VCL-A3GiU> z9<+g03YHQBu6?Kr%;$HqlgYWBV3C<;1bp9$vBYuEm*D(xQE7n>$>~@Myk>8Lc0DagU#Uv*SR6l@*ueirapJj~lK_!NcQY z*i4VPhyYLITPsStV7b14S;O!O*b;uQZ>ZQX`i8D$x~}~Mvi0Z!v@-9jU9K3%fnhbY z`#G?Fd;6Ne$(S8?cG}O0a=s7!J|gL^HFh5C$A(4_QXTy)26&!i_fHBN%~SBq9z$~( zMtRt$?^D#;h4hu3L)B>~;c!n0ztS`n`R{A_qz{VS`-hS;x|gzXLfa6S3GV9U~+qgrBnUW3Ksw`owB3JpEPUjyP9N!2$FV0R*s$ANmLX%lO&<;7_1oaBh%A zeko|YXNOi6=xlqARXCFM##GW!jLNTyF{Px0zxLRu;5+)DDy^ZkC34GtBxuSDlY4aYon9rk(Gn5n0#0% z-Y6;Td;zV+KoSi3=K%0{^lZb*fy?+cc&Q{>2HmOJWu`xy^W^#g_^rm!<`rF2Y zJYSc1tnt!?*wkXlWmRhqHV4Gqfj8Tc4|qs+msG5BDn7DU0`Td_-%CebYNJ9zVJnR8 z*yMZ|FNp91z@7njKs?pg+ku{k#`5{WIoHGIdvL?HbE%%6lCM^SHm9mMMF2AS0|O|~ zT0PM_Y95IDXV3EhDY3C)uv&>eA`irR{{kolFCRm~d6z=~FdZ4YZ9|6Pka} zUW+Io4hA33Kbyf|77E4UJuH`d+`spEJbV8OSnpq;Etg2L*-DhPWTFmW-_NQemUb@6 zhm38CJm!z5J?O(8-oD`hoyl@gqfXA2LC#K}B~T#gWc-7${~aIx#J?9wC<~t28kWl) zF89yTuJ>Ff7*0CrR<1!c%i-hmj8q09cc9$C>a+NL(IiSdgX_AU+V!5@mA*ex-6R5c z@KGl+caBKv0+iD7nnc4XHB@1AUS@KdgbNl;j-?oy5`!@US*6`XQ5FfPL=n`^jv^fR zoB!nRK0R+L`*>`JUs^2Mldri@(FlfL<=;q$vhcy*{?^}$V$y)xuAz2P=`-tsKpPM>-FvC073zkaCC@5Uvj^pUK-F6&%Y!N@t;mw&HKP4eR ztA_vPZ~x6fyLIu7t1KaW!HJ@q2%^Q={o-}O);qQ%3fZNtY5VF>=6f`C3F28l9(@K- zomZV5ojubzi+_$0ovnU4o3&1^bEYn|>xX^EZQJp7+p+gf0SE5IAiY%rAqL0o&8NY) z&NCPPoaqkoOM36}JB^){zxDEMbSW@kzT>vN0!GXgKJEvp6;V*1DW=*D~1{QNXzphwqU}-gY=Z|kuYsE@xAAnSqll-iny6i>N zG|@cWLlG)~EKzBqsOw|kjAyKNuepu284Ls3_Y?VF09cWzl`P<%EWB-%G;Ry!t09nX&U3f{;7o}F7{>-l z)(e=>&&2i3duu5uFu{R?0v4mII0bw-K6h#9mvb2{-~Vy+jK>ZYCIkI=#5iJSSUnXU z%2t!2Px_V$&PvKpDTPaFEx`#DTbaDOTv#d}#~B(c zLh|Wz(l>IssKvjJ;n-2|xQp61si-RYUcGsP{ZQB5(+iq7Z~f+1@cg>sEAPF7`*lfOe?1`Wj3`Vh#aC@@?vl{LwV`&MwM-Ve8}>e5 zuo41Qgn@2$6!heg#q7bUDurHZqw2+!@pPZayz{}-7Q9>yeMyla{{E3wd2I`I{g zlP4!$dAz(a2JV*@{5dOdZhaK$R%~+UfH@$pYm?)P0q%Hdwa&SN*d*NY(2J7A6ai8& zI3tn9>npWJ@*Qyq*5G2499RhynUP8 z$#nJ}iY;+SNB*-vypGnQA)j4kB4{T5Cp#!@7B3M{m2gaomIL9e8GHvNfg$@+A`3vn z{y-E?by;xt-skZHzxDUv*ZufEiWgt_8kWml3~6Z)Og1oIEuDE#;m%4Bax|F;lcFJ8 z(k~&aeGKfk+ercury|sz-n%0c6V1QqEGa-}h}(R?a;Z@a-(IhG`1}{Yj351-{}?{_ z`me>ZUNZo^KtsP_)R1>6Qu7$WsXjSJ*;@>U9_Y94V0-&5^y7hZSO@k!2I#c|M1d#@ zN$vw8GG9w18k0jB6gua3;PmVjv|{NTgMbzM;P-zYw80omkQz5yV+3VrXSYK#sqT*Z z%LTQC1b4H7gM+)ttz~fA_5(li$NprVE8S1nf5(3Gv|%fjTbTz7{*iy*Gch*L=2{B^tzvMX*%ioltjVV%&-F z-fp){IF;m%G17MD^2D&wkkXA!^_j!{hA%dgjj*qd6gnk1y=bA8Sm;S5n z!eGhmFQwq0{B6G(zxTKN`U#XZ$`qIIaR(RE0*;N;77}!GE-T*s2^3>e86auFb{sfZ znD#NCeGZyE+k3~ecRq*(d<6DlFH7VE%ev-lkz#J4eZ(H3j+7MEohr66msE`X-UF5H zjQiP4)E!J4tQBjm2~xl%9qnl=%3sn-EDCz#%s{C%6gJz6y2RYyz}%6f@_wo6;>7#W zHApE{Ig8N`IA)Hd+0cB~+2Nj_6O^|2`R-t{TTcHbaOnf%IIu1Y>T-Ev{~6DH&YgN* zmw=6uvk#iBQ$aFZGUlZ8SP~S?%&H!olDRLplHC1gQZvk znFesUogy|q0<0YaEeeAJu?;f@D9wOZk2iep^#k5)1ApW{`(yao?|q7Y<1hRtc=Pap 
z4cyYJMQ~sx8CFdG`y@B`z-x2?5K1)|e|E_Rxl*l&?+F+ncwXzPOQ&6Zy~ zMg*o(49{P@z{{7fU?q~g)+&0}OaTK0o?J5TVJ3TWesl9~pYM@}*Q{V&FL?g`7x9Px zwf{c855Pb1`+gE{KmHZeTCp8Fzd#FjEbTxAX*x6gyG#)Vccc;uOary2cgF{oh#3#_ z@Zi&L`#$YYhXta1>FHWJw)tyziHe21UMr5v-93KnANXz9zVA2VyFdG%qK{7M2$T0^ z#lCG(y!U9ES8EVGKuLCKjYD-040eP2I6`I47|lZJGA>XtAM_cJ-7Z0~ve17m3vh`i$*b&8E0r1tk z6+id#P0k?=k9M`W!wN8zBv_!Gm1oL*QUFlqocVzP_mFChnY##b*xsOw1Mj~7 zIefN1Vn6l}+NdQq1&iC?57L}C`0OKxEd>rlYEv;#F#Hi_ll)><@LKZVP8)V0_eub` zaSMDbsO2@w$CM~4W57S&P2s}IgW*j810fGNFsO_M9y7-=Ag+I*WHOX*fnt5sbm- zrOrNR58Z)pKEK1yy?&&faV}^$ya3y_!`!i~D+m3-Wow?k`WQQ`1ocAcVstPA+)CP! z{JCl4dpvJf@`U)@SGNs+^pF2(Y{!uW(!h~mkfC5B$9<_KsTwLkKs&3YvHi7d@OT{nVYYg53z$no%{KaqmB);^uUx%QgdD=|eho~fyD0Y-F@W1*~e<2Fi zx{#96Nex%Do`t2{>Tzf(TwGJDhEg204%`m;Y@>8^Ja2<15wSGTToHxD|kEHkDPi0k~Tl-qPtv zFFgXt-%0-hfak5?C6TMr?t?pe`W?_ z+~L)SzX0#C=u3`$U?7bj$U^s=@(T%6K(-T7*Q|sdy>oseuUjTW=Y`4L4V8};34sI2 zDW-jDwg$4(Aqm`(*OZ+BCSz}2zrvsSfB(NQ@TX!EZ;ye`b-m#6@h!&f5&DuyE=9Fr zd)jKeZgBa?aO zJsf=&NQv~ZFmIJR2K4ehIvE!O?pwvHK45*v+mC+{?j402>*E$EDF>{5M}P-V!~xFV`kVhQ{KdcaH_*qx z#<+6^iIhZX7{2#0sZ21KN7C;e-SN>4@IF---Ed`R#?Gh;C;C>sbn>;rh*_Pj&`i)nV4qQGLH z@fZRQYO(=i&Q^>ua2$Jd{?t+8sqTGGUs5^m;~aWF_Qx!0_O3le^qFQDb<)<7IAfB( zo@lm@lXR*_a%!??0Ie=4%YwddnXH->RGX{~2XhWHbO5b3+<)N*aeeoT`0VHYBOLG$ z1L65?q{3F(i~`tl_X6wPGko^lM_4Jid+{E|_K5B6OE@GDBU>Hm-&MHRQYfQK#ltaY z!6d|z)!6BmW5Bz^EILe*WCsj&S#qJO@;am6*iJL6AR|J_d|U6ZJwBXr!ZZca)MrP? zSlpV&b8X8CEO%&D(Lwdap{?@#`=`8ec1la^ff7iX@4qB?rGm#jcAE(BNp_EEr&WMU zi&XazU{=iHFt=96(1AAx0YL&`tu=hm3O@2s(yO(h5VV4+!yUln*eRt9%drCas)Ngw zM6EukA{VM|n6Vg0P}siz`+vyI4A&A1pxhnzYZMYot@!%;@8H?eu(pDq{LUx%oR~bzap<#!HiFM$B0x+$%}~mU)-LG% z0YX8uh;${O@q1P)j%~{;5@pZG;RWqRMOJJg4yUr-J;VC!U6>Wz-oC>A@Fq6KvV9KP z$gv-pP=^yK`5dhnRY^`ok{oNegXxrMtB!)xEzmB4`hXH#Z0|=Fw6)z~^vARdIX#2t zK(+A$oqtE2Iq^U>=M?h^okL|(7>dv9nycbF0~7L6BVlwPWh=ATq{AJgFCNFifoCCB zv!F=F#OEONEV%kJc@lp$lD&ymWpZsLi;MCApkuW* zSi1xIBQTChX~BH9z$}0ZSX}X0mD*qk>X(m!XKO4K%Va{ntu46SLM^FUsJ?6-XA+tx z^N?qpeDFz68tgdKwooaoqSghyA2^OJ2YTIsLNyv6Te>vSu2NQ$kF6NFym>wY1&0!y zQw#vVBT-kQI*(OY4K!FuhNyi4h$TEgD)L7`oyaK5`MmcvT z!2N`LD?x0vlGep}A>F(w8L^CV8Ea0@YH1*w+M&FcUPMLRab+8TCBcu%`u z8Q()ZB79>Mg>k@3+Jr$%8CjH;@JXdq)XNK$wqSpF$$|KhF-gxTRX7o9skU6AYjNzb zQn0QodLOvm9z&$adrHhI*EsKq3y>kY9)q&)ior>F0-$03r50?W>KB8QG8LavCDJ1< zTwBHMID!%mGGSZ_sOkuD28#uC=6Lfmk&Bh(--}eNEWiZ%tI>Ci?U9OAk%ug$Q1ad9 zd*>Hu7P6ma3~LE$ynI9YpVM;yg4U`TkECGnxk+tJIdux5AmY*9pNhN~?`LflxBWnE z4Xsve+iikKCr~N!oZ0!lEGuq%sLO^B!aP$UY7#y;SOr;?MUe(W>-~tfwR_N=EN<+2 zx#BnueD~!WI--GBw+&kdf_`%3ti`&nc(~p2a45i8nS+RO!fY z*_LB6`JL~47w%ET-g4+HP`~|n14C^0Jm_c&TVKJCGr13hJlf>%KS3D~JoB{{+>Skr z)IvFE&OdlM-yDJld59-9k)Vs!L^Fm}4Bz1gjck1n{q`wxD3Q-bd8q>4!bI&{uw^sR zN1&2mhY2Sn%q*4^afm$*n4J=Qhk4IxUPZ`EWE>;4rEx+m&xxO5_(?&9&iQ;$v)e#0 z17qMYgDJUlkj}kJQq@|!O zv0Fj+nfG=|Qms%j)qvH2&fx!SX?S(0Q$hs~AIx-EB=lJDBnjz=8%}yk*#5wLUguWz zv+D(q+m3^jr08@`8V<~GSr>GxoJ;yU6I40rQd=P4nEYfwoLOxiX2Rgy?(Qr?mlc=i z@8aRJk23jt*gI-lfYLCIEfbP`yQK{bb37cRIvG+vb?>J=MYOBY51?EzoM)FyAZv^F z)Rn}Sf@h!mA-w+ZClJz%$oZ-<=DZI;Iw;@OfRHRdo$-0@VCo=G8&Dys&Rp!8IAVVb zcbF724660XU#ztP%Y6)0tE=YWXNS%ASPCgm9y@D_8Hzh948=M^89un^vS~AMuPuhL zJwyfePJxcz(csycl<FmvH;^FJnCXJZw0)V><0Q=rZ*6 zOtASPqJzaG!nkF5pueiOZndny(j`+WfJ9S_@${w%t8t~o&j*wEUNVlyFQAIf@% zixL(a)a4&)4U}EJyiO;z@!F#^{ZdOFMsE|ah|hFD2Bj%|HTY{`&T|pu*!<4 zQ#}e+u$y49cQy}Q^=+KIXUEAu%%mrp#G_n?`#5nYv>u= zQeNnk4==qR*!B&#qhq5=Q1@6WwH*guJwD=a$E(L1Zu^m{3!xGag~`KhqxN>(Ko_Fp zVa%G@$Ondauz}uoJUqUh$q=h;0PiEr7nqF zDWxxL!)|?Y;tEuC&Pp3rV(Cn~ ze7>Fz#aVReun@I@4o(=qREGdZKLR<7ZJ6`m>6hxTEYg^qO6f@l$8?CplQ8)k8Q3AS zqGQWvj?KpmIowhNP2HDj_ z#c1rg%?tS~M&B`xKttn?jEU7?-fNiv#%zmN`ICz19(07!9osQ5(839ilQir&*&~w= 
z=ldT#ym`$4j(wx8h281><5bkf-;MM4a2(uC0X&W|Rea#MTeyi;MI$>p+5*0hIglu2 zF>Uw_^_fT-Iyt+yZOephz^84O=z0|kdRX6XMAT=S-K=0)8mv~-c0s+qgL-|ApeGTP ztX#lqA{ENdcy?*W7#PQn?e!-_;={LTV7gO1sZlVZmO_OP(t5^#dm#SDvE`6?&w=(y zdpm9q(Je5w!JVCtJqKyc0KEKJ6o5NT;(J?(35Qg$8aqsXN2*7;?2TO}+QJwt2z}na zq%eg&xjU?JAtjf-$%oY~sjJr#yxfRanSgBYS+ATVc*M9@v-=Nhw z-ypg}43hS=;l|>q3;jR&0u{AwU3rbC0&k4miv=LL-pk!PkU~>60eN$#&n&1frLAaf zf#t3!WSn*nfe6LE&Z5Y9AGtpXQ(fuYt(JtR5@LABN5`zw0d>8?u6JOJ^dGqyo=Hof z+lu=!atOrdjlL7vRsv)=U|8;+ zl!FQsVlsu-?mml#QVK5DyDXf}OGWpc%2oh=>6d;v6Ua=!8QTN^D%t+|?|ct$AA>&C zT8$xT~OE1G;QY?asg=Z?hv%{DwSEhYmld9zGGHbpzR3G`dS|ZD9 z61*840{S*GR`%P&^xI->l!9sndmp&H`Y;$GEW^xd0-CX4Z~@8%W(_bd53~}S*_lu% zEsOlLq%G^S&f*-JACYn`_}#Esq?Tk%JeB%~MUmzDKR(lO?77UiR@!)8UVLX;E?DoL z!@m5buX-yLZM`G67+AC^l{Ad)2qW+AI0mxFm`E6<7L)EEYDS2u#XOcf?t4h#`Ft^wK&j1-! zamfV0J@#buL{}`)1tA*Y1T1MbDimV(2Hy)&8YODlRp7833XIR3-4Yd5Dl*T@f`mB8 z8A;zCFj5N)q9iGa)i9rUoW~f6G^uc^W+^+X0tEWBaT+q$bI=v?!5X6QX|+)$BTX}t zEgdW$J+_vQ(@<3=7juTkNZXmbOP(dG$UQ5y5{m=PL)A`Og#pLf8jj<@!8WNGo#}~wm{888O6 zeT!nIMUq_M*!LZ`u40gAZIRlEO`CFdN(rgUPiP*@GPr~<40Nhw!UHPo4fiZk!Usoi zw8G*ZSWX$E(Nu%IQmo>1OseqG1mlB*Nz=fi-Pnxc4*%M z$0yJQUYO3ioy`aU--8ruEIL!qmV(z0kLX^2<%;d$6^eHb%}27kvs+~B9=k_kI!4E( zarjz^2@9xNLii*S_l3o91mgI+F>~PFDgGI6Hg`wJ7tt?XYr(@Y@_Z|X7(9o%vqLJN zqJ$)AKekM!(m$1$KPhd6wFQ^fux+;hSb8L~VBw-?srRW4hv<%l=U*u4-VZFT2H@s% z(PuSd_tCgbds1GPWnoveM3SQTX2{=179YsMC~RC5&KgN9Jf%iq6?MI0xje&hdkyRC zfb!3kbgO7;+(6ycX`o<*=9!9Fda$C+&!w;+B@l3IFpMYjd@~j?PPK*x&smfJDUj5u znfON1CI OHY4Ntl+XVg06}kn)k5e{zr(LHsV0el0>2&^B;Z0vs+rl2OoS6uU>tI zja2c>Gx)RMLdzmig)x;4mu10fj@J*jv}-Y6mSTA1J}VWfwbUv3ow-fPLPb8yNK3l5 zinrTAnSXu`&t_%5W0cy3PyZNzsno+x%2P0`%aRI!;ge>v9r;}BfYP%R6GcFzd}}DG zm7q&1*CYM5a7k{?o(u|-Fb9h34hmZ=7MwY;#shM0q{KF}rot2kN$9E@s`=+yN+{0C z5-otO$8F2!rGb9M7jkGaUI(*gQpAE57;y68lmI{HT|T%c&ok##_hUa!0s~gi*4Q3z zU;ffpy>QS;9Kx0DAoqcdU5|xwZI9bdB%|lWl7X5DceYs&l3oVq5+TPU&!y72xEYLpWM9a3U;Rs5gCRwbza1Ca|vUA6zD?+F-2dzm3Wsk2a9ES(x17#lk{v?l;XQ=wtW_Tk&KTFi0M}3Vt8f+cr5LL<@0|p9=`jRVi2-cEVbfV4gdS! 
z`5W;E|DXS1$i^NAlal$FA)RPU6u?B-PS<2v07s%zacwpCNhpaZCFnSpVt7qiL;xvW zt>B^AsAXAEYQwQTrfsRXl_aSEiwxvhC`>AVWZR>XZK%ryOSMRboe{(etgfiFvBS2b z!lx}yx1Cf>{c7xk^<_-(lvs zUl+W2yyb-r7iEn$Q3H%Kb7*HwB&!H|31&PBy3@?O`(RO7!sm@Vui?W$zjm-iIVHfU z)&nj-EkaGhesad<*fUlpz`;VlX8~m_;w_VM?W~x~a;Lqt8vK4qp9@q97eqhH*)uJf z)#^0GESTck+!=KAjtWua5b8aBU|nKH!ZB^j3}_ariSnsuQJ3ua&?cW>F4%j=!%nJO z7GlVARJddfOrA#<6!*VXB0QEy zA`YpxQR-$khS|k6Ab)WO5;mYHc4U#t{2dv>?*h72&(D^?jHFl9QsS9S%?LZKuN3(bf8%XXrS3Dx=IjL{i2&ux*dn zwnzG|C{i5}7!N)#Ado}_DxKqyNgYgRv$Zk(8WL(1E*i8K7i-GW)gG8INk89U^qV@7?>M$x9 z65V9L$ByH;!AYC3ydJ|dQp*JV4Hg#aTmo^u39d2uEuu~4wMrFCT^*jm3?^}%2gNRu zPtN;_E1Jqv#bQR&5%fp>4M%X-9ge^In_ne;Yeb<+V9ZLzx~?vamtUbmrE3;8r;>B@#uW$XC-_P zUo~*t-cp!>@86=3y=xj&o(1$2fl*hyr(Z1U@Nx$7Okb6c1}tavB(HLwGtpUAaxF@g zi|#gpW-IEWm^F&GCcllyHH(Hr<5(?bX(Ft2F+>iSxNX&HJYhQge-&3GathT))os> z^%+sf7j$p10OOqB{r!Cic@VJNd(Ryy^BoSdhYIWmzVDmf+Jf!abFV_9yseVpenK%~ zkAz|0AD>V=6v!tAJze@)83?L_ECxH5<4Ck`-*c{GcAxwnp%Uc+5Sx>P)^=e1QBzCB=G?#pd9i9GBkHL@-2^ zLH?`|imXsQ{!E*&`3Q^psA3BJd#}S#!s&cQ0k~jyikN3fA7R5lz z{&X&MRMZVf)wZG8mpTzqo-e}$ssy7n4gkm53M#}g29ydEgm2o8vy`mtNbzJA3;@uI z;c#}{Ehr{!SwdOt@ga&V0EJ0icfHq;8x8chBa(d$VNS}UiY)m`70R-i<1~{ntqS?~ z156-rC50vydKgo!FuL9ZAZSmp3M&`5Z#hF#>eHpTkUxXE3kCrIXFpEoABjC?QlJD~ z{P|{xSx*RLoFIce`R{-v+>J9L=gv~hks*aPW)c{2Eq3b|6}N&)6YqFr@uEa6WT-vw zEpn6aHT`scVUf(XWsNS$woz0hD0YfRsuW~os0;K*hh8j z0R|Lyxd01V8~tiw^4)t6>L(&zF6$NJxRI79i;nmOa2JwUs#j^9fNZnSj%q1*Bz-59 z>R{=wYn?NvRtye;KcxayekVZIm{nzNRx1SqNUix*YMK#K;!VT?)c~)v`yRpg2_;QF z_dF;aJ}0P54iMDU5G9~`uLt3ry(o*#i zVAs@)v$KQoQ_BPy!x6KQntxhb!>tecz5s$ifLVAjsln+!!XFOWEM}E1{xd|s(lVdP(am>cqCU++?bsgT{qs;A`$2Rd&dFE4 z{&oG;EQ*7sGEtkIPE4HyxaUE)r1%74+_(u9R#55%R#y~j0Ac`c=ceg{fua_Bk}T*f zfHpDEV74H-FAZgRhPFOOYir#3IHEvd!CU2c$3$t}fvpSqlrvFlf7Y^6y_>&gp&X8D zU@g+;pU+wCVIKoaF?{l{r5{q!uaRT8>>K;Of8dK3WKKs>rAa&qu0{9Kho zUUv~MUct+3+B*xC>|l*k66#|n1}?;V+LiWOh|USKL^wrf(`SwT0QU|5>VNkSVc#Bb z^gR$}NB9MRE&K(Jf~}7y&%1YjI{y?0H7OX0z*3>e7=abofeA9ae_i67s+*bzQN?Hn z#u15?wypa>In8a&4#6oFl<3f1FIQaF6%y_cdU+f>df$^u*P%U2p{8W!xLX_UmXH$F zt_Y#4O7^vZx!62oaYQyo%IQv$`YBR%&`zwU*MogaXMBt85 z4)e)o_XHhiD@-P5A{4Ts8e;wJUwn`EPP-}-RCxcrchhcwIVXs`&7!j^s^y^lb$8Ud!0Hkm zS0ekBGhr&EHp;x4g~C}Rnw@^2rjM7;i^1%0FkV{3{I@iY@7mbxl)rKy>H*h+<9gBMO0#ejNl7&eUeGs1b`maZ;4HTG^c~f$Hl_ zYlcYi$WQ0+99N~TnBp5sAn32vMR5yYVG!jHDcYXwih=olo@`UkoFq7fZH$L;9y8@HDuC8jaU((%9Q1gZRvT(*IQGX#UTAHt zEr5ZEY(ZV0!;cLg{WHHG@Bc6UFZo&wr$KWw#8N>@W6EIpvQ?cY>a|>AE`yb77)^uK zC4zq6naGb!+|3M^%Zh#HqNeO(*~wgHQ80u1?5qeW&|&iY?D+e&%t~3$37CIJ)o>1q z8In&P#y@EX@p9*II7p3Caitcau4^X#P7U`^267;iL8k39@s@+&y;g23-bF11q;1a*Oow7{^))Pc`aoD$_l4iOQ=y8p55Q!)yq!- zwVVBfbe9Enc^3CD)Pd9GIo??tUT<62*0Eu272Ce2JDeRCcX~0TApsdo$EgSy+_HM~S%e#_fWbwhUBSdo!MqgG6VBaA@C?nRjIR5We=tL- zcPK_RoKq`#xZ}DsU^upMl4DhlW$ZrK?8k{>y0*D9#}H`JQVSk$TT%sJ8r~%;RT{Ll z8KlKc`rrsmOH#dK%I}KyS`f+Xx-QuF<4H#|zOJCw1ppDKL>>c#**Ubh)`HaxpWU`h zG8zZm!ynU+f_@H`Dk`x&uwZzBl`Amb=KFS!;&r{=$2RCOaxOvx&ae^#q0mf}lzqV9 z9CH!5!M<8{tHKv7yeBy+@{*M-$dh++^-oC}vn$Rts#|I)Ep7%bAdQX}Fz5F!%MwLT zjfC{9c&LCYTdkzpYS?=g5r|>h6Qw6bQ!G$?q2@i-mgjL&aVHlP9YK}s0wd(z;<{B- z1s!po7nnK|{v^J%dMaSFH->TN=qg|sA7Oa$#joS#uY3!jB)e=!?`xvIGpP{(Hi5Z$ z2g_2iaqhGdI6Q_p{b9F<11L*?j6f<->3puVbDar2J9r35mr`;6>^a^%JO&+F)T&ZB zD;ElcRu`I&$+dRcfRlyY>y9k{mJ)W80bgPQc4N&g|Gdl ze+b|CiNA#7@%8_oj`8d~M!c-3>m6+D^atU$r<&wQq&jBVEOs1G;naxIKc&0V*9?y( zblmgNN`d#C^RHnqJ!$SGi;9CNx#kbW{Frp-`Aaz<0MhBm2x%+Y^*Q#3SLw%dG}g82 zOlnYZu0(&0y;eH2QvjwFB>f6dXYxrn!f?m^(r`PD@Z%;X4>C7U=jm+kN(d_{V>4H= z-01GZ33vyUH{(y4RBm{5_s0m@+EjOqXS_Df%2|oRsk9)Z+KSSys5WBuV(f7}D(v`- zdv9wFhSR z(Z2D0Kj?0;L9wha@WBuLI37RzN!-5oGw{C09K^Aw!xs8>R=BCP#v+^jhMbyb@XIQR 
zT*E;mL&d)BL-g_9Pkmf3mE_;0j5PN*QH_aj%B%^ z6rc}7Tc6|M?I$R3qNuTqCEBVAzSB8s5WLn{l%)qBMOH&=72D2@-eVf9RBXhZu602l z#v>_b9nqjtnZ~?xAY6gWSbbBoet5n}XW&p^13&~e2F9a>dO(|}LFa5Z7Y3r+38k2w z64#w_x52txGAPO*Riwd;OC=)=S2HQ$XR#rNkUs+=5FW$V+cCMSz-+4zw>j5P7L5HZ zfLK1I)?um}(TUG_6+oz2I4eur2=t!?hzeaER0m+Cp{*}a)^~8c{S`>+*6D1_Kt-X! zV#+hwHJfT+0CMg|PEM32A?QInYiZvGMzC@Ek&~+o55#oc)lm zvp_N=q0LL9g9`5RacC(VGSQJUp4~IOSEq9ojgT?DM2j~7iLe9Xxrok3eD~$D!pFe2 zJz^rgAZIs%-%cet2ngYI=)8jdBi?Z!yPIe@RCtp7X+BdXYzpeSN7=|(Id2Sh2}{gx z2^p-sN1>u)s2C4?OLP$$1qK43Xf}g(r|J@{;Mw&VUT-_<*l^aemrn}tIg+5(8hY<| zwlq8(gFB0aR0D1&spu@qNXv6znBknLfkCoBvatobOQtnAXt|MmY{u~lQP?FecbVZy= zWfpK%sy*>ss(E%#L|#Vvip#=e zmjIHPp{Qf!QHWGg^yHY#NNqD9Aw{MA46Z{x53 z`+o{!zePa{U}2{VJSR%(tj5Awb>C`@xf8KCV3G$W&1%dUG|+ZrC+hll(V0qg)pG99 z!I%&tK45HZ>s`d-9$n9V#DcjkK0)whjeZT5I9#@C|A^bS57uYr&8RcHP@U`v0SQYC2Ug5wFM-* zfLeB|3xXXW21s<^Ek`W|)gi;zMh^AfyYJxBPd=t17pd9G!}v~)L;GMf8|U;LlY-UO zOZYVp8TwSRgmP;7hV5{4uNZbgS)bv5^vC~2yz`C!3}qN=N`etjS5XPygrn|M8a0u1q&Cp zL5A;g%oRsR>fLYKhH-3oc=Hi}!ER3=SW?>hI}Xk%V1#-_9Q10&8I~Q7w;R+}7kUxK zGlyh@p3_f%UQyaTtge8Ockbf$%fSa)oAf95$$Kpq%vle^Z~oCAMs}J&N||_kUE47o zclXa@@X-S8)Gv2rP{S!H3JO?By2C8`IN)qb4r#1>h~ioUq3EF`HNV1Up_r91=57Zi zB#|t~=fp+M_UVAd2KY>%*xDC`kO^1PRUG+!u#$mDt zOkH(n)){^>P&gr)#y3zYE4--F%0QyMCPCHeY1r&D6k0LZ@8_cpJ6a%sn`G`&Etq7Q6am!~_3_%BA!YLIzxafW} zu+!La|LzxpHedyJOKguXwZ?49dY3)}n81N-zm^7|X2LvJl&NbN1l1o36vJGd9ishUkwM?myA%yO3hh4%*r0zolkZvrOz8U!QbC;E zb|#A~u8$t-muNGLSN=Ktt-taYG7gDCy!9Ub-UZ^tTvc?TPXokb)BO)qrL^`==N;5DPL@;I(CCK9(*h--C9JM5kwoguHO?4bPi8 z4Ee~?u1{Tbcmi!YPwt+p4gTa$eLH7}kog7$T!y5$<9ez^!FT>uY!>i+bOP^^iWi`# zE>8gCQg>{n#Iq`q+-u2b??-m26uTvS;+V8j=h=&VPx(WDRAUT$`srukciA~h;UYc@ zDfGSn`7dVCb{r8ON^P7WF?8)J;wHk|G{tg2l1W1@^9jH zd%&^Z@-WL0oCjwBk5%)7OMVRb;0m2n&U2sw2>Gy58aeE$qKGC($ryV0wWjpsP{PWY z3?-9th>bjHD=UUAC^ZHTT?^eT^vIvDf&SdKjdL&J_;Zg3#33bu1w&gJWUGl(mx61n zSn1IF$!i8rc_wOl;ko=J9vhR3_=#_P`SU1tqyx~HYD!_IJ7!B9L_PBQADLJza0CA8 z-~6epu7i&`+m9Jc4V2R~QYczF4PDvO!#YuRN8z(AOr#oWU2}VUUaw8Z5b8&s!@@G6 zq%3$W6fy+LZI0zgK<*m5RF2g z;jVd+aIlrbi)vq2Dw9q9{n*+XF|T94Jtmswc_N-VXmPg3v7z?^pL>1>@QM_u%h{g0 z7Av}rOE_6dm2TSxIPl(ejUmUPd6{;iVntvcd|&(UOtgXd@4`PfRY3^)MlA*Fdd0Ft z0{6*nhvqpXkE`y20z~XYh>wYD6v|G7(1FL>jol<6|3Rg?fA#{)^+nw7i5eiamC+CE z`<8a2Le@#SKv4pk`Xk|AOcXgO=fne*Tq)eqmIX?HDtX@Fs~gUTLP1=EgxMuRI4{|8 zBt-*{kFU9PdfL9T%nklDk~^=WvgkFOKmq6Z>xlUtw3UHTva|Ev9EKnG-T!O^OCZ&B zG~XgHAytfQDzG_pyN-ufAHx9LwVKNy*B18Gw+AkRRN$XOz)Cz?MS=)IHI<_&$udKz z#Q0qBw_ZXA{}?e((#ODlbo`!Q^A(sOftb%ub&`MAM z6@!J)o3XP*fFL?59fsKWEHJ8+ind%b8D6MXDZiexKJ<~bHd0__$5$O%hb8hq{hY*B z#69Odq>mnHFNzU5C+S0qVS9YQ+qVyh?qbYFSZyeE%{i|Df&s;H_E0K-T29w}%E?coh>x5sevl!<7h?Zq>YwppAU`M9(t zckWS;(2nT0BmIz(=A%AH$s)k=Tvm(X>`(vY|AhBU5CNiL>1--q9y#yjKF_}S2ueBg zk!4w^UM2Z$5{}hP;e4&AQL>}Ux`witQE{&iT$Tlwb&bwjdXPA;$`9o8qOhGgjcmD? zp)MDASzsqhmb+!~BY9t#<=)@?jQroED!Bl+c@KO-Ge4kU$liC=?Hj7CMzzEE#Tt(?PaO@$rAi7tUPYf+v;c`vk3yq znAFL30)mDEpM3ATlkFs`tIzJ6kAW2f_vHk1D8`*YSNqCWzTu@Tv8MtCAHn&H83^sp zPy##YOqDd6!b1(84^2pLpgqypEXy+h4P)GZev7srm!HW|hsw{2cR%<7-hB4Glv+De zj^>Lg>9iaLumwxIizKTI9NX(iP|xi?8jg&MO9hL8tLK)$A8Mg_*FiWjZD-gs+)lTOsq=iO=pG4f~iuD zjh7$D1&5ik)d^%vQu8)y>n62@$!e%ANo$=5;0Oop-m#Qu#YzLZFnL#3i2r{|!otId z_a73ZbI?*MDndS@7-~>QDC*YpoQUg zNrzQn3m->%n~*0Oy^@jP(W*B}=BfbBLG#mDi4LS;KYCI~g|ZI@p75tE14{`yRN5`$ zLoI=Z`@wi(NlTN)z80F*5^|@gOB5f+7IQAgoVPLnC@nfSL(n$E$r z{g6*)$TghMzL17K`hlbGsSqa&L~3^O=SkzJHRR9P8I64;70a>!WeFNt-vMZuo1t%y z0Y;f-D(2ZQ4E#H3d~2<S*zs%@63VJ9Laoih-! 
zJM!s5|CyAt%PVz4y(2A23|r)4yEwKu#=xaj&V=&4@$+Gp&qe@3B;b)7m8wO0qU)aK z%jhfezXKC|V#ngp_H~RYGcQb z`(jF|`0D51$LqI`c=`5$*Dz-mDwAMQ@(1T#L^BNFdWqbmt|(=J_ebUkos@QU&Ov*e z1*EwEOQapk|LY9MFRB}HD3`1aK6=a~^*G;sjClVR^CLlVj=24myA{9i_Lg~wJoq{O zhiYhLVNH5DbDv7#0{GlTQ%}Wd;kyk0fVpz!IGDB%&jEYK%zfi5=Ivv6u!r_O8xB!G z&Ans+sB-K;IlFsO$K<15eDGzw`S_RMK2Rc=+0pm6K!1cEH)sp?@Y6uD1uCk69ET8N zBr5dYWl5@5B+3eC1BMPiUc<%)_nw8#a7xhfJ9|Iy*+<_Aiqa`*>g45|`EhX1c5FzB zg~7*W(l{h_eIBM}mMv`s-UvWCSkrmvTC@;IhgJ7FZBaZ(@x`eYvR*^Mz{25Q-uW_E zBtp_{G=HcSsurx0r=`_IKa8k|yCs(xXQv>88Y2^N@SQ}EC-;y6pwvcnhQR|>@O-IE zu4*bh2-bU~z6BT8( zuY+Mgpa!M{pJ+lq(%!-slODodFRJeGq{~AzJ56Bj9&r;|SXg-X_zuc1LhX765IK-= z0LJ|o{GIG%&DoMn9+e0TPsGIC6S)l({6If8?AwN&vbjna6@30qi9~jUEUY71^FmaV z~^xj*mJk(NZ>)&G9>+H4W{N@OC3sMhs#zXX? zK~2>AzNh^-_WeW&eK|uU2c?nETVU9a9dN4lWfB|v`n;*RKY$4(T0`>Ov_-IjSL}D1 zlo;}r>y!YFL{QDeyo7nbEzy0^DFe8nVj%5TRtH!SEATuYIdh2gp`b>@XUdUOqMKod zLo#Ly5`SOrx8V2#xnzHiu8zM&)f2Ko+T;369*@r$0SJ=39@#lXgx z8x;Ean&91K6Ij){uIp5o0?<7KR0axKU8q84ST5hr?^SSPrLoyq6pV26UBMpD-_8JT zKlYH2_er@4HNXu^Es?m4*~e7Ou-2$<^aL6Vw}W$a0CWJG2Q|w_N4FR*S~!!1&Js(L zlAr}B!hV@r)Tcv5%Cf*9fXSE?QRbZi?Qr(|4bDu$qy{ClQWscTQAw3iH3kCZc)lL5 zS90eUwDmRChi}5#Lr{B~xyArI$82U#8nfaVpEkXaa;ags@);S_jUI}RN9P?JSp`;7HE1_v z0`?;)rUwC!U-{@&Qf4AZY@{OTZcPQSNr41{sSx)2aKJ38e6XozR%^w&ED5j`0G;x+ zYH~Ne+qy=g3nz80SPpf5|M{ofi?9Q)9v)J7;n??#RWwJ*;TgnsRy3O#zI^*0Pxt6K zmfw$$EGUpsI z`A{+)Ju)%rJ_Z5?)p;oviA{b-?9Y#Hd$JF}Gcy5Yq>rd=jc4VYBqdHk+dbIZD`BLD z$8|+P+~1g8bBQWcZDh~b^vN#DC=-kt43{|bVOfVej^O!YSR?JSLexvmu^Y4-*K4pI z)>ea!ARCa>z=HRm|M(OW5FO~pjs-{bVRR1VO8Qk~$1Tg$O9pY>9dtMNYS%3+|W zWZ$0PBT&DW!m*K9gkusaFd*s;fINiP%N6UoB%KFz&hUYP1Lh%XDVb7-VeC(k=5<}J zSYLk}27L51TAhwcfkFhi(bJL5=h|M+n+LKFsR`tg00}1 zCi2C^X1HNLW|`T2q7y=A)mR4wC%j9epVbE}pL3jgctY7M-t`zGvV9LUz{gZ>n#Q!I zLx>;k@NtKa7-j_oPh7}`Rm#8832JSZ63P_D%b|*s47ZHA4&6j%>?D&Je=?5SL=FrY z6rCx8?ufv$pQUB}lrSQb7dw4MQYs2ndgySp$mixY;A?{?dHKU2P83!NU!ogCp+Q6^ z_N6E7W!rb;Tnz?cV;l?|okwT}eQNwvIbAtXRFEpZ|AX(dY(4`fz?isup7X*$_Vqe{ zRse*kMgVX>pU=}BwG-D?-%}H@VmR%X@8vR}mY=2|`;1`ANQ8=v)a9bGc=V2b9C*Gx z$pa5^Kmy;7y|CJH7H+=N)fa|)Vwud;SI^Nnlo(1*MxRc+$OGdQJ zyAEKf7616Jd=GCfA)R}vK@aUj_h1MmI7yWg?2vS13=Nd#dwl14c;iHHW%g#0H9RUQ zqE#x%F)Kg~s-UVBW|F;(wcB=zm{`r32-$QEQt@PmEmV~XgOsQ`)Ckh+65lhvisYR_ z422F|!BEarP{2%dR1Ox*U4U;n$mm6ZQOkwPoAHmuhNTpnK&rH^|h zU}I9eW^dVke@@V-loG=RM+XRO6ar&M@7ScO$zh-B^;PeT?&}Ej3Z+!+?s(n~_AbOw zKUGuJ3nnlnp0%JYYudT6C!+Us9NT+{k|-ag*d~K~o0LhtyM9I>7zA^e0`e&i1Ry~x z_1a`r-(whW=b8yj147ceQGP$lRO;y&R+ssv&$J`u^T;8c=JD zYbUV3vo~q)qA||9t1xF-7TUg3e1@o;5BH$$)+O&YbkF%IxBCVMa61M{Yq(yoOg4`g zz-c-dGX5( z9qq->A7h-HozP6npI76N8oinlAMTJ)0CUH&Z+N%Gki~+jk~Q-1d47-SxO?;)gaX1G zq6Ioz_v7^U03rLjgjvewg-<9WlsJ1L8E%g%>jprH+@J$JSR)Kj49Z8qbyaK0_ ze0Sd<${S9!3$5iB_bsMEk9*#?P)ccYQn?ZrQU`@{hx$!$=D|SZ7>@t@Z~YkWwjEpV zc)$0QJYL%ZP0+ozpueccgqv$8T~vMqa~xjNM2WH;^LJR*OJeEjsW96-*_dNt0j6fBla#m~1j*|T}1-qB; zVO^M*I1+f#uRaEP4&X)lK2>Iq6ELBkiDENUilw$Z*Ji-z&*=MGfGTeauC4`m{`qgC_eq_RE%!l%E(Z8>wP{BH>|>xia9LV3 z%JRQe<=c7hrc^1?&L#NcCxs~{Z-$A8zs+iAti{upKbz2KCNY!R7RlVSO$tC2T>Egm zdALIThf58BL7y3ON!IAip&x8Z%Y>$uIgD>sHhE6#DY0REvn&_h`^I=^f)QzB27NrG zsyol5hB0gXq)u8&feitxBdR7iQNOsBrBt+a#r5*Q-Uo{H`0NW(A4w$$5cA_^efwfPPf@tXiC^?1!*!r3#Vvmy{rRS->{pDK+J z`)vuv>6ux5ZVT3Bc>!JuWS=qEIXR4zv9`u0ZvmUEpsOnFQuv}!4tFBLk4RR=7ybiP3vk~VLFN1PZ(jf5y@Dk1{hm4hskOSEF zO}P}3Ykc};R-$K$m$W8g7R}<58;&R$FC6|Ct3+eEWYUrGcm~TQry$StB>wzd`2r>- zaGZaS9li$98$+o=_wpDq0X53io}vjykJ7)Uh?iQkIw-@eV87OecDbUi7nIuY-5>sn zRyP9S_>a6}GSm|#n&-mml7U4j$V5{LK(&;da4v_DY9(?y{LJ^i`#1A$pOxK4RxM7? 
zW5ziEQU}t$c_vR&_nlZ+I+0o7>h3^kXiF&YDe(YoCMPu}ewVgn71Wqi3Z!YAPXtn8 z+?3n}4R8v2oX__ZUD!GF%J~kXciivypqi0#3b27W18()acr^Ic<%i^fts8A+2X&g0XZn;kyXF1tKi5(2g%0$?3%XRKKo4^x{F)%={4cDcy zf=8h4gnY)&Gf1Nycf@&YwP9Hl$}q94P$;jEzBEeAZ2rHWOl9(&jy`OWN6qrYi^xiduJ|ww9D0cTDhzF^S$9V6fI> z27(d?C7XpkNfBF+Igo3K6JX|WpY&$k6N3P+u*I@{V3RgZ_u%dG9kRD3N;IF(`;NXn z!?!0sYbb~YP>8I~=1epOhoWXilN=_$8NhcoYzxsRQql+OY zX%`C`kil^8^3BFGJ^@F^Kr0n(z2Kwo`~n_7{Dd~L(q4vIYV) zF&zKW|MlO=AX(DDfTJHWrs_KPq7= zrQ!O)H)HL;{jKkN9*h!B((8)Ldck&k2jlQMGs89t3T6O!9&(b@rPD=&N7lWz7MJ$a z0Hok>jDe*!UY02nSZg#c?On8xnsgX~+*Vo_1_SKJIkR0a>&RU%fOSJ?iJ)S12M;cxx z1rfbYI7CtAOq-hTT8nu997YKCz|dhNm5Ga@&y%{syx3~iE2;7^Jf}!-&1}y0V0EOh z8FykHGkDXU0!ZUHgZhagflhlGT~RH_nKgrYkaXrVSR}%^v>FZK3YK+FhN>>HcC$Z- zs0(J(qmmAx*d-GYWYCdkgRc_9;VhpsD{BnuqO#!`)MhUOpT`|cw8x8mN{Q4`h>|+R z51jYO5&ZD_!VxR*Hy8+B=0HY3Xw7MeA)?#J!C`-Pl0NjwC$6QSEep2&NW-d|+_^WA zlf`;fo3uGIWD~T8ie>K^d!+zTCzZUgMbXuaYh>MULBEQA#2mMp%VawMRD5V4NQn6fLW52&g~DkkcK=o?Fe?4DJBN5r7AW&=s51U#+Jacn!d%iMpkk zLZl#%fl?^f=`lm#X^S&mEv5(8!u97}aK3Y~2sR3&>Qq?Wv6wpTk?d6ZIU60VRXpFG z*+A+%(h80nX5coH)WQ8&Y6D8e(i+tqr~Iii5wsHfreR|8b>SvuCp~4a%{U_?2Oowl z?mhN+F~F%Bb&N?r$#YgKuC?KQUejYkg#bwgZ>M|QYC~NX>~~Ic9#Y7ejowiysqmBf zloB=vDE1-RW+d05d&*WzGyr0BEEm`~7;txJxT+HGGw2gi?G;{@pc`TKww^0I3KaX% z)9%L^MHnFGkRRUQ=zY&J?J=fyzB46zK@k}vf=5JfbBuW5JCRs3zBs~oUe^`(rzg0F z?l>T&4@qRIq+N0vb@2g=Kuh`qu;XQLr(TSRc{*B)N#dyuZidlAAwpAz zkv;ug~{LBG1mB~Sa2@EAK^@G|ETB)R|DN$mT$h(lNiEh(pM4)9dS}}NS z_}q^;mj#;D;rTE%ow$%vl|f@L*G$&Zc~*mhL16sp>saOt#_(e^ufd`K2r)Gh7baHWo88TTRX9xV>?bTC<{b?$pV zb}Y5b-YS)Kxq8EpiQ7z>b%v#=;@I{*12BD`RD`+(qL*g|vbPK}%<^oc(vwG1ae>O! zI!AL>iOT9Q;QdejgFvDNwfqOEFCI;urBv)Fux6QbwDp4f^GsHNSnsN4{aSErJ!qRL z%eg}cbR=^Hj<9W+x$%4__0UP%GYhB6_ZeJ+T1)o8^fBP20n36io>A%&a_q3YzoH+S z708;6A&?=h8(D*SX|d?+#qohJFkT?`X6&dg|VHz)zJx7@YLE2o?}COPH8gVUUVxH-^VhFVT3E z)Gh{iDX=2dFLf^rR0x@6Pf1-+i{se$6xdKN3;0g^DM?cj2>`|7ZfwV-xJ3nfq>m8& z!wgJBV}*_*g216!FIE8`L06&;3&k6$J92oVdq@OZ3AHcM{DMXn@~V0ss7ld~q(8>l zFF>EcHxOq^_R#wQ2b0N@+-qwE_bsaK7^IAtQ*Z>RC}}rqTLOS^{y8Z+4kln#G{EW8 zh7VZZk~Rk8UEsTf8TCwb!F81+ycPO2+MWcc0QN&rJuo&8QYuuBS_GE?W*nN2G&ESx zd8Z~D15zCwiq`&TgZNW3hDPK!Mg z2tMNtgv}83eGs4-?r6)JlIPkF(7J|yo&8wu1IxPN*zd4mxtChgPy(EOMKQ;nehFs5 zHX-mLBUR}8)G9EJNP5gXLCAinM=U^-?$qR2jL4@iUn`@hG-s#lGzr7$|i3(NxMcrh$r5Xn|QcUK0|{%N?pAX9XR82Lrz_ zfSJgL!^mKp7of&2!A|X{Pl@B_m0#>q4 zv@Qq0X|z^|e)5joZnuyDQc=m{oGH1E&(!&d0Hkq1)3rv`Yde+K zcQ)C{*_&(A4CK%`;51i)L4nH6R#&vk8bQ}_hXUf=$@GrYz|bga_QlKivh?ozGHcK4c|A4bxf)x%&=TvV|)5CRcW;L@r#wsyxY2? 
zv!dgOggV|g&QM5*Z?p1b@lNmG?*v6QCbXo}n%*#yMPop2Re^EI3StDPlqe}BC5tPQ zwgF%z+V2Kzw`bg+p8=ozazoh0r55Zx=q;0%EC7Hu8pz&*z>?!Uf`N$q&AxZqaL<6G zR#n>ioO;f6bq@d}sRXitO2FxxpoBN+YRJIG;K1lOP-5b}*aO@XSX81rgZz&_c!THr z1{^`98vTHqhI$dj7OgNGHLNts{6?kP%(8kD0o9n*JK6z*P03WP&(Sm33`Jv3G0zl7 zDCevqUe~BVnzJ#P=l0SnZu^LLf#P6+jzPHuURFsI3I=deVdW8KcfdND&B||~|K0lp zEFNCH!t?uQT-FvnqzDw8%FGzY9!W2QtI<1}8MZ-*YnR_du5(lZn525VyPD{Aj*cUm ze+fBdl|Swq;}`vX{=3lrhLP2+3PYh3u<<(w1LP3-XgKy8&>8oXLXnT~DXL(^ort@s zb9t7QRsT*<*JmG{87bqs!xrfUN=5Z48SJ34mV#|n25EQ8`UW`OM?7XgA6uRoF99}t z?|Aq2ZLUisFBMCR&-mn{kMQ}IZ*g>wmVo6W47*}%pW)h8+>U!poo~zZN%Vh#*#@v3 z$vYeLOTm8d{+|HkoC|_U%UW1XFk?T_-ZS9`-+R!f7VEgwicR;vP&Svhzg97I!WS-- zZFV35I2#c|f;QUw&cU=s9~X&W-*0dq*!K>XtU4U6S{4P|1R~GaORNpW9{aK7`)l7o z@IpREiPm=XkQH|)Fm$9piP)G`N8asHB0f6L%89Eo*-_D_|0mjBHutZrFDr~gYXw2sU*p5)KQ3-XN8lXh+i0@=WP+fAy zKsDg8O^17K<~}!-sS(G?e-*VUtRICzOF>=M;ELv^F_RwyQZ!K`vjf<-7>pP4YBR|4 zLAnvX7O>b1l!Pv->1s|wRTCM~0G_$Npdp)m0epT2M`ni0!((z}6&!~e;Yfq1UzPHY zrIeV^=@X4o3H*!}fCgD~5qYxY!JmRubH}nQ*tQKv@1gWyG;;2&ibc|~G^t)__+osX zdLBf<(}P4n;5l{O$1HOzi30}q?Z!W2@P0ys)A;a=1wTG!o9KOqb3*ypk>5;XU2Da4 zjeds8yWkFFj$?^)MR33}>cXkn?rNyiGa>tA!lYM6{wEVIUHajVD3=S!n^I=7-ZNl~uW5;Gh4eO3(X2fv7K z{NXZn-OZ2}Ss%Phr9>YY4`kd`o9F*Hjs#=eBj}BH z^SpQQ95YyouOFN}aZ0dehLC4J5Y0XBI`<{1ReL3pQG2QPH!^m1~ zc=hlK%k>qhOmQ-$VA$3bOJg6MY1mqUQ#Q8`KUJbBP*M!9uNSH^g)fj33r=xR09u>F zOIs18be?Ho(sMd(M`M$IRCL1b{WpH!FF-;7(=HCIEuN(krRfP+DTx3RqN$K)7IuAE zVlQq-hxY^faiprB^SNMwQVt%O_wU}u9pLBq$!=)hM*Kzui#E@-674ZY(rJ;2V+68~ z*Gpb+QD{OiVn8!b8Y^h3NER4<+r0Dg1<&_;RE@^8KOxCZAsDkM*=v?P6ALLu21K%m znf!P56MBCOANQc~joE)V#vsLdhSu-C`U-s##o$o1r{42n|=KVAOG6_5I_7A|7(=x zL;T7&J`QlaRICrLaeaKl_0(Q_e*S)h5XJ}BHL6_Y+)EdCOLQvvb1?+eI-s9kO5srC z8M7`em=yOuC9`{idG_*b4^<2V;Ih}EHe4O5>4XVnz}$9A;4gY0F;pMPK#$1eSpFPjPOkZ8Uc+8 z(E4QDqh1^W`9=D?Fb~Oqwdgo z0=E8NsG>+2!)7u^qjN?p2G_imif=qV;E(^}cd3!j05LZfGtpu2F$eh3`8TO)grU^X z9su~>N3Zer-CMkW{}#8~6K=OBjDCK=%^*nkH?lgnQLx}>^v^Nu}RAypeCwbbstTfm) z@0!`1dcW@*oyE>1Y|54DluEsA49z1keCLP1H|V3?;U3jS4a|e~m**pp;izzWx7AE8*m|MItf29)SanzFB!)V<+7Dn~rCrG%trQj8rbCLr)?*WHrOVRfrmR6PXd zrSc9a*3g5tc4W06s83=U81LC|`B6_hsNlYpg0=T|KPUofFYR~ z>SqKdt>w*9A=N*La@^U(4BBpqJqY~MKiz@SVxl%oyNt|`hgsZDpEnYt4^wF{Pj{DK0%BHw$bd0{^+CnvMCxa%~E)py_%aqUf$heku z960XJ@y<@^*5{v~zyAVo$N%T2pCy=bYi1ug|W(}oqtU_Hd5Q&Eq zM2Zc3XfnSs(z5`!kJ5BR&m_{`~DAppPL4QIgzL3f^2@aIs+mz?AskXoefUq z+z7(NFV!n?5ptVj18OlmUM{$A8;&us6n5h)XyO4lIE_)Ls~Y>lAQJgwgW180D49}~ zkmOoT23#9y($c4~lF{{`FI|e^PRf=(C!4JyB-BuANVcj0N=9w$}9$`?7C|jAhc6aeADu z>FBi*VW`&SLgR7b@bjHCLD%7@b4Ua3s&367G|sGko(bZzgkd9{E%(7nNL!bdRFG0C zQ6BbISkT3CqH+*+bp@yNSydPX@0Cv}L5F}7UM_gf~{ z$FXOy6M-oaPzwpgK=d!b*fIbIoxt$(z`@uL$!(qkNugt+w8oMc;5z60+urkMTjeBs zCQ6pcr15=XKZmOl&{O0^ntHN+O-I&^e_ZFMsLYYEFpu7+}vDZD}>IUvPeg?3nX}R8m*R z5mUJ}RX9PUyt8p<1e4T)6S~acw&*^M42BDk!E01u*O=m-K!uW2FrgoLcFk(qWZiw$ z^{=gB8;({h?)w(i12yZ81Je|eYRvyqqFqfe)U@+ifw8Da#2l;mj+g;672*bgpLJQ# zopz;$fX!eR8a&7fn8=`#O63S#)c2v*QM+mW04Hah9b$Bo}T3g`5>5~Nv zp5@?~ATu`4z82E%qPi)epTc0I&i!}KeHp-e&ck)Z^F2U>0?(?bj{*DPFZ~f8`@397&8mP&Hn*PrbRz3Qc>`UX zLC~3c5Hlq@F9P+++k$3}M3^y4t~t=D25q8mHy9l2(y-QwFQ0Ci%*f$|6FN;_XV5-g z9Eb*_(L#L+=(D7Mo!s8C zAoS3qD&tmyqd7X(^@@Fa&Wu;jHxYGR$eO>G0Yd)!H0~Jyn{o1XT*OuvjNU0gFqnbw zS%Qs#T21gm5JJk1|34G7R3Op!nRtkCu%)svn|fc;`81zH$PBG*e%4YO4xa(=$i!{_ zotRu&a}T4bbJCnDs~A#u$s`NNi~O)Q-1Z{^y%Dm?IT<&f3F&YEs!$D|fhcfV2b#Kc zo-ur=JLb7&l`K)EA=)2Jx(fl73$(1rI8l00(zFljx~6O+wBCsOUP{CA_ydgXQ@Ho! 
z97Dme#hvjJRjyOT=NxcJ6)qfY80>O7k4lpC^XestyJ?x^nd_Z_hjj`0U;+q2sKm?H zS{jkgRx5fBLtl&tC5@#hI5T(#)Wit{imI7FZb)QF=U;!OL||zP+!{vT!g(&QB4G0# z5{~;br=TOxz8qtGUk&9f23do)B?AhZ)R#`dgee&*XQ+l#U}-fKWh7~>q`8zOdov<< zKY=g+>axIlIPcE0*-AN0OD?GEO1h~f*m0zgL&=8qopUAVE_07oN3CJ@%b%DJ-Bs z7*V~p5=qvG!G(d0j_K#uhuGKsnKIFQvoig|IfxJNCC*s2f}GH<3!IBHX^r8_DoR_U z5A29$=a?=}K$SpGX$vc51?AQEu|Iu^zP%5-YKB+Kf~6LGc^~lO2B{4mN2f5!NM9i^ z&CC*jm=)2HzTts=Gdc6egBhZk3@@6#8A(JU(=tIBSqgP1=3ADAZQH|NkD&8*+Thw2?E9A0a))7A7F0N%?^`4m zRPR%gsF<|2hJD{@V+wyRKcvGwR#p&YKRFo%y&2s7U~-|AF& z;JLTav8(^pnGcNPDFSK+RiSpH@_Ae!Nov)q&F*Pz9DpgKQt)O%MLbc2aw=gRZfp|L zoC}q&iMg@7+*mHYZwKxOWUkg4Qg;g2Eocb^0EHOiwcX>v0KjMYD!7wI6fk}#2dW@~ zAei1GnuQef=y`e{L_-fABwl0YX87hOm^jMgm5(!)OfRe}i-FLL`{TxH~qM zBZHRZGx+7RfsIY^{OFp*ta_}dscAC z=T4{13bR`1ykchs}M3H^c09@eK!pk za#-m&ItR23y~o4~HT)7lHXhc}01h526jW$Fuo3dmpvu8DEVW|nUq#Z+yW3iP?k=hX z6Ko%6u?b5K{kHFDQhEZLW%+!i8200cV2bBP2%8(IdEc0TjzE9;mbd+2P)Zt}Nz@^# z3$X=Eg&jP*vF!OMU~-0YWafwT|uBQ}`g0d=ICms$J!LqWi(7JUG^+ z;y66!UK~$h&!D3rq;_OqMGRF72XotY$}$$zYS{Ljlq#a5Jd^tS1M1JHI)qsnh0xej zvJ=wXtPFWLuc!hPJl!{VCs0#ssKXgBXxj^ysIyT&mMcp2m$fE`ya1H-3>$diglf?= zx;q*iPKkH;NJ#*YzP9^BgwrAN+-CDLK*dC$4>hE3QL*ue#)G7009qh)q~{&;v-@t%xbU_ z{h)oe$B)t4g8SQ_pzmAS@75MPUam3uxgYSX(jd=CBzj1E+0C_Cn0b;dw6x z8!>;R67`3Hy+o|ym#7YlN+-sF$w2$0?ma9E?wu-S)UT}MUhWVg?%>?aq_Xnb7G(Xb-vr6#9#GbIAt6D{=E4<%AI3XD;QOz$la52X%|9+?H=7Dnb zJ=AZU6MqrS=rW*3*W!#^NKJ(-LMY&gC>r-c)dz8BvzKGcNs29dE|39HT!P@vmf8T8 zkKz!up8a4$bzF$GN}VMnQ}a|{#*0bOp#vywg|xd-Rvz9^uh@@SHja`tfYM$x zK%e7K9f}uR!!m~k*5v{J(%=6(D2_< zuq+Fa1|h32DKO7HWFeIVuBXP!X<_2RZu^dHyJI`TVTxnzW^nFd&~K(9S0X;=_vfAo z#R{p$__IDnNb6bz2g8-LMb3Dsf%k|aac+hj%+^KI3q`GK-m4_)VRcS;WqIyO1?uvkS~Ry=(3S79ZFuqK_YRFrnfb5x5epLC`S%=#F( zJ$;3--+<9${j%mNnp%g@Tp`k9%=sP$4WXkNjpK#GG5~G85QvDQIL3%~Vg88jG=;8ONAI)5+vA#r} zuNb&i2KTxL@Ao4USxHArqu;pawJ!K4{`>z2+7(f|f{Lp%Ti94CUkRSM9C|0G=L7rw z38ggJgQ!;BZ_h#5g5$#vKgeFHQVQ1V1HSQtU&iI}HP&^-Hjv*dKKpim#xc;7bi{$pLEx@Wj7E&It72grYyd{*9b z{_nxrXQH<%nC!hr%y3F-@)ubm%vwni?+M?u8dN5jVP(a~UGE-0)!56o5_s{_639v# zA#Igo_E8nZHK(MyRrY!EUYHr~Sq%*eZGf8Wi96ZNzziu7&G$W533b+jF4ib0!rm9R z#OL7wguHwEC641BQ`>#A78Hv`Oi^1T&{(!zF@&m-!$;1_ zI}}edFzTWX_XGX@gk!sB+$C`rS>(yMkWW{mf1ivJ**yyypoZNVeY^nOh(`zdv5`V~ z8tQBXWqHK)8^4O{$G?Jlc@5NsHpx(14SDo2@Z9Hs+;}Edo4@^?U&>ImkO~oilX1uu z(+LJ@f!Kvy=&|3ps2p}Qq)m-n>}Wot4meR%HFO$)rBUL})J7c}xweLVzmu8(w6;cI za%@Rk$$>eD3XbzR9c5;);+pY!#Ek?GPh_((=tyQPOKnUtvf0c+H9$#}f>~uyqO6Hy zYBrnn0ArK+in2an^o>o1vlJ*N6I7y}{~dc{X~@3u?eE}=Pk)*LUGLFYJ5aJ@A4v$E zIvrLtJyxvbmbyp6aVW8xAv_J5xQKf82J{$uGlu4OMBvq0BxQl9Ptzx#u`FxuzbH>& z5P)%Nu&OOHdNV^?FX)`wI`}?G(I{)uN#K$BOKws~Ul^-Al$a&v2%(aDneUz^40ir^ zqG^s;!;$NJUN!C!&j}D{fP@TA4-v@odcksez}Ppy)lU)5SjLW-Xq+bm<*6S#)DIIV zPeEyya9+nPorr;b35ZGv#uiito%3P}8+P`7^xm^aNc0V%*i$hn>|%1Ti~f7C@<%mv+7=fN z7P7DK;mn_ujD|BLc!&v-&3n z>5sx+ayCfMHI~IREbBFbPM!@VNXrt`#!?E#@r=H2FDRHfu(8knMA0jAGPwnivtF)o zzlM_{asnY~Cps%_$-#F8WC2QDT1z1HwrwfJJ9K7o`bEl1*zX?IP{utN!7>qLIqsUK ztMg7WFh9mjf&}KNf5ZslIw+=d9BG4OFTf-=1tC1vPQ7IMF2e+_z$E`t4A1Q05(v03 zskThu&r{(o)!_ut$ls+ubwAmd!lcu%9aA+40NetcLyxML8mkN6zqJ;hK6=iL8)H^2 zL_3=ax4`YW&;4{Kkf062ELYqnTRFj0oIsrDohB3D1VjqZ{@(Y#i=Y1NXDIBO64*L_ zos=9I6Q`Xv;Ighbj-Ean@^ z`;9|)BI}luP3LyX?hZSOGP7hb#w+BN^tZ({eX`!msAe7O>kn~x z^(N64_mEV^*Mh)wvL$$@`ZM>LL>JnHRww;3z=7!dIF2pRC(Q@C^Nv>rSs~%uaJgLa zbDT-50)pss5yv>r$=6w6N;sERm1Vu|0pq0cA>zcnT8mm?kuW^eIydAE<@xr}a zFAu1-;d;H|vMia&ZIT#R_U&P#I9e>_uS2$A_XE$+n(BgpoFS z%)piRdVBvCf9_BHBY6L1h(tK~{jjO}CQ!l5P%p2bN~l@*lZ9d#@Tgu@ese8S92?<} zj(a3TwEJguJ_qpX;UV^*@30Z_uA))LT7<7&*DJpD-S1~lgwro%1s`E6K7926HTEub zsvCiRSIK@Q&pmpA?tU)kXk^g{_Pi9YAdtEZ5fred6!8wgjl^ 
z0ThosXVK5~bl=eX5%SRM!jyfW_%Ufv-1qL$Cv&Onb8(MJ=Zs5BnbacfrK-!iQxlMh zYNS{=Q4ODJ#vjpZo-~?>G_wPG5;iuFc-V*t1yA zg>(2`efSB6)l3Yt2ZeV!Z0)oIg_Sg2dGpybxR9|RAEvf|jR3D~)r=FSA8Qtal4I!x zYK!@G-fwvOn|}#k{q|qM*q<0(+cWrlTq#DDf*{A*|rAB3bDoQgl?^z6YgJUn!ak9|wUCZNb}F+n!O8 zl$0AxMl!qS#STivl+M%o25_TT8Ny3mB$ z@c8j}v0Pt;Py%UIC^elkZ+(7$K8tI5{L6Q9ca&Ej{5@#6 z!Wq@JMpcdt`$7F6H(b;e2HxJ)6JamPHOViKgQFSpGKvSsL`?C z`A(ir)(|vEcggUUd}h6y?ucd-B-&!E4)kbV?!z%`$>2t4O)*M~BE%8KP_IbrU&sUp zML?=}1u9jC?D4+cQ?k>5oK7xKV>^xrj%_Bd0ux}fsnh|~<&u3)z~qN3dC~hh)IT@o z-6TL@JY%&@>2QG|C?)SMs1ySjX&B*cGO^9Xxa3h(FaiV(M{_dPbww){%280YB^*)A z1Q~oj1)ZJLx9Oz4Tp}U&P#D6Pb0li9BtC`ml!E8k!y)_L`w>Y6)yFl#tx0;FZv{KLuvS1g(#b`@U1wU44F@$)Ku>OQlr2f>LHe`aC7$gZ3rE z0CxttoM6s=n0<5%-?8t{Xy*9k?|uva=YRVz;-CH}{{&t?JYcC63xkTrWEMV1H$Ddj z=kMWKKfFPE^%1r{@OM7_619eUSZxipEkXBcyn{vb)t9>c`bM1C8*^V&yIF%^UY5!bZ3U|Ao~)(61P=W8J6+7PS-1RTtSu!?5v zqW*w^rDydms=rIV`v-46$aAKjo#z{*N(C~n{(i_ydbPz;g|FV){M@g!t zVS;1oje#j}sHp84)s-x}czDPJ>)>GP82Qiui}`-;ig994Go0~T zCe)7mDYi13J$~rV*uMH59PfXEzCXtxQV<~Ndw}A{Gnb(zrEpoA>|B9y3iEWF1*MW# zYz4I$sRPWsaMBLvMH^#_hF_Mq0K?wNb$4Z+$}}0;@IYxQ>ZCVB;*(iCKR0>=xh&2M zRfdy??AV_0zyI(2A@u#0CFN)|Z&@l+QlM^pKOJr>OC(+V%!nC{QMk^BTw191>@-3t%?itE z#z7^8K03a7dP+kqDaNzpXRI#?!9u4YzW?h6_dpQUqCMqQ-=ig!)Do*Pa&Kx;U zBlk4V5u2Hyvlri^U$HK#9@FU7#_;Ggwl(g7oBn)H-OOZhrwu7zO0c1xOAmpT%Y z7C9S`PA3B`1FV1^n1Ly{Uadwm^Zxcz98aIaP*d#$nY?i~ejb*7;Vn zk)h%e^CP5Ag)w0L$N=%&fOwV%RbVW&l*_VCuwg4QzPUDT{Te4s8trINGC{+D7okQA7TfUU5(vftti zLez{%aB_k>1C6zml#w)UAH)<5&0_U%bbi0USJR;xT$QQ!k}p*WRZs zW*}k6K|g4X+@ksvt{g*guR2EX;!c=wOJf-ba zK`sUiijH(81$r05VQLN!Nz)clF$xG6mN6F(xg^25=9oCfB!Q zkfi>aF+vHb@azmXH1?rQDqRQ`$iK$<8yP&S5>`v4&6<=sA^f5~7c1x(1ed};bXSrC z-hKYQMK6JU>{&4h{wKV<%tiUz?Tx2 z*n>ek5)~Rzw!ruc49=x|DGB>QdhP+>XPA-~I_6t`~yz1}lxv*710`;C75qC6t}^ z02m~Ku(XC@A*!@@(BDKw29E5B=!}75*$TXGNi&UjL@yTqr_`uc0g`A3oB3^!Gh|@~qkbF%t>^>>J~@J5}oUd67(!5){~o(ooXiijF0@ z$GA+EL1<+1qDsYq>!c$D-J>RlE@&9xzzYZJxixVMcm~ubDr<)E;>*gC7z~i%GLf~UVVgr?LYkM_@{pG z`=j5KbX4uA*M1|;f20DC9MH@(d_v}QC6uxt^ z5bN=JU81?xwQ{0k*nD=2dvaYbFB0415Na4fh;%7YUhE34@(Q#;V~eo zRpM9i%!q_%p;8uT8)_*4DTz*mZp$hf?_6Ed{<$j29*DA%r?oXaT-MOaCv7dLdb8(c zc+ehNqMYI=heHnnwZWiqjpSK}l=f`Ajy)&B$vftG zTL6WSjK@y*P$<45ip``RaNP#&L*d>o>yp9C(i*no2!I8w%#5iPqyX4y|7t<@Gi+1{ zdsr&G6nymh6~6z?Pw?5>XMFnAGoH5ty+^-?ddH@0bnJodUIf+==oePVQhwc#RMC`s z&O0CLzqE?=a>220cz%9@cLngXnvoLVzzo``lBwy+G6JfEgmPBI4z0v`bO!jURs@}A zR>I;A#-8-yc)e8I+_KkcSufb`H>h$LfaJhZIQSJa5FAQCPJw!y(JBCY!=pD)Vr7$cLj;lSmx0>klYsrc%?r>_R@hiJe~U^vGu@EnyA z39rMmhfs-5DL!*OzP#Bx4 zU(6koPA_U}(lHgw@*f`@F@UTe6mM|fl7XT|b?&8DFrrm1mn&nJG8r)Zvr?m%toI{9z4)B3_p<2T_N+uJd4^(P z14Tz>?~ZJ<`U0I*-Z3b<4vBlvW|=A`$TNBLj#hwDjOde-4#35eBbPR_k@kTSw^G zWgh*YT5%(V>>0Sp9!Z?ON3?_9_vAtP}__1 zzIIAh%vcgNQG()joskb`(sc}J`qL1J;=^NU$bA@g#02&tUiI zv~#K4T2Mh7!fS!zJ254 zkJBy;m(YRcM^LZc*h@x=m=iM1kW!l{RkZ%fnB*0*X%d!SFId%tU<(f)y54>|4I=A-$Sfv7EyuVbC+#MG~1p2+_U6M_en%Sgf zmpm(tN9(E2^Il5?ebqeA5vbso&T~F6)im4k00}M>R`sLOVd=Mf4;pW&HG7U6 zqL$GwasSVOrb2}|(}!zIpU`=Rho>jVq(*0P3J1)d%1&xiCs5jZBzp(_(?Ch!Yi%(z zO}RtH2RH?a>~l(?1)iH@KX8nJ_Z(>FCYv1UR>NwlRl0Kq6X`1gD!P02CdxLR#SV&W zCgYq?T+&@*b*U8Wj7N@R$6G1E@jOUzG+ly)l2vESa4^16T=RNur#qs!rp)+*_j3#- z_8gIL@-t(Mn0eC)wvPkw1AXkNDAfwEQxGP^9<&LZ#@=*R6ep@7n^jiDYJ8`prjOav z0S5CS@G~j-gU>K1&96j8%CU~9{1wKb?oNfT!TDND7(A**eW-^?;A{Y1XO@Rp1@Rdp z;t%znX76!S*E2zfrx=RveF9i86!%kO9|MBRxI5SjGY1_>DCHygc)NW)L33 zTl|E0DFdGJybf4M&ej^5;&O!l_D{d00Xiup@Qe06s6i@EE5K4BrXcdH3%%_z|3CxL z6oAUes5F=IxmNP~)Qbav&72^|ve5{OGKIW2-a9~br*X`6i0f$F#@PQ-0v$@UP48-e zny0VVYp#13#P}&yW~|Ia1vV2oGhH95vkaSmC zYuI)rf?;qUYDt6A%<=KVB@>Th$Vsa_JMRvd+zrghyIs8J6VJPphRgMXM8vY)-+^Xo 
[... base85-encoded binary patch data (git binary literal for the PNG screenshot files) omitted ...]

From 33a9d2622f5b00a47549e4262fc8de0aa6807598 Mon Sep 17 00:00:00 2001
From: Korijn van Golen
Date: Fri, 5 Jan 2024 11:20:44 +0100
Subject: [PATCH 20/20] add publish ci steps back in

---
 .github/workflows/ci.yml | 67 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8114545..8b17a89 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -4,6 +4,8 @@ on:
   push:
     branches:
       - main
+    tags:
+      - 'v*'
   pull_request:
     branches:
       - main
@@ -30,6 +32,28 @@ jobs:
       run: |
         flake8 .
 
+  lint-wheel:
+    name: Check Wheel
+    timeout-minutes: 5
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python 3.9
+      uses: actions/setup-python@v4
+      with:
+        python-version: 3.9
+    - name: Install dev dependencies
+      run: |
+        python -m pip install --upgrade pip twine
+    - name: Build wheel
+      run: |
+        pip wheel -w dist --no-deps .
+    - name: Check wheel
+      run: |
+        twine check dist/*
+
   test-examples-build:
     name: Test Examples
     timeout-minutes: 10
@@ -100,3 +124,46 @@ jobs:
     - name: Unit tests
       run: |
         pytest -v tests
+
+  publish:
+    name: Publish to Github and Pypi
+    runs-on: ubuntu-latest
+    needs: [lint-build, lint-wheel, test-examples-build, test-builds]
+    if: success() && startsWith(github.ref, 'refs/tags/v')
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python 3.9
+      uses: actions/setup-python@v4
+      with:
+        python-version: '3.9'
+    - name: Build wheel
+      run: |
+        python -m pip install --upgrade pip
+        pip wheel -w dist --no-deps .
+    - name: Get version from git ref
+      id: get_version
+      run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
+    - name: Create GH release
+      uses: actions/create-release@v1
+      env:
+        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      with:
+        tag_name: ${{ steps.get_version.outputs.VERSION }}
+        release_name: Release ${{ steps.get_version.outputs.VERSION }}
+        body: |
+          Autogenerated wheels.
+          See [the changelog](https://github.com/pygfx/shadertoy/blob/main/CHANGELOG.md) for details.
+        draft: false
+        prerelease: false
+    - name: Upload release assets
+      # Move back to official action after fix https://github.com/actions/upload-release-asset/issues/4
+      uses: AButler/upload-release-assets@v2.0
+      with:
+        release-tag: ${{ steps.get_version.outputs.VERSION }}
+        files: 'dist/*.tar.gz;dist/*.whl'
+        repo-token: ${{ secrets.GITHUB_TOKEN }}
+    - name: Publish to PyPI
+      uses: pypa/gh-action-pypi-publish@master
+      with:
+        user: __token__
+        password: ${{ secrets.PYPI_PASSWORD }}
