diff --git a/.readthedocs.yaml b/.readthedocs.yaml deleted file mode 100644 index 7c3e63c..0000000 --- a/.readthedocs.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Read the Docs configuration file -# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details - -version: 2 - -build: - os: ubuntu-22.04 - tools: - python: "3.11" -# If we ever want to run wgpu stuff in the doc build -# apt_packages: -# - libegl1-mesa -# - libgl1-mesa-dri -# - libxcb-xfixes0-dev -# - mesa-vulkan-drivers - -sphinx: - configuration: docs/conf.py - fail_on_warning: true - -python: - install: - - method: pip - path: . - extra_requirements: - - docs diff --git a/download-wgpu-native.py b/download-wgpu-native.py deleted file mode 100644 index 659240a..0000000 --- a/download-wgpu-native.py +++ /dev/null @@ -1,178 +0,0 @@ -import os -import re -import sys -import argparse -import tempfile -import platform -from zipfile import ZipFile - -import requests - - -# The directory containing non-python resources that are included in packaging -RESOURCE_DIR = os.path.join("wgpu", "resources") -# The version installed through this script is tracked in the backend module -VERSION_FILE = os.path.join("wgpu", "backends", "wgpu_native", "__init__.py") - -# Whether to ensure we export \n instead of \r\n -FORCE_SIMPLE_NEWLINES = False -if sys.platform.startswith("win"): - sample = open(os.path.join(RESOURCE_DIR, "codegen_report.md"), "rb").read() - if sample.count(b"\r\n") == 0: - FORCE_SIMPLE_NEWLINES = True - - -def get_current_version(): - with open(VERSION_FILE) as fh: - return re.search(r"__version__ = \"(.*?)\"", fh.read()).group(1) - - -def write_current_version(version, commit_sha): - with open(VERSION_FILE, "rb") as fh: - file_content = fh.read().decode() - file_content = re.sub( - r"__version__ = \".*?\"", - f'__version__ = "{version}"', - file_content, - ) - file_content = re.sub( - r"__commit_sha__ = \".*?\"", - f'__commit_sha__ = "{commit_sha}"', - file_content, - ) - with open(VERSION_FILE, mode="wb") as fh: - fh.write(file_content.encode()) - - -def download_file(url, filename): - resp = requests.get(url, stream=True) - with open(filename, mode="wb") as fh: - for chunk in resp.iter_content(chunk_size=1024 * 128): - fh.write(chunk) - - -def extract_file(zip_filename, member, path): - z = ZipFile(zip_filename) - os.makedirs(path, exist_ok=True) - z.extract(member, path=path) - if member.endswith(".h") and FORCE_SIMPLE_NEWLINES: - filename = os.path.join(path, member) - bb = open(filename, "rb").read() - with open(filename, "wb") as f: - f.write(bb.replace(b"\r\n", b"\n")) - - -def get_os_string(): - if sys.platform.startswith("win"): - return "windows" - elif sys.platform.startswith("darwin"): - return "macos" - elif sys.platform.startswith("linux"): - return "linux" - else: - # We do not provide binaries for this platform. Note that we can - # have false positives, e.g. on ARM Linux. We assume that users on - # such platforms are aware and arrange for the wgpu lib themselves. - raise RuntimeError(f"Platform '{sys.platform}' not supported") - - -def get_arch(): - # See e.g.: https://stackoverflow.com/questions/45124888 - is_64_bit = sys.maxsize > 2**32 - machine = platform.machine() - - # See if this is run by cibuildwheel and check to see if ARCHFLAGS is - # specified (only done on macOS). This allows to select the proper binaries. 
- # For specifics of CIBUILDWHEEL and macOS build envs, see: - # https://github.com/pypa/cibuildwheel/blob/4307b52ff28b631519d38bfa0dd09d6a9b39a81e/cibuildwheel/macos.py#L277 - if os.environ.get("CIBUILDWHEEL") == "1" and "ARCHFLAGS" in os.environ: - archflags = os.environ["ARCHFLAGS"] - return "aarch64" if "arm64" in archflags else "x86_64" - - if machine == "armv7l": - # Raspberry pi - return "armv7" - elif is_64_bit and machine.startswith(("arm", "aarch64")): - # Includes MacOS M1, arm linux, ... - return "aarch64" - elif is_64_bit: - return "x86_64" - else: - return "i686" - - -def main(version, os_string, arch, upstream): - for build in ["release"]: # ["release", "debug"] - filename = f"wgpu-{os_string}-{arch}-{build}.zip" - url = f"https://github.com/{upstream}/releases/download/v{version}/{filename}" - tmp = tempfile.gettempdir() - zip_filename = os.path.join(tmp, filename) - print(f"Downloading {url} to {zip_filename}") - download_file(url, zip_filename) - headerfile1 = "webgpu.h" - headerfile2 = "wgpu.h" - binaryfile = None - if os_string == "linux": - binaryfile = "libwgpu_native.so" - elif os_string == "macos": - binaryfile = "libwgpu_native.dylib" - elif os_string == "windows": - binaryfile = "wgpu_native.dll" - else: - raise RuntimeError(f"Platform '{os_string}' not supported") - root, ext = os.path.splitext(binaryfile) - binaryfile_name = root + "-" + build + ext - print(f"Extracting {headerfile1} to {RESOURCE_DIR}") - extract_file(zip_filename, headerfile1, RESOURCE_DIR) - print(f"Extracting {headerfile2} to {RESOURCE_DIR}") - extract_file(zip_filename, headerfile2, RESOURCE_DIR) - print(f"Extracting {binaryfile} to {RESOURCE_DIR}") - extract_file(zip_filename, binaryfile, RESOURCE_DIR) - os.replace( - os.path.join(RESOURCE_DIR, binaryfile), - os.path.join(RESOURCE_DIR, binaryfile_name), - ) - current_version = get_current_version() - if version != current_version: - print(f"Version changed, updating {VERSION_FILE}") - filename = "commit-sha" - url = f"https://github.com/{upstream}/releases/download/v{version}/{filename}" - commit_sha_filename = os.path.join(tmp, filename) - print(f"Downloading {url} to {commit_sha_filename}") - download_file(url, commit_sha_filename) - with open(commit_sha_filename) as fh: - commit_sha = fh.read().strip() - write_current_version(version, commit_sha) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Download wgpu-native binaries and headers from github releases" - ) - version = get_current_version() - parser.add_argument( - "--version", help=f"Version to download (default: {version})", default=version - ) - os_string = get_os_string() - parser.add_argument( - "--os", - help=f"Platform to download for (default: {os_string})", - default=os_string, - choices=("linux", "macos", "windows"), - ) - arch_string = get_arch() - parser.add_argument( - "--arch", - help=f"Architecture to download for (default: {arch_string})", - default=arch_string, - choices=("x86_64", "i686", "aarch64"), - ) - upstream = "gfx-rs/wgpu-native" - parser.add_argument( - "--upstream", - help=f"Upstream repository to download release from (default: {upstream})", - default=upstream, - ) - args = parser.parse_args() - - main(args.version, args.os, args.arch, args.upstream) diff --git a/examples/compute_noop.py b/examples/compute_noop.py deleted file mode 100644 index 2c19e2a..0000000 --- a/examples/compute_noop.py +++ /dev/null @@ -1,136 +0,0 @@ -""" -Example compute shader that does ... nothing but copy a value from one -buffer into another. 
-""" - -import wgpu -from wgpu.utils.compute import compute_with_buffers # Convenience function - - -# %% Shader and data - -shader_source = """ - -@group(0) @binding(0) -var data1: array; - -@group(0) @binding(1) -var data2: array; - -@compute -@workgroup_size(1) -fn main(@builtin(global_invocation_id) index: vec3) { - let i: u32 = index.x; - data2[i] = data1[i]; -} -""" - -# Create input data as a memoryview -n = 20 -data = memoryview(bytearray(n * 4)).cast("i") -for i in range(n): - data[i] = i - - -# %% The short version, using memoryview - -# The first arg is the input data, per binding -# The second arg are the ouput types, per binding -out = compute_with_buffers({0: data}, {1: (n, "i")}, shader_source, n=n) - -# The result is a dict matching the output types -# Select data from buffer at binding 1 -result = out[1].tolist() -print(result) -assert result == list(range(20)) - - -# %% The short version, using numpy - -# import numpy as np -# -# numpy_data = np.frombuffer(data, np.int32) -# out = compute_with_buffers({0: numpy_data}, {1: numpy_data.nbytes}, shader_source, n=n) -# result = np.frombuffer(out[1], dtype=np.int32) -# print(result.tolist()) - - -# %% The long version using the wgpu API - -# %% Create device -# Create device and shader object -device = wgpu.utils.get_default_device() - -# Or, you can select GPU by requesting all available adapters -# adapters = wgpu.backends.wgpu_native.enumerate_adapters() -# adapter = None -# for adap in adapters: -# adapter_info = adap.request_adapter_info() -# print(adapter_info) -# if "NVIDIA" in adapter_info["device"]: -# adapter = adap -# break -# assert adapter is not None -# device = adapter.request_device() - -# %% -cshader = device.create_shader_module(code=shader_source) - -# Create buffer objects, input buffer is mapped. 
-buffer1 = device.create_buffer_with_data(data=data, usage=wgpu.BufferUsage.STORAGE)
-buffer2 = device.create_buffer(
-    size=data.nbytes, usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC
-)
-
-# Setup layout and bindings
-binding_layouts = [
-    {
-        "binding": 0,
-        "visibility": wgpu.ShaderStage.COMPUTE,
-        "buffer": {
-            "type": wgpu.BufferBindingType.read_only_storage,
-        },
-    },
-    {
-        "binding": 1,
-        "visibility": wgpu.ShaderStage.COMPUTE,
-        "buffer": {
-            "type": wgpu.BufferBindingType.storage,
-        },
-    },
-]
-bindings = [
-    {
-        "binding": 0,
-        "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size},
-    },
-    {
-        "binding": 1,
-        "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size},
-    },
-]
-
-# Put everything together
-bind_group_layout = device.create_bind_group_layout(entries=binding_layouts)
-pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[bind_group_layout])
-bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings)
-
-# Create and run the pipeline
-compute_pipeline = device.create_compute_pipeline(
-    layout=pipeline_layout,
-    compute={"module": cshader, "entry_point": "main"},
-)
-command_encoder = device.create_command_encoder()
-compute_pass = command_encoder.begin_compute_pass()
-compute_pass.set_pipeline(compute_pipeline)
-compute_pass.set_bind_group(0, bind_group, [], 0, 999999)  # last 2 elements not used
-compute_pass.dispatch_workgroups(n, 1, 1)  # x y z
-compute_pass.end()
-device.queue.submit([command_encoder.finish()])
-
-# Read result
-# result = buffer2.read_data().cast("i")
-out = device.queue.read_buffer(buffer2).cast("i")
-result = out.tolist()
-print(result)
-assert result == list(range(20))
diff --git a/examples/compute_timestamps.py b/examples/compute_timestamps.py
deleted file mode 100644
index 051e7d9..0000000
--- a/examples/compute_timestamps.py
+++ /dev/null
@@ -1,166 +0,0 @@
-"""
-A simple example to profile a compute pass using ComputePassTimestampWrites.
-"""
-
-import wgpu
-
-"""
-Define the number of elements, global and local sizes.
-Change these and see how it affects performance.
-"""
-n = 512 * 512
-local_size = [32, 1, 1]
-global_size = [n // local_size[0], 1, 1]
-
-shader_source = f"""
-@group(0) @binding(0)
-var<storage,read> data1: array<i32>;
-
-@group(0) @binding(1)
-var<storage,read> data2: array<i32>;
-
-@group(0) @binding(2)
-var<storage,read_write> data3: array<i32>;
-
-@compute
-@workgroup_size({','.join(map(str, local_size))})
-fn main(@builtin(global_invocation_id) index: vec3<u32>) {{
-    let i: u32 = index.x;
-    data3[i] = data1[i] + data2[i];
-}}
-"""
-
-# Define two arrays
-data1 = memoryview(bytearray(n * 4)).cast("i")
-data2 = memoryview(bytearray(n * 4)).cast("i")
-
-# Initialize the arrays
-for i in range(n):
-    data1[i] = i
-
-for i in range(n):
-    data2[i] = i * 2
-
-adapter = wgpu.gpu.request_adapter(power_preference="high-performance")
-
-# Request a device with the timestamp_query feature, so we can profile our computation
-device = adapter.request_device(required_features=[wgpu.FeatureName.timestamp_query])
-cshader = device.create_shader_module(code=shader_source)
-
-# Create buffer objects, input buffer is mapped.
-buffer1 = device.create_buffer_with_data(data=data1, usage=wgpu.BufferUsage.STORAGE) -buffer2 = device.create_buffer_with_data(data=data2, usage=wgpu.BufferUsage.STORAGE) -buffer3 = device.create_buffer( - size=data1.nbytes, usage=wgpu.BufferUsage.STORAGE | wgpu.BufferUsage.COPY_SRC -) - -# Setup layout and bindings -binding_layouts = [ - { - "binding": 0, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - { - "binding": 1, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.read_only_storage, - }, - }, - { - "binding": 2, - "visibility": wgpu.ShaderStage.COMPUTE, - "buffer": { - "type": wgpu.BufferBindingType.storage, - }, - }, -] -bindings = [ - { - "binding": 0, - "resource": {"buffer": buffer1, "offset": 0, "size": buffer1.size}, - }, - { - "binding": 1, - "resource": {"buffer": buffer2, "offset": 0, "size": buffer2.size}, - }, - { - "binding": 2, - "resource": {"buffer": buffer3, "offset": 0, "size": buffer3.size}, - }, -] - -# Put everything together -bind_group_layout = device.create_bind_group_layout(entries=binding_layouts) -pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[bind_group_layout]) -bind_group = device.create_bind_group(layout=bind_group_layout, entries=bindings) - -# Create and run the pipeline -compute_pipeline = device.create_compute_pipeline( - layout=pipeline_layout, - compute={"module": cshader, "entry_point": "main"}, -) - -""" -Create a QuerySet to store the 'beginning_of_pass' and 'end_of_pass' timestamps. -Set the 'count' parameter to 2, as this set will contain 2 timestamps. -""" -query_set = device.create_query_set(type=wgpu.QueryType.timestamp, count=2) -command_encoder = device.create_command_encoder() - -# Pass our QuerySet and the indices into it, where the timestamps will be written. -compute_pass = command_encoder.begin_compute_pass( - timestamp_writes={ - "query_set": query_set, - "beginning_of_pass_write_index": 0, - "end_of_pass_write_index": 1, - } -) - -""" -Create the buffer to store our query results. -Each timestamp is 8 bytes. We mark the buffer usage to be QUERY_RESOLVE, -as we will use this buffer in a resolve_query_set call later. -""" -query_buf = device.create_buffer( - size=8 * query_set.count, - usage=wgpu.BufferUsage.QUERY_RESOLVE - | wgpu.BufferUsage.STORAGE - | wgpu.BufferUsage.COPY_SRC - | wgpu.BufferUsage.COPY_DST, -) -compute_pass.set_pipeline(compute_pipeline) -compute_pass.set_bind_group(0, bind_group, [], 0, 999999) # last 2 elements not used -compute_pass.dispatch_workgroups(*global_size) # x y z -compute_pass.end() - -# Resolve our queries, and store the results in the destination buffer we created above. -command_encoder.resolve_query_set( - query_set=query_set, - first_query=0, - query_count=2, - destination=query_buf, - destination_offset=0, -) -device.queue.submit([command_encoder.finish()]) - -""" -Read the query buffer to get the timestamps. 
-Index 0: beginning timestamp -Index 1: end timestamp -""" -timestamps = device.queue.read_buffer(query_buf).cast("Q").tolist() -print(f"Adding two {n} sized arrays took {(timestamps[1]-timestamps[0])/1000} us") - -# Read result -out = device.queue.read_buffer(buffer3).cast("i") -result = out.tolist() - -# Calculate the result on the CPU for comparison -result_cpu = [a + b for a, b in zip(data1, data2)] - -# Ensure results are the same -assert result == result_cpu diff --git a/examples/cube.py b/examples/cube.py deleted file mode 100644 index 18dc4e1..0000000 --- a/examples/cube.py +++ /dev/null @@ -1,388 +0,0 @@ -""" -This example renders a simple textured rotating cube. -""" -# test_example = true - -import time - -from wgpu.gui.auto import WgpuCanvas, run -import wgpu -import numpy as np - - -# %% Create canvas and device - -# Create a canvas to render to -canvas = WgpuCanvas(title="wgpu cube") - -# Create a wgpu device -adapter = wgpu.gpu.request_adapter(power_preference="high-performance") -device = adapter.request_device() - -# Prepare present context -present_context = canvas.get_context() -render_texture_format = present_context.get_preferred_format(device.adapter) -present_context.configure(device=device, format=render_texture_format) - - -# %% Generate data - -# pos texcoord -# x, y, z, w, u, v -vertex_data = np.array( - [ - # top (0, 0, 1) - [-1, -1, 1, 1, 0, 0], - [1, -1, 1, 1, 1, 0], - [1, 1, 1, 1, 1, 1], - [-1, 1, 1, 1, 0, 1], - # bottom (0, 0, -1) - [-1, 1, -1, 1, 1, 0], - [1, 1, -1, 1, 0, 0], - [1, -1, -1, 1, 0, 1], - [-1, -1, -1, 1, 1, 1], - # right (1, 0, 0) - [1, -1, -1, 1, 0, 0], - [1, 1, -1, 1, 1, 0], - [1, 1, 1, 1, 1, 1], - [1, -1, 1, 1, 0, 1], - # left (-1, 0, 0) - [-1, -1, 1, 1, 1, 0], - [-1, 1, 1, 1, 0, 0], - [-1, 1, -1, 1, 0, 1], - [-1, -1, -1, 1, 1, 1], - # front (0, 1, 0) - [1, 1, -1, 1, 1, 0], - [-1, 1, -1, 1, 0, 0], - [-1, 1, 1, 1, 0, 1], - [1, 1, 1, 1, 1, 1], - # back (0, -1, 0) - [1, -1, 1, 1, 0, 0], - [-1, -1, 1, 1, 1, 0], - [-1, -1, -1, 1, 1, 1], - [1, -1, -1, 1, 0, 1], - ], - dtype=np.float32, -) - -index_data = np.array( - [ - [0, 1, 2, 2, 3, 0], # top - [4, 5, 6, 6, 7, 4], # bottom - [8, 9, 10, 10, 11, 8], # right - [12, 13, 14, 14, 15, 12], # left - [16, 17, 18, 18, 19, 16], # front - [20, 21, 22, 22, 23, 20], # back - ], - dtype=np.uint32, -).flatten() - - -# Create texture data (srgb gray values) -texture_data = np.array( - [ - [50, 100, 150, 200], - [100, 150, 200, 50], - [150, 200, 50, 100], - [200, 50, 100, 150], - ], - dtype=np.uint8, -) -texture_data = np.repeat(texture_data, 64, 0) -texture_data = np.repeat(texture_data, 64, 1) -texture_size = texture_data.shape[1], texture_data.shape[0], 1 - -# Use numpy to create a struct for the uniform -uniform_dtype = [("transform", "float32", (4, 4))] -uniform_data = np.zeros((), dtype=uniform_dtype) - - -# %% Create resource objects (buffers, textures, samplers) - -# Create vertex buffer, and upload data -vertex_buffer = device.create_buffer_with_data( - data=vertex_data, usage=wgpu.BufferUsage.VERTEX -) - -# Create index buffer, and upload data -index_buffer = device.create_buffer_with_data( - data=index_data, usage=wgpu.BufferUsage.INDEX -) - -# Create uniform buffer - data is uploaded each frame -uniform_buffer = device.create_buffer( - size=uniform_data.nbytes, usage=wgpu.BufferUsage.UNIFORM | wgpu.BufferUsage.COPY_DST -) - -# Create texture, and upload data -texture = device.create_texture( - size=texture_size, - usage=wgpu.TextureUsage.COPY_DST | wgpu.TextureUsage.TEXTURE_BINDING, - 
dimension=wgpu.TextureDimension.d2, - format=wgpu.TextureFormat.r8unorm, - mip_level_count=1, - sample_count=1, -) -texture_view = texture.create_view() - -device.queue.write_texture( - { - "texture": texture, - "mip_level": 0, - "origin": (0, 0, 0), - }, - texture_data, - { - "offset": 0, - "bytes_per_row": texture_data.strides[0], - }, - texture_size, -) - -# Create a sampler -sampler = device.create_sampler() - - -# %% The shaders - - -shader_source = """ -struct Locals { - transform: mat4x4, -}; -@group(0) @binding(0) -var r_locals: Locals; - -struct VertexInput { - @location(0) pos : vec4, - @location(1) texcoord: vec2, -}; -struct VertexOutput { - @location(0) texcoord: vec2, - @builtin(position) pos: vec4, -}; -struct FragmentOutput { - @location(0) color : vec4, -}; - - -@vertex -fn vs_main(in: VertexInput) -> VertexOutput { - let ndc: vec4 = r_locals.transform * in.pos; - var out: VertexOutput; - out.pos = vec4(ndc.x, ndc.y, 0.0, 1.0); - out.texcoord = in.texcoord; - return out; -} - -@group(0) @binding(1) -var r_tex: texture_2d; - -@group(0) @binding(2) -var r_sampler: sampler; - -@fragment -fn fs_main(in: VertexOutput) -> FragmentOutput { - let value = textureSample(r_tex, r_sampler, in.texcoord).r; - let physical_color = vec3(pow(value, 2.2)); // gamma correct - var out: FragmentOutput; - out.color = vec4(physical_color.rgb, 1.0); - return out; -} -""" - -shader = device.create_shader_module(code=shader_source) - - -# %% The bind groups - -# We always have two bind groups, so we can play distributing our -# resources over these two groups in different configurations. -bind_groups_entries = [[]] -bind_groups_layout_entries = [[]] - -bind_groups_entries[0].append( - { - "binding": 0, - "resource": { - "buffer": uniform_buffer, - "offset": 0, - "size": uniform_buffer.size, - }, - } -) -bind_groups_layout_entries[0].append( - { - "binding": 0, - "visibility": wgpu.ShaderStage.VERTEX | wgpu.ShaderStage.FRAGMENT, - "buffer": {"type": wgpu.BufferBindingType.uniform}, - } -) - -bind_groups_entries[0].append({"binding": 1, "resource": texture_view}) -bind_groups_layout_entries[0].append( - { - "binding": 1, - "visibility": wgpu.ShaderStage.FRAGMENT, - "texture": { - "sample_type": wgpu.TextureSampleType.float, - "view_dimension": wgpu.TextureViewDimension.d2, - }, - } -) - -bind_groups_entries[0].append({"binding": 2, "resource": sampler}) -bind_groups_layout_entries[0].append( - { - "binding": 2, - "visibility": wgpu.ShaderStage.FRAGMENT, - "sampler": {"type": wgpu.SamplerBindingType.filtering}, - } -) - - -# Create the wgou binding objects -bind_group_layouts = [] -bind_groups = [] - -for entries, layout_entries in zip(bind_groups_entries, bind_groups_layout_entries): - bind_group_layout = device.create_bind_group_layout(entries=layout_entries) - bind_group_layouts.append(bind_group_layout) - bind_groups.append( - device.create_bind_group(layout=bind_group_layout, entries=entries) - ) - -pipeline_layout = device.create_pipeline_layout(bind_group_layouts=bind_group_layouts) - - -# %% The render pipeline - -render_pipeline = device.create_render_pipeline( - layout=pipeline_layout, - vertex={ - "module": shader, - "entry_point": "vs_main", - "buffers": [ - { - "array_stride": 4 * 6, - "step_mode": wgpu.VertexStepMode.vertex, - "attributes": [ - { - "format": wgpu.VertexFormat.float32x4, - "offset": 0, - "shader_location": 0, - }, - { - "format": wgpu.VertexFormat.float32x2, - "offset": 4 * 4, - "shader_location": 1, - }, - ], - }, - ], - }, - primitive={ - "topology": 
wgpu.PrimitiveTopology.triangle_list, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.back, - }, - depth_stencil=None, - multisample=None, - fragment={ - "module": shader, - "entry_point": "fs_main", - "targets": [ - { - "format": render_texture_format, - "blend": { - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - } - ], - }, -) - - -# %% Setup the render function - - -def draw_frame(): - # Update uniform transform - a1 = -0.3 - a2 = time.time() - s = 0.6 - ortho = np.array( - [ - [s, 0, 0, 0], - [0, s, 0, 0], - [0, 0, s, 0], - [0, 0, 0, 1], - ], - ) - rot1 = np.array( - [ - [1, 0, 0, 0], - [0, np.cos(a1), -np.sin(a1), 0], - [0, np.sin(a1), +np.cos(a1), 0], - [0, 0, 0, 1], - ], - ) - rot2 = np.array( - [ - [np.cos(a2), 0, np.sin(a2), 0], - [0, 1, 0, 0], - [-np.sin(a2), 0, np.cos(a2), 0], - [0, 0, 0, 1], - ], - ) - uniform_data["transform"] = rot2 @ rot1 @ ortho - - # Upload the uniform struct - tmp_buffer = device.create_buffer_with_data( - data=uniform_data, usage=wgpu.BufferUsage.COPY_SRC - ) - - command_encoder = device.create_command_encoder() - command_encoder.copy_buffer_to_buffer( - tmp_buffer, 0, uniform_buffer, 0, uniform_data.nbytes - ) - - current_texture_view = present_context.get_current_texture().create_view() - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture_view, - "resolve_target": None, - "clear_value": (1, 1, 1, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - - render_pass.set_pipeline(render_pipeline) - render_pass.set_index_buffer(index_buffer, wgpu.IndexFormat.uint32) - render_pass.set_vertex_buffer(0, vertex_buffer) - for bind_group_id, bind_group in enumerate(bind_groups): - render_pass.set_bind_group(bind_group_id, bind_group, [], 0, 99) - render_pass.draw_indexed(index_data.size, 1, 0, 0, 0) - render_pass.end() - - device.queue.submit([command_encoder.finish()]) - - canvas.request_draw() - - -canvas.request_draw(draw_frame) - -if __name__ == "__main__": - run() diff --git a/examples/events.py b/examples/events.py deleted file mode 100644 index 17ba923..0000000 --- a/examples/events.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -A simple example to demonstrate events. -""" -from wgpu.gui.auto import WgpuCanvas, run, call_later - - -class MyCanvas(WgpuCanvas): - def handle_event(self, event): - if event["event_type"] != "pointer_move": - print(event) - - -if __name__ == "__main__": - canvas = MyCanvas(size=(640, 480), title="wgpu events") - - def send_message(message): - print(f"Message: {message}") - - call_later(2, send_message, "hello") - - run() diff --git a/examples/screenshots/cube.png b/examples/screenshots/cube.png deleted file mode 100644 index 9003dae..0000000 Binary files a/examples/screenshots/cube.png and /dev/null differ diff --git a/examples/screenshots/triangle_auto.png b/examples/screenshots/triangle_auto.png deleted file mode 100644 index 5c9bb7e..0000000 Binary files a/examples/screenshots/triangle_auto.png and /dev/null differ diff --git a/examples/triangle.py b/examples/triangle.py deleted file mode 100644 index 80a14de..0000000 --- a/examples/triangle.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -Example use of the wgpu API to draw a triangle. This example is set up -so it can be run on canvases provided by any backend. Running this file -as a script will use the auto-backend (using either glfw or jupyter). 
-
-Similar example in other languages / APIs:
-
-* Rust wgpu:
-  https://github.com/gfx-rs/wgpu-rs/blob/master/examples/hello-triangle/main.rs
-* C wgpu:
-  https://github.com/gfx-rs/wgpu/blob/master/examples/triangle/main.c
-* Python Vulkan:
-  https://github.com/realitix/vulkan/blob/master/example/contribs/example_glfw.py
-
-"""
-
-import wgpu
-
-
-# %% Shaders
-
-
-shader_source = """
-struct VertexInput {
-    @builtin(vertex_index) vertex_index : u32,
-};
-struct VertexOutput {
-    @location(0) color : vec4<f32>,
-    @builtin(position) pos: vec4<f32>,
-};
-
-@vertex
-fn vs_main(in: VertexInput) -> VertexOutput {
-    var positions = array<vec2<f32>, 3>(
-        vec2<f32>(0.0, -0.5),
-        vec2<f32>(0.5, 0.5),
-        vec2<f32>(-0.5, 0.75),
-    );
-    var colors = array<vec3<f32>, 3>(  // srgb colors
-        vec3<f32>(1.0, 1.0, 0.0),
-        vec3<f32>(1.0, 0.0, 1.0),
-        vec3<f32>(0.0, 1.0, 1.0),
-    );
-    let index = i32(in.vertex_index);
-    var out: VertexOutput;
-    out.pos = vec4<f32>(positions[index], 0.0, 1.0);
-    out.color = vec4<f32>(colors[index], 1.0);
-    return out;
-}
-
-@fragment
-fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
-    let physical_color = pow(in.color.rgb, vec3<f32>(2.2));  // gamma correct
-    return vec4<f32>(physical_color, in.color.a);
-}
-"""
-
-
-# %% The wgpu calls
-
-
-def main(canvas, power_preference="high-performance", limits=None):
-    """Regular function to setup a viz on the given canvas."""
-    adapter = wgpu.gpu.request_adapter(power_preference=power_preference)
-    device = adapter.request_device(required_limits=limits)
-    return _main(canvas, device)
-
-
-async def main_async(canvas):
-    """Async function to setup a viz on the given canvas."""
-    adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance")
-    device = await adapter.request_device_async(required_limits={})
-    return _main(canvas, device)
-
-
-def _main(canvas, device):
-    shader = device.create_shader_module(code=shader_source)
-
-    # No bind group and layout, we should not create empty ones.
- pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - present_context = canvas.get_context() - render_texture_format = present_context.get_preferred_format(device.adapter) - present_context.configure(device=device, format=render_texture_format) - - render_pipeline = device.create_render_pipeline( - layout=pipeline_layout, - vertex={ - "module": shader, - "entry_point": "vs_main", - "buffers": [], - }, - primitive={ - "topology": wgpu.PrimitiveTopology.triangle_list, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.none, - }, - depth_stencil=None, - multisample=None, - fragment={ - "module": shader, - "entry_point": "fs_main", - "targets": [ - { - "format": render_texture_format, - "blend": { - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - }, - ], - }, - ) - - def draw_frame(): - current_texture = present_context.get_current_texture() - command_encoder = device.create_command_encoder() - - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture.create_view(), - "resolve_target": None, - "clear_value": (0, 0, 0, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - - render_pass.set_pipeline(render_pipeline) - # render_pass.set_bind_group(0, no_bind_group, [], 0, 1) - render_pass.draw(3, 1, 0, 0) - render_pass.end() - device.queue.submit([command_encoder.finish()]) - - canvas.request_draw(draw_frame) - return device - - -if __name__ == "__main__": - from wgpu.gui.auto import WgpuCanvas, run - - canvas = WgpuCanvas(size=(640, 480), title="wgpu triangle") - main(canvas) - run() diff --git a/examples/triangle_auto.py b/examples/triangle_auto.py deleted file mode 100644 index 542e7ca..0000000 --- a/examples/triangle_auto.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Import the viz from triangle.py and run it using the auto-gui. -""" -# test_example = true - -import sys -from pathlib import Path - -from wgpu.gui.auto import WgpuCanvas, run - -sys.path.insert(0, str(Path(__file__).parent)) - -from triangle import main # noqa: E402, The function to call to run the visualization - - -canvas = WgpuCanvas() -device = main(canvas) - - -if __name__ == "__main__": - run() diff --git a/examples/triangle_glfw.py b/examples/triangle_glfw.py deleted file mode 100644 index b2b34b7..0000000 --- a/examples/triangle_glfw.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -Import the viz from triangle.py and run it using glfw (which uses asyncio for the event loop). - -# run_example = false -""" - -import sys -from pathlib import Path - -from wgpu.gui.glfw import WgpuCanvas, run - -sys.path.insert(0, str(Path(__file__).parent)) - -from triangle import main # noqa: E402, The function to call to run the visualization - - -canvas = WgpuCanvas() -device = main(canvas) - - -if __name__ == "__main__": - run() diff --git a/examples/triangle_glsl.py b/examples/triangle_glsl.py deleted file mode 100644 index 702d3a8..0000000 --- a/examples/triangle_glsl.py +++ /dev/null @@ -1,143 +0,0 @@ -""" -The triangle example, using GLSL shaders. 
- -""" - -import wgpu - - -# %% Shaders - - -vertex_shader = """ -#version 450 core -layout(location = 0) out vec4 color; -void main() -{ - vec2 positions[3] = vec2[3]( - vec2(0.0, -0.5), - vec2(0.5, 0.5), - vec2(-0.5, 0.75) - ); - vec3 colors[3] = vec3[3]( // srgb colors - vec3(1.0, 1.0, 0.0), - vec3(1.0, 0.0, 1.0), - vec3(0.0, 1.0, 1.0) - ); - int index = int(gl_VertexID); - gl_Position = vec4(positions[index], 0.0, 1.0); - color = vec4(colors[index], 1.0); -} -""" - -fragment_shader = """ -#version 450 core -out vec4 FragColor; -layout(location = 0) in vec4 color; -void main() -{ - vec3 physical_color = pow(color.rgb, vec3(2.2)); // gamma correct - FragColor = vec4(physical_color, color.a); -} -""" - - -# %% The wgpu calls - - -def main(canvas, power_preference="high-performance", limits=None): - """Regular function to setup a viz on the given canvas.""" - adapter = wgpu.gpu.request_adapter(power_preference=power_preference) - device = adapter.request_device(required_limits=limits) - return _main(canvas, device) - - -async def main_async(canvas): - """Async function to setup a viz on the given canvas.""" - adapter = await wgpu.gpu.request_adapter_async(power_preference="high-performance") - device = await adapter.request_device_async(required_limits={}) - return _main(canvas, device) - - -def _main(canvas, device): - vert_shader = device.create_shader_module(label="triangle_vert", code=vertex_shader) - frag_shader = device.create_shader_module( - label="triangle_frag", code=fragment_shader - ) - - # No bind group and layout, we should not create empty ones. - pipeline_layout = device.create_pipeline_layout(bind_group_layouts=[]) - - present_context = canvas.get_context() - render_texture_format = present_context.get_preferred_format(device.adapter) - present_context.configure(device=device, format=render_texture_format) - - render_pipeline = device.create_render_pipeline( - layout=pipeline_layout, - vertex={ - "module": vert_shader, - "entry_point": "main", - "buffers": [], - }, - primitive={ - "topology": wgpu.PrimitiveTopology.triangle_list, - "front_face": wgpu.FrontFace.ccw, - "cull_mode": wgpu.CullMode.none, - }, - depth_stencil=None, - multisample=None, - fragment={ - "module": frag_shader, - "entry_point": "main", - "targets": [ - { - "format": render_texture_format, - "blend": { - "color": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - "alpha": ( - wgpu.BlendFactor.one, - wgpu.BlendFactor.zero, - wgpu.BlendOperation.add, - ), - }, - }, - ], - }, - ) - - def draw_frame(): - current_texture = present_context.get_current_texture() - command_encoder = device.create_command_encoder() - - render_pass = command_encoder.begin_render_pass( - color_attachments=[ - { - "view": current_texture.create_view(), - "resolve_target": None, - "clear_value": (0, 0, 0, 1), - "load_op": wgpu.LoadOp.clear, - "store_op": wgpu.StoreOp.store, - } - ], - ) - - render_pass.set_pipeline(render_pipeline) - # render_pass.set_bind_group(0, no_bind_group, [], 0, 1) - render_pass.draw(3, 1, 0, 0) - render_pass.end() - device.queue.submit([command_encoder.finish()]) - - canvas.request_draw(draw_frame) - return device - - -if __name__ == "__main__": - from wgpu.gui.auto import WgpuCanvas, run - - canvas = WgpuCanvas(size=(640, 480), title="wgpu triangle") - main(canvas) - run() diff --git a/examples/triangle_qt.py b/examples/triangle_qt.py deleted file mode 100644 index 033d7b9..0000000 --- a/examples/triangle_qt.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Import the viz from 
triangle.py and run it in a Qt window. -Works with either PySide6, PyQt6, PyQt5 or PySide2. - -# run_example = false -""" -import importlib - -# For the sake of making this example Just Work, we try multiple QT libs -for lib in ("PySide6", "PyQt6", "PySide2", "PyQt5"): - try: - QtWidgets = importlib.import_module(".QtWidgets", lib) - break - except ModuleNotFoundError: - pass - - -from wgpu.gui.qt import WgpuCanvas # WgpuCanvas is a QWidget subclass - -from triangle import main # The function to call to run the visualization - - -app = QtWidgets.QApplication([]) -canvas = WgpuCanvas() - -device = main(canvas) - -# Enter Qt event loop (compatible with qt5/qt6) -app.exec() if hasattr(app, "exec") else app.exec_() - - -# For those interested, this is a simple way to integrate Qt's event -# loop with asyncio, but for real apps you probably want to use -# something like the qasync library. -# async def mainloop(): -# await main_async(canvas) -# while not canvas.is_closed(): -# await asyncio.sleep(0.001) -# app.flush() -# app.processEvents() -# loop.stop() diff --git a/examples/triangle_qt_embed.py b/examples/triangle_qt_embed.py deleted file mode 100644 index 42c9864..0000000 --- a/examples/triangle_qt_embed.py +++ /dev/null @@ -1,53 +0,0 @@ -""" -An example demonstrating a qt app with a wgpu viz inside. -If needed, change the PySide6 import to e.g. PyQt6, PyQt5, or PySide2. - -# run_example = false -""" -import importlib - -# For the sake of making this example Just Work, we try multiple QT libs -for lib in ("PySide6", "PyQt6", "PySide2", "PyQt5"): - try: - QtWidgets = importlib.import_module(".QtWidgets", lib) - break - except ModuleNotFoundError: - pass - - -from wgpu.gui.qt import WgpuWidget - -from triangle import main - - -class ExampleWidget(QtWidgets.QWidget): - def __init__(self): - super().__init__() - self.resize(640, 480) - self.setWindowTitle("wgpu triangle embedded in a qt app") - - splitter = QtWidgets.QSplitter() - - self.button = QtWidgets.QPushButton("Hello world", self) - self.canvas1 = WgpuWidget(splitter) - self.canvas2 = WgpuWidget(splitter) - - splitter.addWidget(self.canvas1) - splitter.addWidget(self.canvas2) - - layout = QtWidgets.QHBoxLayout() - layout.addWidget(self.button, 0) - layout.addWidget(splitter, 1) - self.setLayout(layout) - - self.show() - - -app = QtWidgets.QApplication([]) -example = ExampleWidget() - -main(example.canvas1) -main(example.canvas2) - -# Enter Qt event loop (compatible with qt5/qt6) -app.exec() if hasattr(app, "exec") else app.exec_() diff --git a/examples/triangle_subprocess.py b/examples/triangle_subprocess.py deleted file mode 100644 index e1c2e64..0000000 --- a/examples/triangle_subprocess.py +++ /dev/null @@ -1,84 +0,0 @@ -""" -An example showing that with WGPU you can draw to the window of another -process. Just a proof of concept, this is far from perfect yet: - -* It works if I run it in Pyzo, but not if I run it from the terminal. -* I only tried it on Windows. -* You'll want to let the proxy know about size changes. -* The request_draw should invoke a draw (in asyncio?), not draw directly. -* Properly handling closing the figure (from both ends). 
- -# run_example = false -""" - -import sys -import time -import subprocess - -from wgpu.gui import WgpuCanvasBase - -# Import the (async) function that we must call to run the visualization -from triangle import main - - -code = """ -import sys -from PySide6 import QtWidgets # Use either PySide6 or PyQt6 -from wgpu.gui.qt import WgpuCanvas - -app = QtWidgets.QApplication([]) -canvas = WgpuCanvas(title="wgpu triangle in Qt subprocess") - -print(canvas.get_window_id()) -#print(canvas.get_display_id()) -print(canvas.get_physical_size()) -sys.stdout.flush() - -app.exec_() -""" - - -class ProxyCanvas(WgpuCanvasBase): - def __init__(self): - super().__init__() - self._window_id = int(p.stdout.readline().decode()) - self._psize = tuple( - int(x) for x in p.stdout.readline().decode().strip().strip("()").split(",") - ) - print(self._psize) - time.sleep(0.2) - - def get_window_id(self): - return self._window_id - - def get_physical_size(self): - return self._psize - - def get_pixel_ratio(self): - return 1 - - def get_logical_size(self): - return self._psize - - def set_logical_size(self, width, height): - pass - - def close(self): - p.kill() - - def is_closed(self): - raise NotImplementedError() - - def _request_draw(self): - self.draw_frame() - - -# Create subprocess -p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE) - -# Create a canvas that maps to the window of that subprocess -canvas = ProxyCanvas() - -# Go! -main(canvas) -time.sleep(3) diff --git a/examples/triangle_wx.py b/examples/triangle_wx.py deleted file mode 100644 index 22c9002..0000000 --- a/examples/triangle_wx.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -Import the viz from triangle.py and run it in a wxPython window. -""" -# run_example = false - -import wx -from wgpu.gui.wx import WgpuCanvas - -from examples.triangle import main # The function to call to run the visualization - - -app = wx.App() -canvas = WgpuCanvas() - -main(canvas) -app.MainLoop() diff --git a/examples/triangle_wx_embed.py b/examples/triangle_wx_embed.py deleted file mode 100644 index e45c13d..0000000 --- a/examples/triangle_wx_embed.py +++ /dev/null @@ -1,40 +0,0 @@ -""" -An example demonstrating a wx app with a wgpu viz inside. -""" -# run_example = false - -import wx -from wgpu.gui.wx import WgpuWidget - -from examples.triangle import main - - -class Example(wx.Frame): - def __init__(self): - super().__init__(None, title="wgpu triangle embedded in a wx app") - self.SetSize(640, 480) - - splitter = wx.SplitterWindow(self) - - self.button = wx.Button(self, -1, "Hello world") - self.canvas1 = WgpuWidget(splitter) - self.canvas2 = WgpuWidget(splitter) - - splitter.SplitVertically(self.canvas1, self.canvas2) - splitter.SetSashGravity(0.5) - - sizer = wx.BoxSizer(wx.HORIZONTAL) - sizer.Add(self.button, 0, wx.EXPAND) - sizer.Add(splitter, 1, wx.EXPAND) - self.SetSizer(sizer) - - self.Show() - - -app = wx.App() -example = Example() - -main(example.canvas1) -main(example.canvas2) - -app.MainLoop() diff --git a/examples/wgpu-examples.ipynb b/examples/wgpu-examples.ipynb deleted file mode 100644 index 3e67105..0000000 --- a/examples/wgpu-examples.ipynb +++ /dev/null @@ -1,117 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "afd9b3fd", - "metadata": {}, - "source": [ - "# WGPU notebook examples" - ] - }, - { - "cell_type": "markdown", - "id": "2e610ab9", - "metadata": {}, - "source": [ - "## Triangle example\n", - "\n", - "We import the triangle example and show it in the notebook." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c6e4ffe0", - "metadata": {}, - "outputs": [], - "source": [ - "from wgpu.gui.auto import WgpuCanvas, run\n", - "import triangle\n", - "\n", - "canvas = WgpuCanvas(size=(640, 480), title=\"wgpu triangle with GLFW\")\n", - "\n", - "triangle.main(canvas)\n", - "canvas" - ] - }, - { - "cell_type": "markdown", - "id": "e120b752", - "metadata": {}, - "source": [ - "## Cube example\n", - "\n", - "An interactive example this time." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e4f9f67d", - "metadata": {}, - "outputs": [], - "source": [ - "from cube import canvas\n", - "\n", - "canvas" - ] - }, - { - "cell_type": "markdown", - "id": "749ffb40", - "metadata": {}, - "source": [ - "## Event example\n", - "\n", - "The code below is a copy from `show_events.py`. It is just to show how events are handled. These events are the same accross all auto-backends." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c858215a", - "metadata": {}, - "outputs": [], - "source": [ - "from wgpu.gui.auto import WgpuCanvas, run\n", - "\n", - "class MyCanvas(WgpuCanvas):\n", - " def handle_event(self, event):\n", - " if event[\"event_type\"] != \"pointer_move\":\n", - " print(event)\n", - "\n", - "canvas = MyCanvas(size=(640, 480), title=\"wgpu triangle with GLFW\")\n", - "canvas" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6b92d13b", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.9" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -}