
Raise MSRV to Rust 1.80 #6105

Draft
wants to merge 10 commits into base: trunk
6 changes: 3 additions & 3 deletions .github/workflows/ci.yml
@@ -34,12 +34,12 @@ env:
# We sometimes need nightly to use special things in CI.
#
# In order to prevent CI regressions, we pin the nightly version.
NIGHTLY_VERSION: "nightly-2023-12-17"
NIGHTLY_VERSION: "nightly-2024-08-11"
# This is the MSRV used by `wgpu` itself and all surrounding infrastructure.
REPO_MSRV: "1.76"
REPO_MSRV: "1.80.1"
# This is the MSRV used by the `wgpu-core`, `wgpu-hal`, and `wgpu-types` crates,
# to ensure that they can be used with firefox.
CORE_MSRV: "1.76"
CORE_MSRV: "1.80.1"

#
# Environment variables
22 changes: 13 additions & 9 deletions Cargo.lock


4 changes: 2 additions & 2 deletions Cargo.toml
@@ -42,7 +42,7 @@ default-members = [

[workspace.package]
edition = "2021"
rust-version = "1.76"
rust-version = "1.80.1"
keywords = ["graphics"]
license = "MIT OR Apache-2.0"
homepage = "https://wgpu.rs/"
@@ -77,7 +77,7 @@ bincode = "1"
bit-vec = "0.8"
bitflags = "2.6"
bytemuck = { version = "1.19" }
cfg_aliases = "0.1"
cfg_aliases = "0.2.1"
cfg-if = "1"
criterion = "0.5"
codespan-reporting = "0.11"
6 changes: 5 additions & 1 deletion benches/Cargo.toml
@@ -21,6 +21,11 @@ path = "benches/root.rs"
# tracy = ["dep:tracy-client", "profiling/profile-with-tracy"]
# superluminal = ["profiling/profile-with-superluminal"]

+[lints.rust]
+unexpected_cfgs = { level = "warn", check-cfg = [
+    'cfg(feature, values("tracy"))',
+] }

[dependencies]
bincode.workspace = true
bytemuck.workspace = true
@@ -38,7 +43,6 @@ naga = { workspace = true, features = [
"wgsl-out",
] }
nanorand.workspace = true
-once_cell.workspace = true
pollster.workspace = true
profiling.workspace = true
rayon.workspace = true
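Note on the `[lints.rust]` addition above: Rust 1.80 enables cfg checking by default, so the new `unexpected_cfgs` lint warns about any `cfg(feature = "tracy")` guard while the `tracy` feature itself is still commented out in this manifest; the `check-cfg` entry declares "tracy" as an expected value. A minimal sketch of the kind of guard this covers (the function below is hypothetical, not code from the benches):

```rust
// Only compiled when the optional `tracy` feature is enabled. Because that
// feature is commented out in benches/Cargo.toml, rustc 1.80 would emit an
// `unexpected_cfgs` warning here without the `check-cfg` declaration above.
#[cfg(feature = "tracy")]
fn announce_tracy() {
    println!("tracy profiling enabled");
}

// Fallback used in the default build, where the feature does not exist yet.
#[cfg(not(feature = "tracy"))]
fn announce_tracy() {}
```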
12 changes: 6 additions & 6 deletions benches/benches/computepass.rs
@@ -5,8 +5,8 @@ use std::{

use criterion::{criterion_group, Criterion, Throughput};
use nanorand::{Rng, WyRand};
-use once_cell::sync::Lazy;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
+use std::sync::LazyLock;

use crate::DeviceState;

@@ -424,7 +424,7 @@ impl ComputepassState {
}

fn run_bench(ctx: &mut Criterion) {
-let state = Lazy::new(ComputepassState::new);
+let state = LazyLock::new(ComputepassState::new);

let dispatch_count = dispatch_count();
let dispatch_count_bindless = dispatch_count_bindless();
@@ -449,7 +449,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{cpasses} computepasses x {dispatch_per_pass} dispatches ({label})"),
|b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -498,7 +498,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{threads} threads x {dispatch_per_pass} dispatch"),
|b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -538,7 +538,7 @@ fn run_bench(ctx: &mut Criterion) {
group.throughput(Throughput::Elements(dispatch_count_bindless as _));

group.bench_function(format!("{dispatch_count_bindless} dispatch"), |b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -579,7 +579,7 @@ fn run_bench(ctx: &mut Criterion) {
texture_count + storage_texture_count + storage_buffer_count
),
|b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter(|| state.device_state.queue.submit([]));
},
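The `once_cell`-to-std swap above is the heart of this file's changes: `std::sync::LazyLock`, stabilized in Rust 1.80, replaces `once_cell::sync::Lazy` with the same deferred-initialization API, so the benches no longer need the external crate. A minimal sketch of the pattern (the `BenchState` type below is a placeholder, not the real `ComputepassState`):

```rust
use std::sync::LazyLock;

struct BenchState {
    label: String,
}

impl BenchState {
    fn new() -> Self {
        // Stands in for the expensive device and pipeline setup in the benches.
        Self { label: String::from("ready") }
    }
}

fn main() {
    // Nothing is constructed yet; initialization is deferred until first use.
    let state = LazyLock::new(BenchState::new);

    // Force initialization up front so setup cost is not measured inside the
    // benchmark body, mirroring the `LazyLock::force(&state)` calls above.
    LazyLock::force(&state);

    // Subsequent uses deref to the already-initialized value.
    println!("{}", state.label);
}
```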
12 changes: 6 additions & 6 deletions benches/benches/renderpass.rs
@@ -5,8 +5,8 @@ use std::{

use criterion::{criterion_group, Criterion, Throughput};
use nanorand::{Rng, WyRand};
-use once_cell::sync::Lazy;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
+use std::sync::LazyLock;

use crate::DeviceState;

@@ -427,7 +427,7 @@ impl RenderpassState {
}

fn run_bench(ctx: &mut Criterion) {
-let state = Lazy::new(RenderpassState::new);
+let state = LazyLock::new(RenderpassState::new);

let draw_count = draw_count();
let vertex_buffer_count = draw_count * VERTEX_BUFFERS_PER_DRAW;
@@ -450,7 +450,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{rpasses} renderpasses x {draws_per_pass} draws ({label})"),
|b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -502,7 +502,7 @@ fn run_bench(ctx: &mut Criterion) {
for threads in [2, 4, 8] {
let draws_per_pass = draw_count / threads;
group.bench_function(format!("{threads} threads x {draws_per_pass} draws"), |b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -541,7 +541,7 @@ fn run_bench(ctx: &mut Criterion) {
group.throughput(Throughput::Elements(draw_count as _));

group.bench_function(format!("{draw_count} draws"), |b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
@@ -577,7 +577,7 @@ fn run_bench(ctx: &mut Criterion) {
texture_count + vertex_buffer_count
),
|b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter(|| state.device_state.queue.submit([]));
},
6 changes: 3 additions & 3 deletions benches/benches/resource_creation.rs
@@ -1,13 +1,13 @@
use std::time::{Duration, Instant};

use criterion::{criterion_group, Criterion, Throughput};
-use once_cell::sync::Lazy;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
+use std::sync::LazyLock;

use crate::DeviceState;

fn run_bench(ctx: &mut Criterion) {
-let state = Lazy::new(DeviceState::new);
+let state = LazyLock::new(DeviceState::new);

const RESOURCES_TO_CREATE: usize = 8;

@@ -19,7 +19,7 @@ fn run_bench(ctx: &mut Criterion) {
group.bench_function(
format!("{threads} threads x {resources_per_thread} resource"),
|b| {
-Lazy::force(&state);
+LazyLock::force(&state);

b.iter_custom(|iters| {
profiling::scope!("benchmark invocation");
1 change: 0 additions & 1 deletion examples/src/boids/mod.rs
@@ -2,7 +2,6 @@
// adapted from https://github.com/austinEng/webgpu-samples/blob/master/src/examples/computeBoids.ts

use nanorand::{Rng, WyRand};
-use std::mem::size_of;
use wgpu::util::DeviceExt;

// number of boid particles to simulate
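The dropped `use std::mem::size_of;` import above, and the similar removals in the examples below, are possible because Rust 1.80 added `size_of`, `size_of_val`, `align_of`, and `align_of_val` to the prelude. A minimal sketch of the effect (the `Particle` struct here is a simplified stand-in, not the example's actual type):

```rust
#[repr(C)]
#[derive(Clone, Copy)]
struct Particle {
    position: [f32; 2],
    velocity: [f32; 2],
}

fn main() {
    // On Rust 1.80+ both calls below come from the prelude, so the explicit
    // `use std::mem::size_of;` import is no longer needed.
    let stride = size_of::<Particle>();
    let particles = [Particle { position: [0.0; 2], velocity: [0.0; 2] }; 4];
    let buffer_len = size_of_val(&particles);

    assert_eq!(stride, 16);
    assert_eq!(buffer_len, 64);
}
```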
2 changes: 1 addition & 1 deletion examples/src/bunnymark/mod.rs
@@ -1,6 +1,6 @@
use bytemuck::{Pod, Zeroable};
use nanorand::{Rng, WyRand};
-use std::{borrow::Cow, mem::size_of};
+use std::borrow::Cow;
use wgpu::util::DeviceExt;
use winit::{
event::{ElementState, KeyEvent},
2 changes: 1 addition & 1 deletion examples/src/cube/mod.rs
@@ -1,5 +1,5 @@
use bytemuck::{Pod, Zeroable};
-use std::{f32::consts, mem::size_of};
+use std::f32::consts;
use wgpu::util::DeviceExt;

#[repr(C)]
2 changes: 1 addition & 1 deletion examples/src/hello_compute/mod.rs
@@ -1,4 +1,4 @@
-use std::{mem::size_of_val, str::FromStr};
+use std::str::FromStr;
use wgpu::util::DeviceExt;

// Indicates a u32 overflow in an intermediate Collatz value
2 changes: 0 additions & 2 deletions examples/src/hello_synchronization/mod.rs
@@ -1,5 +1,3 @@
-use std::mem::size_of_val;
-
const ARR_SIZE: usize = 128;

struct ExecuteResults {
2 changes: 0 additions & 2 deletions examples/src/hello_workgroups/mod.rs
@@ -7,8 +7,6 @@
//!
//! Only parts specific to this example will be commented.

-use std::mem::size_of_val;
-
use wgpu::util::DeviceExt;

async fn run() {
2 changes: 1 addition & 1 deletion examples/src/mipmap/mod.rs
@@ -1,5 +1,5 @@
use bytemuck::{Pod, Zeroable};
-use std::{f32::consts, mem::size_of};
+use std::f32::consts;
use wgpu::util::DeviceExt;

const TEXTURE_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Rgba8UnormSrgb;
2 changes: 1 addition & 1 deletion examples/src/msaa_line/mod.rs
@@ -7,7 +7,7 @@
//! * Set the primitive_topology to PrimitiveTopology::LineList.
//! * Vertices and Indices describe the two points that make up a line.

-use std::{iter, mem::size_of};
+use std::iter;

use bytemuck::{Pod, Zeroable};
use wgpu::util::DeviceExt;
2 changes: 0 additions & 2 deletions examples/src/repeated_compute/mod.rs
@@ -5,8 +5,6 @@
//! hello-compute example does not such as mapping buffers
//! and why use the async channels.

-use std::mem::size_of_val;
-
const OVERFLOW: u32 = 0xffffffff;

async fn run() {
2 changes: 1 addition & 1 deletion examples/src/shadow/mod.rs
@@ -1,4 +1,4 @@
-use std::{f32::consts, iter, mem::size_of, ops::Range, sync::Arc};
+use std::{f32::consts, iter, ops::Range, sync::Arc};

use bytemuck::{Pod, Zeroable};
use wgpu::util::{align_to, DeviceExt};
2 changes: 1 addition & 1 deletion examples/src/skybox/mod.rs
@@ -1,5 +1,5 @@
use bytemuck::{Pod, Zeroable};
-use std::{f32::consts, mem::size_of};
+use std::f32::consts;
use wgpu::{util::DeviceExt, AstcBlock, AstcChannel};

const IMAGE_SIZE: u32 = 256;
1 change: 0 additions & 1 deletion examples/src/srgb_blend/mod.rs
@@ -1,5 +1,4 @@
use bytemuck::{Pod, Zeroable};
-use std::mem::size_of;
use wgpu::util::DeviceExt;

#[repr(C)]
1 change: 0 additions & 1 deletion examples/src/stencil_triangles/mod.rs
@@ -1,5 +1,4 @@
use bytemuck::{Pod, Zeroable};
-use std::mem::size_of;
use wgpu::util::DeviceExt;

#[repr(C)]
2 changes: 0 additions & 2 deletions examples/src/storage_texture/mod.rs
@@ -14,8 +14,6 @@
//! A lot of things aren't explained here via comments. See hello-compute and
//! repeated-compute for code that is more thoroughly commented.

-use std::mem::size_of_val;
-
#[cfg(not(target_arch = "wasm32"))]
use crate::utils::output_image_native;
#[cfg(target_arch = "wasm32")]
5 changes: 1 addition & 4 deletions examples/src/texture_arrays/mod.rs
@@ -1,8 +1,5 @@
use bytemuck::{Pod, Zeroable};
-use std::{
-    mem::size_of,
-    num::{NonZeroU32, NonZeroU64},
-};
+use std::num::{NonZeroU32, NonZeroU64};
use wgpu::util::DeviceExt;

#[repr(C)]