diff --git a/Cargo.lock b/Cargo.lock index 58ae414..d51f311 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,18 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + [[package]] name = "aligned-vec" version = "0.6.4" @@ -79,6 +91,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "atomic_refcell" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41e67cd8309bbd06cd603a9e693a784ac2e5d1e955f11286e355089fcab3047c" + [[package]] name = "atomicwrites" version = "0.4.2" @@ -95,6 +113,19 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "av-data" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fca67ba5d317924c02180c576157afd54babe48a76ebc66ce6d34bb8ba08308e" +dependencies = [ + "byte-slice-cast", + "bytes", + "num-derive", + "num-rational", + "num-traits", +] + [[package]] name = "av1-grain" version = "0.2.4" @@ -154,6 +185,15 @@ dependencies = [ "serde", ] +[[package]] +name = "bitreader" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "886559b1e163d56c765bc3a985febb4eee8009f625244511d8ee3c432e08c066" +dependencies = [ + "cfg-if", +] + [[package]] name = "bitstream-io" version = "2.6.0" @@ -194,6 +234,12 @@ version = "1.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa3c856b712db6612c019f14756e64e4bcea13337a6b33b696333a9eaa2d06" +[[package]] +name = "byte-slice-cast" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" + [[package]] name = "bytemuck" version = "1.23.2" @@ -214,6 +260,12 @@ dependencies = [ "syn", ] +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + [[package]] name = "byteorder-lite" version = "0.1.0" @@ -247,6 +299,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb9f6e1368bd4621d2c86baa7e37de77a938adf5221e5dd3d6133340101b309e" dependencies = [ "bitflags 2.9.4", + "nix 0.30.1", "polling", "rustix 1.0.8", "slab", @@ -279,9 +332,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.35" +version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "590f9024a68a8c40351881787f1934dc11afd69090f5edb6831464694d836ea3" +checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", "jobserver", @@ -296,7 +349,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ "smallvec", - "target-lexicon", + "target-lexicon 0.12.16", +] + +[[package]] +name = "cfg-expr" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9acd0bdbbf4b2612d09f52ba61da432140cb10930354079d0d53fafc12968726" +dependencies = [ + "smallvec", + "target-lexicon 0.13.3", ] [[package]] @@ -305,6 +368,12 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "clipboard-win" version = "5.4.1" @@ -343,6 +412,15 @@ dependencies = [ "x11rb", ] +[[package]] +name = "cmake" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b042e5d8a74ae91bb0961acd039822472ec99f8ab0948cbf6d1369588f8be586" +dependencies = [ + "cc", +] + [[package]] name = "color-eyre" version = "0.6.5" @@ -392,20 +470,31 @@ dependencies = [ name = "cosmic-bg" version = "0.1.0" dependencies = [ + "calloop 0.14.3", "color-eyre", "colorgrad", "cosmic-bg-config", "cosmic-config", + "dirs", + "drm-fourcc", "eyre", "fast_image_resize", + "gstreamer", + "gstreamer-allocators", + "gstreamer-app", + "gstreamer-video", "image", "jxl-oxide", + "libavif-sys", + "nix 0.29.0", "notify", "rand 0.9.2", + "rayon", "smithay-client-toolkit 0.20.0", "tracing", "tracing-subscriber", "walkdir", + "wayland-protocols", ] [[package]] @@ -531,6 +620,28 @@ dependencies = [ "syn", ] +[[package]] +name = "dav1d" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80c3f80814db85397819d464bb553268992c393b4b3b5554b89c1655996d5926" +dependencies = [ + "av-data", + "bitflags 2.9.4", + "dav1d-sys", + "static_assertions", +] + +[[package]] +name = "dav1d-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c91aea6668645415331133ed6f8ddf0e7f40160cd97a12d59e68716a58704b" +dependencies = [ + "libc", + "system-deps 7.0.7", +] + [[package]] name = "derive_setters" version = "0.1.8" @@ -600,6 +711,12 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" +[[package]] 
+name = "drm-fourcc" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aafbcdb8afc29c1a7ee5fbe53b5d62f4565b35a042a662ca9fecd0b54dae6f4" + [[package]] name = "either" version = "1.15.0" @@ -658,6 +775,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fallible_collections" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a88c69768c0a15262df21899142bc6df9b9b823546d4b4b9a7bc2d6c448ec6fd" +dependencies = [ + "hashbrown 0.13.2", +] + [[package]] name = "fast-srgb8" version = "1.0.0" @@ -695,9 +821,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.0" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e178e4fba8a2726903f6ba98a6d221e76f9c12c650d5dc0e6afdc50677b49650" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" [[package]] name = "flate2" @@ -852,17 +978,242 @@ version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +[[package]] +name = "gio-sys" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0071fe88dba8e40086c8ff9bbb62622999f49628344b1d1bf490a48a29d80f22" +dependencies = [ + "glib-sys", + "gobject-sys", + "libc", + "system-deps 7.0.7", + "windows-sys 0.60.2", +] + [[package]] name = "glam" version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "151665d9be52f9bb40fc7966565d39666f2d1e69233571b71b87791c7e0528b3" +[[package]] +name = "glib" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16de123c2e6c90ce3b573b7330de19be649080ec612033d397d72da265f1bd8b" +dependencies = [ + "bitflags 2.9.4", + "futures-channel", + "futures-core", + "futures-executor", + "futures-task", + "futures-util", + "gio-sys", + "glib-macros", + 
"glib-sys", + "gobject-sys", + "libc", + "memchr", + "smallvec", +] + +[[package]] +name = "glib-macros" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf59b675301228a696fe01c3073974643365080a76cc3ed5bc2cbc466ad87f17" +dependencies = [ + "heck", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "glib-sys" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d95e1a3a19ae464a7286e14af9a90683c64d70c02532d88d87ce95056af3e6c" +dependencies = [ + "libc", + "system-deps 7.0.7", +] + +[[package]] +name = "gobject-sys" +version = "0.21.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dca35da0d19a18f4575f3cb99fe1c9e029a2941af5662f326f738a21edaf294" +dependencies = [ + "glib-sys", + "libc", + "system-deps 7.0.7", +] + +[[package]] +name = "gstreamer" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bed73742c5d54cb48533be608b67d89f96e1ebbba280be7823f1ef995e3a9d7" +dependencies = [ + "cfg-if", + "futures-channel", + "futures-core", + "futures-util", + "glib", + "gstreamer-sys", + "itertools 0.14.0", + "kstring", + "libc", + "muldiv", + "num-integer", + "num-rational", + "option-operations", + "pastey", + "pin-project-lite", + "smallvec", + "thiserror 2.0.16", +] + +[[package]] +name = "gstreamer-allocators" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8c98c09098dab9ded704c97124c32905f1be3b86014d5feb0947e38dd0ccc17" +dependencies = [ + "glib", + "gstreamer", + "gstreamer-allocators-sys", + "libc", +] + +[[package]] +name = "gstreamer-allocators-sys" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f12e03cd5d689bffbfbf97c2a3a1db2bb022ca2d3fe73c9885dc12531a33ed7" +dependencies = [ + "glib-sys", + "gobject-sys", + "gstreamer-sys", + "libc", + "system-deps 7.0.7", 
+] + +[[package]] +name = "gstreamer-app" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "895753fb0f976693f321e6b9d68f746ef9095f1a5b8277c11d85d807a949fbfc" +dependencies = [ + "futures-core", + "futures-sink", + "glib", + "gstreamer", + "gstreamer-app-sys", + "gstreamer-base", + "libc", +] + +[[package]] +name = "gstreamer-app-sys" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7719cee28afda1a48ab1ee93769628bd0653d3c5be1923bce9a8a4550fcc980" +dependencies = [ + "glib-sys", + "gstreamer-base-sys", + "gstreamer-sys", + "libc", + "system-deps 7.0.7", +] + +[[package]] +name = "gstreamer-base" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dd15c7e37d306573766834a5cbdd8ee711265f217b060f40a9a8eda45298488" +dependencies = [ + "atomic_refcell", + "cfg-if", + "glib", + "gstreamer", + "gstreamer-base-sys", + "libc", +] + +[[package]] +name = "gstreamer-base-sys" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27a2eda2c61e13c11883bf19b290d07ea6b53d04fd8bfeb7af64b6006c6c9ee6" +dependencies = [ + "glib-sys", + "gobject-sys", + "gstreamer-sys", + "libc", + "system-deps 7.0.7", +] + +[[package]] +name = "gstreamer-sys" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d88630697e757c319e7bcec7b13919ba80492532dd3238481c1c4eee05d4904" +dependencies = [ + "cfg-if", + "glib-sys", + "gobject-sys", + "libc", + "system-deps 7.0.7", +] + +[[package]] +name = "gstreamer-video" +version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33987f6a6a99750a07b0341d6288bac89b9b301be4672a209935203d4608d547" +dependencies = [ + "cfg-if", + "futures-channel", + "glib", + "gstreamer", + "gstreamer-base", + "gstreamer-video-sys", + "libc", + "thiserror 2.0.16", +] + +[[package]] +name = "gstreamer-video-sys" 
+version = "0.24.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a00c28faad96cd40a7b7592433051199691b131b08f622ed5d51c54e049792d3" +dependencies = [ + "glib-sys", + "gobject-sys", + "gstreamer-base-sys", + "gstreamer-sys", + "libc", + "system-deps 7.0.7", +] + [[package]] name = "hashbrown" -version = "0.15.5" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +dependencies = [ + "ahash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" [[package]] name = "heck" @@ -925,7 +1276,9 @@ checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" dependencies = [ "bytemuck", "byteorder-lite", + "dav1d", "image-webp", + "mp4parse", "num-traits", "png", "ravif", @@ -958,12 +1311,12 @@ checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" [[package]] name = "indexmap" -version = "2.11.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "equivalent", - "hashbrown", + "hashbrown 0.16.1", ] [[package]] @@ -1015,6 +1368,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "jobserver" version = "0.1.34" @@ -1244,18 +1606,47 @@ dependencies = [ "libc", ] +[[package]] +name = "kstring" +version = "2.0.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "558bf9508a558512042d3095138b1f7b8fe90c5467d94f9f1da28b3731c5dbd1" +dependencies = [ + "static_assertions", +] + [[package]] name = "lazy_static" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "libavif-sys" +version = "0.17.0+libavif.1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "490ca74b61e773140bebb086c81facb23273a99364a9ab0c92ab1532b90ade35" +dependencies = [ + "cmake", + "libc", + "libdav1d-sys", +] + [[package]] name = "libc" version = "0.2.175" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +[[package]] +name = "libdav1d-sys" +version = "0.7.1+libdav1d.1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d875e9669d116a603412a126de599b7bf47789a365b79fcf461fbf9c18d141f" +dependencies = [ + "libc", +] + [[package]] name = "libfuzzer-sys" version = "0.4.10" @@ -1306,11 +1697,10 @@ checksum = "f5e54036fe321fd421e10d732f155734c4e4afd610dd556d9a82833ab3ee0bed" [[package]] name = "lock_api" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] @@ -1399,12 +1789,56 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "mp4parse" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63a35203d3c6ce92d5251c77520acb2e57108c88728695aa883f70023624c570" +dependencies = [ + "bitreader", + "byteorder", + "fallible_collections", + "log", + "num-traits", + "static_assertions", +] + +[[package]] +name = 
"muldiv" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956787520e75e9bd233246045d19f42fb73242759cc57fba9611d940ae96d4b0" + [[package]] name = "new_debug_unreachable" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "cfg_aliases", + "libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -1554,6 +1988,15 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" +[[package]] +name = "option-operations" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aca39cf52b03268400c16eeb9b56382ea3c3353409309b63f5c8f0b1faf42754" +dependencies = [ + "pastey", +] + [[package]] name = "owo-colors" version = "4.2.2" @@ -1615,6 +2058,12 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pastey" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d6c094ee800037dff99e02cab0eaf3142826586742a270ab3d7a62656bd27a" + [[package]] name = "phf" version = "0.11.3" @@ -1711,6 +2160,15 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "proc-macro-crate" +version = "3.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +dependencies = [ + "toml_edit 0.23.7", +] + [[package]] name = "proc-macro2" version = "1.0.101" @@ -1842,7 +2300,7 @@ dependencies = [ "built", "cfg-if", "interpolate_name", - "itertools", + "itertools 0.12.1", "libc", "libfuzzer-sys", "log", @@ -1857,7 +2315,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha 0.3.1", "simd_helpers", - "system-deps", + "system-deps 6.2.2", "thiserror 1.0.69", "v_frame", "wasm-bindgen", @@ -2010,18 +2468,28 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -2037,6 +2505,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +dependencies = [ + "serde_core", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -2160,6 +2637,12 @@ dependencies = [ "serde", ] +[[package]] +name = 
"static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "strsim" version = "0.11.1" @@ -2183,10 +2666,23 @@ version = "6.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" dependencies = [ - "cfg-expr", + "cfg-expr 0.15.8", + "heck", + "pkg-config", + "toml 0.8.23", + "version-compare", +] + +[[package]] +name = "system-deps" +version = "7.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c8f33736f986f16d69b6cb8b03f55ddcad5c41acc4ccc39dd88e84aa805e7f" +dependencies = [ + "cfg-expr 0.20.4", "heck", "pkg-config", - "toml", + "toml 0.9.8", "version-compare", ] @@ -2196,6 +2692,12 @@ version = "0.12.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" +[[package]] +name = "target-lexicon" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df7f62577c25e07834649fc3b39fafdc597c0a3527dc1c60129201ccfcbaa50c" + [[package]] name = "tempfile" version = "3.21.0" @@ -2265,9 +2767,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +dependencies = [ + "indexmap", + "serde_core", + "serde_spanned 1.0.3", + "toml_datetime 0.7.3", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] @@ -2279,6 +2796,15 @@ dependencies = [ 
"serde", ] +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" version = "0.22.27" @@ -2287,11 +2813,38 @@ checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap", "serde", - "serde_spanned", - "toml_datetime", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +dependencies = [ + "indexmap", + "toml_datetime 0.7.3", + "toml_parser", "winnow", ] +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" + [[package]] name = "tracing" version = "0.1.41" @@ -2389,6 +2942,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + [[package]] name = "walkdir" version = "2.5.0" diff --git a/Cargo.toml b/Cargo.toml index f8faf36..f8b80df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,20 +4,42 @@ version = "0.1.0" edition = "2024" rust-version = "1.85" +[features] +default = ["animated"] +animated = ["dep:gstreamer", 
"dep:gstreamer-allocators", "dep:gstreamer-app", "dep:gstreamer-video", "dep:libavif-sys"] + [dependencies] +# Calloop with signal handling for graceful shutdown +calloop = { version = "0.14", features = ["signals"] } color-eyre = "0.6.5" colorgrad = { workspace = true } cosmic-bg-config = { path = "./config" } +# Directory utilities for cache paths +dirs = "6.0" +drm-fourcc = "2.2" eyre = "0.6.12" +# libavif-sys for animated AVIF (AVIS) CPU decoding +libavif-sys = { version = "0.17", optional = true, default-features = false, features = ["codec-dav1d"] } +# For DMA-BUF file descriptor duplication +nix = { version = "0.29", features = ["fs"] } fast_image_resize = { version = "5.1.4", features = ["image"] } -image = { workspace = true, features = ["hdr", "jpeg", "png", "rayon", "webp"] } +# GStreamer for hardware-accelerated video decoding (VAAPI, NVDEC, etc.) +gstreamer = { version = "0.24", optional = true } +gstreamer-allocators = { version = "0.24", optional = true } +gstreamer-app = { version = "0.24", optional = true, features = ["v1_24"] } +gstreamer-video = { version = "0.24", optional = true } +# avif-native required for decoding AVIF (avif is for encoding only) +image = { workspace = true, features = ["avif-native", "hdr", "jpeg", "png", "rayon", "webp"] } jxl-oxide = { version = "0.12.4", features = ["image"] } notify = "8.2.0" rand = "0.9.2" +rayon = "1.11" sctk = { package = "smithay-client-toolkit", version = "0.20.0" } tracing = { workspace = true } tracing-subscriber = "0.3.20" walkdir = "2.5" +# Wayland protocols for DMA-BUF support +wayland-protocols = { version = "0.32", features = ["client"] } [workspace] members = ["config"] @@ -33,3 +55,4 @@ features = ["calloop"] [profile.release] opt-level = 3 +lto = "thin" diff --git a/README.md b/README.md index 04d76f8..a125e04 100644 --- a/README.md +++ b/README.md @@ -2,23 +2,181 @@ COSMIC session service which applies backgrounds to displays. 
Supports the following features: -- Supports common image formats supported by [image-rs](https://github.com/image-rs/image#supported-image-formats) +- Supports common image formats: JPEG, PNG, WebP, AVIF, JPEG XL, and more via [image-rs](https://github.com/image-rs/image#supported-image-formats) +- **Live/Animated wallpapers** - Animated AVIF, and video formats (MP4, WebM, MKV, etc.) with hardware acceleration - 8 and 10-bit background surface layers - Use of colors and gradients for backgrounds - Per-display background application - Wallpaper slideshows that alternate between backgrounds periodically +## Live Wallpaper Support + +The `animated` feature (enabled by default) adds support for animated wallpapers using GStreamer for hardware-accelerated video playback. + +### Supported Formats + +| Format | Extension | Decode Method | +|--------|-----------|---------------| +| Animated AVIF | `.avif` | CPU via libavif (frames cached in memory) | +| MPEG-4 | `.mp4`, `.m4v` | NVIDIA (all codecs), AMD/Intel (H.264/H.265 with freeworld drivers, VP9, AV1) | +| WebM | `.webm` | Full (VP8, VP9, AV1) - **Recommended for AMD without freeworld drivers** | +| Matroska | `.mkv` | Depends on contained codec | +| AVI | `.avi` | Depends on contained codec | +| QuickTime | `.mov` | Depends on contained codec | + +### Hardware Requirements + +#### NVIDIA GPUs +- **Driver**: NVIDIA proprietary driver 470+ +- **GStreamer plugins**: `gstreamer1-plugins-bad` (provides `nvh264dec`, `nvh265dec`, etc.) +- **Supported codecs**: H.264, H.265/HEVC, VP9, AV1 +- **Optional**: [gst-cuda-dmabuf](https://github.com/Ericky14/gst-cuda-dmabuf) plugin for zero-copy DMA-BUF rendering + +#### AMD/Intel GPUs (VAAPI) +- **Driver**: Mesa 21.0+ with VAAPI support +- **GStreamer plugins**: `gstreamer1-plugins-bad` (GStreamer 1.20+ provides `vah264dec`, `vapostproc`, etc.) 
+- **Zero-copy**: Native DMA-BUF support via `vapostproc` for efficient compositor rendering +- **Supported codecs** (varies by GPU generation): + - AMD RDNA/RDNA2+: VP9, AV1 (H.264/H.265 requires `mesa-va-drivers-freeworld` on Fedora) + - Intel Gen9+: H.264, H.265, VP9, AV1 +- **Recommendation**: Use **VP9 or AV1** encoded videos for best AMD compatibility, or install freeworld drivers for H.264/H.265 + +#### Software Fallback +If no hardware decoder is available, the system falls back to software decoding via GStreamer's `decodebin`. For H.264 content on systems without hardware decode (e.g., AMD on Fedora), install `openh264` for software decode support. + +### Codec Detection + +At startup, `cosmic-bg` automatically detects available hardware decoders and selects the best pipeline: + +1. **Probes GStreamer registry** for NVIDIA (NVDEC) and AMD/Intel (VAAPI) decoders +2. **Tests decoder functionality** - demotes non-functional decoders (e.g., NVDEC when CUDA unavailable) +3. **Selects optimal pipeline** based on container format and available decoders +4. **Falls back gracefully** to software decode if no hardware path available + +This ensures videos play correctly regardless of GPU vendor or codec availability. 
+ +### Performance + +| Scenario | CPU Usage | Notes | +|----------|-----------|-------| +| VP9 1080p on AMD (VAAPI) | ~0.2-0.5% | Hardware decode | +| H.264 1080p on AMD (VAAPI + freeworld) | ~0.2-0.5% | Hardware decode with mesa-va-drivers-freeworld | +| H.264 1080p on NVIDIA (NVDEC) | ~0.3-0.5% | Hardware decode | +| H.264 4K on AMD (software) | ~60-80% | Software fallback (no freeworld drivers) | +| Animated AVIF | ~1-5% | Depends on frame count/size | + +### Configuration + +Set an animated wallpaper via cosmic-config: + +```ron +( + output: "all", + source: Path("/path/to/video.webm"), + filter_by_theme: false, + rotation_frequency: 3600, + filter_method: Lanczos, + scaling_mode: Zoom, + sampling_method: Alphanumeric, + animation_settings: ( + loop_playback: true, + playback_speed: 1.0, + // frame_cache_size is reserved for a future release + ), +) +``` + +### Building Without Animation Support + +To build without video/animation support (smaller binary, no GStreamer dependency): + +```bash +cargo build --release --no-default-features +``` ## Dependencies Developers should install Rust from from https://rustup.rs/. +### Build Dependencies + - just - cargo / rustc - libwayland-dev - libxkbcommon-dev - mold - pkg-config +- **libdav1d-devel** - Required for static AVIF image decoding +- **nasm** - Required for building the dav1d AV1 decoder (used for animated AVIF) + +```bash +# Fedora +sudo dnf install libdav1d-devel nasm + +# Ubuntu/Debian +sudo apt install libdav1d-dev nasm + +# Arch +sudo pacman -S dav1d nasm +``` + +### For Live Wallpaper Support (animated feature) + +GStreamer 1.20+ with the following plugins: + +**Core (required)**: +- `gstreamer1` - Core GStreamer +- `gstreamer1-plugins-base` - Base plugins including `videoconvert` +- `gstreamer1-plugins-good` - Container demuxers (MP4, WebM, MKV) + +**Hardware Acceleration (recommended)**: +- `gstreamer1-plugins-bad` - NVIDIA NVDEC (`nvh264dec`, etc.) and AMD/Intel VA-API (`vah264dec`, `vapostproc`, etc.) 
+ +> **Note**: GStreamer 1.20+ includes the modern `va` plugin in `gstreamer1-plugins-bad`. The legacy `gstreamer1-vaapi` package is no longer required on modern systems. + +**Example installation**: + +```bash +# Fedora +sudo dnf install gstreamer1 gstreamer1-plugins-base gstreamer1-plugins-good \ + gstreamer1-plugins-bad-free gstreamer1-vaapi + +# Ubuntu/Debian +sudo apt install gstreamer1.0-plugins-base gstreamer1.0-plugins-good \ + gstreamer1.0-plugins-bad gstreamer1.0-vaapi + +# Arch +sudo pacman -S gstreamer gst-plugins-base gst-plugins-good \ + gst-plugins-bad gstreamer-vaapi +``` + +**H.264/H.265 Hardware Decode for AMD (Fedora)**: + +AMD GPUs on Fedora lack VAAPI H.264/H.265 hardware decode by default due to patent restrictions in the standard Mesa VA drivers. To enable **hardware-accelerated** H.264/H.265 decoding, install the freeworld Mesa VA drivers from RPM Fusion: + +```bash +# Requires RPM Fusion free repository to be enabled +# https://rpmfusion.org/Configuration + +# Replace standard Mesa VA drivers with freeworld version (includes H.264/H.265) +sudo dnf swap mesa-va-drivers mesa-va-drivers-freeworld +``` + +Verify hardware decode is working: +```bash +vainfo | grep -i h264 +# Should show: VAProfileH264Main : VAEntrypointVLD +``` + +**Software fallback (if hardware decode unavailable)**: + +If you cannot install the freeworld drivers, you can use the OpenH264 software decoder (high CPU usage for HD/4K content): + +```bash +sudo dnf --enable-repo=fedora-cisco-openh264 install gstreamer1-plugin-openh264 +``` + +**Recommendation**: Use VP9/WebM or AV1 encoded videos which have full VAAPI hardware support on AMD without needing freeworld drivers. 
### Install diff --git a/config/src/lib.rs b/config/src/lib.rs index 9499d23..9a5dbff 100644 --- a/config/src/lib.rs +++ b/config/src/lib.rs @@ -95,6 +95,9 @@ pub struct Entry { pub scaling_mode: ScalingMode, #[serde(default)] pub sampling_method: SamplingMethod, + /// Animation playback settings for animated wallpapers (AVIF, video) + #[serde(default)] + pub animation_settings: AnimationSettings, } /// A background image which is colored. @@ -111,15 +114,99 @@ pub struct Gradient { pub radius: f32, } -/// The source of a background image. +/// The source of a background image or animation. +/// +/// # Variants +/// +/// - `Path`: A static image or animated file (AVIF, video). The file extension +/// determines how it's rendered. Supported: jpg, png, webp, avif, jxl, mp4, webm, mkv. +/// - `Color`: A solid color or gradient background. +/// +/// # Example (RON format) +/// +/// ```ron +/// // Static image +/// source: Path("/usr/share/backgrounds/cosmic/default.jpg") +/// +/// // Animated wallpaper (video) +/// source: Path("/home/user/wallpapers/nature.webm") +/// +/// // Solid color (RGB values 0.0-1.0) +/// source: Color(Single([0.1, 0.1, 0.2])) +/// ``` #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub enum Source { /// Background image(s) from a path. + /// + /// If the path points to a directory, images are rotated based on + /// `rotation_frequency`. If it points to an animated file (AVIF, video), + /// it will be rendered as an animated wallpaper. Path(PathBuf), /// A background color or gradient. Color(Color), } +/// Settings for animated wallpaper playback (AVIF and video files). +/// +/// These settings control how animated wallpapers are rendered. The defaults +/// are optimized for smooth playback with minimal resource usage. 
+/// +/// # Example Configuration (RON format) +/// +/// ```ron +/// animation_settings: ( +/// loop_playback: true, +/// playback_speed: 1.0, +/// frame_cache_size: 30, +/// ) +/// ``` +/// +/// # Hardware Acceleration Notes +/// +/// For best performance with video wallpapers: +/// - **NVIDIA**: Any codec works well (H.264, H.265, VP9, AV1) +/// - **AMD (Mesa)**: Use VP9 or AV1 encoded videos for hardware decode +/// - **Intel**: Most codecs supported via VAAPI +/// +/// TODO: These settings are not yet implemented. Videos always loop unconditionally. +/// Future work: wire these settings into the video player. +#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Default)] +pub struct AnimationSettings { + /// Whether to loop the animation when it reaches the end. + /// + /// Default: `true` (continuous looping) + #[serde(default = "default_true")] + pub loop_playback: bool, + + /// Playback speed multiplier. + /// + /// - `1.0` = normal speed + /// - `0.5` = half speed (slower) + /// - `2.0` = double speed (faster) + /// + /// Default: `1.0` + /// + /// Note: This setting is currently reserved for future use. + #[serde(default = "default_playback_speed")] + pub playback_speed: f32, +} + +fn default_true() -> bool { + true +} + +fn default_playback_speed() -> f32 { + 1.0 +} + +impl AnimationSettings { + /// Create default animation settings. + #[must_use] + pub fn new() -> Self { + Self::default() + } +} + impl Entry { /// Define a preferred background for a given output device. 
pub fn new(output: String, source: Source) -> Self { @@ -131,6 +218,7 @@ impl Entry { filter_method: FilterMethod::default(), scaling_mode: ScalingMode::default(), sampling_method: SamplingMethod::default(), + animation_settings: AnimationSettings::default(), } } @@ -146,6 +234,7 @@ impl Entry { filter_method: FilterMethod::default(), scaling_mode: ScalingMode::default(), sampling_method: SamplingMethod::default(), + animation_settings: AnimationSettings::default(), } } } diff --git a/debian/control b/debian/control index 739e3af..ffe730e 100644 --- a/debian/control +++ b/debian/control @@ -12,6 +12,13 @@ Build-Depends: mold, nasm, pkg-config, + cmake, + meson, + ninja-build, + clang, + libgstreamer1.0-dev, + libgstreamer-plugins-base1.0-dev, + libdav1d-dev, Standards-Version: 4.3.0 Homepage: https://github.com/pop-os/cosmic-bg @@ -19,5 +26,14 @@ Package: cosmic-bg Architecture: amd64 arm64 Depends: ${misc:Depends}, - ${shlibs:Depends} + ${shlibs:Depends}, + gstreamer1.0-plugins-base, + gstreamer1.0-plugins-good, + gstreamer1.0-plugins-bad, +Recommends: + gstreamer1.0-plugins-ugly, + gstreamer1.0-libav, + gstreamer1.0-vaapi, +Suggests: + gst-cuda-dmabuf, Description: Cosmic Background diff --git a/debian/cosmic-bg.postinst b/debian/cosmic-bg.postinst new file mode 100755 index 0000000..c18c664 --- /dev/null +++ b/debian/cosmic-bg.postinst @@ -0,0 +1,18 @@ +#!/bin/sh +set -e + +case "$1" in + configure) + # Add gst-cuda-dmabuf repository if not already present + REPO_FILE="/etc/apt/sources.list.d/gst-cuda-dmabuf.list" + if [ ! 
-f "$REPO_FILE" ]; then + echo "deb [trusted=yes] https://ericky14.github.io/gst-cuda-dmabuf stable main" > "$REPO_FILE" + # Update apt cache for the new repo (best effort, don't fail install) + apt-get update -o Dir::Etc::sourcelist="$REPO_FILE" -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0" 2>/dev/null || true + fi + ;; +esac + +#DEBHELPER# + +exit 0 diff --git a/src/animated/detection.rs b/src/animated/detection.rs new file mode 100644 index 0000000..d93a3d1 --- /dev/null +++ b/src/animated/detection.rs @@ -0,0 +1,347 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! File type detection utilities for animated wallpapers. +//! +//! This module detects supported video formats based on: +//! 1. File extension (quick filter for potentially supported files) +//! 2. Available GStreamer decoders on the current system + +use std::path::Path; +use std::sync::OnceLock; + +use gstreamer::prelude::*; +use tracing::{debug, info}; + +/// Video container extensions that may contain playable video. +/// These are checked case-insensitively when determining if a file can be +/// rendered as an animated wallpaper. +/// Note: AVIF is handled specially - only animated AVIF (AVIS) is treated as video +const VIDEO_EXTENSIONS: &[&str] = &[ + "mp4", // MPEG-4 container (typically H.264/H.265 codec) + "webm", // WebM container (VP8/VP9/AV1) + "mkv", // Matroska container (any codec) + "avi", // AVI container (legacy format) + "mov", // QuickTime container (typically H.264) + "m4v", // MPEG-4 Video (Apple variant of MP4) + "ogv", // Ogg Video container (Theora codec) +]; + +/// Cached system codec capabilities. +static CODEC_SUPPORT: OnceLock = OnceLock::new(); + +/// System codec capabilities detected at runtime. 
+#[derive(Debug, Clone, Default)] +pub struct CodecSupport { + /// NVIDIA hardware decode available (NVDEC) + pub has_nvidia: bool, + /// AMD/Intel VAAPI decode available + pub has_vaapi: bool, + /// List of available hardware decoder element names + pub hw_decoders: Vec, +} + +/// Detect available codec support on the current system. +/// +/// This probes GStreamer for available hardware decoders and caches the result. +/// The detection is performed once on first call. +pub fn get_codec_support() -> &'static CodecSupport { + CODEC_SUPPORT.get_or_init(detect_codec_support) +} + +/// Perform the actual codec detection. +fn detect_codec_support() -> CodecSupport { + // Try to initialize GStreamer + if gstreamer::init().is_err() { + return CodecSupport::default(); + } + + let mut support = CodecSupport::default(); + + // Check for NVIDIA decoders + let nvidia_decoders = [ + "nvh264dec", + "nvh265dec", + "nvvp9dec", + "nvav1dec", + "nvmpegvideodec", + "nvmpeg4videodec", + ]; + + for decoder in nvidia_decoders { + if gstreamer::ElementFactory::find(decoder).is_some() { + support.has_nvidia = true; + support.hw_decoders.push(decoder.to_string()); + } + } + + // Check for VAAPI decoders (AMD/Intel) + let vaapi_decoders = [ + "vaapih264dec", + "vaapih265dec", + "vaapivp8dec", + "vaapivp9dec", + "vaapiav1dec", + "vaapimpeg2dec", + // New VA-API plugin element names (GStreamer 1.22+) + "vah264dec", + "vah265dec", + "vavp8dec", + "vavp9dec", + "vaav1dec", + ]; + + for decoder in vaapi_decoders { + if gstreamer::ElementFactory::find(decoder).is_some() { + support.has_vaapi = true; + support.hw_decoders.push(decoder.to_string()); + } + } + + // Check for VAAPI post-processor (required for DMA-BUF zero-copy output) + // New GStreamer 1.22+ uses "vapostproc", older uses "vaapipostproc" + // Note: DMA-BUF from vapostproc only works on GStreamer 1.26+ (not 1.24) + // On older versions, the GL download pipeline is used instead (glupload → glcolorconvert → gldownload) + if 
gstreamer::ElementFactory::find("vapostproc").is_some() { + support.hw_decoders.push("vapostproc".to_string()); + } else if gstreamer::ElementFactory::find("vaapipostproc").is_some() { + support.hw_decoders.push("vaapipostproc".to_string()); + } + + // Check for CUDA DMA-BUF upload (NVIDIA zero-copy) + if gstreamer::ElementFactory::find("cudadmabufupload").is_some() { + support.hw_decoders.push("cudadmabufupload".to_string()); + } + + info!( + has_nvidia = support.has_nvidia, + has_vaapi = support.has_vaapi, + decoders = ?support.hw_decoders, + "Detected video codec support" + ); + + support +} + +/// Check if a path points to an animated/video file. +/// +/// This checks both: +/// 1. The file extension is a known video/AVIF format +/// 2. The system has capability to decode it +/// +/// Supported formats: +/// - Animated AVIF (AVIS): CPU decoded via libavif +/// - Video files: Hardware-accelerated via GStreamer +#[must_use] +pub fn is_animated_file(path: &Path) -> bool { + let Some(ext) = path.extension().and_then(|e| e.to_str()) else { + return false; + }; + + let ext_lower = ext.to_lowercase(); + + // AVIF is treated as animated if it's an AVIS (AVIF Image Sequence) + // Animated AVIF is CPU-decoded using libavif + if ext_lower == "avif" { + if is_animated_avif(path) { + debug!(path = %path.display(), "Animated AVIF detected - will use CPU decoding"); + return true; + } + return false; + } + + // Video files - check extension first (quick filter) + if !VIDEO_EXTENSIONS.contains(&ext_lower.as_str()) { + return false; + } + + // Video is supported as long as we have decodebin (software decode) + // GStreamer's decodebin can handle any format it has plugins for + true +} + +/// Check if a path points to a video file (non-AVIF animated). 
+#[must_use] +pub fn is_video_file(path: &Path) -> bool { + let Some(ext) = path.extension().and_then(|e| e.to_str()) else { + return false; + }; + + let ext_lower = ext.to_lowercase(); + + // AVIF is handled separately as its own source type + if ext_lower == "avif" { + return false; + } + + VIDEO_EXTENSIONS.contains(&ext_lower.as_str()) +} + +/// Check if an AVIF file is animated (AVIF Image Sequence). +/// +/// AVIF files use the ISO Base Media File Format (ISOBMFF). The file starts +/// with an 'ftyp' box containing a major brand: +/// - `avif` = static AVIF image +/// - `avis` = AVIF Image Sequence (animated) +/// +/// This function reads the file header to detect animated AVIF files. +#[must_use] +pub fn is_animated_avif(path: &Path) -> bool { + use std::fs::File; + use std::io::Read; + + let Ok(mut file) = File::open(path) else { + return false; + }; + + // ISOBMFF structure: [4 bytes size][4 bytes type][4 bytes major_brand]... + // We need to read the ftyp box and check the major brand + let mut header = [0u8; 12]; + if file.read_exact(&mut header).is_err() { + return false; + } + + // Check if this is an ftyp box + let box_type = &header[4..8]; + if box_type != b"ftyp" { + return false; + } + + // Check the major brand (bytes 8-12) + let major_brand = &header[8..12]; + + // 'avis' indicates AVIF Image Sequence (animated) + if major_brand == b"avis" { + debug!(path = %path.display(), "Detected animated AVIF (AVIS)"); + return true; + } + + // Also check compatible_brands for 'avis' in case major_brand is different + // Read more of the ftyp box to check compatible brands + let box_size = u32::from_be_bytes([header[0], header[1], header[2], header[3]]) as usize; + if box_size > 12 && box_size <= 256 { + let remaining = box_size - 12; + let mut brands_data = vec![0u8; remaining]; + if file.read_exact(&mut brands_data).is_ok() { + // Skip minor_version (4 bytes), then check compatible_brands + if remaining >= 4 { + let compatible_brands = &brands_data[4..]; + 
// Each brand is 4 bytes + for chunk in compatible_brands.chunks_exact(4) { + if chunk == b"avis" { + debug!(path = %path.display(), "Detected animated AVIF (AVIS in compatible_brands)"); + return true; + } + } + } + } + } + + false +} + +/// Demote NVIDIA decoders if CUDA is not actually functional. +/// +/// On systems where NVIDIA GStreamer plugins are installed but CUDA +/// is not available (e.g., AMD-only systems, or NVIDIA as secondary GPU), +/// the nvh264dec/etc elements will be registered but fail to instantiate. +/// This causes decodebin to select them over working software decoders. +/// +/// This function tests if NVIDIA decoders can actually be instantiated +/// and demotes them to NONE rank if not, allowing decodebin to pick +/// working alternatives like openh264dec. +/// +/// This should be called once after gstreamer::init(). +pub fn demote_broken_nvidia_decoders() { + use gstreamer::Rank; + use tracing::warn; + + static DEMOTED: std::sync::Once = std::sync::Once::new(); + + DEMOTED.call_once(|| { + // List of NVIDIA decoders to test + let nvidia_decoders = [ + "nvh264dec", + "nvh265dec", + "nvvp9dec", + "nvav1dec", + "nvmpegvideodec", + "nvmpeg4videodec", + ]; + + for decoder_name in nvidia_decoders { + if let Some(factory) = gstreamer::ElementFactory::find(decoder_name) { + // Try to create an instance - this will fail if CUDA isn't available + match factory.create().build() { + Ok(element) => { + // It worked, decoder is functional + debug!(decoder = decoder_name, "NVIDIA decoder is functional"); + drop(element); + } + Err(_) => { + // Failed to instantiate - demote to prevent decodebin from selecting it + warn!( + decoder = decoder_name, + "NVIDIA decoder failed to instantiate (CUDA unavailable?), demoting" + ); + // Set rank to NONE so decodebin won't auto-select it + factory.set_rank(Rank::NONE); + } + } + } + } + }); +} + +/// Test if a specific video file can be played on this system. 
+/// +/// This attempts to create a minimal GStreamer pipeline to verify +/// the file's codec is decodable. Returns `true` if playable. +/// +/// This is more expensive than `is_video_file` and should only be +/// used when you need to verify a specific file can be played. +#[must_use] +#[allow(dead_code)] +pub fn can_play_video(path: &Path) -> bool { + if !is_video_file(path) { + return false; + } + + // Try to create a test pipeline with decodebin + if gstreamer::init().is_err() { + return false; + } + + let path_str = match path.to_str() { + Some(s) => s.replace('\\', "\\\\").replace('"', "\\\""), + None => return false, + }; + + // Use decodebin which auto-selects the best available decoder + let pipeline_str = format!("filesrc location=\"{path_str}\" ! decodebin ! fakesink"); + + match gstreamer::parse::launch(&pipeline_str) { + Ok(element) => { + // Try to set to PAUSED to verify it can decode + let result = element.set_state(gstreamer::State::Paused); + if result.is_err() { + let _ = element.set_state(gstreamer::State::Null); + debug!(path = %path.display(), "Video file cannot be decoded (set_state failed)"); + return false; + } + + // Wait for state change with timeout + let (res, state, _) = element.state(gstreamer::ClockTime::from_mseconds(2000)); + let _ = element.set_state(gstreamer::State::Null); + + let can_play = res.is_ok() && state == gstreamer::State::Paused; + if !can_play { + debug!(path = %path.display(), "Video file cannot be decoded (state check failed)"); + } + can_play + } + Err(e) => { + debug!(path = %path.display(), error = %e, "Failed to create test pipeline"); + false + } + } +} diff --git a/src/animated/mod.rs b/src/animated/mod.rs new file mode 100644 index 0000000..bb487b1 --- /dev/null +++ b/src/animated/mod.rs @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Animated wallpaper support using GStreamer for hardware-accelerated video playback. +//! +//! 
This module provides smooth video and animated AVIF wallpaper playback by leveraging: +//! - **Hardware video decoding** via GStreamer's `decodebin` with automatic codec selection +//! - **Multi-vendor GPU support**: NVIDIA (NVDEC), AMD/Intel (VAAPI), ARM (V4L2) +//! - **Zero-copy DMA-BUF rendering** for maximum performance (no GPU→CPU→GPU roundtrip) +//! - **60fps playback** via `videorate` capped at display refresh rate +//! - **Efficient memory handling** with direct DMA-BUF to compositor +//! +//! # Module Structure +//! +//! - [`types`]: Core types (AnimatedFrame, RawVideoFrame, VideoFrameInfo, AnimatedSource) +//! - [`detection`]: File type detection utilities +//! - [`video_player`]: GStreamer-based hardware-accelerated video player +//! - [`player`]: Unified animated player supporting animated AVIF and video +//! +//! # Supported Formats +//! +//! | Format | Extension | Hardware Decode Support | +//! |--------|-----------|------------------------| +//! | AVIF | `.avif` | None (animated AVIF is CPU-decoded via libavif) | +//! | MPEG-4 | `.mp4`, `.m4v` | NVIDIA (all codecs), AMD/Intel (VP9, AV1) | +//! | WebM | `.webm` | Full (VP8, VP9, AV1) | +//! | Matroska | `.mkv` | Depends on contained codec | +//! | AVI | `.avi` | Depends on contained codec | +//! | QuickTime | `.mov` | Depends on contained codec | +//! +//! # Pipeline Priority (Highest to Lowest) +//! +//! 1. **NVIDIA CUDA→DMA-BUF** (`cudadmabufupload`): Optimal zero-copy, no GL context +//! 2. **VAAPI DMA-BUF** (AMD/Intel): Native DMA-BUF export +//! 3. **NVIDIA GL DMA-BUF**: NVDEC → GL → gldownload DMA-BUF +//! 4. **VAAPI wl_shm**: Fallback with CPU buffer copy +//! 5. **NVIDIA GL wl_shm**: Fallback with CPU buffer copy +//! 6. 
**Software decode**: CPU decode + CPU convert + +mod detection; +mod player; +mod types; +#[cfg(feature = "animated")] +mod video_player; + +// Re-export public API +pub use detection::{demote_broken_nvidia_decoders, get_codec_support, is_animated_file}; +pub use player::AnimatedPlayer; +pub use types::AnimatedSource; + +#[cfg(feature = "animated")] +#[allow(unused_imports)] +pub(crate) use video_player::VideoPlayer; + +#[cfg(test)] +mod tests; diff --git a/src/animated/player.rs b/src/animated/player.rs new file mode 100644 index 0000000..89c565f --- /dev/null +++ b/src/animated/player.rs @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Unified animated wallpaper player. +//! +//! This module provides [`AnimatedPlayer`], which handles animated AVIF and video +//! animated wallpapers through a unified interface. It automatically selects +//! the appropriate backend: +//! - AVIF files: CPU-decoded frames cached in memory +//! - Video files: GStreamer-based hardware-accelerated playback + +use std::{ + path::{Path, PathBuf}, + time::Duration, +}; + +use image::DynamicImage; +use tracing::{debug, info}; + +use super::types::{ + AnimatedFrame, AnimatedSource, DEFAULT_FRAME_DURATION, MIN_FRAME_DURATION, VideoFrameInfo, +}; +use super::video_player::VideoPlayer; + +/// Internal source holding actual player/frame data. +enum PlayerSource { + /// AVIF frames loaded into memory. + Avif(Vec), + /// Video player instance. + Video(VideoPlayer), +} + +/// Unified player for animated AVIF and video animated wallpapers. +/// +/// Automatically dispatches to the appropriate backend based on file type. +/// For AVIFs, frames are decoded into memory and cycled. +/// For videos, GStreamer handles decoding with hardware acceleration. +pub struct AnimatedPlayer { + /// The animation source. + source: PlayerSource, + /// Source file path. + source_path: PathBuf, + /// Current frame index (for AVIF). 
+ current_index: usize, +} + +impl AnimatedPlayer { + /// Create a new animated player from an AnimatedSource. + /// + /// Automatically detects whether the file is an AVIF or video and initializes + /// the appropriate backend. + pub fn new( + source: AnimatedSource, + target_width: u32, + target_height: u32, + ) -> eyre::Result { + let path = source.path(); + info!(path = %path.display(), width = target_width, height = target_height, "Loading animated wallpaper"); + + let player_source = match &source { + AnimatedSource::Avif(p) => { + debug!(path = %p.display(), "Loading as animated AVIF"); + let frames = Self::load_avif_frames(p)?; + info!(frames = frames.len(), "Loaded AVIF frames"); + PlayerSource::Avif(frames) + } + AnimatedSource::Video(p) => { + debug!(path = %p.display(), "Loading as video"); + let player = VideoPlayer::new(p, target_width, target_height)?; + // Start playback immediately + player.play()?; + PlayerSource::Video(player) + } + }; + + Ok(Self { + source: player_source, + source_path: path.to_path_buf(), + current_index: 0, + }) + } + + /// Load animated AVIF (AVIS) frames into memory using libavif. 
+ fn load_avif_frames(path: &Path) -> eyre::Result> { + use std::ffi::CString; + + use libavif_sys::*; + + let path_str = path + .to_str() + .ok_or_else(|| eyre::eyre!("Invalid path encoding"))?; + let c_path = CString::new(path_str)?; + + let mut frames = Vec::new(); + + unsafe { + // Create decoder + let decoder = avifDecoderCreate(); + if decoder.is_null() { + return Err(eyre::eyre!("Failed to create AVIF decoder")); + } + + // Ensure we clean up on exit + struct DecoderGuard(*mut avifDecoder); + impl Drop for DecoderGuard { + fn drop(&mut self) { + unsafe { avifDecoderDestroy(self.0) }; + } + } + let _guard = DecoderGuard(decoder); + + // Set IO from file + let result = avifDecoderSetIOFile(decoder, c_path.as_ptr()); + if result != AVIF_RESULT_OK { + return Err(eyre::eyre!("Failed to set AVIF IO: {}", result)); + } + + // Parse the file + let result = avifDecoderParse(decoder); + if result != AVIF_RESULT_OK { + return Err(eyre::eyre!("Failed to parse AVIF: {}", result)); + } + + let image_count = (*decoder).imageCount; + tracing::debug!(image_count, "AVIF has frames"); + + // Decode each frame + for frame_idx in 0..image_count { + let result = avifDecoderNextImage(decoder); + if result != AVIF_RESULT_OK { + if result == AVIF_RESULT_NO_IMAGES_REMAINING { + break; + } + return Err(eyre::eyre!( + "Failed to decode AVIF frame {}: {}", + frame_idx, + result + )); + } + + let avif_image = (*decoder).image; + if avif_image.is_null() { + return Err(eyre::eyre!("Null image pointer for frame {}", frame_idx)); + } + + let width = (*avif_image).width; + let height = (*avif_image).height; + + // Create RGB image for conversion + let mut rgb: avifRGBImage = std::mem::zeroed(); + avifRGBImageSetDefaults(&mut rgb, avif_image); + rgb.format = AVIF_RGB_FORMAT_RGBA; + rgb.depth = 8; + + // Allocate pixel buffer + avifRGBImageAllocatePixels(&mut rgb); + + struct RgbGuard(*mut avifRGBImage); + impl Drop for RgbGuard { + fn drop(&mut self) { + unsafe { avifRGBImageFreePixels(self.0) 
}; + } + } + let _rgb_guard = RgbGuard(&mut rgb); + + // Convert YUV to RGB + let result = avifImageYUVToRGB(avif_image, &mut rgb); + if result != AVIF_RESULT_OK { + return Err(eyre::eyre!( + "Failed to convert AVIF frame {} to RGB: {}", + frame_idx, + result + )); + } + + // Copy pixel data + let pixel_count = (width * height * 4) as usize; + let pixels = std::slice::from_raw_parts(rgb.pixels, pixel_count); + let rgba_data: Vec = pixels.to_vec(); + + // Create image + let rgba_image = + image::RgbaImage::from_raw(width, height, rgba_data).ok_or_else(|| { + eyre::eyre!("Failed to create image from AVIF frame {}", frame_idx) + })?; + + // Get frame duration + // imageTiming.duration is in seconds (f64) + let duration_secs = (*decoder).imageTiming.duration; + let duration = if duration_secs > 0.0 { + Duration::from_secs_f64(duration_secs) + } else { + // Default to 100ms if no timing info + Duration::from_millis(100) + }; + let duration = duration.max(MIN_FRAME_DURATION); + + tracing::debug!( + frame = frame_idx, + width, + height, + duration_ms = duration.as_millis(), + "AVIF frame loaded" + ); + + frames.push(AnimatedFrame { + image: DynamicImage::ImageRgba8(rgba_image), + duration, + pts: None, + }); + } + } + + if frames.is_empty() { + return Err(eyre::eyre!("No frames found in AVIF")); + } + + Ok(frames) + } + + /// Stop playback. + pub fn stop(&mut self) -> eyre::Result<()> { + if let PlayerSource::Video(player) = &self.source { + player.stop()?; + } + Ok(()) + } + + /// Get the current frame. + #[must_use] + pub fn current_frame(&self) -> Option { + match &self.source { + PlayerSource::Avif(frames) => frames.get(self.current_index).cloned(), + PlayerSource::Video(player) => player.current_frame(), + } + } + + /// Get the current frame index. + #[must_use] + pub fn current_frame_index(&self) -> usize { + self.current_index + } + + /// Advance to the next frame (for AVIF playback). + /// + /// Returns `true` if the animation should continue, `false` to stop. 
+ /// AVIFs always loop, so they always return `true` (unless empty). + pub fn advance(&mut self) -> bool { + match &mut self.source { + PlayerSource::Avif(frames) => { + if frames.is_empty() { + return false; + } + self.current_index = (self.current_index + 1) % frames.len(); + // AVIFs always loop - always return true + true + } + PlayerSource::Video(player) => { + // Check for EOS and handle looping + !player.check_eos() + } + } + } + + /// Get the duration of the current frame. + #[must_use] + pub fn current_frame_duration(&self) -> Duration { + match &self.source { + PlayerSource::Avif(frames) => frames + .get(self.current_index) + .map(|f| f.duration) + .unwrap_or(DEFAULT_FRAME_DURATION), + PlayerSource::Video(player) => player.frame_duration(), + } + } + + /// Get the duration of the current frame (alias for current_frame_duration). + #[must_use] + pub fn current_duration(&self) -> Duration { + self.current_frame_duration() + } + + /// Check if this is a video source. + #[must_use] + pub fn is_video(&self) -> bool { + matches!(self.source, PlayerSource::Video(_)) + } + + /// Get video dimensions. + #[must_use] + pub fn video_dimensions(&self) -> Option<(u32, u32)> { + match &self.source { + PlayerSource::Video(player) => player.video_dimensions(), + PlayerSource::Avif(_) => None, + } + } + + /// Pull a frame and write it directly to a destination buffer. + pub fn pull_frame_to_buffer(&self, dest: &mut [u8]) -> Option { + match &self.source { + PlayerSource::Video(player) => player.pull_frame_to_buffer(dest), + PlayerSource::Avif(_) => None, + } + } + + /// Pull the last cached frame. + pub fn pull_cached_frame(&self, dest: &mut [u8]) -> Option { + match &self.source { + PlayerSource::Video(player) => player.pull_cached_frame(dest), + PlayerSource::Avif(_) => None, + } + } + + /// Try to get a DMA-BUF frame for zero-copy rendering. 
+ #[must_use] + pub fn try_get_dmabuf_frame(&self) -> Option { + match &self.source { + PlayerSource::Video(player) => player.try_get_dmabuf_frame(), + PlayerSource::Avif(_) => None, + } + } + + /// Process GStreamer messages and check for EOS. + /// Returns true if video ended (EOS reached). + pub fn process_messages(&mut self) -> bool { + match &mut self.source { + PlayerSource::Video(player) => player.check_eos(), + PlayerSource::Avif(_) => false, + } + } +} + +impl std::fmt::Debug for AnimatedPlayer { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("AnimatedPlayer") + .field("source_path", &self.source_path) + .field( + "source_type", + &match &self.source { + PlayerSource::Avif(_) => "AVIF", + PlayerSource::Video(_) => "Video", + }, + ) + .field("current_index", &self.current_index) + .finish() + } +} diff --git a/src/animated/tests.rs b/src/animated/tests.rs new file mode 100644 index 0000000..2dc0bef --- /dev/null +++ b/src/animated/tests.rs @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Unit tests for animated wallpaper functionality. 
+ +#[cfg(test)] +mod tests { + use std::path::Path; + + use super::super::detection::{is_animated_avif, is_animated_file, is_video_file}; + + #[test] + fn test_is_video_file() { + assert!(is_video_file(Path::new("test.mp4"))); + assert!(is_video_file(Path::new("test.MP4"))); + assert!(is_video_file(Path::new("test.webm"))); + assert!(is_video_file(Path::new("test.WEBM"))); + assert!(is_video_file(Path::new("test.mkv"))); + assert!(is_video_file(Path::new("test.m4v"))); + assert!(is_video_file(Path::new("test.mov"))); + assert!(is_video_file(Path::new("test.ogv"))); + // Note: AVIF detection requires reading the file, so non-existent files return false + assert!(!is_video_file(Path::new("test.avif"))); + assert!(!is_video_file(Path::new("test.png"))); + assert!(!is_video_file(Path::new("test.jpg"))); + } + + #[test] + fn test_is_animated_file() { + // Videos should be animated + assert!(is_animated_file(Path::new("test.mp4"))); + assert!(is_animated_file(Path::new("test.webm"))); + assert!(is_animated_file(Path::new("test.mkv"))); + // Note: AVIF detection requires reading the file, tested separately + + // Static images should not be animated + assert!(!is_animated_file(Path::new("test.png"))); + assert!(!is_animated_file(Path::new("test.jpg"))); + assert!(!is_animated_file(Path::new("test.jpeg"))); + assert!(!is_animated_file(Path::new("test.jxl"))); + // Non-existent AVIF files return false (can't read header) + assert!(!is_animated_file(Path::new("test.avif"))); + } + + #[test] + fn test_animated_extensions() { + // All known animated extensions should be recognized (except AVIF which needs file reading) + let animated_extensions = ["mp4", "webm", "mkv", "avi", "mov", "m4v", "ogv"]; + + for ext in animated_extensions { + let filename = format!("test.{ext}"); + assert!( + is_animated_file(Path::new(&filename)), + "Extension {ext} should be recognized as animated" + ); + } + } + + #[test] + fn test_animated_avif_detection() { + // Test with the fixture animated 
AVIF file + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + let animated_avif = Path::new(manifest_dir).join("tests/fixtures/animated.avif"); + + if animated_avif.exists() { + assert!( + is_animated_avif(&animated_avif), + "animated.avif should be detected as animated AVIF" + ); + assert!( + is_animated_file(&animated_avif), + "animated.avif should be detected as animated file" + ); + // Note: is_video_file() returns false for AVIF - AVIF is its own source type + assert!( + !is_video_file(&animated_avif), + "AVIF should NOT be detected as video file (separate source type)" + ); + } else { + eprintln!( + "Skipping test: animated AVIF fixture not found at {:?}", + animated_avif + ); + } + + // Non-existent files should return false + assert!(!is_animated_avif(Path::new("nonexistent.avif"))); + } + + #[test] + fn test_case_insensitive_extensions() { + // Test various case combinations + let test_cases = [ + ("test.MP4", true), + ("test.Mp4", true), + ("test.mp4", true), + ("test.WEBM", true), + ("test.WebM", true), + ("test.webm", true), + ]; + + for (path, expected) in test_cases { + assert_eq!( + is_animated_file(Path::new(path)), + expected, + "Path {path} should return {expected}" + ); + } + } + + #[test] + fn test_edge_cases() { + // No extension + assert!(!is_animated_file(Path::new("test"))); + assert!(!is_animated_file(Path::new("/path/to/file"))); + + // Hidden files with extensions + assert!(is_animated_file(Path::new(".hidden.mp4"))); + + // Multiple dots + assert!(is_animated_file(Path::new("my.video.file.mp4"))); + + // Weird paths + assert!(is_animated_file(Path::new("./test.mp4"))); + assert!(is_animated_file(Path::new("../test.mp4"))); + } + + // Tests for load_avif_frames (unsafe code) + mod avif_loading { + use std::path::PathBuf; + use std::time::Duration; + + use super::super::super::player::AnimatedPlayer; + use super::super::super::types::AnimatedSource; + + /// Get the path to the animated AVIF test fixture. 
+ fn animated_avif_fixture() -> PathBuf { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); + PathBuf::from(manifest_dir).join("tests/fixtures/animated.avif") + } + + /// Test loading a valid animated AVIF file. + /// This exercises the unsafe libavif-sys FFI code. + #[test] + fn test_load_animated_avif() { + let animated_avif = animated_avif_fixture(); + if !animated_avif.exists() { + eprintln!( + "Skipping test: animated AVIF fixture not found at {:?}", + animated_avif + ); + return; + } + + // Create source - this should detect it as animated AVIF + let source = AnimatedSource::from_path(&animated_avif); + assert!( + matches!(source, Some(AnimatedSource::Avif(_))), + "Should detect as Avif source" + ); + + // Create player - this calls load_avif_frames internally + let player = AnimatedPlayer::new(source.unwrap(), 1920, 1080); + assert!(player.is_ok(), "Should load animated AVIF successfully"); + + let player = player.unwrap(); + + // Verify we got frames + let frame = player.current_frame(); + assert!(frame.is_some(), "Should have at least one frame"); + + let frame = frame.unwrap(); + // Check image dimensions are valid + assert!(frame.image.width() > 0, "Frame width should be > 0"); + assert!(frame.image.height() > 0, "Frame height should be > 0"); + + // Check duration is reasonable (not zero, not absurdly long) + assert!( + frame.duration >= Duration::from_millis(10), + "Frame duration should be at least 10ms" + ); + assert!( + frame.duration <= Duration::from_secs(10), + "Frame duration should be less than 10s" + ); + } + + /// Test that loading a non-existent AVIF file fails gracefully. + #[test] + fn test_load_nonexistent_avif() { + let fake_path = PathBuf::from("/nonexistent/fake.avif"); + let source = AnimatedSource::Avif(fake_path); + + let result = AnimatedPlayer::new(source, 1920, 1080); + assert!(result.is_err(), "Should fail for non-existent file"); + } + + /// Test that the decoder properly cleans up resources. 
+ /// This tests the DecoderGuard and RgbGuard drop implementations. + #[test] + fn test_avif_decoder_cleanup() { + let animated_avif = animated_avif_fixture(); + if !animated_avif.exists() { + eprintln!( + "Skipping test: animated AVIF fixture not found at {:?}", + animated_avif + ); + return; + } + + // Load and drop multiple times to check for leaks/crashes + for _ in 0..5 { + let source = AnimatedSource::Avif(animated_avif.clone()); + let player = AnimatedPlayer::new(source, 1920, 1080); + assert!(player.is_ok()); + // Player is dropped here, should clean up properly + } + } + + /// Test advancing through all AVIF frames. + #[test] + fn test_avif_frame_advancement() { + let animated_avif = animated_avif_fixture(); + if !animated_avif.exists() { + eprintln!( + "Skipping test: animated AVIF fixture not found at {:?}", + animated_avif + ); + return; + } + + let source = AnimatedSource::Avif(animated_avif); + let mut player = AnimatedPlayer::new(source, 1920, 1080).unwrap(); + + // Advance through frames and verify each is valid + let mut frame_count = 0; + let mut seen_indices = std::collections::HashSet::new(); + + // Advance enough times to loop through all frames at least once + for _ in 0..100 { + let idx = player.current_frame_index(); + seen_indices.insert(idx); + + let frame = player.current_frame(); + assert!(frame.is_some(), "Frame {} should exist", idx); + + let should_continue = player.advance(); + assert!(should_continue, "AVIF should always loop"); + + frame_count += 1; + if frame_count > 10 && player.current_frame_index() == 0 { + // We've looped back to the beginning + break; + } + } + + assert!( + seen_indices.len() > 1, + "Animated AVIF should have multiple frames" + ); + } + + /// Test that RGBA pixel data is valid (not all zeros or garbage). 
+ #[test] + fn test_avif_pixel_data_validity() { + let animated_avif = animated_avif_fixture(); + if !animated_avif.exists() { + eprintln!( + "Skipping test: animated AVIF fixture not found at {:?}", + animated_avif + ); + return; + } + + let source = AnimatedSource::Avif(animated_avif); + let player = AnimatedPlayer::new(source, 1920, 1080).unwrap(); + + let frame = player.current_frame().unwrap(); + let rgba = frame.image.to_rgba8(); + let pixels = rgba.as_raw(); + + // Check that we have actual pixel data + assert!(!pixels.is_empty(), "Should have pixel data"); + + // Check that it's not all zeros (fully transparent/black) + let non_zero_count = pixels.iter().filter(|&&p| p != 0).count(); + assert!( + non_zero_count > pixels.len() / 10, + "At least 10% of pixels should be non-zero" + ); + + // Verify RGBA layout (4 bytes per pixel) + assert_eq!( + pixels.len(), + (frame.image.width() * frame.image.height() * 4) as usize, + "Pixel buffer size should match width * height * 4" + ); + } + } +} diff --git a/src/animated/types.rs b/src/animated/types.rs new file mode 100644 index 0000000..15fb1f6 --- /dev/null +++ b/src/animated/types.rs @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Core types for animated wallpaper playback. + +use std::{ + path::{Path, PathBuf}, + time::Duration, +}; + +use image::DynamicImage; + +use super::detection::{is_animated_avif, is_video_file}; + +/// Default frame duration if video metadata is unavailable (60 FPS). +pub(crate) const DEFAULT_FRAME_DURATION: Duration = Duration::from_millis(16); + +/// Minimum frame duration to prevent excessive CPU/GPU usage (60 FPS cap). +pub(crate) const MIN_FRAME_DURATION: Duration = Duration::from_millis(16); + +/// A decoded video frame with timing information. +#[derive(Clone)] +pub struct AnimatedFrame { + /// The decoded image data (RGBA). + #[allow(dead_code)] + pub image: DynamicImage, + /// How long this frame should be displayed. 
+ pub duration: Duration, + /// Presentation timestamp (nanoseconds). Used for synchronization and debugging. + #[allow(dead_code)] + pub pts: Option, +} + +/// Metadata about a video frame without the pixel data. +/// Used for zero-copy frame writing. +#[cfg(feature = "animated")] +#[derive(Clone, Debug)] +pub struct VideoFrameInfo { + /// Frame width. + pub width: u32, + /// Frame height. + pub height: u32, + /// Whether the data is in BGRx format (true) or RGBA format (false). + pub is_bgrx: bool, +} + +/// Source type for animated content (for path identification). +#[derive(Debug, Clone)] +pub enum AnimatedSourceType { + /// AVIF Image Sequence (animated AVIF). + Avif(PathBuf), + /// Video file (MP4, WebM, etc.). + Video(PathBuf), +} + +/// Re-export as AnimatedSource for backwards compatibility. +pub type AnimatedSource = AnimatedSourceType; + +impl AnimatedSourceType { + /// Create an animated source from a path. + #[must_use] + pub fn from_path(path: &Path) -> Option { + if is_animated_avif(path) { + Some(AnimatedSourceType::Avif(path.to_path_buf())) + } else if is_video_file(path) { + Some(AnimatedSourceType::Video(path.to_path_buf())) + } else { + None + } + } + + /// Get the file path. + #[must_use] + pub fn path(&self) -> &Path { + match self { + AnimatedSourceType::Avif(p) | AnimatedSourceType::Video(p) => p, + } + } +} diff --git a/src/animated/video_player.rs b/src/animated/video_player.rs new file mode 100644 index 0000000..d7678f9 --- /dev/null +++ b/src/animated/video_player.rs @@ -0,0 +1,1357 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! GStreamer-based hardware-accelerated video player. +//! +//! This module provides [`VideoPlayer`], which uses GStreamer for efficient +//! video decoding with support for: +//! - NVIDIA NVDEC (with optional cudadmabufupload for zero-copy) +//! - AMD/Intel VAAPI +//! - Software decode fallback +//! +//! ## Pipeline Priority +//! +//! 1. NVIDIA CUDA→DMA-BUF (optimal zero-copy) +//! 2. 
VAAPI DMA-BUF (AMD/Intel, requires GStreamer 1.26+) +//! 3. NVIDIA GL DMA-BUF +//! 4. GL download (decodebin → glupload → glcolorconvert → gldownload) - works with any decoder +//! 5. VAAPI wl_shm (decodebin → vapostproc → appsink) - reliable fallback for VAAPI +//! 6. NVIDIA GL wl_shm fallback +//! 7. Software decode + +use std::{ + path::{Path, PathBuf}, + sync::{Arc, Mutex}, + time::Duration, +}; + +use image::DynamicImage; +use tracing::{debug, error, info, warn}; + +use super::types::{AnimatedFrame, DEFAULT_FRAME_DURATION, MIN_FRAME_DURATION, VideoFrameInfo}; + +/// Shared state for receiving frames from GStreamer pipeline. +pub(crate) struct VideoFrameState { + /// Most recent decoded frame. + #[allow(dead_code)] + pub current_frame: Option, + /// Frame duration from video metadata. + pub frame_duration: Duration, + /// Whether video has reached end of stream. + pub eos: bool, + /// Frame counter for FPS measurement. + pub frame_count: u64, +} + +/// Hardware-accelerated video player using GStreamer. +/// +/// ## Architecture: Decoupled Frame Delivery +/// +/// This player uses a decoupled architecture to prevent rendering stalls: +/// +/// ```text +/// GStreamer (async) → FrameQueue (3 frames) → Renderer (non-blocking) +/// ``` +/// +/// Key design principles: +/// - **GStreamer callback never blocks**: Pushes to queue, drops oldest if full +/// - **Renderer never waits**: Pops from queue, reuses last frame if empty +/// - **appsink bounded buffering**: `max-buffers=4, drop=true` prevents pipeline stalls +/// - **Seek-based looping**: Seeks to start on EOS for seamless loop +pub struct VideoPlayer { + /// GStreamer pipeline. + pipeline: gstreamer::Pipeline, + /// App sink for receiving frames. + appsink: gstreamer_app::AppSink, + /// Bounded frame queue for decoupled rendering. + frame_queue: crate::frame_queue::SharedFrameQueue, + /// Shared frame state (legacy, used for frame_duration). + pub(crate) frame_state: Arc>, + /// Source file path. 
+ source_path: PathBuf, + /// Whether to loop playback. + looping: bool, + /// Number of times the pipeline has been rebuilt (for EOS looping). + rebuild_count: std::sync::atomic::AtomicU32, +} + +impl VideoPlayer { + /// Log available hardware video decoders for debugging. + fn log_available_decoders() { + // Use the detection module which caches the result + let support = super::get_codec_support(); + + if support.hw_decoders.is_empty() { + warn!( + "No hardware video decoders found. Video will use software decoding. \ + Install gstreamer1-vaapi (AMD/Intel) or gstreamer1-plugins-bad (NVIDIA) for hardware acceleration." + ); + } else { + info!(decoders = ?support.hw_decoders, "Available hardware video decoders"); + } + } + + /// Create a new video player for the given path. + pub fn new(path: &Path, target_width: u32, target_height: u32) -> eyre::Result { + use gstreamer::prelude::*; + + gstreamer::init()?; + + // Demote NVIDIA decoders if they can't actually be instantiated + // (e.g., on AMD-only systems where CUDA isn't available) + super::demote_broken_nvidia_decoders(); + + static LOGGED_DECODERS: std::sync::Once = std::sync::Once::new(); + LOGGED_DECODERS.call_once(Self::log_available_decoders); + + let path_str = path + .to_str() + .ok_or_else(|| eyre::eyre!("Invalid path: {}", path.display()))?; + + debug!( + path = %path.display(), + width = target_width, + height = target_height, + "Creating GStreamer video player with GPU scaling" + ); + + let test_pipeline = |pipeline_str: &str| -> bool { + match gstreamer::parse::launch(pipeline_str) { + Ok(p) => { + let result = p.set_state(gstreamer::State::Paused); + if result.is_err() { + debug!("Pipeline test failed: set_state returned error"); + let _ = p.set_state(gstreamer::State::Null); + return false; + } + + // Check for errors on bus while waiting + let bus = p.bus().unwrap(); + + // Use longer timeout (5s) for GL pipelines which need EGL/GPU init + let start = std::time::Instant::now(); + + loop { + // 
Check pipeline state + let (res, state, pending) = + p.state(gstreamer::ClockTime::from_mseconds(100)); + + if res.is_ok() && state == gstreamer::State::Paused { + let _ = p.set_state(gstreamer::State::Null); + return true; + } + + // Check for errors on bus + while let Some(msg) = bus.pop() { + use gstreamer::MessageView; + match msg.view() { + MessageView::Error(err) => { + debug!( + error = %err.error(), + debug = ?err.debug(), + "Pipeline test failed: GStreamer error" + ); + let _ = p.set_state(gstreamer::State::Null); + return false; + } + MessageView::StateChanged(state_changed) => { + if let Some(src) = state_changed.src() { + if src.type_() == gstreamer::Pipeline::static_type() { + debug!( + old = ?state_changed.old(), + new = ?state_changed.current(), + "Pipeline state changed during test" + ); + } + } + } + _ => {} + } + } + + // Check timeout + if start.elapsed() > std::time::Duration::from_secs(5) { + debug!( + result = ?res, + state = ?state, + pending = ?pending, + elapsed_ms = start.elapsed().as_millis(), + "Pipeline test timed out" + ); + let _ = p.set_state(gstreamer::State::Null); + return false; + } + + std::thread::sleep(std::time::Duration::from_millis(50)); + } + } + Err(e) => { + debug!(error = %e, "Pipeline test failed: parse error"); + false + } + } + }; + + let escaped_path = path_str.replace('\\', "\\\\").replace('"', "\\\""); + + // Check for VAAPI post-processor (vapostproc) - only needed for DMA-BUF path + // Note: Only vapostproc (new, GStreamer 1.22+) supports DMA-BUF output + // The GL download pipeline doesn't need it - decodebin handles decoder selection + let has_vapostproc = gstreamer::ElementFactory::find("vapostproc").is_some(); + let has_nvdec = gstreamer::ElementFactory::find("nvh264dec").is_some(); + let has_cuda_dmabuf = gstreamer::ElementFactory::find("cudadmabufupload").is_some(); + + debug!( + has_vapostproc, + has_nvdec, has_cuda_dmabuf, "Hardware acceleration support detected" + ); + + if has_cuda_dmabuf { + info!( + 
"NVIDIA CUDA→DMA-BUF plugin (cudadmabufupload) detected - optimal zero-copy path available" + ); + } + + // Allow disabling DMA-BUF via environment variable for debugging/compatibility + // Set COSMIC_BG_NO_DMABUF=1 to force shared memory path + let try_dmabuf = std::env::var("COSMIC_BG_NO_DMABUF") + .map(|v| v != "1" && v.to_lowercase() != "true") + .unwrap_or(true); + + if !try_dmabuf { + info!("DMA-BUF disabled via COSMIC_BG_NO_DMABUF environment variable"); + } + + // Try pipelines in priority order + let pipeline_str = Self::try_cuda_dmabuf_pipeline( + path, + &escaped_path, + has_cuda_dmabuf, + has_nvdec, + try_dmabuf, + &test_pipeline, + target_width, + target_height, + ) + .or_else(|| { + // VAAPI DMA-BUF only works with vapostproc (new element), not vaapipostproc + Self::try_vaapi_dmabuf_pipeline( + path, + &escaped_path, + has_vapostproc, // Only try if we have the NEW vapostproc + try_dmabuf, + &test_pipeline, + target_width, + target_height, + ) + }) + .or_else(|| { + Self::try_nvdec_gl_dmabuf_pipeline( + &escaped_path, + has_nvdec, + try_dmabuf, + &test_pipeline, + target_width, + target_height, + ) + }) + .or_else(|| { + Self::try_gl_download_pipeline( + &escaped_path, + &test_pipeline, + target_width, + target_height, + ) + }) + .or_else(|| { + Self::try_vaapi_wlshm_pipeline( + &escaped_path, + has_vapostproc, + &test_pipeline, + target_width, + target_height, + ) + }) + .or_else(|| Self::try_nvdec_gl_wlshm_pipeline(&escaped_path, has_nvdec, &test_pipeline)) + .or_else(|| Self::try_gl_pipeline(&escaped_path, &test_pipeline)) + .unwrap_or_else(|| { + Self::software_fallback_pipeline(&escaped_path, target_width, target_height) + }); + + debug!(pipeline = %pipeline_str, "Creating GStreamer pipeline"); + + let pipeline = gstreamer::parse::launch(&pipeline_str)? + .downcast::() + .map_err(|_| eyre::eyre!("Failed to create pipeline"))?; + + let appsink = pipeline + .by_name("sink") + .ok_or_else(|| eyre::eyre!("Failed to get appsink from pipeline"))? 
+ .downcast::() + .map_err(|_| eyre::eyre!("Element 'sink' is not an AppSink"))?; + + let frame_state = Arc::new(Mutex::new(VideoFrameState { + current_frame: None, + frame_duration: DEFAULT_FRAME_DURATION, + eos: false, + frame_count: 0, + })); + + let frame_queue = crate::frame_queue::new_shared_queue(3); + + // IMPORTANT: Set up callbacks BEFORE detect_framerate sets pipeline to PAUSED + // This ensures our propose_allocation callback (which adds VideoMeta support) + // is active when caps negotiation happens for VAAPI DMA-BUF + Self::setup_appsink_callback(&appsink, Arc::clone(&frame_queue), Arc::clone(&frame_state)); + + // Now detect framerate - this sets pipeline to PAUSED and triggers caps negotiation + let initial_frame_duration = + Self::detect_framerate(&pipeline, &appsink, target_width, target_height); + + // Update frame_state with detected duration + if let Ok(mut state) = frame_state.lock() { + state.frame_duration = initial_frame_duration; + } + + Ok(Self { + pipeline, + appsink, + frame_queue, + frame_state, + source_path: path.to_path_buf(), + looping: true, + rebuild_count: std::sync::atomic::AtomicU32::new(0), + }) + } + + #[allow(clippy::too_many_arguments)] + fn try_cuda_dmabuf_pipeline( + path: &Path, + escaped_path: &str, + has_cuda_dmabuf: bool, + has_nvdec: bool, + try_dmabuf: bool, + test_pipeline: &impl Fn(&str) -> bool, + target_width: u32, + target_height: u32, + ) -> Option { + if !try_dmabuf || !has_cuda_dmabuf || !has_nvdec { + return None; + } + + let ext = path + .extension() + .and_then(|e| e.to_str()) + .map(|e| e.to_lowercase()) + .unwrap_or_default(); + + let pipeline = match ext.as_str() { + "mp4" | "m4v" | "mov" => format!( + concat!( + "filesrc location=\"{path}\" ! ", + "qtdemux ! h264parse ! ", + "nvh264dec ! ", + "video/x-raw(memory:CUDAMemory) ! ", + "cudadmabufupload ! ", + "video/x-raw(memory:DMABuf) ! 
", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ), + "webm" => format!( + concat!( + "filesrc location=\"{path}\" ! ", + "matroskademux ! ", + "nvvp9dec ! ", + "video/x-raw(memory:CUDAMemory) ! ", + "cudadmabufupload ! ", + "video/x-raw(memory:DMABuf) ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ), + "mkv" => format!( + concat!( + "filesrc location=\"{path}\" ! ", + "matroskademux ! h264parse ! ", + "nvh264dec ! ", + "video/x-raw(memory:CUDAMemory) ! ", + "cudadmabufupload ! ", + "video/x-raw(memory:DMABuf) ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ), + _ => format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin ! ", + "video/x-raw(memory:CUDAMemory) ! ", + "cudadmabufupload ! ", + "video/x-raw(memory:DMABuf) ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ), + }; + + debug!(pipeline = %pipeline, "Trying NVIDIA CUDA→DMA-BUF zero-copy pipeline"); + + if test_pipeline(&pipeline) { + info!( + "🚀 NVIDIA CUDA→DMA-BUF zero-copy pipeline ACTIVE - maximum performance ({}x{})", + target_width, target_height + ); + Some(pipeline) + } else { + debug!("CUDA→DMA-BUF pipeline failed"); + None + } + } + + /// Try VAAPI DMA-BUF pipeline. Only works with GStreamer 1.26+ where + /// vapostproc properly supports DMA-BUF export. + /// + /// IMPORTANT: We require GStreamer 1.26+ because: + /// - GStreamer 1.24 (Pop!_OS): DMA-BUF export from vapostproc causes GPU hangs + /// - GStreamer 1.26 (Fedora 42): DMA-BUF export works correctly + /// + /// We don't test the pipeline because the test itself can cause hangs on 1.24. + /// Instead, we trust that 1.26+ works (which it does on Fedora 42). 
+ fn try_vaapi_dmabuf_pipeline( + path: &Path, + escaped_path: &str, + has_vapostproc: bool, + try_dmabuf: bool, + _test_pipeline: &impl Fn(&str) -> bool, + target_width: u32, + target_height: u32, + ) -> Option { + if !try_dmabuf || !has_vapostproc { + return None; + } + + // Check GStreamer version - VAAPI DMA-BUF only works reliably on 1.26+ + // GStreamer 1.24 (Pop!_OS 22.04) has bugs that cause GPU hangs with DMA-BUF + let gst_version = gstreamer::version(); + let (major, minor) = (gst_version.0, gst_version.1); + + if major < 1 || (major == 1 && minor < 26) { + debug!( + gstreamer_version = format!("{}.{}.{}", major, minor, gst_version.2), + "Skipping VAAPI DMA-BUF - requires GStreamer 1.26+ (current has bugs with DMA-BUF export)" + ); + return None; + } + + let ext = path + .extension() + .and_then(|e| e.to_str()) + .map(|e| e.to_lowercase()) + .unwrap_or_default(); + + // Check which VAAPI decoders are available (supports both old "vaapi*" and new "va*" names) + let has_vaapi_h264 = gstreamer::ElementFactory::find("vaapih264dec").is_some() + || gstreamer::ElementFactory::find("vah264dec").is_some(); + let has_vaapi_h265 = gstreamer::ElementFactory::find("vaapih265dec").is_some() + || gstreamer::ElementFactory::find("vah265dec").is_some(); + let has_vaapi_vp9 = gstreamer::ElementFactory::find("vaapivp9dec").is_some() + || gstreamer::ElementFactory::find("vavp9dec").is_some(); + let has_vaapi_av1 = gstreamer::ElementFactory::find("vaapiav1dec").is_some() + || gstreamer::ElementFactory::find("vaav1dec").is_some(); + + debug!( + ext = %ext, + has_vaapi_h264, + has_vaapi_h265, + has_vaapi_vp9, + has_vaapi_av1, + gstreamer_version = format!("{}.{}.{}", major, minor, gst_version.2), + "VAAPI decoder availability check" + ); + + // Only use VAAPI pipeline if we have the appropriate decoder + // MP4/MOV typically contain H.264/H.265, WebM contains VP8/VP9/AV1 + let can_use_vaapi = match ext.as_str() { + "webm" => has_vaapi_vp9 || has_vaapi_av1, + "mp4" | "m4v" | 
"mov" => has_vaapi_h264 || has_vaapi_h265, + "mkv" => has_vaapi_h264 || has_vaapi_h265 || has_vaapi_vp9 || has_vaapi_av1, + _ => false, + }; + + if !can_use_vaapi { + debug!( + ext = %ext, + has_vaapi_h264, + has_vaapi_vp9, + "Skipping VAAPI pipeline - no decoder for container format" + ); + return None; + } + + // For GStreamer 1.26+, we trust that VAAPI DMA-BUF works. + // We DON'T test the pipeline because even the test can cause GPU hangs on some systems. + // The real appsink's propose_allocation callback adds VideoMeta support required for DMA-BUF. + let pipeline_str = format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin ! ", + "vapostproc ! ", + "video/x-raw(memory:DMABuf),format=DMA_DRM ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ); + + info!( + gstreamer_version = format!("{}.{}.{}", major, minor, gst_version.2), + "🚀 VAAPI DMA-BUF zero-copy pipeline ACTIVE ({}x{})", target_width, target_height + ); + debug!(pipeline = %pipeline_str, "VAAPI DMA-BUF pipeline"); + + Some(pipeline_str) + } + + fn try_nvdec_gl_dmabuf_pipeline( + escaped_path: &str, + has_nvdec: bool, + try_dmabuf: bool, + test_pipeline: &impl Fn(&str) -> bool, + target_width: u32, + target_height: u32, + ) -> Option { + if !try_dmabuf || !has_nvdec { + return None; + } + + let pipeline = format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin name=dec ! ", + "nvh264dec ! ", + "glcolorconvert ! ", + "video/x-raw(memory:GLMemory),format=RGBA ! ", + "gldownload ! ", + "video/x-raw(memory:DMABuf),format=RGBA ! 
", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ); + + debug!(pipeline = %pipeline, "Trying NVIDIA NVDEC GL DMA-BUF zero-copy pipeline"); + + if test_pipeline(&pipeline) { + info!( + "NVIDIA NVDEC GL DMA-BUF zero-copy pipeline active ({}x{})", + target_width, target_height + ); + Some(pipeline) + } else { + debug!("NVDEC GL DMA-BUF pipeline failed"); + None + } + } + + /// Try GL download pipeline for efficient GPU→CPU transfer. + /// + /// This pipeline uses decodebin (auto-selects best decoder - VAAPI, NVDEC, or software) + /// then uploads to GL, converts format on GPU, and downloads back to system memory. + /// gldownload uses DMA/PBO optimizations which is much faster than videoconvert. + /// + /// Pipeline: decodebin → glupload → glcolorconvert (GPU) → gldownload → appsink + fn try_gl_download_pipeline( + escaped_path: &str, + test_pipeline: &impl Fn(&str) -> bool, + target_width: u32, + target_height: u32, + ) -> Option { + // Check for GL elements + let has_glupload = gstreamer::ElementFactory::find("glupload").is_some(); + let has_gldownload = gstreamer::ElementFactory::find("gldownload").is_some(); + let has_glcolorconvert = gstreamer::ElementFactory::find("glcolorconvert").is_some(); + + if !has_glupload || !has_gldownload || !has_glcolorconvert { + debug!( + has_glupload, + has_gldownload, has_glcolorconvert, "Missing GL elements for GL download pipeline" + ); + return None; + } + + // Test the actual pipeline with appsink - fakesink may negotiate differently + let pipeline = format!( + "filesrc location=\"{path}\" ! \ + decodebin name=dec ! \ + glupload ! \ + glcolorconvert ! video/x-raw(memory:GLMemory),format=BGRx ! \ + gldownload ! \ + video/x-raw,format=BGRx ! \ + videorate drop-only=true max-rate=60 ! video/x-raw,framerate=60/1 ! 
\ + appsink name=sink sync=true max-buffers=4 drop=true", + path = escaped_path, + ); + + debug!(pipeline = %pipeline, "Testing GL download pipeline"); + + if !test_pipeline(&pipeline) { + debug!("GL download pipeline test failed, falling back"); + return None; + } + + info!( + "GL download pipeline active - GPU color conversion with PBO transfer ({}x{})", + target_width, target_height + ); + Some(pipeline) + } + + /// Try VAAPI wl_shm pipeline using vapostproc for format conversion. + /// This is the reliable fallback for VAAPI systems when GL/DMA-BUF pipelines fail. + fn try_vaapi_wlshm_pipeline( + escaped_path: &str, + has_vapostproc: bool, + test_pipeline: &impl Fn(&str) -> bool, + target_width: u32, + target_height: u32, + ) -> Option { + if !has_vapostproc { + return None; + } + + let pipeline = format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin name=dec ! ", + "videorate drop-only=true max-rate=60 ! video/x-raw,framerate=60/1 ! ", + "vapostproc ! ", + "video/x-raw,format=BGRx ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ); + + debug!(pipeline = %pipeline, "Trying VAAPI + vapostproc pipeline"); + + if test_pipeline(&pipeline) { + info!( + "VAAPI wl_shm pipeline active - hardware decode with vapostproc ({}x{})", + target_width, target_height + ); + Some(pipeline) + } else { + debug!("VAAPI + vapostproc pipeline failed"); + None + } + } + + fn try_nvdec_gl_wlshm_pipeline( + escaped_path: &str, + has_nvdec: bool, + test_pipeline: &impl Fn(&str) -> bool, + ) -> Option { + if !has_nvdec { + return None; + } + + // Test the actual pipeline with appsink + let pipeline = format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin name=dec ! ", + "videoconvert ! ", + "videorate drop-only=true max-rate=60 ! video/x-raw,framerate=60/1 ! ", + "glupload ! ", + "glcolorconvert ! video/x-raw(memory:GLMemory),format=BGRx ! ", + "gldownload ! 
", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ); + + debug!(pipeline = %pipeline, "Testing NVDEC + GL pipeline"); + + if test_pipeline(&pipeline) { + debug!("NVDEC pipeline verified"); + Some(pipeline) + } else { + debug!("NVDEC + GL pipeline test failed"); + None + } + } + + fn try_gl_pipeline( + escaped_path: &str, + test_pipeline: &impl Fn(&str) -> bool, + ) -> Option { + // Test the actual pipeline with appsink + let pipeline = format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin name=dec ! ", + "videorate drop-only=true max-rate=60 ! video/x-raw,framerate=60/1 ! ", + "glupload ! ", + "glcolorconvert ! video/x-raw(memory:GLMemory),format=BGRx ! ", + "gldownload ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ); + + debug!(pipeline = %pipeline, "Testing decodebin + GL pipeline"); + + if test_pipeline(&pipeline) { + debug!("Decodebin + GL pipeline verified"); + Some(pipeline) + } else { + debug!("GL pipeline test failed"); + None + } + } + + fn software_fallback_pipeline( + escaped_path: &str, + target_width: u32, + target_height: u32, + ) -> String { + // Check if openh264dec is available for explicit H.264 software decode + let has_openh264 = gstreamer::ElementFactory::find("openh264dec").is_some(); + + // For software fallback, we want to avoid hardware decoders that might + // be registered but not functional (e.g., nvh264dec on AMD-only systems). + // If openh264 is available, prefer that for H.264 content. + let pipeline = if has_openh264 { + // Use decodebin3 which has better fallback behavior, with decoder hints + format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin3 ! ", + "videoconvert ! ", + "video/x-raw,format=BGRx ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ) + } else { + format!( + concat!( + "filesrc location=\"{path}\" ! ", + "decodebin ! ", + "videoconvert ! 
", + "video/x-raw,format=BGRx ! ", + "appsink name=sink sync=true max-buffers=4 drop=true" + ), + path = escaped_path, + ) + }; + + debug!( + pipeline = %pipeline, + has_openh264, + "Using software decodebin fallback ({}x{})", + target_width, target_height + ); + pipeline + } + + fn detect_framerate( + pipeline: &gstreamer::Pipeline, + appsink: &gstreamer_app::AppSink, + target_width: u32, + target_height: u32, + ) -> Duration { + use gstreamer::prelude::*; + + debug!("Setting pipeline to PAUSED to detect framerate"); + + if pipeline.set_state(gstreamer::State::Paused).is_err() { + debug!("Failed to set pipeline to PAUSED"); + return DEFAULT_FRAME_DURATION; + } + + let (result, _, _) = pipeline.state(gstreamer::ClockTime::from_mseconds(500)); + if result.is_err() { + debug!("Pipeline failed to reach PAUSED state"); + return DEFAULT_FRAME_DURATION; + } + + debug!("Pipeline reached PAUSED state, querying caps"); + + let Some(pad) = appsink.static_pad("sink") else { + debug!("No sink pad on appsink"); + return DEFAULT_FRAME_DURATION; + }; + + let Some(caps) = pad.current_caps() else { + debug!("No current caps on pad"); + return DEFAULT_FRAME_DURATION; + }; + + debug!(caps = %caps, "Got current caps from pad"); + + let Some(structure) = caps.structure(0) else { + debug!("No structure in caps"); + return DEFAULT_FRAME_DURATION; + }; + + // Log video resolution + if let (Some(w), Some(h)) = ( + structure.get::("width").ok(), + structure.get::("height").ok(), + ) { + info!( + target_resolution = format!("{}x{}", target_width, target_height), + pipeline_resolution = format!("{}x{}", w, h), + "Video resolution configuration" + ); + } + + if let Ok(framerate) = structure.get::("framerate") { + if framerate.numer() > 0 && framerate.denom() > 0 { + let detected_duration = Duration::from_secs_f64( + f64::from(framerate.denom()) / f64::from(framerate.numer()), + ); + info!( + fps = format!("{}/{}", framerate.numer(), framerate.denom()), + duration_ms = 
detected_duration.as_millis(), + "Detected video framerate" + ); + return detected_duration.max(MIN_FRAME_DURATION); + } + } + + debug!("No framerate field in caps"); + DEFAULT_FRAME_DURATION + } + + fn setup_appsink_callback( + appsink: &gstreamer_app::AppSink, + frame_queue: crate::frame_queue::SharedFrameQueue, + frame_state: Arc>, + ) { + appsink.set_callbacks( + gstreamer_app::AppSinkCallbacks::builder() + .new_sample(move |appsink| Self::handle_sample(appsink, &frame_queue, &frame_state)) + .propose_allocation(|_appsink, query| { + // Add VideoMeta support to enable DMA-BUF output from VAAPI. + // vapostproc requires downstream to support VideoMeta for DMA-BUF caps. + query.add_allocation_meta::(None); + debug!("Added VideoMeta to allocation query for DMA-BUF support"); + true + }) + .build(), + ); + } + + fn handle_sample( + appsink: &gstreamer_app::AppSink, + frame_queue: &crate::frame_queue::SharedFrameQueue, + frame_state: &Arc>, + ) -> Result { + tracing::debug!("handle_sample callback triggered"); + + // Check if we're shutting down - return early to avoid blocking + if frame_queue.is_stopped() { + tracing::debug!("Frame queue is stopped, returning EOS"); + return Err(gstreamer::FlowError::Eos); + } + + let sample = match appsink.pull_sample() { + Ok(s) => s, + Err(e) => { + tracing::warn!("Callback pull_sample failed: {:?}", e); + return Ok(gstreamer::FlowSuccess::Ok); + } + }; + tracing::debug!("Got sample from appsink"); + + let Some(buffer) = sample.buffer() else { + tracing::debug!("Sample has no buffer"); + return Ok(gstreamer::FlowSuccess::Ok); + }; + tracing::debug!(size = buffer.size(), "Got buffer from sample"); + + let pts_ns = buffer.pts().map(|p| p.nseconds()); + + let Some(caps) = sample.caps() else { + tracing::trace!("Sample has no caps"); + return Ok(gstreamer::FlowSuccess::Ok); + }; + + let Ok(video_info) = gstreamer_video::VideoInfo::from_caps(caps) else { + tracing::trace!("Failed to get VideoInfo from caps"); + return 
Ok(gstreamer::FlowSuccess::Ok); + }; + + let width = video_info.width(); + let height = video_info.height(); + tracing::trace!(width, height, "Got video info"); + + // Check for DMA-BUF memory first (zero-copy path) + let frame = if buffer.n_memory() > 0 { + let mem = buffer.memory(0).unwrap(); + + if let Some(dmabuf_mem) = + mem.downcast_memory_ref::() + { + tracing::trace!("Frame is DMA-BUF"); + Self::create_dmabuf_frame(&sample, buffer, dmabuf_mem, caps, width, height, pts_ns) + } else { + tracing::trace!("Frame is NOT DMA-BUF, will map readable"); + None + } + } else { + tracing::trace!("Buffer has no memory blocks"); + None + }; + + let frame = match frame { + Some(f) => f, + None => { + // Fallback: Map buffer and copy + tracing::trace!("Attempting to map buffer readable"); + if let Ok(map) = buffer.map_readable() { + tracing::trace!( + data_len = map.as_slice().len(), + "Mapped buffer, creating frame" + ); + crate::frame_queue::QueuedFrame::new( + map.as_slice().to_vec(), + width, + height, + pts_ns, + ) + } else { + tracing::trace!("Skipped frame: buffer map blocked"); + return Ok(gstreamer::FlowSuccess::Ok); + } + } + }; + + tracing::trace!(width, height, "Pushing frame to queue"); + if !frame_queue.push(frame) { + tracing::trace!("Frame push failed (queue full or stopped)"); + return Ok(gstreamer::FlowSuccess::Ok); + } + + if let Ok(mut state) = frame_state.try_lock() { + state.frame_count += 1; + // Log every frame for debugging + if state.frame_count <= 5 || state.frame_count % 60 == 0 { + let stats = frame_queue.stats(); + tracing::debug!( + frames = state.frame_count, + queue_len = frame_queue.len(), + pushed = stats.frames_pushed, + popped = stats.frames_popped, + dropped = stats.frames_dropped_full, + reused = stats.frames_reused, + "Video playback progress" + ); + } + } + + Ok(gstreamer::FlowSuccess::Ok) + } + + fn create_dmabuf_frame( + _sample: &gstreamer::Sample, + buffer: &gstreamer::BufferRef, + dmabuf_mem: 
&gstreamer_allocators::DmaBufMemoryRef, + caps: &gstreamer::CapsRef, + width: u32, + height: u32, + pts_ns: Option, + ) -> Option { + let fd = dmabuf_mem.fd(); + let video_meta = buffer.meta::(); + + let format_str = caps + .structure(0) + .and_then(|s| s.get::("drm-format").ok()) + .or_else(|| { + caps.structure(0) + .and_then(|s| s.get::("format").ok()) + }) + .unwrap_or_else(|| "NV12".to_string()); + + let dmabuf_format = crate::dmabuf::DmaBufFormat::from_gst_format(&format_str); + let is_nv12 = format_str.starts_with("NV12") + || dmabuf_format.fourcc == drm_fourcc::DrmFourcc::Nv12 as u32; + + let (strides, offsets) = if let Some(meta) = &video_meta { + let s: Vec = meta.stride().iter().map(|&x| x as u32).collect(); + let o: Vec = meta.offset().iter().map(|&x| x as u32).collect(); + tracing::debug!(strides = ?s, offsets = ?o, "VideoMeta plane info"); + (s, o) + } else { + let aligned_width = (width + 63) & !63; + let y_size = aligned_width * height; + if is_nv12 { + (vec![aligned_width, aligned_width], vec![0, y_size]) + } else { + (vec![aligned_width * 4], vec![0]) + } + }; + + tracing::debug!( + fd = fd, + format = %format_str, + ?strides, + ?offsets, + "Zero-copy DMA-BUF frame" + ); + + let dmabuf_result = if is_nv12 && strides.len() >= 2 && offsets.len() >= 2 { + crate::frame_queue::DmaBufFrameData::from_raw_fd_nv12_with_offsets( + fd, + dmabuf_format.fourcc, + dmabuf_format.modifier, + width, + height, + strides[0], + strides[1], + offsets[0], + offsets[1], + ) + } else if is_nv12 { + crate::frame_queue::DmaBufFrameData::from_raw_fd_nv12( + fd, + dmabuf_format.fourcc, + dmabuf_format.modifier, + width, + height, + strides.first().copied().unwrap_or(width), + ) + } else { + crate::frame_queue::DmaBufFrameData::from_raw_fd( + fd, + dmabuf_format.fourcc, + dmabuf_format.modifier, + strides.first().copied().unwrap_or(width * 4), + ) + }; + + match dmabuf_result { + Ok(mut dmabuf_data) => { + dmabuf_data.width = width; + dmabuf_data.height = height; + 
Some(crate::frame_queue::QueuedFrame::new_dmabuf( + dmabuf_data, + width, + height, + pts_ns, + )) + } + Err(e) => { + tracing::warn!(error = %e, "Failed to dup DMA-BUF fd"); + None + } + } + } + + /// Pull the next available frame from the pipeline (non-blocking). + #[allow(dead_code)] + pub fn pull_frame(&self) -> Option { + match self + .appsink + .try_pull_sample(gstreamer::ClockTime::from_mseconds(17)) + { + Some(sample) => { + debug!("Successfully pulled sample from appsink"); + self.process_sample(&sample) + } + None => None, + } + } + + /// Pull a frame and write it directly to a destination buffer. + pub fn pull_frame_to_buffer(&self, dest: &mut [u8]) -> Option { + let stats = self.frame_queue.stats(); + tracing::trace!( + queue_len = self.frame_queue.len(), + pushed = stats.frames_pushed, + popped = stats.frames_popped, + "Attempting to pull frame" + ); + + if let Some((width, height)) = self.frame_queue.write_frame_to(dest) { + tracing::trace!(width, height, "Frame written to buffer"); + return Some(VideoFrameInfo { + width, + height, + is_bgrx: true, + }); + } + tracing::trace!("No frame available from queue"); + None + } + + #[allow(dead_code)] + fn process_sample(&self, sample: &gstreamer::Sample) -> Option { + let buffer = sample.buffer()?; + let caps = sample.caps()?; + let video_info = gstreamer_video::VideoInfo::from_caps(caps).ok()?; + + let width = video_info.width(); + let height = video_info.height(); + + let fps = video_info.fps(); + let frame_duration = if fps.numer() > 0 && fps.denom() > 0 { + Duration::from_secs_f64(f64::from(fps.denom()) / f64::from(fps.numer())) + } else { + DEFAULT_FRAME_DURATION + }; + + let pts = buffer.pts().map(|p| p.nseconds()); + let map = buffer.map_readable().ok()?; + let data = map.as_slice(); + + let expected_size = (width * height * 4) as usize; + if data.len() < expected_size { + error!( + data_len = data.len(), + expected = expected_size, + "Buffer size mismatch" + ); + return None; + } + + let image_buffer 
= + image::RgbaImage::from_raw(width, height, data[..expected_size].to_vec())?; + let image = DynamicImage::ImageRgba8(image_buffer); + + let frame = AnimatedFrame { + image, + duration: frame_duration.max(MIN_FRAME_DURATION), + pts, + }; + + if let Ok(mut state) = self.frame_state.lock() { + state.current_frame = Some(frame.clone()); + state.frame_duration = frame_duration; + } + + Some(frame) + } + + /// Start video playback. + pub fn play(&self) -> eyre::Result<()> { + use gstreamer::prelude::*; + self.pipeline + .set_state(gstreamer::State::Playing) + .map_err(|e| eyre::eyre!("Failed to start pipeline: {:?}", e))?; + Ok(()) + } + + /// Stop video playback. + /// + /// This performs a non-blocking stop to avoid system freezes when + /// CUDA/GPU operations are in progress. The state change is initiated + /// but not waited on synchronously. + pub fn stop(&self) -> eyre::Result<()> { + use gstreamer::prelude::*; + + // Stop accepting new frames first + self.frame_queue.stop(); + + // Set EOS flag to stop any pending callbacks + if let Ok(mut state) = self.frame_state.lock() { + state.eos = true; + } + + // Set state to NULL without waiting - avoids blocking on GPU operations + // The pipeline will clean up asynchronously + let _ = self.pipeline.set_state(gstreamer::State::Null); + + tracing::debug!(path = ?self.source_path, "Video pipeline stopped"); + Ok(()) + } + + /// Seek to the beginning for looping. + pub fn seek_to_start(&self) -> eyre::Result<()> { + use gstreamer::prelude::*; + + let seek_flags = gstreamer::SeekFlags::FLUSH + | gstreamer::SeekFlags::KEY_UNIT + | gstreamer::SeekFlags::SNAP_BEFORE; + + self.pipeline + .seek_simple(seek_flags, gstreamer::ClockTime::ZERO)?; + + if let Ok(mut state) = self.frame_state.lock() { + state.eos = false; + } + + Ok(()) + } + + /// Get the current frame if available. 
+ #[must_use] + #[allow(dead_code)] + pub fn current_frame(&self) -> Option { + match self.pull_frame() { + Some(frame) => Some(frame), + None => self + .frame_state + .lock() + .ok() + .and_then(|state| state.current_frame.clone()), + } + } + + /// Get the frame duration. + #[must_use] + pub fn frame_duration(&self) -> Duration { + self.frame_state + .lock() + .ok() + .map(|state| state.frame_duration) + .unwrap_or(DEFAULT_FRAME_DURATION) + } + + /// Get video dimensions. + #[must_use] + pub fn video_dimensions(&self) -> Option<(u32, u32)> { + if let Some(dims) = self.frame_queue.last_frame_dimensions() { + return Some(dims); + } + + use gstreamer::prelude::*; + let pad = self.appsink.static_pad("sink")?; + let caps = pad.current_caps()?; + let video_info = gstreamer_video::VideoInfo::from_caps(&caps).ok()?; + Some((video_info.width(), video_info.height())) + } + + /// Pull last cached frame. + pub fn pull_cached_frame(&self, dest: &mut [u8]) -> Option { + self.pull_frame_to_buffer(dest) + } + + /// Try to get a DMA-BUF frame for zero-copy rendering. + /// + /// NOTE: This shares the file descriptor references from the frame queue. + /// The returned DmaBufBuffer shares Arc with the queued frame, + /// avoiding FD duplication on every render (which would cause FD leaks). + #[must_use] + pub fn try_get_dmabuf_frame(&self) -> Option { + let frame = self.frame_queue.get_render_frame()?; + let dmabuf_data = frame.dmabuf()?; + + tracing::trace!( + width = frame.width, + height = frame.height, + fourcc = format!("{:#x}", dmabuf_data.fourcc), + modifier = format!("{:#x}", dmabuf_data.modifier), + "Got DMA-BUF frame - TRUE ZERO-COPY!" + ); + + // IMPORTANT: Share Arc references instead of duplicating FDs. + // Previously this called try_clone_to_owned() which leaked FDs over time. 
+ let mut planes = Vec::with_capacity(dmabuf_data.planes.len()); + for plane_data in &dmabuf_data.planes { + planes.push(crate::dmabuf::DmaBufPlane { + fd: std::sync::Arc::clone(&plane_data.fd), + offset: plane_data.offset, + stride: plane_data.stride, + }); + } + + Some(crate::dmabuf::DmaBufBuffer { + width: frame.width, + height: frame.height, + format: crate::dmabuf::DmaBufFormat { + fourcc: dmabuf_data.fourcc, + modifier: dmabuf_data.modifier, + }, + planes, + wl_buffer: None, + }) + } + + /// Check for EOS and handle looping. + pub fn check_eos(&mut self) -> bool { + use gstreamer::prelude::*; + + let Some(bus) = self.pipeline.bus() else { + return false; + }; + + while let Some(msg) = bus.pop() { + use gstreamer::MessageView; + + match msg.view() { + MessageView::Eos(_) => { + if self.looping { + let loop_num = self + .rebuild_count + .fetch_add(1, std::sync::atomic::Ordering::Relaxed) + + 1; + debug!(loop_num, path = %self.source_path.display(), "Video EOS, seeking to start"); + + if let Err(e) = self.seek_to_start() { + error!(?e, "Failed to seek to start for loop"); + return true; + } + return false; + } + return true; + } + MessageView::Error(err) => { + let error_msg = err.error().to_string(); + let src_path = err.src().map(|s| s.path_string()); + + // Provide helpful error messages for common codec issues + if error_msg.contains("missing a plug-in") { + error!( + src = ?src_path, + error = %error_msg, + "GStreamer pipeline error: Missing codec plugin. \ + For H.264/MP4 support on Fedora, install 'gstreamer1-plugin-openh264' or use VP9/WebM files. \ + Alternatively, enable RPM Fusion and install 'gstreamer1-plugins-ugly'." 
+ ); + } else { + error!( + src = ?src_path, + error = %error_msg, + "GStreamer pipeline error" + ); + } + return true; + } + MessageView::Warning(warn) => { + warn!( + src = ?warn.src().map(|s| s.path_string()), + error = %warn.error(), + "GStreamer pipeline warning" + ); + } + MessageView::StateChanged(state) => { + if state.src().map(|s| s == &self.pipeline).unwrap_or(false) { + debug!(old = ?state.old(), new = ?state.current(), "Pipeline state changed"); + } + } + _ => {} + } + } + + false + } +} + +impl Drop for VideoPlayer { + fn drop(&mut self) { + // Stop frame queue first to signal callbacks to exit + self.frame_queue.stop(); + + // Set EOS flag + if let Ok(mut state) = self.frame_state.try_lock() { + state.eos = true; + } + + // Set pipeline to NULL state - this is non-blocking + use gstreamer::prelude::*; + let _ = self.pipeline.set_state(gstreamer::State::Null); + + tracing::debug!(path = ?self.source_path, "VideoPlayer dropped"); + } +} diff --git a/src/colored.rs b/src/colored.rs index b5f4beb..57fbbe4 100644 --- a/src/colored.rs +++ b/src/colored.rs @@ -28,12 +28,7 @@ pub fn gradient( let mut colors = Vec::with_capacity(gradient.colors.len()); for &[r, g, b] in &*gradient.colors { - colors.push(colorgrad::Color::from_linear_rgba( - f32::from(r), - f32::from(g), - f32::from(b), - 1.0, - )); + colors.push(colorgrad::Color::from_linear_rgba(r, g, b, 1.0)); } let grad = colorgrad::GradientBuilder::new() @@ -79,7 +74,7 @@ pub fn gradient( for (x, y, pixel) in imgbuf.enumerate_pixels_mut() { let Color { r, g, b, .. } = grad.at(positioner(x, y) as f32); - *pixel = image::Rgb([r as f32, g as f32, b as f32]); + *pixel = image::Rgb([r, g, b]); } Ok(imgbuf) diff --git a/src/dmabuf.rs b/src/dmabuf.rs new file mode 100644 index 0000000..2076721 --- /dev/null +++ b/src/dmabuf.rs @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! DMA-BUF zero-copy GPU rendering support. +//! +//! 
This module provides true zero-copy video rendering via the linux-dmabuf protocol: +//! ``` +//! GPU decode → DMA-BUF fd → zwp_linux_dmabuf_v1 → Compositor +//! ``` +//! +//! ## Performance Benefits +//! +//! - **Zero CPU copies**: Video stays in GPU memory throughout +//! - **~0.2-0.5ms per frame**: Compared to 3-5ms for wl_shm path +//! - **Lower memory bandwidth**: No GPU→CPU→GPU roundtrip +//! +//! ## GPU Vendor Support +//! +//! | Vendor | Decoder | DMA-BUF Export | Performance | +//! |--------|---------|----------------|-------------| +//! | AMD | VAAPI | ✅ Native | Excellent | +//! | Intel | VAAPI | ✅ Native | Excellent | +//! | NVIDIA | NVDEC | ✅ Via plugin | Excellent | +//! | ARM | V4L2 | ✅ Native | Good | +//! +//! NVIDIA note: NVDEC outputs CUDAMemory which requires conversion to DMA-BUF. +//! Falls back to wl_shm if conversion fails. + +use std::os::fd::OwnedFd; +use std::sync::Arc; + +use sctk::reexports::client::{QueueHandle, protocol::wl_buffer::WlBuffer}; +use tracing::{debug, warn}; + +// We'll use wayland-client directly for linux-dmabuf protocol +// since sctk doesn't provide high-level bindings yet +use sctk::reexports::client::globals::GlobalList; +use wayland_protocols::wp::linux_dmabuf::zv1::client::{ + zwp_linux_buffer_params_v1::{self, ZwpLinuxBufferParamsV1}, + zwp_linux_dmabuf_v1::ZwpLinuxDmabufV1, +}; + +/// DMA-BUF format and modifier information +#[derive(Debug, Clone, Copy)] + +pub struct DmaBufFormat { + pub fourcc: u32, + pub modifier: u64, +} + +// NVIDIA tiled modifiers reference (unused but kept for documentation): +// Format: 0x03SSHHBBBBBBBBBB where: +// - SS = sector layout (0x06 for 16Bx2, 0x0e for 32Bx2) +// - HH = height log2 (0x00-0x05) +// - BB = block-linear base +// 16Bx2: 0x0300000000606010..0x0300000000606015 +// 32Bx2: 0x0300000000e08010..0x0300000000e08015 + +impl DmaBufFormat { + /// Create AR24 (ARGB8888) format - most widely supported + pub fn ar24() -> Self { + Self { + fourcc: drm_fourcc::DrmFourcc::Argb8888 
as u32, + modifier: drm_fourcc::DrmModifier::Linear.into(), + } + } + + /// Create XR24 (XRGB8888) format - common for video + pub fn xr24() -> Self { + Self { + fourcc: drm_fourcc::DrmFourcc::Xrgb8888 as u32, + modifier: drm_fourcc::DrmModifier::Linear.into(), + } + } + + /// Create NV12 format - efficient for video (YUV 4:2:0) + pub fn nv12() -> Self { + Self { + fourcc: drm_fourcc::DrmFourcc::Nv12 as u32, + modifier: drm_fourcc::DrmModifier::Linear.into(), + } + } + + /// Create AB24 (ABGR8888) format - NVIDIA GL DMA-BUF output (RGBA → ABGR) + pub fn ab24() -> Self { + Self { + fourcc: drm_fourcc::DrmFourcc::Abgr8888 as u32, + modifier: drm_fourcc::DrmModifier::Linear.into(), + } + } + + /// Create NV12 format with NVIDIA tiled modifier (from cudadmabufupload) + pub fn nv12_nvidia_tiled(modifier: u64) -> Self { + Self { + fourcc: drm_fourcc::DrmFourcc::Nv12 as u32, + modifier, + } + } + + /// Create XR24 (XRGB8888/BGRx) format with NVIDIA tiled modifier + pub fn xr24_nvidia_tiled(modifier: u64) -> Self { + Self { + fourcc: drm_fourcc::DrmFourcc::Xrgb8888 as u32, + modifier, + } + } + + /// Detect format from GStreamer video format string or DRM format string. + /// Handles NVIDIA tiled modifiers from cudadmabufupload (e.g., "NV12:0x0300000000606010"). 
+ pub fn from_gst_format(format_str: &str) -> Self { + // Handle DRM format strings with modifiers (e.g., "NV12:0x0300000000606010") + if let Some((format, modifier_str)) = format_str.split_once(':') { + let modifier = if modifier_str.starts_with("0x") || modifier_str.starts_with("0X") { + u64::from_str_radix(&modifier_str[2..], 16).unwrap_or(0) + } else { + modifier_str.parse::().unwrap_or(0) + }; + + return match format { + "NV12" => Self::nv12_nvidia_tiled(modifier), + "XR24" => Self::xr24_nvidia_tiled(modifier), + "XB24" => Self { + fourcc: drm_fourcc::DrmFourcc::Xbgr8888 as u32, + modifier, + }, + "AR24" => Self { + fourcc: drm_fourcc::DrmFourcc::Argb8888 as u32, + modifier, + }, + "AB24" => Self { + fourcc: drm_fourcc::DrmFourcc::Abgr8888 as u32, + modifier, + }, + _ => { + tracing::warn!( + format, + modifier, + "Unknown DRM format with modifier, defaulting to XR24" + ); + Self::xr24_nvidia_tiled(modifier) + } + }; + } + + // Simple format strings without modifiers + match format_str { + "BGRx" | "BGRX" => Self::xr24(), + "RGBA" => Self::ab24(), // RGBA in GStreamer = ABGR8888 in DRM + "ARGB" => Self::ar24(), + "NV12" => Self::nv12(), + // DRM format strings (from vapostproc with memory:DMABuf) + "XB24" => Self::xb24(), // XBGR8888 + "XR24" => Self::xr24(), // XRGB8888 + "AB24" => Self::ab24(), // ABGR8888 + "AR24" => Self::ar24(), // ARGB8888 + _ => { + tracing::warn!(format = format_str, "Unknown format, defaulting to XR24"); + Self::xr24() + } + } + } + + /// Create XB24 (XBGR8888) format - VAAPI DMA-BUF output + pub fn xb24() -> Self { + Self { + fourcc: drm_fourcc::DrmFourcc::Xbgr8888 as u32, + modifier: drm_fourcc::DrmModifier::Linear.into(), + } + } +} + +/// DMA-BUF plane descriptor +#[derive(Debug, Clone)] + +pub struct DmaBufPlane { + pub fd: Arc, + pub offset: u32, + pub stride: u32, +} + +/// A DMA-BUF backed buffer for zero-copy rendering +#[derive(Debug)] + +pub struct DmaBufBuffer { + pub width: u32, + pub height: u32, + pub format: 
DmaBufFormat, + pub planes: Vec, + pub wl_buffer: Option, +} + +impl DmaBufBuffer { + /// Create wl_buffer from DMA-BUF using zwp_linux_dmabuf_v1. + /// + /// This performs the actual zero-copy buffer creation: + /// 1. Create zwp_linux_buffer_params_v1 + /// 2. Add plane(s) with fd, offset, stride, modifier + /// 3. Create wl_buffer from params + pub fn create_wl_buffer( + &mut self, + dmabuf: &ZwpLinuxDmabufV1, + qh: &QueueHandle, + ) -> Option { + if self.planes.is_empty() { + warn!("No planes available for DMA-BUF buffer"); + return None; + } + + // Create buffer params + let params: ZwpLinuxBufferParamsV1 = dmabuf.create_params(qh, ()); + + // Add each plane + for (plane_idx, plane) in self.planes.iter().enumerate() { + use std::os::fd::AsFd; + params.add( + plane.fd.as_fd(), + plane_idx as u32, + plane.offset, + plane.stride, + (self.format.modifier >> 32) as u32, // modifier_hi + (self.format.modifier & 0xFFFFFFFF) as u32, // modifier_lo + ); + } + + // Create the wl_buffer + let wl_buffer = params.create_immed( + self.width as i32, + self.height as i32, + self.format.fourcc, + zwp_linux_buffer_params_v1::Flags::empty(), + qh, + (), + ); + + debug!( + width = self.width, + height = self.height, + fourcc = self.format.fourcc, + planes = self.planes.len(), + "Created DMA-BUF wl_buffer" + ); + + self.wl_buffer = Some(wl_buffer.clone()); + Some(wl_buffer) + } +} + +/// DMA-BUF manager state +#[derive(Debug)] +pub struct DmaBufState { + /// The zwp_linux_dmabuf_v1 global (if available) + pub dmabuf_global: Option, +} + +impl Default for DmaBufState { + fn default() -> Self { + Self::new() + } +} + +impl DmaBufState { + /// Create a new DMA-BUF state + pub fn new() -> Self { + Self { + dmabuf_global: None, + } + } + + /// Bind to zwp_linux_dmabuf_v1 global and store it for buffer creation. + /// + /// Returns the bound global if available, None otherwise. 
+ pub fn bind_global( + globals: &GlobalList, + qh: &QueueHandle, + ) -> Option { + // Check if zwp_linux_dmabuf_v1 is advertised + let global_list = globals.contents().clone_list(); + let dmabuf_global = global_list + .iter() + .find(|g| g.interface == "zwp_linux_dmabuf_v1")?; + + // Bind the global at version 3 (supports modifiers) + let version = dmabuf_global.version.min(3); + let dmabuf: ZwpLinuxDmabufV1 = globals.registry().bind(dmabuf_global.name, version, qh, ()); + + debug!(version, "DMA-BUF protocol (zwp_linux_dmabuf_v1) available"); + + Some(dmabuf) + } +} diff --git a/src/draw.rs b/src/draw.rs index 97a1cde..fc72f5b 100644 --- a/src/draw.rs +++ b/src/draw.rs @@ -89,6 +89,32 @@ pub fn xrgb21010_canvas(canvas: &mut [u8], image: &DynamicImage) { /// Draws the image on an 8-bit canvas. pub fn xrgb888_canvas(canvas: &mut [u8], image: &DynamicImage) { + use rayon::prelude::*; + + // Fast path: if image is RGBA8, we can convert using parallel iteration + if let DynamicImage::ImageRgba8(rgba_img) = image { + let pixels = rgba_img.as_raw(); + let pixel_count = pixels.len() / 4; + + // Use parallel iteration for RGBA to BGRX conversion + // Split canvas into chunks and process in parallel + canvas + .par_chunks_mut(4) + .take(pixel_count) + .enumerate() + .for_each(|(i, chunk)| { + let s = i * 4; + // RGBA to BGRX (little-endian XRGB) + chunk[0] = pixels[s + 2]; // B + chunk[1] = pixels[s + 1]; // G + chunk[2] = pixels[s]; // R + chunk[3] = 0xFF; // X (padding) + }); + + return; + } + + // Fallback for other image formats for (pos, (_, _, pixel)) in image.pixels().enumerate() { let indice = pos * 4; diff --git a/src/frame_queue.rs b/src/frame_queue.rs new file mode 100644 index 0000000..a4e4f0b --- /dev/null +++ b/src/frame_queue.rs @@ -0,0 +1,712 @@ +// SPDX-License-Identifier: MPL-2.0 + +//! Bounded frame queue for decoupled video playback. +//! +//! This module provides a bounded frame queue that decouples GStreamer's +//! 
decoding thread from the Wayland rendering thread: +//! +//! ```text +//! ┌─────────────┐ +//! │ GStreamer │ +//! │ decode │ +//! └─────┬───────┘ +//! │ push() - drops oldest if full +//! ▼ +//! ┌─────────────┐ +//! │ Frame Queue │ ← bounded (2-4 frames) +//! └─────┬───────┘ +//! │ get_render_frame() - reuses last frame if empty +//! ▼ +//! ┌─────────────┐ +//! │ Renderer │ +//! └─────────────┘ +//! ``` +//! +//! # Key Guarantees +//! +//! - **Renderer never blocks**: Returns immediately, reuses last frame if empty +//! - **Producer never blocks**: Drops oldest frame if queue is full +//! - **Frame drops are invisible**: Wallpapers don't need perfect frame accuracy +//! +//! # Zero-Copy DMA-BUF Support +//! +//! For hardware-accelerated video, frames can be stored as DMA-BUF file +//! descriptors. The renderer imports these directly via zwp_linux_dmabuf_v1. + +use std::os::fd::{FromRawFd, OwnedFd}; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::sync::{Arc, Mutex}; +use std::time::Instant; + +/// Maximum number of frames to buffer. +/// +/// 3-4 frames provides enough buffer to hide decode hiccups while keeping +/// latency low. More frames = more memory usage and higher latency. +pub const DEFAULT_QUEUE_CAPACITY: usize = 3; + +/// Per-plane data for DMA-BUF frames. +#[derive(Debug, Clone)] +pub struct DmaBufPlaneData { + /// File descriptor for this plane (shared Arc for multi-plane in same buffer) + pub fd: Arc, + /// Offset into the buffer for this plane + pub offset: u32, + /// Bytes per row for this plane + pub stride: u32, +} + +/// DMA-BUF frame data for zero-copy GPU rendering. +/// +/// This holds the file descriptor and metadata needed to import the frame +/// directly into the compositor via zwp_linux_dmabuf_v1, bypassing CPU entirely. +/// +/// Supports multi-plane formats like NV12 (Y + UV planes). 
+#[derive(Debug)] +pub struct DmaBufFrameData { + /// DRM fourcc format code (e.g., NV12, XRGB8888) + pub fourcc: u32, + /// DRM modifier (e.g., LINEAR, NVIDIA tiled) + pub modifier: u64, + /// Per-plane data (1 for RGB formats, 2 for NV12/NV21, 3 for YUV420P) + pub planes: Vec, + /// Frame width (needed for calculating plane offsets) + pub width: u32, + /// Frame height (needed for calculating plane offsets) + pub height: u32, +} + +impl Clone for DmaBufFrameData { + fn clone(&self) -> Self { + Self { + fourcc: self.fourcc, + modifier: self.modifier, + planes: self.planes.clone(), + width: self.width, + height: self.height, + } + } +} + +impl DmaBufFrameData { + /// Create new DMA-BUF frame data from a raw fd for single-plane formats. + /// + /// SAFETY: The fd is duplicated, so caller can keep using the original. + pub fn from_raw_fd( + raw_fd: i32, + fourcc: u32, + modifier: u64, + stride: u32, + ) -> std::io::Result { + // Duplicate the fd so we own it independently + let dup_fd = nix::unistd::dup(raw_fd) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + + let fd = Arc::new(unsafe { OwnedFd::from_raw_fd(dup_fd) }); + + Ok(Self { + fourcc, + modifier, + planes: vec![DmaBufPlaneData { + fd, + offset: 0, + stride, + }], + width: 0, // Set by caller + height: 0, // Set by caller + }) + } + + /// Create NV12 DMA-BUF frame data with two planes (Y + UV). + /// + /// For NVIDIA tiled NV12, both planes share the same fd with different offsets. 
+ /// - Plane 0 (Y): offset = 0, stride = aligned_width + /// - Plane 1 (UV): offset = aligned_stride * height, stride = aligned_width + pub fn from_raw_fd_nv12( + raw_fd: i32, + fourcc: u32, + modifier: u64, + width: u32, + height: u32, + y_stride: u32, + ) -> std::io::Result { + // Duplicate the fd so we own it independently + let dup_fd = nix::unistd::dup(raw_fd) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + + let fd = Arc::new(unsafe { OwnedFd::from_raw_fd(dup_fd) }); + + // For NVIDIA tiled NV12: + // - Y plane at offset 0 + // - UV plane at offset = y_stride * height (UV is half-height, interleaved) + let uv_offset = y_stride * height; + + tracing::debug!( + y_stride, + uv_offset, + width, + height, + "Creating NV12 DMA-BUF with 2 planes" + ); + + Ok(Self { + fourcc, + modifier, + planes: vec![ + DmaBufPlaneData { + fd: Arc::clone(&fd), + offset: 0, + stride: y_stride, + }, + DmaBufPlaneData { + fd, + offset: uv_offset, + stride: y_stride, // UV has same stride as Y for NV12 + }, + ], + width, + height, + }) + } + + /// Create NV12 DMA-BUF frame data with explicit offsets from VideoMeta. + /// + /// This is the preferred method when GStreamer provides VideoMeta with + /// actual plane offsets, which is required for NVIDIA tiled formats. 
+ #[allow(clippy::too_many_arguments)] + pub fn from_raw_fd_nv12_with_offsets( + raw_fd: i32, + fourcc: u32, + modifier: u64, + width: u32, + height: u32, + y_stride: u32, + uv_stride: u32, + y_offset: u32, + uv_offset: u32, + ) -> std::io::Result { + // Duplicate the fd so we own it independently + let dup_fd = nix::unistd::dup(raw_fd) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + + let fd = Arc::new(unsafe { OwnedFd::from_raw_fd(dup_fd) }); + + tracing::debug!( + y_stride, + uv_stride, + y_offset, + uv_offset, + width, + height, + "Creating NV12 DMA-BUF with 2 planes (explicit offsets from VideoMeta)" + ); + + Ok(Self { + fourcc, + modifier, + planes: vec![ + DmaBufPlaneData { + fd: Arc::clone(&fd), + offset: y_offset, + stride: y_stride, + }, + DmaBufPlaneData { + fd, + offset: uv_offset, + stride: uv_stride, + }, + ], + width, + height, + }) + } +} + +/// Frame content - either raw pixel data or DMA-BUF reference. +#[derive(Clone)] +pub enum FrameContent { + /// Raw pixel data in memory (BGRx format for wl_shm) + Raw(Vec), + /// DMA-BUF for zero-copy GPU rendering + DmaBuf(DmaBufFrameData), +} + +impl std::fmt::Debug for FrameContent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Raw(data) => write!(f, "Raw({} bytes)", data.len()), + Self::DmaBuf(dmabuf) => write!( + f, + "DmaBuf(fourcc={:#x}, modifier={:#x})", + dmabuf.fourcc, dmabuf.modifier + ), + } + } +} + +/// A video frame stored in the queue. +/// +/// Supports two modes: +/// - **Raw**: Pixel data in BGRx format, copied to wl_shm buffer +/// - **DmaBuf**: Zero-copy GPU buffer, imported via zwp_linux_dmabuf_v1 +#[derive(Clone)] +pub struct QueuedFrame { + /// Frame content (raw pixels or DMA-BUF reference) + pub content: FrameContent, + /// Frame width in pixels. + pub width: u32, + /// Frame height in pixels. + pub height: u32, + /// Presentation timestamp (nanoseconds from video start). 
+ pub pts_ns: Option, + /// When this frame was queued (for debugging/metrics). + pub queued_at: Instant, +} + +impl QueuedFrame { + /// Create a new queued frame with raw pixel data. + pub fn new(data: Vec, width: u32, height: u32, pts_ns: Option) -> Self { + Self { + content: FrameContent::Raw(data), + width, + height, + pts_ns, + queued_at: Instant::now(), + } + } + + /// Create a new queued frame from DMA-BUF. + pub fn new_dmabuf( + dmabuf: DmaBufFrameData, + width: u32, + height: u32, + pts_ns: Option, + ) -> Self { + Self { + content: FrameContent::DmaBuf(dmabuf), + width, + height, + pts_ns, + queued_at: Instant::now(), + } + } + + /// Get the DMA-BUF data if this is a DMA-BUF frame. + pub fn dmabuf(&self) -> Option<&DmaBufFrameData> { + match &self.content { + FrameContent::DmaBuf(data) => Some(data), + _ => None, + } + } + + /// Get raw pixel data if this is a raw frame. + #[cfg(test)] + pub fn raw_data(&self) -> Option<&[u8]> { + match &self.content { + FrameContent::Raw(data) => Some(data), + _ => None, + } + } + + /// Write this frame directly to a destination buffer. + /// + /// Returns the number of bytes written, or 0 if buffer is too small or this is DMA-BUF. + pub fn write_to(&self, dest: &mut [u8]) -> usize { + match &self.content { + FrameContent::Raw(data) => { + let frame_size = data.len(); + if dest.len() < frame_size { + return 0; + } + dest[..frame_size].copy_from_slice(data); + frame_size + } + FrameContent::DmaBuf(_) => { + // DMA-BUF frames can't be written to CPU buffers + 0 + } + } + } +} + +/// Statistics about frame queue operations. +#[derive(Debug, Clone, Default)] +pub struct QueueStats { + /// Total frames pushed to the queue. + pub frames_pushed: u64, + /// Frames dropped due to queue being full (producer side). + pub frames_dropped_full: u64, + /// Frames successfully popped by renderer. + pub frames_popped: u64, + /// Times renderer reused last frame (queue was empty). 
+ pub frames_reused: u64, +} + +/// A bounded, thread-safe frame queue for video playback. +/// +/// This queue is designed for a single-producer (GStreamer callback), +/// single-consumer (Wayland render thread) pattern. +pub struct FrameQueue { + /// Ring buffer of frames. + frames: Mutex>>, + /// Capacity of the queue. + capacity: usize, + /// Write position (producer). + write_pos: AtomicU64, + /// Read position (consumer). + read_pos: AtomicU64, + /// Number of frames currently in the queue. + count: AtomicU64, + /// Last successfully rendered frame (for reuse when queue is empty). + last_frame: Mutex>, + /// Whether the queue has been stopped (EOS or error). + stopped: AtomicBool, + /// Statistics counters. + stats_pushed: AtomicU64, + stats_dropped: AtomicU64, + stats_popped: AtomicU64, + stats_reused: AtomicU64, +} + +impl FrameQueue { + /// Create a new frame queue with the specified capacity. + pub fn new(capacity: usize) -> Self { + let capacity = capacity.max(2); // Minimum 2 frames + let frames: Vec> = (0..capacity).map(|_| None).collect(); + + Self { + frames: Mutex::new(frames), + capacity, + write_pos: AtomicU64::new(0), + read_pos: AtomicU64::new(0), + count: AtomicU64::new(0), + last_frame: Mutex::new(None), + stopped: AtomicBool::new(false), + stats_pushed: AtomicU64::new(0), + stats_dropped: AtomicU64::new(0), + stats_popped: AtomicU64::new(0), + stats_reused: AtomicU64::new(0), + } + } + + /// Create a new frame queue with default capacity. + pub fn with_default_capacity() -> Self { + Self::new(DEFAULT_QUEUE_CAPACITY) + } + + /// Push a frame into the queue (producer side). + /// + /// If the queue is full, the oldest frame is dropped to make room. + /// This ensures the producer (GStreamer) never blocks. + /// + /// Returns `true` if the frame was added, `false` if queue is stopped. 
+ pub fn push(&self, frame: QueuedFrame) -> bool { + if self.stopped.load(Ordering::Acquire) { + return false; + } + + let mut frames = match self.frames.try_lock() { + Ok(guard) => guard, + Err(_) => { + // Lock contention - drop this frame rather than block + self.stats_dropped.fetch_add(1, Ordering::Relaxed); + tracing::trace!( + pts_ns = ?frame.pts_ns, + age_ms = frame.queued_at.elapsed().as_millis(), + "Frame dropped: lock contention" + ); + return true; // Not stopped, just contention + } + }; + + let current_count = self.count.load(Ordering::Acquire); + + // If queue is full, drop oldest frame + if current_count >= self.capacity as u64 { + // Advance read position to drop oldest + let old_read = self.read_pos.fetch_add(1, Ordering::AcqRel); + let drop_idx = (old_read % self.capacity as u64) as usize; + frames[drop_idx] = None; + self.count.fetch_sub(1, Ordering::AcqRel); + self.stats_dropped.fetch_add(1, Ordering::Relaxed); + tracing::trace!( + pts_ns = ?frame.pts_ns, + age_ms = frame.queued_at.elapsed().as_millis(), + "Frame dropped: queue full" + ); + } + + // Write new frame + let write_idx = (self.write_pos.load(Ordering::Acquire) % self.capacity as u64) as usize; + frames[write_idx] = Some(frame); + self.write_pos.fetch_add(1, Ordering::Release); + self.count.fetch_add(1, Ordering::Release); + self.stats_pushed.fetch_add(1, Ordering::Relaxed); + + true + } + + /// Try to pop a frame from the queue (consumer side). + /// + /// Returns `Some(frame)` if a frame is available, `None` otherwise. + /// This method NEVER blocks - it returns immediately. 
+ pub fn try_pop(&self) -> Option { + if self.count.load(Ordering::Acquire) == 0 { + return None; + } + + let mut frames = match self.frames.try_lock() { + Ok(guard) => guard, + Err(_) => return None, // Lock contention - return None, don't block + }; + + // Double-check count after acquiring lock + if self.count.load(Ordering::Acquire) == 0 { + return None; + } + + let read_idx = (self.read_pos.load(Ordering::Acquire) % self.capacity as u64) as usize; + let frame = frames[read_idx].take(); + + if frame.is_some() { + self.read_pos.fetch_add(1, Ordering::Release); + self.count.fetch_sub(1, Ordering::Release); + self.stats_popped.fetch_add(1, Ordering::Relaxed); + + // Cache this frame as the last rendered frame + if let Ok(mut last) = self.last_frame.try_lock() { + *last = frame.clone(); + } + } + + frame + } + + /// Get a frame for rendering - tries queue first, falls back to last frame. + /// + /// This is the main method for the render loop. It: + /// 1. Tries to pop a new frame from the queue + /// 2. If queue is empty, reuses the last successfully rendered frame + /// 3. Returns None only if no frame has ever been received + /// + /// This ensures smooth playback even when decode hiccups cause queue underruns. + pub fn get_render_frame(&self) -> Option { + // Try to get a new frame first + if let Some(frame) = self.try_pop() { + return Some(frame); + } + + // Queue empty - reuse last frame + self.stats_reused.fetch_add(1, Ordering::Relaxed); + + if let Ok(last) = self.last_frame.try_lock() { + last.clone() + } else { + None + } + } + + /// Write a frame directly to a destination buffer. + /// + /// This combines `get_render_frame()` with the frame write operation, + /// returning `Some((width, height))` on success. 
+ pub fn write_frame_to(&self, dest: &mut [u8]) -> Option<(u32, u32)> { + let frame = self.get_render_frame()?; + + if frame.write_to(dest) > 0 { + Some((frame.width, frame.height)) + } else { + None + } + } + + /// Get the dimensions of the last frame (if any). + pub fn last_frame_dimensions(&self) -> Option<(u32, u32)> { + self.last_frame + .try_lock() + .ok()? + .as_ref() + .map(|f| (f.width, f.height)) + } + + /// Check if the queue has been stopped. + pub fn is_stopped(&self) -> bool { + self.stopped.load(Ordering::Acquire) + } + + /// Stop the queue (called on shutdown or error). + /// + /// This prevents new frames from being pushed and signals + /// that the queue is no longer active. + pub fn stop(&self) { + self.stopped.store(true, Ordering::Release); + } + + /// Reset the queue for pipeline restart (test only). + #[cfg(test)] + pub fn reset(&self) { + self.stopped.store(false, Ordering::Release); + self.count.store(0, Ordering::Release); + self.read_pos.store(0, Ordering::Release); + self.write_pos.store(0, Ordering::Release); + + if let Ok(mut frames) = self.frames.try_lock() { + for frame in frames.iter_mut() { + *frame = None; + } + } + } + + /// Get current queue length. + pub fn len(&self) -> usize { + self.count.load(Ordering::Acquire) as usize + } + + /// Check if queue is empty. + #[cfg(test)] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Get queue statistics. 
+ pub fn stats(&self) -> QueueStats { + QueueStats { + frames_pushed: self.stats_pushed.load(Ordering::Relaxed), + frames_dropped_full: self.stats_dropped.load(Ordering::Relaxed), + frames_popped: self.stats_popped.load(Ordering::Relaxed), + frames_reused: self.stats_reused.load(Ordering::Relaxed), + } + } +} + +impl Default for FrameQueue { + fn default() -> Self { + Self::with_default_capacity() + } +} + +// Thread-safe: uses atomic operations and mutex-protected data +unsafe impl Send for FrameQueue {} +unsafe impl Sync for FrameQueue {} + +/// Shared handle to a frame queue. +pub type SharedFrameQueue = Arc; + +/// Create a new shared frame queue. +pub fn new_shared_queue(capacity: usize) -> SharedFrameQueue { + Arc::new(FrameQueue::new(capacity)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_basic_push_pop() { + let queue = FrameQueue::new(3); + + let frame = QueuedFrame::new(vec![1, 2, 3, 4], 1, 1, None); + assert!(queue.push(frame)); + assert_eq!(queue.len(), 1); + + let popped = queue.try_pop().unwrap(); + assert_eq!(popped.raw_data(), Some(&[1, 2, 3, 4][..])); + assert_eq!(queue.len(), 0); + } + + #[test] + fn test_queue_full_drops_oldest() { + let queue = FrameQueue::new(2); + + queue.push(QueuedFrame::new(vec![1], 1, 1, Some(1))); + queue.push(QueuedFrame::new(vec![2], 1, 1, Some(2))); + assert_eq!(queue.len(), 2); + + // This should drop frame 1 and add frame 3 + queue.push(QueuedFrame::new(vec![3], 1, 1, Some(3))); + assert_eq!(queue.len(), 2); + + // First pop should get frame 2 (frame 1 was dropped) + let frame = queue.try_pop().unwrap(); + assert_eq!(frame.pts_ns, Some(2)); + } + + #[test] + fn test_get_render_frame_reuses_last() { + let queue = FrameQueue::new(2); + + queue.push(QueuedFrame::new(vec![1, 2, 3, 4], 1, 1, None)); + + // First get_render_frame pops from queue + let frame1 = queue.get_render_frame().unwrap(); + assert_eq!(frame1.raw_data(), Some(&[1, 2, 3, 4][..])); + assert!(queue.is_empty()); + + // Second 
get_render_frame reuses last frame + let frame2 = queue.get_render_frame().unwrap(); + assert_eq!(frame2.raw_data(), Some(&[1, 2, 3, 4][..])); + + let stats = queue.stats(); + assert_eq!(stats.frames_reused, 1); + } + + #[test] + fn test_stop_prevents_push() { + let queue = FrameQueue::new(2); + + queue.push(QueuedFrame::new(vec![1], 1, 1, None)); + assert_eq!(queue.len(), 1); + + queue.stop(); + assert!(!queue.push(QueuedFrame::new(vec![2], 1, 1, None))); + assert_eq!(queue.len(), 1); + } + + #[test] + fn test_reset_clears_queue() { + let queue = FrameQueue::new(2); + + queue.push(QueuedFrame::new(vec![1], 1, 1, None)); + queue.push(QueuedFrame::new(vec![2], 1, 1, None)); + queue.stop(); + + queue.reset(); + + assert!(!queue.is_stopped()); + assert!(queue.is_empty()); + // But last_frame should still be available for smooth restart + } + + #[test] + fn test_write_frame_to_buffer() { + let queue = FrameQueue::new(2); + + queue.push(QueuedFrame::new(vec![1, 2, 3, 4], 1, 1, None)); + + let mut buffer = [0u8; 4]; + let dims = queue.write_frame_to(&mut buffer).unwrap(); + + assert_eq!(dims, (1, 1)); + assert_eq!(buffer, [1, 2, 3, 4]); + } + + #[test] + fn test_stats_tracking() { + let queue = FrameQueue::new(2); + + queue.push(QueuedFrame::new(vec![1], 1, 1, None)); + queue.push(QueuedFrame::new(vec![2], 1, 1, None)); + queue.push(QueuedFrame::new(vec![3], 1, 1, None)); // Drops oldest (frame 1) + + // Queue now has: [2, 3] + queue.try_pop(); // Pops frame 2 + queue.try_pop(); // Pops frame 3 + // Queue is now empty + queue.get_render_frame(); // Reuses last frame (frame 3) + + let stats = queue.stats(); + assert_eq!(stats.frames_pushed, 3); + assert_eq!(stats.frames_dropped_full, 1); + assert_eq!(stats.frames_popped, 2); + assert_eq!(stats.frames_reused, 1); + } +} diff --git a/src/main.rs b/src/main.rs index 55c67a7..4ef8d7d 100755 --- a/src/main.rs +++ b/src/main.rs @@ -1,7 +1,14 @@ // SPDX-License-Identifier: MPL-2.0 +#[cfg(feature = "animated")] +mod animated; mod 
colored; +#[cfg(feature = "animated")] +mod dmabuf; mod draw; +#[cfg(feature = "animated")] +mod frame_queue; + mod img_source; mod scaler; mod wallpaper; @@ -32,6 +39,7 @@ mod malloc { } } +use calloop::signals::{Signal, Signals}; use cosmic_bg_config::{Config, state::State}; use cosmic_config::{CosmicConfigEntry, calloop::ConfigWatchSource}; use eyre::Context; @@ -73,6 +81,9 @@ use tracing::error; use tracing_subscriber::prelude::*; use wallpaper::Wallpaper; +#[cfg(feature = "animated")] +use dmabuf::DmaBufState; + #[derive(Debug)] pub struct CosmicBgLayer { layer: LayerSurface, @@ -231,6 +242,16 @@ fn main() -> color_eyre::Result<()> { wallpapers }; + // Initialize DMA-BUF support for zero-copy video rendering + #[cfg(feature = "animated")] + let mut dmabuf_state = DmaBufState::new(); + #[cfg(feature = "animated")] + { + if let Some(dmabuf_global) = DmaBufState::bind_global(&globals, &qh) { + dmabuf_state.dmabuf_global = Some(dmabuf_global); + } + } + let mut bg_state = CosmicBg { registry_state: RegistryState::new(&globals), output_state: OutputState::new(&globals, &qh), @@ -239,6 +260,8 @@ fn main() -> color_eyre::Result<()> { layer_state: LayerShell::bind(&globals, &qh).unwrap(), viewporter: globals.bind(&qh, 1..=1, ()).unwrap(), fractional_scale_manager: globals.bind(&qh, 1..=1, ()).ok(), + #[cfg(feature = "animated")] + dmabuf_state, qh, source_tx, loop_handle: event_loop.handle(), @@ -246,8 +269,29 @@ fn main() -> color_eyre::Result<()> { wallpapers, config, active_outputs: Vec::new(), + cleanup_done: false, }; + // Register Unix signal handlers for graceful shutdown + // This prevents compositor freezes when cosmic-bg is killed + match Signals::new(&[Signal::SIGTERM, Signal::SIGINT]) { + Ok(signals) => { + if let Err(e) = event_loop + .handle() + .insert_source(signals, |signal, _, state| { + tracing::info!(?signal, "Received signal, initiating graceful shutdown"); + state.graceful_shutdown(); + state.exit = true; + }) + { + tracing::warn!("Failed to 
register signal handler: {e}"); + } + } + Err(e) => { + tracing::warn!("Failed to create signal source: {e}"); + } + } + loop { event_loop.dispatch(None, &mut bg_state)?; @@ -256,6 +300,10 @@ fn main() -> color_eyre::Result<()> { } } + // Ensure cleanup happens even on normal exit + tracing::info!("Event loop exited, performing final cleanup"); + bg_state.graceful_shutdown(); + Ok(()) } @@ -268,6 +316,8 @@ pub struct CosmicBg { layer_state: LayerShell, viewporter: wp_viewporter::WpViewporter, fractional_scale_manager: Option, + #[cfg(feature = "animated")] + dmabuf_state: DmaBufState, qh: QueueHandle, source_tx: calloop::channel::SyncSender<(String, notify::Event)>, loop_handle: calloop::LoopHandle<'static, CosmicBg>, @@ -275,9 +325,49 @@ pub struct CosmicBg { wallpapers: Vec, config: Config, active_outputs: Vec, + /// Flag to track if cleanup has been performed + cleanup_done: bool, +} + +impl Drop for CosmicBg { + fn drop(&mut self) { + if !self.cleanup_done { + tracing::info!("Drop: performing cleanup"); + self.graceful_shutdown(); + } + } } impl CosmicBg { + /// Gracefully shutdown all resources to prevent compositor freezes. + /// + /// This is called when receiving SIGTERM/SIGINT or on normal exit. + /// It ensures GStreamer pipelines are stopped and DMA-BUF resources + /// are released before the process terminates. 
+ fn graceful_shutdown(&mut self) { + if self.cleanup_done { + return; + } + self.cleanup_done = true; + + tracing::info!("Graceful shutdown: stopping all wallpapers"); + + // Stop all animated wallpapers (GStreamer pipelines) + // Use non-blocking stop to avoid hangs + for wallpaper in &mut self.wallpapers { + wallpaper.stop_animation(); + } + + // Clear wallpapers to drop layer surfaces and release buffers + self.wallpapers.clear(); + + // Small delay to let compositor process surface destruction + // Keep it short to avoid perceived freeze + std::thread::sleep(std::time::Duration::from_millis(10)); + + tracing::info!("Graceful shutdown complete"); + } + fn apply_backgrounds(&mut self) { self.wallpapers.clear(); @@ -604,6 +694,16 @@ delegate_noop!(CosmicBg: wp_viewporter::WpViewporter); delegate_noop!(CosmicBg: wp_viewport::WpViewport); delegate_noop!(CosmicBg: wp_fractional_scale_manager_v1::WpFractionalScaleManagerV1); +// DMA-BUF protocol delegates +#[cfg(feature = "animated")] +delegate_noop!(CosmicBg: ignore wayland_protocols::wp::linux_dmabuf::zv1::client::zwp_linux_dmabuf_v1::ZwpLinuxDmabufV1); +#[cfg(feature = "animated")] +delegate_noop!(CosmicBg: ignore wayland_protocols::wp::linux_dmabuf::zv1::client::zwp_linux_buffer_params_v1::ZwpLinuxBufferParamsV1); +#[cfg(feature = "animated")] +use sctk::reexports::client::protocol::wl_buffer; +#[cfg(feature = "animated")] +delegate_noop!(CosmicBg: ignore wl_buffer::WlBuffer); + impl Dispatch> for CosmicBg { diff --git a/src/scaler.rs b/src/scaler.rs index 36124d5..5a98c90 100644 --- a/src/scaler.rs +++ b/src/scaler.rs @@ -2,6 +2,7 @@ //! Background scaling methods such as fit, stretch, and zoom. +use cosmic_bg_config::FilterMethod; use image::imageops::FilterType; use image::{DynamicImage, Pixel}; @@ -10,6 +11,7 @@ pub fn fit( color: &[f32; 3], layer_width: u32, layer_height: u32, + filter_method: &FilterMethod, ) -> image::DynamicImage { // TODO: convert color to the same format as the input image. 
let mut filled_image = @@ -24,7 +26,7 @@ pub fn fit( (h as f64 * ratio).round() as u32, ); - let resized_image = resize(img, new_width, new_height); + let resized_image = resize(img, new_width, new_height, filter_method); image::imageops::replace( &mut filled_image, @@ -40,13 +42,24 @@ pub fn stretch( img: &image::DynamicImage, layer_width: u32, layer_height: u32, + filter_method: &FilterMethod, ) -> image::DynamicImage { - resize(img, layer_width, layer_height) + resize(img, layer_width, layer_height, filter_method) } -pub fn zoom(img: &image::DynamicImage, layer_width: u32, layer_height: u32) -> image::DynamicImage { +pub fn zoom( + img: &image::DynamicImage, + layer_width: u32, + layer_height: u32, + filter_method: &FilterMethod, +) -> image::DynamicImage { let (w, h) = (img.width(), img.height()); + // Fast path: if dimensions match exactly, just clone + if w == layer_width && h == layer_height { + return img.clone(); + } + let ratio = (layer_width as f64 / w as f64).max(layer_height as f64 / h as f64); let (new_width, new_height) = ( @@ -54,10 +67,18 @@ pub fn zoom(img: &image::DynamicImage, layer_width: u32, layer_height: u32) -> i (h as f64 * ratio).round() as u32, ); - let mut new_image = resize(img, new_width, new_height); + // If target dimensions exactly match what resize will produce, skip crop + let new_image = resize(img, new_width, new_height, filter_method); + + // Skip crop if dimensions already match after resize (common for same aspect ratio) + if new_width == layer_width && new_height == layer_height { + return new_image; + } + // Need to crop for different aspect ratios + let mut cropped = new_image; image::imageops::crop( - &mut new_image, + &mut cropped, (new_width - layer_width) / 2, (new_height - layer_height) / 2, layer_width, @@ -67,19 +88,41 @@ pub fn zoom(img: &image::DynamicImage, layer_width: u32, layer_height: u32) -> i .into() } -fn resize(img: &image::DynamicImage, new_width: u32, new_height: u32) -> image::DynamicImage { +fn 
resize( + img: &image::DynamicImage, + new_width: u32, + new_height: u32, + filter_method: &FilterMethod, +) -> image::DynamicImage { + // Skip resize if dimensions already match (common for native resolution videos) + if img.width() == new_width && img.height() == new_height { + return img.clone(); + } + let mut resizer = fast_image_resize::Resizer::new(); + + // Map FilterMethod to fast_image_resize algorithm + let algorithm = match filter_method { + FilterMethod::Nearest => fast_image_resize::ResizeAlg::Nearest, + FilterMethod::Linear => { + fast_image_resize::ResizeAlg::Convolution(fast_image_resize::FilterType::Bilinear) + } + FilterMethod::Lanczos => { + fast_image_resize::ResizeAlg::Convolution(fast_image_resize::FilterType::Lanczos3) + } + }; + let options = fast_image_resize::ResizeOptions { - algorithm: fast_image_resize::ResizeAlg::Convolution( - fast_image_resize::FilterType::Lanczos3, - ), + algorithm, ..Default::default() }; + let mut new_image = image::DynamicImage::new(new_width, new_height, img.color()); if let Err(err) = resizer.resize(img, &mut new_image, &options) { tracing::warn!(?err, "Failed to use `fast_image_resize`. 
Falling back."); - new_image = - image::imageops::resize(img, new_width, new_height, FilterType::Lanczos3).into(); + // Fallback to image crate with the corresponding filter + let filter_type: FilterType = filter_method.clone().into(); + new_image = image::imageops::resize(img, new_width, new_height, filter_type).into(); } new_image } diff --git a/src/wallpaper.rs b/src/wallpaper.rs index f848378..9b68b1b 100644 --- a/src/wallpaper.rs +++ b/src/wallpaper.rs @@ -2,6 +2,20 @@ use crate::{CosmicBg, CosmicBgLayer}; +#[cfg(feature = "animated")] +use crate::animated::{AnimatedPlayer, is_animated_file}; + +// When animated feature is disabled, provide a simple video file check +// to skip video files that can't be rendered as static images +#[cfg(not(feature = "animated"))] +fn is_video_file(path: &std::path::Path) -> bool { + const VIDEO_EXTENSIONS: &[&str] = &["mp4", "webm", "mkv", "avi", "mov", "m4v"]; + path.extension() + .and_then(|ext| ext.to_str()) + .map(|ext| VIDEO_EXTENSIONS.contains(&ext.to_lowercase().as_str())) + .unwrap_or(false) +} + use std::{ collections::VecDeque, fs::{self, File}, @@ -27,7 +41,121 @@ use tracing::error; use walkdir::WalkDir; // TODO filter images by whether they seem to match dark / light mode -// Alternatively only load from light / dark subdirectories given a directory source when this is active +// Alternatively only load from light / dark subdirectories given this is active + +/// Calculate viewport source and destination based on scaling mode. 
/// Calculate viewport source and destination for a scaling mode.
///
/// Returns `(src_x, src_y, src_width, src_height, dst_width, dst_height)`:
/// source coordinates are in buffer pixels, destination sizes in logical
/// surface coordinates. Callers are expected to pass non-zero dimensions
/// (zero sizes would yield NaN/inf aspect ratios — TODO confirm callers
/// guarantee this).
#[cfg(feature = "animated")]
fn calculate_viewport(
    buffer_width: u32,
    buffer_height: u32,
    logical_width: u32,
    logical_height: u32,
    scaling_mode: &ScalingMode,
) -> (f64, f64, f64, f64, u32, u32) {
    let buf_w = buffer_width as f64;
    let buf_h = buffer_height as f64;

    match scaling_mode {
        // Stretch: the whole buffer is scaled onto the whole surface.
        ScalingMode::Stretch => (0.0, 0.0, buf_w, buf_h, logical_width, logical_height),

        // Zoom: fill the surface, cropping the buffer to keep its aspect.
        ScalingMode::Zoom => {
            let buffer_aspect = buf_w / buf_h;
            let surface_aspect = logical_width as f64 / logical_height as f64;

            if buffer_aspect > surface_aspect {
                // Buffer is wider - crop left/right.
                let visible_w = (buf_h * surface_aspect).round();
                let crop_x = ((buf_w - visible_w) / 2.0).round();
                (crop_x, 0.0, visible_w, buf_h, logical_width, logical_height)
            } else {
                // Buffer is taller - crop top/bottom.
                let visible_h = (buf_w / surface_aspect).round();
                let crop_y = ((buf_h - visible_h) / 2.0).round();
                (0.0, crop_y, buf_w, visible_h, logical_width, logical_height)
            }
        }

        // Fit: scale the buffer to fit inside the surface, keeping aspect.
        // The fill color is not rendered here; the compositor background
        // shows through the letterbox area.
        ScalingMode::Fit(_color) => {
            let buffer_aspect = buf_w / buf_h;
            let surface_aspect = logical_width as f64 / logical_height as f64;

            if buffer_aspect > surface_aspect {
                // Buffer is wider - fit to width, shrink height.
                let fitted_h = (logical_width as f64 / buffer_aspect).round() as u32;
                (0.0, 0.0, buf_w, buf_h, logical_width, fitted_h)
            } else {
                // Buffer is taller - fit to height, shrink width.
                let fitted_w = (logical_height as f64 * buffer_aspect).round() as u32;
                (0.0, 0.0, buf_w, buf_h, fitted_w, logical_height)
            }
        }
    }
}

/// Animation playback state for an animated wallpaper.
#[cfg(feature = "animated")]
#[derive(Debug)]
pub struct AnimationState {
    // The animated player managing frame decoding.
    pub player: AnimatedPlayer,
    // Timer token for frame advancement (calloop registration —
    // inferred from `loop_handle.remove(token)` usage; confirm type).
    pub frame_timer_token: Option<RegistrationToken>,
    // Last rendered frame index, used to detect frame changes.
    pub last_frame_index: usize,
}

#[cfg(feature = "animated")]
impl AnimationState {
    /// Wrap a player with no timer registered yet; `usize::MAX` as the
    /// sentinel index guarantees the first real frame is seen as a change.
    pub fn new(player: AnimatedPlayer) -> Self {
        Self {
            player,
            frame_timer_token: None,
            last_frame_index: usize::MAX,
        }
    }
}
source for wallpaper"); @@ -127,9 +266,21 @@ impl Wallpaper { cur_resized_img = match source { Source::Path(path) => { + // Skip animated files - they're handled by draw_animated_frame() + #[cfg(feature = "animated")] + if is_animated_file(path) { + continue; + } + + // Skip video files when animated feature is disabled + #[cfg(not(feature = "animated"))] + if is_video_file(path) { + continue; + } + if self.current_image.is_none() { self.current_image = Some(match path.extension() { - Some(ext) if ext == "jxl" => match decode_jpegxl(&path) { + Some(ext) if ext == "jxl" => match decode_jpegxl(path) { Ok(image) => image, Err(why) => { tracing::warn!( @@ -141,7 +292,7 @@ impl Wallpaper { } }, - _ => match ImageReader::open(&path) { + _ => match ImageReader::open(path) { Ok(img) => { match img .with_guessed_format() @@ -165,15 +316,27 @@ impl Wallpaper { let img = self.current_image.as_ref().unwrap(); match self.entry.scaling_mode { - ScalingMode::Fit(color) => { - Some(crate::scaler::fit(img, &color, width, height)) - } + ScalingMode::Fit(color) => Some(crate::scaler::fit( + img, + &color, + width, + height, + &self.entry.filter_method, + )), - ScalingMode::Zoom => Some(crate::scaler::zoom(img, width, height)), + ScalingMode::Zoom => Some(crate::scaler::zoom( + img, + width, + height, + &self.entry.filter_method, + )), - ScalingMode::Stretch => { - Some(crate::scaler::stretch(img, width, height)) - } + ScalingMode::Stretch => Some(crate::scaler::stretch( + img, + width, + height, + &self.entry.filter_method, + )), } } @@ -223,6 +386,651 @@ impl Wallpaper { } } + /// Draw animated wallpaper frame (AVIF or video). + /// + /// For video: Uses wl_shm path (DMA-BUF is handled by timer callback with access to dmabuf_global). + /// For AVIF: Uses CPU scaling with per-resolution frame caching. 
+ #[cfg(feature = "animated")] + fn draw_animated_frame(&mut self, start: Instant) { + let Some(mut anim_state) = self.animation_state.take() else { + return; + }; + + let is_video = anim_state.player.is_video(); + + // Process GStreamer bus messages for video (handles EOS/looping/errors) + if is_video && anim_state.player.process_messages() { + tracing::warn!("Video playback stopped (EOS or error)"); + self.animation_state = Some(anim_state); + return; + } + + let current_frame_idx = anim_state.player.current_frame_index(); + + // Track frame changes for logging + if anim_state.last_frame_index != current_frame_idx { + anim_state.last_frame_index = current_frame_idx; + } + + if is_video { + // Video: wl_shm fallback path (DMA-BUF handled by timer callback) + let _ = self.draw_video_frame_zero_copy(&anim_state, current_frame_idx, start); + self.animation_state = Some(anim_state); + return; + } + + // AVIF: viewport scaling (GPU-accelerated) + self.draw_avif_frame(&mut anim_state, current_frame_idx, start); + self.animation_state = Some(anim_state); + } + + /// Draw a AVIF frame using viewport scaling (GPU-accelerated). + /// + /// Similar to video rendering: + /// 1. Write native-resolution AVIF frame to wl_shm buffer (small, fast) + /// 2. Use wp_viewport to GPU-scale to screen resolution + /// + /// This is much faster than CPU scaling each frame. 
+ #[cfg(feature = "animated")] + fn draw_avif_frame( + &mut self, + anim_state: &mut AnimationState, + current_frame_idx: usize, + start: Instant, + ) { + use sctk::reexports::client::protocol::wl_shm; + use sctk::shell::WaylandSurface; + + // Get current frame from player + let avif_frame = match anim_state.player.current_frame() { + Some(f) => f, + None => return, + }; + + let frame_width = avif_frame.image.width(); + let frame_height = avif_frame.image.height(); + + // Find layers that need redraw and have pools + let layers_needing_redraw: Vec = self + .layers + .iter() + .enumerate() + .filter(|(_, layer)| { + layer.needs_redraw + && layer.pool.is_some() + && layer.fractional_scale.is_some() + && layer.size.is_some() + }) + .map(|(i, _)| i) + .collect(); + + if layers_needing_redraw.is_empty() { + return; + } + + // Create buffer at native AVIF resolution (not screen resolution) + let first_idx = layers_needing_redraw[0]; + let pool = self.layers[first_idx].pool.as_mut().unwrap(); + + let buffer_result = pool.create_buffer( + frame_width as i32, + frame_height as i32, + frame_width as i32 * 4, + wl_shm::Format::Xrgb8888, + ); + + let (buffer, canvas) = match buffer_result { + Ok(b) => b, + Err(why) => { + tracing::error!(?why, "Failed to create AVIF buffer"); + return; + } + }; + + // Write native-resolution frame to buffer (fast - small buffer) + crate::draw::xrgb888_canvas(canvas, &avif_frame.image); + + let wl_buffer = buffer.wl_buffer(); + + // Attach to all surfaces with viewport scaling + for &layer_idx in &layers_needing_redraw { + let layer = &mut self.layers[layer_idx]; + let (logical_width, logical_height) = layer.size.unwrap(); + + let wl_surface = layer.layer.wl_surface(); + + // Damage the buffer + wl_surface.damage_buffer(0, 0, frame_width as i32, frame_height as i32); + + // Request next frame callback + layer + .layer + .wl_surface() + .frame(&self.queue_handle, wl_surface.clone()); + + // Attach the buffer + wl_surface.attach(Some(wl_buffer), 
0, 0); + + // Calculate viewport for GPU scaling + let (src_x, src_y, src_w, src_h, dst_w, dst_h) = calculate_viewport( + frame_width, + frame_height, + logical_width, + logical_height, + &self.entry.scaling_mode, + ); + + // Set viewport source and destination for GPU scaling + layer.viewport.set_source(src_x, src_y, src_w, src_h); + layer.viewport.set_destination(dst_w as i32, dst_h as i32); + + wl_surface.commit(); + layer.needs_redraw = false; + } + + tracing::debug!( + frame = current_frame_idx, + src_w = frame_width, + src_h = frame_height, + total = ?start.elapsed(), + "AVIF frame drawn (viewport scaling)" + ); + } + + /// Draw a video frame using viewport scaling with SHARED BUFFER and ZERO-COPY. + /// + /// This is the fastest possible path: + /// 1. Create ONE wl_shm buffer + /// 2. Pull frame directly from GStreamer into that buffer (single copy) + /// 3. Attach the same wl_buffer to ALL surfaces + /// 4. Let compositor GPU-scale via viewport protocol + /// + /// Total: ONE memory copy regardless of output count! + #[cfg(feature = "animated")] + fn draw_video_frame_zero_copy( + &mut self, + anim_state: &AnimationState, + frame_idx: usize, + start: Instant, + ) -> bool { + use sctk::reexports::client::protocol::wl_shm; + use sctk::shell::WaylandSurface; + + // Find layers that need redraw + let layers_needing_redraw: Vec = self + .layers + .iter() + .enumerate() + .filter(|(_, layer)| { + layer.needs_redraw + && layer.pool.is_some() + && layer.fractional_scale.is_some() + && layer.size.is_some() + }) + .map(|(i, _)| i) + .collect(); + + if layers_needing_redraw.is_empty() { + return false; + } + + // Get the video dimensions from the pipeline. + // If not available yet (pipeline not prerolled), skip this frame. 
+ let (frame_width, frame_height) = match anim_state.player.video_dimensions() { + Some(dims) => dims, + None => { + tracing::trace!("Video dimensions not available yet"); + return false; + } + }; + + let first_idx = layers_needing_redraw[0]; + let draw_start = Instant::now(); + + // wl_shm path: GPU decode → system memory → wl_shm buffer + // Create buffer at the ACTUAL video dimensions (not max 4K). + // This ensures the wl_buffer dimensions match what we're writing. + let pool = self.layers[first_idx].pool.as_mut().unwrap(); + let buffer_result = pool.create_buffer( + frame_width as i32, + frame_height as i32, + frame_width as i32 * 4, + wl_shm::Format::Xrgb8888, + ); + + let (buffer, canvas) = match buffer_result { + Ok(b) => b, + Err(why) => { + tracing::error!(?why, "failed to create buffer"); + return false; + } + }; + + // ZERO-COPY: Pull frame directly into wl_shm buffer + // Try to get new frame first, fall back to cached frame if none available + let frame_info = match anim_state.player.pull_frame_to_buffer(canvas) { + Some(info) => { + // Debug: Check if buffer actually has non-zero content + let non_zero_count = canvas.iter().take(1000).filter(|&&b| b != 0).count(); + tracing::trace!( + width = info.width, + height = info.height, + is_bgrx = info.is_bgrx, + non_zero_in_first_1000 = non_zero_count, + "Pulled new video frame" + ); + info + } + None => { + // No new frame available, try cached frame to maintain smooth playback + match anim_state.player.pull_cached_frame(canvas) { + Some(info) => { + tracing::trace!("Reusing cached frame (no new frame available)"); + info + } + None => { + tracing::trace!("No frame available yet (no cache)"); + return false; + } + } + } + }; + + let canvas_time = draw_start.elapsed(); + + // Verify the frame dimensions match what we expected + debug_assert_eq!(frame_info.width, frame_width, "Frame width mismatch"); + debug_assert_eq!(frame_info.height, frame_height, "Frame height mismatch"); + + // Get the underlying 
wl_buffer for sharing + let wl_buffer = buffer.wl_buffer(); + + // Attach the SAME buffer to ALL surfaces + let surface_start = Instant::now(); + let mut surfaces_updated = 0; + + for &layer_idx in &layers_needing_redraw { + let layer = &mut self.layers[layer_idx]; + + let (logical_width, logical_height) = layer.size.unwrap(); + + let wl_surface = layer.layer.wl_surface(); + + // Damage the entire buffer + wl_surface.damage_buffer(0, 0, frame_width as i32, frame_height as i32); + + // Request our next frame + layer + .layer + .wl_surface() + .frame(&self.queue_handle, wl_surface.clone()); + + // Attach the SHARED buffer + wl_surface.attach(Some(wl_buffer), 0, 0); + + // Calculate viewport based on scaling_mode + let (src_x, src_y, src_w, src_h, dst_w, dst_h) = calculate_viewport( + frame_width, + frame_height, + logical_width, + logical_height, + &self.entry.scaling_mode, + ); + + // Set viewport source (which part of buffer to use) + layer.viewport.set_source(src_x, src_y, src_w, src_h); + + // Set viewport destination (logical size to scale to) + layer.viewport.set_destination(dst_w as i32, dst_h as i32); + + wl_surface.commit(); + layer.needs_redraw = false; + surfaces_updated += 1; + } + + let surface_time = surface_start.elapsed(); + let total_elapsed = Instant::now().duration_since(start); + + // Log the first layer's dimensions for debugging + let (log_dest_w, log_dest_h) = self + .layers + .get(layers_needing_redraw[0]) + .and_then(|l| l.size) + .unwrap_or((0, 0)); + + tracing::debug!( + frame = frame_idx, + ?canvas_time, + ?surface_time, + ?total_elapsed, + surfaces = surfaces_updated, + src_w = frame_width, + src_h = frame_height, + dest_w = log_dest_w, + dest_h = log_dest_h, + "draw timing (ZERO-COPY, shared buffer, viewport GPU scaling)" + ); + + true + } + + /// Try to draw a video frame using DMA-BUF zero-copy (true GPU-only rendering). + /// + /// This is the ultimate performance path: + /// 1. Extract DMA-BUF fd from GStreamer + /// 2. 
Create wl_buffer from DMA-BUF via zwp_linux_dmabuf_v1 + /// 3. Attach to ALL surfaces (compositor GPU-reads directly) + /// 4. GPU-scale via viewport + /// + /// Total: ZERO CPU copies, all data stays in GPU memory! + /// + /// Returns true if successfully rendered, false to trigger wl_shm fallback. + #[cfg(feature = "animated")] + pub fn try_draw_video_frame_dmabuf( + &mut self, + anim_state: &AnimationState, + _frame_idx: usize, + start: Instant, + dmabuf_global: Option<&wayland_protocols::wp::linux_dmabuf::zv1::client::zwp_linux_dmabuf_v1::ZwpLinuxDmabufV1>, + ) -> bool { + use sctk::shell::WaylandSurface; + + let Some(dmabuf_global) = dmabuf_global else { + return false; + }; + + // Try to get DMA-BUF frame from player + let mut dmabuf_frame = match anim_state.player.try_get_dmabuf_frame() { + Some(f) => { + tracing::debug!( + width = f.width, + height = f.height, + planes = f.planes.len(), + "Got DMA-BUF frame" + ); + f + } + None => return false, + }; + + // Create wl_buffer from DMA-BUF + let wl_buffer = match dmabuf_frame.create_wl_buffer(dmabuf_global, &self.queue_handle) { + Some(b) => b, + None => { + tracing::warn!("Failed to create DMA-BUF wl_buffer - falling back to wl_shm"); + return false; + } + }; + + // Find layers that need redraw + let layers_needing_redraw: Vec = self + .layers + .iter() + .enumerate() + .filter(|(_, layer)| { + layer.needs_redraw && layer.fractional_scale.is_some() && layer.size.is_some() + }) + .map(|(i, _)| i) + .collect(); + + if layers_needing_redraw.is_empty() { + return false; + } + + // Attach the DMA-BUF buffer to all surfaces + for &layer_idx in &layers_needing_redraw { + let layer = &mut self.layers[layer_idx]; + let (logical_width, logical_height) = layer.size.unwrap(); + + let wl_surface = layer.layer.wl_surface(); + + // Damage the entire buffer + wl_surface.damage_buffer(0, 0, dmabuf_frame.width as i32, dmabuf_frame.height as i32); + + // Request our next frame + layer + .layer + .wl_surface() + 
.frame(&self.queue_handle, wl_surface.clone()); + + // Attach the DMA-BUF buffer + wl_surface.attach(Some(&wl_buffer), 0, 0); + + // Calculate viewport based on scaling_mode + let (src_x, src_y, src_w, src_h, dst_w, dst_h) = calculate_viewport( + dmabuf_frame.width, + dmabuf_frame.height, + logical_width, + logical_height, + &self.entry.scaling_mode, + ); + + // Set viewport source (which part of buffer to use) + layer.viewport.set_source(src_x, src_y, src_w, src_h); + + // Set viewport destination (logical size to scale to) + layer.viewport.set_destination(dst_w as i32, dst_h as i32); + + wl_surface.commit(); + layer.needs_redraw = false; + } + + // Destroy the wl_buffer to trigger compositor cleanup of cached textures. + // This is critical to prevent FD leaks in the compositor - without this, + // the compositor's texture cache accumulates entries for each frame. + wl_buffer.destroy(); + + let total_elapsed = Instant::now().duration_since(start); + tracing::debug!( + ?total_elapsed, + width = dmabuf_frame.width, + height = dmabuf_frame.height, + "DMA-BUF zero-copy render" + ); + + true + } + + /// Initialize animation playback for animated files (AVIF, video). + /// + /// For video files, the video is scaled during decode to match the largest + /// output resolution. This is more efficient than scaling during render. + /// + /// Hardware decode is used automatically via GStreamer's decodebin which + /// selects the best available decoder (VAAPI, NVDEC, or software fallback). 
+ #[cfg(feature = "animated")] + pub fn init_animation(&mut self, path: &std::path::Path) -> bool { + use crate::animated::AnimatedSource; + + let Some(source) = AnimatedSource::from_path(path) else { + tracing::warn!(path = %path.display(), "Not an animated file"); + return false; + }; + + // Get the largest output dimensions from layers + // Video will be scaled to this resolution during decode for efficiency + let (target_width, target_height) = self + .layers + .iter() + .filter_map(|layer| { + let (w, h) = layer.size?; + let scale = layer.fractional_scale.unwrap_or(120); + Some((w * scale / 120, h * scale / 120)) + }) + .max_by_key(|(w, h)| w * h) + .unwrap_or((1920, 1080)); // Default to 1080p if no layers yet + + tracing::debug!( + path = %path.display(), + target_width, + target_height, + "Initializing animated wallpaper (appsink mode)" + ); + + match AnimatedPlayer::new(source, target_width, target_height) { + Ok(player) => { + tracing::debug!(path = %path.display(), "Initialized animated wallpaper"); + self.animation_state = Some(AnimationState::new(player)); + self.register_frame_timer(); + true + } + Err(e) => { + tracing::error!(?e, path = %path.display(), "Failed to initialize animated wallpaper"); + false + } + } + } + + /// Register the frame advancement timer for animation playback. 
+ #[cfg(feature = "animated")] + fn register_frame_timer(&mut self) { + // Remove existing timer if any + if let Some(ref mut anim) = self.animation_state { + if let Some(token) = anim.frame_timer_token.take() { + self.loop_handle.remove(token); + } + } + + let Some(ref anim_state) = self.animation_state else { + return; + }; + + let frame_duration = anim_state.player.current_duration(); + let output_name = self.entry.output.clone(); + + tracing::debug!(?frame_duration, %output_name, "Registering frame timer"); + + let token = self + .loop_handle + .insert_source( + Timer::from_duration(frame_duration), + move |_, _, state: &mut CosmicBg| { + use std::sync::atomic::{AtomicU64, Ordering}; + use std::time::{SystemTime, UNIX_EPOCH}; + let timer_start = Instant::now(); + + static LAST_TICK_US: AtomicU64 = AtomicU64::new(0); + static EXPECTED_TICK_US: AtomicU64 = AtomicU64::new(0); + + let now_us = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_micros() as u64; + let last_us = LAST_TICK_US.swap(now_us, Ordering::Relaxed); + + if last_us > 0 { + let actual_interval_us = now_us.saturating_sub(last_us); + let expected_us = EXPECTED_TICK_US.load(Ordering::Relaxed); + if expected_us > 0 { + let drift_us = actual_interval_us.saturating_sub(expected_us) as i64; + tracing::debug!( + "Timer: actual={}μs, expected={}μs, drift={}μs", + actual_interval_us, + expected_us, + drift_us + ); + } + } + + let Some(wallpaper) = state + .wallpapers + .iter_mut() + .find(|w| w.entry.output == output_name) + else { + return TimeoutAction::Drop; + }; + + // Take animation state to work with it + let Some(mut anim_state) = wallpaper.animation_state.take() else { + return TimeoutAction::Drop; + }; + + // Check if playback should continue (handles EOS/looping) + if !anim_state.player.advance() { + tracing::warn!("Animation ended (no loop), stopping"); + wallpaper.animation_state = Some(anim_state); + return TimeoutAction::Drop; + } + + // Get next duration before putting state 
back + let next_duration = anim_state.player.current_duration(); + + // Store expected interval for drift calculation (uses static from closure above) + EXPECTED_TICK_US.store(next_duration.as_micros() as u64, Ordering::Relaxed); + + // Mark all layers for redraw + for layer in &mut wallpaper.layers { + layer.needs_redraw = true; + } + + // Try DMA-BUF zero-copy first (if available) + let dmabuf_rendered = wallpaper.try_draw_video_frame_dmabuf( + &anim_state, + anim_state.player.current_frame_index(), + timer_start, + state.dmabuf_state.dmabuf_global.as_ref(), + ); + + // Put state back + wallpaper.animation_state = Some(anim_state); + + // If DMA-BUF didn't work, fallback to regular draw + if !dmabuf_rendered { + wallpaper.draw_animated_frame(timer_start); + } + + let render_time = timer_start.elapsed(); + + // Compensate for render time: schedule next frame sooner + // to maintain correct animation speed + let adjusted_duration = next_duration.saturating_sub(render_time); + let adjusted_duration = adjusted_duration.max(Duration::from_millis(1)); + + tracing::debug!( + render_time_ms = render_time.as_millis(), + frame_duration_ms = next_duration.as_millis(), + adjusted_ms = adjusted_duration.as_millis(), + frame_idx = wallpaper + .animation_state + .as_ref() + .map(|a| a.player.current_frame_index()) + .unwrap_or(999), + "Frame timer tick" + ); + + // Schedule next frame with adjusted duration + TimeoutAction::ToDuration(adjusted_duration) + }, + ) + .ok(); + + if let Some(ref mut anim) = self.animation_state { + anim.frame_timer_token = token; + } + } + + /// Stop animation playback and clean up resources. 
+ #[cfg(feature = "animated")] + pub fn stop_animation(&mut self) { + tracing::debug!("Stopping animation for wallpaper: {}", self.entry.output); + if let Some(ref mut anim_state) = self.animation_state { + // Stop the GStreamer pipeline first + let _ = anim_state.player.stop(); + + // Remove frame timer + if let Some(token) = anim_state.frame_timer_token.take() { + self.loop_handle.remove(token); + } + } + self.animation_state = None; + } + + /// Stop animation playback (no-op when animated feature is disabled). + #[cfg(not(feature = "animated"))] + pub fn stop_animation(&mut self) { + // No animation support compiled in + } + pub fn load_images(&mut self) { let mut image_queue = VecDeque::new(); let xdg_data_dirs: Vec = match std::env::var("XDG_DATA_DIRS") { @@ -233,8 +1041,29 @@ impl Wallpaper { Err(_) => Vec::new(), }; - match self.entry.source { - Source::Path(ref source) => { + match self.entry.source.clone() { + Source::Path(source) => { + // Check if this Path source points to a video/animated file + #[cfg(feature = "animated")] + if is_animated_file(&source) { + tracing::debug!(?source, "Animated file detected - initializing animation"); + if self.init_animation(&source) { + self.current_source = Some(Source::Path(source)); + return; + } + tracing::warn!(?source, "Failed to init animation, falling back to static"); + } + + // Without animated feature, skip video files (they cannot be rendered as static images) + #[cfg(not(feature = "animated"))] + if is_video_file(&source) { + tracing::debug!( + ?source, + "Video file in Path source - skipping (no animated feature)" + ); + return; + } + tracing::debug!(?source, "loading images"); if let Ok(source) = source.canonicalize() { @@ -293,10 +1122,19 @@ impl Wallpaper { } } - image_queue.pop_front().map(|current_image_path| { + if let Some(current_image_path) = image_queue.pop_front() { + // Check if this is an animated file and initialize animation + #[cfg(feature = "animated")] + { + // For animated files, 
init_animation handles the setup + // For non-animated files, just set the source normally + let _ = is_animated_file(¤t_image_path) + && self.init_animation(¤t_image_path); + } + self.current_source = Some(Source::Path(current_image_path.clone())); image_queue.push_back(current_image_path); - }); + } } Source::Color(ref c) => { @@ -359,14 +1197,24 @@ impl Wallpaper { return TimeoutAction::Drop; // Drop if no item found for this timer }; - while let Some(next) = item.image_queue.pop_front() { + if let Some(next) = item.image_queue.pop_front() { item.current_source = Some(Source::Path(next.clone())); if let Err(err) = item.save_state() { error!("{err}"); } - item.image_queue.push_back(next); + item.image_queue.push_back(next.clone()); item.clear_image(); + + // Check if the next image is an animated file + #[cfg(feature = "animated")] + if is_animated_file(&next) && item.init_animation(&next) { + item.draw(); + return TimeoutAction::ToDuration(Duration::from_secs( + rotation_freq, + )); + } + item.draw(); return TimeoutAction::ToDuration(Duration::from_secs(rotation_freq)); @@ -380,6 +1228,10 @@ impl Wallpaper { } fn clear_image(&mut self) { + // Stop any running animation + #[cfg(feature = "animated")] + self.stop_animation(); + self.current_image = None; for l in &mut self.layers { l.needs_redraw = true; @@ -397,7 +1249,7 @@ fn current_image(output: &str) -> Option { let wallpaper = if output == "all" { wallpapers.next() } else { - wallpapers.into_iter().find(|(name, _path)| name == output) + wallpapers.find(|(name, _path)| name == output) }; wallpaper.map(|(_name, path)| path) diff --git a/tests/fixtures/animated.avif b/tests/fixtures/animated.avif new file mode 100644 index 0000000..496bf8a Binary files /dev/null and b/tests/fixtures/animated.avif differ diff --git a/tests/fixtures/static.avif b/tests/fixtures/static.avif new file mode 100644 index 0000000..26215ca Binary files /dev/null and b/tests/fixtures/static.avif differ