From 450b939c1f644d80362923922afe082c83228e44 Mon Sep 17 00:00:00 2001 From: scottmcm Date: Mon, 23 Dec 2024 18:44:04 -0800 Subject: [PATCH] Fix `EaseFunction::Exponential*` to exactly hit (0, 0) and (1, 1) (#16910) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And add a bunch of tests to show that all the monotonic easing functions have roughly the expected shape. # Objective The `EaseFunction::Exponential*` variants aren't actually smooth as currently implemented, because they jump by about 1‰ at the start/end/both. - Fixes #16676 - Subsumes #16675 ## Solution This PR slightly tweaks the shifting and scaling of all three variants to ensure they hit (0, 0) and (1, 1) exactly while gradually transitioning between them. Graph demonstration of the new easing function definitions: ![desmos-graph](https://github.com/user-attachments/assets/c87e9fe5-47d9-4407-9c94-80135eef5908) (Yes, they look completely identical to the previous ones at that scale. [Here's a zoomed-in comparison](https://www.desmos.com/calculator/ken6nk89of) between the old and the new if you prefer.) The approach taken was to keep the core 2¹⁰ᵗ shape, but to [ask WolframAlpha](https://www.wolframalpha.com/input?i=solve+over+the+reals%3A+pow%282%2C+10-A%29+-+pow%282%2C+-A%29%3D+1) what scaling factor to use such that f(1)-f(0)=1, then shift the curve down so that it goes from zero to one instead of ¹/₁₀₂₃ to ¹⁰²⁴/₁₀₂₃. ## Testing I've included in this PR a bunch of general tests for all monotonic easing functions to ensure they go from (0, 0) to (1, 1), that the InOut functions hit (½, ½), and that they have the expected convexity. You can also see by inspection that the difference is small. The change for `exponential_in` is from `exp2(10 * t - 10)` to `exp2(10 * t - 9.99859…) - 0.0009775171…`. 
The problem for `exponential_in(0)` is also simple to see without a calculator: 2⁻¹⁰ is obviously not zero, but with the new definition `exp2(-LOG2_1023) - FRAC_1_1023` => `1/(exp2(LOG2_1023)) - FRAC_1_1023` => `FRAC_1_1023 - FRAC_1_1023` => `0`. --- ## Migration Guide This release of bevy slightly tweaked the definitions of `EaseFunction::ExponentialIn`, `EaseFunction::ExponentialOut`, and `EaseFunction::ExponentialInOut`. The previous definitions had small discontinuities, while the new ones are slightly rescaled to be continuous. For the output values that changed, that change was less than 0.001, so visually you might not even notice the difference. However, if you depended on them for determinism, you'll need to define your own curves with the previous definitions. --------- Co-authored-by: IQuick 143 --- crates/bevy_math/src/curve/easing.rs | 108 +++++++++++++++++++++++++-- 1 file changed, 102 insertions(+), 6 deletions(-) diff --git a/crates/bevy_math/src/curve/easing.rs b/crates/bevy_math/src/curve/easing.rs index 0e5406fa732c7..81d5fb6df1d3a 100644 --- a/crates/bevy_math/src/curve/easing.rs +++ b/crates/bevy_math/src/curve/easing.rs @@ -176,9 +176,15 @@ pub enum EaseFunction { /// Behaves as `EaseFunction::CircularIn` for t < 0.5 and as `EaseFunction::CircularOut` for t >= 0.5 CircularInOut, - /// `f(t) = 2.0^(10.0 * (t - 1.0))` + /// `f(t) ≈ 2.0^(10.0 * (t - 1.0))` + /// + /// The precise definition adjusts it slightly so it hits both `(0, 0)` and `(1, 1)`: + /// `f(t) = 2.0^(10.0 * t - A) - B`, where A = log₂(2¹⁰-1) and B = 1/(2¹⁰-1). ExponentialIn, - /// `f(t) = 1.0 - 2.0^(-10.0 * t)` + /// `f(t) ≈ 1.0 - 2.0^(-10.0 * t)` + /// + /// As with `EaseFunction::ExponentialIn`, the precise definition adjusts it slightly + /// so it hits both `(0, 0)` and `(1, 1)`. 
ExponentialOut, /// Behaves as `EaseFunction::ExponentialIn` for t < 0.5 and as `EaseFunction::ExponentialOut` for t >= 0.5 ExponentialInOut, @@ -324,20 +330,30 @@ mod easing_functions { } } + // These are copied from a high precision calculator; I'd rather show them + // with blatantly more digits than needed (since rust will round them to the + // nearest representable value anyway) rather than make it seem like the + // truncated value is somehow carefully chosen. + #[allow(clippy::excessive_precision)] + const LOG2_1023: f32 = 9.998590429745328646459226; + #[allow(clippy::excessive_precision)] + const FRAC_1_1023: f32 = 0.00097751710654936461388074291; #[inline] pub(crate) fn exponential_in(t: f32) -> f32 { - ops::powf(2.0, 10.0 * t - 10.0) + // Derived from a rescaled exponential formula `(2^(10*t) - 1) / (2^10 - 1)` + // See <https://www.wolframalpha.com/input?i=solve+over+the+reals%3A+pow%282%2C+10-A%29+-+pow%282%2C+-A%29%3D+1> + ops::exp2(10.0 * t - LOG2_1023) - FRAC_1_1023 } #[inline] pub(crate) fn exponential_out(t: f32) -> f32 { - 1.0 - ops::powf(2.0, -10.0 * t) + (FRAC_1_1023 + 1.0) - ops::exp2(-10.0 * t - (LOG2_1023 - 10.0)) } #[inline] pub(crate) fn exponential_in_out(t: f32) -> f32 { if t < 0.5 { - ops::powf(2.0, 20.0 * t - 10.0) / 2.0 + ops::exp2(20.0 * t - (LOG2_1023 + 1.0)) - (FRAC_1_1023 / 2.0) } else { - (2.0 - ops::powf(2.0, -20.0 * t + 10.0)) / 2.0 + (FRAC_1_1023 / 2.0 + 1.0) - ops::exp2(-20.0 * t - (LOG2_1023 - 19.0)) } } @@ -459,3 +475,83 @@ impl EaseFunction { } } } + +#[cfg(test)] +mod tests { + use super::*; + const MONOTONIC_IN_OUT_INOUT: &[[EaseFunction; 3]] = { + use EaseFunction::*; + &[ + [QuadraticIn, QuadraticOut, QuadraticInOut], + [CubicIn, CubicOut, CubicInOut], + [QuarticIn, QuarticOut, QuarticInOut], + [QuinticIn, QuinticOut, QuinticInOut], + [SineIn, SineOut, SineInOut], + [CircularIn, CircularOut, CircularInOut], + [ExponentialIn, ExponentialOut, ExponentialInOut], + ] + }; + + // For easing functions we don't care if eval(0) is super-tiny like 2.0e-28, + // so add the same amount of error on both ends of the unit interval. 
+ const TOLERANCE: f32 = 1.0e-6; + const _: () = const { + assert!(1.0 - TOLERANCE != 1.0); + }; + + #[test] + fn ease_functions_zero_to_one() { + for ef in MONOTONIC_IN_OUT_INOUT.iter().flatten() { + let start = ef.eval(0.0); + assert!( + (0.0..=TOLERANCE).contains(&start), + "EaseFunction.{ef:?}(0) was {start:?}", + ); + + let finish = ef.eval(1.0); + assert!( + (1.0 - TOLERANCE..=1.0).contains(&finish), + "EaseFunction.{ef:?}(1) was {finish:?}", + ); + } + } + + #[test] + fn ease_function_inout_deciles() { + // convexity gives these built-in tolerances + for [_, _, ef_inout] in MONOTONIC_IN_OUT_INOUT { + for x in [0.1, 0.2, 0.3, 0.4] { + let y = ef_inout.eval(x); + assert!(y < x, "EaseFunction.{ef_inout:?}({x:?}) was {y:?}"); + } + + for x in [0.6, 0.7, 0.8, 0.9] { + let y = ef_inout.eval(x); + assert!(y > x, "EaseFunction.{ef_inout:?}({x:?}) was {y:?}"); + } + } + } + + #[test] + fn ease_function_midpoints() { + for [ef_in, ef_out, ef_inout] in MONOTONIC_IN_OUT_INOUT { + let mid = ef_in.eval(0.5); + assert!( + mid < 0.5 - TOLERANCE, + "EaseFunction.{ef_in:?}(½) was {mid:?}", + ); + + let mid = ef_out.eval(0.5); + assert!( + mid > 0.5 + TOLERANCE, + "EaseFunction.{ef_out:?}(½) was {mid:?}", + ); + + let mid = ef_inout.eval(0.5); + assert!( + (0.5 - TOLERANCE..=0.5 + TOLERANCE).contains(&mid), + "EaseFunction.{ef_inout:?}(½) was {mid:?}", + ); + } + } +}