diff --git a/stl/inc/atomic b/stl/inc/atomic
index 839787c5366..3e00dd8d77b 100644
--- a/stl/inc/atomic
+++ b/stl/inc/atomic
@@ -519,6 +519,11 @@ inline bool __stdcall _Atomic_wait_compare_16_bytes(const void* _Storage, void*
 #endif // _HAS_CXX20
 #endif // TRANSITION, ABI
 
+#if _HAS_CXX20
+_EXPORT_STD template <class _Ty>
+struct atomic_ref;
+#endif // _HAS_CXX20
+
 template <class _Ty, size_t>
 struct _Atomic_storage {
     // Provides operations common to all specializations of std::atomic, load, store, exchange, and CAS.
@@ -652,6 +657,11 @@ public:
     _Ty _Storage;
     mutable _Smtx_t _Mutex{};
 #endif // ^^^ break ABI ^^^
+
+#if _HAS_CXX20
+    template <class _Ty2>
+    friend struct atomic_ref;
+#endif // _HAS_CXX20
 };
 
 template <class _Ty>
@@ -2300,8 +2310,22 @@ public:
         }
     }
 
+    explicit atomic_ref(_Ty&&) = delete; // per LWG-4472
+
     atomic_ref(const atomic_ref&) noexcept = default;
 
+    template <class _Uty>
+        requires is_convertible_v<_Uty (*)[], _Ty (*)[]>
+    atomic_ref(const atomic_ref<_Uty>& _Ref) noexcept : _Base(_Ref._Storage) {
+        if constexpr (!is_always_lock_free) {
+#if 1 // TRANSITION, ABI
+            this->_Spinlock = _Ref._Spinlock;
+#else // ^^^ don't break ABI / break ABI vvv
+            this->_Mutex = _Ref._Mutex;
+#endif // ^^^ break ABI ^^^
+        }
+    }
+
     atomic_ref& operator=(const atomic_ref&) = delete;
 
     static constexpr bool is_always_lock_free =
diff --git a/stl/inc/yvals_core.h b/stl/inc/yvals_core.h
index 7301d3fda1e..285d1020248 100644
--- a/stl/inc/yvals_core.h
+++ b/stl/inc/yvals_core.h
@@ -320,6 +320,7 @@
 // P3323R1 Forbid atomic<cv T>, Specify atomic_ref<cv T>
 //     (for atomic_ref<cv T>)
 // P3349R1 Converting Contiguous Iterators To Pointers
+// P3860R1 Make atomic_ref<T> Convertible To atomic_ref<const T>
 
 // _HAS_CXX23 controls:
 // P0009R18 <mdspan>
diff --git a/tests/std/tests/P0019R8_atomic_ref/test.cpp b/tests/std/tests/P0019R8_atomic_ref/test.cpp
index 25c02826768..becfa851f99 100644
--- a/tests/std/tests/P0019R8_atomic_ref/test.cpp
+++ b/tests/std/tests/P0019R8_atomic_ref/test.cpp
@@ -11,6 +11,7 @@
 #include <memory>
 #include <numeric>
 #include <type_traits>
+#include <utility>
 #include <vector>
 
 struct bigint {
@@ -36,7 +37,22 @@ struct int128 {
 };
 
 // Also test constraints and conditional existence of difference_type specified by
-// P3323R1 "Forbid atomic<cv T>, Specify atomic_ref<cv T>".
+// P3323R1 "Forbid atomic<cv T>, Specify atomic_ref<cv T>",
+// and convertibility specified by
+// P3860R1 "Make atomic_ref<T> Convertible To atomic_ref<const T>".
+
+template <class From, class To>
+void test_atomic_ref_not_convertible_to() { // COMPILE-ONLY
+    static_assert(!std::is_convertible_v<std::atomic_ref<From>, std::atomic_ref<To>>);
+    static_assert(!std::is_convertible_v<const std::atomic_ref<From>, std::atomic_ref<To>>);
+    static_assert(!std::is_convertible_v<std::atomic_ref<From>&, std::atomic_ref<To>>);
+    static_assert(!std::is_convertible_v<const std::atomic_ref<From>&, std::atomic_ref<To>>);
+
+    static_assert(!std::is_constructible_v<std::atomic_ref<To>, std::atomic_ref<From>>);
+    static_assert(!std::is_constructible_v<std::atomic_ref<To>, const std::atomic_ref<From>>);
+    static_assert(!std::is_constructible_v<std::atomic_ref<To>, std::atomic_ref<From>&>);
+    static_assert(!std::is_constructible_v<std::atomic_ref<To>, const std::atomic_ref<From>&>);
+}
 
 template <class T>
 void test_atomic_ref_constraints_single() { // COMPILE-ONLY
@@ -216,15 +232,62 @@ void test_atomic_ref_constraints_single() { // COMPILE-ONLY
     }
 }
 
+template <class From, class To>
+void test_atomic_ref_cv_convertibility() { // COMPILE-ONLY
+    static_assert(std::is_nothrow_convertible_v<std::atomic_ref<From>, std::atomic_ref<To>>);
+    static_assert(std::is_nothrow_convertible_v<const std::atomic_ref<From>, std::atomic_ref<To>>);
+    static_assert(std::is_nothrow_convertible_v<std::atomic_ref<From>&, std::atomic_ref<To>>);
+    static_assert(std::is_nothrow_convertible_v<const std::atomic_ref<From>&, std::atomic_ref<To>>);
+
+    static_assert(std::is_nothrow_constructible_v<std::atomic_ref<To>, std::atomic_ref<From>>);
+    static_assert(std::is_nothrow_constructible_v<std::atomic_ref<To>, const std::atomic_ref<From>>);
+    static_assert(std::is_nothrow_constructible_v<std::atomic_ref<To>, std::atomic_ref<From>&>);
+    static_assert(std::is_nothrow_constructible_v<std::atomic_ref<To>, const std::atomic_ref<From>&>);
+
+    [[maybe_unused]] auto instantiator = [](std::atomic_ref<From>& ref) {
+        [[maybe_unused]] std::atomic_ref<To> ref_cv1 = ref;
+        [[maybe_unused]] std::atomic_ref<To> ref_cv2{ref};
+        [[maybe_unused]] std::atomic_ref<To> ref_cv3 = std::as_const(ref);
+        [[maybe_unused]] std::atomic_ref<To> ref_cv4{std::as_const(ref)};
+        [[maybe_unused]] std::atomic_ref<To> ref_cv5 = std::move(ref);
+        [[maybe_unused]] std::atomic_ref<To> ref_cv6{std::move(ref)};
+        [[maybe_unused]] std::atomic_ref<To> ref_cv7 = std::move(std::as_const(ref));
+        [[maybe_unused]] std::atomic_ref<To> ref_cv8{std::move(std::as_const(ref))};
+    };
+
+    test_atomic_ref_not_convertible_to<To, From>();
+}
+
 template <class T>
 void test_atomic_ref_constraints_cv() { // COMPILE-ONLY
     static_assert(!std::is_const_v<T> && !std::is_volatile_v<T>);
     test_atomic_ref_constraints_single<T>();
     test_atomic_ref_constraints_single<const T>();
+    test_atomic_ref_cv_convertibility<T, const T>();
     if constexpr (std::atomic_ref<T>::is_always_lock_free) {
         test_atomic_ref_constraints_single<volatile T>();
         test_atomic_ref_constraints_single<const volatile T>();
+        test_atomic_ref_cv_convertibility<T, volatile T>();
+        test_atomic_ref_cv_convertibility<T, const volatile T>();
+        test_atomic_ref_cv_convertibility<const T, const volatile T>();
     }
+
+    test_atomic_ref_cv_convertibility<T*, T* const>();
+    test_atomic_ref_cv_convertibility<T*, const T* const>();
+    test_atomic_ref_cv_convertibility<const T*, const T* const>();
+
+    test_atomic_ref_not_convertible_to<T*, const T*>();
+    test_atomic_ref_not_convertible_to<const T*, T* const>();
+
+    struct Base {
+        T t;
+    };
+    struct Derived : Base {};
+
+    test_atomic_ref_not_convertible_to<Base, Derived>();
+    test_atomic_ref_not_convertible_to<Derived, Base>();
+    test_atomic_ref_not_convertible_to<Base*, Derived*>();
+    test_atomic_ref_not_convertible_to<Derived*, Base*>();
 }
 
 void test_atomic_ref_constraints() { // COMPILE-ONLY
@@ -313,10 +376,8 @@ void test_ops() {
 
     auto load = [](const std::atomic_ref<ValueType>& ref) { return static_cast<int>(ref.load()); };
     auto xchg0 = [](std::atomic_ref<ValueType>& ref) { return static_cast<int>(ref.exchange(0)); };
-
-    int (*inc)(std::atomic_ref<ValueType>& ref);
-    if constexpr (AddViaCas) {
-        inc = [](std::atomic_ref<ValueType>& ref) {
+    auto inc = [](std::atomic_ref<ValueType>& ref) {
+        if constexpr (AddViaCas) {
             for (;;) {
                 ValueType e = ref.load();
                 ValueType d = static_cast<ValueType>(static_cast<int>(e) + 1);
@@ -324,10 +385,10 @@ void test_ops() {
                 return static_cast<int>(e);
             }
         }
-        };
-    } else {
-        inc = [](std::atomic_ref<ValueType>& ref) { return static_cast<int>(ref.fetch_add(1)); };
-    }
+        } else {
+            return static_cast<int>(ref.fetch_add(1));
+        }
+    };
 
     assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load) == 0);
     assert(std::transform_reduce(par, refs.begin(), refs.begin() + range, 0, std::plus{}, inc) == 0);
@@ -336,16 +397,72 @@ void test_ops() {
     assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load) == range * repetitions * 2);
     assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, xchg0) == range * 2);
     assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load) == 0);
+
+    auto load_const = [](std::atomic_ref<const ValueType> ref) { return static_cast<int>(ref.load()); };
+
+    assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const) == 0);
+    assert(std::transform_reduce(par, refs.begin(), refs.begin() + range, 0, std::plus{}, inc) == 0);
+    assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const) == range * repetitions);
+    assert(std::transform_reduce(par, refs.begin(), refs.begin() + range, 0, std::plus{}, inc) == range);
+    assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const) == range * repetitions * 2);
+    assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, xchg0) == range * 2);
+    assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const) == 0);
+
+    if constexpr (std::atomic_ref<ValueType>::is_always_lock_free) {
+        auto load_volatile = [](std::atomic_ref<volatile ValueType> ref) { return static_cast<int>(ref.load()); };
+        auto xchg0_volatile = [](std::atomic_ref<volatile ValueType> ref) { return static_cast<int>(ref.exchange(0)); };
+        auto inc_volatile = [](std::atomic_ref<volatile ValueType> ref) {
+            if constexpr (AddViaCas) {
+                for (;;) {
+                    ValueType e = ref.load();
+                    ValueType d = static_cast<ValueType>(static_cast<int>(e) + 1);
+                    if (ref.compare_exchange_weak(e, d)) {
+                        return static_cast<int>(e);
+                    }
+                }
+            } else {
+                return static_cast<int>(ref.fetch_add(1));
+            }
+        };
+
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_volatile) == 0);
+        assert(std::transform_reduce(par, refs.begin(), refs.begin() + range, 0, std::plus{}, inc_volatile) == 0);
+        assert(
+            std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_volatile) == range * repetitions);
+        assert(std::transform_reduce(par, refs.begin(), refs.begin() + range, 0, std::plus{}, inc_volatile) == range);
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_volatile)
+               == range * repetitions * 2);
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, xchg0_volatile) == range * 2);
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_volatile) == 0);
+
+        auto load_const_volatile = [](std::atomic_ref<const volatile ValueType> ref) {
+            return static_cast<int>(ref.load());
+        };
+
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const_volatile) == 0);
+        assert(std::transform_reduce(par, refs.begin(), refs.begin() + range, 0, std::plus{}, inc_volatile) == 0);
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const_volatile)
+               == range * repetitions);
+        assert(std::transform_reduce(par, refs.begin(), refs.begin() + range, 0, std::plus{}, inc_volatile) == range);
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const_volatile)
+               == range * repetitions * 2);
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, xchg0_volatile) == range * 2);
+        assert(std::transform_reduce(par, refs.begin(), refs.end(), 0, std::plus{}, load_const_volatile) == 0);
+    }
 }
 
-template <class Integer>
-void test_int_ops() {
+template <class Integer, bool AddVolatile>
+void test_int_ops_add_volatile_if() {
+    using Referenced = std::conditional_t<AddVolatile, volatile Integer, Integer>;
+
     Integer v = 0x40;
 
     std::atomic<Integer> vx(v);
     std::atomic<Integer> vy(v);
-    const std::atomic_ref<Integer> rx(v);
-    const std::atomic_ref<Integer> ry(v);
+    const std::atomic_ref<Referenced> rx = std::atomic_ref<Integer>(v);
+    const std::atomic_ref<Referenced> ry = std::atomic_ref<Integer>(v);
+    const std::atomic_ref<const Referenced> rxc = rx;
+    const std::atomic_ref<const Referenced> ryc = ry;
 
     assert(vx.fetch_add(0x10) == 0x40);
     assert(rx.fetch_add(0x10) == 0x40);
@@ -354,6 +471,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x50);
     assert(ry.load() == 0x50);
+    assert(rxc.load() == 0x50);
+    assert(ryc.load() == 0x50);
 
     assert(vx.fetch_sub(0x8) == 0x50);
     assert(rx.fetch_sub(0x8) == 0x50);
@@ -362,6 +481,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x48);
     assert(ry.load() == 0x48);
+    assert(rxc.load() == 0x48);
+    assert(ryc.load() == 0x48);
 
     assert(vx.fetch_or(0xF) == 0x48);
     assert(rx.fetch_or(0xF) == 0x48);
@@ -370,6 +491,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x4F);
     assert(ry.load() == 0x4F);
+    assert(rxc.load() == 0x4F);
+    assert(ryc.load() == 0x4F);
 
     assert(vx.fetch_and(0x3C) == 0x4F);
     assert(rx.fetch_and(0x3C) == 0x4F);
@@ -378,6 +501,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0xC);
     assert(ry.load() == 0xC);
+    assert(rxc.load() == 0xC);
+    assert(ryc.load() == 0xC);
 
     assert(vx.fetch_xor(0x3F) == 0xC);
     assert(rx.fetch_xor(0x3F) == 0xC);
@@ -386,6 +511,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x33);
     assert(ry.load() == 0x33);
+    assert(rxc.load() == 0x33);
+    assert(ryc.load() == 0x33);
 
     assert(vx-- == 0x33);
     assert(rx-- == 0x33);
@@ -394,6 +521,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x32);
     assert(ry.load() == 0x32);
+    assert(rxc.load() == 0x32);
+    assert(ryc.load() == 0x32);
 
     assert(--vx == 0x31);
     assert(--rx == 0x31);
@@ -402,6 +531,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x31);
    assert(ry.load() == 0x31);
+    assert(rxc.load() == 0x31);
+    assert(ryc.load() == 0x31);
 
     assert(vx++ == 0x31);
     assert(rx++ == 0x31);
@@ -410,6 +541,8 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x32);
     assert(ry.load() == 0x32);
+    assert(rxc.load() == 0x32);
+    assert(ryc.load() == 0x32);
 
     assert(++vx == 0x33);
     assert(++rx == 0x33);
@@ -418,17 +551,31 @@ void test_int_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x33);
     assert(ry.load() == 0x33);
+    assert(rxc.load() == 0x33);
+    assert(ryc.load() == 0x33);
 }
+template <class Integer>
+void test_int_ops() {
+    test_int_ops_add_volatile_if<Integer, false>();
+    if constexpr (std::atomic_ref<Integer>::is_always_lock_free) {
+        test_int_ops_add_volatile_if<Integer, true>();
+    }
+}
+
+
+template <class Float, bool AddVolatile>
+void test_float_ops_add_volatile_if() {
+    using Referenced = std::conditional_t<AddVolatile, volatile Float, Float>;
 
-template <class Float>
-void test_float_ops() {
     Float v = 0x40;
 
     std::atomic<Float> vx(v);
     std::atomic<Float> vy(v);
-    const std::atomic_ref<Float> rx(v);
-    const std::atomic_ref<Float> ry(v);
+    const std::atomic_ref<Referenced> rx = std::atomic_ref<Float>(v);
+    const std::atomic_ref<Referenced> ry = std::atomic_ref<Float>(v);
+    const std::atomic_ref<const Referenced> rxc = rx;
+    const std::atomic_ref<const Referenced> ryc = ry;
 
     assert(vx.fetch_add(0x10) == 0x40);
     assert(rx.fetch_add(0x10) == 0x40);
@@ -437,6 +584,8 @@ void test_float_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x50);
     assert(ry.load() == 0x50);
+    assert(rxc.load() == 0x50);
+    assert(ryc.load() == 0x50);
 
     assert(vx.fetch_sub(0x8) == 0x50);
     assert(rx.fetch_sub(0x8) == 0x50);
@@ -445,6 +594,8 @@ void test_float_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x48);
     assert(ry.load() == 0x48);
+    assert(rxc.load() == 0x48);
+    assert(ryc.load() == 0x48);
 
     vx.store(0x10);
     rx.store(0x10);
@@ -453,17 +604,31 @@ void test_float_ops() {
     assert(vy.load() == 0x40);
     assert(rx.load() == 0x10);
     assert(ry.load() == 0x10);
+    assert(rxc.load() == 0x10);
+    assert(ryc.load() == 0x10);
 }
 
-template <class Ptr>
-void test_ptr_ops() {
+template <class Float>
+void test_float_ops() {
+    test_float_ops_add_volatile_if<Float, false>();
+    if constexpr (std::atomic_ref<Float>::is_always_lock_free) {
+        test_float_ops_add_volatile_if<Float, true>();
+    }
+}
+
+template <class Ptr, bool AddVolatile>
+void test_ptr_ops_add_volatile_if() {
+    using Referenced = std::conditional_t<AddVolatile, volatile Ptr, Ptr>;
+
     std::remove_pointer_t<Ptr> a[0x100];
     Ptr v = a;
 
     std::atomic<Ptr> vx(v);
     std::atomic<Ptr> vy(v);
-    const std::atomic_ref<Ptr> rx(v);
-    const std::atomic_ref<Ptr> ry(v);
+    const std::atomic_ref<Referenced> rx = std::atomic_ref<Ptr>(v);
+    const std::atomic_ref<Referenced> ry = std::atomic_ref<Ptr>(v);
+    const std::atomic_ref<const Referenced> rxc = rx;
+    const std::atomic_ref<const Referenced> ryc = ry;
 
     assert(vx.fetch_add(0x10) == a);
     assert(rx.fetch_add(0x10) == a);
@@ -472,6 +637,8 @@ void test_ptr_ops() {
     assert(vy.load() == a);
     assert(rx.load() == a + 0x10);
     assert(ry.load() == a + 0x10);
+    assert(rxc.load() == a + 0x10);
+    assert(ryc.load() == a + 0x10);
 
     assert(vx.fetch_sub(0x8) == a + 0x10);
     assert(rx.fetch_sub(0x8) == a + 0x10);
@@ -480,6 +647,8 @@ void test_ptr_ops() {
     assert(vy.load() == a);
     assert(rx.load() == a + 0x8);
     assert(ry.load() == a + 0x8);
+    assert(rxc.load() == a + 0x8);
+    assert(ryc.load() == a + 0x8);
 
     vx.store(a + 0x10);
     rx.store(a + 0x10);
@@ -488,6 +657,8 @@ void test_ptr_ops() {
     assert(vy.load() == a);
     assert(rx.load() == a + 0x10);
     assert(ry.load() == a + 0x10);
+    assert(rxc.load() == a + 0x10);
+    assert(ryc.load() == a + 0x10);
 
     assert(vx-- == a + 0x10);
     assert(rx-- == a + 0x10);
@@ -496,6 +667,8 @@ void test_ptr_ops() {
     assert(vy.load() == a);
     assert(rx.load() == a + 0xF);
     assert(ry.load() == a + 0xF);
+    assert(rxc.load() == a + 0xF);
+    assert(ryc.load() == a + 0xF);
 
     assert(--vx == a + 0xE);
     assert(--rx == a + 0xE);
@@ -504,6 +677,8 @@ void test_ptr_ops() {
     assert(vy.load() == a);
     assert(rx.load() == a + 0xE);
     assert(ry.load() == a + 0xE);
+    assert(rxc.load() == a + 0xE);
+    assert(ryc.load() == a + 0xE);
 
     assert(vx++ == a + 0xE);
     assert(rx++ == a + 0xE);
@@ -512,6 +687,8 @@ void test_ptr_ops() {
     assert(vy.load() == a);
     assert(rx.load() == a + 0xF);
     assert(ry.load() == a + 0xF);
+    assert(rxc.load() == a + 0xF);
+    assert(ryc.load() == a + 0xF);
 
     assert(++vx == a + 0x10);
     assert(++rx == a + 0x10);
@@ -520,6 +697,16 @@ void test_ptr_ops() {
     assert(vy.load() == a);
     assert(rx.load() == a + 0x10);
     assert(ry.load() == a + 0x10);
+    assert(rxc.load() == a + 0x10);
+    assert(ryc.load() == a + 0x10);
+}
+
+template <class Ptr>
+void test_ptr_ops() {
+    test_ptr_ops_add_volatile_if<Ptr, false>();
+    if constexpr (std::atomic_ref<Ptr>::is_always_lock_free) {
+        test_ptr_ops_add_volatile_if<Ptr, true>();
+    }
 }
 
 // GH-1497 : atomic_ref fails to compile
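
Editor's note on the constraint, not part of the patch: the requires-clause `is_convertible_v<_Uty (*)[], _Ty (*)[]>` accepts exactly the qualification conversions and nothing else, which is why derived-to-base and "deep const only" conversions are rejected by the tests above. A self-contained, compile-only sketch of that behavior in plain C++20 (the `Base`/`Derived` types here are illustrative):

// qualification_conversions.cpp - compile-only illustration
#include <type_traits>

struct Base {
    int i;
};
struct Derived : Base {};

// int -> const int: a qualification conversion, so allowed.
static_assert(std::is_convertible_v<int (*)[], const int (*)[]>);
// Derived -> Base: a pointer conversion, not a qualification conversion, so rejected.
static_assert(!std::is_convertible_v<Derived (*)[], Base (*)[]>);
// int* -> const int*: adds const at a deeper level without const above it, so rejected.
static_assert(!std::is_convertible_v<int* (*)[], const int* (*)[]>);
// int* -> const int* const: a full multi-level qualification conversion, so allowed.
static_assert(std::is_convertible_v<int* (*)[], const int* const (*)[]>);

int main() {} // mirrors which atomic_ref<U> -> atomic_ref<T> directions P3860R1 permits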
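A minimal usage sketch of what the converting constructor enables, again not part of the patch; the names `observe` and `value` are hypothetical, and this assumes a standard library implementing atomic_ref<const T> (P3323R1) plus this conversion (P3860R1):

// usage_sketch.cpp
#include <atomic>
#include <cassert>

// Readers receive read-only atomic access: load() is available, store() is not.
int observe(std::atomic_ref<const int> ref) {
    return ref.load(std::memory_order_acquire);
}

int main() {
    alignas(std::atomic_ref<int>::required_alignment) int value = 0;
    std::atomic_ref<int> writer{value};
    writer.store(42, std::memory_order_release);
    assert(observe(writer) == 42); // implicit atomic_ref<int> -> atomic_ref<const int>
    return 0;
}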