#include <xenium/parameter.hpp>
#include <xenium/detail/port.hpp>

#include <atomic>
#include <cassert>
#include <cstdint>
#include <new>
#include <type_traits>
#include <utility>
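
// Policy to configure the number of internal storage slots; more slots reduce
// the chance that a reader has to retry because a writer reused its slot.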
namespace policy {
  template <unsigned Value>
  struct slots;
}
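
// A sequence lock ("seqlock"): writers increment a sequence counter before and
// after mutating the data; readers retry whenever they observe an odd counter
// (write in progress) or a counter that changed across their read.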
template <class T, class... Policies>
struct seqlock {
  static_assert(std::is_default_constructible_v<T>, "T must be default constructible");
  static_assert(std::is_trivially_destructible_v<T>, "T must be trivially destructible");
  static_assert(std::is_trivially_copyable_v<T>, "T must be trivially copyable");
  static_assert(sizeof(T) > sizeof(std::uintptr_t),
                "For types T with a size less than or equal to that of a pointer, use an "
                "atomic<T> with a compare_exchange update loop.");

  static constexpr unsigned slots = parameter::value_param_t<unsigned, policy::slots, 1, Policies...>::value;
  static_assert(slots >= 1, "slots must be >= 1");

  // Constructs the contained value in place via perfect forwarding.
  template <class... Args>
  seqlock(Args&&... args) {
    new (&_data) T(std::forward<Args>(args)...);
  }

  T load() const;

  void store(const T& value);

  template <class Func>
  void update(Func func);

private:
  using storage_t = typename std::aligned_storage<sizeof(T), alignof(T)>::type;
  using sequence_t = std::uintptr_t;
  using copy_t = std::uintptr_t;

  bool is_write_pending(sequence_t seq) const { return (seq & 1) != 0; }

  sequence_t acquire_lock();
  void release_lock(sequence_t seq);

  void read_data(T& dest, const storage_t& src) const;
  void store_data(const T& src, storage_t& dest);

  std::atomic<sequence_t> _seq{0};
  storage_t _data[slots];
};
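
// A minimal usage sketch (illustrative only): the payload type, the policy
// value, and the enclosing xenium namespace implied by the include paths are
// assumptions, not part of this excerpt.
//
//   struct counters { std::uint64_t hits, misses, evictions; };
//
//   xenium::seqlock<counters, xenium::policy::slots<2>> stats;
//   stats.store({1, 2, 3});
//   stats.update([](counters& c) { ++c.hits; });
//   counters snapshot = stats.load();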

template <class T, class... Policies>
T seqlock<T, Policies...>::load() const {
  T result;
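  // This acquire-load synchronizes-with the release-store in release_lock().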
  sequence_t seq = _seq.load(std::memory_order_acquire);
  for (;;) {
    // Wait while an update is in progress (the counter is odd).
    while (is_write_pending(seq)) {
      seq = _seq.load(std::memory_order_acquire);
    }

    auto idx = (seq >> 1) % slots;
    read_data(result, _data[idx]);
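
    // Re-read the counter: if it advanced by less than 2 * slots - 1, the slot
    // we copied from cannot have been overwritten, so the copy is consistent.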
    auto seq2 = _seq.load(std::memory_order_acquire);
    if (seq2 - seq < (2 * slots - 1)) {
      break;
    }
    // Too many intervening writes; retry with the new sequence value.
    seq = seq2;
  }
  return result;
}

template <class T, class... Policies>
template <class Func>
void seqlock<T, Policies...>::update(Func func) {
  auto seq = acquire_lock();
  T data;
  auto idx = (seq >> 1) % slots;
  read_data(data, _data[idx]);
  // Apply the user-provided functor to a local copy, then publish the result
  // into the next slot.
  func(data);
  store_data(data, _data[(idx + 1) % slots]);
  release_lock(seq);
}

template <class T, class... Policies>
void seqlock<T, Policies...>::store(const T& value) {
  auto seq = acquire_lock();
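  // Write to the next slot so that concurrent readers can still obtain a
  // consistent value from the previous one.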
  auto idx = ((seq >> 1) + 1) % slots;
  store_data(value, _data[idx]);
  release_lock(seq);
}

template <class T, class... Policies>
auto seqlock<T, Policies...>::acquire_lock() -> sequence_t {
  auto seq = _seq.load(std::memory_order_relaxed);
  for (;;) {
    // Wait until no other update is in progress.
    while (is_write_pending(seq)) {
      seq = _seq.load(std::memory_order_relaxed);
    }
    assert(!is_write_pending(seq));
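    // Try to set the "write pending" bit (the least significant bit). The
    // acquire order on success synchronizes-with the release-store of the
    // previous writer's release_lock().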
    if (_seq.compare_exchange_weak(seq, seq + 1, std::memory_order_acquire, std::memory_order_relaxed)) {
      return seq + 1;
    }
  }
}

template <class T, class... Policies>
void seqlock<T, Policies...>::release_lock(sequence_t seq) {
  assert(seq == _seq.load(std::memory_order_relaxed));
  assert(is_write_pending(seq));
  // This release-store synchronizes-with the acquire-loads in load() and the
  // acquire-CAS in acquire_lock(); it also clears the "write pending" bit.
  _seq.store(seq + 1, std::memory_order_release);
}

template <class T, class... Policies>
void seqlock<T, Policies...>::read_data(T& dest, const storage_t& src) const {
  copy_t* pdest = reinterpret_cast<copy_t*>(&dest);
  copy_t* pend = pdest + (sizeof(T) / sizeof(copy_t));
  const std::atomic<copy_t>* psrc = reinterpret_cast<const std::atomic<copy_t>*>(&src);
  // Copy word by word via relaxed atomic loads; a potentially torn copy is
  // detected and discarded by the sequence check in load().
  for (; pdest != pend; ++psrc, ++pdest) {
    *pdest = psrc->load(std::memory_order_relaxed);
  }
  // This acquire-fence synchronizes-with the release-fence in store_data().
  std::atomic_thread_fence(std::memory_order_acquire);
}

template <class T, class... Policies>
void seqlock<T, Policies...>::store_data(const T& src, storage_t& dest) {
  // This release-fence synchronizes-with the acquire-fence in read_data().
  std::atomic_thread_fence(std::memory_order_release);

  const copy_t* psrc = reinterpret_cast<const copy_t*>(&src);
  const copy_t* pend = psrc + (sizeof(T) / sizeof(copy_t));
  std::atomic<copy_t>* pdest = reinterpret_cast<std::atomic<copy_t>*>(&dest);
  for (; psrc != pend; ++psrc, ++pdest) {
    pdest->store(*psrc, std::memory_order_relaxed);
  }
}