return pg_atomic_read_u32_impl(ptr);
}

+/*
+ * pg_atomic_read_membarrier_u32 - read with barrier semantics.
+ *
+ * This read is guaranteed to return the current value, provided that the value
+ * is only ever updated via operations with barrier semantics, such as
+ * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
+ * While this may be less performant than pg_atomic_read_u32(), it may be
+ * easier to reason about correctness with this function in less performance-
+ * sensitive code.
+ *
+ * Full barrier semantics.
+ */
+static inline uint32
+pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
+{
+ AssertPointerAlignment(ptr, 4);
+
+ return pg_atomic_read_membarrier_u32_impl(ptr);
+}
+
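For illustration, a reader-side sketch of the intended usage (all names here are hypothetical, not part of this patch): a backend that polls a shared flag can call this function instead of combining pg_atomic_read_u32() with explicit memory barriers.

    #include "port/atomics.h"

    /* Hypothetical shared variable; assume pg_atomic_init_u32() ran earlier. */
    static pg_atomic_uint32 shutdown_requested;

    static bool
    saw_shutdown_request(void)
    {
    	/*
    	 * Guaranteed to return the current value, provided the flag is only
    	 * ever updated via barrier-semantics operations such as
    	 * pg_atomic_write_membarrier_u32() (see the writer sketch below).
    	 */
    	return pg_atomic_read_membarrier_u32(&shutdown_requested) != 0;
    }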
/*
* pg_atomic_write_u32 - write to atomic variable.
*
pg_atomic_unlocked_write_u32_impl(ptr, val);
}

+/*
+ * pg_atomic_write_membarrier_u32 - write with barrier semantics.
+ *
+ * The write is guaranteed to succeed as a whole, i.e., it's not possible to
+ * observe a partial write for any reader. Note that this correctly interacts
+ * with both pg_atomic_compare_exchange_u32() and
+ * pg_atomic_read_membarrier_u32(). While this may be less performant than
+ * pg_atomic_write_u32(), it may be easier to reason about correctness with
+ * this function in less performance-sensitive code.
+ *
+ * Full barrier semantics.
+ */
+static inline void
+pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
+{
+ AssertPointerAlignment(ptr, 4);
+
+ pg_atomic_write_membarrier_u32_impl(ptr, val);
+}
+
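The matching writer side of the sketch above (again with hypothetical names): the update goes through this function, so readers using pg_atomic_read_membarrier_u32() or pg_atomic_compare_exchange_u32() are guaranteed to observe it, with no hand-written pg_memory_barrier() calls.

    static void
    request_shutdown(void)
    {
    	/* Full barrier semantics; no explicit memory barrier needed. */
    	pg_atomic_write_membarrier_u32(&shutdown_requested, 1);
    }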
/*
* pg_atomic_exchange_u32 - exchange newval with current value
*
return pg_atomic_read_u64_impl(ptr);
}

+static inline uint64
+pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ return pg_atomic_read_membarrier_u64_impl(ptr);
+}
+
static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
pg_atomic_write_u64_impl(ptr, val);
}

+static inline void
+pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
+{
+#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
+ AssertPointerAlignment(ptr, 8);
+#endif
+ pg_atomic_write_membarrier_u64_impl(ptr, val);
+}
+
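The 64-bit variants mirror their 32-bit counterparts and, per this file's convention, are documented by them. The alignment assertion is skipped when 64-bit atomics are emulated (PG_HAVE_ATOMIC_U64_SIMULATION), since the spinlock-based fallback does not require 8-byte alignment. A hypothetical sketch of the same publish/consume pattern with a 64-bit value:

    static pg_atomic_uint64 last_flushed_pos;	/* hypothetical */

    static void
    publish_flushed_pos(uint64 pos)
    {
    	pg_atomic_write_membarrier_u64(&last_flushed_pos, pos);
    }

    static uint64
    read_flushed_pos(void)
    {
    	return pg_atomic_read_membarrier_u64(&last_flushed_pos);
    }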
static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
}
#endif

+#if !defined(PG_HAVE_ATOMIC_READ_MEMBARRIER_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
+#define PG_HAVE_ATOMIC_READ_MEMBARRIER_U32
+static inline uint32
+pg_atomic_read_membarrier_u32_impl(volatile pg_atomic_uint32 *ptr)
+{
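+	/* An atomic add of zero returns the value and acts as a full barrier. */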
+ return pg_atomic_fetch_add_u32_impl(ptr, 0);
+}
+#endif
+
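This fallback relies on the fact that a no-op atomic read-modify-write both returns the current value and carries full barrier semantics. Conceptually (a sketch only; the actual expansion depends on each platform's pg_atomic_fetch_add_u32_impl()), it behaves like the following compiler-builtin form:

    static inline uint32
    read_membarrier_sketch(volatile uint32 *p)
    {
    	/* GCC/Clang builtin, shown purely for illustration. */
    	return __atomic_fetch_add(p, 0, __ATOMIC_SEQ_CST);
    }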
+#if !defined(PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U32) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)
+#define PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U32
+static inline void
+pg_atomic_write_membarrier_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
+{
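+	/* An atomic exchange is a store that also acts as a full barrier. */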
+ (void) pg_atomic_exchange_u32_impl(ptr, val);
+}
+#endif
+
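Similarly, exchanging the new value in and discarding the old one yields a store with full barrier semantics; on x86, for instance, this typically compiles to a single xchg instruction, which has implicit lock semantics. A sketch in builtin terms (illustration only):

    static inline void
    write_membarrier_sketch(volatile uint32 *p, uint32 val)
    {
    	/* Discard the previous value; only the store plus barrier matter. */
    	(void) __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
    }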
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
}
#endif
+
+#if !defined(PG_HAVE_ATOMIC_READ_MEMBARRIER_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
+#define PG_HAVE_ATOMIC_READ_MEMBARRIER_U64
+static inline uint64
+pg_atomic_read_membarrier_u64_impl(volatile pg_atomic_uint64 *ptr)
+{
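+	/* Same no-op add trick as the 32-bit fallback above. */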
+ return pg_atomic_fetch_add_u64_impl(ptr, 0);
+}
+#endif
+
+#if !defined(PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U64) && defined(PG_HAVE_ATOMIC_EXCHANGE_U64)
+#define PG_HAVE_ATOMIC_WRITE_MEMBARRIER_U64
+static inline void
+pg_atomic_write_membarrier_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
+{
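+	/* Same exchange trick as the 32-bit fallback above. */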
+ (void) pg_atomic_exchange_u64_impl(ptr, val);
+}
+#endif