Revert 9.5 pgindent changes to atomics directory files
Author:     Bruce Momjian <bruce@momjian.us>
AuthorDate: Mon, 25 May 2015 01:44:57 +0000 (21:44 -0400)
Commit:     Bruce Momjian <bruce@momjian.us>
CommitDate: Mon, 25 May 2015 01:45:01 +0000 (21:45 -0400)
This is because the atomics headers contain many __asm__ blocks that
pgindent mangles.  Also configure pgindent to skip that directory in the
future.

src/include/port/atomics/arch-ia64.h
src/include/port/atomics/arch-x86.h
src/include/port/atomics/fallback.h
src/include/port/atomics/generic-acc.h
src/include/port/atomics/generic-gcc.h
src/include/port/atomics/generic-msvc.h
src/include/port/atomics/generic-sunpro.h
src/include/port/atomics/generic-xlc.h
src/include/port/atomics/generic.h
src/tools/pgindent/exclude_file_patterns
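
The construct that defeats pgindent is gcc extended inline assembly, whose
colon-separated operand sections it misreads as C.  A minimal sketch in the
spirit of arch-x86.h (illustrative only, not code from the tree):

    static inline void
    spin_delay_sketch(void)
    {
        /*
         * gcc extended asm: the quoted instruction template and the three
         * ":"-separated operand sections below are what pgindent misreads
         * as C, producing the mangled "-" lines reverted in this commit.
         */
        __asm__ __volatile__(
            " rep; nop \n"
            :               /* no outputs */
            :               /* no inputs */
            : "memory");    /* clobbers */
    }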

diff --git a/src/include/port/atomics/arch-ia64.h b/src/include/port/atomics/arch-ia64.h
index 3fd3918114b8ae668ff5ce4c06a66f59d84884ce..2591a0f1637f36eb0f0689fe81abe46997294470 100644
@@ -18,9 +18,9 @@
  * fence.
  */
 #if defined(__INTEL_COMPILER)
-#define pg_memory_barrier_impl()        __mf()
+#      define pg_memory_barrier_impl()         __mf()
 #elif defined(__GNUC__)
-#define pg_memory_barrier_impl()        __asm__ __volatile__ ("mf" : : : "memory")
+#      define pg_memory_barrier_impl()         __asm__ __volatile__ ("mf" : : : "memory")
 #elif defined(__hpux)
-#define pg_memory_barrier_impl()        _Asm_mf()
+#      define pg_memory_barrier_impl()         _Asm_mf()
 #endif
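
For context, pg_memory_barrier_impl() above is the full fence behind the
pg_memory_barrier() wrapper in port/atomics.h.  A hedged usage sketch
(assumes "postgres.h" and "port/atomics.h"; the shared variables are
hypothetical):

    extern volatile int shared_data;    /* hypothetical */
    extern volatile bool shared_ready;  /* hypothetical */

    static void
    publish_sketch(int v)
    {
        shared_data = v;
        pg_memory_barrier();    /* full fence: the "mf" instruction on IA-64 */
        shared_ready = true;
    }
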
diff --git a/src/include/port/atomics/arch-x86.h b/src/include/port/atomics/arch-x86.h
index d7f45f325e2c3e6ef9b13aecab71a6575f879210..168a49c79345a7309f769e6cc07c2a3e7f0bb375 100644
@@ -78,10 +78,9 @@ typedef struct pg_atomic_uint64
 } pg_atomic_uint64;
 #endif
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
-#endif   /* defined(__GNUC__) &&
-                                                                * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -94,20 +93,20 @@ typedef struct pg_atomic_uint64
  * PAUSE in the inner loop of a spin lock is necessary for good
  * performance:
  *
- *        The PAUSE instruction improves the performance of IA-32
- *        processors supporting Hyper-Threading Technology when
- *        executing spin-wait loops and other routines where one
- *        thread is accessing a shared lock or semaphore in a tight
- *        polling loop. When executing a spin-wait loop, the
- *        processor can suffer a severe performance penalty when
- *        exiting the loop because it detects a possible memory order
- *        violation and flushes the core processor's pipeline. The
- *        PAUSE instruction provides a hint to the processor that the
- *        code sequence is a spin-wait loop. The processor uses this
- *        hint to avoid the memory order violation and prevent the
- *        pipeline flush. In addition, the PAUSE instruction
- *        de-pipelines the spin-wait loop to prevent it from
- *        consuming execution resources excessively.
+ *     The PAUSE instruction improves the performance of IA-32
+ *     processors supporting Hyper-Threading Technology when
+ *     executing spin-wait loops and other routines where one
+ *     thread is accessing a shared lock or semaphore in a tight
+ *     polling loop. When executing a spin-wait loop, the
+ *     processor can suffer a severe performance penalty when
+ *     exiting the loop because it detects a possible memory order
+ *     violation and flushes the core processor's pipeline. The
+ *     PAUSE instruction provides a hint to the processor that the
+ *     code sequence is a spin-wait loop. The processor uses this
+ *     hint to avoid the memory order violation and prevent the
+ *     pipeline flush. In addition, the PAUSE instruction
+ *     de-pipelines the spin-wait loop to prevent it from
+ *     consuming execution resources excessively.
  */
 #if defined(__INTEL_COMPILER)
 #define PG_HAVE_SPIN_DELAY
@@ -121,8 +120,8 @@ pg_spin_delay_impl(void)
 static __inline__ void
 pg_spin_delay_impl(void)
 {
-       __asm__         __volatile__(
-                                                                                " rep; nop                     \n");
+       __asm__ __volatile__(
+               " rep; nop                      \n");
 }
 #elif defined(WIN32_ONLY_COMPILER) && defined(__x86_64__)
 #define PG_HAVE_SPIN_DELAY
@@ -137,10 +136,10 @@ static __forceinline void
 pg_spin_delay_impl(void)
 {
        /* See comment for gcc code. Same code, MASM syntax */
-       __asm rep       nop;
+       __asm rep nop;
 }
 #endif
-#endif   /* !defined(PG_HAVE_SPIN_DELAY) */
+#endif /* !defined(PG_HAVE_SPIN_DELAY) */
 
 
 #if defined(HAVE_ATOMICS)
@@ -154,13 +153,12 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 {
        register char _res = 1;
 
-       __asm__         __volatile__(
-                                                                                "      lock                    \n"
-                                                                                "      xchgb   %0,%1   \n"
-                                                        :                       "+q"(_res), "+m"(ptr->value)
-                                                        :
-                                                        :                       "memory");
-
+       __asm__ __volatile__(
+               "       lock                    \n"
+               "       xchgb   %0,%1   \n"
+:              "+q"(_res), "+m"(ptr->value)
+:
+:              "memory");
        return _res == 0;
 }
 
@@ -172,8 +170,7 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
         * On a TSO architecture like x86 it's sufficient to use a compiler
         * barrier to achieve release semantics.
         */
-       __asm__         __volatile__("":::"memory");
-
+       __asm__ __volatile__("" ::: "memory");
        ptr->value = 0;
 }
 
@@ -182,20 +179,19 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                                        uint32 *expected, uint32 newval)
 {
-       char            ret;
+       char    ret;
 
        /*
         * Perform cmpxchg and use the zero flag which it implicitly sets when
         * equal to measure the success.
         */
-       __asm__         __volatile__(
-                                                                  "    lock                            \n"
-                                                                                "      cmpxchgl        %4,%5   \n"
-                                                                  "   setz             %2              \n"
-                                        :                       "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-                                        :                       "a"(*expected), "r"(newval), "m"(ptr->value)
-                                                        :                       "memory", "cc");
-
+       __asm__ __volatile__(
+               "       lock                            \n"
+               "       cmpxchgl        %4,%5   \n"
+               "   setz                %2              \n"
+:              "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+:              "a" (*expected), "r" (newval), "m"(ptr->value)
+:              "memory", "cc");
        return (bool) ret;
 }
 
@@ -203,14 +199,13 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
-       uint32          res;
-       __asm__         __volatile__(
-                                                                  "    lock                            \n"
-                                                                                "      xaddl   %0,%1           \n"
-                                                        :                       "=q"(res), "=m"(ptr->value)
-                                                        :                       "0"(add_), "m"(ptr->value)
-                                                        :                       "memory", "cc");
-
+       uint32 res;
+       __asm__ __volatile__(
+               "       lock                            \n"
+               "       xaddl   %0,%1           \n"
+:              "=q"(res), "=m"(ptr->value)
+:              "0" (add_), "m"(ptr->value)
+:              "memory", "cc");
        return res;
 }
 
@@ -221,20 +216,19 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                                                        uint64 *expected, uint64 newval)
 {
-       char            ret;
+       char    ret;
 
        /*
         * Perform cmpxchg and use the zero flag which it implicitly sets when
         * equal to measure the success.
         */
-       __asm__         __volatile__(
-                                                                  "    lock                            \n"
-                                                                                "      cmpxchgq        %4,%5   \n"
-                                                                  "   setz             %2              \n"
-                                        :                       "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-                                        :                       "a"(*expected), "r"(newval), "m"(ptr->value)
-                                                        :                       "memory", "cc");
-
+       __asm__ __volatile__(
+               "       lock                            \n"
+               "       cmpxchgq        %4,%5   \n"
+               "   setz                %2              \n"
+:              "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+:              "a" (*expected), "r" (newval), "m"(ptr->value)
+:              "memory", "cc");
        return (bool) ret;
 }
 
@@ -242,23 +236,20 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
-       uint64          res;
-       __asm__         __volatile__(
-                                                                  "    lock                            \n"
-                                                                                "      xaddq   %0,%1           \n"
-                                                        :                       "=q"(res), "=m"(ptr->value)
-                                                        :                       "0"(add_), "m"(ptr->value)
-                                                        :                       "memory", "cc");
-
+       uint64 res;
+       __asm__ __volatile__(
+               "       lock                            \n"
+               "       xaddq   %0,%1           \n"
+:              "=q"(res), "=m"(ptr->value)
+:              "0" (add_), "m"(ptr->value)
+:              "memory", "cc");
        return res;
 }
 
-#endif   /* __x86_64__ */
+#endif /* __x86_64__ */
 
-#endif   /* defined(__GNUC__) &&
-                                                                * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
 
-#endif   /* HAVE_ATOMICS */
+#endif /* HAVE_ATOMICS */
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
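
The PAUSE note above describes a test-and-test-and-set spin loop.  A hedged
sketch using the public wrappers from port/atomics.h (pg_spin_delay() is
assumed to be the wrapper around pg_spin_delay_impl()):

    static void
    spin_lock_sketch(volatile pg_atomic_flag *lock)
    {
        while (!pg_atomic_test_set_flag(lock))
        {
            /*
             * Lock is held: poll read-only, issuing PAUSE each iteration
             * as the Intel note above recommends, then retry the TAS.
             */
            while (!pg_atomic_unlocked_test_flag(lock))
                pg_spin_delay();
        }
    }
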
diff --git a/src/include/port/atomics/fallback.h b/src/include/port/atomics/fallback.h
index 01af089f7b5e40dd82be7180bed8fecd79d2185d..4e04f9758b971ac8a97d727c653e7d9ef8ea17b9 100644
@@ -1,8 +1,8 @@
 /*-------------------------------------------------------------------------
  *
  * fallback.h
- *       Fallback for platforms without spinlock and/or atomics support. Slower
- *       than native atomics support, but not unusably slow.
+ *    Fallback for platforms without spinlock and/or atomics support. Slower
+ *    than native atomics support, but not unusably slow.
  *
  * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -14,7 +14,7 @@
 
 /* intentionally no include guards, should only be included by atomics.h */
 #ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+#      error "should be included via atomics.h"
 #endif
 
 #ifndef pg_memory_barrier_impl
@@ -75,15 +75,14 @@ typedef struct pg_atomic_flag
         * be content with just one byte instead of 4, but that's not too much
         * waste.
         */
-#if defined(__hppa) || defined(__hppa__)               /* HP PA-RISC, GCC and HP
-                                                                                                * compilers */
+#if defined(__hppa) || defined(__hppa__)       /* HP PA-RISC, GCC and HP compilers */
        int                     sema[4];
 #else
        int                     sema;
 #endif
 } pg_atomic_flag;
 
-#endif   /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
 
 #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
 
@@ -93,8 +92,7 @@ typedef struct pg_atomic_flag
 typedef struct pg_atomic_uint32
 {
        /* Check pg_atomic_flag's definition above for an explanation */
-#if defined(__hppa) || defined(__hppa__)               /* HP PA-RISC, GCC and HP
-                                                                                                * compilers */
+#if defined(__hppa) || defined(__hppa__)       /* HP PA-RISC, GCC and HP compilers */
        int                     sema[4];
 #else
        int                     sema;
@@ -102,7 +100,7 @@ typedef struct pg_atomic_uint32
        volatile uint32 value;
 } pg_atomic_uint32;
 
-#endif   /* PG_HAVE_ATOMIC_U32_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -130,7 +128,7 @@ pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
        return true;
 }
 
-#endif   /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
 
 #ifdef PG_HAVE_ATOMIC_U32_SIMULATION
 
@@ -139,13 +137,12 @@ extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
 
 #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
 extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
-                                                                       uint32 *expected, uint32 newval);
+                                                                                               uint32 *expected, uint32 newval);
 
 #define PG_HAVE_ATOMIC_FETCH_ADD_U32
 extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
 
-#endif   /* PG_HAVE_ATOMIC_U32_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
 
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
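
The sema field exists so atomics.c can emulate each operation under a
spinlock.  A simplified illustration of the general idea (hedged: not
atomics.c's actual code; it assumes an slock_t fits in sema):

    static bool
    simulated_cas_u32(volatile pg_atomic_uint32 *ptr,
                      uint32 *expected, uint32 newval)
    {
        bool ret;

        SpinLockAcquire((slock_t *) &ptr->sema);
        if (ptr->value == *expected)
        {
            ptr->value = newval;
            ret = true;
        }
        else
        {
            *expected = ptr->value;     /* report the observed value */
            ret = false;
        }
        SpinLockRelease((slock_t *) &ptr->sema);
        return ret;
    }
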
diff --git a/src/include/port/atomics/generic-acc.h b/src/include/port/atomics/generic-acc.h
index e16cc6f7dc24e6b53f110ba996825f472379ac0f..c5639aadda0b6d11b3f6f653af41934f0e0dd2cd 100644
@@ -10,9 +10,9 @@
  *
  * Documentation:
  * * inline assembly for Itanium-based HP-UX:
- *      http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
+ *   http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
  * * Implementing Spinlocks on the Intel (R) Itanium (R) Architecture and PA-RISC
- *      http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
+ *   http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
  *
  * Itanium only supports a small set of numbers (6, -8, -4, -1, 1, 4, 8, 16)
  * for atomic add/sub, so we just implement everything but compare_exchange
@@ -49,7 +49,7 @@ typedef struct pg_atomic_uint64
        volatile uint64 value;
 } pg_atomic_uint64;
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -64,25 +64,23 @@ STATIC_IF_INLINE bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                                        uint32 *expected, uint32 newval)
 {
-       bool            ret;
-       uint32          current;
+       bool    ret;
+       uint32  current;
 
        _Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
-
        /*
         * We want a barrier, not just release/acquire semantics.
         */
        _Asm_mf();
-
        /*
-        * Notes: DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the
-        * compiler
+        * Notes:
+        * DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
         */
-       current = _Asm_cmpxchg(_SZ_W,           /* word */
-                                                  _SEM_REL,
-                                                  &ptr->value,
-                                                  newval, _LDHINT_NONE,
-                                                  _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+       current =  _Asm_cmpxchg(_SZ_W, /* word */
+                                                       _SEM_REL,
+                                                       &ptr->value,
+                                                       newval, _LDHINT_NONE,
+                                                       _DOWN_MEM_FENCE | _UP_MEM_FENCE);
        ret = current == *expected;
        *expected = current;
        return ret;
@@ -94,16 +92,16 @@ STATIC_IF_INLINE bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                                                        uint64 *expected, uint64 newval)
 {
-       bool            ret;
-       uint64          current;
+       bool    ret;
+       uint64  current;
 
        _Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
        _Asm_mf();
-       current = _Asm_cmpxchg(_SZ_D,           /* doubleword */
-                                                  _SEM_REL,
-                                                  &ptr->value,
-                                                  newval, _LDHINT_NONE,
-                                                  _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+       current =  _Asm_cmpxchg(_SZ_D, /* doubleword */
+                                                       _SEM_REL,
+                                                       &ptr->value,
+                                                       newval, _LDHINT_NONE,
+                                                       _DOWN_MEM_FENCE | _UP_MEM_FENCE);
        ret = current == *expected;
        *expected = current;
        return ret;
@@ -111,7 +109,6 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 
 #undef MINOR_FENCE
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-gcc.h b/src/include/port/atomics/generic-gcc.h
index 301ab510bf32dd7f9ff98d594bca364ba1567ba3..591c9fe1eb3efcb4987a04eff991aeb13af6efd0 100644
@@ -10,9 +10,9 @@
  *
  * Documentation:
  * * Legacy __sync Built-in Functions for Atomic Memory Access
- *      http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
+ *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
  * * Built-in functions for memory model aware atomic operations
- *      http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
+ *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
  *
  * src/include/port/atomics/generic-gcc.h
  *
  * definitions where possible, and use this only as a fallback.
  */
 #if !defined(pg_memory_barrier_impl)
-#if defined(HAVE_GCC__ATOMIC_INT32_CAS)
-#define pg_memory_barrier_impl()        __atomic_thread_fence(__ATOMIC_SEQ_CST)
-#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-#define pg_memory_barrier_impl()        __sync_synchronize()
-#endif
-#endif   /* !defined(pg_memory_barrier_impl) */
+#      if defined(HAVE_GCC__ATOMIC_INT32_CAS)
+#              define pg_memory_barrier_impl()         __atomic_thread_fence(__ATOMIC_SEQ_CST)
+#      elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+#              define pg_memory_barrier_impl()         __sync_synchronize()
+#      endif
+#endif /* !defined(pg_memory_barrier_impl) */
 
 #if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
 /* acquire semantics include read barrier semantics */
-#define pg_read_barrier_impl()          __atomic_thread_fence(__ATOMIC_ACQUIRE)
+#              define pg_read_barrier_impl()           __atomic_thread_fence(__ATOMIC_ACQUIRE)
 #endif
 
 #if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
 /* release semantics include write barrier semantics */
-#define pg_write_barrier_impl()                 __atomic_thread_fence(__ATOMIC_RELEASE)
+#              define pg_write_barrier_impl()          __atomic_thread_fence(__ATOMIC_RELEASE)
 #endif
 
 #ifdef HAVE_ATOMICS
@@ -75,7 +75,7 @@ typedef struct pg_atomic_flag
 #endif
 } pg_atomic_flag;
 
-#endif   /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
+#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
 
 /* generic gcc based atomic uint32 implementation */
 #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
@@ -87,8 +87,7 @@ typedef struct pg_atomic_uint32
        volatile uint32 value;
 } pg_atomic_uint32;
 
-#endif   /* defined(HAVE_GCC__ATOMIC_INT32_CAS) ||
-                                                                * defined(HAVE_GCC__SYNC_INT32_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
 
 /* generic gcc based atomic uint64 implementation */
 #if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
@@ -102,8 +101,7 @@ typedef struct pg_atomic_uint64
        volatile uint64 value pg_attribute_aligned(8);
 } pg_atomic_uint64;
 
-#endif   /* defined(HAVE_GCC__ATOMIC_INT64_CAS) ||
-                                                                * defined(HAVE_GCC__SYNC_INT64_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
 
 /*
  * Implementation follows. Inlined or directly included from atomics.c
@@ -125,7 +123,7 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 }
 #endif
 
-#endif   /* defined(HAVE_GCC__SYNC_*_TAS) */
+#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
 
 #ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
 #define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
@@ -154,7 +152,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 }
 #endif
 
-#endif   /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
+#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
 
 /* prefer __atomic, it has a better API */
 #if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
@@ -175,9 +173,8 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                                        uint32 *expected, uint32 newval)
 {
-       bool            ret;
-       uint32          current;
-
+       bool    ret;
+       uint32  current;
        current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
        ret = current == *expected;
        *expected = current;
@@ -214,9 +211,8 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                                                        uint64 *expected, uint64 newval)
 {
-       bool            ret;
-       uint64          current;
-
+       bool    ret;
+       uint64  current;
        current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
        ret = current == *expected;
        *expected = current;
@@ -233,9 +229,8 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 }
 #endif
 
-#endif   /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
+#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
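
Outside the PostgreSQL tree, the __sync fallback path above reduces to the
following self-contained sketch: __sync_val_compare_and_swap returns the
value previously in *ptr, so success is simply old == expected.

    #include <stdbool.h>
    #include <stdint.h>

    static inline bool
    cas_u32_sketch(volatile uint32_t *ptr, uint32_t *expected, uint32_t newval)
    {
        uint32_t    old = __sync_val_compare_and_swap(ptr, *expected, newval);
        bool        ok = (old == *expected);

        *expected = old;    /* report what was observed, as the PG API does */
        return ok;
    }
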
diff --git a/src/include/port/atomics/generic-msvc.h b/src/include/port/atomics/generic-msvc.h
index 3c177fea7e5acf7aad30a64482ce72ee8d0aad7e..d259d6f51d085c1e056eb74479b2e51b702e13b7 100644
@@ -10,7 +10,7 @@
  *
  * Documentation:
  * * Interlocked Variable Access
- *      http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
+ *   http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
  *
  * src/include/port/atomics/generic-msvc.h
  *
@@ -41,14 +41,12 @@ typedef struct pg_atomic_uint32
 } pg_atomic_uint32;
 
 #define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct __declspec (
-                                                  align(8))
-pg_atomic_uint64
+typedef struct __declspec(align(8)) pg_atomic_uint64
 {
        volatile uint64 value;
 } pg_atomic_uint64;
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -60,9 +58,8 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                                        uint32 *expected, uint32 newval)
 {
-       bool            ret;
-       uint32          current;
-
+       bool    ret;
+       uint32  current;
        current = InterlockedCompareExchange(&ptr->value, newval, *expected);
        ret = current == *expected;
        *expected = current;
@@ -89,9 +86,8 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                                                        uint64 *expected, uint64 newval)
 {
-       bool            ret;
-       uint64          current;
-
+       bool    ret;
+       uint64  current;
        current = _InterlockedCompareExchange64(&ptr->value, newval, *expected);
        ret = current == *expected;
        *expected = current;
@@ -108,9 +104,8 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
        return _InterlockedExchangeAdd64(&ptr->value, add_);
 }
-#endif   /* _WIN64 */
+#endif /* _WIN64 */
 
-#endif   /* HAVE_ATOMICS */
+#endif /* HAVE_ATOMICS */
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
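
The reflowed __declspec typedef above matters because the 64-bit
Interlocked intrinsics require an 8-byte-aligned operand.  A hedged
standalone sketch (demo_u64 is a hypothetical stand-in for
pg_atomic_uint64):

    #include <intrin.h>

    typedef struct __declspec(align(8)) demo_u64
    {
        volatile unsigned __int64 value;
    } demo_u64;

    static int
    cas64_sketch(demo_u64 *p, unsigned __int64 expected, unsigned __int64 newval)
    {
        /*
         * The intrinsic returns the initial value; the exchange happened
         * iff that initial value equals the comparand.
         */
        return _InterlockedCompareExchange64((volatile __int64 *) &p->value,
                                             (__int64) newval,
                                             (__int64) expected) == (__int64) expected;
    }
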
diff --git a/src/include/port/atomics/generic-sunpro.h b/src/include/port/atomics/generic-sunpro.h
index e74cbad5028e9a468b348e86d934c472ffab0833..d369207fb34a41ede28bf7e125ec75fc34affedb 100644
@@ -9,8 +9,8 @@
  *
  * Documentation:
  * * manpage for atomic_cas(3C)
- *      http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
- *      http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
+ *   http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
+ *   http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
  *
  * src/include/port/atomics/generic-sunpro.h
  *
  * membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad on x86/sparc
  * respectively.
  */
-#define pg_memory_barrier_impl()        __machine_rw_barrier()
+#      define pg_memory_barrier_impl()         __machine_rw_barrier()
 #endif
 #ifndef pg_read_barrier_impl
-#define pg_read_barrier_impl()          __machine_r_barrier()
+#      define pg_read_barrier_impl()           __machine_r_barrier()
 #endif
 #ifndef pg_write_barrier_impl
-#define pg_write_barrier_impl()                 __machine_w_barrier()
+#      define pg_write_barrier_impl()          __machine_w_barrier()
 #endif
 
-#endif   /* HAVE_MBARRIER_H */
+#endif /* HAVE_MBARRIER_H */
 
 /* Older versions of the compiler don't have atomic.h... */
 #ifdef HAVE_ATOMIC_H
@@ -64,9 +64,9 @@ typedef struct pg_atomic_uint64
        volatile uint64 value pg_attribute_aligned(8);
 } pg_atomic_uint64;
 
-#endif   /* HAVE_ATOMIC_H */
+#endif /* HAVE_ATOMIC_H */
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -80,8 +80,8 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                                        uint32 *expected, uint32 newval)
 {
-       bool            ret;
-       uint32          current;
+       bool    ret;
+       uint32  current;
 
        current = atomic_cas_32(&ptr->value, *expected, newval);
        ret = current == *expected;
@@ -94,8 +94,8 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                                                        uint64 *expected, uint64 newval)
 {
-       bool            ret;
-       uint64          current;
+       bool    ret;
+       uint64  current;
 
        current = atomic_cas_64(&ptr->value, *expected, newval);
        ret = current == *expected;
@@ -103,9 +103,8 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
        return ret;
 }
 
-#endif   /* HAVE_ATOMIC_H */
+#endif /* HAVE_ATOMIC_H */
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic-xlc.h b/src/include/port/atomics/generic-xlc.h
index 01c19121eb6c1b55a5b0a1c6a6907e9933a4a3a2..1c743f2bc80606df3cd5f8a68dcab302f2caf764 100644
@@ -9,7 +9,7 @@
  *
  * Documentation:
  * * Synchronization and atomic built-in functions
- *      http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
+ *   http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
  *
  * src/include/port/atomics/generic-xlc.h
  *
@@ -35,9 +35,9 @@ typedef struct pg_atomic_uint64
        volatile uint64 value pg_attribute_aligned(8);
 } pg_atomic_uint64;
 
-#endif   /* __64BIT__ */
+#endif /* __64BIT__ */
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
 
@@ -48,13 +48,13 @@ static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                                        uint32 *expected, uint32 newval)
 {
-       bool            ret;
-       uint64          current;
+       bool    ret;
+       uint64  current;
 
        /*
-        * xlc's documentation tells us: "If __compare_and_swap is used as a
-        * locking primitive, insert a call to the __isync built-in function at
-        * the start of any critical sections."
+        * xlc's documentation tells us:
+        * "If __compare_and_swap is used as a locking primitive, insert a call to
+        * the __isync built-in function at the start of any critical sections."
         */
        __isync();
 
@@ -62,8 +62,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
         * XXX: __compare_and_swap is defined to take signed parameters, but that
         * shouldn't matter since we don't perform any arithmetic operations.
         */
-       current = (uint32) __compare_and_swap((volatile int *) ptr->value,
-                                                                                 (int) *expected, (int) newval);
+       current = (uint32)__compare_and_swap((volatile int*)ptr->value,
+                                                                                (int)*expected, (int)newval);
        ret = current == *expected;
        *expected = current;
        return ret;
@@ -83,13 +83,13 @@ static inline bool
 pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                                                        uint64 *expected, uint64 newval)
 {
-       bool            ret;
-       uint64          current;
+       bool    ret;
+       uint64  current;
 
        __isync();
 
-       current = (uint64) __compare_and_swaplp((volatile long *) ptr->value,
-                                                                                       (long) *expected, (long) newval);
+       current = (uint64)__compare_and_swaplp((volatile long*)ptr->value,
+                                                                                  (long)*expected, (long)newval);
        ret = current == *expected;
        *expected = current;
        return ret;
@@ -102,9 +102,8 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
        return __fetch_and_addlp(&ptr->value, add_);
 }
 
-#endif   /* PG_HAVE_ATOMIC_U64_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
 
-#endif   /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
diff --git a/src/include/port/atomics/generic.h b/src/include/port/atomics/generic.h
index 9787f9ee87107a9892696d3d78f2b6c7dd1c7f44..bb31df3623715f2356a1f647f39c2cf4a493e219 100644
@@ -14,7 +14,7 @@
 
 /* intentionally no include guards, should only be included by atomics.h */
 #ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+#      error "should be included via atomics.h"
 #endif
 
 /*
  * barriers.
  */
 #if !defined(pg_read_barrier_impl)
-#define pg_read_barrier_impl pg_memory_barrier_impl
+#      define pg_read_barrier_impl pg_memory_barrier_impl
 #endif
 #if !defined(pg_write_barrier_impl)
-#define pg_write_barrier_impl pg_memory_barrier_impl
+#      define pg_write_barrier_impl pg_memory_barrier_impl
 #endif
 
 #ifndef PG_HAVE_SPIN_DELAY
@@ -113,8 +113,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
 static inline bool
 pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 {
-       uint32          value = 0;
-
+       uint32 value = 0;
        return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
 }
 
@@ -130,23 +129,23 @@ static inline void
 pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
 {
        /*
-        * Use a memory barrier + plain write if we have a native memory barrier.
-        * But don't do so if memory barriers use spinlocks - that'd lead to
-        * circularity if flags are used to implement spinlocks.
+        * Use a memory barrier + plain write if we have a native memory
+        * barrier. But don't do so if memory barriers use spinlocks - that'd lead
+        * to circularity if flags are used to implement spinlocks.
         */
 #ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
        /* XXX: release semantics suffice? */
        pg_memory_barrier_impl();
        pg_atomic_write_u32_impl(ptr, 0);
 #else
-       uint32          value = 1;
+       uint32 value = 1;
        pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
 #endif
 }
 
 #elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
-#error "No pg_atomic_test_and_set provided"
-#endif   /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
+#      error "No pg_atomic_test_and_set provided"
+#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
 
 
 #ifndef PG_HAVE_ATOMIC_INIT_U32
@@ -163,8 +162,7 @@ pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
 static inline uint32
 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
 {
-       uint32          old;
-
+       uint32 old;
        while (true)
        {
                old = pg_atomic_read_u32_impl(ptr);
@@ -180,8 +178,7 @@ pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
 static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
-       uint32          old;
-
+       uint32 old;
        while (true)
        {
                old = pg_atomic_read_u32_impl(ptr);
@@ -206,8 +203,7 @@ pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
 static inline uint32
 pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
 {
-       uint32          old;
-
+       uint32 old;
        while (true)
        {
                old = pg_atomic_read_u32_impl(ptr);
@@ -223,8 +219,7 @@ pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
 static inline uint32
 pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
 {
-       uint32          old;
-
+       uint32 old;
        while (true)
        {
                old = pg_atomic_read_u32_impl(ptr);
@@ -260,8 +255,7 @@ pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
 static inline uint64
 pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
 {
-       uint64          old;
-
+       uint64 old;
        while (true)
        {
                old = ptr->value;
@@ -290,7 +284,7 @@ pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
 static inline uint64
 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 {
-       uint64          old = 0;
+       uint64 old = 0;
 
        /*
         * 64 bit reads aren't safe on all platforms. In the generic
@@ -318,8 +312,7 @@ pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
 static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
-       uint64          old;
-
+       uint64 old;
        while (true)
        {
                old = pg_atomic_read_u64_impl(ptr);
@@ -344,8 +337,7 @@ pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
 static inline uint64
 pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
 {
-       uint64          old;
-
+       uint64 old;
        while (true)
        {
                old = pg_atomic_read_u64_impl(ptr);
@@ -361,8 +353,7 @@ pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
 static inline uint64
 pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
 {
-       uint64          old;
-
+       uint64 old;
        while (true)
        {
                old = pg_atomic_read_u64_impl(ptr);
@@ -391,7 +382,6 @@ pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
 }
 #endif
 
-#endif   /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
+#endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
 
-#endif   /* defined(PG_USE_INLINE) ||
-                                                                * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
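
generic.h backfills any operation a platform lacks with a compare-exchange
loop, as the fetch_add/fetch_and/fetch_or hunks above show.  A hedged usage
sketch through the public wrappers in port/atomics.h:

    static uint32
    bump_counter(volatile pg_atomic_uint32 *counter)
    {
        /*
         * Returns the pre-increment value; served by a native xadd where
         * arch-x86.h provides one, else by generic.h's CAS loop.
         */
        return pg_atomic_fetch_add_u32(counter, 1);
    }
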
diff --git a/src/tools/pgindent/exclude_file_patterns b/src/tools/pgindent/exclude_file_patterns
index b6d52691971fb6f957d8c9d72b4768537bf00840..fb99842f97f0e715d9a014625b1e84782e4a94e5 100644
@@ -1,5 +1,6 @@
 #list of file patterns to exclude from pg_indent runs
 /s_lock\.h$
+/atomics/
 /ecpg/test/expected/
 /snowball/libstemmer/
 /pl/plperl/ppport\.h$
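
These entries are regular expressions matched against file paths (note the
escaped dots and $ anchors), so the new /atomics/ line makes pgindent skip
anything under a directory named atomics, e.g.
src/include/port/atomics/arch-x86.h, alongside the existing exclusions such
as s_lock.h.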