| author | Andres Freund | 2014-09-25 21:49:05 +0000 |
|---|---|---|
| committer | Andres Freund | 2014-09-25 21:49:05 +0000 |
| commit | b64d92f1a5602c55ee8b27a7ac474f03b7aee340 | (patch) |
| tree | 2ac33fb70d31585297ae8baeb674ef757544a1d7 | /src/test |
| parent | 9111d46351e8c3d82452a7454e43ccbf1991b3dc | (diff) |
Add a basic atomic ops API abstracting away platform/architecture details.
Several upcoming performance/scalability improvements require atomic
operations. This new API avoids the need to scatter compiler- and
architecture-dependent code over all the locations employing atomic
ops.
For several of the potential usages it would be problematic to maintain
both an atomics-based implementation and one using spinlocks or
similar; in all likelihood one of the two would not get tested
regularly under concurrency. To avoid that scenario the new API
provides an automatic fallback from atomic operations to spinlocks,
preserving all the properties of atomic operations. This fallback is
obviously not as fast as real atomic ops, but it is not bad either: for
one of the future users the atomics-on-top-of-spinlocks implementation
was actually slightly faster than the old, purely spinlock-based
implementation. That is important because it reduces the fear of
regressing older platforms when improving the scalability for new ones.
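
The idea behind such a lock-based fallback, as a minimal sketch rather
than the actual fallback code (the names here are hypothetical, and a
pthread mutex stands in for a PostgreSQL spinlock): each simulated
atomic variable embeds a lock, and every operation takes it, which
preserves the read-modify-write guarantees of the atomic API.

```c
#include <pthread.h>
#include <stdint.h>

/* Hypothetical stand-in for the fallback: a lock plus a plain integer. */
typedef struct
{
	pthread_mutex_t lock;		/* PostgreSQL would use a spinlock here */
	uint32_t	value;
} locked_atomic_u32;

static void
locked_init_u32(locked_atomic_u32 *ptr, uint32_t val)
{
	pthread_mutex_init(&ptr->lock, NULL);
	ptr->value = val;
}

/* Returns the pre-add value, matching fetch-add semantics. */
static uint32_t
locked_fetch_add_u32(locked_atomic_u32 *ptr, int32_t add)
{
	uint32_t	old;

	pthread_mutex_lock(&ptr->lock);
	old = ptr->value;
	ptr->value += add;
	pthread_mutex_unlock(&ptr->lock);
	return old;
}
```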
The API, loosely modeled on the C11 atomics support, currently
provides 'atomic flags' and 32-bit unsigned integers. If the platform
supports atomic 64-bit unsigned integers efficiently, those are
provided as well.
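
For orientation, a short usage sketch: each call below belongs to the
new API and is exercised by the regression test added in this commit,
though this particular combination of calls is made up.

```c
#include "port/atomics.h"

static uint32
atomics_usage_sketch(void)
{
	pg_atomic_flag onceflag;
	pg_atomic_uint32 counter;

	pg_atomic_init_flag(&onceflag);		/* flag starts out unset */
	if (pg_atomic_test_set_flag(&onceflag))
	{
		/* this backend won the race; do the one-time work ... */
		pg_atomic_clear_flag(&onceflag);	/* ... then release the flag */
	}

	pg_atomic_init_u32(&counter, 0);
	pg_atomic_fetch_add_u32(&counter, 1);	/* returns the pre-add value, 0 */
	return pg_atomic_read_u32(&counter);	/* reads back 1 */
}
```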
To implement atomics support for a platform/architecture/compiler
combination, only 32-bit compare-and-exchange needs to be implemented.
If available and more efficient, native support for flags, 32-bit
atomic addition, and the corresponding 64-bit operations may also be
provided. Additional useful atomic operations are implemented
generically on top of these.
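
As a sketch of that generic layer (not the actual generic
implementation): assuming the C11-style convention that a failed
compare-and-exchange refreshes *expected with the value currently
stored, which the regression test below also relies on, fetch-add
reduces to a retry loop over compare-and-exchange.

```c
#include "port/atomics.h"

static uint32
cas_fetch_add_u32(pg_atomic_uint32 *ptr, int32 add)
{
	uint32		old = pg_atomic_read_u32(ptr);

	/*
	 * On failure the CAS updates 'old' with the value it found, so each
	 * retry works with fresh contents; on success 'old' holds the pre-add
	 * value, which is exactly what fetch-add must return.
	 */
	while (!pg_atomic_compare_exchange_u32(ptr, &old, old + add))
		;
	return old;
}
```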
The implementations for various versions of gcc, msvc and sun studio
have been tested. Additional existing stub implementations for
* Intel icc
* HP-UX aCC
* IBM xlc
are included but have never been tested. These will likely require
fixes based on buildfarm and user feedback.
As atomic operations also require barriers for some operations, the
existing barrier support has been moved into the atomics code.
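
For context, the classic pairing those barrier primitives support;
pg_write_barrier() and pg_read_barrier() are the pre-existing
primitives being relocated, while the variables here are hypothetical.

```c
#include "port/atomics.h"

static int	payload;			/* hypothetical data guarded by 'ready' */
static volatile int ready = 0;

static void
producer(void)
{
	payload = 42;
	pg_write_barrier();			/* make payload visible before ready */
	ready = 1;
}

static int
consumer(void)
{
	while (!ready)
		;						/* spin until the producer is done */
	pg_read_barrier();			/* don't read payload ahead of ready */
	return payload;
}
```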
Author: Andres Freund with contributions from Oskari Saarenmaa
Reviewed-By: Amit Kapila, Robert Haas, Heikki Linnakangas and Álvaro Herrera
Discussion: CA+TgmoYBW+ux5-8Ja=Mcyuy8=VXAnVRHp3Kess6Pn3DMXAPAEA@mail.gmail.com,
20131015123303.GH5300@awork2.anarazel.de,
20131028205522.GI20248@awork2.anarazel.de
Diffstat (limited to 'src/test')
| -rw-r--r-- | src/test/regress/expected/lock.out | 8 |
| -rw-r--r-- | src/test/regress/input/create_function_1.source | 5 |
| -rw-r--r-- | src/test/regress/output/create_function_1.source | 4 |
| -rw-r--r-- | src/test/regress/regress.c | 239 |
| -rw-r--r-- | src/test/regress/sql/lock.sql | 5 |
5 files changed, 261 insertions, 0 deletions
```diff
diff --git a/src/test/regress/expected/lock.out b/src/test/regress/expected/lock.out
index 0d7c3ba4f3..fd27344503 100644
--- a/src/test/regress/expected/lock.out
+++ b/src/test/regress/expected/lock.out
@@ -60,3 +60,11 @@ DROP TABLE lock_tbl2;
 DROP TABLE lock_tbl1;
 DROP SCHEMA lock_schema1 CASCADE;
 DROP ROLE regress_rol_lock1;
+-- atomic ops tests
+RESET search_path;
+SELECT test_atomic_ops();
+ test_atomic_ops
+-----------------
+ t
+(1 row)
+
diff --git a/src/test/regress/input/create_function_1.source b/src/test/regress/input/create_function_1.source
index aef1518287..1fded846a0 100644
--- a/src/test/regress/input/create_function_1.source
+++ b/src/test/regress/input/create_function_1.source
@@ -57,6 +57,11 @@ CREATE FUNCTION make_tuple_indirect (record)
         AS '@libdir@/regress@DLSUFFIX@'
         LANGUAGE C STRICT;
 
+CREATE FUNCTION test_atomic_ops()
+    RETURNS bool
+    AS '@libdir@/regress@DLSUFFIX@'
+    LANGUAGE C;
+
 -- Things that shouldn't work:
 CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
     AS 'SELECT ''not an integer'';';
diff --git a/src/test/regress/output/create_function_1.source b/src/test/regress/output/create_function_1.source
index 9761d127e1..9926c9073e 100644
--- a/src/test/regress/output/create_function_1.source
+++ b/src/test/regress/output/create_function_1.source
@@ -51,6 +51,10 @@ CREATE FUNCTION make_tuple_indirect (record)
         RETURNS record
         AS '@libdir@/regress@DLSUFFIX@'
         LANGUAGE C STRICT;
+CREATE FUNCTION test_atomic_ops()
+    RETURNS bool
+    AS '@libdir@/regress@DLSUFFIX@'
+    LANGUAGE C;
 -- Things that shouldn't work:
 CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
 AS 'SELECT ''not an integer'';';
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 09e027c1e5..14871716c9 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -18,6 +18,7 @@
 #include "executor/executor.h"
 #include "executor/spi.h"
 #include "miscadmin.h"
+#include "port/atomics.h"
 #include "utils/builtins.h"
 #include "utils/geo_decls.h"
 #include "utils/rel.h"
@@ -865,3 +866,241 @@ wait_pid(PG_FUNCTION_ARGS)
 
 	PG_RETURN_VOID();
 }
+
+#ifndef PG_HAVE_ATOMIC_FLAG_SIMULATION
+static void
+test_atomic_flag(void)
+{
+	pg_atomic_flag flag;
+
+	pg_atomic_init_flag(&flag);
+
+	if (!pg_atomic_unlocked_test_flag(&flag))
+		elog(ERROR, "flag: unexpectedly set");
+
+	if (!pg_atomic_test_set_flag(&flag))
+		elog(ERROR, "flag: couldn't set");
+
+	if (pg_atomic_unlocked_test_flag(&flag))
+		elog(ERROR, "flag: unexpectedly unset");
+
+	if (pg_atomic_test_set_flag(&flag))
+		elog(ERROR, "flag: set spuriously #2");
+
+	pg_atomic_clear_flag(&flag);
+
+	if (!pg_atomic_unlocked_test_flag(&flag))
+		elog(ERROR, "flag: unexpectedly set #2");
+
+	if (!pg_atomic_test_set_flag(&flag))
+		elog(ERROR, "flag: couldn't set");
+
+	pg_atomic_clear_flag(&flag);
+}
+#endif	/* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+
+static void
+test_atomic_uint32(void)
+{
+	pg_atomic_uint32 var;
+	uint32		expected;
+	int			i;
+
+	pg_atomic_init_u32(&var, 0);
+
+	if (pg_atomic_read_u32(&var) != 0)
+		elog(ERROR, "atomic_read_u32() #1 wrong");
+
+	pg_atomic_write_u32(&var, 3);
+
+	if (pg_atomic_read_u32(&var) != 3)
+		elog(ERROR, "atomic_read_u32() #2 wrong");
+
+	if (pg_atomic_fetch_add_u32(&var, 1) != 3)
+		elog(ERROR, "atomic_fetch_add_u32() #1 wrong");
+
+	if (pg_atomic_fetch_sub_u32(&var, 1) != 4)
+		elog(ERROR, "atomic_fetch_sub_u32() #1 wrong");
+
+	if (pg_atomic_sub_fetch_u32(&var, 3) != 0)
+		elog(ERROR, "atomic_sub_fetch_u32() #1 wrong");
+
+	if (pg_atomic_add_fetch_u32(&var, 10) != 10)
+		elog(ERROR, "atomic_add_fetch_u32() #1 wrong");
+
+	if (pg_atomic_exchange_u32(&var, 5) != 10)
+		elog(ERROR, "pg_atomic_exchange_u32() #1 wrong");
+
+	if (pg_atomic_exchange_u32(&var, 0) != 5)
+		elog(ERROR, "pg_atomic_exchange_u32() #0 wrong");
+
+	/* test around numerical limits */
+	if (pg_atomic_fetch_add_u32(&var, INT_MAX) != 0)
+		elog(ERROR, "pg_atomic_fetch_add_u32() #2 wrong");
+
+	if (pg_atomic_fetch_add_u32(&var, INT_MAX) != INT_MAX)
+		elog(ERROR, "pg_atomic_add_fetch_u32() #3 wrong");
+
+	pg_atomic_fetch_add_u32(&var, 1);	/* top up to UINT_MAX */
+
+	if (pg_atomic_read_u32(&var) != UINT_MAX)
+		elog(ERROR, "atomic_read_u32() #2 wrong");
+
+	if (pg_atomic_fetch_sub_u32(&var, INT_MAX) != UINT_MAX)
+		elog(ERROR, "pg_atomic_fetch_sub_u32() #2 wrong");
+
+	if (pg_atomic_read_u32(&var) != (uint32) INT_MAX + 1)
+		elog(ERROR, "atomic_read_u32() #3 wrong: %u", pg_atomic_read_u32(&var));
+
+	expected = pg_atomic_sub_fetch_u32(&var, INT_MAX);
+	if (expected != 1)
+		elog(ERROR, "pg_atomic_sub_fetch_u32() #3 wrong: %u", expected);
+
+	pg_atomic_sub_fetch_u32(&var, 1);
+
+	/* fail exchange because of old expected */
+	expected = 10;
+	if (pg_atomic_compare_exchange_u32(&var, &expected, 1))
+		elog(ERROR, "atomic_compare_exchange_u32() changed value spuriously");
+
+	/* CAS is allowed to fail due to interrupts, try a couple of times */
+	for (i = 0; i < 1000; i++)
+	{
+		expected = 0;
+		if (!pg_atomic_compare_exchange_u32(&var, &expected, 1))
+			break;
+	}
+	if (i == 1000)
+		elog(ERROR, "atomic_compare_exchange_u32() never succeeded");
+	if (pg_atomic_read_u32(&var) != 1)
+		elog(ERROR, "atomic_compare_exchange_u32() didn't set value properly");
+
+	pg_atomic_write_u32(&var, 0);
+
+	/* try setting flagbits */
+	if (pg_atomic_fetch_or_u32(&var, 1) & 1)
+		elog(ERROR, "pg_atomic_fetch_or_u32() #1 wrong");
+
+	if (!(pg_atomic_fetch_or_u32(&var, 2) & 1))
+		elog(ERROR, "pg_atomic_fetch_or_u32() #2 wrong");
+
+	if (pg_atomic_read_u32(&var) != 3)
+		elog(ERROR, "invalid result after pg_atomic_fetch_or_u32()");
+
+	/* try clearing flagbits */
+	if ((pg_atomic_fetch_and_u32(&var, ~2) & 3) != 3)
+		elog(ERROR, "pg_atomic_fetch_and_u32() #1 wrong");
+
+	if (pg_atomic_fetch_and_u32(&var, ~1) != 1)
+		elog(ERROR, "pg_atomic_fetch_and_u32() #2 wrong: is %u",
+			 pg_atomic_read_u32(&var));
+	/* no bits set anymore */
+	if (pg_atomic_fetch_and_u32(&var, ~0) != 0)
+		elog(ERROR, "pg_atomic_fetch_and_u32() #3 wrong");
+}
+
+#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
+static void
+test_atomic_uint64(void)
+{
+	pg_atomic_uint64 var;
+	uint64		expected;
+	int			i;
+
+	pg_atomic_init_u64(&var, 0);
+
+	if (pg_atomic_read_u64(&var) != 0)
+		elog(ERROR, "atomic_read_u64() #1 wrong");
+
+	pg_atomic_write_u64(&var, 3);
+
+	if (pg_atomic_read_u64(&var) != 3)
+		elog(ERROR, "atomic_read_u64() #2 wrong");
+
+	if (pg_atomic_fetch_add_u64(&var, 1) != 3)
+		elog(ERROR, "atomic_fetch_add_u64() #1 wrong");
+
+	if (pg_atomic_fetch_sub_u64(&var, 1) != 4)
+		elog(ERROR, "atomic_fetch_sub_u64() #1 wrong");
+
+	if (pg_atomic_sub_fetch_u64(&var, 3) != 0)
+		elog(ERROR, "atomic_sub_fetch_u64() #1 wrong");
+
+	if (pg_atomic_add_fetch_u64(&var, 10) != 10)
+		elog(ERROR, "atomic_add_fetch_u64() #1 wrong");
+
+	if (pg_atomic_exchange_u64(&var, 5) != 10)
+		elog(ERROR, "pg_atomic_exchange_u64() #1 wrong");
+
+	if (pg_atomic_exchange_u64(&var, 0) != 5)
+		elog(ERROR, "pg_atomic_exchange_u64() #0 wrong");
+
+	/* fail exchange because of old expected */
+	expected = 10;
+	if (pg_atomic_compare_exchange_u64(&var, &expected, 1))
+		elog(ERROR, "atomic_compare_exchange_u64() changed value spuriously");
+
+	/* CAS is allowed to fail due to interrupts, try a couple of times */
+	for (i = 0; i < 100; i++)
+	{
+		expected = 0;
+		if (!pg_atomic_compare_exchange_u64(&var, &expected, 1))
+			break;
+	}
+	if (i == 100)
+		elog(ERROR, "atomic_compare_exchange_u64() never succeeded");
+	if (pg_atomic_read_u64(&var) != 1)
+		elog(ERROR, "atomic_compare_exchange_u64() didn't set value properly");
+
+	pg_atomic_write_u64(&var, 0);
+
+	/* try setting flagbits */
+	if (pg_atomic_fetch_or_u64(&var, 1) & 1)
+		elog(ERROR, "pg_atomic_fetch_or_u64() #1 wrong");
+
+	if (!(pg_atomic_fetch_or_u64(&var, 2) & 1))
+		elog(ERROR, "pg_atomic_fetch_or_u64() #2 wrong");
+
+	if (pg_atomic_read_u64(&var) != 3)
+		elog(ERROR, "invalid result after pg_atomic_fetch_or_u64()");
+
+	/* try clearing flagbits */
+	if ((pg_atomic_fetch_and_u64(&var, ~2) & 3) != 3)
+		elog(ERROR, "pg_atomic_fetch_and_u64() #1 wrong");
+
+	if (pg_atomic_fetch_and_u64(&var, ~1) != 1)
+		elog(ERROR, "pg_atomic_fetch_and_u64() #2 wrong: is " UINT64_FORMAT,
+			 pg_atomic_read_u64(&var));
+	/* no bits set anymore */
+	if (pg_atomic_fetch_and_u64(&var, ~0) != 0)
+		elog(ERROR, "pg_atomic_fetch_and_u64() #3 wrong");
+}
+#endif	/* PG_HAVE_ATOMIC_U64_SUPPORT */
+
+
+PG_FUNCTION_INFO_V1(test_atomic_ops);
+Datum
+test_atomic_ops(PG_FUNCTION_ARGS)
+{
+	/* ---
+	 * Can't run the test under the semaphore emulation, it doesn't handle
+	 * checking two edge cases well:
+	 * - pg_atomic_unlocked_test_flag() always returns true
+	 * - locking a already locked flag blocks
+	 * it seems better to not test the semaphore fallback here, than weaken
+	 * the checks for the other cases. The semaphore code will be the same
+	 * everywhere, whereas the efficient implementations wont.
+	 * ---
+	 */
+#ifndef PG_HAVE_ATOMIC_FLAG_SIMULATION
+	test_atomic_flag();
+#endif
+
+	test_atomic_uint32();
+
+#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
+	test_atomic_uint64();
+#endif
+
+	PG_RETURN_BOOL(true);
+}
diff --git a/src/test/regress/sql/lock.sql b/src/test/regress/sql/lock.sql
index dda212f78c..567e8bccf1 100644
--- a/src/test/regress/sql/lock.sql
+++ b/src/test/regress/sql/lock.sql
@@ -64,3 +64,8 @@ DROP TABLE lock_tbl2;
 DROP TABLE lock_tbl1;
 DROP SCHEMA lock_schema1 CASCADE;
 DROP ROLE regress_rol_lock1;
+
+
+-- atomic ops tests
+RESET search_path;
+SELECT test_atomic_ops();
```
