diff options
author | John Naylor | 2023-08-10 04:36:15 +0000 |
---|---|---|
committer | John Naylor | 2023-08-10 04:36:15 +0000 |
commit | 4d14ccd6af6e788a7b79ff3ed77bda5bc71d2edc (patch) | |
tree | bef29664090f4cad6db22352d3c8a401bf724c27 /meson.build | |
parent | fa2e874946c5b9f23394358c131e987df7cc8ffb (diff) |
Use native CRC instructions on 64-bit LoongArch
As with the Intel and Arm CRC instructions, compiler intrinsics for
them must be supported by the compiler. In contrast, no runtime check
is needed. Aligned memory access is faster, so use the Arm coding as
a model.
YANG Xudong
Discussion: https://postgr.es/m/b522a0c5-e3b2-99cc-6387-58134fb88cbe%40ymatrix.cn
Diffstat (limited to 'meson.build')
-rw-r--r-- | meson.build | 24 |
1 file changed, 24 insertions, 0 deletions
diff --git a/meson.build b/meson.build index 0a11efc97a1..2acb2040037 100644 --- a/meson.build +++ b/meson.build @@ -2065,6 +2065,30 @@ int main(void) cdata.set('USE_ARMV8_CRC32C_WITH_RUNTIME_CHECK', 1) have_optimized_crc = true endif + +elif host_cpu == 'loongarch64' + + prog = ''' +int main(void) +{ + unsigned int crc = 0; + crc = __builtin_loongarch_crcc_w_b_w(0, crc); + crc = __builtin_loongarch_crcc_w_h_w(0, crc); + crc = __builtin_loongarch_crcc_w_w_w(0, crc); + crc = __builtin_loongarch_crcc_w_d_w(0, crc); + + /* return computed value, to prevent the above being optimized away */ + return crc == 0; +} +''' + + if cc.links(prog, name: '__builtin_loongarch_crcc_w_b_w, __builtin_loongarch_crcc_w_h_w, __builtin_loongarch_crcc_w_w_w, and __builtin_loongarch_crcc_w_d_w', + args: test_c_args) + # Use LoongArch CRC instruction unconditionally + cdata.set('USE_LOONGARCH_CRC32C', 1) + have_optimized_crc = true + endif + endif if not have_optimized_crc |