teak-llvm/clang/test/CodeGen/atomic.c
Hal Finkel d2208b59cf Add __sync_fetch_and_nand (again)
Prior to GCC 4.4, __sync_fetch_and_nand was implemented as:

  { tmp = *ptr; *ptr = ~tmp & value; return tmp; }

but this was changed in GCC 4.4 to be:

  { tmp = *ptr; *ptr = ~(tmp & value); return tmp; }
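
To make the difference concrete, consider hypothetical 8-bit values tmp ==
0xFF and value == 0x0F (these numbers are illustrative, not from the test):

  old: *ptr = ~tmp & value   == ~0xFF & 0x0F   == 0x0F
  new: *ptr = ~(tmp & value) == ~(0xFF & 0x0F) == 0xF0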

In response to this change, support for __sync_fetch_and_nand (and
__sync_nand_and_fetch) was removed in r99522 in order to avoid miscompiling
code that depended on the old semantics. However, at this point:

  1. Many years have passed, and the amount of code relying on the old
     semantics is likely smaller.

  2. Through the work of many contributors, all LLVM backends have been updated
     such that "atomicrmw nand" provides the newer GCC 4.4+ semantics (this
     process was completed in July of 2014 and noted in the release notes in
     r212635).

  3. The lack of this intrinsic is now a needless impediment to porting code
     from GCC to Clang (I've now seen several examples of this).

It is true, however, that we still set __GNUC_MINOR__ to 2 (corresponding to
GCC 4.2). To compensate for this, and to address the original concern
regarding code relying on the old semantics, I've added a warning that
specifically notes that the semantics have changed and that we now provide
the newer semantics.
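
Code that keys its expectations off the reported GCC version illustrates why
the warning is needed; a hypothetical sketch of that idiom (not taken from
any particular project):

  /* Under Clang, which reports itself as GCC 4.2, this picks the pre-4.4
     branch even though the builtin now has the 4.4+ semantics. */
  #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)
    /* expects *ptr = ~(tmp & value) */
  #else
    /* expects *ptr = ~tmp & value */
  #endif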

Fixes PR8842.

llvm-svn: 218905
2014-10-02 20:53:50 +00:00

// RUN: %clang_cc1 %s -emit-llvm -o - -triple=i686-apple-darwin9 | FileCheck %s
int atomic(void) {
  // Nonsensical test for the sync functions.
  int old;
  int val = 1;
  char valc = 1;
  _Bool valb = 0;
  unsigned int uval = 1;
  int cmp = 0;
  int* ptrval;
  old = __sync_fetch_and_add(&val, 1);
  // CHECK: atomicrmw add i32* %val, i32 1 seq_cst
  old = __sync_fetch_and_sub(&valc, 2);
  // CHECK: atomicrmw sub i8* %valc, i8 2 seq_cst
  old = __sync_fetch_and_min(&val, 3);
  // CHECK: atomicrmw min i32* %val, i32 3 seq_cst
  old = __sync_fetch_and_max(&val, 4);
  // CHECK: atomicrmw max i32* %val, i32 4 seq_cst
  old = __sync_fetch_and_umin(&uval, 5u);
  // CHECK: atomicrmw umin i32* %uval, i32 5 seq_cst
  old = __sync_fetch_and_umax(&uval, 6u);
  // CHECK: atomicrmw umax i32* %uval, i32 6 seq_cst
  old = __sync_lock_test_and_set(&val, 7);
  // CHECK: atomicrmw xchg i32* %val, i32 7 seq_cst
  old = __sync_swap(&val, 8);
  // CHECK: atomicrmw xchg i32* %val, i32 8 seq_cst
  old = __sync_val_compare_and_swap(&val, 4, 1976);
  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i32* %val, i32 4, i32 1976 seq_cst
  // CHECK: extractvalue { i32, i1 } [[PAIR]], 0
  old = __sync_bool_compare_and_swap(&val, 4, 1976);
  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i32* %val, i32 4, i32 1976 seq_cst
  // CHECK: extractvalue { i32, i1 } [[PAIR]], 1
  old = __sync_fetch_and_and(&val, 0x9);
  // CHECK: atomicrmw and i32* %val, i32 9 seq_cst
  old = __sync_fetch_and_or(&val, 0xa);
  // CHECK: atomicrmw or i32* %val, i32 10 seq_cst
  old = __sync_fetch_and_xor(&val, 0xb);
  // CHECK: atomicrmw xor i32* %val, i32 11 seq_cst
  old = __sync_fetch_and_nand(&val, 0xc);
  // CHECK: atomicrmw nand i32* %val, i32 12 seq_cst
  old = __sync_add_and_fetch(&val, 1);
  // CHECK: atomicrmw add i32* %val, i32 1 seq_cst
  old = __sync_sub_and_fetch(&val, 2);
  // CHECK: atomicrmw sub i32* %val, i32 2 seq_cst
  old = __sync_and_and_fetch(&valc, 3);
  // CHECK: atomicrmw and i8* %valc, i8 3 seq_cst
  old = __sync_or_and_fetch(&valc, 4);
  // CHECK: atomicrmw or i8* %valc, i8 4 seq_cst
  old = __sync_xor_and_fetch(&valc, 5);
  // CHECK: atomicrmw xor i8* %valc, i8 5 seq_cst
  old = __sync_nand_and_fetch(&valc, 6);
  // CHECK: atomicrmw nand i8* %valc, i8 6 seq_cst
  __sync_val_compare_and_swap((void **)0, (void *)0, (void *)0);
  // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i32* null, i32 0, i32 0 seq_cst
  // CHECK: extractvalue { i32, i1 } [[PAIR]], 0
  if (__sync_val_compare_and_swap(&valb, 0, 1)) {
    // CHECK: [[PAIR:%[a-z0-9_.]+]] = cmpxchg i8* %valb, i8 0, i8 1 seq_cst
    // CHECK: [[VAL:%[a-z0-9_.]+]] = extractvalue { i8, i1 } [[PAIR]], 0
    // CHECK: trunc i8 [[VAL]] to i1
    old = 42;
  }
  __sync_bool_compare_and_swap((void **)0, (void *)0, (void *)0);
  // CHECK: cmpxchg i32* null, i32 0, i32 0 seq_cst
  __sync_lock_release(&val);
  // CHECK: store atomic i32 0, {{.*}} release, align 4
  __sync_lock_release(&ptrval);
  // CHECK: store atomic i32 0, {{.*}} release, align 4
  __sync_synchronize();
  // CHECK: fence seq_cst
  return old;
}
// CHECK: @release_return
void release_return(int *lock) {
  // Ensure this is actually returning void all the way through.
  return __sync_lock_release(lock);
  // CHECK: store atomic {{.*}} release, align 4
}
// rdar://8461279 - Atomics with address spaces.
// CHECK: @addrspace
void addrspace(int __attribute__((address_space(256))) * P) {
  __sync_bool_compare_and_swap(P, 0, 1);
  // CHECK: cmpxchg i32 addrspace(256)*{{.*}}, i32 0, i32 1 seq_cst
  __sync_val_compare_and_swap(P, 0, 1);
  // CHECK: cmpxchg i32 addrspace(256)*{{.*}}, i32 0, i32 1 seq_cst
  __sync_xor_and_fetch(P, 123);
  // CHECK: atomicrmw xor i32 addrspace(256)*{{.*}}, i32 123 seq_cst
}