mirror of
https://github.com/Gericom/teak-llvm.git
synced 2025-06-19 11:35:51 -04:00

r326249 wasn't quite enough because we often run out of inlining stack depth limit and for that reason fail to see the atomics we're looking for. Add a more straightforward false-positive suppression that is based on the name of the class. I.e. if we're releasing a pointer in the destructor of a "something shared/intrusive/reference/counting something ptr/pointer something", then any use-after-free or double-free that occurs later would likely be a false positive. rdar://problem/38013606 Differential Revision: https://reviews.llvm.org/D44281 llvm-svn: 328066

79 lines
2.6 KiB
C++
// RUN: %clang_analyze_cc1 -analyzer-checker=core,cplusplus.NewDelete -std=c++11 -verify %s
// RUN: %clang_analyze_cc1 -analyzer-checker=core,cplusplus.NewDeleteLeaks -DLEAKS -std=c++11 -verify %s
// RUN: %clang_analyze_cc1 -analyzer-checker=core,cplusplus.NewDelete -std=c++11 -DTEST_INLINABLE_ALLOCATORS -verify %s
// RUN: %clang_analyze_cc1 -analyzer-checker=core,cplusplus.NewDeleteLeaks -DLEAKS -std=c++11 -DTEST_INLINABLE_ALLOCATORS -verify %s
// RUN: %clang_analyze_cc1 -analyzer-inline-max-stack-depth 2 -analyzer-config ipa-always-inline-size=2 -analyzer-checker=core,cplusplus.NewDelete -std=c++11 -verify %s
// RUN: %clang_analyze_cc1 -analyzer-inline-max-stack-depth 2 -analyzer-config ipa-always-inline-size=2 -analyzer-checker=core,cplusplus.NewDeleteLeaks -DLEAKS -std=c++11 -verify %s
// RUN: %clang_analyze_cc1 -analyzer-inline-max-stack-depth 2 -analyzer-config ipa-always-inline-size=2 -analyzer-checker=core,cplusplus.NewDelete -std=c++11 -DTEST_INLINABLE_ALLOCATORS -verify %s
// RUN: %clang_analyze_cc1 -analyzer-inline-max-stack-depth 2 -analyzer-config ipa-always-inline-size=2 -analyzer-checker=core,cplusplus.NewDeleteLeaks -DLEAKS -std=c++11 -DTEST_INLINABLE_ALLOCATORS -verify %s

// expected-no-diagnostics

#include "Inputs/system-header-simulator-cxx.h"
// Mimic the C11 memory_order enumeration so this test does not depend on
// <stdatomic.h>.  Each enumerator maps onto the corresponding compiler
// built-in __ATOMIC_* constant, which clang predefines.
typedef enum memory_order {
  memory_order_relaxed = __ATOMIC_RELAXED,
  memory_order_consume = __ATOMIC_CONSUME,
  memory_order_acquire = __ATOMIC_ACQUIRE,
  memory_order_release = __ATOMIC_RELEASE,
  memory_order_acq_rel = __ATOMIC_ACQ_REL,
  memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
class Obj {
|
|
int RefCnt;
|
|
|
|
public:
|
|
int incRef() {
|
|
return __c11_atomic_fetch_add((volatile _Atomic(int) *)&RefCnt, 1,
|
|
memory_order_relaxed);
|
|
}
|
|
|
|
int decRef() {
|
|
return __c11_atomic_fetch_sub((volatile _Atomic(int) *)&RefCnt, 1,
|
|
memory_order_relaxed);
|
|
}
|
|
|
|
void foo();
|
|
};
|
|
|
|
class IntrusivePtr {
|
|
Obj *Ptr;
|
|
|
|
public:
|
|
IntrusivePtr(Obj *Ptr) : Ptr(Ptr) {
|
|
Ptr->incRef();
|
|
}
|
|
|
|
IntrusivePtr(const IntrusivePtr &Other) : Ptr(Other.Ptr) {
|
|
Ptr->incRef();
|
|
}
|
|
|
|
~IntrusivePtr() {
|
|
// We should not take the path on which the object is deleted.
|
|
if (Ptr->decRef() == 1)
|
|
delete Ptr;
|
|
}
|
|
|
|
Obj *getPtr() const { return Ptr; } // no-warning
|
|
};
|
|
|
|
void testDestroyLocalRefPtr() {
|
|
IntrusivePtr p1(new Obj());
|
|
{
|
|
IntrusivePtr p2(p1);
|
|
}
|
|
|
|
// p1 still maintains ownership. The object is not deleted.
|
|
p1.getPtr()->foo(); // no-warning
|
|
}
|
|
|
|
void testDestroySymbolicRefPtr(const IntrusivePtr &p1) {
|
|
{
|
|
IntrusivePtr p2(p1);
|
|
}
|
|
|
|
// p1 still maintains ownership. The object is not deleted.
|
|
p1.getPtr()->foo(); // no-warning
|
|
}
|