tsan: Rename lib/tsan to lib/libtsan.
For consistency with other vendored C/C++ libraries.
305
lib/libtsan/builtins/assembly.h
Normal file
@@ -0,0 +1,305 @@
//===-- assembly.h - compiler-rt assembler support macros -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines macros for use in compiler-rt assembler source.
// This file is not part of the interface of this library.
//
//===----------------------------------------------------------------------===//

#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H

#if defined(__linux__) && defined(__CET__)
#if __has_include(<cet.h>)
#include <cet.h>
#endif
#endif

#if defined(__APPLE__) && defined(__aarch64__)
#define SEPARATOR %%
#else
#define SEPARATOR ;
#endif

#if defined(__APPLE__)
#define HIDDEN(name) .private_extern name
#define LOCAL_LABEL(name) L_##name
// tell linker it can break up file at label boundaries
#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
#define SYMBOL_IS_FUNC(name)
#define CONST_SECTION .const

#define NO_EXEC_STACK_DIRECTIVE

#elif defined(__ELF__)

#define HIDDEN(name) .hidden name
#define LOCAL_LABEL(name) .L_##name
#define FILE_LEVEL_DIRECTIVE
#if defined(__arm__) || defined(__aarch64__)
#define SYMBOL_IS_FUNC(name) .type name,%function
#else
#define SYMBOL_IS_FUNC(name) .type name,@function
#endif
#define CONST_SECTION .section .rodata

#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
    defined(__linux__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif

#else // !__APPLE__ && !__ELF__

#define HIDDEN(name)
#define LOCAL_LABEL(name) .L ## name
#define FILE_LEVEL_DIRECTIVE
#define SYMBOL_IS_FUNC(name) \
  .def name SEPARATOR \
    .scl 2 SEPARATOR \
    .type 32 SEPARATOR \
  .endef
#define CONST_SECTION .section .rdata,"rd"

#define NO_EXEC_STACK_DIRECTIVE

#endif

#if defined(__arm__) || defined(__aarch64__)
#define FUNC_ALIGN \
  .text SEPARATOR \
  .balign 16 SEPARATOR
#else
#define FUNC_ALIGN
#endif

// BTI and PAC gnu property note
#define NT_GNU_PROPERTY_TYPE_0 5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI 1
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC 2

#if defined(__ARM_FEATURE_BTI_DEFAULT)
#define BTI_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_BTI
#else
#define BTI_FLAG 0
#endif

#if __ARM_FEATURE_PAC_DEFAULT & 3
#define PAC_FLAG GNU_PROPERTY_AARCH64_FEATURE_1_PAC
#else
#define PAC_FLAG 0
#endif

#define GNU_PROPERTY(type, value) \
  .pushsection .note.gnu.property, "a" SEPARATOR \
  .p2align 3 SEPARATOR \
  .word 4 SEPARATOR \
  .word 16 SEPARATOR \
  .word NT_GNU_PROPERTY_TYPE_0 SEPARATOR \
  .asciz "GNU" SEPARATOR \
  .word type SEPARATOR \
  .word 4 SEPARATOR \
  .word value SEPARATOR \
  .word 0 SEPARATOR \
  .popsection

#if BTI_FLAG != 0
#define BTI_C hint #34
#define BTI_J hint #36
#else
#define BTI_C
#define BTI_J
#endif

#if (BTI_FLAG | PAC_FLAG) != 0
#define GNU_PROPERTY_BTI_PAC \
  GNU_PROPERTY(GNU_PROPERTY_AARCH64_FEATURE_1_AND, BTI_FLAG | PAC_FLAG)
#else
#define GNU_PROPERTY_BTI_PAC
#endif

#if defined(__clang__) || defined(__GCC_HAVE_DWARF2_CFI_ASM)
#define CFI_START .cfi_startproc
#define CFI_END .cfi_endproc
#else
#define CFI_START
#define CFI_END
#endif

#if defined(__arm__)

// Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
// - for '-mthumb -march=armv6' compiler defines '__thumb__'
// - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
#if defined(__thumb2__) || defined(__thumb__)
#define DEFINE_CODE_STATE .thumb SEPARATOR
#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
#if defined(__thumb2__)
#define USE_THUMB_2
#define IT(cond) it cond
#define ITT(cond) itt cond
#define ITE(cond) ite cond
#else
#define USE_THUMB_1
#define IT(cond)
#define ITT(cond)
#define ITE(cond)
#endif // defined(__thumb2__)
#else // !defined(__thumb2__) && !defined(__thumb__)
#define DEFINE_CODE_STATE .arm SEPARATOR
#define DECLARE_FUNC_ENCODING
#define IT(cond)
#define ITT(cond)
#define ITE(cond)
#endif

#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
#endif

#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
    (__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif

#ifdef ARM_HAS_BX
#define JMP(r) bx r
#define JMPc(r, c) bx##c r
#else
#define JMP(r) mov pc, r
#define JMPc(r, c) mov##c pc, r
#endif

// pop {pc} can't switch Thumb mode on ARMv4T
#if __ARM_ARCH >= 5
#define POP_PC() pop {pc}
#else
#define POP_PC() \
  pop {ip}; \
  JMP(ip)
#endif

#if defined(USE_THUMB_2)
#define WIDE(op) op.w
#else
#define WIDE(op) op
#endif
#else // !defined(__arm__)
#define DECLARE_FUNC_ENCODING
#define DEFINE_CODE_STATE
#endif

#define GLUE2_(a, b) a##b
#define GLUE(a, b) GLUE2_(a, b)
#define GLUE2(a, b) GLUE2_(a, b)
#define GLUE3_(a, b, c) a##b##c
#define GLUE3(a, b, c) GLUE3_(a, b, c)
#define GLUE4_(a, b, c, d) a##b##c##d
#define GLUE4(a, b, c, d) GLUE4_(a, b, c, d)

#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)

#ifdef VISIBILITY_HIDDEN
#define DECLARE_SYMBOL_VISIBILITY(name) \
  HIDDEN(SYMBOL_NAME(name)) SEPARATOR
#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) \
  HIDDEN(name) SEPARATOR
#else
#define DECLARE_SYMBOL_VISIBILITY(name)
#define DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name)
#endif

#define DEFINE_COMPILERRT_FUNCTION(name) \
  DEFINE_CODE_STATE \
  FILE_LEVEL_DIRECTIVE SEPARATOR \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  DECLARE_SYMBOL_VISIBILITY(name) \
  DECLARE_FUNC_ENCODING \
  SYMBOL_NAME(name):

#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
  DEFINE_CODE_STATE \
  FILE_LEVEL_DIRECTIVE SEPARATOR \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
  .thumb_func SEPARATOR \
  SYMBOL_NAME(name):

#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
  DEFINE_CODE_STATE \
  FILE_LEVEL_DIRECTIVE SEPARATOR \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
  DECLARE_FUNC_ENCODING \
  SYMBOL_NAME(name):

#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
  DEFINE_CODE_STATE \
  .globl name SEPARATOR \
  SYMBOL_IS_FUNC(name) SEPARATOR \
  HIDDEN(name) SEPARATOR \
  DECLARE_FUNC_ENCODING \
  name:

#define DEFINE_COMPILERRT_OUTLINE_FUNCTION_UNMANGLED(name) \
  DEFINE_CODE_STATE \
  FUNC_ALIGN \
  .globl name SEPARATOR \
  SYMBOL_IS_FUNC(name) SEPARATOR \
  DECLARE_SYMBOL_VISIBILITY_UNMANGLED(name) SEPARATOR \
  DECLARE_FUNC_ENCODING \
  name: \
  SEPARATOR CFI_START \
  SEPARATOR BTI_C

#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
  .globl SYMBOL_NAME(name) SEPARATOR \
  SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
  DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
  .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR

#if defined(__ARM_EABI__)
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
  DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
#else
#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
#endif

#ifdef __ELF__
#define END_COMPILERRT_FUNCTION(name) \
  .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
  CFI_END SEPARATOR \
  .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
#else
#define END_COMPILERRT_FUNCTION(name)
#define END_COMPILERRT_OUTLINE_FUNCTION(name) \
  CFI_END
#endif

#ifdef __arm__
#include "int_endianness.h"

#if _YUGA_BIG_ENDIAN
#define VMOV_TO_DOUBLE(dst, src0, src1) vmov dst, src1, src0 SEPARATOR
#define VMOV_FROM_DOUBLE(dst0, dst1, src) vmov dst1, dst0, src SEPARATOR
#else
#define VMOV_TO_DOUBLE(dst, src0, src1) vmov dst, src0, src1 SEPARATOR
#define VMOV_FROM_DOUBLE(dst0, dst1, src) vmov dst0, dst1, src SEPARATOR
#endif
#endif

#endif // COMPILERRT_ASSEMBLY_H
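For orientation, a minimal sketch of how a compiler-rt assembler source consumes these macros; the function name is hypothetical and an AArch64 ELF target is assumed (editor's illustration, not part of this commit):

// hypothetical_zero.S -- illustrative only.
#include "assembly.h"

        .text
DEFINE_COMPILERRT_FUNCTION(__hypothetical_zero)
        BTI_C                  // expands to `hint #34` only when BTI is enabled
        mov     x0, #0
        ret
END_COMPILERRT_FUNCTION(__hypothetical_zero)

NO_EXEC_STACK_DIRECTIVE        // mark the stack non-executable on ELF targets
GNU_PROPERTY_BTI_PAC           // emit the .note.gnu.property section if needed

The macros hide per-platform symbol naming, visibility, and .type/.size bookkeeping, so the same .S file can be assembled for Mach-O, ELF, and COFF.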
391
lib/libtsan/interception/interception.h
Normal file
@@ -0,0 +1,391 @@
//===-- interception.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Machinery for providing replacements/wrappers for system functions.
//===----------------------------------------------------------------------===//

#ifndef INTERCEPTION_H
#define INTERCEPTION_H

#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_internal_defs.h"

#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_APPLE && \
    !SANITIZER_NETBSD && !SANITIZER_WINDOWS && !SANITIZER_FUCHSIA && \
    !SANITIZER_SOLARIS
# error "Interception doesn't work on this operating system."
#endif

// These typedefs should be used only in the interceptor definitions to replace
// the standard system types (e.g. SSIZE_T instead of ssize_t)
// On Windows the system headers (basetsd.h) provide a conflicting definition
// of SIZE_T/SSIZE_T that do not match the real size_t/ssize_t for 32-bit
// systems (using long instead of the expected int). Work around the typedef
// redefinition by #defining SIZE_T instead of using a typedef.
// TODO: We should be using __sanitizer::usize (and a new ssize) instead of
// these new macros as long as we ensure they match the real system definitions.
#if SANITIZER_WINDOWS
// Ensure that (S)SIZE_T were already defined as we are about to override them.
# include <basetsd.h>
#endif

#define SIZE_T __sanitizer::usize
#define SSIZE_T __sanitizer::ssize
typedef __sanitizer::sptr PTRDIFF_T;
typedef __sanitizer::s64 INTMAX_T;
typedef __sanitizer::u64 UINTMAX_T;
typedef __sanitizer::OFF_T OFF_T;
typedef __sanitizer::OFF64_T OFF64_T;

// How to add an interceptor:
// Suppose you need to wrap/replace system function (generally, from libc):
//      int foo(const char *bar, double baz);
// You'll need to:
//      1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in
//         your source file. See the notes below for cases when
//         INTERCEPTOR_WITH_SUFFIX(...) should be used instead.
//      2) Call "INTERCEPT_FUNCTION(foo)" prior to the first call of "foo".
//         INTERCEPT_FUNCTION(foo) evaluates to "true" iff the function was
//         intercepted successfully.
// You can access original function by calling REAL(foo)(bar, baz).
// By default, REAL(foo) will be visible only inside your interceptor, and if
// you want to use it in other parts of RTL, you'll need to:
//      3a) add DECLARE_REAL(int, foo, const char*, double) to a
//          header file.
// However, if the call "INTERCEPT_FUNCTION(foo)" and definition for
// INTERCEPTOR(..., foo, ...) are in different files, you'll instead need to:
//      3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double)
//          to a header file.

// Notes: 1. Things may not work properly if macro INTERCEPTOR(...) {...} or
//           DECLARE_REAL(...) are located inside namespaces.
//        2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo)" to
//           effectively redirect calls from "foo" to "zoo". In this case
//           you aren't required to implement
//           INTERCEPTOR(int, foo, const char *bar, double baz) {...}
//           but instead you'll have to add
//           DECLARE_REAL(int, foo, const char *bar, double baz) in your
//           source file (to define a pointer to overridden function).
//        3. Some Mac functions have symbol variants discriminated by
//           additional suffixes, e.g. _$UNIX2003 (see
//           https://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/index.html
//           for more details). To intercept such functions you need to use the
//           INTERCEPTOR_WITH_SUFFIX(...) macro.

// How it works on Linux
// ---------------------
//
// To replace system functions on Linux we just need to declare functions with
// the same names in our library and then obtain the real function pointers
// using dlsym().
//
// There is one complication: a user may also intercept some of the functions we
// intercept. To allow for up to 3 interceptors (including ours) of a given
// function "func", the interceptor implementation is in ___interceptor_func,
// which is aliased by a weak function __interceptor_func, which in turn is
// aliased (via a trampoline) by weak wrapper function "func".
//
// Most user interceptors should define a foreign interceptor as follows:
//
//  - provide a non-weak function "func" that performs interception;
//  - if __interceptor_func exists, call it to perform the real functionality;
//  - if it does not exist, figure out the real function and call it instead.
//
// In rare cases, a foreign interceptor (of another dynamic analysis runtime)
// may be defined as follows (on supported architectures):
//
//  - provide a non-weak function __interceptor_func that performs interception;
//  - if ___interceptor_func exists, call it to perform the real functionality;
//  - if it does not exist, figure out the real function and call it instead;
//  - provide a weak function "func" that is an alias to __interceptor_func.
//
// With this protocol, sanitizer interceptors, foreign user interceptors, and
// foreign interceptors of other dynamic analysis runtimes, or any combination
// thereof, may co-exist simultaneously.
//
// How it works on Mac OS
// ----------------------
//
// This is not so on Mac OS, where the two-level namespace makes our replacement
// functions invisible to other libraries. This may be overcome using the
// DYLD_FORCE_FLAT_NAMESPACE, but some errors loading the shared libraries in
// Chromium were noticed when doing so.
//
// Instead we create a dylib containing a __DATA,__interpose section that
// associates library functions with their wrappers. When this dylib is
// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all the
// calls to interposed functions done through stubs to the wrapper functions.
//
// As it's decided at compile time which functions are to be intercepted on Mac,
// INTERCEPT_FUNCTION() is effectively a no-op on this system.

#if SANITIZER_APPLE
#include <sys/cdefs.h>  // For __DARWIN_ALIAS_C().

// Just a pair of pointers.
struct interpose_substitution {
  const __sanitizer::uptr replacement;
  const __sanitizer::uptr original;
};

// For a function foo() create a global pair of pointers { wrap_foo, foo } in
// the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
#define INTERPOSER(func_name) __attribute__((used)) \
  const interpose_substitution substitution_##func_name[] \
      __attribute__((section("__DATA, __interpose"))) = { \
    { reinterpret_cast<const uptr>(WRAP(func_name)), \
      reinterpret_cast<const uptr>(func_name) } \
  }

// For a function foo() and a wrapper function bar() create a global pair
// of pointers { bar, foo } in the __DATA,__interpose section.
// As a result all the calls to foo() will be routed to bar() at runtime.
#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
  const interpose_substitution substitution_##func_name[] \
      __attribute__((section("__DATA, __interpose"))) = { \
    { reinterpret_cast<const uptr>(wrapper_name), \
      reinterpret_cast<const uptr>(func_name) } \
  }

# define WRAP(x) wrap_##x
# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE
# define DECLARE_WRAPPER(ret_type, func, ...)

#elif SANITIZER_WINDOWS
# define WRAP(x) __asan_wrap_##x
# define TRAMPOLINE(x) WRAP(x)
# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
# define DECLARE_WRAPPER(ret_type, func, ...) \
    extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
    extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
#elif !SANITIZER_FUCHSIA  // LINUX, FREEBSD, NETBSD, SOLARIS
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# if ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
// Weak aliases of weak aliases do not work, therefore we need to set up a
// trampoline function. The function "func" is a weak alias to the trampoline
// (so that we may check if "func" was overridden), which calls the weak
// function __interceptor_func, which in turn aliases the actual interceptor
// implementation ___interceptor_func:
//
// [wrapper "func": weak] --(alias)--> [TRAMPOLINE(func)]
//                                         |
//            +--------(tail call)---------+
//            |
//            v
// [__interceptor_func: weak] --(alias)--> [WRAP(func)]
//
// We use inline assembly to define most of this, because not all compilers
// support functions with the "naked" attribute with every architecture.
# define WRAP(x) ___interceptor_ ## x
# define TRAMPOLINE(x) __interceptor_trampoline_ ## x
# if SANITIZER_FREEBSD || SANITIZER_NETBSD
// FreeBSD's dynamic linker (incompliantly) gives non-weak symbols higher
// priority than weak ones so weak aliases won't work for indirect calls
// in position-independent (-fPIC / -fPIE) mode.
# define __ASM_WEAK_WRAPPER(func) ".globl " #func "\n"
# else
# define __ASM_WEAK_WRAPPER(func) ".weak " #func "\n"
# endif  // SANITIZER_FREEBSD || SANITIZER_NETBSD
# if defined(__arm__) || defined(__aarch64__)
# define ASM_TYPE_FUNCTION_STR "%function"
# else
# define ASM_TYPE_FUNCTION_STR "@function"
# endif
// Keep trampoline implementation in sync with sanitizer_common/sanitizer_asm.h
# define DECLARE_WRAPPER(ret_type, func, ...) \
    extern "C" ret_type func(__VA_ARGS__); \
    extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
    extern "C" ret_type __interceptor_##func(__VA_ARGS__) \
        INTERCEPTOR_ATTRIBUTE __attribute__((weak)) ALIAS(WRAP(func)); \
    asm( \
      ".text\n" \
      __ASM_WEAK_WRAPPER(func) \
      ".set " #func ", " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
      ".globl " SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
      ".type " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
        ASM_TYPE_FUNCTION_STR "\n" \
      SANITIZER_STRINGIFY(TRAMPOLINE(func)) ":\n" \
      C_ASM_STARTPROC "\n" \
      C_ASM_TAIL_CALL(SANITIZER_STRINGIFY(TRAMPOLINE(func)), \
                      "__interceptor_" \
                        SANITIZER_STRINGIFY(ASM_PREEMPTIBLE_SYM(func))) "\n" \
      C_ASM_ENDPROC "\n" \
      ".size " SANITIZER_STRINGIFY(TRAMPOLINE(func)) ", " \
        ".-" SANITIZER_STRINGIFY(TRAMPOLINE(func)) "\n" \
    );
# else  // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
// Some architectures cannot implement efficient interceptor trampolines with
// just a plain jump due to complexities of resolving a preemptible symbol. In
// those cases, revert to just this scheme:
//
// [wrapper "func": weak] --(alias)--> [WRAP(func)]
//
# define WRAP(x) __interceptor_ ## x
# define TRAMPOLINE(x) WRAP(x)
# if SANITIZER_FREEBSD || SANITIZER_NETBSD
# define __ATTRIBUTE_WEAK_WRAPPER
# else
# define __ATTRIBUTE_WEAK_WRAPPER __attribute__((weak))
# endif  // SANITIZER_FREEBSD || SANITIZER_NETBSD
# define DECLARE_WRAPPER(ret_type, func, ...) \
    extern "C" ret_type func(__VA_ARGS__) \
        INTERCEPTOR_ATTRIBUTE __ATTRIBUTE_WEAK_WRAPPER ALIAS(WRAP(func));
# endif  // ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT
#endif

#if SANITIZER_FUCHSIA
// There is no general interception at all on Fuchsia.
// Sanitizer runtimes just define functions directly to preempt them,
// and have bespoke ways to access the underlying libc functions.
# include <zircon/sanitizer.h>
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define REAL(x) __unsanitized_##x
# define DECLARE_REAL(ret_type, func, ...)
#elif !SANITIZER_APPLE
# define PTR_TO_REAL(x) real_##x
# define REAL(x) __interception::PTR_TO_REAL(x)
# define FUNC_TYPE(x) x##_type

# define DECLARE_REAL(ret_type, func, ...) \
    typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
    namespace __interception { \
      extern FUNC_TYPE(func) PTR_TO_REAL(func); \
    }
# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else  // SANITIZER_APPLE
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
    extern "C" ret_type func(__VA_ARGS__);
# define ASSIGN_REAL(x, y)
#endif  // SANITIZER_APPLE

#if !SANITIZER_FUCHSIA
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
    DECLARE_REAL(ret_type, func, __VA_ARGS__) \
    extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
    extern "C" ret_type WRAP(func)(__VA_ARGS__);
// Declare an interceptor and its wrapper defined in a different translation
// unit (ex. asm).
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
    extern "C" ret_type TRAMPOLINE(func)(__VA_ARGS__); \
    extern "C" ret_type WRAP(func)(__VA_ARGS__); \
    extern "C" ret_type func(__VA_ARGS__);
#else
# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
#endif

// Generally, you don't need to use DEFINE_REAL by itself, as the INTERCEPTOR
// macro does its job. In exceptional cases you may need to call REAL(foo)
// without defining INTERCEPTOR(..., foo, ...). For example, if you override
// foo with an interceptor for another function.
#if !SANITIZER_APPLE && !SANITIZER_FUCHSIA
# define DEFINE_REAL(ret_type, func, ...) \
    typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
    namespace __interception { \
      FUNC_TYPE(func) PTR_TO_REAL(func); \
    }
#else
# define DEFINE_REAL(ret_type, func, ...)
#endif

#if SANITIZER_FUCHSIA

// We need to define the __interceptor_func name just to get
// sanitizer_common/scripts/gen_dynamic_list.py to export func.
// But we don't need to export __interceptor_func to get that.
#define INTERCEPTOR(ret_type, func, ...) \
  extern "C"[[ gnu::alias(#func), gnu::visibility("hidden") ]] ret_type \
      __interceptor_##func(__VA_ARGS__); \
  extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)

#elif !SANITIZER_APPLE

#define INTERCEPTOR(ret_type, func, ...) \
  DEFINE_REAL(ret_type, func, __VA_ARGS__) \
  DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
  extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)

// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
  INTERCEPTOR(ret_type, func, __VA_ARGS__)

#else  // SANITIZER_APPLE

#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
  extern "C" ret_type func(__VA_ARGS__) suffix; \
  extern "C" ret_type WRAP(func)(__VA_ARGS__); \
  INTERPOSER(func); \
  extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)

#define INTERCEPTOR(ret_type, func, ...) \
  INTERCEPTOR_ZZZ(/*no symbol variants*/, ret_type, func, __VA_ARGS__)

#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
  INTERCEPTOR_ZZZ(__DARWIN_ALIAS_C(func), ret_type, func, __VA_ARGS__)

// Override |overridee| with |overrider|.
#define OVERRIDE_FUNCTION(overridee, overrider) \
  INTERPOSER_2(overridee, WRAP(overrider))
#endif

#if SANITIZER_WINDOWS
# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
    typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
    namespace __interception { \
      FUNC_TYPE(func) PTR_TO_REAL(func); \
    } \
    extern "C" INTERCEPTOR_ATTRIBUTE ret_type __stdcall WRAP(func)(__VA_ARGS__)
#endif

// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
// so we use casts via uintptr_t (the local __sanitizer::uptr equivalent).
namespace __interception {

#if defined(__ELF__) && !SANITIZER_FUCHSIA
// The use of interceptors makes many sanitizers unusable for static linking.
// Define a function that, if called, will cause a linker error (undefined
// _DYNAMIC). However, -static-pie (which is not common) cannot be detected at
// link time.
extern uptr kDynamic[] asm("_DYNAMIC");
inline void DoesNotSupportStaticLinking() {
  [[maybe_unused]] volatile auto x = &kDynamic;
}
#else
inline void DoesNotSupportStaticLinking() {}
#endif
}  // namespace __interception

#define INCLUDED_FROM_INTERCEPTION_LIB

#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS

# include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
    INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
#elif SANITIZER_APPLE
# include "interception_mac.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
    INTERCEPT_FUNCTION_VER_MAC(func, symver)
#elif SANITIZER_WINDOWS
# include "interception_win.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
    INTERCEPT_FUNCTION_VER_WIN(func, symver)
#endif

#undef INCLUDED_FROM_INTERCEPTION_LIB

#endif  // INTERCEPTION_H
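To make the "How to add an interceptor" recipe in this header concrete, a minimal sketch using the placeholder function from the comment (`foo` is illustrative; error handling is elided):

#include "interception/interception.h"

// Step 1: the wrapper body. REAL(foo) forwards to the original function.
INTERCEPTOR(int, foo, const char *bar, double baz) {
  // ... tool-specific checks on bar/baz would go here ...
  return REAL(foo)(bar, baz);
}

// Step 2: resolve the real function before the first call to foo().
void InitializeToolInterceptors() {
  if (!INTERCEPT_FUNCTION(foo)) {
    // interception failed (a no-op on Mac, where interposition is static)
  }
}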
83
lib/libtsan/interception/interception_linux.cpp
Normal file
@@ -0,0 +1,83 @@
//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//

#include "interception.h"

#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS

#include <dlfcn.h>  // for dlsym() and dlvsym()

namespace __interception {

#if SANITIZER_NETBSD
// Note: despite the strcmp-like name, this returns true iff the strings are
// equal.
static int StrCmp(const char *s1, const char *s2) {
  while (true) {
    if (*s1 != *s2)
      return false;
    if (*s1 == 0)
      return true;
    s1++;
    s2++;
  }
}
#endif

static void *GetFuncAddr(const char *name, uptr trampoline) {
#if SANITIZER_NETBSD
  // FIXME: Find a better way to handle renames
  if (StrCmp(name, "sigaction"))
    name = "__sigaction14";
#endif
  void *addr = dlsym(RTLD_NEXT, name);
  if (!addr) {
    // If the lookup using RTLD_NEXT failed, the sanitizer runtime library is
    // later in the library search order than the DSO that we are trying to
    // intercept, which means that we cannot intercept this function. We still
    // want the address of the real definition, though, so look it up using
    // RTLD_DEFAULT.
    addr = dlsym(RTLD_DEFAULT, name);

    // In case `name' is not loaded, dlsym ends up finding the actual wrapper.
    // We don't want to intercept the wrapper and have it point to itself.
    if ((uptr)addr == trampoline)
      addr = nullptr;
  }
  return addr;
}

bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
                       uptr trampoline) {
  void *addr = GetFuncAddr(name, trampoline);
  *ptr_to_real = (uptr)addr;
  return addr && (func == trampoline);
}

// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
static void *GetFuncAddr(const char *name, const char *ver) {
  return dlvsym(RTLD_NEXT, name, ver);
}

bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
                       uptr func, uptr trampoline) {
  void *addr = GetFuncAddr(name, ver);
  *ptr_to_real = (uptr)addr;
  return addr && (func == trampoline);
}
#endif  // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD

}  // namespace __interception

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
        // SANITIZER_SOLARIS
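The dlsym(RTLD_NEXT, ...) lookup above is the core of interception on these platforms. A self-contained illustration of the same technique outside the runtime, as a hypothetical preload library (editor's sketch; the build line is an assumption):

// puts_preload.cpp -- illustrative only.
// Build (assumed): clang++ -shared -fPIC puts_preload.cpp -o libputs.so
// (glibc requires _GNU_SOURCE for RTLD_NEXT; g++/clang++ predefine it)
#include <dlfcn.h>

extern "C" int puts(const char *s) {
  using puts_t = int (*)(const char *);
  // Next definition in library search order -- the same lookup that
  // GetFuncAddr() performs for the sanitizer runtime.
  static puts_t real_puts = reinterpret_cast<puts_t>(dlsym(RTLD_NEXT, "puts"));
  // ... instrumentation would run here ...
  return real_puts ? real_puts(s) : -1;
}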
55
lib/libtsan/interception/interception_linux.h
Normal file
@@ -0,0 +1,55 @@
//===-- interception_linux.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//

#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS

#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error interception_linux.h should be included from interception library only
#endif

#ifndef INTERCEPTION_LINUX_H
#define INTERCEPTION_LINUX_H

namespace __interception {
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
                       uptr trampoline);
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
                       uptr func, uptr trampoline);
}  // namespace __interception

// Cast func to type of REAL(func) before casting to uptr in case it is an
// overloaded function, which is the case for some glibc functions when
// _FORTIFY_SOURCE is used. This disambiguates which overload to use.
#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
  ::__interception::InterceptFunction( \
      #func, (::__interception::uptr *)&REAL(func), \
      (::__interception::uptr)(decltype(REAL(func)))&(func), \
      (::__interception::uptr) &TRAMPOLINE(func))

// dlvsym is a GNU extension supported by some other platforms.
#if SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
  ::__interception::InterceptFunction( \
      #func, symver, \
      (::__interception::uptr *)&REAL(func), \
      (::__interception::uptr)(decltype(REAL(func)))&(func), \
      (::__interception::uptr)&TRAMPOLINE(func))
#else
#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
  INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
#endif  // SANITIZER_GLIBC || SANITIZER_FREEBSD || SANITIZER_NETBSD

#endif  // INTERCEPTION_LINUX_H
#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
        // SANITIZER_SOLARIS
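The decltype cast in INTERCEPT_FUNCTION_LINUX_OR_FREEBSD is worth a small demonstration; here a hypothetical overload set stands in for the extra overloads _FORTIFY_SOURCE can introduce (editor's sketch):

#include <cstdint>

int bar(int x) { return x; }    // hypothetical overload set: plain `&bar`
long bar(long x) { return x; }  // below would be ambiguous without a cast

int main() {
  using bar_int_t = int (*)(int);
  // The cast performs overload resolution first, then the result is
  // converted to an integer -- mirroring (decltype(REAL(func)))&(func).
  uintptr_t addr = (uintptr_t)(bar_int_t)&bar;
  return addr != 0 ? 0 : 1;
}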
18
lib/libtsan/interception/interception_mac.cpp
Normal file
@@ -0,0 +1,18 @@
//===-- interception_mac.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Mac-specific interception methods.
//===----------------------------------------------------------------------===//

#include "interception.h"

#if SANITIZER_APPLE

#endif  // SANITIZER_APPLE
27
lib/libtsan/interception/interception_mac.h
Normal file
@@ -0,0 +1,27 @@
//===-- interception_mac.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Mac-specific interception methods.
//===----------------------------------------------------------------------===//

#if SANITIZER_APPLE

#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_mac.h should be included from interception.h only"
#endif

#ifndef INTERCEPTION_MAC_H
#define INTERCEPTION_MAC_H

#define INTERCEPT_FUNCTION_MAC(func)
#define INTERCEPT_FUNCTION_VER_MAC(func, symver)

#endif  // INTERCEPTION_MAC_H
#endif  // SANITIZER_APPLE
46
lib/libtsan/interception/interception_type_test.cpp
Normal file
@@ -0,0 +1,46 @@
//===-- interception_type_test.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Compile-time tests of the internal type definitions.
//===----------------------------------------------------------------------===//

#include "interception.h"
#include "sanitizer_common/sanitizer_type_traits.h"

#if __has_include(<sys/types.h>)
# include <sys/types.h>
#endif
#include <stddef.h>
#include <stdint.h>

COMPILER_CHECK((__sanitizer::is_same<__sanitizer::uptr, ::uintptr_t>::value));
COMPILER_CHECK((__sanitizer::is_same<__sanitizer::sptr, ::intptr_t>::value));
COMPILER_CHECK((__sanitizer::is_same<__sanitizer::usize, ::size_t>::value));
COMPILER_CHECK((__sanitizer::is_same<::PTRDIFF_T, ::ptrdiff_t>::value));
COMPILER_CHECK((__sanitizer::is_same<::SIZE_T, ::size_t>::value));
#if !SANITIZER_WINDOWS
// No ssize_t on Windows.
COMPILER_CHECK((__sanitizer::is_same<::SSIZE_T, ::ssize_t>::value));
#endif
// TODO: These are not actually the same type on Linux (long vs long long)
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
COMPILER_CHECK(sizeof(::UINTMAX_T) == sizeof(uintmax_t));

#if SANITIZER_GLIBC || SANITIZER_ANDROID
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif

// The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
#if !SANITIZER_WINDOWS && (SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \
                           _FILE_OFFSET_BITS != 64)
COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
#endif
1443
lib/libtsan/interception/interception_win.cpp
Normal file
File diff suppressed because it is too large
91
lib/libtsan/interception/interception_win.h
Normal file
@@ -0,0 +1,91 @@
//===-- interception_win.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Windows-specific interception methods.
//===----------------------------------------------------------------------===//

#if SANITIZER_WINDOWS

#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_win.h should be included from interception library only"
#endif

#ifndef INTERCEPTION_WIN_H
#define INTERCEPTION_WIN_H

namespace __interception {
// All the functions in the OverrideFunction() family return true on success,
// false on failure (including "couldn't find the function").

// Overrides a function by its address.
bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);

// Overrides a function in a system DLL or DLL CRT by its exported name.
bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);

// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
uptr InternalGetProcAddress(void *module, const char *func_name);

// Overrides a function only when it is called from a specific DLL. For example,
// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without
// affecting other third party libraries.
bool OverrideImportedFunction(const char *module_to_patch,
                              const char *imported_module,
                              const char *function_name, uptr new_function,
                              uptr *orig_old_func);

// Sets a callback to be used for reporting errors by interception_win. The
// callback will be called with printf-like arguments. Intended to be used with
// __sanitizer::Report. Pass nullptr to disable error reporting (default).
void SetErrorReportCallback(void (*callback)(const char *format, ...));

#if !SANITIZER_WINDOWS64
// Exposed for unittests
bool OverrideFunctionWithDetour(
    uptr old_func, uptr new_func, uptr *orig_old_func);
#endif

// Exposed for unittests
bool OverrideFunctionWithRedirectJump(
    uptr old_func, uptr new_func, uptr *orig_old_func);
bool OverrideFunctionWithHotPatch(
    uptr old_func, uptr new_func, uptr *orig_old_func);
bool OverrideFunctionWithTrampoline(
    uptr old_func, uptr new_func, uptr *orig_old_func);

// Exposed for unittests
void TestOnlyReleaseTrampolineRegions();

// Exposed for unittests
SIZE_T TestOnlyGetInstructionSize(uptr address, SIZE_T *rel_offset);

}  // namespace __interception

#if defined(INTERCEPTION_DYNAMIC_CRT)
#define INTERCEPT_FUNCTION_WIN(func) \
  ::__interception::OverrideFunction(#func, \
                                     (::__interception::uptr)WRAP(func), \
                                     (::__interception::uptr *)&REAL(func))
#else
#define INTERCEPT_FUNCTION_WIN(func) \
  ::__interception::OverrideFunction((::__interception::uptr)func, \
                                     (::__interception::uptr)WRAP(func), \
                                     (::__interception::uptr *)&REAL(func))
#endif

#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)

#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func) \
  ::__interception::OverrideImportedFunction( \
      user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \
      (::__interception::uptr *)&REAL(func))

#endif  // INTERCEPTION_WIN_H
#endif  // SANITIZER_WINDOWS
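A hedged sketch of the ucrtbase use case mentioned in the OverrideImportedFunction() comment, using the header's own macro (it assumes the including tool has already defined WRAP(HeapAlloc)/REAL(HeapAlloc), e.g. via INTERCEPTOR_WINAPI):

// Patch only ucrtbase.dll's import of HeapAlloc from kernel32.dll, leaving
// other modules' import tables untouched.
bool ok = INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", "kernel32.dll",
                                       HeapAlloc);
// ok is false if ucrtbase.dll is absent or does not import HeapAlloc.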
39
lib/libtsan/sanitizer_common/sancov_flags.h
Normal file
@@ -0,0 +1,39 @@
//===-- sancov_flags.h ------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef SANCOV_FLAGS_H
#define SANCOV_FLAGS_H

#include "sanitizer_flag_parser.h"
#include "sanitizer_internal_defs.h"

namespace __sancov {

struct SancovFlags {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sancov_flags.inc"
#undef SANCOV_FLAG

  void SetDefaults();
};

extern SancovFlags sancov_flags_dont_use_directly;

inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }

void InitializeSancovFlags();

}  // namespace __sancov

extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
__sancov_default_options();

#endif
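The weak __sancov_default_options() hook declared above lets an instrumented binary bake in its own flag defaults; a minimal sketch (the assumption is that the runtime consults the hook when it initializes the flags, as the other sanitizers' *_default_options hooks do):

// In the instrumented program, not in the runtime: a strong definition
// overrides the weak default.
extern "C" const char *__sancov_default_options() {
  return "symbolize=0";  // flag names come from sancov_flags.inc
}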
20
lib/libtsan/sanitizer_common/sancov_flags.inc
Normal file
@@ -0,0 +1,20 @@
//===-- sancov_flags.inc ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Sanitizer Coverage runtime flags.
//
//===----------------------------------------------------------------------===//
#ifndef SANCOV_FLAG
#error "Define SANCOV_FLAG prior to including this file!"
#endif

SANCOV_FLAG(bool, symbolize, true,
            "If set, coverage information will be symbolized by sancov tool "
            "after dumping.")

SANCOV_FLAG(bool, help, false, "Print flags help.")
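This .inc file is an X-macro list: sancov_flags.h above expands SANCOV_FLAG once into struct fields (bool symbolize; bool help;), and other expansions can reuse the same list. A sketch of a registration pass, where RegisterFlag mirrors the sanitizer_flag_parser.h helper (treat its exact signature as an assumption):

// Expanding the same list a second time to register parser entries.
void RegisterSancovFlags(FlagParser *parser, SancovFlags *f) {
#define SANCOV_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "sancov_flags.inc"
#undef SANCOV_FLAG
}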
393
lib/libtsan/sanitizer_common/sanitizer_addrhashmap.h
Normal file
@@ -0,0 +1,393 @@
|
||||
//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Concurrent uptr->T hashmap.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef SANITIZER_ADDRHASHMAP_H
|
||||
#define SANITIZER_ADDRHASHMAP_H
|
||||
|
||||
#include "sanitizer_common.h"
|
||||
#include "sanitizer_mutex.h"
|
||||
#include "sanitizer_atomic.h"
|
||||
#include "sanitizer_allocator_internal.h"
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
// Concurrent uptr->T hashmap.
|
||||
// T must be a POD type, kSize is preferably a prime but can be any number.
|
||||
// Usage example:
|
||||
//
|
||||
// typedef AddrHashMap<uptr, 11> Map;
|
||||
// Map m;
|
||||
// {
|
||||
// Map::Handle h(&m, addr);
|
||||
// use h.operator->() to access the data
|
||||
// if h.created() then the element was just created, and the current thread
|
||||
// has exclusive access to it
|
||||
// otherwise the current thread has only read access to the data
|
||||
// }
|
||||
// {
|
||||
// Map::Handle h(&m, addr, true);
|
||||
// this will remove the data from the map in Handle dtor
|
||||
// the current thread has exclusive access to the data
|
||||
// if !h.exists() then the element never existed
|
||||
// }
|
||||
// {
|
||||
// Map::Handle h(&m, addr, false, true);
|
||||
// this will create a new element or return a handle to an existing element
|
||||
// if !h.created() this thread does *not* have exclusive access to the data
|
||||
// }
|
||||
template<typename T, uptr kSize>
|
||||
class AddrHashMap {
|
||||
private:
|
||||
struct Cell {
|
||||
atomic_uintptr_t addr;
|
||||
T val;
|
||||
};
|
||||
|
||||
struct AddBucket {
|
||||
uptr cap;
|
||||
uptr size;
|
||||
Cell cells[1]; // variable len
|
||||
};
|
||||
|
||||
static const uptr kBucketSize = 3;
|
||||
|
||||
struct Bucket {
|
||||
Mutex mtx;
|
||||
atomic_uintptr_t add;
|
||||
Cell cells[kBucketSize];
|
||||
};
|
||||
|
||||
public:
|
||||
AddrHashMap();
|
||||
|
||||
class Handle {
|
||||
public:
|
||||
Handle(AddrHashMap<T, kSize> *map, uptr addr);
|
||||
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
|
||||
Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
|
||||
|
||||
~Handle();
|
||||
T *operator->();
|
||||
T &operator*();
|
||||
const T &operator*() const;
|
||||
bool created() const;
|
||||
bool exists() const;
|
||||
|
||||
private:
|
||||
friend AddrHashMap<T, kSize>;
|
||||
AddrHashMap<T, kSize> *map_;
|
||||
Bucket *bucket_;
|
||||
Cell *cell_;
|
||||
uptr addr_;
|
||||
uptr addidx_;
|
||||
bool created_;
|
||||
bool remove_;
|
||||
bool create_;
|
||||
};
|
||||
|
||||
typedef void (*ForEachCallback)(const uptr key, const T &val, void *arg);
|
||||
// ForEach acquires a lock on each bucket while iterating over
|
||||
// elements. Note that this only ensures that the structure of the hashmap is
|
||||
// unchanged, there may be a data race to the element itself.
|
||||
void ForEach(ForEachCallback cb, void *arg);
|
||||
|
||||
private:
|
||||
friend class Handle;
|
||||
Bucket *table_;
|
||||
|
||||
void acquire(Handle *h);
|
||||
void release(Handle *h);
|
||||
uptr calcHash(uptr addr);
|
||||
};
|
||||
|
||||
template <typename T, uptr kSize>
|
||||
void AddrHashMap<T, kSize>::ForEach(ForEachCallback cb, void *arg) {
|
||||
for (uptr n = 0; n < kSize; n++) {
|
||||
Bucket *bucket = &table_[n];
|
||||
|
||||
ReadLock lock(&bucket->mtx);
|
||||
|
||||
for (uptr i = 0; i < kBucketSize; i++) {
|
||||
Cell *c = &bucket->cells[i];
|
||||
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
|
||||
if (addr1 != 0)
|
||||
cb(addr1, c->val, arg);
|
||||
}
|
||||
|
||||
// Iterate over any additional cells.
|
||||
if (AddBucket *add =
|
||||
(AddBucket *)atomic_load(&bucket->add, memory_order_acquire)) {
|
||||
for (uptr i = 0; i < add->size; i++) {
|
||||
Cell *c = &add->cells[i];
|
||||
uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
|
||||
if (addr1 != 0)
|
||||
cb(addr1, c->val, arg);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T, uptr kSize>
|
||||
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
|
||||
map_ = map;
|
||||
addr_ = addr;
|
||||
remove_ = false;
|
||||
create_ = true;
|
||||
map_->acquire(this);
|
||||
}
|
||||
|
||||
template<typename T, uptr kSize>
|
||||
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
|
||||
bool remove) {
|
||||
map_ = map;
|
||||
addr_ = addr;
|
||||
remove_ = remove;
|
||||
create_ = true;
|
||||
map_->acquire(this);
|
||||
}
|
||||
|
||||
template<typename T, uptr kSize>
|
||||
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
|
||||
bool remove, bool create) {
|
||||
map_ = map;
|
||||
addr_ = addr;
|
||||
remove_ = remove;
|
||||
create_ = create;
|
||||
map_->acquire(this);
|
||||
}
|
||||
|
||||
template<typename T, uptr kSize>
|
||||
AddrHashMap<T, kSize>::Handle::~Handle() {
|
||||
map_->release(this);
|
||||
}
|
||||
|
||||
template <typename T, uptr kSize>
|
||||
T *AddrHashMap<T, kSize>::Handle::operator->() {
|
||||
return &cell_->val;
|
||||
}
|
||||
|
||||
template <typename T, uptr kSize>
|
||||
const T &AddrHashMap<T, kSize>::Handle::operator*() const {
|
||||
return cell_->val;
|
||||
}
|
||||
|
||||
template <typename T, uptr kSize>
|
||||
T &AddrHashMap<T, kSize>::Handle::operator*() {
|
||||
return cell_->val;
|
||||
}
|
||||
|
||||
template<typename T, uptr kSize>
|
||||
bool AddrHashMap<T, kSize>::Handle::created() const {
|
||||
return created_;
|
||||
}
|
||||
|
||||
template<typename T, uptr kSize>
|
||||
bool AddrHashMap<T, kSize>::Handle::exists() const {
|
||||
return cell_ != nullptr;
|
||||
}
|
||||
|
||||
template<typename T, uptr kSize>
|
||||
AddrHashMap<T, kSize>::AddrHashMap() {
|
||||
  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}

template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h)
    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        h->addidx_ = i;
        h->cell_ = c;
        return;
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
  // Re-check existence under write lock.
  // Embed cells.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        return;
      }
      b->mtx.Unlock();
      goto retry;
    }
  }

  // Add cells.
  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist, no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (!add) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  // Store.
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}

template <typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h)
    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    CHECK_EQ(addr1, h->addr_);
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}

template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
  addr += addr << 10;
  addr ^= addr >> 6;
  return addr % kSize;
}

}  // namespace __sanitizer

#endif  // SANITIZER_ADDRHASHMAP_H
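The acquire()/release() pair above implements a find-or-create map whose readers take no lock: embedded cells are probed with memory_order_acquire, and only insertion or removal takes the bucket mutex, with the new cell published by a release store. A minimal standalone sketch of that publication idiom, using std::atomic/std::mutex in place of the sanitizer primitives (all names here are illustrative, not part of the vendored file):

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    // Hypothetical single-cell "bucket": readers may find a published entry
    // without locking; writers publish under a mutex.
    struct Cell {
      std::atomic<uintptr_t> addr{0};  // 0 means the cell is empty
      int val = 0;
    };
    struct Bucket {
      std::mutex mtx;
      Cell cell;
    };

    // Find-or-create, mirroring the lock-free read + locked insert above.
    int *FindOrCreate(Bucket &b, uintptr_t addr) {
      // Lock-free fast path: acquire pairs with the release store below.
      if (b.cell.addr.load(std::memory_order_acquire) == addr)
        return &b.cell.val;
      std::lock_guard<std::mutex> l(b.mtx);
      uintptr_t cur = b.cell.addr.load(std::memory_order_relaxed);
      if (cur == addr)   // inserted concurrently while we waited for the lock
        return &b.cell.val;
      if (cur != 0)
        return nullptr;  // occupied by another address; a real map would chain
      b.cell.val = 0;    // initialize the payload before publication
      // The release store makes val visible to lock-free readers.
      b.cell.addr.store(addr, std::memory_order_release);
      return &b.cell.val;
    }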
214
lib/libtsan/sanitizer_common/sanitizer_allocator.cpp
Normal file
@@ -0,0 +1,214 @@
//===-- sanitizer_allocator.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// This allocator is used inside run-times.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"

#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

// Default allocator names.
const char *PrimaryAllocatorName = "SizeClassAllocator";
const char *SecondaryAllocatorName = "LargeMmapAllocator";

alignas(64) static char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
static StaticSpinMutex internal_alloc_init_mu;

static InternalAllocatorCache internal_allocator_cache;
static StaticSpinMutex internal_allocator_cache_mu;

InternalAllocator *internal_allocator() {
  InternalAllocator *internal_allocator_instance =
      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
    SpinMutexLock l(&internal_alloc_init_mu);
    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
        0) {
      internal_allocator_instance->Init(kReleaseToOSIntervalNever);
      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
    }
  }
  return internal_allocator_instance;
}

static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
                              uptr alignment) {
  if (alignment == 0) alignment = 8;
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Allocate(&internal_allocator_cache, size,
                                          alignment);
  }
  return internal_allocator()->Allocate(cache, size, alignment);
}

static void *RawInternalRealloc(void *ptr, uptr size,
                                InternalAllocatorCache *cache) {
  constexpr usize alignment = Max<usize>(8, sizeof(void *));
  if (cache == 0) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
                                            size, alignment);
  }
  return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}

static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
  if (!cache) {
    SpinMutexLock l(&internal_allocator_cache_mu);
    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
  }
  internal_allocator()->Deallocate(cache, ptr);
}

static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
  SetAllocatorOutOfMemory();
  Report("FATAL: %s: internal allocator is out of memory trying to allocate "
         "0x%zx bytes\n", SanitizerToolName, requested_size);
  Die();
}

void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
  void *p = RawInternalAlloc(size, cache, alignment);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
  void *p = RawInternalRealloc(addr, size, cache);
  if (UNLIKELY(!p))
    ReportInternalAllocatorOutOfMemory(size);
  return p;
}

void *InternalReallocArray(void *addr, uptr count, uptr size,
                           InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report(
        "FATAL: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
        "cannot be represented in type size_t\n",
        SanitizerToolName, count, size);
    Die();
  }
  return InternalRealloc(addr, count * size, cache);
}

void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (UNLIKELY(CheckForCallocOverflow(count, size))) {
    Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
    Die();
  }
  void *p = InternalAlloc(count * size, cache);
  if (LIKELY(p))
    internal_memset(p, 0, count * size);
  return p;
}

void InternalFree(void *addr, InternalAllocatorCache *cache) {
  RawInternalFree(addr, cache);
}

void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator_cache_mu.Lock();
  internal_allocator()->ForceLock();
}

void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  internal_allocator()->ForceUnlock();
  internal_allocator_cache_mu.Unlock();
}

// LowLevelAllocator
constexpr usize kLowLevelAllocatorDefaultAlignment =
    Max<usize>(8, sizeof(void *));
constexpr uptr kMinNumPagesRounded = 16;
constexpr uptr kMinRoundedSize = 65536;
static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
static LowLevelAllocateCallback low_level_alloc_callback;

static LowLevelAllocator Alloc;
LowLevelAllocator &GetGlobalLowLevelAllocator() { return Alloc; }

void *LowLevelAllocator::Allocate(uptr size) {
  // Align allocation size.
  size = RoundUpTo(size, low_level_alloc_min_alignment);
  if (allocated_end_ - allocated_current_ < (sptr)size) {
    uptr size_to_allocate = RoundUpTo(
        size, Min(GetPageSizeCached() * kMinNumPagesRounded, kMinRoundedSize));
    allocated_current_ = (char *)MmapOrDie(size_to_allocate, __func__);
    allocated_end_ = allocated_current_ + size_to_allocate;
    if (low_level_alloc_callback) {
      low_level_alloc_callback((uptr)allocated_current_, size_to_allocate);
    }
  }
  CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
  void *res = allocated_current_;
  allocated_current_ += size;
  return res;
}

void SetLowLevelAllocateMinAlignment(uptr alignment) {
  CHECK(IsPowerOfTwo(alignment));
  low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
}

void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
  low_level_alloc_callback = callback;
}

// Allocator's OOM and other errors handling support.

static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};

bool IsAllocatorOutOfMemory() {
  return atomic_load_relaxed(&allocator_out_of_memory);
}

void SetAllocatorOutOfMemory() {
  atomic_store_relaxed(&allocator_out_of_memory, 1);
}

bool AllocatorMayReturnNull() {
  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
}

void SetAllocatorMayReturnNull(bool may_return_null) {
  atomic_store(&allocator_may_return_null, may_return_null,
               memory_order_relaxed);
}

void PrintHintAllocatorCannotReturnNull() {
  Report("HINT: if you don't care about these errors you may set "
         "allocator_may_return_null=1\n");
}

static atomic_uint8_t rss_limit_exceeded;

bool IsRssLimitExceeded() {
  return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
}

void SetRssLimitExceeded(bool limit_exceeded) {
  atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
}

}  // namespace __sanitizer
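internal_allocator() above is a double-checked lazy initializer: an acquire load on the fast path, a relaxed re-check under the spin mutex, and a release store once Init() completes, with the object living in aligned placeholder storage rather than being a dynamic allocation. The same shape in portable C++ (hypothetical names; a sketch, not the vendored code):

    #include <atomic>
    #include <mutex>
    #include <new>

    struct BigThing { void Init() {} };

    alignas(64) static unsigned char storage[sizeof(BigThing)];
    static std::atomic<bool> initialized{false};
    static std::mutex init_mu;

    BigThing *Instance() {
      // Fast path: acquire pairs with the release store after construction.
      if (!initialized.load(std::memory_order_acquire)) {
        std::lock_guard<std::mutex> l(init_mu);
        // Relaxed is enough here: the mutex already orders this re-check.
        if (!initialized.load(std::memory_order_relaxed)) {
          BigThing *fresh = new (storage) BigThing();  // placeholder storage
          fresh->Init();
          initialized.store(true, std::memory_order_release);
        }
      }
      return std::launder(reinterpret_cast<BigThing *>(storage));
    }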
85
lib/libtsan/sanitizer_common/sanitizer_allocator.h
Normal file
@@ -0,0 +1,85 @@
//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_H
#define SANITIZER_ALLOCATOR_H

#include "sanitizer_common.h"
#include "sanitizer_flat_map.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_lfstack.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_local_address_space_view.h"
#include "sanitizer_mutex.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_type_traits.h"

namespace __sanitizer {

// Allows the tools to name their allocations appropriately.
extern const char *PrimaryAllocatorName;
extern const char *SecondaryAllocatorName;

// Since flags are immutable and allocator behavior can be changed at runtime
// (unit tests or ASan on Android are some examples), allocator_may_return_null
// flag value is cached here and can be altered later.
bool AllocatorMayReturnNull();
void SetAllocatorMayReturnNull(bool may_return_null);

// Returns true if allocator detected OOM condition. Can be used to avoid memory
// hungry operations.
bool IsAllocatorOutOfMemory();
// Should be called by a particular allocator when OOM is detected.
void SetAllocatorOutOfMemory();

void PrintHintAllocatorCannotReturnNull();

// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

inline u32 Rand(u32 *state) {  // ANSI C linear congruential PRNG.
  return (*state = *state * 1103515245 + 12345) >> 16;
}

inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; }  // [0, n)

template<typename T>
inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
  if (n <= 1) return;
  u32 state = *rand_state;
  for (u32 i = n - 1; i > 0; i--)
    Swap(a[i], a[RandN(&state, i + 1)]);
  *rand_state = state;
}

struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const {}
  void OnMapSecondary(uptr p, uptr size, uptr user_begin,
                      uptr user_size) const {}
  void OnUnmap(uptr p, uptr size) const {}
};

#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"
#include "sanitizer_allocator_primary32.h"
#include "sanitizer_allocator_local_cache.h"
#include "sanitizer_allocator_secondary.h"
#include "sanitizer_allocator_combined.h"

bool IsRssLimitExceeded();
void SetRssLimitExceeded(bool limit_exceeded);

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_H
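Rand() above is the classic ANSI C linear congruential generator and RandomShuffle() is a Fisher-Yates pass driven by it. A self-contained check of the same two routines (uint32_t wraparound assumed, exactly as in the header):

    #include <cstdint>
    #include <cstdio>

    static uint32_t Rand(uint32_t *state) {  // ANSI C LCG, as in the header
      return (*state = *state * 1103515245 + 12345) >> 16;
    }

    template <typename T>
    static void RandomShuffle(T *a, uint32_t n, uint32_t *rand_state) {
      if (n <= 1) return;
      uint32_t state = *rand_state;
      for (uint32_t i = n - 1; i > 0; i--) {   // Fisher-Yates, back to front
        uint32_t j = Rand(&state) % (i + 1);   // pick from [0, i]
        T tmp = a[i]; a[i] = a[j]; a[j] = tmp;
      }
      *rand_state = state;
    }

    int main() {
      int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
      uint32_t seed = 42;
      RandomShuffle(a, 8, &seed);
      for (int v : a) printf("%d ", v);  // prints a permutation of 0..7
      printf("\n");
    }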
22
lib/libtsan/sanitizer_common/sanitizer_allocator_checks.cpp
Normal file
@@ -0,0 +1,22 @@
//===-- sanitizer_allocator_checks.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
// allocators.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_errno.h"

namespace __sanitizer {

void SetErrnoToENOMEM() {
  errno = errno_ENOMEM;
}

}  // namespace __sanitizer
76
lib/libtsan/sanitizer_common/sanitizer_allocator_checks.h
Normal file
@@ -0,0 +1,76 @@
//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
// allocators.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_CHECKS_H
#define SANITIZER_ALLOCATOR_CHECKS_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

// The following is defined in a separate compilation unit to avoid pulling in
// sanitizer_errno.h in this header, which leads to conflicts when other system
// headers include errno.h. This is usually the result of an unlikely event,
// and as such we do not care as much about having it inlined.
void SetErrnoToENOMEM();

// A common errno setting logic shared by almost all sanitizer allocator APIs.
inline void *SetErrnoOnNull(void *ptr) {
  if (UNLIKELY(!ptr))
    SetErrnoToENOMEM();
  return ptr;
}

// In case of the check failure, the caller of the following Check... functions
// should "return POLICY::OnBadRequest();" where POLICY is the current allocator
// failure handling policy.

// Checks aligned_alloc() parameters, verifies that the alignment is a power of
// two and that the size is a multiple of alignment for POSIX implementation,
// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
// of alignment.
inline bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
#if SANITIZER_POSIX
  return alignment != 0 && IsPowerOfTwo(alignment) &&
         (size & (alignment - 1)) == 0;
#else
  return alignment != 0 && size % alignment == 0;
#endif
}

// Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *).
inline bool CheckPosixMemalignAlignment(uptr alignment) {
  return alignment != 0 && IsPowerOfTwo(alignment) &&
         (alignment % sizeof(void *)) == 0;
}

// Returns true if calloc(size, n) call overflows on size*n calculation.
inline bool CheckForCallocOverflow(uptr size, uptr n) {
  if (!size)
    return false;
  uptr max = (uptr)-1L;
  return (max / size) < n;
}

// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of page_size.
inline bool CheckForPvallocOverflow(uptr size, uptr page_size) {
  return RoundUpTo(size, page_size) < size;
}

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_CHECKS_H
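CheckForCallocOverflow() above detects size*n overflow without performing the multiplication: if n > max/size then size*n > max, and if n <= max/size then size*n <= max, so the division alone decides. A standalone illustration of that argument (hypothetical wrapper names):

    #include <cstdint>
    #include <cstdio>

    // True if size * n overflows uintptr_t, decided without the (potentially
    // wrapping) multiplication, mirroring CheckForCallocOverflow above.
    static bool CallocOverflows(uintptr_t size, uintptr_t n) {
      if (size == 0) return false;     // 0 * n never overflows
      uintptr_t max = ~(uintptr_t)0;   // UINTPTR_MAX
      return (max / size) < n;         // n > max/size  <=>  size*n > max
    }

    int main() {
      printf("%d\n", CallocOverflows(1u << 16, 1u << 16));  // 0 on 64-bit
      printf("%d\n", CallocOverflows(~(uintptr_t)0, 2));    // 1: overflows
    }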
199
lib/libtsan/sanitizer_common/sanitizer_allocator_combined.h
Normal file
@@ -0,0 +1,199 @@
//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//  When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator,
          class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
class CombinedAllocator {
 public:
  using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
  using SecondaryAllocator =
      LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
                         LargeMmapAllocatorPtrArray,
                         typename PrimaryAllocator::AddressSpaceView>;

  void InitLinkerInitialized(s32 release_to_os_interval_ms,
                             uptr heap_start = 0) {
    primary_.Init(release_to_os_interval_ms, heap_start);
    secondary_.InitLinkerInitialized();
  }

  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    stats_.Init();
    primary_.Init(release_to_os_interval_ms, heap_start);
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size) {
      Report("WARNING: %s: CombinedAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, size, alignment);
      return nullptr;
    }
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  void ForceReleaseToOS() {
    primary_.ForceReleaseToOS();
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(const void *p) const {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(const void *p) const { return primary_.PointerIsMine(p); }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};
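CombinedAllocator::Allocate() above routes small, modestly aligned requests to the size-class primary and everything else to the mmap-based secondary, after guarding size + alignment against overflow. A toy model of that dispatch, with ToyPrimary/ToySecondary as stand-ins for the real template parameters (a sketch, not the vendored classes):

    #include <cstdio>
    #include <cstdlib>   // malloc, posix_memalign (POSIX)

    struct ToyPrimary {
      static bool CanAllocate(size_t size, size_t alignment) {
        return size <= (size_t(1) << 17) && alignment <= 8;  // small only
      }
      void *Allocate(size_t size) { return malloc(size); }
    };

    struct ToySecondary {
      void *Allocate(size_t size, size_t alignment) {
        if (alignment < sizeof(void *)) alignment = sizeof(void *);
        void *p = nullptr;
        return posix_memalign(&p, alignment, size) == 0 ? p : nullptr;
      }
    };

    // Same dispatch shape as CombinedAllocator::Allocate above.
    struct ToyCombined {
      ToyPrimary primary;
      ToySecondary secondary;
      void *Allocate(size_t size, size_t alignment) {
        if (size == 0) size = 1;          // malloc(0) should stay non-null
        if (size + alignment < size)      // size + alignment overflowed
          return nullptr;
        if (primary.CanAllocate(size, alignment))
          return primary.Allocate(size);
        return secondary.Allocate(size, alignment);
      }
    };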
88
lib/libtsan/sanitizer_common/sanitizer_allocator_dlsym.h
Normal file
@@ -0,0 +1,88 @@
//===-- sanitizer_allocator_dlsym.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Hack: Sanitizer initializer calls dlsym which may need to allocate and call
// back into uninitialized sanitizer.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_DLSYM_H
#define SANITIZER_ALLOCATOR_DLSYM_H

#include "sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_internal_defs.h"

namespace __sanitizer {

template <typename Details>
struct DlSymAllocator {
  static bool Use() {
    // Fuchsia doesn't use dlsym-based interceptors.
    return !SANITIZER_FUCHSIA && UNLIKELY(Details::UseImpl());
  }

  static bool PointerIsMine(const void *ptr) {
    // Fuchsia doesn't use dlsym-based interceptors.
    return !SANITIZER_FUCHSIA &&
           UNLIKELY(internal_allocator()->FromPrimary(ptr));
  }

  static void *Allocate(uptr size_in_bytes, uptr align = kWordSize) {
    void *ptr = InternalAlloc(size_in_bytes, nullptr, align);
    CHECK(internal_allocator()->FromPrimary(ptr));
    Details::OnAllocate(ptr, GetSize(ptr));
    return ptr;
  }

  static void *Callocate(usize nmemb, usize size) {
    void *ptr = InternalCalloc(nmemb, size);
    CHECK(internal_allocator()->FromPrimary(ptr));
    Details::OnAllocate(ptr, GetSize(ptr));
    return ptr;
  }

  static void Free(void *ptr) {
    uptr size = GetSize(ptr);
    Details::OnFree(ptr, size);
    InternalFree(ptr);
  }

  static void *Realloc(void *ptr, uptr new_size) {
    if (!ptr)
      return Allocate(new_size);
    CHECK(internal_allocator()->FromPrimary(ptr));
    if (!new_size) {
      Free(ptr);
      return nullptr;
    }
    uptr size = GetSize(ptr);
    uptr memcpy_size = Min(new_size, size);
    void *new_ptr = Allocate(new_size);
    if (new_ptr)
      internal_memcpy(new_ptr, ptr, memcpy_size);
    Free(ptr);
    return new_ptr;
  }

  static void *ReallocArray(void *ptr, uptr count, uptr size) {
    CHECK(!CheckForCallocOverflow(count, size));
    return Realloc(ptr, count * size);
  }

  static uptr GetSize(void *ptr) {
    return internal_allocator()->GetActuallyAllocatedSize(ptr);
  }

  static void OnAllocate(const void *ptr, uptr size) {}
  static void OnFree(const void *ptr, uptr size) {}
};

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_DLSYM_H
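DlSymAllocator above exists because dlsym() may itself call calloc() before the sanitizer runtime is initialized; such allocations are served from the internal allocator and later recognized via FromPrimary(). A common simplified form of the same hack is a static scratch arena consumed before the real allocator is up; a sketch under that assumption (illustrative names, not the code above):

    #include <cstddef>

    // Bump allocator over static storage, usable before the real allocator
    // is initialized (e.g. inside an intercepted dlsym-driven calloc).
    static unsigned char scratch[4096];
    static size_t scratch_used = 0;

    static void *BootstrapAlloc(size_t size) {
      size = (size + 15) & ~size_t(15);  // keep 16-byte alignment
      if (scratch_used + size > sizeof(scratch)) return nullptr;
      void *p = scratch + scratch_used;  // static storage is zero-initialized
      scratch_used += size;
      return p;
    }

    // PointerIsMine-style check: frees of bootstrap pointers are ignored,
    // everything else is forwarded to the real allocator once it exists.
    static bool BootstrapOwns(const void *p) {
      const unsigned char *q = static_cast<const unsigned char *>(p);
      return q >= scratch && q < scratch + sizeof(scratch);
    }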
53
lib/libtsan/sanitizer_common/sanitizer_allocator_interface.h
Normal file
@@ -0,0 +1,53 @@
//===-- sanitizer_allocator_interface.h ------------------------- C++ -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Re-declaration of functions from public sanitizer allocator interface.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
#define SANITIZER_ALLOCATOR_INTERFACE_H

#include "sanitizer_internal_defs.h"

using __sanitizer::uptr;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size);
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE const void *__sanitizer_get_allocated_begin(
    const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr
__sanitizer_get_allocated_size_fast(const void *p);
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();

SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
    void (*malloc_hook)(const void *, uptr),
    void (*free_hook)(const void *));

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__sanitizer_ignore_free_hook(void *ptr);

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_purge_allocator();

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
}  // extern "C"

#endif  // SANITIZER_ALLOCATOR_INTERFACE_H
56
lib/libtsan/sanitizer_common/sanitizer_allocator_internal.h
Normal file
@@ -0,0 +1,56 @@
//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This allocator is used inside run-times.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_INTERNAL_H
#define SANITIZER_ALLOCATOR_INTERNAL_H

#include "sanitizer_allocator.h"
#include "sanitizer_internal_defs.h"

namespace __sanitizer {

// FIXME: Check if we may use even more compact size class map for internal
// purposes.
typedef CompactSizeClassMap InternalSizeClassMap;

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef InternalSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;

typedef CombinedAllocator<PrimaryInternalAllocator,
                          LargeMmapAllocatorPtrArrayStatic>
    InternalAllocator;
typedef InternalAllocator::AllocatorCache InternalAllocatorCache;

void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                    uptr alignment = 0);
void *InternalRealloc(void *p, uptr size,
                      InternalAllocatorCache *cache = nullptr);
void *InternalReallocArray(void *p, uptr count, uptr size,
                           InternalAllocatorCache *cache = nullptr);
void *InternalCalloc(uptr count, uptr size,
                     InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
void InternalAllocatorLock();
void InternalAllocatorUnlock();
InternalAllocator *internal_allocator();
}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_INTERNAL_H
271
lib/libtsan/sanitizer_common/sanitizer_allocator_local_cache.h
Normal file
@@ -0,0 +1,271 @@
//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef MemoryMapper<Allocator> MemoryMapperT;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    CompactPtrT chunk = c->chunks[--c->count];
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      DrainHalfMax(c, allocator, class_id);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  void Drain(SizeClassAllocator *allocator) {
    MemoryMapperT memory_mapper(*allocator);
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;
    u32 max_count;
    uptr class_size;
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
      c->class_size = size;
    }
    DCHECK_NE(c->max_count, 0UL);
  }

  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    const uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

  NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
                             uptr class_id) {
    MemoryMapperT memory_mapper(*allocator);
    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
  }

  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
             SizeClassAllocator *allocator, uptr class_id, uptr count) {
    CHECK_GE(c->count, count);
    const uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
      DCHECK_GT(c->count, 0);
    }
    void *res = c->batch[--c->count];
    PREFETCH(c->batch[c->count - 1]);
    stats_.Add(AllocatorStatAllocated, c->class_size);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to check failure.
    PerClass *c = &per_class_[class_id];
    InitCache(c);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id);
    c->batch[c->count++] = p;
    stats_.Sub(AllocatorStatAllocated, c->class_size);
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from kBatchClassID size class (except for those that are needed
  // for kBatchClassID itself). The goal is to have TransferBatches in a totally
  // different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;
    uptr max_count;
    uptr class_size;
    uptr batch_class_id;
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache(PerClass *c) {
    if (LIKELY(c->max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 1; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      const uptr size = Allocator::ClassIdToSize(i);
      const uptr max_cached = TransferBatch::MaxCached(size);
      c->max_count = 2 * max_cached;
      c->class_size = size;
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (size <
            TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
                batch_class_id : 0;
      }
    }
    DCHECK_NE(c->max_count, 0UL);
  }

  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache(c);
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
                      uptr class_id) {
    const uptr count = Min(c->max_count / 2, c->count);
    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b)) {
      Report("FATAL: Internal error: %s's allocator failed to allocate a "
             "transfer batch.\n", SanitizerToolName);
      Die();
    }
    b->SetFromArray(&c->batch[first_idx_to_drain], count);
    c->count -= count;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};
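Both caches above follow the same policy: a per-size-class array holds up to max_count free chunks, an empty cache refills max_count/2 from the shared allocator, and a full cache drains max_count/2 back, so a steady stream of alloc/free pairs never touches the shared state. A compact model of that policy (the Backend here is a hypothetical stand-in for the size-class allocator, which really hands out chunks from mmap'd regions):

    #include <mutex>
    #include <vector>

    // Shared backend: one locked free list, stand-in for one size class.
    struct Backend {
      std::mutex mu;
      std::vector<void *> free_list;
      size_t TakeBatch(void **out, size_t n) {
        std::lock_guard<std::mutex> l(mu);
        size_t got = 0;
        while (got < n && !free_list.empty()) {
          out[got++] = free_list.back();
          free_list.pop_back();
        }
        return got;
      }
      void PutBatch(void **in, size_t n) {
        std::lock_guard<std::mutex> l(mu);
        for (size_t i = 0; i < n; i++) free_list.push_back(in[i]);
      }
    };

    struct PerClassCache {
      static const size_t kMax = 64;  // "2 * max-cached hint" in the real code
      void *chunks[kMax];
      size_t count = 0;

      void *Allocate(Backend *b) {
        if (count == 0) {                   // miss: refill half of max
          count = b->TakeBatch(chunks, kMax / 2);
          if (count == 0) return nullptr;   // backend exhausted
        }
        return chunks[--count];
      }
      void Deallocate(Backend *b, void *p) {
        if (count == kMax) {                // full: drain half of max
          b->PutBatch(&chunks[count - kMax / 2], kMax / 2);
          count -= kMax / 2;
        }
        chunks[count++] = p;
      }
    };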
381
lib/libtsan/sanitizer_common/sanitizer_allocator_primary32.h
Normal file
@@ -0,0 +1,381 @@
//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Part of the Sanitizer Allocator.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
#ifndef SANITIZER_ALLOCATOR_H
|
||||
#error This file must be included inside sanitizer_allocator.h
|
||||
#endif
|
||||
|
||||
template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
|
||||
|
||||
// SizeClassAllocator32 -- allocator for 32-bit address space.
|
||||
// This allocator can theoretically be used on 64-bit arch, but there it is less
|
||||
// efficient than SizeClassAllocator64.
|
||||
//
|
||||
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
|
||||
// be returned by MmapOrDie().
|
||||
//
|
||||
// Region:
|
||||
// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
|
||||
// kRegionSize).
|
||||
// Since the regions are aligned by kRegionSize, there are exactly
|
||||
// kNumPossibleRegions possible regions in the address space and so we keep
|
||||
// a ByteMap possible_regions to store the size classes of each Region.
|
||||
// 0 size class means the region is not used by the allocator.
|
||||
//
|
||||
// One Region is used to allocate chunks of a single size class.
|
||||
// A Region looks like this:
|
||||
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
|
||||
//
|
||||
// In order to avoid false sharing the objects of this class should be
|
||||
// chache-line aligned.
|
||||
|
||||
struct SizeClassAllocator32FlagMasks { // Bit masks.
|
||||
enum {
|
||||
kRandomShuffleChunks = 1,
|
||||
kUseSeparateSizeClassForBatch = 2,
|
||||
};
|
||||
};
|
||||
|
||||
template <class Params>
|
||||
class SizeClassAllocator32 {
|
||||
private:
|
||||
static const u64 kTwoLevelByteMapSize1 =
|
||||
(Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
|
||||
static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;
|
||||
|
||||
public:
|
||||
using AddressSpaceView = typename Params::AddressSpaceView;
|
||||
static const uptr kSpaceBeg = Params::kSpaceBeg;
|
||||
static const u64 kSpaceSize = Params::kSpaceSize;
|
||||
static const uptr kMetadataSize = Params::kMetadataSize;
|
||||
typedef typename Params::SizeClassMap SizeClassMap;
|
||||
static const uptr kRegionSizeLog = Params::kRegionSizeLog;
|
||||
typedef typename Params::MapUnmapCallback MapUnmapCallback;
|
||||
using ByteMap = typename conditional<
|
||||
(kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
|
||||
FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
|
||||
AddressSpaceView>,
|
||||
TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
|
||||
|
||||
COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
|
||||
(kSpaceSize & (kSpaceSize - 1)) == 0);
|
||||
|
||||
static const bool kRandomShuffleChunks = Params::kFlags &
|
||||
SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
|
||||
static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
|
||||
SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
|
||||
|
||||
struct TransferBatch {
|
||||
static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
|
||||
void SetFromArray(void *batch[], uptr count) {
|
||||
DCHECK_LE(count, kMaxNumCached);
|
||||
count_ = count;
|
||||
for (uptr i = 0; i < count; i++)
|
||||
batch_[i] = batch[i];
|
||||
}
|
||||
uptr Count() const { return count_; }
|
||||
void Clear() { count_ = 0; }
|
||||
void Add(void *ptr) {
|
||||
batch_[count_++] = ptr;
|
||||
DCHECK_LE(count_, kMaxNumCached);
|
||||
}
|
||||
void CopyToArray(void *to_batch[]) const {
|
||||
for (uptr i = 0, n = Count(); i < n; i++)
|
||||
to_batch[i] = batch_[i];
|
||||
}
|
||||
|
||||
// How much memory do we need for a batch containing n elements.
|
||||
static uptr AllocationSizeRequiredForNElements(uptr n) {
|
||||
return sizeof(uptr) * 2 + sizeof(void *) * n;
|
||||
}
|
||||
static uptr MaxCached(uptr size) {
|
||||
return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
|
||||
}
|
||||
|
||||
TransferBatch *next;
|
||||
|
||||
private:
|
||||
uptr count_;
|
||||
void *batch_[kMaxNumCached];
|
||||
};
|
||||
|
||||
static const uptr kBatchSize = sizeof(TransferBatch);
|
||||
COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
|
||||
COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
|
||||
|
||||
static uptr ClassIdToSize(uptr class_id) {
|
||||
return (class_id == SizeClassMap::kBatchClassID) ?
|
||||
kBatchSize : SizeClassMap::Size(class_id);
|
||||
}
|
||||
|
||||
typedef SizeClassAllocator32<Params> ThisT;
|
||||
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
|
||||
|
||||
void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
|
||||
CHECK(!heap_start);
|
||||
possible_regions.Init();
|
||||
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
|
||||
}
|
||||
|
||||
s32 ReleaseToOSIntervalMs() const {
|
||||
return kReleaseToOSIntervalNever;
|
||||
}
|
||||
|
||||
void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
|
||||
// This is empty here. Currently only implemented in 64-bit allocator.
|
||||
}
|
||||
|
||||
void ForceReleaseToOS() {
|
||||
// Currently implemented in 64-bit allocator only.
|
||||
}
|
||||
|
||||
void *MapWithCallback(uptr size) {
|
||||
void *res = MmapOrDie(size, PrimaryAllocatorName);
|
||||
MapUnmapCallback().OnMap((uptr)res, size);
|
||||
return res;
|
||||
}
|
||||
|
||||
void UnmapWithCallback(uptr beg, uptr size) {
|
||||
MapUnmapCallback().OnUnmap(beg, size);
|
||||
UnmapOrDie(reinterpret_cast<void *>(beg), size);
|
||||
}
|
||||
|
||||
static bool CanAllocate(uptr size, uptr alignment) {
|
||||
return size <= SizeClassMap::kMaxSize &&
|
||||
alignment <= SizeClassMap::kMaxSize;
|
||||
}
|
||||
|
||||
void *GetMetaData(const void *p) {
|
||||
CHECK(kMetadataSize);
|
||||
CHECK(PointerIsMine(p));
|
||||
uptr mem = reinterpret_cast<uptr>(p);
|
||||
uptr beg = ComputeRegionBeg(mem);
|
||||
uptr size = ClassIdToSize(GetSizeClass(p));
|
||||
u32 offset = mem - beg;
|
||||
uptr n = offset / (u32)size; // 32-bit division
|
||||
uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
|
||||
return reinterpret_cast<void*>(meta);
|
||||
}
|
||||
|
||||
NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
|
||||
uptr class_id) {
|
||||
DCHECK_LT(class_id, kNumClasses);
|
||||
SizeClassInfo *sci = GetSizeClassInfo(class_id);
|
||||
SpinMutexLock l(&sci->mutex);
|
||||
if (sci->free_list.empty()) {
|
||||
if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
|
||||
return nullptr;
|
||||
DCHECK(!sci->free_list.empty());
|
||||
}
|
||||
TransferBatch *b = sci->free_list.front();
|
||||
sci->free_list.pop_front();
|
||||
return b;
|
||||
}
|
||||
|
||||
NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
|
||||
TransferBatch *b) {
|
||||
DCHECK_LT(class_id, kNumClasses);
|
||||
CHECK_GT(b->Count(), 0);
|
||||
SizeClassInfo *sci = GetSizeClassInfo(class_id);
|
||||
SpinMutexLock l(&sci->mutex);
|
||||
sci->free_list.push_front(b);
|
||||
}
|
||||
|
||||
bool PointerIsMine(const void *p) const {
|
||||
    uptr mem = reinterpret_cast<uptr>(p);
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) const {
    uptr id = ComputeRegionId(reinterpret_cast<uptr>(p));
    return possible_regions.contains(id) ? possible_regions[id] : 0;
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) const {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions.contains(region) && possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {}

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct alignas(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

  uptr ComputeRegionId(uptr mem) const {
    if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
      mem &= (kSpaceSize - 1);
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) const { return mem & ~(kRegionSize - 1); }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions[ComputeRegionId(res)] = class_id;
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    UNINITIALIZED uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};
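
// [Editorial sketch, not part of the vendored sources.] The 32-bit primary
// above maps an address to its region with a mask and recovers a chunk's
// start with one 32-bit division. A minimal standalone illustration of that
// arithmetic, with purely illustrative constants:
//
//   #include <cassert>
//   #include <cstdint>
//   int main() {
//     const uint32_t kRegionSizeLog = 20;                // 1 MiB regions
//     const uint32_t kRegionSize = 1u << kRegionSizeLog;
//     const uint32_t chunk_size = 48;                    // some size class
//     uint32_t p = (3u << kRegionSizeLog) + 5000;        // ptr in region 3
//     uint32_t region_beg = p & ~(kRegionSize - 1);      // ComputeRegionBeg
//     uint32_t n = (p - region_beg) / chunk_size;        // 32-bit division
//     uint32_t block_beg = region_beg + n * chunk_size;  // GetBlockBegin
//     assert(block_beg <= p && p < block_beg + chunk_size);
//   }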
902
lib/libtsan/sanitizer_common/sanitizer_allocator_primary64.h
Normal file
@@ -0,0 +1,902 @@
//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;

// SizeClassAllocator64 -- allocator for 64-bit address space.
// The template parameter Params is a class containing the actual parameters.
//
// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
// Otherwise SpaceBeg=kSpaceBeg (fixed address).
// kSpaceSize is a power of two.
// At the beginning the entire space is mprotect-ed, then small parts of it
// are mapped on demand.
//
// Region: a part of Space dedicated to a single size class.
// There are kNumClasses Regions of equal size.
//
// UserChunk: a piece of memory returned to user.
// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.

// FreeArray is an array of free-d chunks (stored as 4-byte offsets)
//
// A Region looks like this:
// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray

struct SizeClassAllocator64FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
  };
};

template <typename Allocator>
class MemoryMapper {
 public:
  typedef typename Allocator::CompactPtrT CompactPtrT;

  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}

  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
    ranges = released_ranges_count_;
    released_ranges_count_ = 0;
    bytes = released_bytes_;
    released_bytes_ = 0;
    return ranges != 0;
  }

  u64 *MapPackedCounterArrayBuffer(uptr count) {
    buffer_.clear();
    buffer_.resize(count);
    return buffer_.data();
  }

  // Releases [from, to) range of pages back to OS.
  void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
    ReleaseMemoryPagesToOS(from_page, to_page);
    released_ranges_count_++;
    released_bytes_ += to_page - from_page;
  }

 private:
  const Allocator &allocator_;
  uptr released_ranges_count_ = 0;
  uptr released_bytes_ = 0;
  InternalMmapVector<u64> buffer_;
};

template <class Params>
class SizeClassAllocator64 {
 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const uptr kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static const bool kRandomShuffleChunks =
      Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;

  typedef SizeClassAllocator64<Params> ThisT;
  typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
  typedef MemoryMapper<ThisT> MemoryMapperT;

  // When we know the size class (the region base) we can represent a pointer
  // as a 4-byte integer (offset from the region start shifted right by 4).
  typedef u32 CompactPtrT;
  static const uptr kCompactPtrScale = 4;
  CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
    return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
  }
  uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
    return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
  }
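
// [Editorial sketch, not part of the vendored sources.] With
// kCompactPtrScale == 4 a chunk address is stored as a 32-bit offset from the
// region base in 16-byte units, which is why a region may span up to
// 2^(32+4) bytes (see the COMPILER_CHECK below). The base value here is
// illustrative only:
//
//   #include <cassert>
//   #include <cstdint>
//   int main() {
//     const uint64_t kCompactPtrScale = 4;
//     uint64_t base = 0x600000000000ull;         // hypothetical region base
//     uint64_t ptr = base + (1ull << 20) * 16;   // a 16-byte-aligned chunk
//     uint32_t compact = (uint32_t)((ptr - base) >> kCompactPtrScale);
//     uint64_t round_trip = base + ((uint64_t)compact << kCompactPtrScale);
//     assert(round_trip == ptr);                 // lossless round trip
//   }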

  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
  // at heap_start and places the heap there. This mode requires kSpaceBeg ==
  // ~(uptr)0.
  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
    uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
    PremappedHeap = heap_start != 0;
    if (PremappedHeap) {
      CHECK(!kUsingConstantSpaceBeg);
      NonConstSpaceBeg = heap_start;
      uptr RegionInfoSize = AdditionalSize();
      RegionInfoSpace =
          address_range.Init(RegionInfoSize, PrimaryAllocatorName);
      CHECK_NE(RegionInfoSpace, ~(uptr)0);
      CHECK_EQ(RegionInfoSpace,
               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
                                      "SizeClassAllocator: region info"));
      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
    } else {
      if (kUsingConstantSpaceBeg) {
        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
        CHECK_EQ(kSpaceBeg,
                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
                                    kSpaceBeg));
      } else {
        // Combined allocator expects that a 2^N allocation is always aligned
        // to 2^N. For this to work, the start of the space needs to be aligned
        // as high as the largest size class (which also needs to be a power of
        // 2).
        NonConstSpaceBeg = address_range.InitAligned(
            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
      }
      RegionInfoSpace = SpaceEnd();
      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
                           "SizeClassAllocator: region info");
    }
    SetReleaseToOSIntervalMs(release_to_os_interval_ms);
    // Check that the RegionInfo array is aligned on the CacheLine size.
    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
  }

  s32 ReleaseToOSIntervalMs() const {
    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
                 memory_order_relaxed);
  }

  void ForceReleaseToOS() {
    MemoryMapperT memory_mapper(*this);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      Lock l(&GetRegionInfo(class_id)->mutex);
      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
    }
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
           alignment <= SizeClassMap::kMaxSize;
  }

  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
                                  AllocatorStats *stat, uptr class_id,
                                  const CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    Lock l(&region->mutex);
    uptr old_num_chunks = region->num_freed_chunks;
    uptr new_num_freed_chunks = old_num_chunks + n_chunks;
    // Failure to allocate free array space while releasing memory is
    // non-recoverable.
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
                                       new_num_freed_chunks))) {
      Report(
          "FATAL: Internal error: %s's allocator exhausted the free list "
          "space for size class %zu (%zu bytes).\n",
          SanitizerToolName, class_id, ClassIdToSize(class_id));
      Die();
    }
    for (uptr i = 0; i < n_chunks; i++)
      free_array[old_num_chunks + i] = chunks[i];
    region->num_freed_chunks = new_num_freed_chunks;
    region->stats.n_freed += n_chunks;

    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
  }

  NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
                                 CompactPtrT *chunks, uptr n_chunks) {
    RegionInfo *region = GetRegionInfo(class_id);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    CompactPtrT *free_array = GetFreeArray(region_beg);

    Lock l(&region->mutex);
#if SANITIZER_WINDOWS
    /* On Windows unmapping of memory during __sanitizer_purge_allocator is
    explicit and immediate, so unmapped regions must be explicitly mapped back
    in when they are accessed again. */
    if (region->rtoi.last_released_bytes > 0) {
      MmapFixedOrDie(region_beg, region->mapped_user,
                     "SizeClassAllocator: region data");
      region->rtoi.n_freed_at_last_release = 0;
      region->rtoi.last_released_bytes = 0;
    }
#endif
    if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
      if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
                                      n_chunks - region->num_freed_chunks)))
        return false;
      CHECK_GE(region->num_freed_chunks, n_chunks);
    }
    region->num_freed_chunks -= n_chunks;
    uptr base_idx = region->num_freed_chunks;
    for (uptr i = 0; i < n_chunks; i++)
      chunks[i] = free_array[base_idx + i];
    region->stats.n_allocated += n_chunks;
    return true;
  }

  bool PointerIsMine(const void *p) const {
    uptr P = reinterpret_cast<uptr>(p);
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return P / kSpaceSize == kSpaceBeg / kSpaceSize;
    return P >= SpaceBeg() && P < SpaceEnd();
  }

  uptr GetRegionBegin(const void *p) {
    if (kUsingConstantSpaceBeg)
      return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
    uptr space_beg = SpaceBeg();
    return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
           space_beg;
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) const {
    return SpaceBeg() + kRegionSize * class_id;
  }

  uptr GetSizeClass(const void *p) {
    if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
      return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
    return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
           kNumClassesRounded;
  }

  void *GetBlockBegin(const void *p) {
    uptr class_id = GetSizeClass(p);
    if (class_id >= kNumClasses) return nullptr;
    uptr size = ClassIdToSize(class_id);
    if (!size) return nullptr;
    uptr chunk_idx = GetChunkIdx((uptr)p, size);
    uptr reg_beg = GetRegionBegin(p);
    uptr beg = chunk_idx * size;
    uptr next_beg = beg + size;
    const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
    if (region->mapped_user >= next_beg)
      return reinterpret_cast<void*>(reg_beg + beg);
    return nullptr;
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  void *GetMetaData(const void *p) {
    CHECK(kMetadataSize);
    uptr class_id = GetSizeClass(p);
    uptr size = ClassIdToSize(class_id);
    if (!size)
      return nullptr;
    uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
    uptr region_beg = GetRegionBeginBySizeClass(class_id);
    return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
                                    (1 + chunk_idx) * kMetadataSize);
  }

  uptr TotalMemoryUsed() {
    uptr res = 0;
    for (uptr i = 0; i < kNumClasses; i++)
      res += GetRegionInfo(i)->allocated_user;
    return res;
  }

  // Test-only.
  void TestOnlyUnmap() {
    UnmapWithCallbackOrDie((uptr)address_range.base(), address_range.size());
  }

  static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats) {
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      if (stats[class_id] == start)
        stats[class_id] = rss;
  }

  void PrintStats(uptr class_id, uptr rss) {
    RegionInfo *region = GetRegionInfo(class_id);
    if (region->mapped_user == 0) return;
    uptr in_use = region->stats.n_allocated - region->stats.n_freed;
    uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
    Printf(
        "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
        "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
        "last released: %6lldK region: %p\n",
        region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
        region->mapped_user >> 10, region->stats.n_allocated,
        region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
        rss >> 10, region->rtoi.num_releases,
        region->rtoi.last_released_bytes >> 10,
        (void *)(SpaceBeg() + kRegionSize * class_id));
  }

  void PrintStats() {
    uptr rss_stats[kNumClasses];
    for (uptr class_id = 0; class_id < kNumClasses; class_id++)
      rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
    GetMemoryProfile(FillMemoryProfile, rss_stats);

    uptr total_mapped = 0;
    uptr total_rss = 0;
    uptr n_allocated = 0;
    uptr n_freed = 0;
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      if (region->mapped_user != 0) {
        total_mapped += region->mapped_user;
        total_rss += rss_stats[class_id];
      }
      n_allocated += region->stats.n_allocated;
      n_freed += region->stats.n_freed;
    }

    Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
           "%zd allocations; remains %zd\n", total_mapped >> 20,
           total_rss >> 20, n_allocated, n_allocated - n_freed);
    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
      PrintStats(class_id, rss_stats[class_id]);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetRegionInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    for (int i = (int)kNumClasses - 1; i >= 0; i--) {
      GetRegionInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = ClassIdToSize(class_id);
      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
      uptr region_allocated_user_size =
          AddressSpaceView::Load(region)->allocated_user;
      for (uptr chunk = region_beg;
           chunk < region_beg + region_allocated_user_size;
           chunk += chunk_size) {
        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
        callback(chunk, arg);
      }
    }
  }

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  static uptr AdditionalSize() {
    return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
                     GetPageSizeCached());
  }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;

  // A packed array of counters. Each counter occupies 2^n bits, enough to store
  // counter's max_value. Ctor will try to allocate the required buffer via
  // mapper->MapPackedCounterArrayBuffer and the caller is expected to check
  // whether the initialization was successful by checking IsAllocated() result.
  // For performance's sake, none of the accessors check the validity of the
  // arguments; it is assumed that index is always in [0, n) range and the value
  // is not incremented past max_value.
  class PackedCounterArray {
   public:
    template <typename MemoryMapper>
    PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapper *mapper)
        : n(num_counters) {
      CHECK_GT(num_counters, 0);
      CHECK_GT(max_value, 0);
      constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
      // Rounding counter storage size up to the power of two allows for using
      // bit shifts calculating particular counter's index and offset.
      uptr counter_size_bits =
          RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
      CHECK_LE(counter_size_bits, kMaxCounterBits);
      counter_size_bits_log = Log2(counter_size_bits);
      counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);

      uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
      CHECK_GT(packing_ratio, 0);
      packing_ratio_log = Log2(packing_ratio);
      bit_offset_mask = packing_ratio - 1;

      buffer = mapper->MapPackedCounterArrayBuffer(
          RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log);
    }

    bool IsAllocated() const {
      return !!buffer;
    }

    u64 GetCount() const {
      return n;
    }

    uptr Get(uptr i) const {
      DCHECK_LT(i, n);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      return (buffer[index] >> bit_offset) & counter_mask;
    }

    void Inc(uptr i) const {
      DCHECK_LT(Get(i), counter_mask);
      uptr index = i >> packing_ratio_log;
      uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
      buffer[index] += 1ULL << bit_offset;
    }

    void IncRange(uptr from, uptr to) const {
      DCHECK_LE(from, to);
      for (uptr i = from; i <= to; i++)
        Inc(i);
    }

   private:
    const u64 n;
    u64 counter_size_bits_log;
    u64 counter_mask;
    u64 packing_ratio_log;
    u64 bit_offset_mask;
    u64* buffer;
  };
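
  // [Editorial sketch, not part of the vendored sources.] Worked numbers for
  // the bit math above: with max_value == 5, counters need 3 bits, rounded up
  // to 4, so 16 counters pack into each u64 and both index and bit offset
  // reduce to shifts and masks.
  //
  //   #include <cassert>
  //   #include <cstdint>
  //   int main() {
  //     uint64_t counter_size_bits_log = 2;   // Log2(4-bit counters)
  //     uint64_t counter_mask = 0xF;
  //     uint64_t packing_ratio_log = 4;       // Log2(16 counters per u64)
  //     uint64_t bit_offset_mask = 15;
  //     uint64_t buffer[2] = {0, 0};
  //     uint64_t i = 17;                      // increment counter #17
  //     uint64_t index = i >> packing_ratio_log;                       // 1
  //     uint64_t off = (i & bit_offset_mask) << counter_size_bits_log; // 4
  //     buffer[index] += 1ull << off;         // Inc(17)
  //     assert(((buffer[1] >> 4) & counter_mask) == 1);                // Get
  //   }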

  template <class MemoryMapperT>
  class FreePagesRangeTracker {
   public:
    FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
        : memory_mapper(mapper),
          class_id(class_id),
          page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)) {}

    void NextPage(bool freed) {
      if (freed) {
        if (!in_the_range) {
          current_range_start_page = current_page;
          in_the_range = true;
        }
      } else {
        CloseOpenedRange();
      }
      current_page++;
    }

    void Done() {
      CloseOpenedRange();
    }

   private:
    void CloseOpenedRange() {
      if (in_the_range) {
        memory_mapper->ReleasePageRangeToOS(
            class_id, current_range_start_page << page_size_scaled_log,
            current_page << page_size_scaled_log);
        in_the_range = false;
      }
    }

    MemoryMapperT *const memory_mapper = nullptr;
    const uptr class_id = 0;
    const uptr page_size_scaled_log = 0;
    bool in_the_range = false;
    uptr current_page = 0;
    uptr current_range_start_page = 0;
  };

  // Iterates over the free_array to identify memory pages containing freed
  // chunks only and returns these pages back to OS.
  // allocated_pages_count is the total number of pages allocated for the
  // current bucket.
  template <typename MemoryMapper>
  static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                    uptr free_array_count, uptr chunk_size,
                                    uptr allocated_pages_count,
                                    MemoryMapper *memory_mapper,
                                    uptr class_id) {
    const uptr page_size = GetPageSizeCached();

    // Figure out the number of chunks per page and whether we can take a fast
    // path (the number of chunks per page is the same for all pages).
    uptr full_pages_chunk_count_max;
    bool same_chunk_count_per_page;
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Same number of chunks per page, no cross overs.
      full_pages_chunk_count_max = page_size / chunk_size;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
        chunk_size % (page_size % chunk_size) == 0) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks, but all pages contain the same
      // number of chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size <= page_size) {
      // Some chunks are crossing page boundaries, which means that the page
      // contains one or two partial chunks.
      full_pages_chunk_count_max = page_size / chunk_size + 2;
      same_chunk_count_per_page = false;
    } else if (chunk_size > page_size && chunk_size % page_size == 0) {
      // One chunk covers multiple pages, no cross overs.
      full_pages_chunk_count_max = 1;
      same_chunk_count_per_page = true;
    } else if (chunk_size > page_size) {
      // One chunk covers multiple pages, some chunks are crossing page
      // boundaries. Some pages contain one chunk, some contain two.
      full_pages_chunk_count_max = 2;
      same_chunk_count_per_page = false;
    } else {
      UNREACHABLE("All chunk_size/page_size ratios must be handled.");
    }

    PackedCounterArray counters(allocated_pages_count,
                                full_pages_chunk_count_max, memory_mapper);
    if (!counters.IsAllocated())
      return;

    const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
    const uptr page_size_scaled = page_size >> kCompactPtrScale;
    const uptr page_size_scaled_log = Log2(page_size_scaled);

    // Iterate over free chunks and count how many free chunks affect each
    // allocated page.
    if (chunk_size <= page_size && page_size % chunk_size == 0) {
      // Each chunk affects one page only.
      for (uptr i = 0; i < free_array_count; i++)
        counters.Inc(free_array[i] >> page_size_scaled_log);
    } else {
      // In all other cases chunks might affect more than one page.
      for (uptr i = 0; i < free_array_count; i++) {
        counters.IncRange(
            free_array[i] >> page_size_scaled_log,
            (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
      }
    }

    // Iterate over pages detecting ranges of pages with chunk counters equal
    // to the expected number of chunks for the particular page.
    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
    if (same_chunk_count_per_page) {
      // Fast path, every page has the same number of chunks affecting it.
      for (uptr i = 0; i < counters.GetCount(); i++)
        range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
    } else {
      // Slow path, go through the pages keeping count how many chunks affect
      // each page.
      const uptr pn =
          chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
      const uptr pnc = pn * chunk_size_scaled;
      // The idea is to increment the current page pointer by the first chunk
      // size, middle portion size (the portion of the page covered by chunks
      // except the first and the last one) and then the last chunk size, adding
      // up the number of chunks on the current page and checking on every step
      // whether the page boundary was crossed.
      uptr prev_page_boundary = 0;
      uptr current_boundary = 0;
      for (uptr i = 0; i < counters.GetCount(); i++) {
        uptr page_boundary = prev_page_boundary + page_size_scaled;
        uptr chunks_per_page = pn;
        if (current_boundary < page_boundary) {
          if (current_boundary > prev_page_boundary)
            chunks_per_page++;
          current_boundary += pnc;
          if (current_boundary < page_boundary) {
            chunks_per_page++;
            current_boundary += chunk_size_scaled;
          }
        }
        prev_page_boundary = page_boundary;

        range_tracker.NextPage(counters.Get(i) == chunks_per_page);
      }
    }
    range_tracker.Done();
  }
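
  // [Editorial sketch, not part of the vendored sources.] The five-way
  // classification above decides how many free chunks can maximally touch one
  // page and whether that count is uniform across pages. A standalone
  // illustration for a 4 KiB page and a few illustrative chunk sizes:
  //
  //   #include <cstdio>
  //   int main() {
  //     const unsigned page = 4096;
  //     const unsigned sizes[] = {256, 80, 96, 8192, 6144};
  //     for (unsigned chunk : sizes) {
  //       unsigned max_chunks; bool uniform;
  //       if (chunk <= page && page % chunk == 0) {
  //         max_chunks = page / chunk;     uniform = true;   // 256
  //       } else if (chunk <= page && chunk % (page % chunk) == 0) {
  //         max_chunks = page / chunk + 1; uniform = true;   // 80
  //       } else if (chunk <= page) {
  //         max_chunks = page / chunk + 2; uniform = false;  // 96
  //       } else if (chunk % page == 0) {
  //         max_chunks = 1;                uniform = true;   // 8192
  //       } else {
  //         max_chunks = 2;                uniform = false;  // 6144
  //       }
  //       printf("chunk %u: max %u/page, uniform=%d\n",
  //              chunk, max_chunks, uniform);
  //     }
  //   }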

 private:
  friend class MemoryMapper<ThisT>;

  ReservedAddressRange address_range;

  static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
  // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
  // elements, but in reality this will not happen. For simplicity we
  // dedicate 1/8 of the region's virtual space to FreeArray.
  static const uptr kFreeArraySize = kRegionSize / 8;

  static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
  uptr NonConstSpaceBeg;
  uptr SpaceBeg() const {
    return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
  }
  uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
  // kRegionSize should be able to satisfy the largest size class.
  static_assert(kRegionSize >= SizeClassMap::kMaxSize,
                "Region size must be at least the largest size class");
  // kRegionSize must be <= 2^36, see CompactPtrT.
  COMPILER_CHECK((kRegionSize) <=
                 (1ULL << (sizeof(CompactPtrT) * 8 + kCompactPtrScale)));
  // Call mmap for user memory with at least this size.
  static const uptr kUserMapSize = 1 << 18;
  // Call mmap for metadata memory with at least this size.
  static const uptr kMetaMapSize = 1 << 16;
  // Call mmap for free array memory with at least this size.
  static const uptr kFreeArrayMapSize = 1 << 18;

  atomic_sint32_t release_to_os_interval_ms_;

  uptr RegionInfoSpace;

  // True if the user has already mapped the entire heap R/W.
  bool PremappedHeap;

  struct Stats {
    uptr n_allocated;
    uptr n_freed;
  };

  struct ReleaseToOsInfo {
    uptr n_freed_at_last_release;
    uptr num_releases;
    u64 last_release_at_ns;
    u64 last_released_bytes;
  };

  struct alignas(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
    Mutex mutex;
    uptr num_freed_chunks;  // Number of elements in the freearray.
    uptr mapped_free_array;  // Bytes mapped for freearray.
    uptr allocated_user;  // Bytes allocated for user memory.
    uptr allocated_meta;  // Bytes allocated for metadata.
    uptr mapped_user;  // Bytes mapped for user memory.
    uptr mapped_meta;  // Bytes mapped for metadata.
    u32 rand_state;  // Seed for random shuffle, used if kRandomShuffleChunks.
    bool exhausted;  // Whether region is out of space for new chunks.
    Stats stats;
    ReleaseToOsInfo rtoi;
  };
  COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);

  RegionInfo *GetRegionInfo(uptr class_id) const {
    DCHECK_LT(class_id, kNumClasses);
    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
    return &regions[class_id];
  }

  uptr GetMetadataEnd(uptr region_beg) const {
    return region_beg + kRegionSize - kFreeArraySize;
  }

  uptr GetChunkIdx(uptr chunk, uptr size) const {
    if (!kUsingConstantSpaceBeg)
      chunk -= SpaceBeg();

    uptr offset = chunk % kRegionSize;
    // Here we divide by a non-constant. This is costly.
    // size always fits into 32-bits. If the offset fits too, use 32-bit div.
    if (offset >> (SANITIZER_WORDSIZE / 2))
      return offset / size;
    return (u32)offset / (u32)size;
  }

  CompactPtrT *GetFreeArray(uptr region_beg) const {
    return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
  }

  bool MapWithCallback(uptr beg, uptr size, const char *name) {
    if (PremappedHeap)
      return beg >= NonConstSpaceBeg &&
             beg + size <= NonConstSpaceBeg + kSpaceSize;
    uptr mapped = address_range.Map(beg, size, name);
    if (UNLIKELY(!mapped))
      return false;
    CHECK_EQ(beg, mapped);
    MapUnmapCallback().OnMap(beg, size);
    return true;
  }

  void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
    if (PremappedHeap) {
      CHECK_GE(beg, NonConstSpaceBeg);
      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
      return;
    }
    CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
    MapUnmapCallback().OnMap(beg, size);
  }

  void UnmapWithCallbackOrDie(uptr beg, uptr size) {
    if (PremappedHeap)
      return;
    MapUnmapCallback().OnUnmap(beg, size);
    address_range.Unmap(beg, size);
  }

  bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
                            uptr num_freed_chunks) {
    uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
    if (region->mapped_free_array < needed_space) {
      uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
      CHECK_LE(new_mapped_free_array, kFreeArraySize);
      uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
                             region->mapped_free_array;
      uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
      if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
                                    "SizeClassAllocator: freearray")))
        return false;
      region->mapped_free_array = new_mapped_free_array;
    }
    return true;
  }

  // Check whether this size class is exhausted.
  bool IsRegionExhausted(RegionInfo *region, uptr class_id,
                         uptr additional_map_size) {
    if (LIKELY(region->mapped_user + region->mapped_meta +
               additional_map_size <= kRegionSize - kFreeArraySize))
      return false;
    if (!region->exhausted) {
      region->exhausted = true;
      Printf("%s: Out of memory. ", SanitizerToolName);
      Printf(
          "The process has exhausted %zu MB for size class %zu (%zu bytes).\n",
          kRegionSize >> 20, class_id, ClassIdToSize(class_id));
    }
    return true;
  }

  NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
                                  RegionInfo *region, uptr requested_count) {
    // region->mutex is held.
    const uptr region_beg = GetRegionBeginBySizeClass(class_id);
    const uptr size = ClassIdToSize(class_id);

    const uptr total_user_bytes =
        region->allocated_user + requested_count * size;
    // Map more space for chunks, if necessary.
    if (LIKELY(total_user_bytes > region->mapped_user)) {
      if (UNLIKELY(region->mapped_user == 0)) {
        if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
          // The random state is initialized from ASLR.
          region->rand_state = static_cast<u32>(region_beg >> 12);
        // Postpone the first release to OS attempt for ReleaseToOSIntervalMs,
        // preventing just allocated memory from being released sooner than
        // necessary and also preventing extraneous ReleaseMemoryPagesToOS calls
        // for short lived processes.
        // Do it only when the feature is turned on, to avoid a potentially
        // extraneous syscall.
        if (ReleaseToOSIntervalMs() >= 0)
          region->rtoi.last_release_at_ns = MonotonicNanoTime();
      }
      // Do the mmap for the user memory.
      const uptr user_map_size =
          RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
      if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
        return false;
      if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
                                    user_map_size,
                                    "SizeClassAllocator: region data")))
        return false;
      stat->Add(AllocatorStatMapped, user_map_size);
      region->mapped_user += user_map_size;
    }
    const uptr new_chunks_count =
        (region->mapped_user - region->allocated_user) / size;

    if (kMetadataSize) {
      // Calculate the required space for metadata.
      const uptr total_meta_bytes =
          region->allocated_meta + new_chunks_count * kMetadataSize;
      const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
          RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
      // Map more space for metadata, if necessary.
      if (meta_map_size) {
        if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
          return false;
        if (UNLIKELY(!MapWithCallback(
                GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
                meta_map_size, "SizeClassAllocator: region metadata")))
          return false;
        region->mapped_meta += meta_map_size;
      }
    }

    // If necessary, allocate more space for the free array and populate it with
    // newly allocated chunks.
    const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
    if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
      return false;
    CompactPtrT *free_array = GetFreeArray(region_beg);
    for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
         i++, chunk += size)
      free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
    if (kRandomShuffleChunks)
      RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
                    &region->rand_state);

    // All necessary memory is mapped and now it is safe to advance all
    // 'allocated_*' counters.
    region->num_freed_chunks += new_chunks_count;
    region->allocated_user += new_chunks_count * size;
    CHECK_LE(region->allocated_user, region->mapped_user);
    region->allocated_meta += new_chunks_count * kMetadataSize;
    CHECK_LE(region->allocated_meta, region->mapped_meta);
    region->exhausted = false;

    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
    // MaybeReleaseToOS from releasing just allocated pages or protect these
    // not yet used chunks some other way.

    return true;
  }

  // Attempts to release RAM occupied by freed chunks back to OS. The region is
  // expected to be locked.
  //
  // TODO(morehouse): Support a callback on memory release so HWASan can release
  // aliases as well.
  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
                        bool force) {
    RegionInfo *region = GetRegionInfo(class_id);
    const uptr chunk_size = ClassIdToSize(class_id);
    const uptr page_size = GetPageSizeCached();

    uptr n = region->num_freed_chunks;
    if (n * chunk_size < page_size)
      return;  // No chance to release anything.
    if ((region->stats.n_freed -
         region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
      return;  // Nothing new to release.
    }

    if (!force) {
      s32 interval_ms = ReleaseToOSIntervalMs();
      if (interval_ms < 0)
        return;

      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
          MonotonicNanoTime()) {
        return;  // Memory was returned recently.
      }
    }

    ReleaseFreeMemoryToOS(
        GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
        RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
        class_id);

    uptr ranges, bytes;
    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
      region->rtoi.n_freed_at_last_release = region->stats.n_freed;
      region->rtoi.num_releases += ranges;
      region->rtoi.last_released_bytes = bytes;
    }
    region->rtoi.last_release_at_ns = MonotonicNanoTime();
  }
};
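
// [Editorial sketch, not part of the vendored sources.] A hypothetical Params
// class for SizeClassAllocator64, mirroring the members the template reads
// above (kSpaceBeg, kSpaceSize, kMetadataSize, SizeClassMap, MapUnmapCallback,
// kFlags, AddressSpaceView). The constants are illustrative, not the values
// any particular tool uses:
//
//   struct ExampleAllocatorParams {
//     static const uptr kSpaceBeg = ~(uptr)0;  // chosen dynamically by mmap
//     static const uptr kSpaceSize = 0x40000000000ULL;  // 4 TiB
//     static const uptr kMetadataSize = 0;
//     typedef DefaultSizeClassMap SizeClassMap;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags =
//         SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
//     using AddressSpaceView = LocalAddressSpaceView;
//   };
//   typedef SizeClassAllocator64<ExampleAllocatorParams> ExamplePrimary;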
144
lib/libtsan/sanitizer_common/sanitizer_allocator_report.cpp
Normal file
@@ -0,0 +1,144 @@
//===-- sanitizer_allocator_report.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
///
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
#include "sanitizer_allocator_report.h"
#include "sanitizer_common.h"
#include "sanitizer_report_decorator.h"

namespace __sanitizer {

class ScopedAllocatorErrorReport {
 public:
  ScopedAllocatorErrorReport(const char *error_summary_,
                             const StackTrace *stack_)
      : error_summary(error_summary_),
        stack(stack_) {
    Printf("%s", d.Error());
  }
  ~ScopedAllocatorErrorReport() {
    Printf("%s", d.Default());
    stack->Print();
    PrintHintAllocatorCannotReturnNull();
    ReportErrorSummary(error_summary, stack);
  }

 private:
  ScopedErrorReportLock lock;
  const char *error_summary;
  const StackTrace* const stack;
  const SanitizerCommonDecorator d;
};

void NORETURN ReportCallocOverflow(uptr count, uptr size,
                                   const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("calloc-overflow", stack);
    Report("ERROR: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
  }
  Die();
}

void NORETURN ReportReallocArrayOverflow(uptr count, uptr size,
                                         const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("reallocarray-overflow", stack);
    Report(
        "ERROR: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
        "cannot be represented in type size_t\n",
        SanitizerToolName, count, size);
  }
  Die();
}

void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("pvalloc-overflow", stack);
    Report("ERROR: %s: pvalloc parameters overflow: size 0x%zx rounded up to "
           "system page size 0x%zx cannot be represented in type size_t\n",
           SanitizerToolName, size, GetPageSizeCached());
  }
  Die();
}

void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
                                               const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-allocation-alignment", stack);
    Report("ERROR: %s: invalid allocation alignment: %zd, alignment must be a "
           "power of two\n", SanitizerToolName, alignment);
  }
  Die();
}

void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                                 const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-aligned-alloc-alignment", stack);
#if SANITIZER_POSIX
    Report("ERROR: %s: invalid alignment requested in "
           "aligned_alloc: %zd, alignment must be a power of two and the "
           "requested size 0x%zx must be a multiple of alignment\n",
           SanitizerToolName, alignment, size);
#else
    Report("ERROR: %s: invalid alignment requested in aligned_alloc: %zd, "
           "the requested size 0x%zx must be a multiple of alignment\n",
           SanitizerToolName, alignment, size);
#endif
  }
  Die();
}

void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
                                                  const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-posix-memalign-alignment",
                                      stack);
    Report(
        "ERROR: %s: invalid alignment requested in "
        "posix_memalign: %zd, alignment must be a power of two and a "
        "multiple of sizeof(void*) == %zd\n",
        SanitizerToolName, alignment, sizeof(void *));
  }
  Die();
}

void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
                                         const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("allocation-size-too-big", stack);
    Report("ERROR: %s: requested allocation size 0x%zx exceeds maximum "
           "supported size of 0x%zx\n", SanitizerToolName, user_size, max_size);
  }
  Die();
}

void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("out-of-memory", stack);
    ERROR_OOM("allocator is trying to allocate 0x%zx bytes\n", requested_size);
  }
  Die();
}

void NORETURN ReportRssLimitExceeded(const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("rss-limit-exceeded", stack);
    Report("ERROR: %s: allocator exceeded the RSS limit\n", SanitizerToolName);
  }
  Die();
}

}  // namespace __sanitizer
40
lib/libtsan/sanitizer_common/sanitizer_allocator_report.h
Normal file
@@ -0,0 +1,40 @@
//===-- sanitizer_allocator_report.h ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
///
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ALLOCATOR_REPORT_H
#define SANITIZER_ALLOCATOR_REPORT_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_stacktrace.h"

namespace __sanitizer {

void NORETURN ReportCallocOverflow(uptr count, uptr size,
                                   const StackTrace *stack);
void NORETURN ReportReallocArrayOverflow(uptr count, uptr size,
                                         const StackTrace *stack);
void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack);
void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
                                               const StackTrace *stack);
void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                                 const StackTrace *stack);
void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
                                                  const StackTrace *stack);
void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
                                         const StackTrace *stack);
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack);
void NORETURN ReportRssLimitExceeded(const StackTrace *stack);

}  // namespace __sanitizer

#endif  // SANITIZER_ALLOCATOR_REPORT_H
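
// [Editorial sketch, not part of the vendored sources.] A hedged example of
// how a tool's calloc path might use these NORETURN helpers; ExampleAllocate
// is hypothetical and the overflow check is written out inline:
//
//   static void *ExampleCalloc(uptr count, uptr size,
//                              const StackTrace *stack) {
//     // count * size would not fit in a size_t: report and die.
//     if (UNLIKELY(size != 0 && count > ((uptr)-1) / size))
//       ReportCallocOverflow(count, size, stack);  // never returns
//     return ExampleAllocate(count * size);  // hypothetical allocation
//   }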
322
lib/libtsan/sanitizer_common/sanitizer_allocator_secondary.h
Normal file
@@ -0,0 +1,322 @@
//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// Part of the Sanitizer Allocator.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
#ifndef SANITIZER_ALLOCATOR_H
|
||||
#error This file must be included inside sanitizer_allocator.h
|
||||
#endif
|
||||
|
||||
// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
|
||||
// allocated chunks. To be used in memory constrained or not memory hungry cases
|
||||
// (currently, 32 bits and internal allocator).
|
||||
class LargeMmapAllocatorPtrArrayStatic {
|
||||
public:
|
||||
inline void *Init() { return &p_[0]; }
|
||||
inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
|
||||
private:
|
||||
static const int kMaxNumChunks = 1 << 15;
|
||||
uptr p_[kMaxNumChunks];
|
||||
};
|
||||
|
||||
// Much less restricted LargeMmapAllocator chunks list (comparing to
|
||||
// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
|
||||
// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the
|
||||
// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
|
||||
class LargeMmapAllocatorPtrArrayDynamic {
|
||||
public:
|
||||
inline void *Init() {
|
||||
uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
|
||||
SecondaryAllocatorName);
|
||||
CHECK(p);
|
||||
return reinterpret_cast<void*>(p);
|
||||
}
|
||||
|
||||
inline void EnsureSpace(uptr n) {
|
||||
CHECK_LT(n, kMaxNumChunks);
|
||||
DCHECK(n <= n_reserved_);
|
||||
if (UNLIKELY(n == n_reserved_)) {
|
||||
address_range_.MapOrDie(
|
||||
reinterpret_cast<uptr>(address_range_.base()) +
|
||||
n_reserved_ * sizeof(uptr),
|
||||
kChunksBlockCount * sizeof(uptr));
|
||||
n_reserved_ += kChunksBlockCount;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
static const int kMaxNumChunks = 1 << 20;
|
||||
static const int kChunksBlockCount = 1 << 14;
|
||||
ReservedAddressRange address_range_;
|
||||
uptr n_reserved_;
|
||||
};
|
||||
|
||||
#if SANITIZER_WORDSIZE == 32
|
||||
typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
|
||||
#else
|
||||
typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
|
||||
#endif
|
||||
|
||||
// This class can (de)allocate only large chunks of memory using mmap/unmap.
|
||||
// The main purpose of this allocator is to cover large and rare allocation
|
||||
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
|
||||
template <class MapUnmapCallback = NoOpMapUnmapCallback,
|
||||
class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
|
||||
class AddressSpaceViewTy = LocalAddressSpaceView>
|
||||
class LargeMmapAllocator {
|
||||
public:
|
||||
using AddressSpaceView = AddressSpaceViewTy;
|
||||
void InitLinkerInitialized() {
|
||||
page_size_ = GetPageSizeCached();
|
||||
chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
|
||||
}
|
||||
|
||||
void Init() {
|
||||
internal_memset(this, 0, sizeof(*this));
|
||||
InitLinkerInitialized();
|
||||
}
|
||||
|
||||
void *Allocate(AllocatorStats *stat, const uptr size, uptr alignment) {
|
||||
CHECK(IsPowerOfTwo(alignment));
|
||||
uptr map_size = RoundUpMapSize(size);
|
||||
if (alignment > page_size_)
|
||||
map_size += alignment;
|
||||
// Overflow.
|
||||
if (map_size < size) {
|
||||
Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
|
||||
"0x%zx bytes with 0x%zx alignment requested\n",
|
||||
SanitizerToolName, map_size, alignment);
|
||||
return nullptr;
|
||||
}
|
||||
uptr map_beg = reinterpret_cast<uptr>(
|
||||
MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
|
||||
if (!map_beg)
|
||||
return nullptr;
|
||||
CHECK(IsAligned(map_beg, page_size_));
|
||||
uptr map_end = map_beg + map_size;
|
||||
uptr res = map_beg + page_size_;
|
||||
if (res & (alignment - 1)) // Align.
|
||||
res += alignment - (res & (alignment - 1));
|
||||
MapUnmapCallback().OnMapSecondary(map_beg, map_size, res, size);
|
||||
CHECK(IsAligned(res, alignment));
|
||||
CHECK(IsAligned(res, page_size_));
|
||||
CHECK_GE(res + size, map_beg);
|
||||
CHECK_LE(res + size, map_end);
|
||||
Header *h = GetHeader(res);
|
||||
h->size = size;
|
||||
h->map_beg = map_beg;
|
||||
h->map_size = map_size;
|
||||
uptr size_log = MostSignificantSetBitIndex(map_size);
|
||||
CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
|
||||
{
|
||||
SpinMutexLock l(&mutex_);
|
||||
ptr_array_.EnsureSpace(n_chunks_);
|
||||
uptr idx = n_chunks_++;
|
||||
h->chunk_idx = idx;
|
||||
chunks_[idx] = h;
|
||||
chunks_sorted_ = false;
|
||||
stats.n_allocs++;
|
||||
stats.currently_allocated += map_size;
|
||||
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
|
||||
stats.by_size_log[size_log]++;
|
||||
stat->Add(AllocatorStatAllocated, map_size);
|
||||
stat->Add(AllocatorStatMapped, map_size);
|
||||
}
|
||||
return reinterpret_cast<void*>(res);
|
||||
}
|
||||
|
||||
void Deallocate(AllocatorStats *stat, void *p) {
|
||||
Header *h = GetHeader(p);
|
||||
{
|
||||
SpinMutexLock l(&mutex_);
|
||||
uptr idx = h->chunk_idx;
|
||||
CHECK_EQ(chunks_[idx], h);
|
||||
CHECK_LT(idx, n_chunks_);
|
||||
chunks_[idx] = chunks_[--n_chunks_];
|
||||
chunks_[idx]->chunk_idx = idx;
|
||||
chunks_sorted_ = false;
|
||||
stats.n_frees++;
|
||||
stats.currently_allocated -= h->map_size;
|
||||
stat->Sub(AllocatorStatAllocated, h->map_size);
|
||||
stat->Sub(AllocatorStatMapped, h->map_size);
|
||||
}
|
||||
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
|
||||
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
|
||||
}
|
||||
|
||||
uptr TotalMemoryUsed() {
|
||||
SpinMutexLock l(&mutex_);
|
||||
uptr res = 0;
|
||||
for (uptr i = 0; i < n_chunks_; i++) {
|
||||
Header *h = chunks_[i];
|
||||
CHECK_EQ(h->chunk_idx, i);
|
||||
res += RoundUpMapSize(h->size);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
bool PointerIsMine(const void *p) const {
|
||||
return GetBlockBegin(p) != nullptr;
|
||||
}
|
||||
|
||||
uptr GetActuallyAllocatedSize(void *p) {
|
||||
return RoundUpTo(GetHeader(p)->size, page_size_);
|
||||
}
|
||||
|
||||
// At least page_size_/2 metadata bytes is available.
|
||||
void *GetMetaData(const void *p) {
|
||||
// Too slow: CHECK_EQ(p, GetBlockBegin(p));
|
||||
if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
|
||||
Printf("%s: bad pointer %p\n", SanitizerToolName, p);
|
||||
CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
|
||||
}
|
||||
return GetHeader(p) + 1;
|
||||
}
|
||||
|
||||
void *GetBlockBegin(const void *ptr) const {
|
||||
uptr p = reinterpret_cast<uptr>(ptr);
|
||||
SpinMutexLock l(&mutex_);
|
||||
uptr nearest_chunk = 0;
|
||||
Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
|
||||
// Cache-friendly linear search.
|
||||
for (uptr i = 0; i < n_chunks_; i++) {
|
||||
uptr ch = reinterpret_cast<uptr>(chunks[i]);
|
||||
if (p < ch) continue; // p is at left to this chunk, skip it.
|
||||
if (p - ch < p - nearest_chunk)
|
||||
nearest_chunk = ch;
|
||||
}
|
||||
if (!nearest_chunk)
|
||||
return nullptr;
|
||||
const Header *h =
|
||||
AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
|
||||
Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
|
||||
CHECK_GE(nearest_chunk, h->map_beg);
|
||||
CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
|
||||
CHECK_LE(nearest_chunk, p);
|
||||
if (h->map_beg + h->map_size <= p)
|
||||
return nullptr;
|
||||
return GetUser(h_ptr);
|
||||
}
|
||||
|
||||
void EnsureSortedChunks() {
|
||||
if (chunks_sorted_) return;
|
||||
Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
|
||||
Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
|
||||
for (uptr i = 0; i < n_chunks_; i++)
|
||||
AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
|
||||
chunks_sorted_ = true;
|
||||
}
|
||||
|
||||
// This function does the same as GetBlockBegin, but is much faster.
|
||||
// Must be called with the allocator locked.
|
||||
  void *GetBlockBeginFastLocked(const void *ptr) {
    mutex_.CheckLocked();
    uptr p = reinterpret_cast<uptr>(ptr);
    uptr n = n_chunks_;
    if (!n) return nullptr;
    EnsureSortedChunks();
    Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
    auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
                     AddressSpaceView::Load(chunks[n - 1])->map_size;
    if (p < min_mmap_ || p >= max_mmap_)
      return nullptr;
    uptr beg = 0, end = n - 1;
    // This loop is a log(n) lower_bound. It does not check for the exact match
    // to avoid expensive cache-thrashing loads.
    while (end - beg >= 2) {
      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
      if (p < reinterpret_cast<uptr>(chunks[mid]))
        end = mid - 1;  // We are not interested in chunks[mid].
      else
        beg = mid;  // chunks[mid] may still be what we want.
    }

    if (beg < end) {
      CHECK_EQ(beg + 1, end);
      // There are 2 chunks left, choose one.
      if (p >= reinterpret_cast<uptr>(chunks[end]))
        beg = end;
    }

    const Header *h = AddressSpaceView::Load(chunks[beg]);
    Header *h_ptr = chunks[beg];
    if (h->map_beg + h->map_size <= p || p < h->map_beg)
      return nullptr;
    return GetUser(h_ptr);
  }
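
  // Usage sketch (illustrative only; `a` is a hypothetical allocator
  // instance and `p` a pointer into one of its chunks):
  //   a.ForceLock();
  //   void *beg = a.GetBlockBeginFastLocked(p);
  //   a.ForceUnlock();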

  void PrintStats() {
    Printf("Stats: LargeMmapAllocator: allocated %zd times, "
           "remains %zd (%zd K) max %zd M; by size logs: ",
           stats.n_allocs, stats.n_allocs - stats.n_frees,
           stats.currently_allocated >> 10, stats.max_allocated >> 20);
    for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
      uptr c = stats.by_size_log[i];
      if (!c) continue;
      Printf("%zd:%zd; ", i, c);
    }
    Printf("\n");
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }

  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    EnsureSortedChunks();  // Avoid doing the sort while iterating.
    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
    for (uptr i = 0; i < n_chunks_; i++) {
      const Header *t = chunks[i];
      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
      // Consistency check: verify that the array did not change.
      CHECK_EQ(chunks[i], t);
      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
    }
  }

 private:
  struct Header {
    uptr map_beg;
    uptr map_size;
    uptr size;
    uptr chunk_idx;
  };

  Header *GetHeader(uptr p) {
    CHECK(IsAligned(p, page_size_));
    return reinterpret_cast<Header*>(p - page_size_);
  }
  Header *GetHeader(const void *p) {
    return GetHeader(reinterpret_cast<uptr>(p));
  }

  void *GetUser(const Header *h) const {
    CHECK(IsAligned((uptr)h, page_size_));
    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
  }

  uptr RoundUpMapSize(uptr size) {
    return RoundUpTo(size, page_size_) + page_size_;
  }

  uptr page_size_;
  Header **chunks_;
  PtrArrayT ptr_array_;
  uptr n_chunks_;
  bool chunks_sorted_;
  struct Stats {
    uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
  } stats;
  mutable StaticSpinMutex mutex_;
};
@@ -0,0 +1,241 @@
//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 always corresponds to size 0.
// The other sizes are controlled by the template parameters:
//   kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
//   kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
//   kMidSizeLog: the classes starting from 1 increase with step
//                2^kMinSizeLog until 2^kMidSizeLog.
//   kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
//             E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
//             look like 0b1xx0..0, where x is either 0 or 1.
//
// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
//
// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
// Next 4 classes: 256 + i * 64 (i = 1 to 4).
// Next 4 classes: 512 + i * 128 (i = 1 to 4).
// ...
// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
//
// This structure of the size class map gives us:
//   - Efficient table-free class-to-size and size-to-class functions.
//   - Difference between two consecutive size classes is between 14% and 25%.
//
// This class also gives a hint to a thread-caching allocator about the amount
// of chunks that need to be cached per-thread:
//  - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
//    The actual number is computed in TransferBatch.
//  - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// Part of output of SizeClassMap::Print():
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
//
// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
//
// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
//
// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
//
// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
//
// ...
//
// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
//
//
// Another example (kNumBits=2):
// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26

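// Worked example (a sketch, using the DefaultSizeClassMap parameters defined
// below: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, so S=2, M=3, kMidSize=256,
// kMidClass=16). For size = 384: l = MostSignificantSetBitIndex(384) = 8,
// hbits = (384 >> 6) & 3 = 2, lbits = 384 & 63 = 0, l1 = 8 - 8 = 0, so
// ClassID(384) = 16 + (0 << 2) + 2 + 0 = 18, matching "c18 => s: 384" above.
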
template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
          uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
class SizeClassMap {
  static const uptr kMinSize = 1 << kMinSizeLog;
  static const uptr kMidSize = 1 << kMidSizeLog;
  static const uptr kMidClass = kMidSize / kMinSize;
  static const uptr S = kNumBits - 1;
  static const uptr M = (1 << S) - 1;

 public:
  // kMaxNumCachedHintT is a power of two. It serves as a hint
  // for the size of TransferBatch, the actual size could be a bit smaller.
  static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
  COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);

  static const uptr kMaxSize = 1UL << kMaxSizeLog;
  static const uptr kNumClasses =
      kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
  static const uptr kLargestClassID = kNumClasses - 2;
  static const uptr kBatchClassID = kNumClasses - 1;
  COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
  static const uptr kNumClassesRounded =
      kNumClasses <= 32 ? 32 :
      kNumClasses <= 64 ? 64 :
      kNumClasses <= 128 ? 128 : 256;

  static uptr Size(uptr class_id) {
    // Estimate the result for kBatchClassID because this class does not know
    // the exact size of TransferBatch. It's OK since we are using the actual
    // sizeof(TransferBatch) where it matters.
    if (UNLIKELY(class_id == kBatchClassID))
      return kMaxNumCachedHint * sizeof(uptr);
    if (class_id <= kMidClass)
      return kMinSize * class_id;
    class_id -= kMidClass;
    uptr t = kMidSize << (class_id >> S);
    return t + (t >> S) * (class_id & M);
  }

  static uptr ClassID(uptr size) {
    if (UNLIKELY(size > kMaxSize))
      return 0;
    if (size <= kMidSize)
      return (size + kMinSize - 1) >> kMinSizeLog;
    const uptr l = MostSignificantSetBitIndex(size);
    const uptr hbits = (size >> (l - S)) & M;
    const uptr lbits = size & ((1U << (l - S)) - 1);
    const uptr l1 = l - kMidSizeLog;
    return kMidClass + (l1 << S) + hbits + (lbits > 0);
  }

  static uptr MaxCachedHint(uptr size) {
    DCHECK_LE(size, kMaxSize);
    if (UNLIKELY(size == 0))
      return 0;
    uptr n;
    // Force a 32-bit division if the template parameters allow for it.
    if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
      n = (1UL << kMaxBytesCachedLog) / size;
    else
      n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
    return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
  }
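
  // Example (a sketch): for the DefaultSizeClassMap defined below
  // (kMaxNumCachedHint=128, kMaxBytesCachedLog=16), MaxCachedHint(1024) =
  // Min(128, 65536 / 1024) = 64, matching "cached: 64 65536" for c24 above.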

  static void Print() {
    uptr prev_s = 0;
    uptr total_cached = 0;
    for (uptr i = 0; i < kNumClasses; i++) {
      uptr s = Size(i);
      if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
        Printf("\n");
      uptr d = s - prev_s;
      uptr p = prev_s ? (d * 100 / prev_s) : 0;
      uptr l = s ? MostSignificantSetBitIndex(s) : 0;
      uptr cached = MaxCachedHint(s) * s;
      if (i == kBatchClassID)
        d = p = l = 0;
      Printf(
          "c%02zu => s: %zu diff: +%zu %02zu%% l %zu cached: %zu %zu; id %zu\n",
          i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
      total_cached += cached;
      prev_s = s;
    }
    Printf("Total cached: %zu\n", total_cached);
  }

  static void Validate() {
    for (uptr c = 1; c < kNumClasses; c++) {
      // Printf("Validate: c%zd\n", c);
      uptr s = Size(c);
      CHECK_NE(s, 0U);
      if (c == kBatchClassID)
        continue;
      CHECK_EQ(ClassID(s), c);
      if (c < kLargestClassID)
        CHECK_EQ(ClassID(s + 1), c + 1);
      CHECK_EQ(ClassID(s - 1), c);
      CHECK_GT(Size(c), Size(c - 1));
    }
    CHECK_EQ(ClassID(kMaxSize + 1), 0);

    for (uptr s = 1; s <= kMaxSize; s++) {
      uptr c = ClassID(s);
      // Printf("s%zd => c%zd\n", s, c);
      CHECK_LT(c, kNumClasses);
      CHECK_GE(Size(c), s);
      if (c > 0)
        CHECK_LT(Size(c - 1), s);
    }
  }
};

typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;

// The following SizeClassMap only holds a very small number of cached
// entries, allowing for denser per-class arrays, a smaller memory footprint,
// and usually better performance in threaded environments.
typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
// Similar to the VeryCompact map above, this one has a small number of
// different size classes, and also reduced thread-local caches.
typedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap;
103
lib/libtsan/sanitizer_common/sanitizer_allocator_stats.h
Normal file
@@ -0,0 +1,103 @@
//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Memory allocator statistics
enum AllocatorStat {
  AllocatorStatAllocated,
  AllocatorStatMapped,
  AllocatorStatCount
};

typedef uptr AllocatorStatCounters[AllocatorStatCount];

// Per-thread stats, live in per-thread cache.
class AllocatorStats {
 public:
  void Init() { internal_memset(this, 0, sizeof(*this)); }
  void Add(AllocatorStat i, uptr v) {
    atomic_fetch_add(&stats_[i], v, memory_order_relaxed);
  }

  void Sub(AllocatorStat i, uptr v) {
    atomic_fetch_sub(&stats_[i], v, memory_order_relaxed);
  }

  void Set(AllocatorStat i, uptr v) {
    atomic_store(&stats_[i], v, memory_order_relaxed);
  }

  uptr Get(AllocatorStat i) const {
    return atomic_load(&stats_[i], memory_order_relaxed);
  }

 private:
  friend class AllocatorGlobalStats;
  AllocatorStats *next_;
  AllocatorStats *prev_;
  atomic_uintptr_t stats_[AllocatorStatCount];
};

// Global stats, used for aggregation and querying.
class AllocatorGlobalStats : public AllocatorStats {
 public:
  void Init() {
    internal_memset(this, 0, sizeof(*this));
  }

  void Register(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    LazyInit();
    s->next_ = next_;
    s->prev_ = this;
    next_->prev_ = s;
    next_ = s;
  }

  void Unregister(AllocatorStats *s) {
    SpinMutexLock l(&mu_);
    s->prev_->next_ = s->next_;
    s->next_->prev_ = s->prev_;
    for (int i = 0; i < AllocatorStatCount; i++)
      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
  }

  void Get(AllocatorStatCounters s) const {
    internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
    SpinMutexLock l(&mu_);
    const AllocatorStats *stats = this;
    for (; stats;) {
      for (int i = 0; i < AllocatorStatCount; i++)
        s[i] += stats->Get(AllocatorStat(i));
      stats = stats->next_;
      if (stats == this)
        break;
    }
    // All stats must be non-negative.
    for (int i = 0; i < AllocatorStatCount; i++)
      s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
  }

 private:
  void LazyInit() {
    if (!next_) {
      next_ = this;
      prev_ = this;
    }
  }

  mutable StaticSpinMutex mu_;
};
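
// Usage sketch (illustrative only; `g_stats` and `thread_stats` are
// hypothetical names). Per-thread stats register themselves in the global
// object's circular doubly-linked list and are aggregated by Get():
//   static AllocatorGlobalStats g_stats;  // zero-initialized static storage
//   AllocatorStats thread_stats;
//   thread_stats.Init();
//   g_stats.Register(&thread_stats);
//   thread_stats.Add(AllocatorStatAllocated, 4096);
//   AllocatorStatCounters totals;
//   g_stats.Get(totals);  // sums the global and all registered stats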

123
lib/libtsan/sanitizer_common/sanitizer_array_ref.h
Normal file
@@ -0,0 +1,123 @@
//===-- sanitizer_array_ref.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ARRAY_REF_H
#define SANITIZER_ARRAY_REF_H

#include "sanitizer_internal_defs.h"

namespace __sanitizer {

/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
///
/// This class does not own the underlying data, it is expected to be used in
/// situations where the data resides in some other buffer, whose lifetime
/// extends past that of the ArrayRef. For this reason, it is not in general
/// safe to store an ArrayRef.
///
/// This is intended to be trivially copyable, so it should be passed by
/// value.
template <typename T>
class ArrayRef {
 public:
  constexpr ArrayRef() {}
  constexpr ArrayRef(const T *begin, const T *end) : begin_(begin), end_(end) {
    DCHECK(empty() || begin);
  }
  constexpr ArrayRef(const T *data, uptr length)
      : ArrayRef(data, data + length) {}
  template <uptr N>
  constexpr ArrayRef(const T (&src)[N]) : ArrayRef(src, src + N) {}
  template <typename C>
  constexpr ArrayRef(const C &src)
      : ArrayRef(src.data(), src.data() + src.size()) {}
  ArrayRef(const T &one_elt) : ArrayRef(&one_elt, &one_elt + 1) {}

  const T *data() const { return empty() ? nullptr : begin_; }

  const T *begin() const { return begin_; }
  const T *end() const { return end_; }

  bool empty() const { return begin_ == end_; }

  uptr size() const { return end_ - begin_; }

  /// equals - Check for element-wise equality.
  bool equals(ArrayRef rhs) const {
    if (size() != rhs.size())
      return false;
    auto r = rhs.begin();
    for (auto &l : *this) {
      if (!(l == *r))
        return false;
      ++r;
    }
    return true;
  }

  /// slice(n, m) - Chop off the first N elements of the array, and keep M
  /// elements in the array.
  ArrayRef<T> slice(uptr N, uptr M) const {
    DCHECK_LE(N + M, size());
    return ArrayRef<T>(data() + N, M);
  }

  /// slice(n) - Chop off the first N elements of the array.
  ArrayRef<T> slice(uptr N) const { return slice(N, size() - N); }

  /// Drop the first \p N elements of the array.
  ArrayRef<T> drop_front(uptr N = 1) const {
    DCHECK_GE(size(), N);
    return slice(N, size() - N);
  }

  /// Drop the last \p N elements of the array.
  ArrayRef<T> drop_back(uptr N = 1) const {
    DCHECK_GE(size(), N);
    return slice(0, size() - N);
  }

  /// Return a copy of *this with only the first \p N elements.
  ArrayRef<T> take_front(uptr N = 1) const {
    if (N >= size())
      return *this;
    return drop_back(size() - N);
  }

  /// Return a copy of *this with only the last \p N elements.
  ArrayRef<T> take_back(uptr N = 1) const {
    if (N >= size())
      return *this;
    return drop_front(size() - N);
  }

  const T &operator[](uptr index) const {
    DCHECK_LT(index, size());
    return begin_[index];
  }

 private:
  const T *begin_ = nullptr;
  const T *end_ = nullptr;
};

template <typename T>
inline bool operator==(ArrayRef<T> lhs, ArrayRef<T> rhs) {
  return lhs.equals(rhs);
}

template <typename T>
inline bool operator!=(ArrayRef<T> lhs, ArrayRef<T> rhs) {
  return !(lhs == rhs);
}
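
// Usage sketch (illustrative only):
//   int buf[4] = {1, 2, 3, 4};
//   ArrayRef<int> a(buf);                  // from a C array
//   ArrayRef<int> tail = a.drop_front();   // {2, 3, 4}
//   ArrayRef<int> mid = a.slice(1, 2);     // {2, 3}
//   CHECK(tail.take_front(2) == mid);      // element-wise equality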

}  // namespace __sanitizer

#endif  // SANITIZER_ARRAY_REF_H
156
lib/libtsan/sanitizer_common/sanitizer_asm.h
Normal file
@@ -0,0 +1,156 @@
//===-- sanitizer_asm.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Various support for assembler.
//
//===----------------------------------------------------------------------===//

// Some toolchains do not support .cfi asm directives, so we have to hide
// them inside macros.
#if defined(__clang__) || \
    (defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM))
// GCC defines __GCC_HAVE_DWARF2_CFI_ASM if it supports CFI.
// Clang seems to support CFI by default (or not?).
// We need two versions of macros: for inline asm and standalone asm files.
# define CFI_INL_ADJUST_CFA_OFFSET(n) ".cfi_adjust_cfa_offset " #n ";"

# define CFI_STARTPROC .cfi_startproc
# define CFI_ENDPROC .cfi_endproc
# define CFI_ADJUST_CFA_OFFSET(n) .cfi_adjust_cfa_offset n
# define CFI_DEF_CFA_OFFSET(n) .cfi_def_cfa_offset n
# define CFI_REL_OFFSET(reg, n) .cfi_rel_offset reg, n
# define CFI_OFFSET(reg, n) .cfi_offset reg, n
# define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
# define CFI_DEF_CFA(reg, n) .cfi_def_cfa reg, n
# define CFI_RESTORE(reg) .cfi_restore reg

#else  // No CFI
# define CFI_INL_ADJUST_CFA_OFFSET(n)
# define CFI_STARTPROC
# define CFI_ENDPROC
# define CFI_ADJUST_CFA_OFFSET(n)
# define CFI_DEF_CFA_OFFSET(n)
# define CFI_REL_OFFSET(reg, n)
# define CFI_OFFSET(reg, n)
# define CFI_DEF_CFA_REGISTER(reg)
# define CFI_DEF_CFA(reg, n)
# define CFI_RESTORE(reg)
#endif
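
// Usage sketch (illustrative only; `my_func` is a hypothetical label in a
// standalone assembly file):
//   my_func:
//     CFI_STARTPROC
//     ...
//     CFI_ENDPROC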

#if defined(__aarch64__) && defined(__ARM_FEATURE_BTI_DEFAULT)
# define ASM_STARTPROC CFI_STARTPROC; hint #34
# define C_ASM_STARTPROC SANITIZER_STRINGIFY(CFI_STARTPROC) "\nhint #34"
#else
# define ASM_STARTPROC CFI_STARTPROC
# define C_ASM_STARTPROC SANITIZER_STRINGIFY(CFI_STARTPROC)
#endif
#define ASM_ENDPROC CFI_ENDPROC
#define C_ASM_ENDPROC SANITIZER_STRINGIFY(CFI_ENDPROC)

#if defined(__x86_64__) || defined(__i386__) || defined(__sparc__)
# define ASM_TAIL_CALL jmp
#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
      defined(__powerpc__) || defined(__loongarch_lp64)
# define ASM_TAIL_CALL b
#elif defined(__s390__)
# define ASM_TAIL_CALL jg
#elif defined(__riscv)
# define ASM_TAIL_CALL tail
#endif

// Currently, almost all of the shared libraries rely on the value of $t9 to
// get the address of the current function, instead of PCREL, even on MIPSr6.
// To be compatible with them, we have to set $t9 properly.
// MIPS uses the GOT to get the address of preemptible functions.
#if defined(__mips64)
# define C_ASM_TAIL_CALL(t_func, i_func)                    \
    "lui $t8, %hi(%neg(%gp_rel(" t_func ")))\n"             \
    "daddu $t8, $t8, $t9\n"                                 \
    "daddiu $t8, $t8, %lo(%neg(%gp_rel(" t_func ")))\n"     \
    "ld $t9, %got_disp(" i_func ")($t8)\n"                  \
    "jr $t9\n"
#elif defined(__mips__)
# define C_ASM_TAIL_CALL(t_func, i_func) \
    ".set noreorder\n"                   \
    ".cpload $t9\n"                      \
    ".set reorder\n"                     \
    "lw $t9, %got(" i_func ")($gp)\n"    \
    "jr $t9\n"
#elif defined(ASM_TAIL_CALL)
# define C_ASM_TAIL_CALL(t_func, i_func) \
    SANITIZER_STRINGIFY(ASM_TAIL_CALL) " " i_func
#endif

#if defined(__ELF__) && defined(__x86_64__) || defined(__i386__) || \
    defined(__riscv)
# define ASM_PREEMPTIBLE_SYM(sym) sym@plt
#else
# define ASM_PREEMPTIBLE_SYM(sym) sym
#endif

#if !defined(__APPLE__)
# define ASM_HIDDEN(symbol) .hidden symbol
# if defined(__arm__) || defined(__aarch64__)
#  define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
# else
#  define ASM_TYPE_FUNCTION(symbol) .type symbol, @function
# endif
# define ASM_SIZE(symbol) .size symbol, .-symbol
# define ASM_SYMBOL(symbol) symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
# if defined(__i386__) || defined(__powerpc__) || defined(__s390__) || \
     defined(__sparc__)
// For details, see interception.h
#  define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
#  define ASM_TRAMPOLINE_ALIAS(symbol, name) \
     .weak symbol;                           \
     .set symbol, ASM_WRAPPER_NAME(name)
#  define ASM_INTERCEPTOR_TRAMPOLINE(name)
#  define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 0
# else  // Architecture supports interceptor trampoline
// Keep trampoline implementation in sync with interception/interception.h
#  define ASM_WRAPPER_NAME(symbol) ___interceptor_##symbol
#  define ASM_TRAMPOLINE_ALIAS(symbol, name) \
     .weak symbol;                           \
     .set symbol, __interceptor_trampoline_##name
#  define ASM_INTERCEPTOR_TRAMPOLINE(name)                     \
     .weak __interceptor_##name;                               \
     .set __interceptor_##name, ASM_WRAPPER_NAME(name);        \
     .globl __interceptor_trampoline_##name;                   \
     ASM_TYPE_FUNCTION(__interceptor_trampoline_##name);       \
     __interceptor_trampoline_##name:                          \
             ASM_STARTPROC;                                    \
             ASM_TAIL_CALL ASM_PREEMPTIBLE_SYM(__interceptor_##name); \
             ASM_ENDPROC;                                      \
     ASM_SIZE(__interceptor_trampoline_##name)
#  define ASM_INTERCEPTOR_TRAMPOLINE_SUPPORT 1
# endif  // Architecture supports interceptor trampoline
#else
# define ASM_HIDDEN(symbol)
# define ASM_TYPE_FUNCTION(symbol)
# define ASM_SIZE(symbol)
# define ASM_SYMBOL(symbol) _##symbol
# define ASM_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
#endif

#if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \
                         defined(__Fuchsia__) || defined(__linux__))
// clang-format off
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
// clang-format on
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif

#if (defined(__x86_64__) || defined(__i386__)) && defined(__has_include) && __has_include(<cet.h>)
#include <cet.h>
#endif
#ifndef _CET_ENDBR
#define _CET_ENDBR
#endif
98
lib/libtsan/sanitizer_common/sanitizer_atomic.h
Normal file
@@ -0,0 +1,98 @@
//===-- sanitizer_atomic.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_H
#define SANITIZER_ATOMIC_H

#include "sanitizer_internal_defs.h"

namespace __sanitizer {

enum memory_order {
  // If the __atomic atomic builtins are supported (Clang/GCC), use the
  // compiler provided macro values so that we can map the atomic operations
  // to __atomic_* directly.
#ifdef __ATOMIC_SEQ_CST
  memory_order_relaxed = __ATOMIC_RELAXED,
  memory_order_consume = __ATOMIC_CONSUME,
  memory_order_acquire = __ATOMIC_ACQUIRE,
  memory_order_release = __ATOMIC_RELEASE,
  memory_order_acq_rel = __ATOMIC_ACQ_REL,
  memory_order_seq_cst = __ATOMIC_SEQ_CST
#else
  memory_order_relaxed = 1 << 0,
  memory_order_consume = 1 << 1,
  memory_order_acquire = 1 << 2,
  memory_order_release = 1 << 3,
  memory_order_acq_rel = 1 << 4,
  memory_order_seq_cst = 1 << 5
#endif
};

struct atomic_uint8_t {
  typedef u8 Type;
  volatile Type val_dont_use;
};

struct atomic_uint16_t {
  typedef u16 Type;
  volatile Type val_dont_use;
};

struct atomic_sint32_t {
  typedef s32 Type;
  volatile Type val_dont_use;
};

struct atomic_uint32_t {
  typedef u32 Type;
  volatile Type val_dont_use;
};

struct atomic_uint64_t {
  typedef u64 Type;
  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
  alignas(8) volatile Type val_dont_use;
};

struct atomic_uintptr_t {
  typedef uptr Type;
  volatile Type val_dont_use;
};

}  // namespace __sanitizer

#if defined(__clang__) || defined(__GNUC__)
# include "sanitizer_atomic_clang.h"
#elif defined(_MSC_VER)
# include "sanitizer_atomic_msvc.h"
#else
# error "Unsupported compiler"
#endif

namespace __sanitizer {

// Clutter-reducing helpers.

template<typename T>
inline typename T::Type atomic_load_relaxed(const volatile T *a) {
  return atomic_load(a, memory_order_relaxed);
}

template<typename T>
inline void atomic_store_relaxed(volatile T *a, typename T::Type v) {
  atomic_store(a, v, memory_order_relaxed);
}
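
// Usage sketch (illustrative only; `flag` is a hypothetical variable):
//   static atomic_uint32_t flag;           // zero-initialized
//   atomic_store_relaxed(&flag, 1u);
//   u32 v = atomic_load_relaxed(&flag);    // v == 1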

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_H
99
lib/libtsan/sanitizer_common/sanitizer_atomic_clang.h
Normal file
@@ -0,0 +1,99 @@
//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

namespace __sanitizer {

// We use the compiler builtin atomic operations for loads and stores, which
// generates correct code for all architectures, but may require libatomic
// on platforms where e.g. 64-bit atomics are not supported natively.

// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.

inline void atomic_signal_fence(memory_order mo) { __atomic_signal_fence(mo); }

inline void atomic_thread_fence(memory_order mo) { __atomic_thread_fence(mo); }

inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
#if defined(__i386__) || defined(__x86_64__)
  for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
#endif
}

template <typename T>
inline typename T::Type atomic_load(const volatile T *a, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
         mo == memory_order_acquire || mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_load_n(&a->val_dont_use, mo);
}

template <typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
         mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  __atomic_store_n(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_add(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v,
                                         memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_fetch_sub(&a->val_dont_use, v, mo);
}

template <typename T>
inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v,
                                        memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  return __atomic_exchange_n(&a->val_dont_use, v, mo);
}

template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  // Transitioned from __sync_val_compare_and_swap to support targets like
  // SPARC V8 that cannot inline atomic cmpxchg. __atomic_compare_exchange
  // can then be resolved from libatomic. __ATOMIC_SEQ_CST is used to best
  // match the __sync builtin memory order.
  return __atomic_compare_exchange(&a->val_dont_use, cmp, &xchg, false,
                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

template <typename T>
inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}
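
// Usage sketch (illustrative only; `counter` is a hypothetical variable):
// a typical CAS loop that retries until its increment wins the race.
//   static atomic_uint32_t counter;
//   u32 cur = atomic_load(&counter, memory_order_relaxed);
//   while (!atomic_compare_exchange_weak(&counter, &cur, cur + 1,
//                                        memory_order_acq_rel)) {
//     // On failure `cur` is updated to the freshly observed value; retry.
//   }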

}  // namespace __sanitizer

#undef ATOMIC_ORDER

#endif  // SANITIZER_ATOMIC_CLANG_H
256
lib/libtsan/sanitizer_common/sanitizer_atomic_msvc.h
Normal file
@@ -0,0 +1,256 @@
//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_MSVC_H
#define SANITIZER_ATOMIC_MSVC_H

extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
extern "C" void _mm_mfence();
#pragma intrinsic(_mm_mfence)
extern "C" void _mm_pause();
#pragma intrinsic(_mm_pause)
extern "C" char _InterlockedExchange8(char volatile *Addend, char Value);
#pragma intrinsic(_InterlockedExchange8)
extern "C" short _InterlockedExchange16(short volatile *Addend, short Value);
#pragma intrinsic(_InterlockedExchange16)
extern "C" long _InterlockedExchange(long volatile *Addend, long Value);
#pragma intrinsic(_InterlockedExchange)
extern "C" long _InterlockedExchangeAdd(long volatile *Addend, long Value);
#pragma intrinsic(_InterlockedExchangeAdd)
extern "C" char _InterlockedCompareExchange8(char volatile *Destination,
                                             char Exchange, char Comparand);
#pragma intrinsic(_InterlockedCompareExchange8)
extern "C" short _InterlockedCompareExchange16(short volatile *Destination,
                                               short Exchange, short Comparand);
#pragma intrinsic(_InterlockedCompareExchange16)
extern "C" long long _InterlockedCompareExchange64(
    long long volatile *Destination, long long Exchange, long long Comparand);
#pragma intrinsic(_InterlockedCompareExchange64)
extern "C" void *_InterlockedCompareExchangePointer(
    void *volatile *Destination,
    void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
extern "C" long __cdecl _InterlockedCompareExchange(long volatile *Destination,
                                                    long Exchange,
                                                    long Comparand);
#pragma intrinsic(_InterlockedCompareExchange)

#ifdef _WIN64
extern "C" long long _InterlockedExchangeAdd64(long long volatile *Addend,
                                               long long Value);
#pragma intrinsic(_InterlockedExchangeAdd64)
#endif

namespace __sanitizer {

inline void atomic_signal_fence(memory_order) {
  _ReadWriteBarrier();
}

inline void atomic_thread_fence(memory_order) {
  _mm_mfence();
}

inline void proc_yield(int cnt) {
  for (int i = 0; i < cnt; i++)
    _mm_pause();
}

template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_consume ||
         mo == memory_order_acquire || mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
  return v;
}

template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo == memory_order_relaxed || mo == memory_order_release ||
         mo == memory_order_seq_cst);
  DCHECK(!((uptr)a % sizeof(*a)));
  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
    atomic_signal_fence(memory_order_seq_cst);
    a->val_dont_use = v;
    atomic_signal_fence(memory_order_seq_cst);
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
}

inline u32 atomic_fetch_add(volatile atomic_uint32_t *a,
                            u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                      (long)v);
}

inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
                             uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
                                         (long long)v);
#else
  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                       (long)v);
#endif
}

inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
                            u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                      -(long)v);
}

inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
                             uptr v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
#ifdef _WIN64
  return (uptr)_InterlockedExchangeAdd64((volatile long long *)&a->val_dont_use,
                                         -(long long)v);
#else
  return (uptr)_InterlockedExchangeAdd((volatile long *)&a->val_dont_use,
                                       -(long)v);
#endif
}

inline u8 atomic_exchange(volatile atomic_uint8_t *a,
                          u8 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
}

inline u16 atomic_exchange(volatile atomic_uint16_t *a,
                           u16 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
}

inline u32 atomic_exchange(volatile atomic_uint32_t *a,
                           u32 v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
                                           u8 *cmp,
                                           u8 xchgv,
                                           memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  u8 cmpv = *cmp;
#ifdef _WIN64
  u8 prev = (u8)_InterlockedCompareExchange8(
      (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);
#else
  u8 prev;
  __asm {
    mov al, cmpv
    mov ecx, a
    mov dl, xchgv
    lock cmpxchg [ecx], dl
    mov prev, al
  }
#endif
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
                                           uptr *cmp,
                                           uptr xchg,
                                           memory_order mo) {
  uptr cmpv = *cmp;
  uptr prev = (uptr)_InterlockedCompareExchangePointer(
      (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
                                           u16 *cmp,
                                           u16 xchg,
                                           memory_order mo) {
  u16 cmpv = *cmp;
  u16 prev = (u16)_InterlockedCompareExchange16(
      (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
                                           u32 *cmp,
                                           u32 xchg,
                                           memory_order mo) {
  u32 cmpv = *cmp;
  u32 prev = (u32)_InterlockedCompareExchange(
      (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
                                           u64 *cmp,
                                           u64 xchg,
                                           memory_order mo) {
  u64 cmpv = *cmp;
  u64 prev = (u64)_InterlockedCompareExchange64(
      (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

template<typename T>
inline bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_MSVC_H
350
lib/libtsan/sanitizer_common/sanitizer_bitvector.h
Normal file
@@ -0,0 +1,350 @@
//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Specialized BitVector implementation.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_BITVECTOR_H
#define SANITIZER_BITVECTOR_H

#include "sanitizer_common.h"

namespace __sanitizer {

// Fixed size bit vector based on a single basic integer.
template <class basic_int_t = uptr>
class BasicBitVector {
 public:
  enum SizeEnum : uptr { kSize = sizeof(basic_int_t) * 8 };

  uptr size() const { return kSize; }
  // No CTOR.
  void clear() { bits_ = 0; }
  void setAll() { bits_ = ~(basic_int_t)0; }
  bool empty() const { return bits_ == 0; }

  // Returns true if the bit has changed from 0 to 1.
  bool setBit(uptr idx) {
    basic_int_t old = bits_;
    bits_ |= mask(idx);
    return bits_ != old;
  }

  // Returns true if the bit has changed from 1 to 0.
  bool clearBit(uptr idx) {
    basic_int_t old = bits_;
    bits_ &= ~mask(idx);
    return bits_ != old;
  }

  bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }

  uptr getAndClearFirstOne() {
    CHECK(!empty());
    uptr idx = LeastSignificantSetBitIndex(bits_);
    clearBit(idx);
    return idx;
  }

  // Do "this |= v" and return whether new bits have been added.
  bool setUnion(const BasicBitVector &v) {
    basic_int_t old = bits_;
    bits_ |= v.bits_;
    return bits_ != old;
  }

  // Do "this &= v" and return whether any bits have been removed.
  bool setIntersection(const BasicBitVector &v) {
    basic_int_t old = bits_;
    bits_ &= v.bits_;
    return bits_ != old;
  }

  // Do "this &= ~v" and return whether any bits have been removed.
  bool setDifference(const BasicBitVector &v) {
    basic_int_t old = bits_;
    bits_ &= ~v.bits_;
    return bits_ != old;
  }

  void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }

  // Returns true if 'this' intersects with 'v'.
  bool intersectsWith(const BasicBitVector &v) const {
    return (bits_ & v.bits_) != 0;
  }

  // for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
  //   uptr idx = it.next();
  //   use(idx);
  // }
  class Iterator {
   public:
    Iterator() { }
    explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
    bool hasNext() const { return !bv_.empty(); }
    uptr next() { return bv_.getAndClearFirstOne(); }
    void clear() { bv_.clear(); }
   private:
    BasicBitVector bv_;
  };

 private:
  basic_int_t mask(uptr idx) const {
    CHECK_LT(idx, size());
    return (basic_int_t)1UL << idx;
  }
  basic_int_t bits_;
};
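
// Usage sketch (illustrative only):
//   BasicBitVector<> bv;
//   bv.clear();                    // no constructor, so clear explicitly
//   bv.setBit(3);
//   bv.setBit(7);
//   uptr first = bv.getAndClearFirstOne();  // 3; only bit 7 remains set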

// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.
// The implementation is optimized for better performance on
// sparse bit vectors, i.e. those with few set bits.
template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
class TwoLevelBitVector {
  // This is essentially a 2-level bit vector.
  // A set bit in the first-level BV indicates that there are set bits
  // in the corresponding BV of the second level.
  // This structure allows O(kLevel1Size) time for clear() and empty(),
  // as well as fast handling of sparse BVs.
 public:
  enum SizeEnum : uptr { kSize = BV::kSize * BV::kSize * kLevel1Size };
  // No CTOR.

  uptr size() const { return kSize; }

  void clear() {
    for (uptr i = 0; i < kLevel1Size; i++)
      l1_[i].clear();
  }

  void setAll() {
    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
      l1_[i0].setAll();
      for (uptr i1 = 0; i1 < BV::kSize; i1++)
        l2_[i0][i1].setAll();
    }
  }

  bool empty() const {
    for (uptr i = 0; i < kLevel1Size; i++)
      if (!l1_[i].empty())
        return false;
    return true;
  }

  // Returns true if the bit has changed from 0 to 1.
  bool setBit(uptr idx) {
    check(idx);
    uptr i0 = idx0(idx);
    uptr i1 = idx1(idx);
    uptr i2 = idx2(idx);
    if (!l1_[i0].getBit(i1)) {
      l1_[i0].setBit(i1);
      l2_[i0][i1].clear();
    }
    bool res = l2_[i0][i1].setBit(i2);
    // Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
    //        idx, i0, i1, i2, res);
    return res;
  }

  bool clearBit(uptr idx) {
    check(idx);
    uptr i0 = idx0(idx);
    uptr i1 = idx1(idx);
    uptr i2 = idx2(idx);
    bool res = false;
    if (l1_[i0].getBit(i1)) {
      res = l2_[i0][i1].clearBit(i2);
      if (l2_[i0][i1].empty())
        l1_[i0].clearBit(i1);
    }
    return res;
  }

  bool getBit(uptr idx) const {
    check(idx);
    uptr i0 = idx0(idx);
    uptr i1 = idx1(idx);
    uptr i2 = idx2(idx);
    // Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
    return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
  }

  uptr getAndClearFirstOne() {
    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
      if (l1_[i0].empty()) continue;
      uptr i1 = l1_[i0].getAndClearFirstOne();
      uptr i2 = l2_[i0][i1].getAndClearFirstOne();
      if (!l2_[i0][i1].empty())
        l1_[i0].setBit(i1);
      uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
      // Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
      return res;
    }
    CHECK(0);
    return 0;
  }

  // Do "this |= v" and return whether new bits have been added.
  bool setUnion(const TwoLevelBitVector &v) {
    bool res = false;
    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
      BV t = v.l1_[i0];
      while (!t.empty()) {
        uptr i1 = t.getAndClearFirstOne();
        if (l1_[i0].setBit(i1))
          l2_[i0][i1].clear();
        if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
          res = true;
      }
    }
    return res;
  }

  // Do "this &= v" and return whether any bits have been removed.
  bool setIntersection(const TwoLevelBitVector &v) {
    bool res = false;
    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
      if (l1_[i0].setIntersection(v.l1_[i0]))
        res = true;
      if (!l1_[i0].empty()) {
        BV t = l1_[i0];
        while (!t.empty()) {
          uptr i1 = t.getAndClearFirstOne();
          if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
            res = true;
          if (l2_[i0][i1].empty())
            l1_[i0].clearBit(i1);
        }
      }
    }
    return res;
  }

  // Do "this &= ~v" and return whether any bits have been removed.
  bool setDifference(const TwoLevelBitVector &v) {
    bool res = false;
    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
      BV t = l1_[i0];
      t.setIntersection(v.l1_[i0]);
      while (!t.empty()) {
        uptr i1 = t.getAndClearFirstOne();
        if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
          res = true;
        if (l2_[i0][i1].empty())
          l1_[i0].clearBit(i1);
      }
    }
    return res;
  }

  void copyFrom(const TwoLevelBitVector &v) {
    clear();
    setUnion(v);
  }

  // Returns true if 'this' intersects with 'v'.
  bool intersectsWith(const TwoLevelBitVector &v) const {
    for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
      BV t = l1_[i0];
      t.setIntersection(v.l1_[i0]);
      while (!t.empty()) {
        uptr i1 = t.getAndClearFirstOne();
        if (!v.l1_[i0].getBit(i1)) continue;
        if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
          return true;
      }
    }
    return false;
  }

  // for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
  //   uptr idx = it.next();
  //   use(idx);
  // }
  class Iterator {
   public:
    Iterator() { }
    explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
      it1_.clear();
      it2_.clear();
    }

    bool hasNext() const {
      if (it1_.hasNext()) return true;
      for (uptr i = i0_; i < kLevel1Size; i++)
        if (!bv_.l1_[i].empty()) return true;
      return false;
    }

    uptr next() {
      // Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
      //        it2_.hasNext(), kSize);
      if (!it1_.hasNext() && !it2_.hasNext()) {
        for (; i0_ < kLevel1Size; i0_++) {
          if (bv_.l1_[i0_].empty()) continue;
          it1_ = typename BV::Iterator(bv_.l1_[i0_]);
          // Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
          //        it2_.hasNext(), kSize);
          break;
        }
      }
      if (!it2_.hasNext()) {
        CHECK(it1_.hasNext());
        i1_ = it1_.next();
        it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
        // Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
        //        it2_.hasNext(), kSize);
      }
      CHECK(it2_.hasNext());
      uptr i2 = it2_.next();
      uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
      // Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
      //        it1_.hasNext(), it2_.hasNext(), kSize, res);
      if (!it1_.hasNext() && !it2_.hasNext())
        i0_++;
      return res;
    }

   private:
    const TwoLevelBitVector &bv_;
    uptr i0_, i1_;
    typename BV::Iterator it1_, it2_;
  };

 private:
  void check(uptr idx) const { CHECK_LT(idx, size()); }

  uptr idx0(uptr idx) const {
    uptr res = idx / (BV::kSize * BV::kSize);
    CHECK_LT(res, kLevel1Size);
    return res;
  }

  uptr idx1(uptr idx) const {
    uptr res = (idx / BV::kSize) % BV::kSize;
    CHECK_LT(res, BV::kSize);
    return res;
  }

  uptr idx2(uptr idx) const {
    uptr res = idx % BV::kSize;
    CHECK_LT(res, BV::kSize);
    return res;
  }
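
  // Example (a sketch, assuming BV::kSize == 64 and kLevel1Size >= 2):
  // idx = 4100 decomposes into i0 = 4100 / 4096 = 1,
  // i1 = (4100 / 64) % 64 = 0, and i2 = 4100 % 64 = 4.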

  BV l1_[kLevel1Size];
  BV l2_[kLevel1Size][BV::kSize];
};

}  // namespace __sanitizer

#endif  // SANITIZER_BITVECTOR_H
164
lib/libtsan/sanitizer_common/sanitizer_bvgraph.h
Normal file
@@ -0,0 +1,164 @@
//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of the Sanitizer runtime.
// BVGraph -- a directed graph.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_BVGRAPH_H
#define SANITIZER_BVGRAPH_H

#include "sanitizer_common.h"
#include "sanitizer_bitvector.h"

namespace __sanitizer {

// Directed graph of fixed size implemented as an array of bit vectors.
// Not thread-safe, all accesses should be protected by an external lock.
template<class BV>
class BVGraph {
 public:
  enum SizeEnum : uptr { kSize = BV::kSize };
  uptr size() const { return kSize; }
  // No CTOR.
  void clear() {
    for (uptr i = 0; i < size(); i++)
      v[i].clear();
  }

  bool empty() const {
    for (uptr i = 0; i < size(); i++)
      if (!v[i].empty())
        return false;
    return true;
  }

  // Returns true if a new edge was added.
  bool addEdge(uptr from, uptr to) {
    check(from, to);
    return v[from].setBit(to);
  }

  // Adds an edge from every node in 'from' to 'to'. Returns the number of
  // newly added edges recorded in 'added_edges' (at most 'max_added_edges').
  uptr addEdges(const BV &from, uptr to, uptr added_edges[],
                uptr max_added_edges) {
    uptr res = 0;
    t1.copyFrom(from);
    while (!t1.empty()) {
      uptr node = t1.getAndClearFirstOne();
      if (v[node].setBit(to))
        if (res < max_added_edges)
          added_edges[res++] = node;
    }
    return res;
  }

  // *EXPERIMENTAL*
  // Returns true if an edge from=>to exists.
  // This function does not use any global state except for 'this' itself,
  // and thus can be called from different threads w/o locking.
  // Doing so is racy, however.
  // FIXME: investigate how much we can prove about this race being "benign".
  bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }

  // Returns true if the edge from=>to was removed.
  bool removeEdge(uptr from, uptr to) {
    return v[from].clearBit(to);
  }

  // Returns true if at least one edge *=>to was removed.
  bool removeEdgesTo(const BV &to) {
    bool res = false;
    for (uptr from = 0; from < size(); from++) {
      if (v[from].setDifference(to))
        res = true;
    }
    return res;
  }

  // Returns true if at least one edge from=>* was removed.
  bool removeEdgesFrom(const BV &from) {
    bool res = false;
    t1.copyFrom(from);
    while (!t1.empty()) {
      uptr idx = t1.getAndClearFirstOne();
      if (!v[idx].empty()) {
        v[idx].clear();
        res = true;
      }
    }
    return res;
  }

  void removeEdgesFrom(uptr from) {
    v[from].clear();
  }

  bool hasEdge(uptr from, uptr to) const {
    check(from, to);
    return v[from].getBit(to);
  }

  // Returns true if there is a path from the node 'from'
  // to any of the nodes in 'targets'.
  bool isReachable(uptr from, const BV &targets) {
    BV &to_visit = t1,
       &visited = t2;
    to_visit.copyFrom(v[from]);
    visited.clear();
    visited.setBit(from);
    while (!to_visit.empty()) {
      uptr idx = to_visit.getAndClearFirstOne();
      if (visited.setBit(idx))
        to_visit.setUnion(v[idx]);
    }
    return targets.intersectsWith(visited);
  }

  // Finds a path from 'from' to one of the nodes in 'targets',
  // stores up to 'path_size' items of the path into 'path',
  // returns the path length, or 0 if no path of at most 'path_size' nodes
  // was found.
  uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {
    if (path_size == 0)
      return 0;
    path[0] = from;
    if (targets.getBit(from))
      return 1;
    // The function is recursive, so we don't want to create a BV on the stack.
    // Instead of a getAndClearFirstOne loop we use the slower iterator.
    for (typename BV::Iterator it(v[from]); it.hasNext(); ) {
      uptr idx = it.next();
      if (uptr res = findPath(idx, targets, path + 1, path_size - 1))
        return res + 1;
    }
    return 0;
  }

  // Same as findPath, but finds a shortest path.
  uptr findShortestPath(uptr from, const BV &targets, uptr *path,
                        uptr path_size) {
    for (uptr p = 1; p <= path_size; p++)
      if (findPath(from, targets, path, p) == p)
        return p;
    return 0;
  }

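  // Usage sketch (illustrative only; the node ids a, b, c and the external
  // locking discipline are assumptions of this example, not part of the API):
  //   BVGraph<TwoLevelBitVector<>> g;   // guarded by some external mutex
  //   g.clear();                        // no CTOR, so clear before first use
  //   g.addEdge(a, b);
  //   g.addEdge(b, c);
  //   TwoLevelBitVector<> targets;
  //   targets.clear();
  //   targets.setBit(c);
  //   uptr path[8];
  //   uptr len = g.findShortestPath(a, targets, path, 8);
  //   // len == 3 and path == {a, b, c}: iterative deepening over findPath.
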
 private:
  void check(uptr idx1, uptr idx2) const {
    CHECK_LT(idx1, size());
    CHECK_LT(idx2, size());
  }
  BV v[kSize];
  // Keep temporary vectors here since we cannot create large objects on
  // the stack.
  BV t1, t2;
};

} // namespace __sanitizer

#endif // SANITIZER_BVGRAPH_H
150
lib/libtsan/sanitizer_common/sanitizer_chained_origin_depot.cpp
Normal file
@@ -0,0 +1,150 @@
//===-- sanitizer_chained_origin_depot.cpp --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//

#include "sanitizer_chained_origin_depot.h"

#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

namespace {
struct ChainedOriginDepotDesc {
  u32 here_id;
  u32 prev_id;
};

struct ChainedOriginDepotNode {
  using hash_type = u32;
  u32 link;
  u32 here_id;
  u32 prev_id;

  typedef ChainedOriginDepotDesc args_type;

  bool eq(hash_type hash, const args_type &args) const;

  static uptr allocated() { return 0; }

  static hash_type hash(const args_type &args);

  static bool is_valid(const args_type &args);

  void store(u32 id, const args_type &args, hash_type other_hash);

  args_type load(u32 id) const;

  struct Handle {
    const ChainedOriginDepotNode *node_ = nullptr;
    u32 id_ = 0;
    Handle(const ChainedOriginDepotNode *node, u32 id) : node_(node), id_(id) {}
    bool valid() const { return node_; }
    u32 id() const { return id_; }
    int here_id() const { return node_->here_id; }
    int prev_id() const { return node_->prev_id; }
  };

  static Handle get_handle(u32 id);

  typedef Handle handle_type;
};

} // namespace

static StackDepotBase<ChainedOriginDepotNode, 4, 20> depot;

bool ChainedOriginDepotNode::eq(hash_type hash, const args_type &args) const {
  return here_id == args.here_id && prev_id == args.prev_id;
}

/* This is the murmur2 hash for the 64->32 bit case.
   It does not behave all that well because the keys have a very biased
   distribution (I've seen 7-element buckets with the table only 14% full).

   here_id is built of
   * (1 bit)   Reserved, zero.
   * (8 bits)  Part id = bits 13..20 of the hash value of here_id's key.
   * (23 bits) Sequential number (each part has its own sequence).

   prev_id has either the same distribution as here_id (but with a 3:8:21
   split), or one of two reserved values (-1) or (-2). Either case can
   dominate depending on the workload.
*/
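// Layout sketch (illustrative restatement of the comment above): with the
// 1:8:23 split, here_id == (part_id << 23) | seq with the top bit clear, so
// ids from the same part differ only in their low 23 bits -- exactly the
// biased key distribution this hash has to cope with.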
ChainedOriginDepotNode::hash_type ChainedOriginDepotNode::hash(
    const args_type &args) {
  const u32 m = 0x5bd1e995;
  const u32 seed = 0x9747b28c;
  const u32 r = 24;
  u32 h = seed;
  u32 k = args.here_id;
  k *= m;
  k ^= k >> r;
  k *= m;
  h *= m;
  h ^= k;

  k = args.prev_id;
  k *= m;
  k ^= k >> r;
  k *= m;
  h *= m;
  h ^= k;

  h ^= h >> 13;
  h *= m;
  h ^= h >> 15;
  return h;
}

bool ChainedOriginDepotNode::is_valid(const args_type &args) { return true; }

void ChainedOriginDepotNode::store(u32 id, const args_type &args,
                                   hash_type other_hash) {
  here_id = args.here_id;
  prev_id = args.prev_id;
}

ChainedOriginDepotNode::args_type ChainedOriginDepotNode::load(u32 id) const {
  args_type ret = {here_id, prev_id};
  return ret;
}

ChainedOriginDepotNode::Handle ChainedOriginDepotNode::get_handle(u32 id) {
  return Handle(&depot.nodes[id], id);
}

ChainedOriginDepot::ChainedOriginDepot() {}

StackDepotStats ChainedOriginDepot::GetStats() const {
  return depot.GetStats();
}

bool ChainedOriginDepot::Put(u32 here_id, u32 prev_id, u32 *new_id) {
  ChainedOriginDepotDesc desc = {here_id, prev_id};
  bool inserted;
  *new_id = depot.Put(desc, &inserted);
  return inserted;
}

u32 ChainedOriginDepot::Get(u32 id, u32 *other) {
  ChainedOriginDepotDesc desc = depot.Get(id);
  *other = desc.prev_id;
  return desc.here_id;
}

void ChainedOriginDepot::LockBeforeFork() { depot.LockBeforeFork(); }

void ChainedOriginDepot::UnlockAfterFork(bool fork_child) {
  depot.UnlockAfterFork(fork_child);
}

void ChainedOriginDepot::TestOnlyUnmap() { depot.TestOnlyUnmap(); }

} // namespace __sanitizer
@@ -0,0 +1,46 @@
//===-- sanitizer_chained_origin_depot.h ------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A storage for chained origins.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_CHAINED_ORIGIN_DEPOT_H
#define SANITIZER_CHAINED_ORIGIN_DEPOT_H

#include "sanitizer_common.h"

namespace __sanitizer {

class ChainedOriginDepot {
 public:
  ChainedOriginDepot();

  // Gets the statistics of the origin chain storage.
  StackDepotStats GetStats() const;

  // Stores a chain with StackDepot ID here_id and previous chain ID prev_id.
  // If successful, returns true and writes the new chain ID to *new_id.
  // If the same element already exists, returns false and sets *new_id to the
  // existing ID.
  bool Put(u32 here_id, u32 prev_id, u32 *new_id);

  // Retrieves the stored StackDepot ID for the given origin ID.
  u32 Get(u32 id, u32 *other);

  void LockBeforeFork();
  void UnlockAfterFork(bool fork_child);
  void TestOnlyUnmap();

 private:
  ChainedOriginDepot(const ChainedOriginDepot &) = delete;
  void operator=(const ChainedOriginDepot &) = delete;
};
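
// Usage sketch (illustrative; here_id and prev_id are hypothetical StackDepot
// handles obtained elsewhere):
//   ChainedOriginDepot origins;
//   u32 chain_id;
//   bool is_new = origins.Put(here_id, prev_id, &chain_id);
//   ...
//   u32 prev;
//   u32 here = origins.Get(chain_id, &prev);  // here == here_id, prev == prev_id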

} // namespace __sanitizer

#endif // SANITIZER_CHAINED_ORIGIN_DEPOT_H
435
lib/libtsan/sanitizer_common/sanitizer_common.cpp
Normal file
@@ -0,0 +1,435 @@
//===-- sanitizer_common.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_common.h"

#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"

namespace __sanitizer {

const char *SanitizerToolName = "SanitizerTool";

atomic_uint32_t current_verbosity;
uptr PageSizeCached;
u32 NumberOfCPUsCached;

// PID of the tracer task in StopTheWorld. It shares the address space with the
// main process, but has a different PID and thus requires special handling.
uptr stoptheworld_tracer_pid = 0;
// Cached pid of parent process - if the parent process dies, we want to keep
// writing to the same log file.
uptr stoptheworld_tracer_ppid = 0;

void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report) {
  static int recursion_count;
  if (raw_report || recursion_count) {
    // If a raw report is requested or we went into recursion, just die. The
    // Report() and CHECK calls below may call mmap recursively and fail.
    RawWrite("ERROR: Failed to mmap\n");
    Die();
  }
  recursion_count++;
  if (ErrorIsOOM(err)) {
    ERROR_OOM("failed to %s 0x%zx (%zd) bytes of %s (error code: %d)\n",
              mmap_type, size, size, mem_type, err);
  } else {
    Report(
        "ERROR: %s failed to "
        "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
        SanitizerToolName, mmap_type, size, size, mem_type, err);
  }
#if !SANITIZER_GO
  DumpProcessMap();
#endif
  UNREACHABLE("unable to mmap");
}

void NORETURN ReportMunmapFailureAndDie(void *addr, uptr size, error_t err,
                                        bool raw_report) {
  static int recursion_count;
  if (raw_report || recursion_count) {
    // If a raw report is requested or we went into recursion, just die. The
    // Report() and CHECK calls below may call munmap recursively and fail.
    RawWrite("ERROR: Failed to munmap\n");
    Die();
  }
  recursion_count++;
  Report(
      "ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p (error "
      "code: %d)\n",
      SanitizerToolName, size, size, addr, err);
#if !SANITIZER_GO
  DumpProcessMap();
#endif
  UNREACHABLE("unable to unmap");
}

typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);

const char *StripPathPrefix(const char *filepath,
                            const char *strip_path_prefix) {
  if (!filepath) return nullptr;
  if (!strip_path_prefix) return filepath;
  const char *res = filepath;
  if (const char *pos = internal_strstr(filepath, strip_path_prefix))
    res = pos + internal_strlen(strip_path_prefix);
  if (res[0] == '.' && res[1] == '/')
    res += 2;
  return res;
}

const char *StripModuleName(const char *module) {
  if (!module)
    return nullptr;
  if (SANITIZER_WINDOWS) {
    // On Windows, both slash and backslash are possible.
    // Pick the one that appears last.
    if (const char *bslash_pos = internal_strrchr(module, '\\'))
      return StripModuleName(bslash_pos + 1);
  }
  if (const char *slash_pos = internal_strrchr(module, '/')) {
    return slash_pos + 1;
  }
  return module;
}

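// For example (illustrative), StripModuleName("/usr/lib/libfoo.so") yields
// "libfoo.so", and on Windows StripModuleName("C:\\bin/app.exe") recurses past
// the last backslash first and yields "app.exe".
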
void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
  if (!common_flags()->print_summary)
    return;
  InternalScopedString buff;
  buff.AppendF("SUMMARY: %s: %s",
               alt_tool_name ? alt_tool_name : SanitizerToolName,
               error_message);
  __sanitizer_report_error_summary(buff.data());
}

// Removes the ANSI escape sequences from the input string (in-place).
void RemoveANSIEscapeSequencesFromString(char *str) {
  if (!str)
    return;

  // We are going to remove the escape sequences in place.
  char *s = str;
  char *z = str;
  while (*s != '\0') {
    CHECK_GE(s, z);
    // Skip over ANSI escape sequences with pointer 's'.
    if (*s == '\033' && *(s + 1) == '[') {
      s = internal_strchrnul(s, 'm');
      if (*s == '\0') {
        break;
      }
      s++;
      continue;
    }
    // 's' now points at a character we want to keep. Copy over the buffer
    // content if an escape sequence has been previously skipped, and advance
    // both pointers.
    if (s != z)
      *z = *s;

    // If we have not seen an escape sequence, just advance both pointers.
    z++;
    s++;
  }

  // Null-terminate the string.
  *z = '\0';
}

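// For example (illustrative): the colored report fragment
//   "\033[1m\033[31mERROR\033[0m: foo"
// is rewritten in place to "ERROR: foo"; each "\033[...m" run is skipped by
// jumping 's' to its terminating 'm' while 'z' lags behind as the write
// cursor for the kept characters.
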
void LoadedModule::set(const char *module_name, uptr base_address) {
  clear();
  full_name_ = internal_strdup(module_name);
  base_address_ = base_address;
}

void LoadedModule::set(const char *module_name, uptr base_address,
                       ModuleArch arch, u8 uuid[kModuleUUIDSize],
                       bool instrumented) {
  set(module_name, base_address);
  arch_ = arch;
  internal_memcpy(uuid_, uuid, sizeof(uuid_));
  uuid_size_ = kModuleUUIDSize;
  instrumented_ = instrumented;
}

void LoadedModule::setUuid(const char *uuid, uptr size) {
  if (size > kModuleUUIDSize)
    size = kModuleUUIDSize;
  internal_memcpy(uuid_, uuid, size);
  uuid_size_ = size;
}

void LoadedModule::clear() {
  InternalFree(full_name_);
  base_address_ = 0;
  max_address_ = 0;
  full_name_ = nullptr;
  arch_ = kModuleArchUnknown;
  internal_memset(uuid_, 0, kModuleUUIDSize);
  instrumented_ = false;
  while (!ranges_.empty()) {
    AddressRange *r = ranges_.front();
    ranges_.pop_front();
    InternalFree(r);
  }
}

void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
                                   bool writable, const char *name) {
  void *mem = InternalAlloc(sizeof(AddressRange));
  AddressRange *r =
      new(mem) AddressRange(beg, end, executable, writable, name);
  ranges_.push_back(r);
  max_address_ = Max(max_address_, end);
}

bool LoadedModule::containsAddress(uptr address) const {
  for (const AddressRange &r : ranges()) {
    if (r.beg <= address && address < r.end)
      return true;
  }
  return false;
}

static atomic_uintptr_t g_total_mmaped;

void IncreaseTotalMmap(uptr size) {
  if (!common_flags()->mmap_limit_mb) return;
  uptr total_mmaped =
      atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
  // Since for now mmap_limit_mb is not a user-facing flag, just kill
  // the program. Use RAW_CHECK to avoid extra mmaps in reporting.
  RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
}

void DecreaseTotalMmap(uptr size) {
  if (!common_flags()->mmap_limit_mb) return;
  atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
}

bool TemplateMatch(const char *templ, const char *str) {
  if ((!str) || str[0] == 0)
    return false;
  bool start = false;
  if (templ && templ[0] == '^') {
    start = true;
    templ++;
  }
  bool asterisk = false;
  while (templ && templ[0]) {
    if (templ[0] == '*') {
      templ++;
      start = false;
      asterisk = true;
      continue;
    }
    if (templ[0] == '$')
      return str[0] == 0 || asterisk;
    if (str[0] == 0)
      return false;
    char *tpos = (char*)internal_strchr(templ, '*');
    char *tpos1 = (char*)internal_strchr(templ, '$');
    if ((!tpos) || (tpos1 && tpos1 < tpos))
      tpos = tpos1;
    if (tpos)
      tpos[0] = 0;
    const char *str0 = str;
    const char *spos = internal_strstr(str, templ);
    str = spos + internal_strlen(templ);
    templ = tpos;
    if (tpos)
      tpos[0] = tpos == tpos1 ? '$' : '*';
    if (!spos)
      return false;
    if (start && spos != str0)
      return false;
    start = false;
    asterisk = false;
  }
  return true;
}

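// Matching examples (illustrative; this is the pattern syntax used, e.g., by
// the suppression matcher): TemplateMatch("^foo*bar$", "foodbar") is true
// ('^' anchors the start, '*' matches any run, '$' anchors the end), while
// TemplateMatch("^bar", "foobar") is false because of the '^' anchor.
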
static char binary_name_cache_str[kMaxPathLength];
static char process_name_cache_str[kMaxPathLength];

const char *GetProcessName() {
  return process_name_cache_str;
}

static uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) {
  ReadLongProcessName(buf, buf_len);
  char *s = const_cast<char *>(StripModuleName(buf));
  uptr len = internal_strlen(s);
  if (s != buf) {
    internal_memmove(buf, s, len);
    buf[len] = '\0';
  }
  return len;
}

void UpdateProcessName() {
  ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
}

// Call once to make sure that binary_name_cache_str is initialized.
void CacheBinaryName() {
  if (binary_name_cache_str[0] != '\0')
    return;
  ReadBinaryName(binary_name_cache_str, sizeof(binary_name_cache_str));
  ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
}

uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
  CacheBinaryName();
  if (buf_len == 0)
    return 0;
  uptr name_len = internal_strlen(binary_name_cache_str);
  name_len = (name_len < buf_len - 1) ? name_len : buf_len - 1;
  internal_memcpy(buf, binary_name_cache_str, name_len);
  buf[name_len] = '\0';
  return name_len;
}

uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len) {
  ReadBinaryNameCached(buf, buf_len);
  const char *exec_name_pos = StripModuleName(buf);
  uptr name_len = exec_name_pos - buf;
  buf[name_len] = '\0';
  return name_len;
}

#if !SANITIZER_GO
void PrintCmdline() {
  char **argv = GetArgv();
  if (!argv) return;
  Printf("\nCommand: ");
  for (uptr i = 0; argv[i]; ++i)
    Printf("%s ", argv[i]);
  Printf("\n\n");
}
#endif

// Malloc hooks.
static const int kMaxMallocFreeHooks = 5;
struct MallocFreeHook {
  void (*malloc_hook)(const void *, uptr);
  void (*free_hook)(const void *);
};

static MallocFreeHook MFHooks[kMaxMallocFreeHooks];

void RunMallocHooks(void *ptr, uptr size) {
  __sanitizer_malloc_hook(ptr, size);
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    auto hook = MFHooks[i].malloc_hook;
    if (!hook)
      break;
    hook(ptr, size);
  }
}

// Returns '1' if the call to free() should be ignored (based on
// __sanitizer_ignore_free_hook), or '0' otherwise.
int RunFreeHooks(void *ptr) {
  if (__sanitizer_ignore_free_hook(ptr)) {
    return 1;
  }

  __sanitizer_free_hook(ptr);
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    auto hook = MFHooks[i].free_hook;
    if (!hook)
      break;
    hook(ptr);
  }

  return 0;
}

static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
                                  void (*free_hook)(const void *)) {
  if (!malloc_hook || !free_hook) return 0;
  for (int i = 0; i < kMaxMallocFreeHooks; i++) {
    if (MFHooks[i].malloc_hook == nullptr) {
      MFHooks[i].malloc_hook = malloc_hook;
      MFHooks[i].free_hook = free_hook;
      return i + 1;
    }
  }
  return 0;
}

void internal_sleep(unsigned seconds) {
  internal_usleep((u64)seconds * 1000 * 1000);
}
void SleepForSeconds(unsigned seconds) {
  internal_usleep((u64)seconds * 1000 * 1000);
}
void SleepForMillis(unsigned millis) { internal_usleep((u64)millis * 1000); }

void WaitForDebugger(unsigned seconds, const char *label) {
  if (seconds) {
    Report("Sleeping for %u second(s) %s\n", seconds, label);
    SleepForSeconds(seconds);
  }
}

} // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary,
                             const char *error_summary) {
  Printf("%s\n", error_summary);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_acquire_crash_state() {
  static atomic_uint8_t in_crash_state = {};
  return !atomic_exchange(&in_crash_state, 1, memory_order_relaxed);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
                                                                  uptr),
                                              void (*free_hook)(const void *)) {
  return InstallMallocFreeHooks(malloc_hook, free_hook);
}

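// Client-side sketch (illustrative; my_malloc_hook and my_free_hook are
// hypothetical user callbacks): after
//   __sanitizer_install_malloc_and_free_hooks(my_malloc_hook, my_free_hook);
// every allocation observed by the runtime invokes my_malloc_hook(ptr, size)
// via RunMallocHooks, and every free invokes my_free_hook(ptr). Up to
// kMaxMallocFreeHooks (5) hook pairs can be installed; further installs
// return 0.
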
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
                             uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}

SANITIZER_INTERFACE_WEAK_DEF(int, __sanitizer_ignore_free_hook, void *ptr) {
  (void)ptr;
  return 0;
}

} // extern "C"
1114
lib/libtsan/sanitizer_common/sanitizer_common.h
Normal file
File diff suppressed because it is too large
10590
lib/libtsan/sanitizer_common/sanitizer_common_interceptors.inc
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,577 @@
//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Scanf/printf implementation for use in *Sanitizer interceptors.
// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html
// with a few common GNU extensions.
//
//===----------------------------------------------------------------------===//

#include <stdarg.h>

static const char *parse_number(const char *p, int *out) {
  *out = internal_atoll(p);
  while (*p >= '0' && *p <= '9')
    ++p;
  return p;
}

static const char *maybe_parse_param_index(const char *p, int *out) {
  // n$
  if (*p >= '0' && *p <= '9') {
    int number;
    const char *q = parse_number(p, &number);
    CHECK(q);
    if (*q == '$') {
      *out = number;
      p = q + 1;
    }
  }

  // Otherwise, do not change p. This will be re-parsed later as the field
  // width.
  return p;
}

static bool char_is_one_of(char c, const char *s) {
  return !!internal_strchr(s, c);
}

static const char *maybe_parse_length_modifier(const char *p, char ll[2]) {
  if (char_is_one_of(*p, "jztLq")) {
    ll[0] = *p;
    ++p;
  } else if (*p == 'h') {
    ll[0] = 'h';
    ++p;
    if (*p == 'h') {
      ll[1] = 'h';
      ++p;
    }
  } else if (*p == 'l') {
    ll[0] = 'l';
    ++p;
    if (*p == 'l') {
      ll[1] = 'l';
      ++p;
    }
  }
  return p;
}

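// For instance (illustrative), maybe_parse_length_modifier leaves
// ll == {'h','h'} for "hhd", {'l','l'} for "lld", {'z',0} for "zu", and
// {0,0} (untouched) for a plain "d"; the conversion letter itself is consumed
// later by the directive parsers below.
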
// Returns true if the character is an integer conversion specifier.
static bool format_is_integer_conv(char c) {
  return char_is_one_of(c, "diouxXn");
}

// Returns true if the character is a floating-point conversion specifier.
static bool format_is_float_conv(char c) {
  return char_is_one_of(c, "aAeEfFgG");
}

// Returns string output character size for string-like conversions,
// or 0 if the conversion is invalid.
static int format_get_char_size(char convSpecifier,
                                const char lengthModifier[2]) {
  if (char_is_one_of(convSpecifier, "CS")) {
    return sizeof(wchar_t);
  }

  if (char_is_one_of(convSpecifier, "cs[")) {
    if (lengthModifier[0] == 'l' && lengthModifier[1] == '\0')
      return sizeof(wchar_t);
    else if (lengthModifier[0] == '\0')
      return sizeof(char);
  }

  return 0;
}

enum FormatStoreSize {
  // Store size not known in advance; can be calculated as wcslen() of the
  // destination buffer.
  FSS_WCSLEN = -2,
  // Store size not known in advance; can be calculated as strlen() of the
  // destination buffer.
  FSS_STRLEN = -1,
  // Invalid conversion specifier.
  FSS_INVALID = 0
};

// Returns the memory size of a format directive (if >0), or a value of
// FormatStoreSize.
static int format_get_value_size(char convSpecifier,
                                 const char lengthModifier[2],
                                 bool promote_float) {
  if (format_is_integer_conv(convSpecifier)) {
    switch (lengthModifier[0]) {
      case 'h':
        return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
      case 'l':
        return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
      case 'q':
        return sizeof(long long);
      case 'L':
        return sizeof(long long);
      case 'j':
        return sizeof(INTMAX_T);
      case 'z':
        return sizeof(SIZE_T);
      case 't':
        return sizeof(PTRDIFF_T);
      case 0:
        return sizeof(int);
      default:
        return FSS_INVALID;
    }
  }

  if (format_is_float_conv(convSpecifier)) {
    switch (lengthModifier[0]) {
      case 'L':
      case 'q':
        return sizeof(long double);
      case 'l':
        return lengthModifier[1] == 'l' ? sizeof(long double)
                                        : sizeof(double);
      case 0:
        // Printf promotes floats to doubles but scanf does not.
        return promote_float ? sizeof(double) : sizeof(float);
      default:
        return FSS_INVALID;
    }
  }

  if (convSpecifier == 'p') {
    if (lengthModifier[0] != 0)
      return FSS_INVALID;
    return sizeof(void *);
  }

  return FSS_INVALID;
}

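// Worked examples (illustrative): format_get_value_size('d', "hh", false)
// == sizeof(char); format_get_value_size('f', "", true) == sizeof(double)
// because printf promotes float arguments, while the same call with
// promote_float == false (the scanf path) == sizeof(float).
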
struct ScanfDirective {
  int argIdx; // argument index, or -1 if not specified ("%n$")
  int fieldWidth;
  const char *begin;
  const char *end;
  bool suppressed; // suppress assignment ("*")
  bool allocate;   // allocate space ("m")
  char lengthModifier[2];
  char convSpecifier;
  bool maybeGnuMalloc;
};

// Parse scanf format string. If a valid directive is encountered, it is
// returned in dir. This function returns the pointer to the first
// unprocessed character, or 0 in case of error.
// At the end of the string, a pointer to the closing \0 is returned.
static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
                                    ScanfDirective *dir) {
  internal_memset(dir, 0, sizeof(*dir));
  dir->argIdx = -1;

  while (*p) {
    if (*p != '%') {
      ++p;
      continue;
    }
    dir->begin = p;
    ++p;
    // %%
    if (*p == '%') {
      ++p;
      continue;
    }
    if (*p == '\0') {
      return nullptr;
    }
    // %n$
    p = maybe_parse_param_index(p, &dir->argIdx);
    CHECK(p);
    // *
    if (*p == '*') {
      dir->suppressed = true;
      ++p;
    }
    // Field width
    if (*p >= '0' && *p <= '9') {
      p = parse_number(p, &dir->fieldWidth);
      CHECK(p);
      if (dir->fieldWidth <= 0)  // A width, if present, must be non-zero.
        return nullptr;
    }
    // m
    if (*p == 'm') {
      dir->allocate = true;
      ++p;
    }
    // Length modifier.
    p = maybe_parse_length_modifier(p, dir->lengthModifier);
    // Conversion specifier.
    dir->convSpecifier = *p++;
    // Consume %[...] expression.
    if (dir->convSpecifier == '[') {
      if (*p == '^')
        ++p;
      if (*p == ']')
        ++p;
      while (*p && *p != ']')
        ++p;
      if (*p == 0)
        return nullptr; // unexpected end of string
      // Consume the closing ']'.
      ++p;
    }
    // This is unfortunately ambiguous between the old GNU extension
    // of %as, %aS and %a[...] and the newer POSIX %a followed by
    // letters s, S or [.
    if (allowGnuMalloc && dir->convSpecifier == 'a' &&
        !dir->lengthModifier[0]) {
      if (*p == 's' || *p == 'S') {
        dir->maybeGnuMalloc = true;
        ++p;
      } else if (*p == '[') {
        // Watch for %a[h-j%d]: if % appears in the
        // [...] range, then we need to give up; we don't know
        // if scanf will parse it as POSIX %a [h-j %d ] or as a
        // GNU allocation of a string over the set h-j, d plus %.
        const char *q = p + 1;
        if (*q == '^')
          ++q;
        if (*q == ']')
          ++q;
        while (*q && *q != ']' && *q != '%')
          ++q;
        if (*q == 0 || *q == '%')
          return nullptr;
        p = q + 1;  // Consume the closing ']'.
        dir->maybeGnuMalloc = true;
      }
    }
    dir->end = p;
    break;
  }
  return p;
}

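// Example (illustrative): parsing "%10ls" sets dir->fieldWidth == 10,
// dir->lengthModifier == {'l',0} and dir->convSpecifier == 's', and returns a
// pointer just past the 's'; scanf_get_value_size below then turns this into
// (10 + 1) * sizeof(wchar_t) -- the field width plus the terminator.
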
static int scanf_get_value_size(ScanfDirective *dir) {
  if (dir->allocate) {
    if (!char_is_one_of(dir->convSpecifier, "cCsS["))
      return FSS_INVALID;
    return sizeof(char *);
  }

  if (dir->maybeGnuMalloc) {
    if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
      return FSS_INVALID;
    // This is ambiguous, so check the smaller size of char * (if it is
    // a GNU extension of %as, %aS or %a[...]) and float (if it is
    // POSIX %a followed by s, S or [ letters).
    return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
  }

  if (char_is_one_of(dir->convSpecifier, "cCsS[")) {
    bool needsTerminator = char_is_one_of(dir->convSpecifier, "sS[");
    unsigned charSize =
        format_get_char_size(dir->convSpecifier, dir->lengthModifier);
    if (charSize == 0)
      return FSS_INVALID;
    if (dir->fieldWidth == 0) {
      if (!needsTerminator)
        return charSize;
      return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
    }
    return (dir->fieldWidth + needsTerminator) * charSize;
  }

  return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);
}

// Common part of *scanf interceptors.
// Process format string and va_list, and report all store ranges.
// Stops when "consuming" n_inputs input items.
static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
                         const char *format, va_list aq) {
  CHECK_GT(n_inputs, 0);
  const char *p = format;

  COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);

  while (*p) {
    ScanfDirective dir;
    p = scanf_parse_next(p, allowGnuMalloc, &dir);
    if (!p)
      break;
    if (dir.convSpecifier == 0) {
      // This can only happen at the end of the format string.
      CHECK_EQ(*p, 0);
      break;
    }
    // Here the directive is valid. Do what it says.
    if (dir.argIdx != -1) {
      // Unsupported.
      break;
    }
    if (dir.suppressed)
      continue;
    int size = scanf_get_value_size(&dir);
    if (size == FSS_INVALID) {
      Report("%s: WARNING: unexpected format specifier in scanf interceptor: %.*s\n",
             SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
      break;
    }
    void *argp = va_arg(aq, void *);
    if (dir.convSpecifier != 'n')
      --n_inputs;
    if (n_inputs < 0)
      break;
    if (size == FSS_STRLEN) {
      size = internal_strlen((const char *)argp) + 1;
    } else if (size == FSS_WCSLEN) {
      // FIXME: actually use wcslen() to calculate it.
      size = 0;
    }
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
    // For %mc/%mC/%ms/%m[/%mS, write the allocated output buffer as well.
    if (dir.allocate) {
      if (char *buf = *(char **)argp) {
        if (dir.convSpecifier == 'c')
          size = 1;
        else if (dir.convSpecifier == 'C')
          size = sizeof(wchar_t);
        else if (dir.convSpecifier == 'S')
          size = (internal_wcslen((wchar_t *)buf) + 1) * sizeof(wchar_t);
        else  // 's' or '['
          size = internal_strlen(buf) + 1;
        COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
      }
    }
  }
}

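// End-to-end example (illustrative): for the intercepted call
//   scanf("%d %5s", &i, buf);
// scanf_common reports a write of sizeof(int) at &i, then a write of
// (5 + 1) * sizeof(char) == 6 bytes at buf (field width plus the NUL), so the
// tool can check that both destinations are addressable before libc runs.
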
#if SANITIZER_INTERCEPT_PRINTF

struct PrintfDirective {
  int fieldWidth;
  int fieldPrecision;
  int argIdx;       // width argument index, or -1 if not specified ("%*n$")
  int precisionIdx; // precision argument index, or -1 if not specified (".*n$")
  const char *begin;
  const char *end;
  bool starredWidth;
  bool starredPrecision;
  char lengthModifier[2];
  char convSpecifier;
};

static const char *maybe_parse_number(const char *p, int *out) {
  if (*p >= '0' && *p <= '9')
    p = parse_number(p, out);
  return p;
}

static const char *maybe_parse_number_or_star(const char *p, int *out,
                                              bool *star) {
  if (*p == '*') {
    *star = true;
    ++p;
  } else {
    *star = false;
    p = maybe_parse_number(p, out);
  }
  return p;
}

// Parse printf format string. Same as scanf_parse_next.
static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
  internal_memset(dir, 0, sizeof(*dir));
  dir->argIdx = -1;
  dir->precisionIdx = -1;

  while (*p) {
    if (*p != '%') {
      ++p;
      continue;
    }
    dir->begin = p;
    ++p;
    // %%
    if (*p == '%') {
      ++p;
      continue;
    }
    if (*p == '\0') {
      return nullptr;
    }
    // %n$
    p = maybe_parse_param_index(p, &dir->argIdx);
    CHECK(p);
    // Flags
    while (char_is_one_of(*p, "'-+ #0")) {
      ++p;
    }
    // Field width
    p = maybe_parse_number_or_star(p, &dir->fieldWidth,
                                   &dir->starredWidth);
    if (!p)
      return nullptr;
    // Precision
    if (*p == '.') {
      ++p;
      // Actual precision is optional (surprise!)
      p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
                                     &dir->starredPrecision);
      if (!p)
        return nullptr;
      // m$
      if (dir->starredPrecision) {
        p = maybe_parse_param_index(p, &dir->precisionIdx);
        CHECK(p);
      }
    }
    // Length modifier.
    p = maybe_parse_length_modifier(p, dir->lengthModifier);
    // Conversion specifier.
    dir->convSpecifier = *p++;
    dir->end = p;
    break;
  }
  return p;
}

static int printf_get_value_size(PrintfDirective *dir) {
  if (char_is_one_of(dir->convSpecifier, "cCsS")) {
    unsigned charSize =
        format_get_char_size(dir->convSpecifier, dir->lengthModifier);
    if (charSize == 0)
      return FSS_INVALID;
    if (char_is_one_of(dir->convSpecifier, "sS")) {
      return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
    }
    return charSize;
  }

  return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);
}

#define SKIP_SCALAR_ARG(aq, convSpecifier, size)                    \
  do {                                                              \
    if (format_is_float_conv(convSpecifier)) {                      \
      switch (size) {                                               \
        case 8:                                                     \
          va_arg(*aq, double);                                      \
          break;                                                    \
        case 12:                                                    \
          va_arg(*aq, long double);                                 \
          break;                                                    \
        case 16:                                                    \
          va_arg(*aq, long double);                                 \
          break;                                                    \
        default:                                                    \
          Report("WARNING: unexpected floating-point arg size"     \
                 " in printf interceptor: %zu\n",                   \
                 static_cast<uptr>(size));                          \
          return;                                                   \
      }                                                             \
    } else {                                                        \
      switch (size) {                                               \
        case 1:                                                     \
        case 2:                                                     \
        case 4:                                                     \
          va_arg(*aq, u32);                                         \
          break;                                                    \
        case 8:                                                     \
          va_arg(*aq, u64);                                         \
          break;                                                    \
        default:                                                    \
          Report("WARNING: unexpected arg size"                     \
                 " in printf interceptor: %zu\n",                   \
                 static_cast<uptr>(size));                          \
          return;                                                   \
      }                                                             \
    }                                                               \
  } while (0)

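// For instance (illustrative): on x86_64, SKIP_SCALAR_ARG(&aq, 'f', 16) pops
// a long double from the va_list, while SKIP_SCALAR_ARG(&aq, 'd', 4) pops a
// default-promoted int as u32; the sizes come from printf_get_value_size.
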
// Common part of *printf interceptors.
// Process format string and va_list, and report all load ranges.
static void printf_common(void *ctx, const char *format, va_list aq) {
  COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);

  const char *p = format;

  while (*p) {
    PrintfDirective dir;
    p = printf_parse_next(p, &dir);
    if (!p)
      break;
    if (dir.convSpecifier == 0) {
      // This can only happen at the end of the format string.
      CHECK_EQ(*p, 0);
      break;
    }
    // Here the directive is valid. Do what it says.
    if (dir.argIdx != -1 || dir.precisionIdx != -1) {
      // Unsupported.
      break;
    }
    if (dir.starredWidth) {
      // Dynamic width
      SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
    }
    if (dir.starredPrecision) {
      // Dynamic precision
      SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
    }
    // %m does not require an argument: strlen(errno).
    if (dir.convSpecifier == 'm')
      continue;
    int size = printf_get_value_size(&dir);
    if (size == FSS_INVALID) {
      static int ReportedOnce;
      if (!ReportedOnce++)
        Report(
            "%s: WARNING: unexpected format specifier in printf "
            "interceptor: %.*s (reported once per process)\n",
            SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
      break;
    }
    if (dir.convSpecifier == 'n') {
      void *argp = va_arg(aq, void *);
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
      continue;
    } else if (size == FSS_STRLEN) {
      if (void *argp = va_arg(aq, void *)) {
        uptr len;
        if (dir.starredPrecision) {
          // FIXME: properly support starred precision for strings.
          len = 0;
        } else if (dir.fieldPrecision > 0) {
          // Won't read more than "precision" symbols.
          len = internal_strnlen((const char *)argp, dir.fieldPrecision);
          if (len < (uptr)dir.fieldPrecision)
            len++;
        } else {
          // Whole string will be accessed.
          len = internal_strlen((const char *)argp) + 1;
        }
        COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, len);
      }
    } else if (size == FSS_WCSLEN) {
      if (void *argp = va_arg(aq, void *)) {
        // FIXME: Properly support wide-character strings (via wcsrtombs).
        COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, 0);
      }
    } else {
      // Skip non-pointer args
      SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);
    }
  }
}

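// End-to-end example (illustrative): for the intercepted call
//   printf("%.3s %n", s, &n);
// printf_common reports a read of internal_strnlen(s, 3) bytes at s (one more
// if the string is shorter than the precision, to cover the NUL), then a
// write of sizeof(int) at &n for the %n directive.
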
#endif // SANITIZER_INTERCEPT_PRINTF
@@ -0,0 +1,612 @@
//===-- sanitizer_common_interceptors_ioctl.inc -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Ioctl handling in common sanitizer interceptors.
//===----------------------------------------------------------------------===//

#if !SANITIZER_NETBSD

#include "sanitizer_flags.h"

struct ioctl_desc {
  unsigned req;
  // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
  // same thing.
  // XXX: The declarations below may use WRITE instead of READWRITE, unless
  // explicitly noted.
  enum {
    NONE,
    READ,
    WRITE,
    READWRITE,
    CUSTOM
  } type : 3;
  unsigned size : 29;
  const char* name;
};

const unsigned ioctl_table_max = 500;
static ioctl_desc ioctl_table[ioctl_table_max];
static unsigned ioctl_table_size = 0;

// This cannot be declared as a global, because references to struct_*_sz
// require a global initializer. And this table must be available before global
// initializers are run.
static void ioctl_table_fill() {
#define _(rq, tp, sz)                                    \
  if (IOCTL_##rq != IOCTL_NOT_PRESENT) {                 \
    CHECK(ioctl_table_size < ioctl_table_max);           \
    ioctl_table[ioctl_table_size].req = IOCTL_##rq;      \
    ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \
    ioctl_table[ioctl_table_size].size = sz;             \
    ioctl_table[ioctl_table_size].name = #rq;            \
    ++ioctl_table_size;                                  \
  }

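  // For reference (illustrative expansion): _(FIOASYNC, READ, sizeof(int))
  // appends {IOCTL_FIOASYNC, ioctl_desc::READ, sizeof(int), "FIOASYNC"} to
  // ioctl_table, guarded by the IOCTL_NOT_PRESENT check so requests missing
  // on the current platform are skipped at runtime.
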
  _(FIOASYNC, READ, sizeof(int));
  _(FIOCLEX, NONE, 0);
  _(FIOGETOWN, WRITE, sizeof(int));
  _(FIONBIO, READ, sizeof(int));
  _(FIONCLEX, NONE, 0);
  _(FIOSETOWN, READ, sizeof(int));
  _(SIOCATMARK, WRITE, sizeof(int));
  _(SIOCGIFCONF, CUSTOM, 0);
  _(SIOCGPGRP, WRITE, sizeof(int));
  _(SIOCSPGRP, READ, sizeof(int));
#if !SANITIZER_SOLARIS
  _(TIOCCONS, NONE, 0);
#endif
  _(TIOCEXCL, NONE, 0);
  _(TIOCGETD, WRITE, sizeof(int));
  _(TIOCGPGRP, WRITE, pid_t_sz);
  _(TIOCGWINSZ, WRITE, struct_winsize_sz);
  _(TIOCMBIC, READ, sizeof(int));
  _(TIOCMBIS, READ, sizeof(int));
  _(TIOCMGET, WRITE, sizeof(int));
  _(TIOCMSET, READ, sizeof(int));
  _(TIOCNOTTY, NONE, 0);
  _(TIOCNXCL, NONE, 0);
  _(TIOCOUTQ, WRITE, sizeof(int));
  _(TIOCPKT, READ, sizeof(int));
  _(TIOCSCTTY, NONE, 0);
  _(TIOCSETD, READ, sizeof(int));
  _(TIOCSPGRP, READ, pid_t_sz);
  _(TIOCSTI, READ, sizeof(char));
  _(TIOCSWINSZ, READ, struct_winsize_sz);

#if !SANITIZER_IOS
  _(SIOCADDMULTI, READ, struct_ifreq_sz);
  _(SIOCDELMULTI, READ, struct_ifreq_sz);
  _(SIOCGIFADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
  _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
  _(SIOCGIFMTU, WRITE, struct_ifreq_sz);
  _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
  _(SIOCSIFADDR, READ, struct_ifreq_sz);
  _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
  _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
  _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
  _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
  _(SIOCSIFMTU, READ, struct_ifreq_sz);
  _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
#endif

#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
  _(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
  _(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
#endif

#if SANITIZER_LINUX
  // Conflicting request ids.
  // _(CDROMAUDIOBUFSIZ, NONE, 0);
  // _(SNDCTL_TMR_CONTINUE, NONE, 0);
  // _(SNDCTL_TMR_START, NONE, 0);
  // _(SNDCTL_TMR_STOP, NONE, 0);
  // _(SOUND_MIXER_READ_LOUD, WRITE, sizeof(int)); // same as ...READ_ENHANCE
  // _(SOUND_MIXER_READ_MUTE, WRITE, sizeof(int)); // same as ...READ_ENHANCE
  // _(SOUND_MIXER_WRITE_LOUD, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
  // _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
  _(BLKFLSBUF, NONE, 0);
  _(BLKGETSIZE, WRITE, sizeof(uptr));
  _(BLKRAGET, WRITE, sizeof(uptr));
  _(BLKRASET, NONE, 0);
  _(BLKROGET, WRITE, sizeof(int));
  _(BLKROSET, READ, sizeof(int));
  _(BLKRRPART, NONE, 0);
  _(BLKFRASET, NONE, 0);
  _(BLKFRAGET, WRITE, sizeof(uptr));
  _(BLKSECTSET, READ, sizeof(short));
  _(BLKSECTGET, WRITE, sizeof(short));
  _(BLKSSZGET, WRITE, sizeof(int));
  _(BLKBSZGET, WRITE, sizeof(int));
  _(BLKBSZSET, READ, sizeof(uptr));
  _(BLKGETSIZE64, WRITE, sizeof(u64));
  _(CDROMEJECT, NONE, 0);
  _(CDROMEJECT_SW, NONE, 0);
  _(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);
  _(CDROMPAUSE, NONE, 0);
  _(CDROMPLAYMSF, READ, struct_cdrom_msf_sz);
  _(CDROMPLAYTRKIND, READ, struct_cdrom_ti_sz);
  _(CDROMREADAUDIO, READ, struct_cdrom_read_audio_sz);
  _(CDROMREADCOOKED, READ, struct_cdrom_msf_sz);
  _(CDROMREADMODE1, READ, struct_cdrom_msf_sz);
  _(CDROMREADMODE2, READ, struct_cdrom_msf_sz);
  _(CDROMREADRAW, READ, struct_cdrom_msf_sz);
  _(CDROMREADTOCENTRY, WRITE, struct_cdrom_tocentry_sz);
  _(CDROMREADTOCHDR, WRITE, struct_cdrom_tochdr_sz);
  _(CDROMRESET, NONE, 0);
  _(CDROMRESUME, NONE, 0);
  _(CDROMSEEK, READ, struct_cdrom_msf_sz);
  _(CDROMSTART, NONE, 0);
  _(CDROMSTOP, NONE, 0);
  _(CDROMSUBCHNL, WRITE, struct_cdrom_subchnl_sz);
  _(CDROMVOLCTRL, READ, struct_cdrom_volctrl_sz);
  _(CDROMVOLREAD, WRITE, struct_cdrom_volctrl_sz);
  _(CDROM_GET_UPC, WRITE, 8);
  _(EVIOCGABS, WRITE, struct_input_absinfo_sz); // fixup
  _(EVIOCGBIT, WRITE, struct_input_id_sz); // fixup
  _(EVIOCGEFFECTS, WRITE, sizeof(int));
  _(EVIOCGID, WRITE, struct_input_id_sz);
  _(EVIOCGKEY, WRITE, 0);
  _(EVIOCGKEYCODE, WRITE, sizeof(int) * 2);
  _(EVIOCGLED, WRITE, 0);
  _(EVIOCGNAME, WRITE, 0);
  _(EVIOCGPHYS, WRITE, 0);
  _(EVIOCGRAB, READ, sizeof(int));
  _(EVIOCGREP, WRITE, sizeof(int) * 2);
  _(EVIOCGSND, WRITE, 0);
  _(EVIOCGSW, WRITE, 0);
  _(EVIOCGUNIQ, WRITE, 0);
  _(EVIOCGVERSION, WRITE, sizeof(int));
  _(EVIOCRMFF, READ, sizeof(int));
  _(EVIOCSABS, READ, struct_input_absinfo_sz); // fixup
  _(EVIOCSFF, READ, struct_ff_effect_sz);
  _(EVIOCSKEYCODE, READ, sizeof(int) * 2);
  _(EVIOCSREP, READ, sizeof(int) * 2);
  _(FDCLRPRM, NONE, 0);
  _(FDDEFPRM, READ, struct_floppy_struct_sz);
  _(FDFLUSH, NONE, 0);
  _(FDFMTBEG, NONE, 0);
  _(FDFMTEND, NONE, 0);
  _(FDFMTTRK, READ, struct_format_descr_sz);
  _(FDGETDRVPRM, WRITE, struct_floppy_drive_params_sz);
  _(FDGETDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
  _(FDGETDRVTYP, WRITE, 16);
  _(FDGETFDCSTAT, WRITE, struct_floppy_fdc_state_sz);
  _(FDGETMAXERRS, WRITE, struct_floppy_max_errors_sz);
  _(FDGETPRM, WRITE, struct_floppy_struct_sz);
  _(FDMSGOFF, NONE, 0);
  _(FDMSGON, NONE, 0);
  _(FDPOLLDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
  _(FDRAWCMD, WRITE, struct_floppy_raw_cmd_sz);
  _(FDRESET, NONE, 0);
  _(FDSETDRVPRM, READ, struct_floppy_drive_params_sz);
  _(FDSETEMSGTRESH, NONE, 0);
  _(FDSETMAXERRS, READ, struct_floppy_max_errors_sz);
  _(FDSETPRM, READ, struct_floppy_struct_sz);
  _(FDTWADDLE, NONE, 0);
  _(FDWERRORCLR, NONE, 0);
  _(FDWERRORGET, WRITE, struct_floppy_write_errors_sz);
  _(HDIO_DRIVE_CMD, WRITE, sizeof(int));
  _(HDIO_GETGEO, WRITE, struct_hd_geometry_sz);
  _(HDIO_GET_32BIT, WRITE, sizeof(int));
  _(HDIO_GET_DMA, WRITE, sizeof(int));
  _(HDIO_GET_IDENTITY, WRITE, struct_hd_driveid_sz);
  _(HDIO_GET_KEEPSETTINGS, WRITE, sizeof(int));
  _(HDIO_GET_MULTCOUNT, WRITE, sizeof(int));
  _(HDIO_GET_NOWERR, WRITE, sizeof(int));
  _(HDIO_GET_UNMASKINTR, WRITE, sizeof(int));
  _(HDIO_SET_32BIT, NONE, 0);
  _(HDIO_SET_DMA, NONE, 0);
  _(HDIO_SET_KEEPSETTINGS, NONE, 0);
  _(HDIO_SET_MULTCOUNT, NONE, 0);
  _(HDIO_SET_NOWERR, NONE, 0);
  _(HDIO_SET_UNMASKINTR, NONE, 0);
  _(MTIOCGET, WRITE, struct_mtget_sz);
  _(MTIOCPOS, WRITE, struct_mtpos_sz);
  _(MTIOCTOP, READ, struct_mtop_sz);
  _(PPPIOCGASYNCMAP, WRITE, sizeof(int));
  _(PPPIOCGDEBUG, WRITE, sizeof(int));
  _(PPPIOCGFLAGS, WRITE, sizeof(int));
  _(PPPIOCGUNIT, WRITE, sizeof(int));
  _(PPPIOCGXASYNCMAP, WRITE, sizeof(int) * 8);
  _(PPPIOCSASYNCMAP, READ, sizeof(int));
  _(PPPIOCSDEBUG, READ, sizeof(int));
  _(PPPIOCSFLAGS, READ, sizeof(int));
  _(PPPIOCSMAXCID, READ, sizeof(int));
  _(PPPIOCSMRU, READ, sizeof(int));
  _(PPPIOCSXASYNCMAP, READ, sizeof(int) * 8);
  _(SIOCADDRT, READ, struct_rtentry_sz);
  _(SIOCDARP, READ, struct_arpreq_sz);
  _(SIOCDELRT, READ, struct_rtentry_sz);
  _(SIOCDRARP, READ, struct_arpreq_sz);
  _(SIOCGARP, WRITE, struct_arpreq_sz);
  _(SIOCGIFENCAP, WRITE, sizeof(int));
  _(SIOCGIFHWADDR, WRITE, struct_ifreq_sz);
  _(SIOCGIFMAP, WRITE, struct_ifreq_sz);
  _(SIOCGIFMEM, WRITE, struct_ifreq_sz);
  _(SIOCGIFNAME, NONE, 0);
  _(SIOCGIFSLAVE, NONE, 0);
  _(SIOCGRARP, WRITE, struct_arpreq_sz);
  _(SIOCGSTAMP, WRITE, timeval_sz);
  _(SIOCSARP, READ, struct_arpreq_sz);
  _(SIOCSIFENCAP, READ, sizeof(int));
  _(SIOCSIFHWADDR, READ, struct_ifreq_sz);
  _(SIOCSIFLINK, NONE, 0);
  _(SIOCSIFMAP, READ, struct_ifreq_sz);
  _(SIOCSIFMEM, READ, struct_ifreq_sz);
  _(SIOCSIFSLAVE, NONE, 0);
  _(SIOCSRARP, READ, struct_arpreq_sz);
  _(SNDCTL_COPR_HALT, WRITE, struct_copr_debug_buf_sz);
  _(SNDCTL_COPR_LOAD, READ, struct_copr_buffer_sz);
  _(SNDCTL_COPR_RCODE, WRITE, struct_copr_debug_buf_sz);
  _(SNDCTL_COPR_RCVMSG, WRITE, struct_copr_msg_sz);
  _(SNDCTL_COPR_RDATA, WRITE, struct_copr_debug_buf_sz);
  _(SNDCTL_COPR_RESET, NONE, 0);
  _(SNDCTL_COPR_RUN, WRITE, struct_copr_debug_buf_sz);
  _(SNDCTL_COPR_SENDMSG, READ, struct_copr_msg_sz);
  _(SNDCTL_COPR_WCODE, READ, struct_copr_debug_buf_sz);
  _(SNDCTL_COPR_WDATA, READ, struct_copr_debug_buf_sz);
  _(SNDCTL_DSP_GETBLKSIZE, WRITE, sizeof(int));
  _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));
  _(SNDCTL_DSP_NONBLOCK, NONE, 0);
  _(SNDCTL_DSP_POST, NONE, 0);
  _(SNDCTL_DSP_RESET, NONE, 0);
  _(SNDCTL_DSP_SETFMT, WRITE, sizeof(int));
  _(SNDCTL_DSP_SETFRAGMENT, WRITE, sizeof(int));
  _(SNDCTL_DSP_SPEED, WRITE, sizeof(int));
  _(SNDCTL_DSP_STEREO, WRITE, sizeof(int));
  _(SNDCTL_DSP_SUBDIVIDE, WRITE, sizeof(int));
  _(SNDCTL_DSP_SYNC, NONE, 0);
  _(SNDCTL_FM_4OP_ENABLE, READ, sizeof(int));
  _(SNDCTL_FM_LOAD_INSTR, READ, struct_sbi_instrument_sz);
  _(SNDCTL_MIDI_INFO, WRITE, struct_midi_info_sz);
  _(SNDCTL_MIDI_PRETIME, WRITE, sizeof(int));
  _(SNDCTL_SEQ_CTRLRATE, WRITE, sizeof(int));
  _(SNDCTL_SEQ_GETINCOUNT, WRITE, sizeof(int));
  _(SNDCTL_SEQ_GETOUTCOUNT, WRITE, sizeof(int));
  _(SNDCTL_SEQ_NRMIDIS, WRITE, sizeof(int));
  _(SNDCTL_SEQ_NRSYNTHS, WRITE, sizeof(int));
  _(SNDCTL_SEQ_OUTOFBAND, READ, struct_seq_event_rec_sz);
  _(SNDCTL_SEQ_PANIC, NONE, 0);
  _(SNDCTL_SEQ_PERCMODE, NONE, 0);
  _(SNDCTL_SEQ_RESET, NONE, 0);
  _(SNDCTL_SEQ_RESETSAMPLES, READ, sizeof(int));
  _(SNDCTL_SEQ_SYNC, NONE, 0);
  _(SNDCTL_SEQ_TESTMIDI, READ, sizeof(int));
  _(SNDCTL_SEQ_THRESHOLD, READ, sizeof(int));
  _(SNDCTL_SYNTH_INFO, WRITE, struct_synth_info_sz);
  _(SNDCTL_SYNTH_MEMAVL, WRITE, sizeof(int));
  _(SNDCTL_TMR_METRONOME, READ, sizeof(int));
  _(SNDCTL_TMR_SELECT, WRITE, sizeof(int));
  _(SNDCTL_TMR_SOURCE, WRITE, sizeof(int));
  _(SNDCTL_TMR_TEMPO, WRITE, sizeof(int));
  _(SNDCTL_TMR_TIMEBASE, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_ALTPCM, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_BASS, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_CAPS, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_CD, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_DEVMASK, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_ENHANCE, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_IGAIN, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_IMIX, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_LINE, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_LINE1, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_LINE2, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_LINE3, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_MIC, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_OGAIN, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_PCM, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_RECLEV, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_RECMASK, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_RECSRC, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_SPEAKER, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_STEREODEVS, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_SYNTH, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_TREBLE, WRITE, sizeof(int));
  _(SOUND_MIXER_READ_VOLUME, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_ALTPCM, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_BASS, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_CD, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_ENHANCE, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_IGAIN, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_IMIX, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_LINE, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_LINE1, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_LINE2, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_LINE3, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_MIC, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_OGAIN, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_PCM, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_RECLEV, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_RECSRC, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_SPEAKER, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_SYNTH, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_TREBLE, WRITE, sizeof(int));
  _(SOUND_MIXER_WRITE_VOLUME, WRITE, sizeof(int));
  _(SOUND_PCM_READ_BITS, WRITE, sizeof(int));
  _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));
  _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));
  _(SOUND_PCM_READ_RATE, WRITE, sizeof(int));
  _(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
  _(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
  _(TCFLSH, NONE, 0);
#if SANITIZER_GLIBC
  _(TCGETA, WRITE, struct_termio_sz);
#endif
_(TCGETS, WRITE, struct_termios_sz);
|
||||
_(TCSBRK, NONE, 0);
|
||||
_(TCSBRKP, NONE, 0);
|
||||
#if SANITIZER_GLIBC
|
||||
_(TCSETA, READ, struct_termio_sz);
|
||||
_(TCSETAF, READ, struct_termio_sz);
|
||||
_(TCSETAW, READ, struct_termio_sz);
|
||||
#endif
|
||||
_(TCSETS, READ, struct_termios_sz);
|
||||
_(TCSETSF, READ, struct_termios_sz);
|
||||
_(TCSETSW, READ, struct_termios_sz);
|
||||
_(TCXONC, NONE, 0);
|
||||
_(TIOCGLCKTRMIOS, WRITE, struct_termios_sz);
|
||||
_(TIOCGSOFTCAR, WRITE, sizeof(int));
|
||||
_(TIOCINQ, WRITE, sizeof(int));
|
||||
_(TIOCLINUX, READ, sizeof(char));
|
||||
_(TIOCSERCONFIG, NONE, 0);
|
||||
_(TIOCSERGETLSR, WRITE, sizeof(int));
|
||||
_(TIOCSERGWILD, WRITE, sizeof(int));
|
||||
_(TIOCSERSWILD, READ, sizeof(int));
|
||||
_(TIOCSLCKTRMIOS, READ, struct_termios_sz);
|
||||
_(TIOCSSOFTCAR, READ, sizeof(int));
|
||||
_(VT_ACTIVATE, NONE, 0);
|
||||
_(VT_DISALLOCATE, NONE, 0);
|
||||
_(VT_GETMODE, WRITE, struct_vt_mode_sz);
|
||||
_(VT_GETSTATE, WRITE, struct_vt_stat_sz);
|
||||
_(VT_OPENQRY, WRITE, sizeof(int));
|
||||
_(VT_RELDISP, NONE, 0);
|
||||
_(VT_RESIZE, READ, struct_vt_sizes_sz);
|
||||
_(VT_RESIZEX, READ, struct_vt_consize_sz);
|
||||
_(VT_SENDSIG, NONE, 0);
|
||||
_(VT_SETMODE, READ, struct_vt_mode_sz);
|
||||
_(VT_WAITACTIVE, NONE, 0);
|
||||
#endif
|
||||
|
||||
#if SANITIZER_GLIBC
|
||||
// _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
|
||||
_(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);
|
||||
_(EQL_ENSLAVE, WRITE, struct_ifreq_sz);
|
||||
_(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);
|
||||
_(EQL_GETSLAVECFG, WRITE, struct_ifreq_sz);
|
||||
_(EQL_SETMASTRCFG, WRITE, struct_ifreq_sz);
|
||||
_(EQL_SETSLAVECFG, WRITE, struct_ifreq_sz);
|
||||
_(EVIOCGKEYCODE_V2, WRITE, struct_input_keymap_entry_sz);
|
||||
_(EVIOCGPROP, WRITE, 0);
|
||||
_(EVIOCSKEYCODE_V2, READ, struct_input_keymap_entry_sz);
|
||||
_(FS_IOC_GETFLAGS, WRITE, sizeof(int));
|
||||
_(FS_IOC_GETVERSION, WRITE, sizeof(int));
|
||||
_(FS_IOC_SETFLAGS, READ, sizeof(int));
|
||||
_(FS_IOC_SETVERSION, READ, sizeof(int));
|
||||
_(GIO_CMAP, WRITE, 48);
|
||||
_(GIO_FONT, WRITE, 8192);
|
||||
_(GIO_SCRNMAP, WRITE, e_tabsz);
|
||||
_(GIO_UNIMAP, WRITE, struct_unimapdesc_sz);
|
||||
_(GIO_UNISCRNMAP, WRITE, sizeof(short) * e_tabsz);
|
||||
_(KDADDIO, NONE, 0);
|
||||
_(KDDELIO, NONE, 0);
|
||||
_(KDDISABIO, NONE, 0);
|
||||
_(KDENABIO, NONE, 0);
|
||||
_(KDGETKEYCODE, WRITE, struct_kbkeycode_sz);
|
||||
_(KDGETLED, WRITE, 1);
|
||||
_(KDGETMODE, WRITE, sizeof(int));
|
||||
_(KDGKBDIACR, WRITE, struct_kbdiacrs_sz);
|
||||
_(KDGKBENT, WRITE, struct_kbentry_sz);
|
||||
_(KDGKBLED, WRITE, sizeof(int));
|
||||
_(KDGKBMETA, WRITE, sizeof(int));
|
||||
_(KDGKBMODE, WRITE, sizeof(int));
|
||||
_(KDGKBSENT, WRITE, struct_kbsentry_sz);
|
||||
_(KDGKBTYPE, WRITE, 1);
|
||||
_(KDMAPDISP, NONE, 0);
|
||||
_(KDMKTONE, NONE, 0);
|
||||
_(KDSETKEYCODE, READ, struct_kbkeycode_sz);
|
||||
_(KDSETLED, NONE, 0);
|
||||
_(KDSETMODE, NONE, 0);
|
||||
_(KDSIGACCEPT, NONE, 0);
|
||||
_(KDSKBDIACR, READ, struct_kbdiacrs_sz);
|
||||
_(KDSKBENT, READ, struct_kbentry_sz);
|
||||
_(KDSKBLED, NONE, 0);
|
||||
_(KDSKBMETA, NONE, 0);
|
||||
_(KDSKBMODE, NONE, 0);
|
||||
_(KDSKBSENT, READ, struct_kbsentry_sz);
|
||||
_(KDUNMAPDISP, NONE, 0);
|
||||
_(KIOCSOUND, NONE, 0);
|
||||
_(LPABORT, NONE, 0);
|
||||
_(LPABORTOPEN, NONE, 0);
|
||||
_(LPCAREFUL, NONE, 0);
|
||||
_(LPCHAR, NONE, 0);
|
||||
_(LPGETIRQ, WRITE, sizeof(int));
|
||||
_(LPGETSTATUS, WRITE, sizeof(int));
|
||||
_(LPRESET, NONE, 0);
|
||||
_(LPSETIRQ, NONE, 0);
|
||||
_(LPTIME, NONE, 0);
|
||||
_(LPWAIT, NONE, 0);
|
||||
_(MTIOCGETCONFIG, WRITE, struct_mtconfiginfo_sz);
|
||||
_(MTIOCSETCONFIG, READ, struct_mtconfiginfo_sz);
|
||||
_(PIO_CMAP, NONE, 0);
|
||||
_(PIO_FONT, READ, 8192);
|
||||
_(PIO_SCRNMAP, READ, e_tabsz);
|
||||
_(PIO_UNIMAP, READ, struct_unimapdesc_sz);
|
||||
_(PIO_UNIMAPCLR, READ, struct_unimapinit_sz);
|
||||
_(PIO_UNISCRNMAP, READ, sizeof(short) * e_tabsz);
|
||||
_(SCSI_IOCTL_PROBE_HOST, READ, sizeof(int));
|
||||
_(SCSI_IOCTL_TAGGED_DISABLE, NONE, 0);
|
||||
_(SCSI_IOCTL_TAGGED_ENABLE, NONE, 0);
|
||||
_(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);
|
||||
_(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);
|
||||
_(TIOCGSERIAL, WRITE, struct_serial_struct_sz);
|
||||
_(TIOCSERGETMULTI, WRITE, struct_serial_multiport_struct_sz);
|
||||
_(TIOCSERSETMULTI, READ, struct_serial_multiport_struct_sz);
|
||||
_(TIOCSSERIAL, READ, struct_serial_struct_sz);
|
||||
|
||||
// The following ioctl requests are shared between AX25, IPX, netrom and
|
||||
// mrouted.
|
||||
// _(SIOCAIPXITFCRT, READ, sizeof(char));
|
||||
// _(SIOCAX25GETUID, READ, struct_sockaddr_ax25_sz);
|
||||
// _(SIOCNRGETPARMS, WRITE, struct_nr_parms_struct_sz);
|
||||
// _(SIOCAIPXPRISLT, READ, sizeof(char));
|
||||
// _(SIOCNRSETPARMS, READ, struct_nr_parms_struct_sz);
|
||||
// _(SIOCAX25ADDUID, READ, struct_sockaddr_ax25_sz);
|
||||
// _(SIOCNRDECOBS, NONE, 0);
|
||||
// _(SIOCAX25DELUID, READ, struct_sockaddr_ax25_sz);
|
||||
// _(SIOCIPXCFGDATA, WRITE, struct_ipx_config_data_sz);
|
||||
// _(SIOCAX25NOUID, READ, sizeof(int));
|
||||
// _(SIOCNRRTCTL, READ, sizeof(int));
|
||||
// _(SIOCAX25DIGCTL, READ, sizeof(int));
|
||||
// _(SIOCAX25GETPARMS, WRITE, struct_ax25_parms_struct_sz);
|
||||
// _(SIOCAX25SETPARMS, READ, struct_ax25_parms_struct_sz);
|
||||
#endif
|
||||
#undef _
|
||||
}

static bool ioctl_initialized = false;

struct ioctl_desc_compare {
  bool operator()(const ioctl_desc& left, const ioctl_desc& right) const {
    return left.req < right.req;
  }
};

static void ioctl_init() {
  ioctl_table_fill();
  Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare());

  bool bad = false;
  for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {
    if (ioctl_table[i].req >= ioctl_table[i + 1].req) {
      Printf("Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\n",
             ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,
             ioctl_table[i + 1].name);
      bad = true;
    }
  }

  if (bad) Die();

  ioctl_initialized = true;
}

// Handle the most evil ioctls that encode argument value as part of request id.
static unsigned ioctl_request_fixup(unsigned req) {
#if SANITIZER_LINUX
  // Strip size and event number.
  const unsigned kEviocgbitMask =
      (IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;
  if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)
    return IOCTL_EVIOCGBIT;
  // Strip absolute axis number.
  if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)
    return IOCTL_EVIOCGABS;
  if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)
    return IOCTL_EVIOCSABS;
#endif
  return req;
}

static const ioctl_desc *ioctl_table_lookup(unsigned req) {
  int left = 0;
  int right = ioctl_table_size;
  while (left < right) {
    int mid = (left + right) / 2;
    if (ioctl_table[mid].req < req)
      left = mid + 1;
    else
      right = mid;
  }
  if (left == right && ioctl_table[left].req == req)
    return ioctl_table + left;
  else
    return nullptr;
}

static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
  CHECK(desc);
  desc->req = req;
  desc->name = "<DECODED_IOCTL>";
  desc->size = IOC_SIZE(req);
  // Sanity check.
  if (desc->size > 0xFFFF) return false;
  unsigned dir = IOC_DIR(req);
  switch (dir) {
    case IOC_NONE:
      desc->type = ioctl_desc::NONE;
      break;
    case IOC_READ | IOC_WRITE:
      desc->type = ioctl_desc::READWRITE;
      break;
    case IOC_READ:
      desc->type = ioctl_desc::WRITE;
      break;
    case IOC_WRITE:
      desc->type = ioctl_desc::READ;
      break;
    default:
      return false;
  }
  // Size can be 0 iff type is NONE.
  if ((desc->type == IOC_NONE) != (desc->size == 0)) return false;
  // Sanity check.
  if (IOC_TYPE(req) == 0) return false;
  return true;
}

static const ioctl_desc *ioctl_lookup(unsigned req) {
  req = ioctl_request_fixup(req);
  const ioctl_desc *desc = ioctl_table_lookup(req);
  if (desc) return desc;

  // Try stripping access size from the request id.
  desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
  // Sanity check: requests that encode access size are either read or write and
  // have size of 0 in the table.
  if (desc && desc->size == 0 &&
      (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
       desc->type == ioctl_desc::READ))
    return desc;
  return nullptr;
}

static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
                             unsigned request, void *arg) {
  if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
    unsigned size = desc->size ? desc->size : IOC_SIZE(request);
    COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
  }
  if (desc->type != ioctl_desc::CUSTOM)
    return;
  if (request == IOCTL_SIOCGIFCONF) {
    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
    COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
                                  sizeof(ifc->ifc_len));
  }
}

static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
                              unsigned request, void *arg) {
  if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
    // FIXME: add verbose output
    unsigned size = desc->size ? desc->size : IOC_SIZE(request);
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
  }
  if (desc->type != ioctl_desc::CUSTOM)
    return;
  if (request == IOCTL_SIOCGIFCONF) {
    struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
  }
}

#endif
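The table and decoder above lean on the Linux convention that packs direction, size, type and number fields into the ioctl request word. The standalone C++ sketch below illustrates that layout; it assumes the generic asm-generic/ioctl.h field widths (individual architectures may override them), and the request it builds is hypothetical. Note the deliberate naming flip visible in ioctl_decode: a request the kernel fills in (an _IOR-style read) is classified as a WRITE to user memory.

// Standalone sketch of the generic Linux _IOC request layout; field widths
// are the asm-generic defaults and may differ per architecture.
#include <cstdio>

constexpr unsigned kNrBits = 8, kTypeBits = 8, kSizeBits = 14, kDirBits = 2;
constexpr unsigned kTypeShift = kNrBits;                 // 8
constexpr unsigned kSizeShift = kTypeShift + kTypeBits;  // 16
constexpr unsigned kDirShift = kSizeShift + kSizeBits;   // 30
constexpr unsigned kIocRead = 2;  // _IOC_READ: the kernel writes user memory

constexpr unsigned Ioc(unsigned dir, unsigned type, unsigned nr, unsigned size) {
  return (dir << kDirShift) | (type << kTypeShift) | nr | (size << kSizeShift);
}

int main() {
  // Hypothetical request equivalent to _IOR('X', 1, int).
  unsigned req = Ioc(kIocRead, 'X', 1, sizeof(int));
  unsigned dir = (req >> kDirShift) & ((1u << kDirBits) - 1);     // IOC_DIR(req)
  unsigned size = (req >> kSizeShift) & ((1u << kSizeBits) - 1);  // IOC_SIZE(req)
  std::printf("dir=%u size=%u\n", dir, size);  // prints dir=2 size=4
}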
@@ -0,0 +1,244 @@
//===-- sanitizer_common_interceptors_memintrinsics.inc ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memintrinsic function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// These interceptors are part of the common interceptors, but separated out so
// that implementations may add them, if necessary, to a separate source file
// that should define SANITIZER_COMMON_NO_REDEFINE_BUILTINS at the top.
//
// This file should be included into the tool's memintrinsic interceptor file,
// which has to define its own macros:
//   COMMON_INTERCEPTOR_ENTER
//   COMMON_INTERCEPTOR_READ_RANGE
//   COMMON_INTERCEPTOR_WRITE_RANGE
//   COMMON_INTERCEPTOR_MEMSET_IMPL
//   COMMON_INTERCEPTOR_MEMMOVE_IMPL
//   COMMON_INTERCEPTOR_MEMCPY_IMPL
//   COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
//===----------------------------------------------------------------------===//
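Concretely, a tool-side wrapper file could look like the sketch below. The hook names MyToolReadRange and MyToolWriteRange are hypothetical; only the macro names come from the list above. Because the *_IMPL macros get default definitions later in this file, a minimal tool only has to supply the hooks.

// Hypothetical tool-side .cpp, compiled inside a sanitizer runtime tree.
#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS

#include "interception/interception.h"

// Assumed tool callbacks; any real tool would report ranges to its shadow.
void MyToolReadRange(void *ctx, const void *p, unsigned long n);
void MyToolWriteRange(void *ctx, const void *p, unsigned long n);

#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED false
#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) (void)(ctx = nullptr)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, p, n) MyToolReadRange(ctx, p, n)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, n) MyToolWriteRange(ctx, p, n)

#include "sanitizer_common_interceptors_memintrinsics.inc"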

#ifdef SANITIZER_REDEFINE_BUILTINS_H
#error "Define SANITIZER_COMMON_NO_REDEFINE_BUILTINS in .cpp file"
#endif

#include "interception/interception.h"
#include "sanitizer_platform_interceptors.h"

// Platform-specific options.
#if SANITIZER_APPLE
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#elif SANITIZER_WINDOWS64
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 0
#else
#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE 1
#endif // SANITIZER_APPLE

#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
  { \
    if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
      return internal_memset(dst, v, size); \
    COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
    if (common_flags()->intercept_intrin) \
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
    return REAL(memset)(dst, v, size); \
  }
#endif

#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
  { \
    if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
      return internal_memmove(dst, src, size); \
    COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
    if (common_flags()->intercept_intrin) { \
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
      COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
    } \
    return REAL(memmove)(dst, src, size); \
  }
#endif

#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
  { \
    if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
      return internal_memmove(dst, src, size); \
    } \
    COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
    if (common_flags()->intercept_intrin) { \
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
      COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
    } \
    return REAL(memcpy)(dst, src, size); \
  }
#endif

#if SANITIZER_INTERCEPT_MEMSET
INTERCEPTOR(void *, memset, void *dst, int v, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
}

#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
#else
#define INIT_MEMSET
#endif

#if SANITIZER_INTERCEPT_MEMMOVE
INTERCEPTOR(void *, memmove, void *dst, const void *src, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
}

#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
#else
#define INIT_MEMMOVE
#endif

#if SANITIZER_INTERCEPT_MEMCPY
INTERCEPTOR(void *, memcpy, void *dst, const void *src, usize size) {
  // On OS X, calling internal_memcpy here will cause memory corruptions,
  // because memcpy and memmove are actually aliases of the same
  // implementation. We need to use internal_memmove here.
  // N.B.: If we switch this to internal_ we'll have to use internal_memmove
  // due to memcpy being an alias of memmove on OS X.
  void *ctx;
#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
#else
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
#endif
}

#define INIT_MEMCPY \
  do { \
    if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
      COMMON_INTERCEPT_FUNCTION(memcpy); \
    } else { \
      ASSIGN_REAL(memcpy, memmove); \
    } \
    CHECK(REAL(memcpy)); \
  } while (false)

#else
#define INIT_MEMCPY
#endif

#if SANITIZER_INTERCEPT_AEABI_MEM
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}

INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}

INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}

INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}

INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}

INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}

// Note the argument order.
INTERCEPTOR(void *, __aeabi_memset, void *block, usize size, int c) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}

INTERCEPTOR(void *, __aeabi_memset4, void *block, usize size, int c) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}

INTERCEPTOR(void *, __aeabi_memset8, void *block, usize size, int c) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}

INTERCEPTOR(void *, __aeabi_memclr, void *block, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}

INTERCEPTOR(void *, __aeabi_memclr4, void *block, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}

INTERCEPTOR(void *, __aeabi_memclr8, void *block, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}

#define INIT_AEABI_MEM \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
  COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
#else
#define INIT_AEABI_MEM
#endif // SANITIZER_INTERCEPT_AEABI_MEM

#if SANITIZER_INTERCEPT___BZERO
INTERCEPTOR(void *, __bzero, void *block, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
#else
#define INIT___BZERO
#endif // SANITIZER_INTERCEPT___BZERO

#if SANITIZER_INTERCEPT_BZERO
INTERCEPTOR(void *, bzero, void *block, usize size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
#else
#define INIT_BZERO
#endif // SANITIZER_INTERCEPT_BZERO

namespace __sanitizer {
// This does not need to be called if InitializeCommonInterceptors() is called.
void InitializeMemintrinsicInterceptors() {
  INIT_MEMSET;
  INIT_MEMMOVE;
  INIT_MEMCPY;
  INIT_AEABI_MEM;
  INIT___BZERO;
  INIT_BZERO;
}
} // namespace __sanitizer
@@ -0,0 +1,128 @@
//===-- sanitizer_common_interceptors_netbsd_compat.inc ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Common function interceptors for tools like AddressSanitizer,
// ThreadSanitizer, MemorySanitizer, etc.
//
// Interceptors for old NetBSD function calls that have since been versioned.
//
// Minimal supported NetBSD version: 9.0.
// Current supported NetBSD version: 9.99.26.
//
//===----------------------------------------------------------------------===//

#if SANITIZER_NETBSD

// First undef all mangled symbols.
// Next, define compat interceptors.
// Finally, undef INIT_ and redefine it.
// This allows us to avoid preprocessor issues.

#undef fstatvfs
#undef fstatvfs1
#undef getmntinfo
#undef getvfsstat
#undef statvfs
#undef statvfs1

INTERCEPTOR(int, statvfs, char *path, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(statvfs)(path, buf);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
  return res;
}

INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
  // FIXME: under ASan the call below may write to freed memory and corrupt
  // its metadata. See
  // https://github.com/google/sanitizers/issues/321.
  int res = REAL(fstatvfs)(fd, buf);
  if (!res) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
    if (fd >= 0)
      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
  }
  return res;
}

#undef INIT_STATVFS
#define INIT_STATVFS \
  COMMON_INTERCEPT_FUNCTION(statvfs); \
  COMMON_INTERCEPT_FUNCTION(fstatvfs); \
  COMMON_INTERCEPT_FUNCTION(__statvfs90); \
  COMMON_INTERCEPT_FUNCTION(__fstatvfs90)

INTERCEPTOR(int, __getmntinfo13, void **mntbufp, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, __getmntinfo13, mntbufp, flags);
  int cnt = REAL(__getmntinfo13)(mntbufp, flags);
  if (cnt > 0 && mntbufp) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mntbufp, sizeof(void *));
    if (*mntbufp)
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statvfs90_sz);
  }
  return cnt;
}

#undef INIT_GETMNTINFO
#define INIT_GETMNTINFO \
  COMMON_INTERCEPT_FUNCTION(__getmntinfo13); \
  COMMON_INTERCEPT_FUNCTION(__getmntinfo90)

INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, getvfsstat, buf, bufsize, flags);
  int ret = REAL(getvfsstat)(buf, bufsize, flags);
  if (buf && ret > 0)
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, ret * struct_statvfs90_sz);
  return ret;
}

#undef INIT_GETVFSSTAT
#define INIT_GETVFSSTAT \
  COMMON_INTERCEPT_FUNCTION(getvfsstat); \
  COMMON_INTERCEPT_FUNCTION(__getvfsstat90)

INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
  if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, internal_strlen(path) + 1);
  int res = REAL(statvfs1)(path, buf, flags);
  if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
  return res;
}

INTERCEPTOR(int, fstatvfs1, int fd, void *buf, int flags) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs1, fd, buf, flags);
  COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
  int res = REAL(fstatvfs1)(fd, buf, flags);
  if (!res) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs90_sz);
    if (fd >= 0)
      COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
  }
  return res;
}

#undef INIT_STATVFS1
#define INIT_STATVFS1 \
  COMMON_INTERCEPT_FUNCTION(statvfs1); \
  COMMON_INTERCEPT_FUNCTION(fstatvfs1); \
  COMMON_INTERCEPT_FUNCTION(__statvfs190); \
  COMMON_INTERCEPT_FUNCTION(__fstatvfs190)

#endif
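The renaming these interceptors compensate for happens in the NetBSD headers: code built against the 9.0 ABI calls the versioned symbol, while older binaries still import the plain name, so the runtime must hook both. A rough illustration, with declarations invented here rather than copied from NetBSD headers:

// Illustrative only: why both the plain and the versioned entry points matter.
extern "C" int statvfs(const char *path, void *buf);      // pre-9.0 binaries
extern "C" int __statvfs90(const char *path, void *buf);  // 9.0+ binaries

int QueryFilesystem(const char *path, void *buf, bool built_against_9_0) {
  // An old binary reaches the first symbol; a binary built with 9.0 headers
  // is compiled to call the renamed one even though its source says statvfs().
  return built_against_9_0 ? __statvfs90(path, buf) : statvfs(path, buf);
}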
@@ -0,0 +1,48 @@
#if defined(__aarch64__) && defined(__linux__)

#include "sanitizer_common/sanitizer_asm.h"
#include "builtins/assembly.h"

ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)

.comm _ZN14__interception10real_vforkE,8,8
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
        // Save x30 in the off-stack spill area.
        hint #25 // paciasp
        stp xzr, x30, [sp, #-16]!
        bl COMMON_INTERCEPTOR_SPILL_AREA
        ldp xzr, x30, [sp], 16
        str x30, [x0]

        // Call real vfork. This may return twice. User code that runs between
        // the first and the second return may clobber the stack frame of the
        // interceptor; that's why it does not have a frame.
        adrp x0, _ZN14__interception10real_vforkE
        ldr x0, [x0, :lo12:_ZN14__interception10real_vforkE]
        blr x0

        stp x0, xzr, [sp, #-16]!
        cmp x0, #0
        b.eq .L_exit

        // x0 != 0 => parent process. Clear stack shadow.
        add x0, sp, #16
        bl COMMON_INTERCEPTOR_HANDLE_VFORK

.L_exit:
        // Restore x30.
        bl COMMON_INTERCEPTOR_SPILL_AREA
        ldr x30, [x0]
        ldp x0, xzr, [sp], 16
        hint #29 // autiasp

        ret
ASM_SIZE(vfork)

ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)

GNU_PROPERTY_BTI_PAC

#endif
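For context, the double return the wrapper above must survive can be shown in ordinary user code. This standalone C++ sketch is not part of the runtime:

// vfork() returns twice, and the child borrows the parent's stack until it
// calls _exit() or an exec function, so the wrapper cannot keep its return
// address in a stack slot across the call.
#include <cstdio>
#include <unistd.h>

int main() {
  pid_t pid = vfork();
  if (pid == 0)  // first return: the child, running on the parent's stack
    _exit(0);    // the child must not return from main() or call exit()
  // Second return: the parent resumes here. Anything the child wrote to this
  // frame would now be garbage, which is what the off-stack spill area and
  // the frameless wrapper are defending against.
  std::printf("child was %d\n", static_cast<int>(pid));
}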
@@ -0,0 +1,49 @@
#if defined(__arm__) && defined(__linux__)

#include "sanitizer_common/sanitizer_asm.h"

ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)

.comm _ZN14__interception10real_vforkE,4,4
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
        // Save LR in the off-stack spill area.
        push {r4, lr}
        bl COMMON_INTERCEPTOR_SPILL_AREA
        pop {r4, lr}
        str lr, [r0]

        // Call real vfork. This may return twice. User code that runs between
        // the first and the second return may clobber the stack frame of the
        // interceptor; that's why it does not have a frame.
        ldr r0, .LCPI0_0
.LPC0_0:
        ldr r0, [pc, r0]
        mov lr, pc
        bx r0

        push {r0, r4}
        cmp r0, #0
        beq .L_exit

        // r0 != 0 => parent process. Clear stack shadow.
        add r0, sp, #8
        bl COMMON_INTERCEPTOR_HANDLE_VFORK

.L_exit:
        // Restore LR.
        bl COMMON_INTERCEPTOR_SPILL_AREA
        ldr lr, [r0]
        pop {r0, r4}

        mov pc, lr

.LCPI0_0:
        .long _ZN14__interception10real_vforkE - (.LPC0_0+8)

ASM_SIZE(vfork)

ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)

#endif
@@ -0,0 +1,64 @@
#if defined(__i386__) && defined(__linux__)

#include "sanitizer_common/sanitizer_asm.h"

.comm _ZN14__interception10real_vforkE,4,4
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
        _CET_ENDBR
        // Store return address in the spill area and tear down the stack frame.
        sub $12, %esp
        call COMMON_INTERCEPTOR_SPILL_AREA
        mov 12(%esp), %ecx
        mov %ecx, (%eax)
        add $16, %esp

        call .L0$pb
.L0$pb:
        pop %eax
.Ltmp0:
        add $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %eax
        call *_ZN14__interception10real_vforkE@GOTOFF(%eax)

        // Restore the stack frame.
        // 12(%esp) return address
        // 8(%esp) spill %ebx
        // 4(%esp) spill REAL(vfork) return value
        // (%esp) call frame (arg0) for __*_handle_vfork
        sub $16, %esp
        mov %ebx, 8(%esp)
        mov %eax, 4(%esp)

        // Form GOT address in %ebx.
        call .L1$pb
.L1$pb:
        pop %ebx
.Ltmp1:
        add $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L1$pb), %ebx

        // Restore original return address.
        call COMMON_INTERCEPTOR_SPILL_AREA
        mov (%eax), %ecx
        mov %ecx, 12(%esp)
        mov 4(%esp), %eax

        // Call handle_vfork in the parent process (%eax != 0).
        test %eax, %eax
        je .L_exit

        lea 16(%esp), %ecx
        mov %ecx, (%esp)
        call COMMON_INTERCEPTOR_HANDLE_VFORK@PLT

.L_exit:
        mov 4(%esp), %eax
        mov 8(%esp), %ebx
        add $12, %esp
        ret
ASM_SIZE(vfork)

ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)

#endif
@@ -0,0 +1,57 @@
#if defined(__loongarch_lp64) && defined(__linux__)

#include "sanitizer_common/sanitizer_asm.h"

ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
ASM_HIDDEN(_ZN14__interception10real_vforkE)

.text
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
        // Save ra in the off-stack spill area.
        // allocate space on stack
        addi.d $sp, $sp, -16
        // store $ra value
        st.d $ra, $sp, 8
        bl COMMON_INTERCEPTOR_SPILL_AREA
        // restore previous values from stack
        ld.d $ra, $sp, 8
        // adjust stack
        addi.d $sp, $sp, 16
        // store $ra through $a0
        st.d $ra, $a0, 0

        // Call real vfork. This may return twice. User code that runs between
        // the first and the second return may clobber the stack frame of the
        // interceptor; that's why it does not have a frame.
        la.local $a0, _ZN14__interception10real_vforkE
        ld.d $a0, $a0, 0
        jirl $ra, $a0, 0

        // adjust stack
        addi.d $sp, $sp, -16
        // store $a0 on the adjusted stack
        st.d $a0, $sp, 8
        // jump to exit label if $a0 is 0
        beqz $a0, .L_exit

        // $a0 != 0 => parent process. Clear stack shadow.
        // put old $sp to $a0
        addi.d $a0, $sp, 16
        bl %plt(COMMON_INTERCEPTOR_HANDLE_VFORK)

.L_exit:
        // Restore $ra
        bl COMMON_INTERCEPTOR_SPILL_AREA
        ld.d $ra, $a0, 0
        // reload $a0 from the stack
        ld.d $a0, $sp, 8
        // adjust stack
        addi.d $sp, $sp, 16
        jr $ra
ASM_SIZE(vfork)

ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)

#endif
@@ -0,0 +1,56 @@
#if (defined(__riscv) && (__riscv_xlen == 64)) && defined(__linux__)

#include "sanitizer_common/sanitizer_asm.h"

ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)

.comm _ZN14__interception10real_vforkE,8,8
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
        // Save ra in the off-stack spill area.
        // allocate space on stack
        addi sp, sp, -16
        // store ra value
        sd ra, 8(sp)
        call COMMON_INTERCEPTOR_SPILL_AREA
        // restore previous values from stack
        ld ra, 8(sp)
        // adjust stack
        addi sp, sp, 16
        // store ra through x10
        sd ra, 0(x10)

        // Call real vfork. This may return twice. User code that runs between
        // the first and the second return may clobber the stack frame of the
        // interceptor; that's why it does not have a frame.
        la x10, _ZN14__interception10real_vforkE
        ld x10, 0(x10)
        jalr x10

        // adjust stack
        addi sp, sp, -16
        // store x10 on the adjusted stack
        sd x10, 8(sp)
        // jump to exit label if x10 is 0
        beqz x10, .L_exit

        // x10 != 0 => parent process. Clear stack shadow.
        // put old sp to x10
        addi x10, sp, 16
        call COMMON_INTERCEPTOR_HANDLE_VFORK

.L_exit:
        // Restore ra
        call COMMON_INTERCEPTOR_SPILL_AREA
        ld ra, 0(x10)
        // reload x10 from the stack
        ld x10, 8(sp)
        // adjust stack
        addi sp, sp, 16
        ret
ASM_SIZE(vfork)

ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)

#endif
@@ -0,0 +1,42 @@
#if defined(__x86_64__) && defined(__linux__)

#include "sanitizer_common/sanitizer_asm.h"

.comm _ZN14__interception10real_vforkE,8,8
.globl ASM_WRAPPER_NAME(vfork)
ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
ASM_WRAPPER_NAME(vfork):
        _CET_ENDBR
        // Store return address in the spill area and tear down the stack frame.
        push %rcx
        call COMMON_INTERCEPTOR_SPILL_AREA
        pop %rcx
        pop %rdi
        mov %rdi, (%rax)

        call *_ZN14__interception10real_vforkE(%rip)

        // Restore return address from the spill area.
        push %rcx
        push %rax
        call COMMON_INTERCEPTOR_SPILL_AREA
        mov (%rax), %rdx
        mov %rdx, 8(%rsp)
        mov (%rsp), %rax

        // Call handle_vfork in the parent process (%rax != 0).
        test %rax, %rax
        je .L_exit

        lea 16(%rsp), %rdi
        call COMMON_INTERCEPTOR_HANDLE_VFORK@PLT

.L_exit:
        pop %rax
        ret
ASM_SIZE(ASM_WRAPPER_NAME(vfork))

ASM_INTERCEPTOR_TRAMPOLINE(vfork)
ASM_TRAMPOLINE_ALIAS(vfork, vfork)

#endif
61
lib/libtsan/sanitizer_common/sanitizer_common_interface.inc
Normal file
@@ -0,0 +1,61 @@
//===-- sanitizer_common_interface.inc ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Sanitizer Common interface list.
//===----------------------------------------------------------------------===//
INTERFACE_FUNCTION(__sanitizer_acquire_crash_state)
INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_annotate_double_ended_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_copy_contiguous_container_annotations)
INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(
    __sanitizer_double_ended_contiguous_container_find_bad_address)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_get_report_path)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
INTERFACE_FUNCTION(__sanitizer_verify_double_ended_contiguous_container)
INTERFACE_WEAK_FUNCTION(__sanitizer_on_print)
INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
INTERFACE_WEAK_FUNCTION(__sanitizer_get_dtls_size)
// Sanitizer weak hooks
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_memcmp)
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strcmp)
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strncmp)
INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strstr)
// Stacktrace interface.
INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
// Allocator interface.
INTERFACE_FUNCTION(__sanitizer_get_allocated_begin)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size_fast)
INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
INTERFACE_FUNCTION(__sanitizer_get_heap_size)
INTERFACE_FUNCTION(__sanitizer_get_ownership)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
INTERFACE_FUNCTION(__sanitizer_purge_allocator)
INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
INTERFACE_WEAK_FUNCTION(__sanitizer_ignore_free_hook)
// Memintrinsic functions.
INTERFACE_FUNCTION(__sanitizer_internal_memcpy)
INTERFACE_FUNCTION(__sanitizer_internal_memmove)
INTERFACE_FUNCTION(__sanitizer_internal_memset)

#if SANITIZER_WINDOWS
INTERFACE_FUNCTION(__sanitizer_override_function)
INTERFACE_FUNCTION(__sanitizer_override_function_by_addr)
INTERFACE_FUNCTION(__sanitizer_register_weak_function)
#endif
@@ -0,0 +1,16 @@
//===-- sanitizer_common_interface_posix.inc ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Sanitizer Common interface list only available for Posix systems.
//===----------------------------------------------------------------------===//
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_frame)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_demangle)
INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_set_inline_frames)
256
lib/libtsan/sanitizer_common/sanitizer_common_libcdep.cpp
Normal file
@@ -0,0 +1,256 @@
//===-- sanitizer_common_libcdep.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stackdepot.h"

namespace __sanitizer {

#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE StackDepotStats StackDepotGetStats() { return {}; }

void *BackgroundThread(void *arg) {
  VPrintf(1, "%s: Started BackgroundThread\n", SanitizerToolName);
  const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
  const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
  const bool heap_profile = common_flags()->heap_profile;
  uptr prev_reported_rss = 0;
  uptr prev_reported_stack_depot_size = 0;
  bool reached_soft_rss_limit = false;
  uptr rss_during_last_reported_profile = 0;
  while (true) {
    SleepForMillis(100);
    const uptr current_rss_mb = GetRSS() >> 20;
    if (Verbosity()) {
      // If RSS has grown 10% since last time, print some information.
      if (prev_reported_rss * 11 / 10 < current_rss_mb) {
        Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
        prev_reported_rss = current_rss_mb;
      }
      // If stack depot has grown 10% since last time, print it too.
      StackDepotStats stack_depot_stats = StackDepotGetStats();
      if (prev_reported_stack_depot_size * 11 / 10 <
          stack_depot_stats.allocated) {
        Printf("%s: StackDepot: %zd ids; %zdM allocated\n", SanitizerToolName,
               stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20);
        prev_reported_stack_depot_size = stack_depot_stats.allocated;
      }
    }
    // Check RSS against the limit.
    if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
      Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
      DumpProcessMap();
      Die();
    }
    if (soft_rss_limit_mb) {
      if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
        reached_soft_rss_limit = true;
        Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
        SetRssLimitExceeded(true);
      } else if (soft_rss_limit_mb >= current_rss_mb &&
                 reached_soft_rss_limit) {
        reached_soft_rss_limit = false;
        Report("%s: soft rss limit unexhausted (%zdMb vs %zdMb)\n",
               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
        SetRssLimitExceeded(false);
      }
    }
    if (heap_profile &&
        current_rss_mb > rss_during_last_reported_profile * 1.1) {
      Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
      __sanitizer_print_memory_profile(90, 20);
      rss_during_last_reported_profile = current_rss_mb;
    }
  }
}
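The limits come from the common flags hard_rss_limit_mb, soft_rss_limit_mb and heap_profile, typically set through the tool's options environment variable (for example ASAN_OPTIONS=hard_rss_limit_mb=4096). The 10%-growth reporting rule above is integer arithmetic; a standalone sketch with made-up values:

// Mirrors the BackgroundThread heuristic: report only when the value has
// grown by more than 10% since the last report (prev * 11 / 10 is "previous
// value plus 10%", rounded down).
#include <cstdio>
#include <initializer_list>

int main() {
  unsigned long prev_reported = 0;
  for (unsigned long rss_mb : {100ul, 105ul, 111ul, 120ul, 133ul}) {
    if (prev_reported * 11 / 10 < rss_mb) {
      std::printf("report at %lu MB\n", rss_mb);  // fires for 100, 111, 133
      prev_reported = rss_mb;
    }
  }
}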

void MaybeStartBackgroudThread() {
  // Need to implement/test on other platforms.
  // Start the background thread if one of the rss limits is given.
  if (!common_flags()->hard_rss_limit_mb &&
      !common_flags()->soft_rss_limit_mb &&
      !common_flags()->heap_profile) return;
  if (!&internal_pthread_create) {
    VPrintf(1, "%s: internal_pthread_create undefined\n", SanitizerToolName);
    return; // Can't spawn the thread anyway.
  }

  static bool started = false;
  if (!started) {
    started = true;
    internal_start_thread(BackgroundThread, nullptr);
  }
}

# if !SANITIZER_START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
# ifdef __clang__
# pragma clang diagnostic push
// We avoid global-constructors to be sure that globals are ready when
// sanitizers need them. This can happen before global constructors have
// executed. Here we don't mind if the thread is started at a later stage.
# pragma clang diagnostic ignored "-Wglobal-constructors"
# endif
static struct BackgroudThreadStarted {
  BackgroudThreadStarted() { MaybeStartBackgroudThread(); }
} background_thread_strarter UNUSED;
# ifdef __clang__
# pragma clang diagnostic pop
# endif
# endif
#else
void MaybeStartBackgroudThread() {}
#endif

void WriteToSyslog(const char *msg) {
  if (!msg)
    return;
  InternalScopedString msg_copy;
  msg_copy.Append(msg);
  const char *p = msg_copy.data();

  // Print one line at a time.
  // syslog, at least on Android, has an implicit message length limit.
  while (char* q = internal_strchr(p, '\n')) {
    *q = '\0';
    WriteOneLineToSyslog(p);
    p = q + 1;
  }
  // Print remaining characters, if there are any.
  // Note that this will add an extra newline at the end.
  // FIXME: buffer extra output. This would need a thread-local buffer, which
  // on Android requires plugging into the tools (ex. ASan's) Thread class.
  if (*p)
    WriteOneLineToSyslog(p);
}

static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
  sandboxing_callback = f;
}

uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
                                       const char *name) {
  CHECK(IsPowerOfTwo(align));
  if (align <= GetPageSizeCached())
    return Init(size, name);
  uptr start = Init(size + align, name);
  start += align - (start & (align - 1));
  return start;
}
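A worked instance of the round-up in InitAligned, assuming a 4 KiB granule; the addresses are made up:

// With align a power of two, start + (align - (start & (align - 1))) advances
// start to the next multiple of align; an already-aligned start is pushed a
// full align further, which the size + align over-reservation absorbs.
#include <cstdio>
#include <initializer_list>

int main() {
  const unsigned long align = 0x1000;
  for (unsigned long start : {0x7010ul, 0x8000ul}) {
    unsigned long aligned = start + (align - (start & (align - 1)));
    std::printf("%#lx -> %#lx\n", start, aligned);  // 0x7010->0x8000, 0x8000->0x9000
  }
}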

#if !SANITIZER_FUCHSIA

// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow) {
  CHECK_EQ((beg % GetMmapGranularity()), 0);
  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
  uptr size = end - beg + 1;
  DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
  if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)
                     : !MmapFixedNoReserve(beg, size, name)) {
    Report(
        "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
        "Perhaps you're using ulimit -v or ulimit -d\n",
        size);
    Die();
  }
  if (madvise_shadow && common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(beg, size);
}

void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start) {
  if (!size)
    return;
  void *res = MmapFixedNoAccess(addr, size, "shadow gap");
  if (addr == (uptr)res)
    return;
  // A few pages at the start of the address space cannot be protected.
  // But we really want to protect as much as possible, to prevent this memory
  // being returned as a result of a non-FIXED mmap().
  if (addr == zero_base_shadow_start) {
    uptr step = GetMmapGranularity();
    while (size > step && addr < zero_base_max_shadow_start) {
      addr += step;
      size -= step;
      void *res = MmapFixedNoAccess(addr, size, "shadow gap");
      if (addr == (uptr)res)
        return;
    }
  }

  Report(
      "ERROR: Failed to protect the shadow gap. "
      "%s cannot proceed correctly. ABORTING.\n",
      SanitizerToolName);
  DumpProcessMap();
  Die();
}

#endif // !SANITIZER_FUCHSIA

#if !SANITIZER_WINDOWS && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in.
SANITIZER_WEAK_ATTRIBUTE void StackDepotStopBackgroundThread() {}
static void StopStackDepotBackgroundThread() {
  StackDepotStopBackgroundThread();
}
#else
// SANITIZER_WEAK_ATTRIBUTE is unsupported.
static void StopStackDepotBackgroundThread() {}
#endif

void MemCpyAccessible(void *dest, const void *src, uptr n) {
  if (TryMemCpy(dest, src, n))
    return;

  const uptr page_size = GetPageSize();
  uptr b = reinterpret_cast<uptr>(src);
  uptr b_up = RoundUpTo(b, page_size);

  uptr e = reinterpret_cast<uptr>(src) + n;
  uptr e_down = RoundDownTo(e, page_size);

  auto copy_or_zero = [dest, src](uptr beg, uptr end) {
    const uptr udest = reinterpret_cast<uptr>(dest);
    const uptr usrc = reinterpret_cast<uptr>(src);
    void *d = reinterpret_cast<void *>(udest + (beg - usrc));
    const uptr size = end - beg;
    if (!TryMemCpy(d, reinterpret_cast<void *>(beg), size))
      internal_memset(d, 0, size);
  };

  copy_or_zero(b, b_up);
  for (uptr p = b_up; p < e_down; p += page_size)
    copy_or_zero(p, p + page_size);
  copy_or_zero(e_down, e);
}

} // namespace __sanitizer

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
                             __sanitizer_sandbox_arguments *args) {
  __sanitizer::StopStackDepotBackgroundThread();
  __sanitizer::PlatformPrepareForSandboxing(args);
  if (__sanitizer::sandboxing_callback)
    __sanitizer::sandboxing_callback();
}
37
lib/libtsan/sanitizer_common/sanitizer_common_nolibc.cpp
Normal file
@@ -0,0 +1,37 @@
//===-- sanitizer_common_nolibc.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains stubs for libc functions to facilitate optional use of
// libc in no-libcdep sources.
//===----------------------------------------------------------------------===//

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_platform.h"

namespace __sanitizer {

// The Windows implementations of these functions use the win32 API directly,
// bypassing libc.
#if !SANITIZER_WINDOWS
# if SANITIZER_LINUX
void LogMessageOnPrintf(const char *str) {}
void InitTlsSize() {}
# endif
void WriteToSyslog(const char *buffer) {}
void Abort() { internal__exit(1); }
bool CreateDir(const char *pathname) { return false; }
#endif // !SANITIZER_WINDOWS

#if !SANITIZER_WINDOWS && !SANITIZER_APPLE
void ListOfModules::init() {}
void InitializePlatformCommonFlags(CommonFlags *cf) {}
#endif

} // namespace __sanitizer
3217
lib/libtsan/sanitizer_common/sanitizer_common_syscalls.inc
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,43 @@
//===-- sanitizer_coverage_interface.inc ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Sanitizer Coverage interface list.
//===----------------------------------------------------------------------===//
INTERFACE_FUNCTION(__sanitizer_cov_dump)
INTERFACE_FUNCTION(__sanitizer_cov_reset)
INTERFACE_FUNCTION(__sanitizer_dump_coverage)
INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
INTERFACE_WEAK_FUNCTION(__sancov_default_options)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp2)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp4)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp8)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp1)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp2)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp4)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp8)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div4)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div8)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load1)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load2)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load4)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load8)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_load16)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store1)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store2)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store4)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store8)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_store16)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_bool_flag_init)
INTERFACE_WEAK_FUNCTION(__sanitizer_cov_pcs_init)
41
lib/libtsan/sanitizer_common/sanitizer_dbghelp.h
Normal file
@@ -0,0 +1,41 @@
//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Wrappers for lazily loaded dbghelp.dll. Provides function pointers and a
// callback to initialize them.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_SYMBOLIZER_WIN_H
#define SANITIZER_SYMBOLIZER_WIN_H

#if !SANITIZER_WINDOWS
#error "sanitizer_dbghelp.h is a Windows-only header"
#endif

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <dbghelp.h>

namespace __sanitizer {

extern decltype(::StackWalk64) *StackWalk64;
extern decltype(::SymCleanup) *SymCleanup;
extern decltype(::SymFromAddr) *SymFromAddr;
extern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
extern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
extern decltype(::SymGetModuleBase64) *SymGetModuleBase64;
extern decltype(::SymGetSearchPathW) *SymGetSearchPathW;
extern decltype(::SymInitialize) *SymInitialize;
extern decltype(::SymSetOptions) *SymSetOptions;
extern decltype(::SymSetSearchPathW) *SymSetSearchPathW;
extern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;

} // namespace __sanitizer

#endif // SANITIZER_SYMBOLIZER_WIN_H
|
||||
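The decltype-typed extern pointers above let callers invoke dbghelp entry points that are bound at run time rather than at link time. A minimal sketch of how one such pointer can be bound lazily; the function name TryBindSymInitialize and the single-pointer scope are assumptions, the real runtime binds all of them in one initialization callback:

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <dbghelp.h>

namespace __sanitizer {
decltype(::SymInitialize) *SymInitialize;

bool TryBindSymInitialize() {
  // dbghelp.dll is loaded on first use instead of being a link-time import.
  HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
  if (!dbghelp) return false;
  SymInitialize = reinterpret_cast<decltype(::SymInitialize) *>(
      GetProcAddress(dbghelp, "SymInitialize"));
  return SymInitialize != nullptr;
}
}  // namespace __sanitizer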
410
lib/libtsan/sanitizer_common/sanitizer_deadlock_detector.h
Normal file
@@ -0,0 +1,410 @@
//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// The deadlock detector maintains a directed graph of lock acquisitions.
// When a lock event happens, the detector checks if the locks already held by
// the current thread are reachable from the newly acquired lock.
//
// The detector can handle only a fixed amount of simultaneously live locks
// (a lock is alive if it has been locked at least once and has not been
// destroyed). When the maximal number of locks is reached the entire graph
// is flushed and a new lock epoch is started. The node ids from the old
// epochs cannot be used with any of the detector methods except for
// nodeBelongsToCurrentEpoch().
//
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DEADLOCK_DETECTOR_H
#define SANITIZER_DEADLOCK_DETECTOR_H

#include "sanitizer_bvgraph.h"
#include "sanitizer_common.h"

namespace __sanitizer {

// Thread-local state for DeadlockDetector.
// It contains the locks currently held by the owning thread.
template <class BV>
class DeadlockDetectorTLS {
 public:
  // No CTOR.
  void clear() {
    bv_.clear();
    epoch_ = 0;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  bool empty() const { return bv_.empty(); }

  void ensureCurrentEpoch(uptr current_epoch) {
    if (epoch_ == current_epoch) return;
    bv_.clear();
    epoch_ = current_epoch;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  uptr getEpoch() const { return epoch_; }

  // Returns true if this is the first (non-recursive) acquisition of this lock.
  bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
    CHECK_EQ(epoch_, current_epoch);
    if (!bv_.setBit(lock_id)) {
      // The lock is already held by this thread, it must be recursive.
      CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
      recursive_locks[n_recursive_locks++] = lock_id;
      return false;
    }
    CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
    // lock_id < BV::kSize, can cast to a smaller int.
    u32 lock_id_short = static_cast<u32>(lock_id);
    LockWithContext l = {lock_id_short, stk};
    all_locks_with_contexts_[n_all_locks_++] = l;
    return true;
  }

  void removeLock(uptr lock_id) {
    if (n_recursive_locks) {
      for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
        if (recursive_locks[i] == lock_id) {
          n_recursive_locks--;
          Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
          return;
        }
      }
    }
    if (!bv_.clearBit(lock_id))
      return;  // probably addLock happened before flush
    if (n_all_locks_) {
      for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
        if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
          Swap(all_locks_with_contexts_[i],
               all_locks_with_contexts_[n_all_locks_ - 1]);
          n_all_locks_--;
          break;
        }
      }
    }
  }

  u32 findLockContext(uptr lock_id) {
    for (uptr i = 0; i < n_all_locks_; i++)
      if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
        return all_locks_with_contexts_[i].stk;
    return 0;
  }

  const BV &getLocks(uptr current_epoch) const {
    CHECK_EQ(epoch_, current_epoch);
    return bv_;
  }

  uptr getNumLocks() const { return n_all_locks_; }
  uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }

 private:
  BV bv_;
  uptr epoch_;
  uptr recursive_locks[64];
  uptr n_recursive_locks;
  struct LockWithContext {
    u32 lock;
    u32 stk;
  };
  LockWithContext all_locks_with_contexts_[128];
  uptr n_all_locks_;
};

// DeadlockDetector.
// For deadlock detection to work we need one global DeadlockDetector object
// and one DeadlockDetectorTLS object per every thread.
// This class is not thread-safe; all concurrent accesses should be guarded
// by an external lock.
// Most of the methods of this class are not thread-safe (i.e. should
// be protected by an external lock) unless explicitly told otherwise.
template <class BV>
class DeadlockDetector {
 public:
  typedef BV BitVector;

  uptr size() const { return g_.size(); }

  // No CTOR.
  void clear() {
    current_epoch_ = 0;
    available_nodes_.clear();
    recycled_nodes_.clear();
    g_.clear();
    n_edges_ = 0;
  }

  // Allocate new deadlock detector node.
  // If we are out of available nodes, first try to recycle some.
  // If there is nothing to recycle, flush the graph and increment the epoch.
  // Associate 'data' (opaque user's object) with the new node.
  uptr newNode(uptr data) {
    if (!available_nodes_.empty())
      return getAvailableNode(data);
    if (!recycled_nodes_.empty()) {
      for (sptr i = n_edges_ - 1; i >= 0; i--) {
        if (recycled_nodes_.getBit(edges_[i].from) ||
            recycled_nodes_.getBit(edges_[i].to)) {
          Swap(edges_[i], edges_[n_edges_ - 1]);
          n_edges_--;
        }
      }
      CHECK(available_nodes_.empty());
      // removeEdgesFrom was called in removeNode.
      g_.removeEdgesTo(recycled_nodes_);
      available_nodes_.setUnion(recycled_nodes_);
      recycled_nodes_.clear();
      return getAvailableNode(data);
    }
    // We are out of vacant nodes. Flush and increment the current_epoch_.
    current_epoch_ += size();
    recycled_nodes_.clear();
    available_nodes_.setAll();
    g_.clear();
    n_edges_ = 0;
    return getAvailableNode(data);
  }

  // Get data associated with the node created by newNode().
  uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }

  bool nodeBelongsToCurrentEpoch(uptr node) {
    return node && (node / size() * size()) == current_epoch_;
  }

  void removeNode(uptr node) {
    uptr idx = nodeToIndex(node);
    CHECK(!available_nodes_.getBit(idx));
    CHECK(recycled_nodes_.setBit(idx));
    g_.removeEdgesFrom(idx);
  }

  void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
    dtls->ensureCurrentEpoch(current_epoch_);
  }

  // Returns true if there is a cycle in the graph after this lock event.
  // Ideally should be called before the lock is acquired so that we can
  // report a deadlock before a real deadlock happens.
  bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
  }

  u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    return dtls->findLockContext(nodeToIndex(node));
  }

  // Add cur_node to the set of locks held currently by dtls.
  void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
  }

  // Experimental *racy* fast path function.
  // Returns true if all edges from the currently held locks to cur_node exist.
  bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    uptr local_epoch = dtls->getEpoch();
    // Read from current_epoch_ is racy.
    if (cur_node && local_epoch == current_epoch_ &&
        local_epoch == nodeToEpoch(cur_node)) {
      uptr cur_idx = nodeToIndexUnchecked(cur_node);
      for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
        if (!g_.hasEdge(dtls->getLock(i), cur_idx))
          return false;
      }
      return true;
    }
    return false;
  }

  // Adds edges from currently held locks to cur_node,
  // returns the number of added edges, and puts the sources of added edges
  // into added_edges[].
  // Should be called before onLockAfter.
  uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
                int unique_tid) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    uptr added_edges[40];
    uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
                                     added_edges, ARRAY_SIZE(added_edges));
    for (uptr i = 0; i < n_added_edges; i++) {
      if (n_edges_ < ARRAY_SIZE(edges_)) {
        Edge e = {(u16)added_edges[i], (u16)cur_idx,
                  dtls->findLockContext(added_edges[i]), stk,
                  unique_tid};
        edges_[n_edges_++] = e;
      }
    }
    return n_added_edges;
  }

  bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
                int *unique_tid) {
    uptr from_idx = nodeToIndex(from_node);
    uptr to_idx = nodeToIndex(to_node);
    for (uptr i = 0; i < n_edges_; i++) {
      if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
        *stk_from = edges_[i].stk_from;
        *stk_to = edges_[i].stk_to;
        *unique_tid = edges_[i].unique_tid;
        return true;
      }
    }
    return false;
  }

  // Test-only function. Handles the before/after lock events,
  // returns true if there is a cycle.
  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
    addEdges(dtls, cur_node, stk, 0);
    onLockAfter(dtls, cur_node, stk);
    return is_reachable;
  }

  // Handles the try_lock event, returns false.
  // When a try_lock event happens (i.e. a try_lock call succeeds) we need
  // to add this lock to the currently held locks, but we should not try to
  // change the lock graph or to detect a cycle. We may want to investigate
  // whether a more aggressive strategy is possible for try_lock.
  bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
    return false;
  }

  // Returns true iff dtls is empty (no locks are currently held) and we can
  // add the node to the currently held locks w/o changing the global state.
  // This operation is thread-safe as it only touches the dtls.
  bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (!dtls->empty()) return false;
    if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  // Finds a path between the lock 'cur_node' (currently not held in dtls)
  // and some currently held lock, returns the length of the path
  // or 0 on failure.
  uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
                      uptr path_size) {
    tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
    uptr idx = nodeToIndex(cur_node);
    CHECK(!tmp_bv_.getBit(idx));
    uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
    for (uptr i = 0; i < res; i++)
      path[i] = indexToNode(path[i]);
    if (res)
      CHECK_EQ(path[0], cur_node);
    return res;
  }

  // Handle the unlock event.
  // This operation is thread-safe as it only touches the dtls.
  void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    if (dtls->getEpoch() == nodeToEpoch(node))
      dtls->removeLock(nodeToIndexUnchecked(node));
  }

  // Tries to handle the lock event w/o writing to global state.
  // Returns true on success.
  // This operation is thread-safe as it only touches the dtls
  // (modulo racy nature of hasAllEdges).
  bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (hasAllEdges(dtls, node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
    return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
  }

  uptr testOnlyGetEpoch() const { return current_epoch_; }
  bool testOnlyHasEdge(uptr l1, uptr l2) {
    return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
  }
  // idx1 and idx2 are raw indices to g_, not lock IDs.
  bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
    return g_.hasEdge(idx1, idx2);
  }

  void Print() {
    for (uptr from = 0; from < size(); from++)
      for (uptr to = 0; to < size(); to++)
        if (g_.hasEdge(from, to))
          Printf(" %zx => %zx\n", from, to);
  }

 private:
  void check_idx(uptr idx) const { CHECK_LT(idx, size()); }

  void check_node(uptr node) const {
    CHECK_GE(node, size());
    CHECK_EQ(current_epoch_, nodeToEpoch(node));
  }

  uptr indexToNode(uptr idx) const {
    check_idx(idx);
    return idx + current_epoch_;
  }

  uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }

  uptr nodeToIndex(uptr node) const {
    check_node(node);
    return nodeToIndexUnchecked(node);
  }

  uptr nodeToEpoch(uptr node) const { return node / size() * size(); }

  uptr getAvailableNode(uptr data) {
    uptr idx = available_nodes_.getAndClearFirstOne();
    data_[idx] = data;
    return indexToNode(idx);
  }

  struct Edge {
    u16 from;
    u16 to;
    u32 stk_from;
    u32 stk_to;
    int unique_tid;
  };

  uptr current_epoch_;
  BV available_nodes_;
  BV recycled_nodes_;
  BV tmp_bv_;
  BVGraph<BV> g_;
  uptr data_[BV::kSize];
  Edge edges_[BV::kSize * 32];
  uptr n_edges_;
};

}  // namespace __sanitizer

#endif  // SANITIZER_DEADLOCK_DETECTOR_H
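A test-style sketch of the API above, mirroring how the unit tests drive the detector; TwoLevelBitVector is assumed to be available from sanitizer_bitvector.h, and onLock returns true when the new acquisition closes a cycle:

#include "sanitizer_bitvector.h"
#include "sanitizer_deadlock_detector.h"
using namespace __sanitizer;

typedef TwoLevelBitVector<> BV;

bool demo_cycle() {
  DeadlockDetector<BV> d;
  DeadlockDetectorTLS<BV> dtls;
  d.clear();
  dtls.clear();
  uptr a = d.newNode(0), b = d.newNode(0);
  d.onLock(&dtls, a);         // thread now holds {a}
  d.onLock(&dtls, b);         // records edge a -> b
  d.onUnlock(&dtls, b);
  d.onUnlock(&dtls, a);
  d.onLock(&dtls, b);         // thread now holds {b}
  return d.onLock(&dtls, a);  // edge b -> a closes the cycle: returns true
}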
194
lib/libtsan/sanitizer_common/sanitizer_deadlock_detector1.cpp
Normal file
@@ -0,0 +1,194 @@
//===-- sanitizer_deadlock_detector1.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on NxN adjacency bit matrix.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_deadlock_detector.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1

namespace __sanitizer {

typedef TwoLevelBitVector<> DDBV;  // DeadlockDetector's bit vector.

struct DDPhysicalThread {
};

struct DDLogicalThread {
  u64 ctx;
  DeadlockDetectorTLS<DDBV> dd;
  DDReport rep;
  bool report_pending;
};

struct DD final : public DDetector {
  SpinMutex mtx;
  DeadlockDetector<DDBV> dd;
  DDFlags flags;

  explicit DD(const DDFlags *flags);

  DDPhysicalThread *CreatePhysicalThread() override;
  void DestroyPhysicalThread(DDPhysicalThread *pt) override;

  DDLogicalThread *CreateLogicalThread(u64 ctx) override;
  void DestroyLogicalThread(DDLogicalThread *lt) override;

  void MutexInit(DDCallback *cb, DDMutex *m) override;
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                      bool trylock) override;
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
  void MutexDestroy(DDCallback *cb, DDMutex *m) override;

  DDReport *GetReport(DDCallback *cb) override;

  void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
  void ReportDeadlock(DDCallback *cb, DDMutex *m);
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags)
    : flags(*flags) {
  dd.clear();
}

DDPhysicalThread* DD::CreatePhysicalThread() {
  return nullptr;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
  lt->ctx = ctx;
  lt->dd.clear();
  lt->report_pending = false;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  m->id = 0;
  m->stk = cb->Unwind();
}

void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
  if (!dd.nodeBelongsToCurrentEpoch(m->id))
    m->id = dd.newNode(reinterpret_cast<uptr>(m));
  dd.ensureCurrentEpoch(&lt->dd);
}

void DD::MutexBeforeLock(DDCallback *cb,
    DDMutex *m, bool wlock) {
  DDLogicalThread *lt = cb->lt;
  if (lt->dd.empty()) return;  // This will be the first lock held by lt.
  if (dd.hasAllEdges(&lt->dd, m->id)) return;  // We already have all edges.
  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (dd.isHeld(&lt->dd, m->id))
    return;  // FIXME: allow this only for recursive locks.
  if (dd.onLockBefore(&lt->dd, m->id)) {
    // Actually add this edge now so that we have all the stack traces.
    dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
    ReportDeadlock(cb, m);
  }
}

void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
  DDLogicalThread *lt = cb->lt;
  uptr path[20];
  uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
  if (len == 0U) {
    // A cycle of 20+ locks? Well, that's a bit odd...
    Printf("WARNING: too long mutex cycle found\n");
    return;
  }
  CHECK_EQ(m->id, path[0]);
  lt->report_pending = true;
  len = Min<uptr>(len, DDReport::kMaxLoopSize);
  DDReport *rep = &lt->rep;
  rep->n = len;
  for (uptr i = 0; i < len; i++) {
    uptr from = path[i];
    uptr to = path[(i + 1) % len];
    DDMutex *m0 = (DDMutex*)dd.getData(from);
    DDMutex *m1 = (DDMutex*)dd.getData(to);

    u32 stk_from = 0, stk_to = 0;
    int unique_tid = 0;
    dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
    // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
    //        unique_tid);
    rep->loop[i].thr_ctx = unique_tid;
    rep->loop[i].mtx_ctx0 = m0->ctx;
    rep->loop[i].mtx_ctx1 = m1->ctx;
    rep->loop[i].stk[0] = stk_to;
    rep->loop[i].stk[1] = stk_from;
  }
}

void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
  DDLogicalThread *lt = cb->lt;
  u32 stk = 0;
  if (flags.second_deadlock_stack)
    stk = cb->Unwind();
  // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
  if (dd.onFirstLock(&lt->dd, m->id, stk))
    return;
  if (dd.onLockFast(&lt->dd, m->id, stk))
    return;

  SpinMutexLock lk(&mtx);
  MutexEnsureID(lt, m);
  if (wlock)  // Only a recursive rlock may be held.
    CHECK(!dd.isHeld(&lt->dd, m->id));
  if (!trylock)
    dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
  dd.onLockAfter(&lt->dd, m->id, stk);
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
  dd.onUnlock(&cb->lt->dd, m->id);
}

void DD::MutexDestroy(DDCallback *cb,
    DDMutex *m) {
  if (!m->id) return;
  SpinMutexLock lk(&mtx);
  if (dd.nodeBelongsToCurrentEpoch(m->id))
    dd.removeNode(m->id);
  m->id = 0;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->lt->report_pending)
    return nullptr;
  cb->lt->report_pending = false;
  return &cb->lt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
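Sketch of how a client runtime could drive this implementation through the abstract interface; the NullCallback type is hypothetical, and a real callback supplies stack unwinding and thread ids and has its pt/lt fields pointing at threads created via the detector:

#include "sanitizer_common.h"
#include "sanitizer_deadlock_detector_interface.h"
using namespace __sanitizer;

struct NullCallback : DDCallback {
  u32 Unwind() override { return 0; }    // no stack traces in this sketch
  int UniqueTid() override { return 0; }
};

void locked_region(DDetector *dd, NullCallback *cb, DDMutex *m) {
  dd->MutexBeforeLock(cb, m, /*wlock=*/true);  // cycle check happens here
  dd->MutexAfterLock(cb, m, /*wlock=*/true, /*trylock=*/false);
  if (DDReport *rep = dd->GetReport(cb))
    Printf("deadlock cycle of %d mutexes\n", rep->n);
  dd->MutexBeforeUnlock(cb, m, /*wlock=*/true);
}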
421
lib/libtsan/sanitizer_common/sanitizer_deadlock_detector2.cpp
Normal file
@@ -0,0 +1,421 @@
//===-- sanitizer_deadlock_detector2.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Deadlock detector implementation based on adjacency lists.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_mutex.h"

#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2

namespace __sanitizer {

const int kMaxNesting = 64;
const u32 kNoId = -1;
const u32 kEndId = -2;
const int kMaxLink = 8;
const int kL1Size = 1024;
const int kL2Size = 1024;
const int kMaxMutex = kL1Size * kL2Size;

struct Id {
  u32 id;
  u32 seq;

  explicit Id(u32 id = 0, u32 seq = 0)
      : id(id)
      , seq(seq) {
  }
};

struct Link {
  u32 id;
  u32 seq;
  u32 tid;
  u32 stk0;
  u32 stk1;

  explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
      : id(id)
      , seq(seq)
      , tid(tid)
      , stk0(s0)
      , stk1(s1) {
  }
};

struct DDPhysicalThread {
  DDReport rep;
  bool report_pending;
  bool visited[kMaxMutex];
  Link pending[kMaxMutex];
  Link path[kMaxMutex];
};

struct ThreadMutex {
  u32 id;
  u32 stk;
};

struct DDLogicalThread {
  u64 ctx;
  ThreadMutex locked[kMaxNesting];
  int nlocked;
};

struct MutexState {
  StaticSpinMutex mtx;
  u32 seq;
  int nlink;
  Link link[kMaxLink];
};

struct DD final : public DDetector {
  explicit DD(const DDFlags *flags);

  DDPhysicalThread* CreatePhysicalThread();
  void DestroyPhysicalThread(DDPhysicalThread *pt);

  DDLogicalThread* CreateLogicalThread(u64 ctx);
  void DestroyLogicalThread(DDLogicalThread *lt);

  void MutexInit(DDCallback *cb, DDMutex *m);
  void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
      bool trylock);
  void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
  void MutexDestroy(DDCallback *cb, DDMutex *m);

  DDReport *GetReport(DDCallback *cb);

  void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
  void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
  u32 allocateId(DDCallback *cb);
  MutexState *getMutex(u32 id);
  u32 getMutexId(MutexState *m);

  DDFlags flags;

  MutexState *mutex[kL1Size];

  SpinMutex mtx;
  InternalMmapVector<u32> free_id;
  int id_gen = 0;
};

DDetector *DDetector::Create(const DDFlags *flags) {
  (void)flags;
  void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
  return new(mem) DD(flags);
}

DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }

DDPhysicalThread* DD::CreatePhysicalThread() {
  DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
      "deadlock detector (physical thread)");
  return pt;
}

void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
  pt->~DDPhysicalThread();
  UnmapOrDie(pt, sizeof(DDPhysicalThread));
}

DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
  DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
      sizeof(DDLogicalThread));
  lt->ctx = ctx;
  lt->nlocked = 0;
  return lt;
}

void DD::DestroyLogicalThread(DDLogicalThread *lt) {
  lt->~DDLogicalThread();
  InternalFree(lt);
}

void DD::MutexInit(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
  m->id = kNoId;
  m->recursion = 0;
  atomic_store(&m->owner, 0, memory_order_relaxed);
}

MutexState *DD::getMutex(u32 id) { return &mutex[id / kL2Size][id % kL2Size]; }

u32 DD::getMutexId(MutexState *m) {
  for (int i = 0; i < kL1Size; i++) {
    MutexState *tab = mutex[i];
    if (tab == 0)
      break;
    if (m >= tab && m < tab + kL2Size)
      return i * kL2Size + (m - tab);
  }
  return -1;
}

u32 DD::allocateId(DDCallback *cb) {
  u32 id = -1;
  SpinMutexLock l(&mtx);
  if (free_id.size() > 0) {
    id = free_id.back();
    free_id.pop_back();
  } else {
    CHECK_LT(id_gen, kMaxMutex);
    if ((id_gen % kL2Size) == 0) {
      mutex[id_gen / kL2Size] = (MutexState *)MmapOrDie(
          kL2Size * sizeof(MutexState), "deadlock detector (mutex table)");
    }
    id = id_gen++;
  }
  CHECK_LE(id, kMaxMutex);
  VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
  return id;
}

void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDPhysicalThread *pt = cb->pt;
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
        cb->lt->ctx);
    return;
  }

  CHECK_LE(lt->nlocked, kMaxNesting);

  // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
  if (m->id == kNoId)
    m->id = allocateId(cb);

  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
  if (lt->nlocked == 1) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
        cb->lt->ctx);
    return;
  }

  bool added = false;
  MutexState *mtx = getMutex(m->id);
  for (int i = 0; i < lt->nlocked - 1; i++) {
    u32 id1 = lt->locked[i].id;
    u32 stk1 = lt->locked[i].stk;
    MutexState *mtx1 = getMutex(id1);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->nlink == kMaxLink) {
      // FIXME(dvyukov): check stale links
      continue;
    }
    int li = 0;
    for (; li < mtx1->nlink; li++) {
      Link *link = &mtx1->link[li];
      if (link->id == m->id) {
        if (link->seq != mtx->seq) {
          link->seq = mtx->seq;
          link->tid = lt->ctx;
          link->stk0 = stk1;
          link->stk1 = cb->Unwind();
          added = true;
          VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
              cb->lt->ctx, getMutexId(mtx1), m->id);
        }
        break;
      }
    }
    if (li == mtx1->nlink) {
      // FIXME(dvyukov): check stale links
      Link *link = &mtx1->link[mtx1->nlink++];
      link->id = m->id;
      link->seq = mtx->seq;
      link->tid = lt->ctx;
      link->stk0 = stk1;
      link->stk1 = cb->Unwind();
      added = true;
      VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
          cb->lt->ctx, getMutexId(mtx1), m->id);
    }
  }

  if (!added || mtx->nlink == 0) {
    VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
        cb->lt->ctx);
    return;
  }

  CycleCheck(pt, lt, m);
}

void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
    bool trylock) {
  VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
    CHECK(wlock);
    m->recursion++;
    return;
  }
  CHECK_EQ(owner, 0);
  if (wlock) {
    VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
    CHECK_EQ(m->recursion, 0);
    m->recursion = 1;
    atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
  }

  if (!trylock)
    return;

  CHECK_LE(lt->nlocked, kMaxNesting);
  if (m->id == kNoId)
    m->id = allocateId(cb);
  ThreadMutex *tm = &lt->locked[lt->nlocked++];
  tm->id = m->id;
  if (flags.second_deadlock_stack)
    tm->stk = cb->Unwind();
}

void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
  VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
      cb->lt->ctx, m, wlock, cb->lt->nlocked);
  DDLogicalThread *lt = cb->lt;

  uptr owner = atomic_load(&m->owner, memory_order_relaxed);
  if (owner == (uptr)cb->lt) {
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
    if (--m->recursion > 0)
      return;
    VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
    atomic_store(&m->owner, 0, memory_order_relaxed);
  }
  CHECK_NE(m->id, kNoId);
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (cb->lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }
}

void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
  VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
      cb->lt->ctx, m);
  DDLogicalThread *lt = cb->lt;

  if (m->id == kNoId)
    return;

  // Remove the mutex from lt->locked if there.
  int last = lt->nlocked - 1;
  for (int i = last; i >= 0; i--) {
    if (lt->locked[i].id == m->id) {
      lt->locked[i] = lt->locked[last];
      lt->nlocked--;
      break;
    }
  }

  // Clear and invalidate the mutex descriptor.
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    mtx->seq++;
    mtx->nlink = 0;
  }

  // Return id to cache.
  {
    SpinMutexLock l(&mtx);
    free_id.push_back(m->id);
  }
}

void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
    DDMutex *m) {
  internal_memset(pt->visited, 0, sizeof(pt->visited));
  int npath = 0;
  int npending = 0;
  {
    MutexState *mtx = getMutex(m->id);
    SpinMutexLock l(&mtx->mtx);
    for (int li = 0; li < mtx->nlink; li++)
      pt->pending[npending++] = mtx->link[li];
  }
  while (npending > 0) {
    Link link = pt->pending[--npending];
    if (link.id == kEndId) {
      npath--;
      continue;
    }
    if (pt->visited[link.id])
      continue;
    MutexState *mtx1 = getMutex(link.id);
    SpinMutexLock l(&mtx1->mtx);
    if (mtx1->seq != link.seq)
      continue;
    pt->visited[link.id] = true;
    if (mtx1->nlink == 0)
      continue;
    pt->path[npath++] = link;
    pt->pending[npending++] = Link(kEndId);
    if (link.id == m->id)
      return Report(pt, lt, npath);  // Bingo!
    for (int li = 0; li < mtx1->nlink; li++) {
      Link *link1 = &mtx1->link[li];
      // MutexState *mtx2 = getMutex(link->id);
      // FIXME(dvyukov): fast seq check
      // FIXME(dvyukov): fast nlink != 0 check
      // FIXME(dvyukov): fast pending check?
      // FIXME(dvyukov): npending can be larger than kMaxMutex
      pt->pending[npending++] = *link1;
    }
  }
}

void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
  DDReport *rep = &pt->rep;
  rep->n = npath;
  for (int i = 0; i < npath; i++) {
    Link *link = &pt->path[i];
    Link *link0 = &pt->path[i ? i - 1 : npath - 1];
    rep->loop[i].thr_ctx = link->tid;
    rep->loop[i].mtx_ctx0 = link0->id;
    rep->loop[i].mtx_ctx1 = link->id;
    rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
    rep->loop[i].stk[1] = link->stk1;
  }
  pt->report_pending = true;
}

DDReport *DD::GetReport(DDCallback *cb) {
  if (!cb->pt->report_pending)
    return 0;
  cb->pt->report_pending = false;
  return &cb->pt->rep;
}

}  // namespace __sanitizer
#endif  // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
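The mutex table above is a two-level array so that memory is mapped one kL2Size slab at a time; getMutex() splits an id into (slab, slot). A self-contained sketch of the same indexing scheme, with names chosen here rather than taken from the source:

#include <cassert>
#include <cstdlib>

// Same shape as DD::mutex / DD::getMutex above: ids 0..kL1*kL2-1 map to
// table[id / kL2][id % kL2], and level-2 slabs are allocated on demand.
constexpr int kL1 = 1024, kL2 = 1024;
struct Slot { unsigned seq; };
static Slot *table[kL1];

Slot *slot_for(unsigned id) {
  assert(id < kL1 * kL2);
  Slot *&slab = table[id / kL2];
  if (!slab) slab = static_cast<Slot *>(calloc(kL2, sizeof(Slot)));
  return &slab[id % kL2];
}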
98
lib/libtsan/sanitizer_common/sanitizer_deadlock_detector_interface.h
Normal file
@@ -0,0 +1,98 @@
//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// Abstract deadlock detector interface.
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H

#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION
# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1
#endif

#include "sanitizer_internal_defs.h"
#include "sanitizer_atomic.h"

namespace __sanitizer {

// dd - deadlock detector.
// lt - logical (user) thread.
// pt - physical (OS) thread.

struct DDPhysicalThread;
struct DDLogicalThread;

struct DDMutex {
#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
  uptr id;
  u32 stk;  // creation stack
#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
  u32 id;
  u32 recursion;
  atomic_uintptr_t owner;
#else
# error "BAD SANITIZER_DEADLOCK_DETECTOR_VERSION"
#endif
  u64 ctx;
};

struct DDFlags {
  bool second_deadlock_stack;
};

struct DDReport {
  enum { kMaxLoopSize = 20 };
  int n;  // number of entries in loop
  struct {
    u64 thr_ctx;   // user thread context
    u64 mtx_ctx0;  // user mutex context, start of the edge
    u64 mtx_ctx1;  // user mutex context, end of the edge
    u32 stk[2];    // stack ids for the edge
  } loop[kMaxLoopSize];
};

struct DDCallback {
  DDPhysicalThread *pt;
  DDLogicalThread *lt;

  virtual u32 Unwind() { return 0; }
  virtual int UniqueTid() { return 0; }

 protected:
  ~DDCallback() {}
};

struct DDetector {
  static DDetector *Create(const DDFlags *flags);

  virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; }
  virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}

  virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; }
  virtual void DestroyLogicalThread(DDLogicalThread *lt) {}

  virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
  virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}
  virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
                              bool trylock) {}
  virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
  virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}

  virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }

 protected:
  ~DDetector() {}
};

}  // namespace __sanitizer

#endif  // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
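Both detector variants compile against this header; the guard above defaults to version 1, and a build selects the adjacency-list variant by defining the macro before the header is first included. A minimal sketch (where the macro is defined is an assumption; the actual build configuration is not part of this diff):

// Select the version-2 (adjacency list) detector before first inclusion.
#define SANITIZER_DEADLOCK_DETECTOR_VERSION 2
#include "sanitizer_deadlock_detector_interface.h"
// DDMutex now has the version-2 layout: u32 id, u32 recursion, atomic owner.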
717
lib/libtsan/sanitizer_common/sanitizer_dense_map.h
Normal file
@@ -0,0 +1,717 @@
//===- sanitizer_dense_map.h - Dense probed hash table ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is a fork of the llvm/ADT/DenseMap.h class with the following changes:
//  * Use mmap to allocate.
//  * No iterators.
//  * Does not shrink.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DENSE_MAP_H
#define SANITIZER_DENSE_MAP_H

#include "sanitizer_common.h"
#include "sanitizer_dense_map_info.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"

namespace __sanitizer {

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase {
 public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  WARN_UNUSED_RESULT bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0)
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if (__sanitizer::is_trivially_destructible<ValueT>::value) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      CHECK_EQ(NumEntries, 0);
    }
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return true if the specified key is in the map, false otherwise.
  bool contains(const KeyT &Key) const { return doFind(Key) != nullptr; }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Key) const { return contains(Key) ? 1 : 0; }

  value_type *find(const KeyT &Key) { return doFind(Key); }
  const value_type *find(const KeyT &Key) const { return doFind(Key); }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <class LookupKeyT>
  value_type *find_as(const LookupKeyT &Key) {
    return doFind(Key);
  }
  template <class LookupKeyT>
  const value_type *find_as(const LookupKeyT &Key) const {
    return doFind(Key);
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Key) const {
    if (const BucketT *Bucket = doFind(Key))
      return Bucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(const value_type &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  detail::DenseMapPair<value_type *, bool> insert(value_type &&KV) {
    return try_emplace(__sanitizer::move(KV.first),
                       __sanitizer::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(KeyT &&Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, __sanitizer::move(Key),
                                 __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  detail::DenseMapPair<value_type *, bool> try_emplace(const KeyT &Key,
                                                       Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, Key, __sanitizer::forward<Ts>(Args)...);
    return {TheBucket, true};
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  detail::DenseMapPair<value_type *, bool> insert_as(value_type &&KV,
                                                     const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return {TheBucket, false};  // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucketWithLookup(TheBucket, __sanitizer::move(KV.first),
                                   __sanitizer::move(KV.second), Val);
    return {TheBucket, true};
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket = doFind(Val);
    if (!TheBucket)
      return false;  // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }

  void erase(value_type *I) {
    CHECK_NE(I, nullptr);
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type &FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) { return FindAndConstruct(Key).second; }

  value_type &FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, __sanitizer::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(__sanitizer::move(Key)).second;
  }

  /// Iterate over active entries of the container.
  ///
  /// Function can return fast to stop the process.
  template <class Fn>
  void forEach(Fn fn) {
    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (auto *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      const KeyT K = P->getFirst();
      if (!KeyInfoT::isEqual(K, EmptyKey) &&
          !KeyInfoT::isEqual(K, TombstoneKey)) {
        if (!fn(*P))
          return;
      }
    }
  }

  template <class Fn>
  void forEach(Fn fn) const {
    const_cast<DenseMapBase *>(this)->forEach(
        [&](const value_type &KV) { return fn(KV); });
  }

 protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0)  // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    CHECK_EQ((getNumBuckets() & (getNumBuckets() - 1)), 0);
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict equality.
    // For example if NumEntries is 48, 48 * 4 / 3 + 1 = 65, and the next
    // power of two is 128.
    return RoundUpToPowerOfTwo((NumEntries * 4 / 3 + 1) + /* NextPowerOf2 */ 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal;  // silence warning.
        CHECK(!FoundVal);
        DestBucket->getFirst() = __sanitizer::move(B->getFirst());
        ::new (&DestBucket->getSecond())
            ValueT(__sanitizer::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    CHECK_NE(&other, this);
    CHECK_EQ(getNumBuckets(), other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (__sanitizer::is_trivially_copyable<KeyT>::value &&
        __sanitizer::is_trivially_copyable<ValueT>::value)
      internal_memcpy(reinterpret_cast<void *>(getBuckets()),
                      other.getBuckets(), getNumBuckets() * sizeof(BucketT));
    else
      for (uptr i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template <typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() { return KeyInfoT::getEmptyKey(); }

  static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }

 private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }

  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&...Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = __sanitizer::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond())
        ValueT(__sanitizer::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = __sanitizer::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(__sanitizer::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky. For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket. If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (UNLIKELY(NumBuckets - (NewNumEntries + getNumTombstones()) <=
                        NumBuckets / 8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    CHECK(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }
|
||||
template <typename LookupKeyT>
|
||||
BucketT *doFind(const LookupKeyT &Val) {
|
||||
BucketT *BucketsPtr = getBuckets();
|
||||
const unsigned NumBuckets = getNumBuckets();
|
||||
if (NumBuckets == 0)
|
||||
return nullptr;
|
||||
|
||||
const KeyT EmptyKey = getEmptyKey();
|
||||
unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
|
||||
unsigned ProbeAmt = 1;
|
||||
while (true) {
|
||||
BucketT *Bucket = BucketsPtr + BucketNo;
|
||||
if (LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
|
||||
return Bucket;
|
||||
if (LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
|
||||
return nullptr;
|
||||
|
||||
// Otherwise, it's a hash collision or a tombstone, continue quadratic
|
||||
// probing.
|
||||
BucketNo += ProbeAmt++;
|
||||
BucketNo &= NumBuckets - 1;
|
||||
}
|
||||
}
|
||||
|
||||
template <typename LookupKeyT>
|
||||
const BucketT *doFind(const LookupKeyT &Val) const {
|
||||
return const_cast<DenseMapBase *>(this)->doFind(Val);
|
||||
}
|
||||
|
||||
/// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
|
||||
/// FoundBucket. If the bucket contains the key and a value, this returns
|
||||
/// true, otherwise it returns a bucket with an empty marker or tombstone and
|
||||
/// returns false.
|
||||
template <typename LookupKeyT>
|
||||
bool LookupBucketFor(const LookupKeyT &Val,
|
||||
const BucketT *&FoundBucket) const {
|
||||
const BucketT *BucketsPtr = getBuckets();
|
||||
const unsigned NumBuckets = getNumBuckets();
|
||||
|
||||
if (NumBuckets == 0) {
|
||||
FoundBucket = nullptr;
|
||||
return false;
|
||||
}
|
||||
|
||||
// FoundTombstone - Keep track of whether we find a tombstone while probing.
|
||||
const BucketT *FoundTombstone = nullptr;
|
||||
const KeyT EmptyKey = getEmptyKey();
|
||||
const KeyT TombstoneKey = getTombstoneKey();
|
||||
CHECK(!KeyInfoT::isEqual(Val, EmptyKey));
|
||||
CHECK(!KeyInfoT::isEqual(Val, TombstoneKey));
|
||||
|
||||
unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
|
||||
unsigned ProbeAmt = 1;
|
||||
while (true) {
|
||||
const BucketT *ThisBucket = BucketsPtr + BucketNo;
|
||||
// Found Val's bucket? If so, return it.
|
||||
if (LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
|
||||
FoundBucket = ThisBucket;
|
||||
return true;
|
||||
}
|
||||
|
||||
// If we found an empty bucket, the key doesn't exist in the set.
|
||||
// Insert it and return the default value.
|
||||
if (LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
|
||||
// If we've already seen a tombstone while probing, fill it in instead
|
||||
// of the empty bucket we eventually probed to.
|
||||
FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
|
||||
return false;
|
||||
}
|
||||
|
||||
// If this is a tombstone, remember it. If Val ends up not in the map, we
|
||||
// prefer to return it than something that would require more probing.
|
||||
if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
|
||||
!FoundTombstone)
|
||||
FoundTombstone = ThisBucket; // Remember the first tombstone found.
|
||||
|
||||
// Otherwise, it's a hash collision or a tombstone, continue quadratic
|
||||
// probing.
|
||||
BucketNo += ProbeAmt++;
|
||||
BucketNo &= (NumBuckets - 1);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename LookupKeyT>
|
||||
bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
|
||||
const BucketT *ConstFoundBucket;
|
||||
bool Result = const_cast<const DenseMapBase *>(this)->LookupBucketFor(
|
||||
Val, ConstFoundBucket);
|
||||
FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
|
||||
return Result;
|
||||
}
|
||||
|
||||
public:
|
||||
/// Return the approximate size (in bytes) of the actual map.
|
||||
/// This is just the raw memory used by DenseMap.
|
||||
/// If entries are pointers to objects, the size of the referenced objects
|
||||
/// are not included.
|
||||
uptr getMemorySize() const {
|
||||
return RoundUpTo(getNumBuckets() * sizeof(BucketT), GetPageSizeCached());
|
||||
}
|
||||
};
|
||||
|
||||
/// Equality comparison for DenseMap.
|
||||
///
|
||||
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
|
||||
/// is also in RHS, and that no additional pairs are in RHS.
|
||||
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
|
||||
/// complexity is linear, worst case is O(N^2) (if every hash collides).
|
||||
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
|
||||
typename BucketT>
|
||||
bool operator==(
|
||||
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
|
||||
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
|
||||
if (LHS.size() != RHS.size())
|
||||
return false;
|
||||
|
||||
bool R = true;
|
||||
LHS.forEach(
|
||||
[&](const typename DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT,
|
||||
BucketT>::value_type &KV) -> bool {
|
||||
const auto *I = RHS.find(KV.first);
|
||||
if (!I || I->second != KV.second) {
|
||||
R = false;
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
});
|
||||
|
||||
return R;
|
||||
}
|
||||
|
||||
/// Inequality comparison for DenseMap.
|
||||
///
|
||||
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
|
||||
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
|
||||
typename BucketT>
|
||||
bool operator!=(
|
||||
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
|
||||
const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
|
||||
return !(LHS == RHS);
|
||||
}
|
||||
|
||||
template <typename KeyT, typename ValueT,
|
||||
typename KeyInfoT = DenseMapInfo<KeyT>,
|
||||
typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
|
||||
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
|
||||
KeyT, ValueT, KeyInfoT, BucketT> {
|
||||
friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
|
||||
|
||||
// Lift some types from the dependent base class into this class for
|
||||
// simplicity of referring to them.
|
||||
using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
|
||||
|
||||
BucketT *Buckets = nullptr;
|
||||
unsigned NumEntries = 0;
|
||||
unsigned NumTombstones = 0;
|
||||
unsigned NumBuckets = 0;
|
||||
|
||||
public:
|
||||
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without growing.
  explicit DenseMap(unsigned InitialReserve) { init(InitialReserve); }
  constexpr DenseMap() = default;

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
  }

  void swap(DenseMap &RHS) {
    Swap(Buckets, RHS.Buckets);
    Swap(NumEntries, RHS.NumEntries);
    Swap(NumTombstones, RHS.NumTombstones);
    Swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap &operator=(const DenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap &operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap &other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(RoundUpToPowerOfTwo(Max<unsigned>(64, AtLeast)));
    CHECK(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets);
  }

 private:
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) { NumEntries = Num; }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  BucketT *getBuckets() const { return Buckets; }

  unsigned getNumBuckets() const { return NumBuckets; }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    uptr Size = sizeof(BucketT) * NumBuckets;
    if (Size * 2 <= GetPageSizeCached()) {
      // We always allocate at least a page, so use entire space.
      unsigned Log2 = MostSignificantSetBitIndex(GetPageSizeCached() / Size);
      Size <<= Log2;
      NumBuckets <<= Log2;
      CHECK_EQ(Size, sizeof(BucketT) * NumBuckets);
      CHECK_GT(Size * 2, GetPageSizeCached());
    }
    Buckets = static_cast<BucketT *>(allocate_buffer(Size));
    return true;
  }

  static void *allocate_buffer(uptr Size) {
    return MmapOrDie(RoundUpTo(Size, GetPageSizeCached()), "DenseMap");
  }

  static void deallocate_buffer(void *Ptr, uptr Size) {
    UnmapOrDie(Ptr, RoundUpTo(Size, GetPageSizeCached()));
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_DENSE_MAP_H
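Editorial note, not part of the vendored file: a minimal usage sketch of this container, assuming a sanitizer-runtime build context where this header and its dependencies are available, and relying on the pointer-returning find(), operator[] and erase() members declared earlier in the header. The key values are illustrative.

#include "sanitizer_common/sanitizer_dense_map.h"

namespace __sanitizer {
static void DenseMapUsageSketch() {
  DenseMap<uptr, u32> counts(/*InitialReserve=*/16);
  counts[0x1000] = 1;              // default-constructs the slot, then assigns
  if (auto *kv = counts.find(0x1000))
    kv->second += 1;               // find returns a pointer, not an iterator
  counts.erase(0x1000);            // leaves a tombstone in the bucket array
}
}  // namespace __sanitizer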
282
lib/libtsan/sanitizer_common/sanitizer_dense_map_info.h
Normal file
@@ -0,0 +1,282 @@
//===- sanitizer_dense_map_info.h - Type traits for DenseMap ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DENSE_MAP_INFO_H
#define SANITIZER_DENSE_MAP_INFO_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_type_traits.h"

namespace __sanitizer {

namespace detail {

/// Simplistic combination of 32-bit hash values into 32-bit hash values.
static constexpr unsigned combineHashValue(unsigned a, unsigned b) {
  u64 key = (u64)a << 32 | (u64)b;
  key += ~(key << 32);
  key ^= (key >> 22);
  key += ~(key << 13);
  key ^= (key >> 8);
  key += (key << 3);
  key ^= (key >> 15);
  key += ~(key << 27);
  key ^= (key >> 31);
  return (unsigned)key;
}

// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair {
  KeyT first = {};
  ValueT second = {};
  constexpr DenseMapPair() = default;
  constexpr DenseMapPair(const KeyT &f, const ValueT &s)
      : first(f), second(s) {}

  template <typename KeyT2, typename ValueT2>
  constexpr DenseMapPair(KeyT2 &&f, ValueT2 &&s)
      : first(__sanitizer::forward<KeyT2>(f)),
        second(__sanitizer::forward<ValueT2>(s)) {}

  constexpr DenseMapPair(const DenseMapPair &other) = default;
  constexpr DenseMapPair &operator=(const DenseMapPair &other) = default;
  constexpr DenseMapPair(DenseMapPair &&other) = default;
  constexpr DenseMapPair &operator=(DenseMapPair &&other) = default;

  KeyT &getFirst() { return first; }
  const KeyT &getFirst() const { return first; }
  ValueT &getSecond() { return second; }
  const ValueT &getSecond() const { return second; }
};

}  // end namespace detail

template <typename T>
struct DenseMapInfo {
  // static T getEmptyKey();
  // static T getTombstoneKey();
  // static unsigned getHashValue(const T &Val);
  // static bool isEqual(const T &LHS, const T &RHS);
};

// Provide DenseMapInfo for all pointers. Come up with sentinel pointer values
// that are aligned to alignof(T) bytes, but try to avoid requiring T to be
// complete. This allows clients to instantiate DenseMap<T*, ...> with forward
// declared key types. Assume that no pointer key type requires more than 4096
// bytes of alignment.
template <typename T>
struct DenseMapInfo<T *> {
  // The following should hold, but it would require T to be complete:
  // static_assert(alignof(T) <= (1 << Log2MaxAlign),
  //               "DenseMap does not support pointer keys requiring more than "
  //               "Log2MaxAlign bits of alignment");
  static constexpr uptr Log2MaxAlign = 12;

  static constexpr T *getEmptyKey() {
    uptr Val = static_cast<uptr>(-1);
    Val <<= Log2MaxAlign;
    return reinterpret_cast<T *>(Val);
  }

  static constexpr T *getTombstoneKey() {
    uptr Val = static_cast<uptr>(-2);
    Val <<= Log2MaxAlign;
    return reinterpret_cast<T *>(Val);
  }

  static constexpr unsigned getHashValue(const T *PtrVal) {
    return (unsigned((uptr)PtrVal) >> 4) ^ (unsigned((uptr)PtrVal) >> 9);
  }

  static constexpr bool isEqual(const T *LHS, const T *RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for chars.
template <>
struct DenseMapInfo<char> {
  static constexpr char getEmptyKey() { return ~0; }
  static constexpr char getTombstoneKey() { return ~0 - 1; }
  static constexpr unsigned getHashValue(const char &Val) { return Val * 37U; }

  static constexpr bool isEqual(const char &LHS, const char &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned chars.
template <>
struct DenseMapInfo<unsigned char> {
  static constexpr unsigned char getEmptyKey() { return ~0; }
  static constexpr unsigned char getTombstoneKey() { return ~0 - 1; }
  static constexpr unsigned getHashValue(const unsigned char &Val) {
    return Val * 37U;
  }

  static constexpr bool isEqual(const unsigned char &LHS,
                                const unsigned char &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned shorts.
template <>
struct DenseMapInfo<unsigned short> {
  static constexpr unsigned short getEmptyKey() { return 0xFFFF; }
  static constexpr unsigned short getTombstoneKey() { return 0xFFFF - 1; }
  static constexpr unsigned getHashValue(const unsigned short &Val) {
    return Val * 37U;
  }

  static constexpr bool isEqual(const unsigned short &LHS,
                                const unsigned short &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned ints.
template <>
struct DenseMapInfo<unsigned> {
  static constexpr unsigned getEmptyKey() { return ~0U; }
  static constexpr unsigned getTombstoneKey() { return ~0U - 1; }
  static constexpr unsigned getHashValue(const unsigned &Val) {
    return Val * 37U;
  }

  static constexpr bool isEqual(const unsigned &LHS, const unsigned &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned longs.
template <>
struct DenseMapInfo<unsigned long> {
  static constexpr unsigned long getEmptyKey() { return ~0UL; }
  static constexpr unsigned long getTombstoneKey() { return ~0UL - 1L; }

  static constexpr unsigned getHashValue(const unsigned long &Val) {
    return (unsigned)(Val * 37UL);
  }

  static constexpr bool isEqual(const unsigned long &LHS,
                                const unsigned long &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for unsigned long longs.
template <>
struct DenseMapInfo<unsigned long long> {
  static constexpr unsigned long long getEmptyKey() { return ~0ULL; }
  static constexpr unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }

  static constexpr unsigned getHashValue(const unsigned long long &Val) {
    return (unsigned)(Val * 37ULL);
  }

  static constexpr bool isEqual(const unsigned long long &LHS,
                                const unsigned long long &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for shorts.
template <>
struct DenseMapInfo<short> {
  static constexpr short getEmptyKey() { return 0x7FFF; }
  static constexpr short getTombstoneKey() { return -0x7FFF - 1; }
  static constexpr unsigned getHashValue(const short &Val) { return Val * 37U; }
  static constexpr bool isEqual(const short &LHS, const short &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for ints.
template <>
struct DenseMapInfo<int> {
  static constexpr int getEmptyKey() { return 0x7fffffff; }
  static constexpr int getTombstoneKey() { return -0x7fffffff - 1; }
  static constexpr unsigned getHashValue(const int &Val) {
    return (unsigned)(Val * 37U);
  }

  static constexpr bool isEqual(const int &LHS, const int &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for longs.
template <>
struct DenseMapInfo<long> {
  static constexpr long getEmptyKey() {
    return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
  }

  static constexpr long getTombstoneKey() { return getEmptyKey() - 1L; }

  static constexpr unsigned getHashValue(const long &Val) {
    return (unsigned)(Val * 37UL);
  }

  static constexpr bool isEqual(const long &LHS, const long &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for long longs.
template <>
struct DenseMapInfo<long long> {
  static constexpr long long getEmptyKey() { return 0x7fffffffffffffffLL; }
  static constexpr long long getTombstoneKey() {
    return -0x7fffffffffffffffLL - 1;
  }

  static constexpr unsigned getHashValue(const long long &Val) {
    return (unsigned)(Val * 37ULL);
  }

  static constexpr bool isEqual(const long long &LHS, const long long &RHS) {
    return LHS == RHS;
  }
};

// Provide DenseMapInfo for all pairs whose members have info.
template <typename T, typename U>
struct DenseMapInfo<detail::DenseMapPair<T, U>> {
  using Pair = detail::DenseMapPair<T, U>;
  using FirstInfo = DenseMapInfo<T>;
  using SecondInfo = DenseMapInfo<U>;

  static constexpr Pair getEmptyKey() {
    return detail::DenseMapPair<T, U>(FirstInfo::getEmptyKey(),
                                      SecondInfo::getEmptyKey());
  }

  static constexpr Pair getTombstoneKey() {
    return detail::DenseMapPair<T, U>(FirstInfo::getTombstoneKey(),
                                      SecondInfo::getTombstoneKey());
  }

  static constexpr unsigned getHashValue(const Pair &PairVal) {
    return detail::combineHashValue(FirstInfo::getHashValue(PairVal.first),
                                    SecondInfo::getHashValue(PairVal.second));
  }

  static constexpr bool isEqual(const Pair &LHS, const Pair &RHS) {
    return FirstInfo::isEqual(LHS.first, RHS.first) &&
           SecondInfo::isEqual(LHS.second, RHS.second);
  }
};

}  // namespace __sanitizer

#endif  // SANITIZER_DENSE_MAP_INFO_H
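Editorial note, not part of the vendored file: a sketch of a user-provided DenseMapInfo specialization for a hypothetical Range key type, following the contract documented above (distinct empty/tombstone sentinels that never occur as real keys, a hash, and an equality predicate). Range and its sentinel values are invented for illustration.

namespace __sanitizer {
struct Range { uptr beg; uptr end; };

template <>
struct DenseMapInfo<Range> {
  // Sentinels chosen so they cannot collide with a real [beg, end) range.
  static constexpr Range getEmptyKey() { return {~(uptr)0, 0}; }
  static constexpr Range getTombstoneKey() { return {~(uptr)0 - 1, 0}; }
  static unsigned getHashValue(const Range &R) {
    return detail::combineHashValue(DenseMapInfo<uptr>::getHashValue(R.beg),
                                    DenseMapInfo<uptr>::getHashValue(R.end));
  }
  static bool isEqual(const Range &LHS, const Range &RHS) {
    return LHS.beg == RHS.beg && LHS.end == RHS.end;
  }
};
}  // namespace __sanitizer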
37
lib/libtsan/sanitizer_common/sanitizer_dl.cpp
Normal file
@@ -0,0 +1,37 @@
//===-- sanitizer_dl.cpp --------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file has helper functions that depend on libc's dynamic loading
// introspection.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_dl.h"

#include "sanitizer_common/sanitizer_platform.h"

#if SANITIZER_GLIBC
# include <dlfcn.h>
#endif

namespace __sanitizer {
extern const char *SanitizerToolName;

const char *DladdrSelfFName(void) {
#if SANITIZER_GLIBC
  Dl_info info;
  int ret = dladdr((void *)&SanitizerToolName, &info);
  if (ret) {
    return info.dli_fname;
  }
#endif

  return nullptr;
}

}  // namespace __sanitizer
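Editorial note, not part of the vendored file: the dladdr trick above can be reproduced standalone on glibc. This sketch resolves the path of the module (shared object or main executable) that defines a given symbol; link with -ldl on older glibc versions.

#define _GNU_SOURCE  // dladdr is a GNU extension
#include <dlfcn.h>
#include <stdio.h>

static int marker;  // any symbol with static storage works as an anchor

int main(void) {
  Dl_info info;
  // dladdr fills info with the module containing the given address.
  if (dladdr((void *)&marker, &info) && info.dli_fname)
    printf("defined in: %s\n", info.dli_fname);
  return 0;
}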
26
lib/libtsan/sanitizer_common/sanitizer_dl.h
Normal file
@@ -0,0 +1,26 @@
//===-- sanitizer_dl.h ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file has helper functions that depend on libc's dynamic loading
// introspection.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DL_H
#define SANITIZER_DL_H

namespace __sanitizer {

// Returns the path to the shared object that contains the sanitizer, or, in
// the case of statically linked sanitizers, the path to the main program
// itself.
const char* DladdrSelfFName(void);

}  // namespace __sanitizer

#endif  // SANITIZER_DL_H
35
lib/libtsan/sanitizer_common/sanitizer_errno.cpp
Normal file
@@ -0,0 +1,35 @@
//===-- sanitizer_errno.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers run-time libraries.
//
// Defines errno to avoid including errno.h and its dependencies into other
// files (e.g. interceptors are not supposed to include any system headers).
//
//===----------------------------------------------------------------------===//

#include "sanitizer_errno_codes.h"
#include "sanitizer_internal_defs.h"

#include <errno.h>

namespace __sanitizer {

COMPILER_CHECK(errno_ENOMEM == ENOMEM);
COMPILER_CHECK(errno_EBUSY == EBUSY);
COMPILER_CHECK(errno_EINVAL == EINVAL);
COMPILER_CHECK(errno_ERANGE == ERANGE);

// EOWNERDEAD is not present on some older platforms.
#if defined(EOWNERDEAD)
extern const int errno_EOWNERDEAD = EOWNERDEAD;
#else
extern const int errno_EOWNERDEAD = -1;
#endif

}  // namespace __sanitizer
38
lib/libtsan/sanitizer_common/sanitizer_errno.h
Normal file
@@ -0,0 +1,38 @@
//===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers run-time libraries.
//
// Defines errno to avoid including errno.h and its dependencies into sensitive
// files (e.g. interceptors are not supposed to include any system headers).
// It's ok to use errno.h directly when your file already depends on other
// system includes though.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ERRNO_H
#define SANITIZER_ERRNO_H

#include "sanitizer_errno_codes.h"
#include "sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_APPLE
# define __errno_location __error
#elif SANITIZER_ANDROID || SANITIZER_NETBSD
# define __errno_location __errno
#elif SANITIZER_SOLARIS
# define __errno_location ___errno
#elif SANITIZER_WINDOWS
# define __errno_location _errno
#endif

extern "C" int *__errno_location();

#define errno (*__errno_location())

#endif  // SANITIZER_ERRNO_H
36
lib/libtsan/sanitizer_common/sanitizer_errno_codes.h
Normal file
@@ -0,0 +1,36 @@
//===-- sanitizer_errno_codes.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers run-time libraries.
//
// Defines errno codes to avoid including errno.h and its dependencies into
// sensitive files (e.g. interceptors are not supposed to include any system
// headers).
// It's ok to use errno.h directly when your file already depends on other
// system includes though.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ERRNO_CODES_H
#define SANITIZER_ERRNO_CODES_H

namespace __sanitizer {

#define errno_ENOMEM 12
#define errno_EBUSY 16
#define errno_EINVAL 22
#define errno_ERANGE 34
#define errno_ENAMETOOLONG 36
#define errno_ENOSYS 38

// These might not be present, or their values might differ, on different
// platforms.
extern const int errno_EOWNERDEAD;

}  // namespace __sanitizer

#endif  // SANITIZER_ERRNO_CODES_H
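Editorial note, not part of the vendored file: a sketch of how interceptor code can set errno without pulling in <errno.h>, using the hardcoded codes above together with the errno macro from sanitizer_errno.h. The include path and function name are illustrative, assuming the runtime's usual include layout.

#include "sanitizer_common/sanitizer_errno.h"

namespace __sanitizer {
static void *ReportOutOfMemory() {
  errno = errno_ENOMEM;  // expands to (*__errno_location()) = 12
  return nullptr;
}
}  // namespace __sanitizer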
253
lib/libtsan/sanitizer_common/sanitizer_file.cpp
Normal file
@@ -0,0 +1,253 @@
//===-- sanitizer_file.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries. It defines filesystem-related interfaces. This
// is separate from sanitizer_common.cpp so that it's simpler to disable
// all the filesystem support code for a port that doesn't use it.
//
//===---------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if !SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_file.h"
# include "sanitizer_interface_internal.h"

namespace __sanitizer {

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  WriteToFile(kStderrFd, buffer, length);
}

StaticSpinMutex report_file_mu;
ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};

void RawWrite(const char *buffer) {
  report_file.Write(buffer, internal_strlen(buffer));
}

void ReportFile::ReopenIfNecessary() {
  mu->CheckLocked();
  if (fd == kStdoutFd || fd == kStderrFd) return;

  uptr pid = internal_getpid();
  // If in tracer, use the parent's file.
  if (pid == stoptheworld_tracer_pid)
    pid = stoptheworld_tracer_ppid;
  if (fd != kInvalidFd) {
    // If the report file is already opened by the current process,
    // do nothing. Otherwise the report file was opened by the parent
    // process, close it now.
    if (fd_pid == pid)
      return;
    else
      CloseFile(fd);
  }

  const char *exe_name = GetProcessName();
  if (common_flags()->log_exe_name && exe_name) {
    internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
                      exe_name, pid);
  } else {
    internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
  }
  if (common_flags()->log_suffix) {
    internal_strlcat(full_path, common_flags()->log_suffix, kMaxPathLength);
  }
  error_t err;
  fd = OpenFile(full_path, WrOnly, &err);
  if (fd == kInvalidFd) {
    const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
    WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
    WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
    char errmsg[100];
    internal_snprintf(errmsg, sizeof(errmsg), " (reason: %d)\n", err);
    WriteToFile(kStderrFd, errmsg, internal_strlen(errmsg));
    Die();
  }
  fd_pid = pid;
}

static void RecursiveCreateParentDirs(char *path) {
  if (path[0] == '\0')
    return;
  for (int i = 1; path[i] != '\0'; ++i) {
    char save = path[i];
    if (!IsPathSeparator(path[i]))
      continue;
    path[i] = '\0';
    if (!DirExists(path) && !CreateDir(path)) {
      const char *ErrorMsgPrefix = "ERROR: Can't create directory: ";
      WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
      WriteToFile(kStderrFd, path, internal_strlen(path));
      const char *ErrorMsgSuffix = "\n";
      WriteToFile(kStderrFd, ErrorMsgSuffix, internal_strlen(ErrorMsgSuffix));
      Die();
    }
    path[i] = save;
  }
}

void ReportFile::SetReportPath(const char *path) {
  if (path) {
    uptr len = internal_strlen(path);
    if (len > sizeof(path_prefix) - 100) {
      Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", path[0], path[1],
             path[2], path[3], path[4], path[5], path[6], path[7]);
      Die();
    }
  }

  SpinMutexLock l(mu);
  if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
    CloseFile(fd);
  fd = kInvalidFd;
  if (!path || internal_strcmp(path, "stderr") == 0) {
    fd = kStderrFd;
  } else if (internal_strcmp(path, "stdout") == 0) {
    fd = kStdoutFd;
  } else {
    internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
    RecursiveCreateParentDirs(path_prefix);
  }
}

const char *ReportFile::GetReportPath() {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  return full_path;
}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  *buff = nullptr;
  *buff_size = 0;
  *read_len = 0;
  if (!max_len)
    return true;
  uptr PageSize = GetPageSizeCached();
  uptr kMinFileLen = Min(PageSize, max_len);

  // The files we usually open are not seekable, so try different buffer sizes.
  for (uptr size = kMinFileLen;; size = Min(size * 2, max_len)) {
    UnmapOrDie(*buff, *buff_size);
    *buff = (char*)MmapOrDie(size, __func__);
    *buff_size = size;
    fd_t fd = OpenFile(file_name, RdOnly, errno_p);
    if (fd == kInvalidFd) {
      UnmapOrDie(*buff, *buff_size);
      return false;
    }
    *read_len = 0;
    // Read up to one page at a time.
    bool reached_eof = false;
    while (*read_len < size) {
      uptr just_read;
      if (!ReadFromFile(fd, *buff + *read_len, size - *read_len, &just_read,
                        errno_p)) {
        UnmapOrDie(*buff, *buff_size);
        CloseFile(fd);
        return false;
      }
      *read_len += just_read;
      if (just_read == 0 || *read_len == max_len) {
        reached_eof = true;
        break;
      }
    }
    CloseFile(fd);
    if (reached_eof)  // We've read the whole file.
      break;
  }
  return true;
}

bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff, uptr max_len,
                      error_t *errno_p) {
  buff->clear();
  if (!max_len)
    return true;
  uptr PageSize = GetPageSizeCached();
  fd_t fd = OpenFile(file_name, RdOnly, errno_p);
  if (fd == kInvalidFd)
    return false;
  uptr read_len = 0;
  while (read_len < max_len) {
    if (read_len >= buff->size())
      buff->resize(Min(Max(PageSize, read_len * 2), max_len));
    CHECK_LT(read_len, buff->size());
    CHECK_LE(buff->size(), max_len);
    uptr just_read;
    if (!ReadFromFile(fd, buff->data() + read_len, buff->size() - read_len,
                      &just_read, errno_p)) {
      CloseFile(fd);
      return false;
    }
    read_len += just_read;
    if (!just_read)
      break;
  }
  CloseFile(fd);
  buff->resize(read_len);
  return true;
}

static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';

char *FindPathToBinary(const char *name) {
  if (FileExists(name)) {
    return internal_strdup(name);
  }

  const char *path = GetEnv("PATH");
  if (!path)
    return nullptr;
  uptr name_len = internal_strlen(name);
  InternalMmapVector<char> buffer(kMaxPathLength);
  const char *beg = path;
  while (true) {
    const char *end = internal_strchrnul(beg, kPathSeparator);
    uptr prefix_len = end - beg;
    if (prefix_len + name_len + 2 <= kMaxPathLength) {
      internal_memcpy(buffer.data(), beg, prefix_len);
      buffer[prefix_len] = '/';
      internal_memcpy(&buffer[prefix_len + 1], name, name_len);
      buffer[prefix_len + 1 + name_len] = '\0';
      if (FileExists(buffer.data()))
        return internal_strdup(buffer.data());
    }
    if (*end == '\0') break;
    beg = end + 1;
  }
  return nullptr;
}

}  // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
void __sanitizer_set_report_path(const char *path) {
  report_file.SetReportPath(path);
}

void __sanitizer_set_report_fd(void *fd) {
  report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
  report_file.fd_pid = internal_getpid();
}

const char *__sanitizer_get_report_path() {
  return report_file.GetReportPath();
}
}  // extern "C"

#endif  // !SANITIZER_FUCHSIA
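Editorial note, not part of the vendored file: the exported hooks above can be driven from an instrumented program. A sketch, assuming a TSan-instrumented build where these symbols are provided by the runtime; the path "reports/mylog" is illustrative.

extern "C" {
void __sanitizer_set_report_path(const char *path);
const char *__sanitizer_get_report_path();
}

int main() {
  // Reports will go to reports/mylog.<pid>; parent directories are created
  // on demand by RecursiveCreateParentDirs().
  __sanitizer_set_report_path("reports/mylog");
  // Opens the file if needed and returns the full path, e.g. "reports/mylog.12345".
  return __sanitizer_get_report_path() != nullptr ? 0 : 1;
}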
110
lib/libtsan/sanitizer_common/sanitizer_file.h
Normal file
@@ -0,0 +1,110 @@
//===-- sanitizer_file.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
// It declares filesystem-related interfaces. This is separate from
// sanitizer_common.h so that it's simpler to disable all the filesystem
// support code for a port that doesn't use it.
//
//===---------------------------------------------------------------------===//
#ifndef SANITIZER_FILE_H
#define SANITIZER_FILE_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

struct ReportFile {
  void Write(const char *buffer, uptr length);
  bool SupportsColors();
  void SetReportPath(const char *path);
  const char *GetReportPath();

  // Don't use fields directly. They are only declared public to allow
  // aggregate initialization.

  // Protects fields below.
  StaticSpinMutex *mu;
  // Opened file descriptor. Defaults to stderr. It may be equal to
  // kInvalidFd, in which case a new file will be opened when necessary.
  fd_t fd;
  // Path prefix of report file, set via __sanitizer_set_report_path.
  char path_prefix[kMaxPathLength];
  // Full path to report, obtained as <path_prefix>.PID
  char full_path[kMaxPathLength];
  // PID of the process that opened fd. If a fork() occurs,
  // the PID of the child will be different from fd_pid.
  uptr fd_pid;

 private:
  void ReopenIfNecessary();
};
extern ReportFile report_file;

enum FileAccessMode {
  RdOnly,
  WrOnly,
  RdWr
};

// Returns kInvalidFd on error.
fd_t OpenFile(const char *filename, FileAccessMode mode,
              error_t *errno_p = nullptr);
void CloseFile(fd_t);

// Return true on success, false on error.
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
                  uptr *bytes_read = nullptr, error_t *error_p = nullptr);
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
                 uptr *bytes_written = nullptr, error_t *error_p = nullptr);

// Scoped file handle closer.
struct FileCloser {
  explicit FileCloser(fd_t fd) : fd(fd) {}
  ~FileCloser() { CloseFile(fd); }
  fd_t fd;
};

bool SupportsColoredOutput(fd_t fd);

// OS
const char *GetPwd();
bool FileExists(const char *filename);
bool DirExists(const char *path);
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Returns true on success, false on failure.
bool CreateDir(const char *pathname);
// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd, their corresponding input/output
// streams will be redirected to the file. The files will always be closed
// in the parent process, even in case of an error.
// The child process will close all fds after STDERR_FILENO
// before passing control to a program.
pid_t StartSubprocess(const char *filename, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd = kInvalidFd,
                      fd_t stdout_fd = kInvalidFd, fd_t stderr_fd = kInvalidFd);
// Checks if the specified process is still running.
bool IsProcessRunning(pid_t pid);
// Waits for the process to finish and returns its exit code.
// Returns -1 in case of an error.
int WaitForProcess(pid_t pid);

// Maps the given file to virtual memory, and returns a pointer to it
// (or NULL if mapping fails). Stores the size of the mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);

}  // namespace __sanitizer

#endif  // SANITIZER_FILE_H
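Editorial note, not part of the vendored file: FileCloser gives scoped close semantics for the raw fd API above. A sketch assuming a sanitizer-runtime build context; PeekFile is a hypothetical helper name.

#include "sanitizer_common/sanitizer_file.h"

namespace __sanitizer {
static bool PeekFile(const char *path) {
  fd_t fd = OpenFile(path, RdOnly);
  if (fd == kInvalidFd) return false;
  FileCloser closer(fd);  // CloseFile(fd) runs at scope exit, on every path
  char buf[64];
  uptr nread = 0;
  return ReadFromFile(fd, buf, sizeof(buf), &nread);
}
}  // namespace __sanitizer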
190
lib/libtsan/sanitizer_common/sanitizer_flag_parser.cpp
Normal file
@@ -0,0 +1,190 @@
//===-- sanitizer_flag_parser.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_flag_parser.h"

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

class UnknownFlags {
  static const int kMaxUnknownFlags = 20;
  const char *unknown_flags_[kMaxUnknownFlags];
  int n_unknown_flags_;

 public:
  void Add(const char *name) {
    CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
    unknown_flags_[n_unknown_flags_++] = name;
  }

  void Report() {
    if (!n_unknown_flags_) return;
    Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
    for (int i = 0; i < n_unknown_flags_; ++i)
      Printf(" %s\n", unknown_flags_[i]);
    n_unknown_flags_ = 0;
  }
};

UnknownFlags unknown_flags;

void ReportUnrecognizedFlags() {
  unknown_flags.Report();
}

char *FlagParser::ll_strndup(const char *s, uptr n) {
  uptr len = internal_strnlen(s, n);
  char *s2 = (char *)GetGlobalLowLevelAllocator().Allocate(len + 1);
  internal_memcpy(s2, s, len);
  s2[len] = 0;
  return s2;
}

void FlagParser::PrintFlagDescriptions() {
  char buffer[128];
  buffer[sizeof(buffer) - 1] = '\0';
  Printf("Available flags for %s:\n", SanitizerToolName);
  for (int i = 0; i < n_flags_; ++i) {
    bool truncated = !(flags_[i].handler->Format(buffer, sizeof(buffer)));
    CHECK_EQ(buffer[sizeof(buffer) - 1], '\0');
    const char *truncation_str = truncated ? " Truncated" : "";
    Printf("\t%s\n\t\t- %s (Current Value%s: %s)\n", flags_[i].name,
           flags_[i].desc, truncation_str, buffer);
  }
}

void FlagParser::fatal_error(const char *err) {
  Printf("%s: ERROR: %s\n", SanitizerToolName, err);
  Die();
}

bool FlagParser::is_space(char c) {
  return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
         c == '\r';
}

void FlagParser::skip_whitespace() {
  while (is_space(buf_[pos_])) ++pos_;
}

void FlagParser::parse_flag(const char *env_option_name) {
  uptr name_start = pos_;
  while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
  if (buf_[pos_] != '=') {
    if (env_option_name) {
      Printf("%s: ERROR: expected '=' in %s\n", SanitizerToolName,
             env_option_name);
      Die();
    } else {
      fatal_error("expected '='");
    }
  }
  char *name = ll_strndup(buf_ + name_start, pos_ - name_start);

  uptr value_start = ++pos_;
  char *value;
  if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
    char quote = buf_[pos_++];
    while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
    if (buf_[pos_] == 0) fatal_error("unterminated string");
    value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
    ++pos_;  // consume the closing quote
  } else {
    while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
    if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
      fatal_error("expected separator or eol");
    value = ll_strndup(buf_ + value_start, pos_ - value_start);
  }

  bool res = run_handler(name, value);
  if (!res) fatal_error("Flag parsing failed.");
}

void FlagParser::parse_flags(const char *env_option_name) {
  while (true) {
    skip_whitespace();
    if (buf_[pos_] == 0) break;
    parse_flag(env_option_name);
  }

  // Do a sanity check for certain flags.
  if (common_flags_dont_use.malloc_context_size < 1)
    common_flags_dont_use.malloc_context_size = 1;
}

void FlagParser::ParseStringFromEnv(const char *env_name) {
  const char *env = GetEnv(env_name);
  VPrintf(1, "%s: %s\n", env_name, env ? env : "<empty>");
  ParseString(env, env_name);
}

void FlagParser::ParseString(const char *s, const char *env_option_name) {
  if (!s) return;
  // Backup current parser state to allow nested ParseString() calls.
  const char *old_buf_ = buf_;
  uptr old_pos_ = pos_;
  buf_ = s;
  pos_ = 0;

  parse_flags(env_option_name);

  buf_ = old_buf_;
  pos_ = old_pos_;
}

bool FlagParser::ParseFile(const char *path, bool ignore_missing) {
  static const uptr kMaxIncludeSize = 1 << 15;
  char *data;
  uptr data_mapped_size;
  error_t err;
  uptr len;
  if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len,
                        Max(kMaxIncludeSize, GetPageSizeCached()), &err)) {
    if (ignore_missing)
      return true;
    Printf("Failed to read options from '%s': error %d\n", path, err);
    return false;
  }
  ParseString(data, path);
  UnmapOrDie(data, data_mapped_size);
  return true;
}

bool FlagParser::run_handler(const char *name, const char *value) {
  for (int i = 0; i < n_flags_; ++i) {
    if (internal_strcmp(name, flags_[i].name) == 0)
      return flags_[i].handler->Parse(value);
  }
  // Unrecognized flag. This is not a fatal error, we may print a warning later.
  unknown_flags.Add(name);
  return true;
}

void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
                                 const char *desc) {
  CHECK_LT(n_flags_, kMaxFlags);
  flags_[n_flags_].name = name;
  flags_[n_flags_].desc = desc;
  flags_[n_flags_].handler = handler;
  ++n_flags_;
}

FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
  flags_ =
      (Flag *)GetGlobalLowLevelAllocator().Allocate(sizeof(Flag) * kMaxFlags);
}

}  // namespace __sanitizer
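Editorial note, not part of the vendored file: a sketch of the parser's intended use inside the runtime -- register typed handlers for flag variables, then feed a TSAN_OPTIONS-style string. The flag names and values are illustrative.

#include "sanitizer_common/sanitizer_flag_parser.h"

namespace __sanitizer {
static void ParseOptionsSketch() {
  bool halt_on_error = false;
  int verbosity = 0;
  FlagParser parser;
  RegisterFlag(&parser, "halt_on_error", "stop after the first report",
               &halt_on_error);
  RegisterFlag(&parser, "verbosity", "debug output level", &verbosity);
  // Equivalent to running with TSAN_OPTIONS="halt_on_error=1:verbosity=2";
  // unknown names are collected and reported later, not fatal.
  parser.ParseString("halt_on_error=1:verbosity=2", "TSAN_OPTIONS");
}
}  // namespace __sanitizer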
202
lib/libtsan/sanitizer_common/sanitizer_flag_parser.h
Normal file
@@ -0,0 +1,202 @@
//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_FLAG_REGISTRY_H
#define SANITIZER_FLAG_REGISTRY_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

class FlagHandlerBase {
 public:
  virtual bool Parse(const char *value) { return false; }
  // Write the C string representation of the current value (truncated to fit)
  // into the buffer of size `size`. Returns false if truncation occurred and
  // returns true otherwise.
  virtual bool Format(char *buffer, uptr size) {
    if (size > 0)
      buffer[0] = '\0';
    return false;
  }

 protected:
  ~FlagHandlerBase() {}

  inline bool FormatString(char *buffer, uptr size, const char *str_to_use) {
    uptr num_symbols_should_write =
        internal_snprintf(buffer, size, "%s", str_to_use);
    return num_symbols_should_write < size;
  }
};

template <typename T>
class FlagHandler final : public FlagHandlerBase {
  T *t_;

 public:
  explicit FlagHandler(T *t) : t_(t) {}
  bool Parse(const char *value) final;
  bool Format(char *buffer, uptr size) final;
};

inline bool ParseBool(const char *value, bool *b) {
  if (internal_strcmp(value, "0") == 0 ||
      internal_strcmp(value, "no") == 0 ||
      internal_strcmp(value, "false") == 0) {
    *b = false;
    return true;
  }
  if (internal_strcmp(value, "1") == 0 ||
      internal_strcmp(value, "yes") == 0 ||
      internal_strcmp(value, "true") == 0) {
    *b = true;
    return true;
  }
  return false;
}

template <>
inline bool FlagHandler<bool>::Parse(const char *value) {
  if (ParseBool(value, t_)) return true;
  Printf("ERROR: Invalid value for bool option: '%s'\n", value);
  return false;
}

template <>
inline bool FlagHandler<bool>::Format(char *buffer, uptr size) {
  return FormatString(buffer, size, *t_ ? "true" : "false");
}

template <>
inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
  bool b;
  if (ParseBool(value, &b)) {
    *t_ = b ? kHandleSignalYes : kHandleSignalNo;
    return true;
  }
  if (internal_strcmp(value, "2") == 0 ||
      internal_strcmp(value, "exclusive") == 0) {
    *t_ = kHandleSignalExclusive;
    return true;
  }
  Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
  return false;
}

template <>
inline bool FlagHandler<HandleSignalMode>::Format(char *buffer, uptr size) {
  uptr num_symbols_should_write = internal_snprintf(buffer, size, "%d", *t_);
  return num_symbols_should_write < size;
}

template <>
inline bool FlagHandler<const char *>::Parse(const char *value) {
  *t_ = value;
  return true;
}

template <>
inline bool FlagHandler<const char *>::Format(char *buffer, uptr size) {
  return FormatString(buffer, size, *t_);
}

template <>
inline bool FlagHandler<int>::Parse(const char *value) {
  const char *value_end;
  *t_ = internal_simple_strtoll(value, &value_end, 10);
  bool ok = *value_end == 0;
  if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
  return ok;
}

template <>
inline bool FlagHandler<int>::Format(char *buffer, uptr size) {
  uptr num_symbols_should_write = internal_snprintf(buffer, size, "%d", *t_);
  return num_symbols_should_write < size;
}

template <>
inline bool FlagHandler<uptr>::Parse(const char *value) {
  const char *value_end;
  *t_ = internal_simple_strtoll(value, &value_end, 10);
  bool ok = *value_end == 0;
  if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
  return ok;
}

template <>
inline bool FlagHandler<uptr>::Format(char *buffer, uptr size) {
  uptr num_symbols_should_write = internal_snprintf(buffer, size, "0x%zx", *t_);
  return num_symbols_should_write < size;
}

template <>
inline bool FlagHandler<s64>::Parse(const char *value) {
  const char *value_end;
  *t_ = internal_simple_strtoll(value, &value_end, 10);
  bool ok = *value_end == 0;
  if (!ok) Printf("ERROR: Invalid value for s64 option: '%s'\n", value);
  return ok;
}

template <>
inline bool FlagHandler<s64>::Format(char *buffer, uptr size) {
  uptr num_symbols_should_write = internal_snprintf(buffer, size, "%lld", *t_);
  return num_symbols_should_write < size;
}

class FlagParser {
  static const int kMaxFlags = 200;
  struct Flag {
    const char *name;
    const char *desc;
    FlagHandlerBase *handler;
  } *flags_;
  int n_flags_;

  const char *buf_;
  uptr pos_;

 public:
  FlagParser();
  void RegisterHandler(const char *name, FlagHandlerBase *handler,
                       const char *desc);
  void ParseString(const char *s, const char *env_name = 0);
  void ParseStringFromEnv(const char *env_name);
  bool ParseFile(const char *path, bool ignore_missing);
  void PrintFlagDescriptions();

 private:
  void fatal_error(const char *err);
  bool is_space(char c);
  void skip_whitespace();
  void parse_flags(const char *env_option_name);
  void parse_flag(const char *env_option_name);
  bool run_handler(const char *name, const char *value);
  char *ll_strndup(const char *s, uptr n);
};

template <typename T>
static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
                         T *var) {
  FlagHandler<T> *fh = new (GetGlobalLowLevelAllocator()) FlagHandler<T>(var);
  parser->RegisterHandler(name, fh, desc);
}

void ReportUnrecognizedFlags();

}  // namespace __sanitizer

#endif  // SANITIZER_FLAG_REGISTRY_H
139
lib/libtsan/sanitizer_common/sanitizer_flags.cpp
Normal file
@@ -0,0 +1,139 @@
//===-- sanitizer_flags.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_flags.h"

#include "sanitizer_common.h"
#include "sanitizer_flag_parser.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_list.h"

namespace __sanitizer {

CommonFlags common_flags_dont_use;

void CommonFlags::SetDefaults() {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sanitizer_flags.inc"
#undef COMMON_FLAG
}

void CommonFlags::CopyFrom(const CommonFlags &other) {
  internal_memcpy(this, &other, sizeof(*this));
}

// Copy the string from "s" to "out", making the following substitutions:
// %b = binary basename
// %p = pid
// %d = binary directory
void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
  char *out_end = out + out_size;
  while (*s && out < out_end - 1) {
    if (s[0] != '%') {
      *out++ = *s++;
      continue;
    }
    switch (s[1]) {
      case 'b': {
        const char *base = GetProcessName();
        CHECK(base);
        while (*base && out < out_end - 1)
          *out++ = *base++;
        s += 2;  // skip "%b"
        break;
      }
      case 'p': {
        int pid = internal_getpid();
        char buf[32];
        char *buf_pos = buf + 32;
        do {
          *--buf_pos = (pid % 10) + '0';
          pid /= 10;
        } while (pid);
        while (buf_pos < buf + 32 && out < out_end - 1)
          *out++ = *buf_pos++;
        s += 2;  // skip "%p"
        break;
      }
      case 'd': {
        uptr len = ReadBinaryDir(out, out_end - out);
        out += len;
        s += 2;  // skip "%d"
        break;
      }
      default:
        *out++ = *s++;
        break;
    }
  }
  CHECK(out < out_end - 1);
  *out = '\0';
}
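To make the substitution concrete (a hypothetical trace; process name "a.out" and pid 1234 are invented):

// SubstituteForFlagValue("log.%b.%p", buf, sizeof(buf)) yields "log.a.out.1234";
// output that does not fit in out_size is truncated, never overflowed.
char buf[64];
SubstituteForFlagValue("log.%b.%p", buf, sizeof(buf));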

class FlagHandlerInclude final : public FlagHandlerBase {
  FlagParser *parser_;
  bool ignore_missing_;
  const char *original_path_;

 public:
  explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)
      : parser_(parser), ignore_missing_(ignore_missing), original_path_("") {}
  bool Parse(const char *value) final {
    original_path_ = value;
    if (internal_strchr(value, '%')) {
      char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
      SubstituteForFlagValue(value, buf, kMaxPathLength);
      bool res = parser_->ParseFile(buf, ignore_missing_);
      UnmapOrDie(buf, kMaxPathLength);
      return res;
    }
    return parser_->ParseFile(value, ignore_missing_);
  }
  bool Format(char *buffer, uptr size) override {
    // Note `original_path_` isn't actually what's parsed due to `%`
    // substitutions. Printing the substituted path would require holding onto
    // mmap'ed memory.
    return FormatString(buffer, size, original_path_);
  }
};

void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
  FlagHandlerInclude *fh_include = new (GetGlobalLowLevelAllocator())
      FlagHandlerInclude(parser, /*ignore_missing*/ false);
  parser->RegisterHandler("include", fh_include,
                          "read more options from the given file");
  FlagHandlerInclude *fh_include_if_exists = new (GetGlobalLowLevelAllocator())
      FlagHandlerInclude(parser, /*ignore_missing*/ true);
  parser->RegisterHandler(
      "include_if_exists", fh_include_if_exists,
      "read more options from the given file (if it exists)");
}

void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &cf->Name);
#include "sanitizer_flags.inc"
#undef COMMON_FLAG

  RegisterIncludeFlags(parser, cf);
}

void InitializeCommonFlags(CommonFlags *cf) {
  // need to record coverage to generate coverage report.
  cf->coverage |= cf->html_cov_report;
  SetVerbosity(cf->verbosity);

  InitializePlatformCommonFlags(cf);
}

}  // namespace __sanitizer
71
lib/libtsan/sanitizer_common/sanitizer_flags.h
Normal file
@@ -0,0 +1,71 @@
//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_FLAGS_H
#define SANITIZER_FLAGS_H

#include "sanitizer_internal_defs.h"

namespace __sanitizer {

enum HandleSignalMode {
  kHandleSignalNo,
  kHandleSignalYes,
  kHandleSignalExclusive,
};

struct CommonFlags {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sanitizer_flags.inc"
#undef COMMON_FLAG

  void SetDefaults();
  void CopyFrom(const CommonFlags &other);
};

// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
extern CommonFlags common_flags_dont_use;
inline const CommonFlags *common_flags() {
  return &common_flags_dont_use;
}

inline void SetCommonFlagsDefaults() {
  common_flags_dont_use.SetDefaults();
}

// This function can only be used to set up tool-specific overrides for
// CommonFlags defaults. Generally, it should only be used right after
// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
// only during the flags initialization (i.e. before they are used for
// the first time).
inline void OverrideCommonFlags(const CommonFlags &cf) {
  common_flags_dont_use.CopyFrom(cf);
}
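A minimal sketch of the ordering prescribed by the comment above, for a hypothetical tool (the chosen override and the environment variable name are illustrative, not from this commit):

void InitializeMyToolFlags() {
  SetCommonFlagsDefaults();            // 1. baseline defaults
  CommonFlags cf;
  cf.CopyFrom(*common_flags());
  cf.detect_leaks = false;             // 2. tool-specific override (example)
  OverrideCommonFlags(cf);
  FlagParser parser;
  RegisterCommonFlags(&parser);
  parser.ParseStringFromEnv("MYTOOL_OPTIONS");  // 3. user-provided options
  InitializeCommonFlags();             // 4. finalize (verbosity etc.)
}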

void SubstituteForFlagValue(const char *s, char *out, uptr out_size);

class FlagParser;
void RegisterCommonFlags(FlagParser *parser,
                         CommonFlags *cf = &common_flags_dont_use);
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);

// Should be called after parsing all flags. Sets up common flag values
// and performs initializations common to all sanitizers (e.g. setting
// verbosity).
void InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use);

// Platform-specific flags initialization.
void InitializePlatformCommonFlags(CommonFlags *cf);

}  // namespace __sanitizer

#endif  // SANITIZER_FLAGS_H
284
lib/libtsan/sanitizer_common/sanitizer_flags.inc
Normal file
@@ -0,0 +1,284 @@
//===-- sanitizer_flags.inc -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes common flags available in all sanitizers.
//
//===----------------------------------------------------------------------===//

#ifndef COMMON_FLAG
#error "Define COMMON_FLAG prior to including this file!"
#endif

// COMMON_FLAG(Type, Name, DefaultValue, Description)
// Supported types: bool, const char *, int, uptr.
// Default value must be a compile-time constant.
// Description must be a string literal.
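Besides the consumers elsewhere in this commit (SetDefaults, RegisterCommonFlags, the CommonFlags struct), this X-macro contract admits ad-hoc expansions; a hedged sketch of a hypothetical one that prints every flag name with its stringified default:

#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
  Printf("%s (default: %s)\n", #Name, #DefaultValue);
#include "sanitizer_flags.inc"
#undef COMMON_FLAG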

COMMON_FLAG(
    bool, symbolize, true,
    "If set, use the online symbolizer from common sanitizer runtime to turn "
    "virtual addresses to file/line locations.")
COMMON_FLAG(
    const char *, external_symbolizer_path, nullptr,
    "Path to external symbolizer. If empty, the tool will search $PATH for "
    "the symbolizer.")
COMMON_FLAG(
    bool, allow_addr2line, false,
    "If set, allows online symbolizer to run addr2line binary to symbolize "
    "stack traces (addr2line will only be used if the llvm-symbolizer binary "
    "is unavailable).")
COMMON_FLAG(const char *, strip_path_prefix, "",
            "Strips this prefix from file paths in error reports.")
COMMON_FLAG(bool, fast_unwind_on_check, false,
            "If available, use the fast frame-pointer-based unwinder on "
            "internal CHECK failures.")
COMMON_FLAG(bool, fast_unwind_on_fatal, false,
            "If available, use the fast frame-pointer-based unwinder on fatal "
            "errors.")
// ARM thumb/thumb2 frame pointer is inconsistent on GCC and Clang [1],
// and the fast unwinder is also unreliable when mixing arm and thumb code [2].
// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92172
// [2] https://bugs.llvm.org/show_bug.cgi?id=44158
COMMON_FLAG(bool, fast_unwind_on_malloc,
            !(SANITIZER_LINUX && !SANITIZER_ANDROID && SANITIZER_ARM),
            "If available, use the fast frame-pointer-based unwinder on "
            "malloc/free.")
COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
COMMON_FLAG(int, malloc_context_size, 1,
            "Max number of stack frames kept for each allocation/deallocation.")
COMMON_FLAG(
    const char *, log_path, nullptr,
    "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
    "\"stderr\". If unspecified, defaults to \"stderr\".")
COMMON_FLAG(
    bool, log_exe_name, false,
    "Mention name of executable when reporting error and "
    "append executable name to logs (as in \"log_path.exe_name.pid\").")
COMMON_FLAG(const char *, log_suffix, nullptr,
            "String to append to log file name, e.g. \".txt\".")
COMMON_FLAG(
    bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
    "Write all sanitizer output to syslog in addition to other means of "
    "logging.")
COMMON_FLAG(
    int, verbosity, 0,
    "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
COMMON_FLAG(bool, strip_env, true,
            "Whether to remove the sanitizer from DYLD_INSERT_LIBRARIES to "
            "avoid passing it to children on Apple platforms. Default is true.")
COMMON_FLAG(bool, verify_interceptors, true,
            "Verify that interceptors are working on Apple platforms. Default "
            "is true.")
COMMON_FLAG(bool, detect_leaks, !SANITIZER_APPLE, "Enable memory leak detection.")
COMMON_FLAG(
    bool, leak_check_at_exit, true,
    "Invoke leak checking in an atexit handler. Has no effect if "
    "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
    "handler has a chance to run.")
COMMON_FLAG(bool, allocator_may_return_null, false,
            "If false, the allocator will crash instead of returning 0 on "
            "out-of-memory.")
COMMON_FLAG(bool, print_summary, true,
            "If false, disable printing error summaries in addition to error "
            "reports.")
COMMON_FLAG(int, print_module_map, 0,
            "Print the process module map where supported (0 - don't print, "
            "1 - print only once before process exits, 2 - print after each "
            "report).")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
#define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \
  "Controls custom tool's " #signal " handler (0 - do not register the " \
  "handler, 1 - register the handler and allow user to set own, " \
  "2 - register the handler and block user from changing it). "
COMMON_FLAG(HandleSignalMode, handle_segv, kHandleSignalYes,
            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGSEGV))
COMMON_FLAG(HandleSignalMode, handle_sigbus, kHandleSignalYes,
            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGBUS))
COMMON_FLAG(HandleSignalMode, handle_abort, kHandleSignalNo,
            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGABRT))
COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,
            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGILL))
COMMON_FLAG(HandleSignalMode, handle_sigtrap, kHandleSignalNo,
            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGTRAP))
COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,
            COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))
#undef COMMON_FLAG_HANDLE_SIGNAL_HELP
COMMON_FLAG(bool, allow_user_segv_handler, true,
            "Deprecated. True has no effect, use handle_sigbus=1. If false, "
            "handle_*=1 will be upgraded to handle_*=2.")
COMMON_FLAG(bool, use_sigaltstack, true,
            "If set, uses alternate stack for signal handling.")
COMMON_FLAG(bool, detect_deadlocks, true,
            "If set, deadlock detection is enabled.")
COMMON_FLAG(
    uptr, clear_shadow_mmap_threshold, 64 * 1024,
    "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
    "memset(). This is the threshold size in bytes.")
COMMON_FLAG(const char *, color, "auto",
            "Colorize reports: (always|never|auto).")
COMMON_FLAG(
    bool, legacy_pthread_cond, false,
    "Enables support for dynamic libraries linked with libpthread 2.2.5.")
COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
COMMON_FLAG(uptr, mmap_limit_mb, 0,
            "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
            "not a user-facing flag, used mostly for testing the tools")
COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
            "Hard RSS limit in Mb."
            " If non-zero, a background thread is spawned at startup"
            " which periodically reads RSS and aborts the process if the"
            " limit is reached")
COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
            "Soft RSS limit in Mb."
            " If non-zero, a background thread is spawned at startup"
            " which periodically reads RSS. If the limit is reached"
            " all subsequent malloc/new calls will fail or return NULL"
            " (depending on the value of allocator_may_return_null)"
            " until the RSS goes below the soft limit."
            " This limit does not affect memory allocations other than"
            " malloc/new.")
COMMON_FLAG(uptr, max_allocation_size_mb, 0,
            "If non-zero, malloc/new calls larger than this size will return "
            "nullptr (or crash if allocator_may_return_null=false).")
COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
COMMON_FLAG(s32, allocator_release_to_os_interval_ms,
            ((bool)SANITIZER_FUCHSIA || (bool)SANITIZER_WINDOWS) ? -1 : 5000,
            "Only affects a 64-bit allocator. If set, tries to release unused "
            "memory to the OS, but not more often than this interval (in "
            "milliseconds). Negative values mean do not attempt to release "
            "memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
            "If false, do not attempt to read /proc/maps/statm."
            " Mostly useful for testing sanitizers.")
COMMON_FLAG(
    bool, coverage, false,
    "If set, coverage information will be dumped at program shutdown (if the "
    "coverage instrumentation was enabled at compile time).")
COMMON_FLAG(const char *, coverage_dir, ".",
            "Target directory for coverage dumps. Defaults to the current "
            "directory.")
COMMON_FLAG(const char *, cov_8bit_counters_out, "",
            "If non-empty, write 8bit counters to this file. ")
COMMON_FLAG(const char *, cov_pcs_out, "",
            "If non-empty, write the coverage pc table to this file. ")
COMMON_FLAG(bool, full_address_space, false,
            "Sanitize complete address space; "
            "by default kernel area on 32-bit platforms will not be sanitized")
COMMON_FLAG(bool, print_suppressions, true,
            "Print matched suppressions at exit.")
COMMON_FLAG(
    bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,
    "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid"
    " dumping a 16T+ core file. Ignored on OSes that don't dump core by"
    " default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
            "If set, instructs kernel to not store the (huge) shadow "
            "in core file.")
COMMON_FLAG(bool, symbolize_inline_frames, true,
            "Print inlined frames in stacktraces. Defaults to true.")
COMMON_FLAG(bool, demangle, true, "Print demangled symbols.")
COMMON_FLAG(bool, symbolize_vs_style, false,
            "Print file locations in Visual Studio style (e.g.: "
            " file(10,42): ...)")
COMMON_FLAG(int, dedup_token_length, 0,
            "If positive, after printing a stack trace also print a short "
            "string token based on this number of frames that will simplify "
            "deduplication of the reports. "
            "Example: 'DEDUP_TOKEN: foo-bar-main'. Default is 0.")
COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
            "Format string used to render stack frames. "
            "See sanitizer_stacktrace_printer.h for the format description. "
            "Use DEFAULT to get default format.")
COMMON_FLAG(int, compress_stack_depot, 0,
            "Compress stack depot to save memory.")
COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
            "If true, the shadow is not allowed to use huge pages. ")
COMMON_FLAG(bool, strict_string_checks, false,
            "If set, check that string arguments are properly null-terminated")
COMMON_FLAG(bool, intercept_strstr, true,
            "If set, uses custom wrappers for strstr and strcasestr functions "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strspn, true,
            "If set, uses custom wrappers for strspn and strcspn functions "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strtok, true,
            "If set, uses a custom wrapper for the strtok function "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strpbrk, true,
            "If set, uses a custom wrapper for the strpbrk function "
            "to find more errors.")
COMMON_FLAG(
    bool, intercept_strcmp, true,
    "If set, uses custom wrappers for strcmp functions to find more errors.")
COMMON_FLAG(bool, intercept_strlen, true,
            "If set, uses custom wrappers for strlen and strnlen functions "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strndup, true,
            "If set, uses custom wrappers for strndup functions "
            "to find more errors.")
COMMON_FLAG(bool, intercept_strchr, true,
            "If set, uses custom wrappers for strchr, strchrnul, and strrchr "
            "functions to find more errors.")
COMMON_FLAG(bool, intercept_memcmp, true,
            "If set, uses a custom wrapper for the memcmp function "
            "to find more errors.")
COMMON_FLAG(bool, strict_memcmp, true,
            "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
            "comparing p1 and p2.")
COMMON_FLAG(bool, intercept_memmem, true,
            "If set, uses a wrapper for memmem() to find more errors.")
COMMON_FLAG(bool, intercept_intrin, true,
            "If set, uses custom wrappers for memset/memcpy/memmove "
            "intrinsics to find more errors.")
COMMON_FLAG(bool, intercept_stat, true,
            "If set, uses custom wrappers for *stat functions "
            "to find more errors.")
COMMON_FLAG(bool, intercept_send, true,
            "If set, uses custom wrappers for send* functions "
            "to find more errors.")
COMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,
            "If set, decorate sanitizer mappings in /proc/self/maps with "
            "user-readable names")
COMMON_FLAG(int, exitcode, 1,
            "Override the program exit status if the tool found an error")
COMMON_FLAG(
    bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_APPLE,
    "If set, the tool calls abort() instead of _exit() after printing the "
    "error report.")
COMMON_FLAG(bool, suppress_equal_pcs, true,
            "Deduplicate multiple reports for single source location in "
            "halt_on_error=false mode (asan only).")
COMMON_FLAG(bool, print_cmdline, false,
            "Print command line on crash (asan only).")
COMMON_FLAG(bool, html_cov_report, false, "Generate html coverage report.")
COMMON_FLAG(const char *, sancov_path, "sancov", "Sancov tool location.")
COMMON_FLAG(bool, dump_instruction_bytes, false,
            "If true, dump 16 bytes starting at the instruction that caused SEGV")
COMMON_FLAG(bool, dump_registers, true,
            "If true, dump values of CPU registers when SEGV happens. Only "
            "available on OS X for now.")
COMMON_FLAG(bool, detect_write_exec, false,
            "If true, triggers a warning when requests for "
            "writable-executable pages are made")
COMMON_FLAG(bool, test_only_emulate_no_memorymap, false,
            "TEST ONLY fail to read memory mappings to emulate sanitized "
            "\"init\"")
// With static linking, dladdr((void*)pthread_join) or similar will return the
// path to the main program. This flag will replace dlopen(<main program>,...)
// with dlopen(NULL,...), which is the correct way to get a handle to the main
// program.
COMMON_FLAG(bool, test_only_replace_dlopen_main_program, false,
            "TEST ONLY replace dlopen(<main program>,...) with dlopen(NULL)")

COMMON_FLAG(bool, enable_symbolizer_markup, SANITIZER_FUCHSIA,
            "Use sanitizer symbolizer markup, available on Linux "
            "and always set true for Fuchsia.")

COMMON_FLAG(bool, detect_invalid_join, true,
            "If set, check invalid joins of threads.")
166
lib/libtsan/sanitizer_common/sanitizer_flat_map.h
Normal file
@@ -0,0 +1,166 @@
//===-- sanitizer_flat_map.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_FLAT_MAP_H
#define SANITIZER_FLAT_MAP_H

#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_local_address_space_view.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

// Maps integers in range [0, kSize) to values.
template <typename T, u64 kSize,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class FlatMap {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() { internal_memset(map_, 0, sizeof(map_)); }

  constexpr uptr size() const { return kSize; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize);
    return true;
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize);
    return map_[idx];
  }

 private:
  T map_[kSize];
};

// TwoLevelMap maps integers in range [0, kSize1*kSize2) to values.
// It is implemented as a two-dimensional array: an array of kSize1 pointers
// to kSize2-element arrays of T. The secondary arrays are mmaped on demand.
// Each value is initially zero and can be set to something else only once.
// Setting and getting values from multiple threads is safe w/o extra locking.
template <typename T, u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
class TwoLevelMap {
  static_assert(IsPowerOfTwo(kSize2), "Use a power of two for performance.");

 public:
  using AddressSpaceView = AddressSpaceViewTy;
  void Init() {
    mu_.Init();
    internal_memset(map1_, 0, sizeof(map1_));
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      UnmapOrDie(p, kSize2);
    }
    Init();
  }

  uptr MemoryUsage() const {
    uptr res = 0;
    for (uptr i = 0; i < kSize1; i++) {
      T *p = Get(i);
      if (!p)
        continue;
      res += MmapSize();
    }
    return res;
  }

  constexpr uptr size() const { return kSize1 * kSize2; }
  constexpr uptr size1() const { return kSize1; }
  constexpr uptr size2() const { return kSize2; }

  bool contains(uptr idx) const {
    CHECK_LT(idx, kSize1 * kSize2);
    return Get(idx / kSize2);
  }

  const T &operator[](uptr idx) const {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::Load(&map2[idx % kSize2]);
  }

  T &operator[](uptr idx) {
    DCHECK_LT(idx, kSize1 * kSize2);
    T *map2 = GetOrCreate(idx / kSize2);
    return *AddressSpaceView::LoadWritable(&map2[idx % kSize2]);
  }

  void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Lock(); }

  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mu_.Unlock(); }

 private:
  constexpr uptr MmapSize() const {
    return RoundUpTo(kSize2 * sizeof(T), GetPageSizeCached());
  }

  T *Get(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    return reinterpret_cast<T *>(
        atomic_load(&map1_[idx], memory_order_acquire));
  }

  T *GetOrCreate(uptr idx) const {
    DCHECK_LT(idx, kSize1);
    // This code needs to use memory_order_acquire/consume, but we use
    // memory_order_relaxed for performance reasons (matters for arm64). We
    // expect memory_order_relaxed to be effectively equivalent to
    // memory_order_consume in this case for all relevant architectures: all
    // dependent data is reachable only by dereferencing the resulting pointer.
    // If the relaxed load fails to see the stored ptr, the code will fall back
    // to Create() and reload the value again with the locked mutex as a memory
    // barrier.
    T *res = reinterpret_cast<T *>(atomic_load_relaxed(&map1_[idx]));
    if (LIKELY(res))
      return res;
    return Create(idx);
  }

  NOINLINE T *Create(uptr idx) const {
    SpinMutexLock l(&mu_);
    T *res = Get(idx);
    if (!res) {
      res = reinterpret_cast<T *>(MmapOrDie(MmapSize(), "TwoLevelMap"));
      atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
                   memory_order_release);
    }
    return res;
  }

  mutable StaticSpinMutex mu_;
  mutable atomic_uintptr_t map1_[kSize1];
};
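A usage sketch for the map above (sizes are invented; real allocator code picks sizes to match its address-space carve-up):

// Hypothetical: a byte map over 2^20 indices, second level mmaped on first touch.
static TwoLevelMap<u8, 1 << 10, 1 << 10> g_state;

void ExampleUse() {
  g_state.Init();
  g_state[12345] = 1;            // first write maps one kSize2-element chunk
  CHECK_EQ(g_state[12345], 1);   // later reads are lock-free
  CHECK_EQ(g_state.size(), 1 << 20);
}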

template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
using FlatByteMap = FlatMap<u8, kSize, AddressSpaceViewTy>;

template <u64 kSize1, u64 kSize2,
          typename AddressSpaceViewTy = LocalAddressSpaceView>
using TwoLevelByteMap = TwoLevelMap<u8, kSize1, kSize2, AddressSpaceViewTy>;
}  // namespace __sanitizer

#endif
567
lib/libtsan/sanitizer_common/sanitizer_fuchsia.cpp
Normal file
@@ -0,0 +1,567 @@
//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#  include <pthread.h>
#  include <stdlib.h>
#  include <unistd.h>
#  include <zircon/errors.h>
#  include <zircon/process.h>
#  include <zircon/syscalls.h>
#  include <zircon/utc.h>

#  include "sanitizer_common.h"
#  include "sanitizer_interface_internal.h"
#  include "sanitizer_libc.h"
#  include "sanitizer_mutex.h"

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_thread_legacy_yield(0u);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

void internal_usleep(u64 useconds) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
  CHECK_EQ(status, ZX_OK);
}

u64 NanoTime() {
  zx_handle_t utc_clock = _zx_utc_reference_get();
  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
  zx_time_t time;
  zx_status_t status = _zx_clock_read(utc_clock, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

void FutexWait(atomic_uint32_t *p, u32 cmp) {
  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
  if (status != ZX_ERR_BAD_STATE)  // Normal race.
    CHECK_EQ(status, ZX_OK);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
  CHECK_EQ(status, ZX_OK);
}

uptr GetPageSize() { return _zx_system_get_page_size(); }

uptr GetMmapGranularity() { return _zx_system_get_page_size(); }

sanitizer_shadow_bounds_t ShadowBounds;

void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }

uptr GetMaxUserVirtualAddress() {
  InitShadowBounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }

// For any sanitizer internal that needs to map something which can be unmapped
// later, first attempt to map to a pre-allocated VMAR. This helps reduce
// fragmentation from many small anonymous mmap calls. A good value for this
// VMAR size would be the total size of your typical sanitizer internal objects
// allocated in an "average" process lifetime. Examples of this include:
// FakeStack, LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector,
// StackStore, CreateAsanThread, etc.
//
// This is roughly equal to the total sum of sanitizer internal mappings for a
// large test case.
constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;

static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
  zx_status_t status = ZX_OK;
  if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
    CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
    uintptr_t base;
    status = _zx_vmar_allocate(
        _zx_vmar_root_self(),
        ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
        kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
  }
  *vmar = gSanitizerHeapVmar;
  if (status == ZX_OK)
    CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
  return status;
}

static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
                                          size_t vmar_offset, zx_handle_t vmo,
                                          size_t size, uintptr_t *addr,
                                          zx_handle_t *vmar_used = nullptr) {
  zx_handle_t vmar;
  zx_status_t status = GetSanitizerHeapVmar(&vmar);
  if (status != ZX_OK)
    return status;

  status = _zx_vmar_map(gSanitizerHeapVmar, options, vmar_offset, vmo,
                        /*vmo_offset=*/0, size, addr);
  if (vmar_used)
    *vmar_used = gSanitizerHeapVmar;
  if (status == ZX_ERR_NO_RESOURCES || status == ZX_ERR_INVALID_ARGS) {
    // This means there's no space in the heap VMAR, so fall back to the root
    // VMAR.
    status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
                          /*vmo_offset=*/0, size, addr);
    if (vmar_used)
      *vmar_used = _zx_vmar_root_self();
  }

  return status;
}

static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, GetPageSize());

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  uintptr_t addr;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, GetPageSize());
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, GetPageSize());
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name ? name : name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar,
                    bool raw_report) {
  if (!addr || !size)
    return;
  size = RoundUpTo(size, GetPageSize());

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
    // If there wasn't any space in the heap vmar, the fallback was the root
    // vmar.
    status = _zx_vmar_unmap(_zx_vmar_root_self(),
                            reinterpret_cast<uintptr_t>(addr), size);
  }
  if (status != ZX_OK)
    ReportMunmapFailureAndDie(addr, size, status, raw_report);

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar,
                 /*raw_report=*/false);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
         ZX_OK;
}

bool MprotectReadWrite(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
                          size) == ZX_OK;
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, GetPageSize());
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  zx_handle_t vmar_used;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, map_size, &addr,
                                  &vmar_used);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(vmar_used, ZX_INFO_VMAR, &info, sizeof(info),
                                   NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            vmar_used,
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK)
          CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(vmar_used, map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(vmar_used, end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
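A worked instance of the map-and-trim scheme above, with invented numbers: for size = 64 KiB and alignment = 64 KiB, map_size = 128 KiB. If the kernel places the mapping at map_addr = 0x12345000, then addr = RoundUpTo(0x12345000, 0x10000) = 0x12350000; the VMO is re-mapped there with ZX_VM_SPECIFIC_OVERWRITE, the 0xB000-byte prefix [0x12345000, 0x12350000) and the 0x5000-byte suffix [0x12360000, 0x12365000) are unmapped, and exactly size bytes remain mapped at the aligned address.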

void UnmapOrDie(void *addr, uptr size, bool raw_report) {
  UnmapOrDieVmar(addr, size, gSanitizerHeapVmar, raw_report);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundUpTo(beg, GetPageSize());
  uptr end_aligned = RoundDownTo(end, GetPageSize());
  if (beg_aligned < end_aligned) {
    zx_handle_t root_vmar = _zx_vmar_root_self();
    CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
    zx_status_t status =
        _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
                          end_aligned - beg_aligned, nullptr, 0);
    CHECK_EQ(status, ZX_OK);
  }
}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

bool TryMemCpy(void *dest, const void *src, uptr n) {
  // TODO: implement.
  return false;
}

// FIXME: implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  *errno_p = ZX_ERR_NOT_SUPPORTED;
  return false;
}

void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}
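To make the buffering above concrete, a hypothetical trace: RawWrite("ab\ncd") emits "ab\n" (3 bytes) via __sanitizer_log_write and leaves "cd" in the thread-local line buffer; the pending bytes are flushed once a later call supplies the terminating newline or the 128-byte buffer fills.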

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) {}

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}

const char *__sanitizer_get_report_path() {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA
38
lib/libtsan/sanitizer_common/sanitizer_fuchsia.h
Normal file
@@ -0,0 +1,38 @@
//===-- sanitizer_fuchsia.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
// Fuchsia-specific sanitizer support.
//
//===---------------------------------------------------------------------===//
#ifndef SANITIZER_FUCHSIA_H
#define SANITIZER_FUCHSIA_H

#include "sanitizer_platform.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"

#include <zircon/sanitizer.h>
#include <zircon/syscalls/object.h>

namespace __sanitizer {

extern uptr MainThreadStackBase, MainThreadStackSize;
extern sanitizer_shadow_bounds_t ShadowBounds;

struct MemoryMappingLayoutData {
  InternalMmapVector<zx_info_maps_t> data;
  size_t current;  // Current index into the vector.
};

void InitShadowBounds();

}  // namespace __sanitizer

#endif  // SANITIZER_FUCHSIA
#endif  // SANITIZER_FUCHSIA_H
59
lib/libtsan/sanitizer_common/sanitizer_getauxval.h
Normal file
@@ -0,0 +1,59 @@
//===-- sanitizer_getauxval.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Common getauxval() guards and definitions.
// getauxval() is not defined until glibc version 2.16, or until API level 21
// for Android.
// Implement the getauxval() compat function for NetBSD.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_GETAUXVAL_H
#define SANITIZER_GETAUXVAL_H

#include "sanitizer_platform.h"
#include "sanitizer_glibc_version.h"

#if SANITIZER_LINUX || SANITIZER_FUCHSIA

#  if (__GLIBC_PREREQ(2, 16) || SANITIZER_ANDROID || SANITIZER_FUCHSIA) && \
      !SANITIZER_GO
#    define SANITIZER_USE_GETAUXVAL 1
#  else
#    define SANITIZER_USE_GETAUXVAL 0
#  endif

#  if SANITIZER_USE_GETAUXVAL
#    include <sys/auxv.h>
#  else
// The weak getauxval definition allows checking for the function at runtime.
// This is useful for Android, when compiled at a lower API level yet running
// on a more recent platform that offers the function.
extern "C" SANITIZER_WEAK_ATTRIBUTE unsigned long getauxval(unsigned long type);
#  endif

#elif SANITIZER_NETBSD

#define SANITIZER_USE_GETAUXVAL 1

#include <dlfcn.h>
#include <elf.h>

static inline decltype(AuxInfo::a_v) getauxval(decltype(AuxInfo::a_type) type) {
  for (const AuxInfo *aux = (const AuxInfo *)_dlauxinfo();
       aux->a_type != AT_NULL; ++aux) {
    if (type == aux->a_type)
      return aux->a_v;
  }

  return 0;
}

#endif

#endif  // SANITIZER_GETAUXVAL_H
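A sketch of how a caller typically uses this header (the AT_PAGESZ query and the helper name are invented; in the weak-symbol branch AT_PAGESZ must be visible via some other header):

#include "sanitizer_getauxval.h"

static unsigned long GetPageSizeViaAuxv() {
#if SANITIZER_USE_GETAUXVAL
  return getauxval(AT_PAGESZ);   // direct call, or the NetBSD compat shim
#else
  // Weak symbol: may be unresolved when running on an older Android API level.
  return &getauxval ? getauxval(AT_PAGESZ) : 0;
#endif
}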
26
lib/libtsan/sanitizer_common/sanitizer_glibc_version.h
Normal file
@@ -0,0 +1,26 @@
//===-- sanitizer_glibc_version.h -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer common code.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_GLIBC_VERSION_H
#define SANITIZER_GLIBC_VERSION_H

#include "sanitizer_platform.h"

#if SANITIZER_LINUX || SANITIZER_FUCHSIA
#include <features.h>
#endif

#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif

#endif
67
lib/libtsan/sanitizer_common/sanitizer_hash.h
Normal file
@@ -0,0 +1,67 @@
//===-- sanitizer_hash.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a simple hash function.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_HASH_H
#define SANITIZER_HASH_H

#include "sanitizer_internal_defs.h"

namespace __sanitizer {
class MurMur2HashBuilder {
  static const u32 m = 0x5bd1e995;
  static const u32 seed = 0x9747b28c;
  static const u32 r = 24;
  u32 h;

 public:
  explicit MurMur2HashBuilder(u32 init = 0) { h = seed ^ init; }
  void add(u32 k) {
    k *= m;
    k ^= k >> r;
    k *= m;
    h *= m;
    h ^= k;
  }
  u32 get() {
    u32 x = h;
    x ^= x >> 13;
    x *= m;
    x ^= x >> 15;
    return x;
  }
};

class MurMur2Hash64Builder {
  static const u64 m = 0xc6a4a7935bd1e995ull;
  static const u64 seed = 0x9747b28c9747b28cull;
  static const u64 r = 47;
  u64 h;

 public:
  explicit MurMur2Hash64Builder(u64 init = 0) { h = seed ^ (init * m); }
  void add(u64 k) {
    k *= m;
    k ^= k >> r;
    k *= m;
    h ^= k;
    h *= m;
  }
  u64 get() {
    u64 x = h;
    x ^= x >> r;
    x *= m;
    x ^= x >> r;
    return x;
  }
};
}  // namespace __sanitizer

#endif  // SANITIZER_HASH_H
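A usage sketch for the builder above (the helper and its inputs are illustrative):

// Hash a pair of 32-bit ids into one 32-bit value.
static u32 HashPair(u32 a, u32 b) {
  MurMur2HashBuilder h(a);  // seed is mixed with `a` in the constructor
  h.add(b);                 // fold in the second value
  return h.get();           // final avalanche of the accumulated state
}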
1533
lib/libtsan/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
Normal file
File diff suppressed because it is too large
163
lib/libtsan/sanitizer_common/sanitizer_interface_internal.h
Normal file
@@ -0,0 +1,163 @@
//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// This header declares the sanitizer runtime interface functions.
// The runtime library has to define these functions so the instrumented
// program can call them.
//
// See also include/sanitizer/common_interface_defs.h
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_INTERFACE_INTERNAL_H
#define SANITIZER_INTERFACE_INTERNAL_H

#include "sanitizer_internal_defs.h"

extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
// Tell the tools to write their reports to the provided file descriptor
// (cast to void *).
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_fd(void *fd);
// Get the current full report file path, if a path was specified by
// an earlier call to __sanitizer_set_report_path. Returns null otherwise.
SANITIZER_INTERFACE_ATTRIBUTE
const char *__sanitizer_get_report_path();

typedef struct {
  int coverage_sandboxed;
  __sanitizer::sptr coverage_fd;
  unsigned int coverage_max_block_size;
} __sanitizer_sandbox_arguments;

// Notify the tools that the sandbox is going to be turned on.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);

// This function is called by the tool when it has just finished reporting
// an error. 'error_summary' is a one-line string that summarizes
// the error message. This function can be overridden by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
__sanitizer_report_error_summary(const char *error_summary);

// Returns size of dynamically allocated block. This function can be overridden
// by the client.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE __sanitizer::uptr
__sanitizer_get_dtls_size(const void *tls_begin);

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
    const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();

SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);

// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
// to ensure only one report is printed when multiple errors occur
// simultaneously.
SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_annotate_contiguous_container(const void *beg, const void *end,
|
||||
const void *old_mid,
|
||||
const void *new_mid);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_annotate_double_ended_contiguous_container(
|
||||
const void *storage_beg, const void *storage_end,
|
||||
const void *old_container_beg, const void *old_container_end,
|
||||
const void *new_container_beg, const void *new_container_end);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
void __sanitizer_copy_contiguous_container_annotations(const void *src_begin,
|
||||
const void *src_end,
|
||||
const void *dst_begin,
|
||||
const void *dst_end);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
|
||||
const void *end);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
int __sanitizer_verify_double_ended_contiguous_container(
|
||||
const void *storage_beg, const void *container_beg,
|
||||
const void *container_end, const void *storage_end);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
|
||||
const void *mid,
|
||||
const void *end);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
|
||||
const void *storage_beg, const void *container_beg,
|
||||
const void *container_end, const void *storage_end);
|
||||
|
||||
SANITIZER_INTERFACE_ATTRIBUTE
|
||||
int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
|
||||
__sanitizer::uptr module_path_len,
|
||||
void **pc_offset);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_cmp();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_cmp1();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_cmp2();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_cmp4();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_cmp8();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_const_cmp1();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_const_cmp2();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_const_cmp4();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_const_cmp8();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_switch();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_div4();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_div8();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_gep();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_pc_indir();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_load1();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_load2();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_load4();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_load8();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_load16();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_store1();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_store2();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_store4();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_store8();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_store16();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_pc_guard(__sanitizer::u32 *);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_trace_pc_guard_init(__sanitizer::u32 *, __sanitizer::u32 *);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_8bit_counters_init(char *, char *);
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_bool_flag_init();
|
||||
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
|
||||
__sanitizer_cov_pcs_init(const __sanitizer::uptr *, const __sanitizer::uptr *);
|
||||
} // extern "C"
|
||||
|
||||
#endif // SANITIZER_INTERFACE_INTERNAL_H
|
||||
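// Client-side sketch (illustrative): the declaration above is weak, so an
// instrumented program can supply a strong definition to redirect one-line
// error summaries. `LogLine` is a hypothetical sink, not a sanitizer API.
extern "C" void __sanitizer_report_error_summary(const char *error_summary) {
  LogLine(error_summary);  // hypothetical: forward to the app's own logger
}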
500
lib/libtsan/sanitizer_common/sanitizer_internal_defs.h
Normal file
500
lib/libtsan/sanitizer_common/sanitizer_internal_defs.h
Normal file
@@ -0,0 +1,500 @@
//===-- sanitizer_internal_defs.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer.
// It contains macros used in run-time library code.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_DEFS_H
#define SANITIZER_DEFS_H

#include "sanitizer_platform.h"
#include "sanitizer_redefine_builtins.h"

// GCC does not understand __has_feature.
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif

#ifndef SANITIZER_DEBUG
# define SANITIZER_DEBUG 0
#endif

#define SANITIZER_STRINGIFY_(S) #S
#define SANITIZER_STRINGIFY(S) SANITIZER_STRINGIFY_(S)

// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
#if SANITIZER_IMPORT_INTERFACE
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllimport)
#else
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
#endif
# define SANITIZER_WEAK_ATTRIBUTE
# define SANITIZER_WEAK_IMPORT
#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
# define SANITIZER_WEAK_IMPORT
#else
# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
# if SANITIZER_APPLE
#  define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
# else
#  define SANITIZER_WEAK_IMPORT extern "C" SANITIZER_WEAK_ATTRIBUTE
# endif  // SANITIZER_APPLE
#endif  // SANITIZER_WINDOWS

//--------------------------- WEAK FUNCTIONS ---------------------------------//
// When working with weak functions, to simplify the code and make it more
// portable, when possible define a default implementation using this macro:
//
// SANITIZER_INTERFACE_WEAK_DEF(<return_type>, <name>, <parameter list>)
//
// For example:
//   SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }
//
#if SANITIZER_WINDOWS
#include "sanitizer_win_defs.h"
# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
    WIN_WEAK_EXPORT_DEF(ReturnType, Name, __VA_ARGS__)
#else
# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
    extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE \
    ReturnType Name(__VA_ARGS__)
#endif

// SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that
// will evaluate to a null pointer when not defined.
#ifndef SANITIZER_SUPPORTS_WEAK_HOOKS
#if (SANITIZER_LINUX || SANITIZER_SOLARIS) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined
// weak symbols. Mac OS X 10.9/Darwin 13 is the first release only supported
// by Xcode >= 4.5.
#elif SANITIZER_APPLE && \
    __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
#endif  // SANITIZER_SUPPORTS_WEAK_HOOKS
// For weak hooks that will be called very often, where we want to avoid the
// overhead of executing the default implementation when it is not necessary,
// we can use the flag SANITIZER_SUPPORTS_WEAK_HOOKS to only define the default
// implementation for platforms that don't support weak symbols. For example:
//
//   #if !SANITIZER_SUPPORTS_WEAK_HOOKS
//     SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) {
//       return a > b;
//     }
//   #endif
//
// And then use it as: if (compare_hook) compare_hook(a, b);
//----------------------------------------------------------------------------//
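// To make the macro concrete: on ELF targets (the #else branch above), the
// line below expands roughly to
//   extern "C" __attribute__((visibility("default"))) __attribute__((weak))
//   bool compare(int a, int b) { return a > b; }
// so a strong definition of compare() elsewhere overrides this default.
// Illustrative sketch only; `compare` is the header's own example name.
SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }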

// We can use .preinit_array section on Linux to call sanitizer initialization
// functions very early in the process startup (unless PIC macro is defined).
//
// On FreeBSD, .preinit_array functions are called with the rtld_bind_lock
// writer lock held. This leads to deadlock if unresolved PLT functions (which
// hold the rtld_bind_lock reader lock) are called inside .preinit_array
// functions.
//
// FIXME: do we have anything like this on Mac?
#ifndef SANITIZER_CAN_USE_PREINIT_ARRAY
#if (SANITIZER_LINUX || SANITIZER_FUCHSIA || SANITIZER_NETBSD) && !defined(PIC)
#define SANITIZER_CAN_USE_PREINIT_ARRAY 1
// Before Solaris 11.4, .preinit_array is fully supported only with GNU ld.
// FIXME: Check for those conditions.
#elif SANITIZER_SOLARIS && !defined(PIC)
# define SANITIZER_CAN_USE_PREINIT_ARRAY 1
#else
# define SANITIZER_CAN_USE_PREINIT_ARRAY 0
#endif
#endif  // SANITIZER_CAN_USE_PREINIT_ARRAY

// GCC does not understand __has_feature.
#if !defined(__has_feature)
# define __has_feature(x) 0
#endif

// Older GCCs do not understand __has_attribute.
#if !defined(__has_attribute)
# define __has_attribute(x) 0
#endif

#if !defined(__has_cpp_attribute)
# define __has_cpp_attribute(x) 0
#endif

// For portability reasons we do not include stddef.h, stdint.h or any other
// system header, but we do need some basic types that are not defined
// in a portable way by the language itself.
namespace __sanitizer {

#if defined(__UINTPTR_TYPE__)
# if defined(__arm__) && defined(__linux__)
// Linux Arm headers redefine __UINTPTR_TYPE__ and disagree with clang/gcc.
typedef unsigned int uptr;
typedef int sptr;
# else
typedef __UINTPTR_TYPE__ uptr;
typedef __INTPTR_TYPE__ sptr;
# endif
#elif defined(_WIN64)
// 64-bit Windows uses the LLP64 data model.
typedef unsigned long long uptr;
typedef signed long long sptr;
#elif defined(_WIN32)
typedef unsigned int uptr;
typedef signed int sptr;
#else
# error Unsupported compiler, missing __UINTPTR_TYPE__
#endif  // defined(__UINTPTR_TYPE__)
#if defined(__x86_64__)
// Since x32 uses ILP32 data model in 64-bit hardware mode, we must use
// 64-bit pointer to unwind stack frame.
typedef unsigned long long uhwptr;
#else
typedef uptr uhwptr;
#endif
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef signed char s8;
typedef signed short s16;
typedef signed int s32;
typedef signed long long s64;
#if SANITIZER_WINDOWS
// On Windows, files are HANDLE, which is a synonym of void*.
// Use void* to avoid including <windows.h> everywhere.
typedef void* fd_t;
typedef unsigned error_t;
#else
typedef int fd_t;
typedef int error_t;
#endif
#if SANITIZER_SOLARIS && !defined(_LP64)
typedef long pid_t;
#else
typedef int pid_t;
#endif

#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_APPLE ||           \
    (SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
    (SANITIZER_LINUX && !SANITIZER_GLIBC && !SANITIZER_ANDROID) ||        \
    (SANITIZER_LINUX && (defined(__x86_64__) || defined(__hexagon__)))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
#endif
typedef u64 OFF64_T;

#ifdef __SIZE_TYPE__
typedef __SIZE_TYPE__ usize;
#else
typedef uptr usize;
#endif

#if defined(__s390__) && !defined(__s390x__)
typedef long ssize;
#else
typedef sptr ssize;
#endif

typedef u64 tid_t;

// ----------- ATTENTION -------------
// This header should NOT include any other headers to avoid portability issues.

// Common defs.
#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
#define SANITIZER_WEAK_DEFAULT_IMPL \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
#define SANITIZER_WEAK_CXX_DEFAULT_IMPL \
  extern "C++" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE

// Platform-specific defs.
#if defined(_MSC_VER)
# define ALWAYS_INLINE __forceinline
// FIXME(timurrrr): do we need this on Windows?
# define ALIAS(x)
# define ALIGNED(x) __declspec(align(x))
# define FORMAT(f, a)
# define NOINLINE __declspec(noinline)
# define NORETURN __declspec(noreturn)
# define THREADLOCAL __declspec(thread)
# define LIKELY(x) (x)
# define UNLIKELY(x) (x)
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ (void)0
# define WARN_UNUSED_RESULT
#else  // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(SANITIZER_STRINGIFY(x))))
// Please only use the ALIGNED macro before the type.
// Using ALIGNED after the variable declaration is not portable!
# define ALIGNED(x) __attribute__((aligned(x)))
# define FORMAT(f, a) __attribute__((format(printf, f, a)))
# define NOINLINE __attribute__((noinline))
# define NORETURN __attribute__((noreturn))
# define THREADLOCAL __thread
# define LIKELY(x) __builtin_expect(!!(x), 1)
# define UNLIKELY(x) __builtin_expect(!!(x), 0)
# if defined(__i386__) || defined(__x86_64__)
// __builtin_prefetch(x) generates prefetchnt0 on x86
#  define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
# else
#  define PREFETCH(x) __builtin_prefetch(x)
# endif
# define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#endif  // _MSC_VER

#if !defined(_MSC_VER) || defined(__clang__)
# define UNUSED __attribute__((unused))
# define USED __attribute__((used))
#else
# define UNUSED
# define USED
#endif

#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900)
# define NOEXCEPT noexcept
#else
# define NOEXCEPT throw()
#endif

#if __has_cpp_attribute(clang::fallthrough)
# define FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(fallthrough)
# define FALLTHROUGH [[fallthrough]]
#else
# define FALLTHROUGH
#endif

#if __has_attribute(uninitialized)
# define UNINITIALIZED __attribute__((uninitialized))
#else
# define UNINITIALIZED
#endif

// Unaligned versions of basic types.
typedef ALIGNED(1) u16 uu16;
typedef ALIGNED(1) u32 uu32;
typedef ALIGNED(1) u64 uu64;
typedef ALIGNED(1) s16 us16;
typedef ALIGNED(1) s32 us32;
typedef ALIGNED(1) s64 us64;

#if SANITIZER_WINDOWS
}  // namespace __sanitizer
typedef unsigned long DWORD;
namespace __sanitizer {
typedef DWORD thread_return_t;
# define THREAD_CALLING_CONV __stdcall
#else  // _WIN32
typedef void* thread_return_t;
# define THREAD_CALLING_CONV
#endif  // _WIN32
typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);

// NOTE: Functions below must be defined in each run-time.
void NORETURN Die();

void NORETURN CheckFailed(const char *file, int line, const char *cond,
                          u64 v1, u64 v2);

// Check macro
#define RAW_CHECK_MSG(expr, msg, ...)            \
  do {                                           \
    if (UNLIKELY(!(expr))) {                     \
      const char* msgs[] = {msg, __VA_ARGS__};   \
      for (const char* m : msgs) RawWrite(m);    \
      Die();                                     \
    }                                            \
  } while (0)

#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr "\n", )
#define RAW_CHECK_VA(expr, ...) RAW_CHECK_MSG(expr, #expr "\n", __VA_ARGS__)

#define CHECK_IMPL(c1, op, c2) \
  do { \
    __sanitizer::u64 v1 = (__sanitizer::u64)(c1); \
    __sanitizer::u64 v2 = (__sanitizer::u64)(c2); \
    if (UNLIKELY(!(v1 op v2))) \
      __sanitizer::CheckFailed(__FILE__, __LINE__, \
        "(" #c1 ") " #op " (" #c2 ")", v1, v2); \
  } while (false) \
/**/

#define CHECK(a)       CHECK_IMPL((a), !=, 0)
#define CHECK_EQ(a, b) CHECK_IMPL((a), ==, (b))
#define CHECK_NE(a, b) CHECK_IMPL((a), !=, (b))
#define CHECK_LT(a, b) CHECK_IMPL((a), <,  (b))
#define CHECK_LE(a, b) CHECK_IMPL((a), <=, (b))
#define CHECK_GT(a, b) CHECK_IMPL((a), >,  (b))
#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
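// Usage sketch (illustrative): CheckFailed() receives both evaluated operands,
// so a failing check reports "(len) <= (max)" together with the two values.
inline void CheckHeaderSize(__sanitizer::u64 len, __sanitizer::u64 max) {
  CHECK_LE(len, max);  // on failure: CheckFailed(__FILE__, __LINE__, ...), Die()
  CHECK_NE(len, 0);    // all comparisons funnel through CHECK_IMPL
}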

#if SANITIZER_DEBUG
#define DCHECK(a)       CHECK(a)
#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
#define DCHECK_NE(a, b) CHECK_NE(a, b)
#define DCHECK_LT(a, b) CHECK_LT(a, b)
#define DCHECK_LE(a, b) CHECK_LE(a, b)
#define DCHECK_GT(a, b) CHECK_GT(a, b)
#define DCHECK_GE(a, b) CHECK_GE(a, b)
#else
#define DCHECK(a)
#define DCHECK_EQ(a, b)
#define DCHECK_NE(a, b)
#define DCHECK_LT(a, b)
#define DCHECK_LE(a, b)
#define DCHECK_GT(a, b)
#define DCHECK_GE(a, b)
#endif

#define UNREACHABLE(msg) do { \
  CHECK(0 && msg); \
  Die(); \
} while (0)

#define UNIMPLEMENTED() UNREACHABLE("unimplemented")

#define COMPILER_CHECK(pred) static_assert(pred, "")

#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))

// Limits for integral types. We have to redefine them in case we don't
// have stdint.h (like in Visual Studio 9).
#undef __INT64_C
#undef __UINT64_C
#if SANITIZER_WORDSIZE == 64
# define __INT64_C(c)  c ## L
# define __UINT64_C(c) c ## UL
#else
# define __INT64_C(c)  c ## LL
# define __UINT64_C(c) c ## ULL
#endif  // SANITIZER_WORDSIZE == 64
#undef INT32_MIN
#define INT32_MIN (-2147483647-1)
#undef INT32_MAX
#define INT32_MAX (2147483647)
#undef UINT32_MAX
#define UINT32_MAX (4294967295U)
#undef INT64_MIN
#define INT64_MIN (-__INT64_C(9223372036854775807)-1)
#undef INT64_MAX
#define INT64_MAX (__INT64_C(9223372036854775807))
#undef UINT64_MAX
#define UINT64_MAX (__UINT64_C(18446744073709551615))
#undef UINTPTR_MAX
#if SANITIZER_WORDSIZE == 64
# define UINTPTR_MAX (18446744073709551615UL)
#else
# define UINTPTR_MAX (4294967295U)
#endif  // SANITIZER_WORDSIZE == 64

enum LinkerInitialized { LINKER_INITIALIZED = 0 };

#if !defined(_MSC_VER) || defined(__clang__)
# define GET_CALLER_PC()                              \
   ((__sanitizer::uptr)__builtin_extract_return_addr( \
       __builtin_return_address(0)))
# define GET_CURRENT_FRAME() ((__sanitizer::uptr)__builtin_frame_address(0))
inline void Trap() {
  __builtin_trap();
}
#else
extern "C" void* _ReturnAddress(void);
extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
# pragma intrinsic(_AddressOfReturnAddress)
# define GET_CALLER_PC() ((__sanitizer::uptr)_ReturnAddress())
// CaptureStackBackTrace doesn't need to know BP on Windows.
# define GET_CURRENT_FRAME() \
   (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))

extern "C" void __ud2(void);
# pragma intrinsic(__ud2)
inline void Trap() {
  __ud2();
}
#endif

#define HANDLE_EINTR(res, f)                                       \
  {                                                                \
    int rverrno;                                                   \
    do {                                                           \
      res = (f);                                                   \
    } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
  }
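// Usage sketch (illustrative): retry the wrapped call while it fails with
// EINTR. Assumes the runtime's POSIX wrapper internal_read(), declared
// elsewhere in the tree (sanitizer_posix.h in this layout).
inline __sanitizer::uptr ReadRetrying(__sanitizer::fd_t fd, void *buf,
                                      __sanitizer::uptr count) {
  __sanitizer::uptr res;
  HANDLE_EINTR(res, internal_read(fd, buf, count));
  return res;
}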

// Forces the compiler to generate a frame pointer in the function.
#define ENABLE_FRAME_POINTER              \
  do {                                    \
    volatile __sanitizer::uptr enable_fp; \
    enable_fp = GET_CURRENT_FRAME();      \
    (void)enable_fp;                      \
  } while (0)

// Internal thread identifier allocated by ThreadRegistry.
typedef u32 Tid;
constexpr Tid kInvalidTid = -1;
constexpr Tid kMainTid = 0;

// Stack depot stack identifier.
typedef u32 StackID;
const StackID kInvalidStackID = 0;

}  // namespace __sanitizer

namespace __asan {
using namespace __sanitizer;
}
namespace __dsan {
using namespace __sanitizer;
}
namespace __dfsan {
using namespace __sanitizer;
}
namespace __lsan {
using namespace __sanitizer;
}
namespace __msan {
using namespace __sanitizer;
}
namespace __nsan {
using namespace __sanitizer;
}
namespace __hwasan {
using namespace __sanitizer;
}
namespace __tsan {
using namespace __sanitizer;
}
namespace __scudo {
using namespace __sanitizer;
}
namespace __ubsan {
using namespace __sanitizer;
}
namespace __xray {
using namespace __sanitizer;
}
namespace __interception {
using namespace __sanitizer;
}
namespace __hwasan {
using namespace __sanitizer;
}
namespace __memprof {
using namespace __sanitizer;
}

#endif  // SANITIZER_DEFS_H
87
lib/libtsan/sanitizer_common/sanitizer_leb128.h
Normal file
87
lib/libtsan/sanitizer_common/sanitizer_leb128.h
Normal file
@@ -0,0 +1,87 @@
//===-- sanitizer_leb128.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LEB128_H
#define SANITIZER_LEB128_H

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"

namespace __sanitizer {

template <typename T, typename It>
It EncodeSLEB128(T value, It begin, It end) {
  bool more;
  do {
    u8 byte = value & 0x7f;
    // NOTE: this assumes that this signed shift is an arithmetic right shift.
    value >>= 7;
    more = !((((value == 0) && ((byte & 0x40) == 0)) ||
              ((value == -1) && ((byte & 0x40) != 0))));
    if (more)
      byte |= 0x80;
    if (UNLIKELY(begin == end))
      break;
    *(begin++) = byte;
  } while (more);
  return begin;
}

template <typename T, typename It>
It DecodeSLEB128(It begin, It end, T* v) {
  T value = 0;
  unsigned shift = 0;
  u8 byte;
  do {
    if (UNLIKELY(begin == end))
      return begin;
    byte = *(begin++);
    T slice = byte & 0x7f;
    value |= slice << shift;
    shift += 7;
  } while (byte >= 128);
  if (shift < 64 && (byte & 0x40))
    value |= (-1ULL) << shift;
  *v = value;
  return begin;
}

template <typename T, typename It>
It EncodeULEB128(T value, It begin, It end) {
  do {
    u8 byte = value & 0x7f;
    value >>= 7;
    if (value)
      byte |= 0x80;
    if (UNLIKELY(begin == end))
      break;
    *(begin++) = byte;
  } while (value);
  return begin;
}

template <typename T, typename It>
It DecodeULEB128(It begin, It end, T* v) {
  T value = 0;
  unsigned shift = 0;
  u8 byte;
  do {
    if (UNLIKELY(begin == end))
      return begin;
    byte = *(begin++);
    T slice = byte & 0x7f;
    value += slice << shift;
    shift += 7;
  } while (byte >= 128);
  *v = value;
  return begin;
}

}  // namespace __sanitizer

#endif  // SANITIZER_LEB128_H
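// Round-trip sketch (illustrative, not part of the vendored file): 300 encodes
// to the two bytes 0xAC 0x02 (low 7 bits 0x2C with the continuation bit set,
// then 0x02 for the remaining bits).
inline void Leb128RoundTrip() {
  __sanitizer::u8 buf[8];
  __sanitizer::u8 *end =
      __sanitizer::EncodeULEB128(300u, buf, buf + sizeof(buf));
  __sanitizer::u32 decoded = 0;
  __sanitizer::DecodeULEB128(buf, end, &decoded);
  CHECK_EQ(decoded, 300u);  // decoder consumed exactly the encoded bytes
}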
72
lib/libtsan/sanitizer_common/sanitizer_lfstack.h
Normal file
72
lib/libtsan/sanitizer_common/sanitizer_lfstack.h
Normal file
@@ -0,0 +1,72 @@
//===-- sanitizer_lfstack.h -=-----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Lock-free stack.
// Uses 32/17 bits as an ABA counter on 32/64-bit platforms.
// The memory passed to Push() must never be munmap'ed.
// The type T must contain a T *next field.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LFSTACK_H
#define SANITIZER_LFSTACK_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_atomic.h"

namespace __sanitizer {

template<typename T>
struct LFStack {
  void Clear() {
    atomic_store(&head_, 0, memory_order_relaxed);
  }

  bool Empty() const {
    return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
  }

  void Push(T *p) {
    u64 cmp = atomic_load(&head_, memory_order_relaxed);
    for (;;) {
      u64 cnt = (cmp & kCounterMask) + kCounterInc;
      u64 xch = (u64)(uptr)p | cnt;
      p->next = (T*)(uptr)(cmp & kPtrMask);
      if (atomic_compare_exchange_weak(&head_, &cmp, xch,
                                       memory_order_release))
        break;
    }
  }

  T *Pop() {
    u64 cmp = atomic_load(&head_, memory_order_acquire);
    for (;;) {
      T *cur = (T*)(uptr)(cmp & kPtrMask);
      if (!cur)
        return nullptr;
      T *nxt = cur->next;
      u64 cnt = (cmp & kCounterMask);
      u64 xch = (u64)(uptr)nxt | cnt;
      if (atomic_compare_exchange_weak(&head_, &cmp, xch,
                                       memory_order_acquire))
        return cur;
    }
  }

  // private:
  static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
  static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
  static const u64 kCounterMask = ~kPtrMask;
  static const u64 kCounterInc = kPtrMask + 1;

  atomic_uint64_t head_;
};
}  // namespace __sanitizer

#endif  // SANITIZER_LFSTACK_H
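// Usage sketch (illustrative): the only requirement on the element type is an
// intrusive `next` pointer; `Node` and the free-list wiring are hypothetical.
struct Node {
  Node *next;
  __sanitizer::u32 payload;
};

static __sanitizer::LFStack<Node> g_free_list;  // zero-initialized: empty

inline void Recycle(Node *n) { g_free_list.Push(n); }   // links n->next to old top
inline Node *Grab() { return g_free_list.Pop(); }       // nullptr when empty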
317
lib/libtsan/sanitizer_common/sanitizer_libc.cpp
Normal file
317
lib/libtsan/sanitizer_common/sanitizer_libc.cpp
Normal file
@@ -0,0 +1,317 @@
//===-- sanitizer_libc.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries. See sanitizer_libc.h for details.
//===----------------------------------------------------------------------===//

// Do not redefine builtins; this file is defining the builtin replacements.
#define SANITIZER_COMMON_NO_REDEFINE_BUILTINS

#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

s64 internal_atoll(const char *nptr) {
  return internal_simple_strtoll(nptr, nullptr, 10);
}

void *internal_memchr(const void *s, int c, uptr n) {
  const char *t = (const char *)s;
  for (uptr i = 0; i < n; ++i, ++t)
    if (*t == c)
      return reinterpret_cast<void *>(const_cast<char *>(t));
  return nullptr;
}

void *internal_memrchr(const void *s, int c, uptr n) {
  const char *t = (const char *)s;
  void *res = nullptr;
  for (uptr i = 0; i < n; ++i, ++t) {
    if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
  }
  return res;
}

int internal_memcmp(const void* s1, const void* s2, uptr n) {
  const char *t1 = (const char *)s1;
  const char *t2 = (const char *)s2;
  for (uptr i = 0; i < n; ++i, ++t1, ++t2)
    if (*t1 != *t2)
      return *t1 < *t2 ? -1 : 1;
  return 0;
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
                                                                const void *src,
                                                                uptr n) {
  char *d = (char*)dest;
  const char *s = (const char *)src;
  for (uptr i = 0; i < n; ++i)
    d[i] = s[i];
  return dest;
}

SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
    void *dest, const void *src, uptr n) {
  char *d = (char*)dest;
  const char *s = (const char *)src;
  sptr i, signed_n = (sptr)n;
  CHECK_GE(signed_n, 0);
  if (d < s) {
    for (i = 0; i < signed_n; ++i)
      d[i] = s[i];
  } else {
    if (d > s && signed_n > 0) {
      for (i = signed_n - 1; i >= 0; --i) {
        d[i] = s[i];
      }
    }
  }
  return dest;
}

SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
                                                                uptr n) {
  // Optimize for the most performance-critical case:
  if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {
    u64 *p = reinterpret_cast<u64*>(s);
    u64 *e = p + n / 8;
    u64 v = c;
    v |= v << 8;
    v |= v << 16;
    v |= v << 32;
    for (; p < e; p += 2)
      p[0] = p[1] = v;
    return s;
  }
  // The next line prevents Clang from making a call to memset() instead of the
  // loop below.
  // FIXME: building the runtime with -ffreestanding is a better idea. However
  // there currently are linktime problems due to PR12396.
  char volatile *t = (char*)s;
  for (uptr i = 0; i < n; ++i, ++t) {
    *t = c;
  }
  return s;
}
}  // extern "C"

uptr internal_strcspn(const char *s, const char *reject) {
  uptr i;
  for (i = 0; s[i]; i++) {
    if (internal_strchr(reject, s[i]))
      return i;
  }
  return i;
}

char* internal_strdup(const char *s) {
  uptr len = internal_strlen(s);
  char *s2 = (char*)InternalAlloc(len + 1);
  internal_memcpy(s2, s, len);
  s2[len] = 0;
  return s2;
}

int internal_strcmp(const char *s1, const char *s2) {
  while (true) {
    unsigned c1 = *s1;
    unsigned c2 = *s2;
    if (c1 != c2) return (c1 < c2) ? -1 : 1;
    if (c1 == 0) break;
    s1++;
    s2++;
  }
  return 0;
}

int internal_strncmp(const char *s1, const char *s2, uptr n) {
  for (uptr i = 0; i < n; i++) {
    unsigned c1 = *s1;
    unsigned c2 = *s2;
    if (c1 != c2) return (c1 < c2) ? -1 : 1;
    if (c1 == 0) break;
    s1++;
    s2++;
  }
  return 0;
}

char* internal_strchr(const char *s, int c) {
  while (true) {
    if (*s == (char)c)
      return const_cast<char *>(s);
    if (*s == 0)
      return nullptr;
    s++;
  }
}

char *internal_strchrnul(const char *s, int c) {
  char *res = internal_strchr(s, c);
  if (!res)
    res = const_cast<char *>(s) + internal_strlen(s);
  return res;
}

char *internal_strrchr(const char *s, int c) {
  const char *res = nullptr;
  for (uptr i = 0; s[i]; i++) {
    if (s[i] == c) res = s + i;
  }
  return const_cast<char *>(res);
}

uptr internal_strlen(const char *s) {
  uptr i = 0;
  while (s[i]) i++;
  return i;
}

uptr internal_strlcat(char *dst, const char *src, uptr maxlen) {
  const uptr srclen = internal_strlen(src);
  const uptr dstlen = internal_strnlen(dst, maxlen);
  if (dstlen == maxlen) return maxlen + srclen;
  if (srclen < maxlen - dstlen) {
    internal_memmove(dst + dstlen, src, srclen + 1);
  } else {
    internal_memmove(dst + dstlen, src, maxlen - dstlen - 1);
    dst[maxlen - 1] = '\0';
  }
  return dstlen + srclen;
}

char *internal_strncat(char *dst, const char *src, uptr n) {
  uptr len = internal_strlen(dst);
  uptr i;
  for (i = 0; i < n && src[i]; i++)
    dst[len + i] = src[i];
  dst[len + i] = 0;
  return dst;
}

wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src) {
  wchar_t *dst_it = dst;
  do {
    *dst_it++ = *src++;
  } while (*src);
  return dst;
}

uptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {
  const uptr srclen = internal_strlen(src);
  if (srclen < maxlen) {
    internal_memmove(dst, src, srclen + 1);
  } else if (maxlen != 0) {
    internal_memmove(dst, src, maxlen - 1);
    dst[maxlen - 1] = '\0';
  }
  return srclen;
}

char *internal_strncpy(char *dst, const char *src, uptr n) {
  uptr i;
  for (i = 0; i < n && src[i]; i++)
    dst[i] = src[i];
  internal_memset(dst + i, '\0', n - i);
  return dst;
}

wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr n) {
  uptr i;
  for (i = 0; i < n && src[i]; ++i)
    dst[i] = src[i];
  internal_memset(dst + i, 0, (n - i) * sizeof(wchar_t));
  return dst;
}

uptr internal_strnlen(const char *s, uptr maxlen) {
  uptr i = 0;
  while (i < maxlen && s[i]) i++;
  return i;
}

char *internal_strstr(const char *haystack, const char *needle) {
  // This is O(N^2), but we are not using it in hot places.
  uptr len1 = internal_strlen(haystack);
  uptr len2 = internal_strlen(needle);
  if (len1 < len2) return nullptr;
  for (uptr pos = 0; pos <= len1 - len2; pos++) {
    if (internal_memcmp(haystack + pos, needle, len2) == 0)
      return const_cast<char *>(haystack) + pos;
  }
  return nullptr;
}

s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {
  CHECK_EQ(base, 10);
  while (IsSpace(*nptr)) nptr++;
  int sgn = 1;
  u64 res = 0;
  bool have_digits = false;
  char *old_nptr = const_cast<char *>(nptr);
  if (*nptr == '+') {
    sgn = 1;
    nptr++;
  } else if (*nptr == '-') {
    sgn = -1;
    nptr++;
  }
  while (IsDigit(*nptr)) {
    res = (res <= UINT64_MAX / 10) ? res * 10 : UINT64_MAX;
    int digit = ((*nptr) - '0');
    res = (res <= UINT64_MAX - digit) ? res + digit : UINT64_MAX;
    have_digits = true;
    nptr++;
  }
  if (endptr) {
    *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr;
  }
  if (sgn > 0) {
    return (s64)(Min((u64)INT64_MAX, res));
  } else {
    return (res > INT64_MAX) ? INT64_MIN : ((s64)res * -1);
  }
}

uptr internal_wcslen(const wchar_t *s) {
  uptr i = 0;
  while (s[i]) i++;
  return i;
}

uptr internal_wcsnlen(const wchar_t *s, uptr maxlen) {
  uptr i = 0;
  while (i < maxlen && s[i]) i++;
  return i;
}

bool mem_is_zero(const char *beg, uptr size) {
  CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40));  // Sanity check.
  const char *end = beg + size;
  uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
  uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
  uptr all = 0;
  // Prologue.
  for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
    all |= *mem;
  // Aligned loop.
  for (; aligned_beg < aligned_end; aligned_beg++)
    all |= *aligned_beg;
  // Epilogue.
  if ((char *)aligned_end >= beg) {
    for (const char *mem = (char *)aligned_end; mem < end; mem++) all |= *mem;
  }
  return all == 0;
}

}  // namespace __sanitizer
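// Semantics sketch (illustrative, not part of the vendored file):
// internal_strlcpy NUL-terminates whenever maxlen > 0 and returns the full
// source length, so truncation is detectable as ret >= maxlen.
inline void StrlcpyTruncationExample() {
  char buf[4];
  __sanitizer::uptr ret =
      __sanitizer::internal_strlcpy(buf, "sanitizer", sizeof(buf));
  CHECK_EQ(ret, 9);                                       // strlen("sanitizer")
  CHECK_EQ(__sanitizer::internal_strcmp(buf, "san"), 0);  // 3 chars + NUL fit
}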
108
lib/libtsan/sanitizer_common/sanitizer_libc.h
Normal file
108
lib/libtsan/sanitizer_common/sanitizer_libc.h
Normal file
@@ -0,0 +1,108 @@
//===-- sanitizer_libc.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// These tools cannot use some of the libc functions directly because those
// functions are intercepted. Instead, we implement a tiny subset of libc here.
// FIXME: Some of the functions declared in this file are in fact POSIX, not
// libc.
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LIBC_H
#define SANITIZER_LIBC_H

// ----------- ATTENTION -------------
// This header should NOT include any other headers from sanitizer runtime.
#include "sanitizer_internal_defs.h"

namespace __sanitizer {

// internal_X() is a custom implementation of X() for use in RTL.

extern "C" {
// These are used as builtin replacements; see sanitizer_redefine_builtins.h.
// In normal runtime code, use the __sanitizer::internal_X() aliases instead.
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memcpy(void *dest,
                                                                const void *src,
                                                                uptr n);
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memmove(
    void *dest, const void *src, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE void *__sanitizer_internal_memset(void *s, int c,
                                                                uptr n);
}  // extern "C"

// String functions
s64 internal_atoll(const char *nptr);
void *internal_memchr(const void *s, int c, uptr n);
void *internal_memrchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
ALWAYS_INLINE void *internal_memcpy(void *dest, const void *src, uptr n) {
  return __sanitizer_internal_memcpy(dest, src, n);
}
ALWAYS_INLINE void *internal_memmove(void *dest, const void *src, uptr n) {
  return __sanitizer_internal_memmove(dest, src, n);
}
// Should not be used in performance-critical places.
ALWAYS_INLINE void *internal_memset(void *s, int c, uptr n) {
  return __sanitizer_internal_memset(s, c, n);
}
char* internal_strchr(const char *s, int c);
char *internal_strchrnul(const char *s, int c);
int internal_strcmp(const char *s1, const char *s2);
uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s);
uptr internal_strlen(const char *s);
uptr internal_strlcat(char *dst, const char *src, uptr maxlen);
char *internal_strncat(char *dst, const char *src, uptr n);
int internal_strncmp(const char *s1, const char *s2, uptr n);
uptr internal_strlcpy(char *dst, const char *src, uptr maxlen);
char *internal_strncpy(char *dst, const char *src, uptr n);
uptr internal_strnlen(const char *s, uptr maxlen);
char *internal_strrchr(const char *s, int c);
char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);
int internal_snprintf(char *buffer, uptr length, const char *format, ...)
    FORMAT(3, 4);
uptr internal_wcslen(const wchar_t *s);
uptr internal_wcsnlen(const wchar_t *s, uptr maxlen);
wchar_t *internal_wcscpy(wchar_t *dst, const wchar_t *src);
wchar_t *internal_wcsncpy(wchar_t *dst, const wchar_t *src, uptr maxlen);
// Return true if all bytes in [mem, mem+size) are zero.
// Optimized for the case when the result is true.
bool mem_is_zero(const char *mem, uptr size);

// I/O
// Define these as macros so we can use them in linker initialized global
// structs without dynamic initialization.
#define kInvalidFd ((fd_t)-1)
#define kStdinFd ((fd_t)0)
#define kStdoutFd ((fd_t)1)
#define kStderrFd ((fd_t)2)

uptr internal_ftruncate(fd_t fd, uptr size);

// OS
void NORETURN internal__exit(int exitcode);
void internal_sleep(unsigned seconds);
void internal_usleep(u64 useconds);

uptr internal_getpid();
uptr internal_getppid();

int internal_dlinfo(void *handle, int request, void *p);

// Threading
uptr internal_sched_yield();

// Error handling
bool internal_iserror(uptr retval, int *rverrno = nullptr);

}  // namespace __sanitizer

#endif  // SANITIZER_LIBC_H
131
lib/libtsan/sanitizer_common/sanitizer_libignore.cpp
Normal file
131
lib/libtsan/sanitizer_common/sanitizer_libignore.cpp
Normal file
@@ -0,0 +1,131 @@
//===-- sanitizer_libignore.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE || \
    SANITIZER_NETBSD

#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"

namespace __sanitizer {

LibIgnore::LibIgnore(LinkerInitialized) {
}

void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
  Lock lock(&mutex_);
  if (count_ >= kMaxLibs) {
    Report("%s: too many ignored libraries (max: %zu)\n", SanitizerToolName,
           kMaxLibs);
    Die();
  }
  Lib *lib = &libs_[count_++];
  lib->templ = internal_strdup(name_templ);
  lib->name = nullptr;
  lib->real_name = nullptr;
  lib->range_id = kInvalidCodeRangeId;
}

void LibIgnore::OnLibraryLoaded(const char *name) {
  Lock lock(&mutex_);
  // Try to match suppressions with symlink target.
  InternalMmapVector<char> buf(kMaxPathLength);
  if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
      buf[0]) {
    for (uptr i = 0; i < count_; i++) {
      Lib *lib = &libs_[i];
      if (!lib->loaded() && (!lib->real_name) &&
          TemplateMatch(lib->templ, name))
        lib->real_name = internal_strdup(buf.data());
    }
  }

  // Scan suppressions list and find newly loaded and unloaded libraries.
  ListOfModules modules;
  modules.init();
  for (uptr i = 0; i < count_; i++) {
    Lib *lib = &libs_[i];
    bool loaded = false;
    for (const auto &mod : modules) {
      for (const auto &range : mod.ranges()) {
        if (!range.executable)
          continue;
        if (!TemplateMatch(lib->templ, mod.full_name()) &&
            !(lib->real_name &&
              internal_strcmp(lib->real_name, mod.full_name()) == 0))
          continue;
        if (loaded) {
          Report("%s: called_from_lib suppression '%s' is matched against"
                 " 2 libraries: '%s' and '%s'\n",
                 SanitizerToolName, lib->templ, lib->name, mod.full_name());
          Die();
        }
        loaded = true;
        if (lib->loaded())
          continue;
        VReport(1,
                "Matched called_from_lib suppression '%s' against library"
                " '%s'\n",
                lib->templ, mod.full_name());
        lib->name = internal_strdup(mod.full_name());
        const uptr idx =
            atomic_load(&ignored_ranges_count_, memory_order_relaxed);
        CHECK_LT(idx, ARRAY_SIZE(ignored_code_ranges_));
        ignored_code_ranges_[idx].OnLoad(range.beg, range.end);
        // Record the index of the ignored range.
        lib->range_id = idx;
        atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
        break;
      }
    }
    if (lib->loaded() && !loaded) {
      VReport(1,
              "%s: library '%s' that was matched against called_from_lib"
              " suppression '%s' is unloaded\n",
              SanitizerToolName, lib->name, lib->templ);
      // The library is unloaded so mark the ignored code range as unloaded.
      ignored_code_ranges_[lib->range_id].OnUnload();
      lib->range_id = kInvalidCodeRangeId;
    }
  }

  // Track instrumented ranges.
  if (track_instrumented_libs_) {
    for (const auto &mod : modules) {
      if (!mod.instrumented())
        continue;
      for (const auto &range : mod.ranges()) {
        if (!range.executable)
          continue;
        if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
          continue;
        VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
                (void *)range.beg, (void *)range.end, mod.full_name());
        const uptr idx =
            atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
        CHECK_LT(idx, ARRAY_SIZE(instrumented_code_ranges_));
        instrumented_code_ranges_[idx].OnLoad(range.beg, range.end);
        atomic_store(&instrumented_ranges_count_, idx + 1,
                     memory_order_release);
      }
    }
  }
}

void LibIgnore::OnLibraryUnloaded() {
  OnLibraryLoaded(nullptr);
}

}  // namespace __sanitizer

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_APPLE ||
        // SANITIZER_NETBSD
126
lib/libtsan/sanitizer_common/sanitizer_libignore.h
Normal file
126
lib/libtsan/sanitizer_common/sanitizer_libignore.h
Normal file
@@ -0,0 +1,126 @@
//===-- sanitizer_libignore.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// LibIgnore allows ignoring all interceptors called from a particular set
// of dynamic libraries. LibIgnore can be initialized with several templates
// of names of libraries to be ignored. It finds code ranges for the libraries
// and checks whether the provided PC value belongs to the code ranges.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LIBIGNORE_H
#define SANITIZER_LIBIGNORE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_atomic.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {

class LibIgnore {
 public:
  explicit LibIgnore(LinkerInitialized);

  // Must be called during initialization.
  void AddIgnoredLibrary(const char *name_templ);
  void IgnoreNoninstrumentedModules(bool enable) {
    track_instrumented_libs_ = enable;
  }

  // Must be called after a new dynamic library is loaded.
  void OnLibraryLoaded(const char *name);

  // Must be called after a dynamic library is unloaded.
  void OnLibraryUnloaded();

  // Checks whether the provided PC belongs to one of the ignored libraries or
  // whether the PC should be ignored because it belongs to a non-instrumented
  // module (when ignore_noninstrumented_modules=1). Also returns true via
  // "pc_in_ignored_lib" if the PC is in an ignored library, false otherwise.
  bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;

  // Checks whether the provided PC belongs to an instrumented module.
  bool IsPcInstrumented(uptr pc) const;

 private:
  static const uptr kMaxIgnoredRanges = 128;
  static const uptr kMaxInstrumentedRanges = 1024;
  static const uptr kMaxLibs = 1024;
  static const uptr kInvalidCodeRangeId = -1;

  struct Lib {
    char *templ;
    char *name;
    char *real_name;  // target of symlink
    uptr range_id;
    bool loaded() const { return range_id != kInvalidCodeRangeId; };
  };

  struct LibCodeRange {
    bool IsInRange(uptr pc) const {
      return (pc >= begin && pc < atomic_load(&end, memory_order_acquire));
    }

    void OnLoad(uptr b, uptr e) {
      begin = b;
      atomic_store(&end, e, memory_order_release);
    }

    void OnUnload() { atomic_store(&end, 0, memory_order_release); }

   private:
    uptr begin;
    // A value of 0 means the associated module was unloaded.
    atomic_uintptr_t end;
  };

  // Hot part:
  atomic_uintptr_t ignored_ranges_count_;
  LibCodeRange ignored_code_ranges_[kMaxIgnoredRanges];

  atomic_uintptr_t instrumented_ranges_count_;
  LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];

  // Cold part:
  Mutex mutex_;
  uptr count_;
  Lib libs_[kMaxLibs];
  bool track_instrumented_libs_;

  // Disallow copying of LibIgnore objects.
  LibIgnore(const LibIgnore&);  // not implemented
  void operator = (const LibIgnore&);  // not implemented
};

inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
  const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
  for (uptr i = 0; i < n; i++) {
    if (ignored_code_ranges_[i].IsInRange(pc)) {
      *pc_in_ignored_lib = true;
      return true;
    }
  }
  *pc_in_ignored_lib = false;
  if (track_instrumented_libs_ && !IsPcInstrumented(pc))
    return true;
  return false;
}

inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
  const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
  for (uptr i = 0; i < n; i++) {
    if (instrumented_code_ranges_[i].IsInRange(pc))
      return true;
  }
  return false;
}

}  // namespace __sanitizer

#endif  // SANITIZER_LIBIGNORE_H
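// Wiring sketch (illustrative): the hook names and the global instance are
// hypothetical; each tool (e.g. TSan) wires LibIgnore into its own init path.
static __sanitizer::LibIgnore g_lib_ignore(__sanitizer::LINKER_INITIALIZED);

inline void InitIgnores() {
  g_lib_ignore.AddIgnoredLibrary("libfoo*.so");  // suppression-style template
}

inline void OnDlopenHook(const char *path) {
  g_lib_ignore.OnLibraryLoaded(path);  // rescans the module list
}

inline bool SkipInterceptor(__sanitizer::uptr caller_pc) {
  bool in_ignored_lib;
  return g_lib_ignore.IsIgnored(caller_pc, &in_ignored_lib);
}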
2862
lib/libtsan/sanitizer_common/sanitizer_linux.cpp
Normal file
2862
lib/libtsan/sanitizer_common/sanitizer_linux.cpp
Normal file
File diff suppressed because it is too large
207
lib/libtsan/sanitizer_common/sanitizer_linux.h
Normal file
207
lib/libtsan/sanitizer_common/sanitizer_linux.h
Normal file
@@ -0,0 +1,207 @@
//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Linux-specific syscall wrappers and classes.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LINUX_H
#define SANITIZER_LINUX_H

#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS
# include "sanitizer_common.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_platform_limits_freebsd.h"
# include "sanitizer_platform_limits_netbsd.h"
# include "sanitizer_platform_limits_posix.h"
# include "sanitizer_platform_limits_solaris.h"
# include "sanitizer_posix.h"

struct link_map;  // Opaque type returned by dlopen().
struct utsname;

namespace __sanitizer {
// Dirent structure for getdents(). Note that this structure is different from
// the one in <dirent.h>, which is used by readdir().
struct linux_dirent;

struct ProcSelfMapsBuff {
  char *data;
  uptr mmaped_size;
  uptr len;
};

struct MemoryMappingLayoutData {
  ProcSelfMapsBuff proc_self_maps;
  const char *current;
};

void ReadProcMaps(ProcSelfMapsBuff *proc_maps);

// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void *ss, void *oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
                          __sanitizer_sigset_t *oldset);

void SetSigProcMask(__sanitizer_sigset_t *set, __sanitizer_sigset_t *oldset);
void BlockSignals(__sanitizer_sigset_t *oldset = nullptr);
struct ScopedBlockSignals {
  explicit ScopedBlockSignals(__sanitizer_sigset_t *copy);
  ~ScopedBlockSignals();

  ScopedBlockSignals &operator=(const ScopedBlockSignals &) = delete;
  ScopedBlockSignals(const ScopedBlockSignals &) = delete;

 private:
  __sanitizer_sigset_t saved_;
};

# if SANITIZER_GLIBC
uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
# endif

// Linux-only syscalls.
# if SANITIZER_LINUX
uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
#  if defined(__x86_64__)
uptr internal_arch_prctl(int option, uptr arg2);
#  endif
// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#  if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
      defined(__powerpc64__) || defined(__s390__) || defined(__i386__) ||  \
      defined(__arm__) || SANITIZER_RISCV64 || SANITIZER_LOONGARCH64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr);
#  endif
int internal_uname(struct utsname *buf);
# elif SANITIZER_FREEBSD
uptr internal_procctl(int type, int id, int cmd, void *data);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
# elif SANITIZER_NETBSD
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg);
# endif  // SANITIZER_LINUX

// This class reads thread IDs from /proc/<pid>/task using only syscalls.
class ThreadLister {
 public:
  explicit ThreadLister(pid_t pid);
  enum Result {
    Error,
    Incomplete,
    Ok,
  };
  Result ListThreads(InternalMmapVector<tid_t> *threads);
  const char *LoadStatus(tid_t tid);

 private:
  bool IsAlive(tid_t tid);

  InternalScopedString task_path_;
  InternalScopedString status_path_;
  InternalMmapVector<char> buffer_;
};
||||
// Exposed for testing.
|
||||
uptr ThreadDescriptorSize();
|
||||
uptr ThreadSelf();
|
||||
|
||||
// Matches a library's file name against a base name (stripping path and version
|
||||
// information).
|
||||
bool LibraryNameIs(const char *full_name, const char *base_name);
|
||||
|
||||
// Call cb for each region mapped by map.
|
||||
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
|
||||
|
||||
// Releases memory pages entirely within the [beg, end] address range.
|
||||
// The pages no longer count toward RSS; reads are guaranteed to return 0.
|
||||
// Requires (but does not verify!) that pages are MAP_PRIVATE.
|
||||
inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
|
||||
// man madvise on Linux promises zero-fill for anonymous private pages.
|
||||
// Testing shows the same behaviour for private (but not anonymous) mappings
|
||||
// of shm_open() files, as long as the underlying file is untouched.
|
||||
CHECK(SANITIZER_LINUX);
|
||||
ReleaseMemoryPagesToOS(beg, end);
|
||||
}
|
||||
|
||||
# if SANITIZER_ANDROID
|
||||
|
||||
# if defined(__aarch64__)
|
||||
# define __get_tls() \
|
||||
({ \
|
||||
void **__v; \
|
||||
__asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
|
||||
__v; \
|
||||
})
|
||||
# elif defined(__arm__)
|
||||
# define __get_tls() \
|
||||
({ \
|
||||
void **__v; \
|
||||
__asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
|
||||
__v; \
|
||||
})
|
||||
# elif defined(__mips__)
|
||||
// On mips32r1, this goes via a kernel illegal instruction trap that's
|
||||
// optimized for v1.
|
||||
# define __get_tls() \
|
||||
({ \
|
||||
register void **__v asm("v1"); \
|
||||
__asm__( \
|
||||
".set push\n" \
|
||||
".set mips32r2\n" \
|
||||
"rdhwr %0,$29\n" \
|
||||
".set pop\n" \
|
||||
: "=r"(__v)); \
|
||||
__v; \
|
||||
})
|
||||
# elif defined(__riscv)
|
||||
# define __get_tls() \
|
||||
({ \
|
||||
void **__v; \
|
||||
__asm__("mv %0, tp" : "=r"(__v)); \
|
||||
__v; \
|
||||
})
|
||||
# elif defined(__i386__)
|
||||
# define __get_tls() \
|
||||
({ \
|
||||
void **__v; \
|
||||
__asm__("movl %%gs:0, %0" : "=r"(__v)); \
|
||||
__v; \
|
||||
})
|
||||
# elif defined(__x86_64__)
|
||||
# define __get_tls() \
|
||||
({ \
|
||||
void **__v; \
|
||||
__asm__("mov %%fs:0, %0" : "=r"(__v)); \
|
||||
__v; \
|
||||
})
|
||||
# else
|
||||
# error "Unsupported architecture."
|
||||
# endif
|
||||
|
||||
// The Android Bionic team has allocated a TLS slot for sanitizers starting
|
||||
// with Q, given that Android currently doesn't support ELF TLS. It is used to
|
||||
// store sanitizer thread specific data.
|
||||
static const int TLS_SLOT_SANITIZER = 6;
|
||||
|
||||
ALWAYS_INLINE uptr *get_android_tls_ptr() {
|
||||
return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
|
||||
}
|
||||
|
||||
# endif // SANITIZER_ANDROID
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif
|
||||
#endif // SANITIZER_LINUX_H
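For context, a hedged sketch of how `ThreadLister` is typically driven. The retry-on-`Incomplete` loop and the callback shape are assumptions inferred from the `Result` enum, not code from this header:

// Hypothetical caller, in the style of sanitizer_stoptheworld users.
void ForEachThread(pid_t pid, void (*cb)(tid_t tid)) {
  __sanitizer::ThreadLister lister(pid);
  __sanitizer::InternalMmapVector<tid_t> threads;
  __sanitizer::ThreadLister::Result res;
  do {
    // Incomplete presumably means the snapshot raced with thread
    // creation/exit, so the caller retries until a stable listing is seen.
    res = lister.ListThreads(&threads);
  } while (res == __sanitizer::ThreadLister::Incomplete);
  if (res != __sanitizer::ThreadLister::Ok)
    return;  // Error: /proc/<pid>/task could not be read
  for (tid_t tid : threads) cb(tid);
}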
1142
lib/libtsan/sanitizer_common/sanitizer_linux_libcdep.cpp
Normal file
File diff suppressed because it is too large
222
lib/libtsan/sanitizer_common/sanitizer_linux_s390.cpp
Normal file
@@ -0,0 +1,222 @@
//===-- sanitizer_linux_s390.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements s390-linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_LINUX && SANITIZER_S390

#  include <dlfcn.h>
#  include <errno.h>
#  include <sys/syscall.h>
#  include <sys/utsname.h>
#  include <unistd.h>

#  include "sanitizer_libc.h"
#  include "sanitizer_linux.h"

namespace __sanitizer {

// --------------- sanitizer_libc.h
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
                   u64 offset) {
  struct s390_mmap_params {
    unsigned long addr;
    unsigned long length;
    unsigned long prot;
    unsigned long flags;
    unsigned long fd;
    unsigned long offset;
  } params = {
    (unsigned long)addr, (unsigned long)length, (unsigned long)prot,
    (unsigned long)flags, (unsigned long)fd,
#  ifdef __s390x__
    (unsigned long)offset,
#  else
    (unsigned long)(offset / 4096),
#  endif
  };
#  ifdef __s390x__
  return syscall(__NR_mmap, &params);
#  else
  return syscall(__NR_mmap2, &params);
#  endif
}

uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                    int *parent_tidptr, void *newtls, int *child_tidptr) {
  if (!fn || !child_stack) {
    errno = EINVAL;
    return -1;
  }
  CHECK_EQ(0, (uptr)child_stack % 16);
  // Minimum frame size.
#  ifdef __s390x__
  child_stack = (char *)child_stack - 160;
#  else
  child_stack = (char *)child_stack - 96;
#  endif
  // Terminate unwind chain.
  ((unsigned long *)child_stack)[0] = 0;
  // And pass parameters.
  ((unsigned long *)child_stack)[1] = (uptr)fn;
  ((unsigned long *)child_stack)[2] = (uptr)arg;
  register uptr res __asm__("r2");
  register void *__cstack __asm__("r2") = child_stack;
  register long __flags __asm__("r3") = flags;
  register int *__ptidptr __asm__("r4") = parent_tidptr;
  register int *__ctidptr __asm__("r5") = child_tidptr;
  register void *__newtls __asm__("r6") = newtls;

  __asm__ __volatile__(
      /* Clone. */
      "svc %1\n"

      /* if (%r2 != 0)
       *   return;
       */
#  ifdef __s390x__
      "cghi %%r2, 0\n"
#  else
      "chi %%r2, 0\n"
#  endif
      "jne 1f\n"

      /* Call "fn(arg)". */
#  ifdef __s390x__
      "lmg %%r1, %%r2, 8(%%r15)\n"
#  else
      "lm %%r1, %%r2, 4(%%r15)\n"
#  endif
      "basr %%r14, %%r1\n"

      /* Call _exit(%r2). */
      "svc %2\n"

      /* Return to parent. */
      "1:\n"
      : "=r"(res)
      : "i"(__NR_clone), "i"(__NR_exit), "r"(__cstack), "r"(__flags),
        "r"(__ptidptr), "r"(__ctidptr), "r"(__newtls)
      : "memory", "cc");
  if (res >= (uptr)-4095) {
    errno = -res;
    return -1;
  }
  return res;
}

#  if SANITIZER_S390_64
static bool FixedCVE_2016_2143() {
  // Try to determine if the running kernel has a fix for CVE-2016-2143,
  // return false if in doubt (better safe than sorry). Distros may want to
  // adjust this for their own kernels.
  struct utsname buf;
  unsigned int major, minor, patch = 0;
  // This should never fail, but just in case...
  if (internal_uname(&buf))
    return false;
  const char *ptr = buf.release;
  major = internal_simple_strtoll(ptr, &ptr, 10);
  // At least the first two components must be present.
  if (ptr[0] != '.')
    return false;
  minor = internal_simple_strtoll(ptr + 1, &ptr, 10);
  // The third is optional.
  if (ptr[0] == '.')
    patch = internal_simple_strtoll(ptr + 1, &ptr, 10);
  if (major < 3) {
    if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
        internal_strstr(ptr, ".el6")) {
      // Check RHEL6
      int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
      if (r1 >= 657)  // 2.6.32-657.el6 or later
        return true;
      if (r1 == 642 && ptr[0] == '.') {
        int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
        if (r2 >= 9)  // 2.6.32-642.9.1.el6 or later
          return true;
      }
    }
    // <3.0 is bad.
    return false;
  } else if (major == 3) {
    // 3.2.79+ is OK.
    if (minor == 2 && patch >= 79)
      return true;
    // 3.12.58+ is OK.
    if (minor == 12 && patch >= 58)
      return true;
    if (minor == 10 && patch == 0 && ptr[0] == '-' &&
        internal_strstr(ptr, ".el7")) {
      // Check RHEL7
      int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
      if (r1 >= 426)  // 3.10.0-426.el7 or later
        return true;
      if (r1 == 327 && ptr[0] == '.') {
        int r2 = internal_simple_strtoll(ptr + 1, &ptr, 10);
        if (r2 >= 27)  // 3.10.0-327.27.1.el7 or later
          return true;
      }
    }
    // Otherwise, bad.
    return false;
  } else if (major == 4) {
    // 4.1.21+ is OK.
    if (minor == 1 && patch >= 21)
      return true;
    // 4.4.6+ is OK.
    if (minor == 4 && patch >= 6)
      return true;
    if (minor == 4 && patch == 0 && ptr[0] == '-' &&
        internal_strstr(buf.version, "Ubuntu")) {
      // Check Ubuntu 16.04
      int r1 = internal_simple_strtoll(ptr + 1, &ptr, 10);
      if (r1 >= 13)  // 4.4.0-13 or later
        return true;
    }
    // Otherwise, OK if 4.5+.
    return minor >= 5;
  } else {
    // Linux 5 and up are fine.
    return true;
  }
}

void AvoidCVE_2016_2143() {
  // Older kernels are affected by CVE-2016-2143 - they will crash hard
  // if someone uses 4-level page tables (i.e. virtual addresses >= 4TB)
  // and fork() in the same process. Unfortunately, sanitizers tend to
  // require such addresses. Since this is very likely to crash the whole
  // machine (sanitizers themselves use fork() for llvm-symbolizer, for one),
  // abort the process at initialization instead.
  if (FixedCVE_2016_2143())
    return;
  if (GetEnv("SANITIZER_IGNORE_CVE_2016_2143"))
    return;
  Report(
      "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using "
      "ASan,\n"
      "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
      "machine, or worse.\n"
      "\n"
      "If you are certain your kernel is not vulnerable (you have compiled it\n"
      "yourself, or are using an unrecognized distribution kernel), you can\n"
      "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
      "with any value.\n");
  Die();
}
#  endif

}  // namespace __sanitizer

#endif  // SANITIZER_LINUX && SANITIZER_S390
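The version gate above hinges on decomposing `utsname::release` strings such as `3.10.0-327.27.1.el7` into numeric components. A small self-contained illustration of the same decomposition using standard C library calls (purely explanatory; the runtime uses its internal `internal_simple_strtoll` instead, and the sample string is an arbitrary example):

#include <cstdio>
#include <cstdlib>

int main() {
  const char *release = "3.10.0-327.27.1.el7";  // example RHEL7 string
  char *p = nullptr;
  long major = strtol(release, &p, 10);                  // 3,  p -> ".10.0-..."
  long minor = strtol(p + 1, &p, 10);                    // 10, p -> ".0-..."
  long patch = (*p == '.') ? strtol(p + 1, &p, 10) : 0;  // 0,  p -> "-327..."
  long r1 = (*p == '-') ? strtol(p + 1, &p, 10) : 0;     // distro build: 327
  long r2 = (*p == '.') ? strtol(p + 1, &p, 10) : 0;     // sub-build: 27
  printf("%ld.%ld.%ld-%ld.%ld\n", major, minor, patch, r1, r2);
}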
166
lib/libtsan/sanitizer_common/sanitizer_list.h
Normal file
@@ -0,0 +1,166 @@
//===-- sanitizer_list.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of a list class to be used by
// ThreadSanitizer, etc run-times.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LIST_H
#define SANITIZER_LIST_H

#include "sanitizer_internal_defs.h"

namespace __sanitizer {

// Intrusive singly-linked list with size(), push_back(), push_front(),
// pop_front(), append_front() and append_back().
// This class should be a POD (so that it can be put into TLS)
// and an object with all zero fields should represent a valid empty list.
// This class does not have a CTOR, so clear() should be called on all
// non-zero-initialized objects before use.
template <class Item>
struct IntrusiveList {
  friend class Iterator;

  void clear() {
    first_ = last_ = nullptr;
    size_ = 0;
  }

  bool empty() const { return size_ == 0; }
  uptr size() const { return size_; }

  void push_back(Item *x) {
    if (empty()) {
      x->next = nullptr;
      first_ = last_ = x;
      size_ = 1;
    } else {
      x->next = nullptr;
      last_->next = x;
      last_ = x;
      size_++;
    }
  }

  void push_front(Item *x) {
    if (empty()) {
      x->next = nullptr;
      first_ = last_ = x;
      size_ = 1;
    } else {
      x->next = first_;
      first_ = x;
      size_++;
    }
  }

  void pop_front() {
    CHECK(!empty());
    first_ = first_->next;
    if (!first_)
      last_ = nullptr;
    size_--;
  }

  void extract(Item *prev, Item *x) {
    CHECK(!empty());
    CHECK_NE(prev, nullptr);
    CHECK_NE(x, nullptr);
    CHECK_EQ(prev->next, x);
    prev->next = x->next;
    if (last_ == x)
      last_ = prev;
    size_--;
  }

  Item *front() { return first_; }
  const Item *front() const { return first_; }
  Item *back() { return last_; }
  const Item *back() const { return last_; }

  void append_front(IntrusiveList<Item> *l) {
    CHECK_NE(this, l);
    if (l->empty())
      return;
    if (empty()) {
      *this = *l;
    } else {
      l->last_->next = first_;
      first_ = l->first_;
      size_ += l->size();
    }
    l->clear();
  }

  void append_back(IntrusiveList<Item> *l) {
    CHECK_NE(this, l);
    if (l->empty())
      return;
    if (empty()) {
      *this = *l;
    } else {
      last_->next = l->first_;
      last_ = l->last_;
      size_ += l->size();
    }
    l->clear();
  }

  void CheckConsistency() {
    if (size_ == 0) {
      CHECK_EQ(first_, 0);
      CHECK_EQ(last_, 0);
    } else {
      uptr count = 0;
      for (Item *i = first_;; i = i->next) {
        count++;
        if (i == last_) break;
      }
      CHECK_EQ(size(), count);
      CHECK_EQ(last_->next, 0);
    }
  }

  template <class ItemTy>
  class IteratorBase {
   public:
    explicit IteratorBase(ItemTy *current) : current_(current) {}
    IteratorBase &operator++() {
      current_ = current_->next;
      return *this;
    }
    bool operator!=(IteratorBase other) const {
      return current_ != other.current_;
    }
    ItemTy &operator*() { return *current_; }

   private:
    ItemTy *current_;
  };

  typedef IteratorBase<Item> Iterator;
  typedef IteratorBase<const Item> ConstIterator;

  Iterator begin() { return Iterator(first_); }
  Iterator end() { return Iterator(0); }

  ConstIterator begin() const { return ConstIterator(first_); }
  ConstIterator end() const { return ConstIterator(0); }

  // private, don't use directly.
  uptr size_;
  Item *first_;
  Item *last_;
};

}  // namespace __sanitizer

#endif  // SANITIZER_LIST_H
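A hedged usage sketch of the class above: the list is intrusive, so the element type supplies its own `next` pointer and the list itself never allocates. The `Node` type and the static zero-initialization are illustrative assumptions, not code from this header:

#include "sanitizer_list.h"

using namespace __sanitizer;

struct Node {
  Node *next;  // required by IntrusiveList: the link lives inside the element
  int value;
};

void Demo() {
  // Zero-initialized statics are valid empty lists (the class is a POD).
  static IntrusiveList<Node> list;
  static Node a = {nullptr, 1}, b = {nullptr, 2};
  list.push_back(&a);
  list.push_front(&b);  // list is now: b, a
  for (Node &n : list)  // range-for works via begin()/end()
    (void)n.value;
  list.CheckConsistency();
}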
76
lib/libtsan/sanitizer_common/sanitizer_local_address_space_view.h
Normal file
@@ -0,0 +1,76 @@
//===-- sanitizer_local_address_space_view.h --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// `LocalAddressSpaceView` provides the local (i.e. target and current address
// space are the same) implementation of the `AddressSpaceView` interface,
// which provides a simple interface to load memory from another process (i.e.
// out-of-process).
//
// The `AddressSpaceView` interface requires that the type can be used as a
// template parameter to objects that wish to be able to operate in an
// out-of-process manner. In normal usage, objects are in-process and are thus
// instantiated with the `LocalAddressSpaceView` type. This type is used to
// load any pointers in instance methods. This implementation is effectively
// a no-op. When an object is to be used in an out-of-process manner, it is
// instantiated with the `RemoteAddressSpaceView` type.
//
// By making `AddressSpaceView` a template parameter of an object, it can
// change its implementation at compile time, which has no run-time overhead.
// This also allows unifying in-process and out-of-process code, which avoids
// code duplication.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_LOCAL_ADDRESS_SPACE_VIEW_H
#define SANITIZER_LOCAL_ADDRESS_SPACE_VIEW_H

#include "sanitizer_internal_defs.h"

namespace __sanitizer {
struct LocalAddressSpaceView {
  // Load `sizeof(T) * num_elements` bytes of memory from the target
  // process (always local for this implementation) starting at address
  // `target_address`. The local copy of this memory is returned as a pointer.
  // The caller should not write to this memory; the behaviour when doing so
  // is undefined. Callers should use `LoadWritable()` to get access to memory
  // that is writable.
  //
  // The lifetime of loaded memory is implementation defined.
  template <typename T>
  static const T *Load(const T *target_address, uptr num_elements = 1) {
    // The target address space is the local address space, so
    // nothing needs to be copied. Just return the pointer.
    return target_address;
  }

  // Load `sizeof(T) * num_elements` bytes of memory from the target
  // process (always local for this implementation) starting at address
  // `target_address`. The local copy of this memory is returned as a pointer.
  // The memory returned may be written to.
  //
  // Writes made to the returned memory will be visible in the memory returned
  // by subsequent `Load()` or `LoadWritable()` calls, provided the
  // `target_address` parameter is the same. It is not guaranteed that the
  // memory returned by previous calls to `Load()` will contain any performed
  // writes. If two or more overlapping regions of memory are loaded via
  // separate calls to `LoadWritable()`, it is implementation defined whether
  // writes made to the region returned by one call are visible in the regions
  // returned by other calls.
  //
  // Given the above, it is recommended to load the largest possible object
  // that requires modification (e.g. a class) rather than individual fields
  // of a class, to avoid issues with overlapping writable regions.
  //
  // The lifetime of loaded memory is implementation defined.
  template <typename T>
  static T *LoadWritable(T *target_address, uptr num_elements = 1) {
    // The target address space is the local address space, so
    // nothing needs to be copied. Just return the pointer.
    return target_address;
  }
};
}  // namespace __sanitizer

#endif
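A hedged sketch of the pattern the header comment describes: a container takes `AddressSpaceView` as a template parameter and routes every dereference through it, so the same code can be compiled for in-process use (`LocalAddressSpaceView`) or out-of-process use. The `Counters` class is an illustrative assumption, not a type from the runtime:

// Hypothetical consumer of the AddressSpaceView interface.
template <typename AddressSpaceView = __sanitizer::LocalAddressSpaceView>
class Counters {
 public:
  Counters(const __sanitizer::u64 *data, __sanitizer::uptr n)
      : data_(data), n_(n) {}

  __sanitizer::u64 Sum() const {
    // All pointer accesses go through Load(); with LocalAddressSpaceView
    // this is a no-op wrapper that compiles down to a plain pointer access.
    const __sanitizer::u64 *local = AddressSpaceView::Load(data_, n_);
    __sanitizer::u64 sum = 0;
    for (__sanitizer::uptr i = 0; i < n_; i++) sum += local[i];
    return sum;
  }

 private:
  const __sanitizer::u64 *data_;  // address in the *target* address space
  __sanitizer::uptr n_;
};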
159
lib/libtsan/sanitizer_common/sanitizer_lzw.h
Normal file
@@ -0,0 +1,159 @@
//===-- sanitizer_lzw.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Lempel–Ziv–Welch encoding/decoding
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_LZW_H
#define SANITIZER_LZW_H

#include "sanitizer_dense_map.h"

namespace __sanitizer {

using LzwCodeType = u32;

template <class T, class ItIn, class ItOut>
ItOut LzwEncode(ItIn begin, ItIn end, ItOut out) {
  using Substring =
      detail::DenseMapPair<LzwCodeType /* Prefix */, T /* Next input */>;

  // Sentinel value for substrings of len 1.
  static constexpr LzwCodeType kNoPrefix =
      Min(DenseMapInfo<Substring>::getEmptyKey().first,
          DenseMapInfo<Substring>::getTombstoneKey().first) -
      1;
  DenseMap<Substring, LzwCodeType> prefix_to_code;
  {
    // Add all substrings of len 1 as the initial dictionary.
    InternalMmapVector<T> dict_len1;
    for (auto it = begin; it != end; ++it)
      if (prefix_to_code.try_emplace({kNoPrefix, *it}, 0).second)
        dict_len1.push_back(*it);

    // Slightly helps with later delta encoding.
    Sort(dict_len1.data(), dict_len1.size());

    // For large sizeof(T) we have to store dict_len1. Smaller types like u8
    // can just generate them.
    *out = dict_len1.size();
    ++out;

    for (uptr i = 0; i != dict_len1.size(); ++i) {
      // Remap after the Sort.
      prefix_to_code[{kNoPrefix, dict_len1[i]}] = i;
      *out = dict_len1[i];
      ++out;
    }
    CHECK_EQ(prefix_to_code.size(), dict_len1.size());
  }

  if (begin == end)
    return out;

  // Main LZW encoding loop.
  LzwCodeType match = prefix_to_code.find({kNoPrefix, *begin})->second;
  ++begin;
  for (auto it = begin; it != end; ++it) {
    // Extend the match with the new item.
    auto ins = prefix_to_code.try_emplace({match, *it}, prefix_to_code.size());
    if (ins.second) {
      // This is a new substring, but emit the code for the current match
      // (before the extension). This allows the LZW decoder to recover the
      // dictionary.
      *out = match;
      ++out;
      // Reset the match to a single item, which must already be in the map.
      match = prefix_to_code.find({kNoPrefix, *it})->second;
    } else {
      // Already known, use as the current match.
      match = ins.first->second;
    }
  }

  *out = match;
  ++out;

  return out;
}

template <class T, class ItIn, class ItOut>
ItOut LzwDecode(ItIn begin, ItIn end, ItOut out) {
  if (begin == end)
    return out;

  // Load the dictionary of len 1 substrings. These correspond to the lowest
  // codes.
  InternalMmapVector<T> dict_len1(*begin);
  ++begin;

  if (begin == end)
    return out;

  for (auto &v : dict_len1) {
    v = *begin;
    ++begin;
  }

  // Substrings of len 2 and up. Indexes are shifted because codes in
  // [0, dict_len1.size()) are stored in dict_len1. Substrings get here after
  // being emitted to the output, so we can use output positions.
  InternalMmapVector<detail::DenseMapPair<ItOut /* begin */, ItOut /* end */>>
      code_to_substr;

  // Copies already emitted substrings into the output again.
  auto copy = [&code_to_substr, &dict_len1](LzwCodeType code, ItOut out) {
    if (code < dict_len1.size()) {
      *out = dict_len1[code];
      ++out;
      return out;
    }
    const auto &s = code_to_substr[code - dict_len1.size()];

    for (ItOut it = s.first; it != s.second; ++it, ++out) *out = *it;
    return out;
  };

  // Returns the length of the substring with the given code.
  auto code_to_len = [&code_to_substr, &dict_len1](LzwCodeType code) -> uptr {
    if (code < dict_len1.size())
      return 1;
    const auto &s = code_to_substr[code - dict_len1.size()];
    return s.second - s.first;
  };

  // Main LZW decoding loop.
  LzwCodeType prev_code = *begin;
  ++begin;
  out = copy(prev_code, out);
  for (auto it = begin; it != end; ++it) {
    LzwCodeType code = *it;
    auto start = out;
    if (code == dict_len1.size() + code_to_substr.size()) {
      // Special LZW case. The code is not in the dictionary yet. This is
      // possible only when the new substring is the same as the previous one
      // plus the first item of the previous substring. We can emit that in
      // two steps.
      out = copy(prev_code, out);
      *out = *start;
      ++out;
    } else {
      out = copy(code, out);
    }

    // Every time the encoder emits a code, it also creates a substring of
    // len + 1 including the first item of the just-emitted substring. Do the
    // same here.
    uptr len = code_to_len(prev_code);
    code_to_substr.push_back({start - len, start + 1});

    prev_code = code;
  }
  return out;
}

}  // namespace __sanitizer
#endif
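A hedged round-trip sketch of the two templates above. Both take input iterators plus an output iterator, and the element type `T` must be passed explicitly. The use of `std::vector` and `std::back_inserter` is an illustrative assumption; inside the runtime, `InternalMmapVector`-backed storage is used instead. Note that the decoder re-reads output it has already written (to expand dictionary entries), so it needs a random-access destination such as a raw pointer into a sufficiently large buffer, while the encoder can use any output iterator:

#include <cstdint>
#include <iterator>
#include <vector>

void RoundTrip(const std::vector<uint8_t> &input) {
  // Encode: codes are only written, so back_inserter is fine.
  std::vector<__sanitizer::LzwCodeType> codes;
  __sanitizer::LzwEncode<uint8_t>(input.begin(), input.end(),
                                  std::back_inserter(codes));

  // Decode: write through a raw pointer into a pre-sized buffer, since the
  // decoder indexes backwards into its own output.
  std::vector<uint8_t> decoded(input.size());
  uint8_t *end = __sanitizer::LzwDecode<uint8_t>(codes.begin(), codes.end(),
                                                 decoded.data());
  decoded.resize(end - decoded.data());  // decoded now equals input
}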
1465
lib/libtsan/sanitizer_common/sanitizer_mac.cpp
Normal file
File diff suppressed because it is too large
79
lib/libtsan/sanitizer_common/sanitizer_mac.h
Normal file
@@ -0,0 +1,79 @@
//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries and
// provides definitions for OSX-specific functions.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_APPLE_H
#define SANITIZER_APPLE_H

#include "sanitizer_common.h"
#include "sanitizer_platform.h"
#if SANITIZER_APPLE
#include "sanitizer_posix.h"

namespace __sanitizer {

struct MemoryMappingLayoutData {
  int current_image;
  u32 current_magic;
  u32 current_filetype;
  ModuleArch current_arch;
  u8 current_uuid[kModuleUUIDSize];
  int current_load_cmd_count;
  const char *current_load_cmd_addr;
  bool current_instrumented;
};

template <typename VersionType>
struct VersionBase {
  u16 major;
  u16 minor;

  VersionBase(u16 major, u16 minor) : major(major), minor(minor) {}

  bool operator==(const VersionType &other) const {
    return major == other.major && minor == other.minor;
  }
  bool operator>=(const VersionType &other) const {
    return major > other.major ||
           (major == other.major && minor >= other.minor);
  }
  bool operator<(const VersionType &other) const { return !(*this >= other); }
};

struct MacosVersion : VersionBase<MacosVersion> {
  MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
};

struct DarwinKernelVersion : VersionBase<DarwinKernelVersion> {
  DarwinKernelVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
};

MacosVersion GetMacosAlignedVersion();
DarwinKernelVersion GetDarwinKernelVersion();

char **GetEnviron();

void RestrictMemoryToMaxAddress(uptr max_address);

using ThreadEventCallback = void (*)(uptr thread);
using ThreadCreateEventCallback = void (*)(uptr thread, bool gcd_worker);
struct ThreadEventCallbacks {
  ThreadCreateEventCallback create;
  ThreadEventCallback start;
  ThreadEventCallback terminate;
  ThreadEventCallback destroy;
};

void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks);

}  // namespace __sanitizer

#endif  // SANITIZER_APPLE
#endif  // SANITIZER_APPLE_H
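The CRTP pattern above (`struct MacosVersion : VersionBase<MacosVersion>`) gives each concrete version type a full comparison set from just `==` and `>=`, without allowing a `MacosVersion` to be compared against a `DarwinKernelVersion`. A hedged usage sketch; the `10.15` threshold is an arbitrary illustrative value, not a check performed by the runtime:

// Hypothetical feature gate built on the version types above.
bool CanUseModernAPIs() {
  using namespace __sanitizer;
  // operator>= comes from VersionBase; operator< is derived from it.
  return GetMacosAlignedVersion() >= MacosVersion(10, 15);
}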
29
lib/libtsan/sanitizer_common/sanitizer_mac_libcdep.cpp
Normal file
@@ -0,0 +1,29 @@
//===-- sanitizer_mac_libcdep.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries and
// implements OSX-specific functions.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_APPLE
#include "sanitizer_mac.h"

#include <sys/mman.h>

namespace __sanitizer {

void RestrictMemoryToMaxAddress(uptr max_address) {
  uptr size_to_mmap = GetMaxUserVirtualAddress() + 1 - max_address;
  void *res = MmapFixedNoAccess(max_address, size_to_mmap, "high gap");
  CHECK(res != MAP_FAILED);
}

}  // namespace __sanitizer

#endif  // SANITIZER_APPLE
Some files were not shown because too many files have changed in this diff.