stage2: fix Cache deadlock and build more of TSAN

 * rename is_compiler_rt_or_libc to skip_linker_dependencies
   and set it to `true` for all sub-Compilations. I believe this
   resolves the deadlock we were experiencing on Drone CI and on
   some users' computers. I will remove the CI workaround in a
   follow-up commit.
 * enabling TSAN automatically causes the Compilation to link against
   libc++ even if not requested, because TSAN depends on libc++.
 * add -fno-rtti flags where appropriate when building TSAN objects.
   Thanks Firefox317 for pointing this out.
 * TSAN support: resolve all the undefined symbols. We are still seeing
   a dependency on __gcc_personality_v0 but will resolve this one in a
   follow-up commit.
 * static libs do not try to build libc++ or libc++abi.
This commit is contained in:
Andrew Kelley
2020-12-22 19:25:24 -07:00
parent 42b4a48bc9
commit 8219d92987
48 changed files with 10172 additions and 41 deletions

View File

@@ -0,0 +1,83 @@
//===-- interception_linux.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
#include "interception.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
#include <dlfcn.h> // for dlsym() and dlvsym()
namespace __interception {
#if SANITIZER_NETBSD
// Equality test for two C strings (used for NetBSD symbol renames).
// NOTE: despite the name this is NOT a three-way strcmp(): it returns
// true (1) when the strings are identical and false (0) otherwise.
static int StrCmp(const char *s1, const char *s2) {
  for (;; ++s1, ++s2) {
    if (*s1 != *s2)
      return false;
    if (*s1 == '\0')
      return true;
  }
}
#endif
// Looks up the address of the real (uninstrumented) `name` via the dynamic
// linker. `wrapper_addr` is the address of our interceptor for `name` and is
// used to detect when the lookup found the wrapper itself. Returns null when
// no usable real definition can be found.
static void *GetFuncAddr(const char *name, uptr wrapper_addr) {
#if SANITIZER_NETBSD
  // FIXME: Find a better way to handle renames
  // NetBSD exports sigaction under a versioned name.
  if (StrCmp(name, "sigaction"))
    name = "__sigaction14";
#endif
  void *addr = dlsym(RTLD_NEXT, name);
  if (!addr) {
    // If the lookup using RTLD_NEXT failed, the sanitizer runtime library is
    // later in the library search order than the DSO that we are trying to
    // intercept, which means that we cannot intercept this function. We still
    // want the address of the real definition, though, so look it up using
    // RTLD_DEFAULT.
    addr = dlsym(RTLD_DEFAULT, name);
    // In case `name' is not loaded, dlsym ends up finding the actual wrapper.
    // We don't want to intercept the wrapper and have it point to itself.
    if ((uptr)addr == wrapper_addr)
      addr = nullptr;
  }
  return addr;
}
bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
uptr wrapper) {
void *addr = GetFuncAddr(name, wrapper);
*ptr_to_real = (uptr)addr;
return addr && (func == wrapper);
}
// Android, Solaris and OpenBSD do not have dlvsym
#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
// Versioned variant: looks up the real address of `name` at the specific
// symbol version `ver` (glibc-style symbol versioning) via dlvsym().
static void *GetFuncAddr(const char *name, const char *ver) {
  return dlvsym(RTLD_NEXT, name, ver);
}
// Versioned variant of InterceptFunction(): stores the real, versioned
// implementation into *ptr_to_real and returns whether `func` resolved to
// our `wrapper`.
bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
                       uptr func, uptr wrapper) {
  void *addr = GetFuncAddr(name, ver);
  *ptr_to_real = (uptr)addr;
  return addr && (func == wrapper);
}
#endif  // !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
} // namespace __interception
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
// SANITIZER_OPENBSD || SANITIZER_SOLARIS

View File

@@ -0,0 +1,18 @@
//===-- interception_mac.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Mac-specific interception methods.
//===----------------------------------------------------------------------===//
#include "interception.h"
#if SANITIZER_MAC
#endif // SANITIZER_MAC

View File

@@ -0,0 +1,39 @@
//===-- interception_type_test.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Compile-time tests of the internal type definitions.
//===----------------------------------------------------------------------===//
#include "interception.h"
#if SANITIZER_LINUX || SANITIZER_MAC
#include <sys/types.h>
#include <stddef.h>
#include <stdint.h>
// Verify at compile time that the interception layer's type aliases match
// the platform's native libc types; a size mismatch would corrupt
// arguments/returns of intercepted calls.
COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
#if !SANITIZER_MAC
// Mac has no off64_t; on the other platforms OFF64_T must match it.
COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
#endif
// The following are the cases when pread (and friends) is used instead of
// pread64. In those cases we need OFF_T to match off_t. We don't care about the
// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
# if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \
  _FILE_OFFSET_BITS != 64
COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
# endif
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,22 @@
//===-- sanitizer_allocator_checks.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
// allocators.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_errno.h"
namespace __sanitizer {
// Sets libc's errno to ENOMEM via the sanitizer-internal errno_ENOMEM
// constant, so sanitizer allocators report failure the same way libc does.
void SetErrnoToENOMEM() {
  errno = errno_ENOMEM;
}
} // namespace __sanitizer

View File

@@ -0,0 +1,137 @@
//===-- sanitizer_allocator_report.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
///
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_report.h"
#include "sanitizer_common.h"
#include "sanitizer_report_decorator.h"
namespace __sanitizer {
// RAII helper that frames an allocator error report. The constructor takes
// the global error-report lock (via the `lock` member) and switches output
// to the decorator's error color; Report(...) calls are made while the
// object is alive; the destructor restores the default color, prints the
// captured stack, the allocator_may_return_null hint, and the one-line
// error summary. Destruction order of members matters: the lock is released
// last.
class ScopedAllocatorErrorReport {
 public:
  ScopedAllocatorErrorReport(const char *error_summary_,
                             const StackTrace *stack_)
      : error_summary(error_summary_),
        stack(stack_) {
    Printf("%s", d.Error());
  }
  ~ScopedAllocatorErrorReport() {
    Printf("%s", d.Default());
    stack->Print();
    PrintHintAllocatorCannotReturnNull();
    ReportErrorSummary(error_summary, stack);
  }

 private:
  ScopedErrorReportLock lock;  // serializes concurrent error reports
  const char *error_summary;
  const StackTrace* const stack;
  const SanitizerCommonDecorator d;
};
// Each Report* function below prints a fatal allocator diagnostic and
// terminates. The inner scope ensures the ScopedAllocatorErrorReport
// destructor fully flushes the report (stack, hint, summary) and releases
// the report lock before Die() terminates the process.

// Fatal: calloc(count, size) where count * size overflows size_t.
void NORETURN ReportCallocOverflow(uptr count, uptr size,
                                   const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("calloc-overflow", stack);
    Report("ERROR: %s: calloc parameters overflow: count * size (%zd * %zd) "
           "cannot be represented in type size_t\n", SanitizerToolName, count,
           size);
  }
  Die();
}

// Fatal: reallocarray(ptr, count, size) where count * size overflows size_t.
void NORETURN ReportReallocArrayOverflow(uptr count, uptr size,
                                         const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("reallocarray-overflow", stack);
    Report(
        "ERROR: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
        "cannot be represented in type size_t\n",
        SanitizerToolName, count, size);
  }
  Die();
}

// Fatal: pvalloc(size) where size rounded up to the page size overflows.
void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("pvalloc-overflow", stack);
    Report("ERROR: %s: pvalloc parameters overflow: size 0x%zx rounded up to "
           "system page size 0x%zx cannot be represented in type size_t\n",
           SanitizerToolName, size, GetPageSizeCached());
  }
  Die();
}

// Fatal: an allocation was requested with a non-power-of-two alignment.
void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
                                               const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-allocation-alignment", stack);
    Report("ERROR: %s: invalid allocation alignment: %zd, alignment must be a "
           "power of two\n", SanitizerToolName, alignment);
  }
  Die();
}

// Fatal: aligned_alloc() constraints violated (message wording differs on
// POSIX, where alignment must additionally be a power of two).
void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                                 const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-aligned-alloc-alignment", stack);
#if SANITIZER_POSIX
    Report("ERROR: %s: invalid alignment requested in "
           "aligned_alloc: %zd, alignment must be a power of two and the "
           "requested size 0x%zx must be a multiple of alignment\n",
           SanitizerToolName, alignment, size);
#else
    Report("ERROR: %s: invalid alignment requested in aligned_alloc: %zd, "
           "the requested size 0x%zx must be a multiple of alignment\n",
           SanitizerToolName, alignment, size);
#endif
  }
  Die();
}

// Fatal: posix_memalign() alignment is not a power of two or not a
// multiple of sizeof(void*).
void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
                                                  const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("invalid-posix-memalign-alignment",
                                      stack);
    Report(
        "ERROR: %s: invalid alignment requested in "
        "posix_memalign: %zd, alignment must be a power of two and a "
        "multiple of sizeof(void*) == %zd\n",
        SanitizerToolName, alignment, sizeof(void *));
  }
  Die();
}

// Fatal: requested allocation exceeds the allocator's supported maximum.
void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
                                         const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("allocation-size-too-big", stack);
    Report("ERROR: %s: requested allocation size 0x%zx exceeds maximum "
           "supported size of 0x%zx\n", SanitizerToolName, user_size, max_size);
  }
  Die();
}

// Fatal: the allocator ran out of memory for the requested size.
void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
  {
    ScopedAllocatorErrorReport report("out-of-memory", stack);
    Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx "
           "bytes\n", SanitizerToolName, requested_size);
  }
  Die();
}
} // namespace __sanitizer

View File

@@ -0,0 +1,149 @@
//===-- sanitizer_common_libcdep.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"
namespace __sanitizer {
// Single registered callback invoked by the background thread when the
// soft RSS limit is crossed (exceeded == true) or when RSS drops back
// under the limit (exceeded == false).
static void (*SoftRssLimitExceededCallback)(bool exceeded);
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
  CHECK_EQ(SoftRssLimitExceededCallback, nullptr);  // register at most once
  SoftRssLimitExceededCallback = Callback;
}

#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
// Weak default implementation for when sanitizer_stackdepot is not linked in;
// callers treat a null result as "no stack depot statistics available".
SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
  return nullptr;
}
// Body of the sanitizer watchdog thread (started by
// MaybeStartBackgroudThread). Every 100ms it samples the process RSS and:
//  * at high verbosity, logs RSS / stack-depot growth (10% steps),
//  * enforces hard_rss_limit_mb by dumping the process map and dying,
//  * drives the soft_rss_limit_mb edge-triggered callback,
//  * prints a heap profile when heap_profile is set and RSS grew >10%.
// Never returns.
void *BackgroundThread(void *arg) {
  const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
  const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
  const bool heap_profile = common_flags()->heap_profile;
  uptr prev_reported_rss = 0;
  uptr prev_reported_stack_depot_size = 0;
  bool reached_soft_rss_limit = false;
  uptr rss_during_last_reported_profile = 0;
  while (true) {
    SleepForMillis(100);
    const uptr current_rss_mb = GetRSS() >> 20;  // bytes -> megabytes
    if (Verbosity()) {
      // If RSS has grown 10% since last time, print some information.
      if (prev_reported_rss * 11 / 10 < current_rss_mb) {
        Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
        prev_reported_rss = current_rss_mb;
      }
      // If stack depot has grown 10% since last time, print it too.
      StackDepotStats *stack_depot_stats = StackDepotGetStats();
      if (stack_depot_stats) {
        if (prev_reported_stack_depot_size * 11 / 10 <
            stack_depot_stats->allocated) {
          Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
                 SanitizerToolName,
                 stack_depot_stats->n_uniq_ids,
                 stack_depot_stats->allocated >> 20);
          prev_reported_stack_depot_size = stack_depot_stats->allocated;
        }
      }
    }
    // Check RSS against the limit.
    if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
      Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
             SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
      DumpProcessMap();
      Die();
    }
    if (soft_rss_limit_mb) {
      if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
        reached_soft_rss_limit = true;
        Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
               SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(true);
      } else if (soft_rss_limit_mb >= current_rss_mb &&
                 reached_soft_rss_limit) {
        // RSS dropped back under the limit: notify with exceeded == false.
        reached_soft_rss_limit = false;
        if (SoftRssLimitExceededCallback)
          SoftRssLimitExceededCallback(false);
      }
    }
    if (heap_profile &&
        current_rss_mb > rss_during_last_reported_profile * 1.1) {
      Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
      __sanitizer_print_memory_profile(90, 20);
      rss_during_last_reported_profile = current_rss_mb;
    }
  }
}
#endif
// Writes `msg` to syslog, one line per syslog call: syslog (at least on
// Android) has an implicit message length limit, so the message is split
// at every '\n'.
void WriteToSyslog(const char *msg) {
  // Work on a mutable copy so newlines can be replaced with terminators.
  InternalScopedString msg_copy(kErrorMessageBufferSize);
  msg_copy.append("%s", msg);
  char *line = msg_copy.data();
  // Emit each complete (newline-terminated) line.
  for (char *nl = internal_strchr(line, '\n'); nl;
       nl = internal_strchr(line, '\n')) {
    *nl = '\0';
    WriteOneLineToSyslog(line);
    line = nl + 1;
  }
  // Emit the trailing partial line, if any. Note that this adds an extra
  // newline at the end.
  // FIXME: buffer extra output. This would need a thread-local buffer, which
  // on Android requires plugging into the tools (ex. ASan's) Thread class.
  if (*line)
    WriteOneLineToSyslog(line);
}
// Starts the RSS-watchdog BackgroundThread if any of the flags it serves
// (hard/soft RSS limit, heap profile) is set. No-op on platforms where the
// thread is not implemented. (Name keeps the historical "Backgroud" typo;
// it is part of the public sanitizer API.)
void MaybeStartBackgroudThread() {
#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
    !SANITIZER_GO  // Need to implement/test on other platforms.
  // Start the background thread if one of the rss limits is given.
  if (!common_flags()->hard_rss_limit_mb &&
      !common_flags()->soft_rss_limit_mb &&
      !common_flags()->heap_profile) return;
  // Weak-symbol address test: pthread_create interceptor may be absent.
  if (!&real_pthread_create) return;  // Can't spawn the thread anyway.
  internal_start_thread(BackgroundThread, nullptr);
#endif
}
// Callback run from __sanitizer_sandbox_on_notify after platform sandbox
// preparation; lets tools do their own pre-sandbox work.
static void (*sandboxing_callback)();
void SetSandboxingCallback(void (*f)()) {
  sandboxing_callback = f;
}
// Reserves a range of `size` bytes whose returned base is aligned to
// `align` (a power of two). For alignments no larger than the page size
// the plain Init() already yields a sufficiently aligned base; otherwise
// the range is over-reserved by `align` bytes and the base rounded up.
uptr ReservedAddressRange::InitAligned(uptr size, uptr align,
                                       const char *name) {
  CHECK(IsPowerOfTwo(align));
  if (align <= GetPageSizeCached())
    return Init(size, name);
  const uptr unaligned_base = Init(size + align, name);
  return unaligned_base + (align - (unaligned_base & (align - 1)));
}
} // namespace __sanitizer
// Public entry point called by the embedder before entering a sandbox:
// performs platform preparation, then runs the tool callback registered
// via SetSandboxingCallback(), if any.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
                             __sanitizer_sandbox_arguments *args) {
  __sanitizer::PlatformPrepareForSandboxing(args);
  if (__sanitizer::sandboxing_callback)
    __sanitizer::sandboxing_callback();
}

View File

@@ -0,0 +1,34 @@
//===-- sanitizer_common_nolibc.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains stubs for libc function to facilitate optional use of
// libc in no-libcdep sources.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
namespace __sanitizer {
// The Windows implementations of these functions use the win32 API directly,
// bypassing libc.
#if !SANITIZER_WINDOWS
#if SANITIZER_LINUX
// No-op stub: no printf-style log sink without libc.
void LogMessageOnPrintf(const char *str) {}
#endif
// No-op stub: syslog is unavailable without libc.
void WriteToSyslog(const char *buffer) {}
// Terminate via the raw exit syscall; libc abort() is unavailable.
void Abort() { internal__exit(1); }
// Sleep via the internal syscall wrapper instead of libc sleep().
void SleepForSeconds(int seconds) { internal_sleep(seconds); }
#endif  // !SANITIZER_WINDOWS

#if !SANITIZER_WINDOWS && !SANITIZER_MAC
// Stub: the module list stays empty in no-libc builds.
void ListOfModules::init() {}
#endif
} // namespace __sanitizer

View File

@@ -0,0 +1,846 @@
//===-- sanitizer_linux_libcdep.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_freebsd.h"
#include "sanitizer_getauxval.h"
#include "sanitizer_glibc_version.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include <dlfcn.h> // for dlsym()
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <syslog.h>
#if !defined(ElfW)
#define ElfW(type) Elf_##type
#endif
#if SANITIZER_FREEBSD
#include <pthread_np.h>
#include <osreldate.h>
#include <sys/sysctl.h>
#define pthread_getattr_np pthread_attr_get_np
#endif
#if SANITIZER_OPENBSD
#include <pthread_np.h>
#include <sys/sysctl.h>
#endif
#if SANITIZER_NETBSD
#include <sys/sysctl.h>
#include <sys/tls.h>
#include <lwp.h>
#endif
#if SANITIZER_SOLARIS
#include <stdlib.h>
#include <thread.h>
#endif
#if SANITIZER_ANDROID
#include <android/api-level.h>
#if !defined(CPU_COUNT) && !defined(__aarch64__)
#include <dirent.h>
#include <fcntl.h>
struct __sanitizer::linux_dirent {
long d_ino;
off_t d_off;
unsigned short d_reclen;
char d_name[];
};
#endif
#endif
#if !SANITIZER_ANDROID
#include <elf.h>
#include <unistd.h>
#endif
namespace __sanitizer {
// Weak: defined only when the interceptor library providing the saved
// "real" sigaction is linked in.
SANITIZER_WEAK_ATTRIBUTE int
real_sigaction(int signum, const void *act, void *oldact);

// Installs a signal handler, preferring real_sigaction when that weak
// symbol is present, and falling back to plain libc sigaction() otherwise.
int internal_sigaction(int signum, const void *act, void *oldact) {
#if !SANITIZER_GO
  // Address-of test on a weak symbol: non-null iff it was linked in.
  if (&real_sigaction)
    return real_sigaction(signum, act, oldact);
#endif
  return sigaction(signum, (const struct sigaction *)act,
                   (struct sigaction *)oldact);
}
// Computes the calling thread's stack range as [*stack_bottom, *stack_top).
// At initialization (main thread; libpthread may not be usable yet) the
// range is derived from the /proc memory map plus RLIMIT_STACK; afterwards
// it is obtained from the platform's thread-introspection API.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
    if (proc_maps.Error()) {
      *stack_top = *stack_bottom = 0;
      return;
    }
    MemoryMappedSegment segment;
    uptr prev_end = 0;
    // &rl lives on the stack, so the first segment ending above it is the
    // stack mapping; prev_end is the end of the mapping just below it.
    while (proc_maps.Next(&segment)) {
      if ((uptr)&rl < segment.end) break;
      prev_end = segment.end;
    }
    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = segment.end;
    *stack_bottom = segment.end - stacksize;
    return;
  }
  uptr stacksize = 0;
  void *stackaddr = nullptr;
#if SANITIZER_SOLARIS
  stack_t ss;
  CHECK_EQ(thr_stksegment(&ss), 0);
  stacksize = ss.ss_size;
  stackaddr = (char *)ss.ss_sp - stacksize;
#elif SANITIZER_OPENBSD
  stack_t sattr;
  CHECK_EQ(pthread_stackseg_np(pthread_self(), &sattr), 0);
  stackaddr = sattr.ss_sp;
  stacksize = sattr.ss_size;
#else  // !SANITIZER_SOLARIS
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
  pthread_attr_destroy(&attr);
#endif  // SANITIZER_SOLARIS
  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}
#if !SANITIZER_GO
// Sets an environment variable via the real libc setenv() looked up with
// dlsym(RTLD_NEXT, ...), bypassing any interceptor. Returns true on
// success, false if setenv could not be found or failed.
bool SetEnv(const char *name, const char *value) {
  void *f = dlsym(RTLD_NEXT, "setenv");
  if (!f)
    return false;
  typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
  setenv_ft setenv_f;
  CHECK_EQ(sizeof(setenv_f), sizeof(f));
  // memcpy instead of a cast: converting void* to a function pointer
  // directly is not portable.
  internal_memcpy(&setenv_f, &f, sizeof(f));
  return setenv_f(name, value, 1) == 0;
}
#endif
// Parses the running glibc version ("glibc X.Y[.Z]" from confstr) into
// *major / *minor / *patch. Returns false when the version string is
// unavailable, too long, or not glibc. Missing components default to 0.
__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
                                                   int *patch) {
#ifdef _CS_GNU_LIBC_VERSION
  char buf[64];
  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
  if (len >= sizeof(buf))
    return false;
  buf[len] = 0;
  static const char kGLibC[] = "glibc ";
  if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0)
    return false;
  const char *p = buf + sizeof(kGLibC) - 1;
  *major = internal_simple_strtoll(p, &p, 10);
  *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  return true;
#else
  return false;
#endif
}
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
!SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
static uptr g_tls_size;
#ifdef __i386__
# define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
#else
# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
#endif
#if CHECK_GET_TLS_STATIC_INFO_VERSION
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
# define DL_INTERNAL_FUNCTION
#endif
namespace {
// Tag types selecting the calling convention used for glibc's internal
// _dl_get_tls_static_info: plain cdecl (glibc >= 2.27) ...
struct GetTlsStaticInfoCall {
  typedef void (*get_tls_func)(size_t*, size_t*);
};
// ... or the old i386 internal_function (regparm(3), stdcall) variant.
struct GetTlsStaticInfoRegparmCall {
  typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
};

// Invokes the dlsym'ed _dl_get_tls_static_info pointer `ptr` using the
// calling convention selected by T, filling in static TLS size/alignment.
template <typename T>
void CallGetTls(void* ptr, size_t* size, size_t* align) {
  typename T::get_tls_func get_tls;
  CHECK_EQ(sizeof(get_tls), sizeof(ptr));
  // memcpy rather than a cast: object-to-function pointer conversion is
  // not portable.
  internal_memcpy(&get_tls, &ptr, sizeof(ptr));
  CHECK_NE(get_tls, 0);
  get_tls(size, align);
}
// Returns true when the running glibc version is >= major.minor.patch
// (lexicographic comparison), and false when the version is lower or
// cannot be determined.
bool CmpLibcVersion(int major, int minor, int patch) {
  int ma;
  int mi;
  int pa;
  if (!GetLibcVersion(&ma, &mi, &pa))
    return false;
  if (ma != major)
    return ma > major;
  if (mi != minor)
    return mi > minor;
  return pa >= patch;
}
} // namespace
// Computes g_tls_size (the static TLS block size) by calling glibc's
// internal _dl_get_tls_static_info and rounding the size up to the TLS
// alignment (at least 16 bytes, the stack alignment on supported targets).
void InitTlsSize() {
  // all current supported platforms have 16 bytes stack alignment
  const size_t kStackAlign = 16;
  void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  size_t tls_size = 0;
  size_t tls_align = 0;
  // On i?86, _dl_get_tls_static_info used to be internal_function, i.e.
  // __attribute__((regparm(3), stdcall)) before glibc 2.27 and is normal
  // function in 2.27 and later.
  if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0))
    CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
                                            &tls_size, &tls_align);
  else
    CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
                                     &tls_size, &tls_align);
  if (tls_align < kStackAlign)
    tls_align = kStackAlign;
  g_tls_size = RoundUpTo(tls_size, tls_align);
}
#else
// Static TLS size is not computed on these targets.
void InitTlsSize() { }
#endif  // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
        // !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
defined(__arm__)) && \
SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;

// Returns the size of glibc's internal `struct pthread` for the running
// glibc version/architecture, from a hard-coded table of known values.
// The result is cached in thread_descriptor_size (0 means unknown).
uptr ThreadDescriptorSize() {
  uptr val = atomic_load_relaxed(&thread_descriptor_size);
  if (val)
    return val;
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
  int major;
  int minor;
  int patch;
  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
    /* sizeof(struct pthread) values from various glibc versions. */
    if (SANITIZER_X32)
      val = 1728; // Assume only one particular version for x32.
    // For ARM sizeof(struct pthread) changed in Glibc 2.23.
    else if (SANITIZER_ARM)
      val = minor <= 22 ? 1120 : 1216;
    else if (minor <= 3)
      val = FIRST_32_SECOND_64(1104, 1696);
    else if (minor == 4)
      val = FIRST_32_SECOND_64(1120, 1728);
    else if (minor == 5)
      val = FIRST_32_SECOND_64(1136, 1728);
    else if (minor <= 9)
      val = FIRST_32_SECOND_64(1136, 1712);
    else if (minor == 10)
      val = FIRST_32_SECOND_64(1168, 1776);
    else if (minor == 11 || (minor == 12 && patch == 1))
      val = FIRST_32_SECOND_64(1168, 2288);
    else if (minor <= 14)
      val = FIRST_32_SECOND_64(1168, 2304);
    else
      val = FIRST_32_SECOND_64(1216, 2304);
  }
#elif defined(__mips__)
  // TODO(sagarthakur): add more values as per different glibc versions.
  val = FIRST_32_SECOND_64(1152, 1776);
#elif defined(__aarch64__)
  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
  val = 1776;
#elif defined(__powerpc64__)
  val = 1776; // from glibc.ppc64le 2.20-8.fc21
#elif defined(__s390__)
  val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
#endif
  if (val)
    atomic_store_relaxed(&thread_descriptor_size, val);
  return val;
}
// The offset at which pointer to self is located in the thread descriptor.
const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);

// Byte offset of the glibc self pointer inside the thread descriptor.
uptr ThreadSelfOffset() {
  return kThreadSelfOffset;
}

#if defined(__mips__) || defined(__powerpc64__)
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
# if defined(__mips__)
  const uptr kTcbHead = 16; // sizeof (tcbhead_t)
# elif defined(__powerpc64__)
  const uptr kTcbHead = 88; // sizeof (tcbhead_t)
# endif
  const uptr kTlsAlign = 16;
  const uptr kTlsPreTcbSize =
      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
  return kTlsPreTcbSize;
}
#endif
// Returns the address of glibc's thread descriptor (struct pthread) for
// the current thread, using the per-architecture thread-pointer access.
uptr ThreadSelf() {
  uptr descr_addr;
# if defined(__i386__)
  asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__x86_64__)
  asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__mips__)
  // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
  // points to the end of the TCB + 0x7000. The pthread_descr structure is
  // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
  // TCB and the size of pthread_descr.
  const uptr kTlsTcbOffset = 0x7000;
  uptr thread_pointer;
  asm volatile(".set push;\
                .set mips64r2;\
                rdhwr %0,$29;\
                .set pop" : "=r" (thread_pointer));
  descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# elif defined(__aarch64__) || defined(__arm__)
  descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
                                      ThreadDescriptorSize();
# elif defined(__s390__)
  descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
# elif defined(__powerpc64__)
  // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
  // points to the end of the TCB + 0x7000. The pthread_descr structure is
  // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
  // TCB and the size of pthread_descr.
  const uptr kTlsTcbOffset = 0x7000;
  uptr thread_pointer;
  asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
  descr_addr = thread_pointer - TlsPreTcbSize();
# else
#  error "unsupported CPU arch"
# endif
  return descr_addr;
}
#endif  // (x86_64 || i386 || mips || aarch64 || powerpc64 || s390 || arm) &&
        // SANITIZER_LINUX && !SANITIZER_ANDROID
#if SANITIZER_FREEBSD
// Returns the current thread's TLS segment base (gs/fs base) on FreeBSD.
static void **ThreadSelfSegbase() {
  void **segbase = 0;
# if defined(__i386__)
  // sysarch(I386_GET_GSBASE, segbase);
  __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
# elif defined(__x86_64__)
  // sysarch(AMD64_GET_FSBASE, segbase);
  __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
# else
#  error "unsupported CPU arch"
# endif
  return segbase;
}

// FreeBSD: the thread descriptor pointer is stored at segbase[2].
uptr ThreadSelf() {
  return (uptr)ThreadSelfSegbase()[2];
}
#endif  // SANITIZER_FREEBSD

#if SANITIZER_NETBSD
// Returns the current thread's TCB via the NetBSD lwp private pointer.
static struct tls_tcb * ThreadSelfTlsTcb() {
  return (struct tls_tcb *)_lwp_getprivate();
}

// NetBSD: the pthread pointer is stored in the TCB.
uptr ThreadSelf() {
  return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
}

// dl_iterate_phdr callback: stores the main program's PT_TLS segment size
// (dlpi_tls_modid == 1) into *(uptr*)data.
int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
  const Elf_Phdr *hdr = info->dlpi_phdr;
  const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;

  for (; hdr != last_hdr; ++hdr) {
    if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
      *(uptr*)data = hdr->p_memsz;
      break;
    }
  }
  return 0;
}
#endif  // SANITIZER_NETBSD
#if !SANITIZER_GO
// Computes the current thread's static TLS block as [*addr, *addr + *size).
// Platforms where the range is unknown report addr = size = 0.
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
  // TLS variant II: the static TLS block sits below the thread descriptor.
  *addr = ThreadSelf();
  *size = GetTlsSize();
  *addr -= *size;
  *addr += ThreadDescriptorSize();
# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
    || defined(__arm__)
  // TLS variant I: the block starts at the thread descriptor.
  *addr = ThreadSelf();
  *size = GetTlsSize();
# else
  *addr = 0;
  *size = 0;
# endif
#elif SANITIZER_FREEBSD
  void** segbase = ThreadSelfSegbase();
  *addr = 0;
  *size = 0;
  if (segbase != 0) {
    // tcbalign = 16
    // tls_size = round(tls_static_space, tcbalign);
    // dtv = segbase[1];
    // dtv[2] = segbase - tls_static_space;
    void **dtv = (void**) segbase[1];
    *addr = (uptr) dtv[2];
    *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
  }
#elif SANITIZER_NETBSD
  struct tls_tcb * const tcb = ThreadSelfTlsTcb();
  *addr = 0;
  *size = 0;
  if (tcb != 0) {
    // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program).
    // ld.elf_so hardcodes the index 1.
    dl_iterate_phdr(GetSizeFromHdr, size);

    if (*size != 0) {
      // The block has been found and tcb_dtv[1] contains the base address
      *addr = (uptr)tcb->tcb_dtv[1];
    }
  }
#elif SANITIZER_OPENBSD
  *addr = 0;
  *size = 0;
#elif SANITIZER_ANDROID
  *addr = 0;
  *size = 0;
#elif SANITIZER_SOLARIS
  // FIXME
  *addr = 0;
  *size = 0;
#else
# error "Unknown OS"
#endif
}
#endif
#if !SANITIZER_GO
// Returns the size of the current thread's static TLS block: computed via
// GetTls() on the BSDs/Android/Solaris, otherwise from the cached
// g_tls_size (plus the pre-TCB area on mips/ppc64, where it is included).
uptr GetTlsSize() {
#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
    SANITIZER_OPENBSD || SANITIZER_SOLARIS
  uptr addr, size;
  GetTls(&addr, &size);
  return size;
#elif defined(__mips__) || defined(__powerpc64__)
  return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
  return g_tls_size;
#endif
}
#endif
// Reports both the stack range and the static TLS range of the current
// thread. `main` selects the main-thread stack discovery path. For
// non-main threads the TLS block may sit inside the pthread-reported
// stack; in that case the stack is shrunk so the two ranges are disjoint.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  // Stub implementation for Go.
  *stk_addr = *stk_size = *tls_addr = *tls_size = 0;
#else
  GetTls(tls_addr, tls_size);

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;

  if (!main) {
    // If stack and tls intersect, make them non-intersecting.
    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
      CHECK_GT(*tls_addr + *tls_size, *stk_addr);
      CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
      *stk_size -= *tls_size;
      *tls_addr = *stk_addr + *stk_size;
    }
  }
#endif
}
#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
typedef ElfW(Phdr) Elf_Phdr;
#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
#define Elf_Phdr XElf32_Phdr
#define dl_phdr_info xdl_phdr_info
#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
// State threaded through dl_iterate_phdr: the module list being built and
// a flag marking the first callback invocation (the main binary).
struct DlIteratePhdrData {
  InternalMmapVectorNoCtor<LoadedModule> *modules;
  bool first;
};

// dl_iterate_phdr callback: appends one LoadedModule (name, base address,
// and every PT_LOAD range with its exec/write permissions) to the list.
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
  DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
  InternalScopedString module_name(kMaxPathLength);
  if (data->first) {
    data->first = false;
    // First module is the binary itself.
    ReadBinaryNameCached(module_name.data(), module_name.size());
  } else if (info->dlpi_name) {
    module_name.append("%s", info->dlpi_name);
  }
  // Skip entries with no usable name (e.g. the vDSO).
  if (module_name[0] == '\0')
    return 0;
  LoadedModule cur_module;
  cur_module.set(module_name.data(), info->dlpi_addr);
  for (int i = 0; i < (int)info->dlpi_phnum; i++) {
    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type == PT_LOAD) {
      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
      uptr cur_end = cur_beg + phdr->p_memsz;
      bool executable = phdr->p_flags & PF_X;
      bool writable = phdr->p_flags & PF_W;
      cur_module.addAddressRange(cur_beg, cur_end, executable,
                                 writable);
    }
  }
  data->modules->push_back(cur_module);
  return 0;
}
#if SANITIZER_ANDROID && __ANDROID_API__ < 21
// Pre-L Android may lack dl_iterate_phdr; declare it weak so the runtime
// check in requiresProcmaps() can fall back to /proc/maps when it is absent.
extern "C" __attribute__((weak)) int dl_iterate_phdr(
    int (*)(struct dl_phdr_info *, size_t, void *), void *);
#endif
// Returns true when module enumeration must use /proc/maps instead of
// dl_iterate_phdr (only ever true on old Android releases).
static bool requiresProcmaps() {
#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
  // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
  // The runtime check allows the same library to work with
  // both K and L (and future) Android releases.
  return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
#else
  return false;
#endif
}
// Enumerates loaded modules by parsing the process memory maps
// (/proc/self/maps or the platform equivalent) instead of dl_iterate_phdr.
static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
  MemoryMappingLayout layout(/*cache_enabled*/ true);
  layout.DumpListOfModules(modules);
}
// (Re)builds the module list, preferring dl_iterate_phdr and falling back
// to /proc/maps on platforms where the former is unusable.
void ListOfModules::init() {
  clearOrInit();
  if (requiresProcmaps()) {
    procmapsInit(&modules_);
  } else {
    // |true| marks the first callback as the main binary.
    DlIteratePhdrData data = {&modules_, true};
    dl_iterate_phdr(dl_iterate_phdr_cb, &data);
  }
}
// When a custom loader is used, dl_iterate_phdr may not contain the full
// list of modules. Allow callers to fall back to using procmaps.
// If procmaps itself was already the primary source, there is nothing
// better to fall back to, so the list is simply cleared.
void ListOfModules::fallbackInit() {
  if (!requiresProcmaps()) {
    clearOrInit();
    procmapsInit(&modules_);
  } else {
    clear();
  }
}
// getrusage does not give us the current RSS, only the max RSS.
// Still, this is better than nothing if /proc/self/statm is not available
// for some reason, e.g. due to a sandbox.
static uptr GetRSSFromGetrusage() {
  struct rusage ru;
  if (getrusage(RUSAGE_SELF, &ru) != 0)
    return 0;  // Failed, probably due to a sandbox.
  // ru_maxrss is reported in kilobytes; convert to bytes.
  return ru.ru_maxrss << 10;
}
// Returns the process's current resident set size in bytes, read from
// /proc/self/statm; falls back to getrusage (max RSS) when statm is
// unavailable or disallowed by flags. Returns 0 on read failure.
uptr GetRSS() {
  if (!common_flags()->can_use_proc_maps_statm)
    return GetRSSFromGetrusage();
  fd_t fd = OpenFile("/proc/self/statm", RdOnly);
  if (fd == kInvalidFd)
    return GetRSSFromGetrusage();
  char buf[64];
  uptr len = internal_read(fd, buf, sizeof(buf) - 1);
  internal_close(fd);
  if ((sptr)len <= 0)
    return 0;
  buf[len] = 0;
  // The format of the file is:
  // 1084 89 69 11 0 79 0
  // We need the second number which is RSS in pages.
  char *pos = buf;
  // Skip the first number.
  while (*pos >= '0' && *pos <= '9')
    pos++;
  // Skip whitespaces.
  while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
    pos++;
  // Read the number.
  uptr rss = 0;
  while (*pos >= '0' && *pos <= '9')
    rss = rss * 10 + *pos++ - '0';
  // statm reports pages; convert to bytes.
  return rss * GetPageSizeCached();
}
// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
// Returns the number of CPUs via an allocation-free, per-platform path:
// sysctl on the BSDs, /sys scanning on old Android NDKs, and
// sched_getaffinity elsewhere.
u32 GetNumberOfCPUs() {
#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
  u32 ncpu;
  int req[2];
  uptr len = sizeof(ncpu);
  req[0] = CTL_HW;
  req[1] = HW_NCPU;
  CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
  return ncpu;
#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
  // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
  // exist in sched.h. That is the case for toolchains generated with older
  // NDKs.
  // This code doesn't work on AArch64 because internal_getdents makes use of
  // the 64bit getdents syscall, but cpu_set_t seems to always exist on AArch64.
  uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY);
  if (internal_iserror(fd))
    return 0;
  InternalMmapVector<u8> buffer(4096);
  uptr bytes_read = buffer.size();
  uptr n_cpus = 0;
  u8 *d_type;
  // Start past the end so the first loop iteration fills the buffer.
  struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read];
  while (true) {
    if ((u8 *)entry >= &buffer[bytes_read]) {
      bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(),
                                     buffer.size());
      if (internal_iserror(bytes_read) || !bytes_read)
        break;
      entry = (struct linux_dirent *)buffer.data();
    }
    // d_type is stored in the last byte of each dirent record.
    d_type = (u8 *)entry + entry->d_reclen - 1;
    if (d_type >= &buffer[bytes_read] ||
        (u8 *)&entry->d_name[3] >= &buffer[bytes_read])
      break;
    // Count directories named cpu<digit>... (cpu0, cpu1, ...).
    if (entry->d_ino != 0 && *d_type == DT_DIR) {
      if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
          entry->d_name[2] == 'u' &&
          entry->d_name[3] >= '0' && entry->d_name[3] <= '9')
        n_cpus++;
    }
    entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
  }
  internal_close(fd);
  return n_cpus;
#elif SANITIZER_SOLARIS
  return sysconf(_SC_NPROCESSORS_ONLN);
#else
  // Counts only the CPUs this process is allowed to run on.
  cpu_set_t CPUs;
  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
  return CPU_COUNT(&CPUs);
#endif
}
#if SANITIZER_LINUX
# if SANITIZER_ANDROID
// Set once AndroidLogInit has run; gates syslog output from Printf.
static atomic_uint8_t android_log_initialized;
// Opens the Android syslog channel for this process.
void AndroidLogInit() {
  openlog(GetProcessName(), 0, LOG_USER);
  atomic_store(&android_log_initialized, 1, memory_order_release);
}
// Logging from Printf is only safe after the log has been initialized.
static bool ShouldLogAfterPrintf() {
  return atomic_load(&android_log_initialized, memory_order_acquire);
}
extern "C" SANITIZER_WEAK_ATTRIBUTE
int async_safe_write_log(int pri, const char* tag, const char* msg);
extern "C" SANITIZER_WEAK_ATTRIBUTE
int __android_log_write(int prio, const char* tag, const char* msg);
// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
#define SANITIZER_ANDROID_LOG_INFO 4
// async_safe_write_log is a new public version of __libc_write_log that is
// used behind syslog. It is preferable to syslog as it will not do any dynamic
// memory allocation or formatting.
// If the function is not available, syslog is preferred for L+ (it was broken
// pre-L) as __android_log_write triggers a racey behavior with the strncpy
// interceptor. Fallback to __android_log_write pre-L.
void WriteOneLineToSyslog(const char *s) {
  if (&async_safe_write_log) {
    async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);
  } else if (AndroidGetApiLevel() > ANDROID_KITKAT) {
    syslog(LOG_INFO, "%s", s);
  } else {
    CHECK(&__android_log_write);
    __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s);
  }
}
extern "C" SANITIZER_WEAK_ATTRIBUTE
void android_set_abort_message(const char *);
// Forwards the abort message to Bionic (shown in tombstones) when available.
void SetAbortMessage(const char *str) {
  if (&android_set_abort_message)
    android_set_abort_message(str);
}
# else
// Non-Android Linux: plain syslog, always safe to log.
void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; }
void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
void SetAbortMessage(const char *str) {}
# endif // SANITIZER_ANDROID
// Mirrors Printf output to syslog when the log_to_syslog flag is set.
void LogMessageOnPrintf(const char *str) {
  if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
    WriteToSyslog(str);
}
#endif // SANITIZER_LINUX
#if SANITIZER_LINUX && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
INLINE bool CanUseVDSO() {
  // Bionic is safe, it checks for the vDSO function pointers to be initialized.
  if (SANITIZER_ANDROID)
    return true;
  if (&__progname && __progname && *__progname)
    return true;
  return false;
}
// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
// intercepted, so define it weakly and use it if available.
extern "C" SANITIZER_WEAK_ATTRIBUTE
int real_clock_gettime(u32 clk_id, void *tp);
// Returns CLOCK_MONOTONIC time in nanoseconds.
u64 MonotonicNanoTime() {
  timespec ts;
  if (CanUseVDSO()) {
    // Prefer the un-intercepted libc entry point when it exists.
    if (&real_clock_gettime)
      real_clock_gettime(CLOCK_MONOTONIC, &ts);
    else
      clock_gettime(CLOCK_MONOTONIC, &ts);
  } else {
    // Too early for the vDSO; use the raw syscall.
    internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  }
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#else
// Non-Linux & Go always use the syscall.
u64 MonotonicNanoTime() {
  timespec ts;
  internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#endif // SANITIZER_LINUX && !SANITIZER_GO
#if !SANITIZER_OPENBSD
// Re-executes the current binary with the same argv/environment (used e.g.
// to restart with a different ASLR setting). Does not return on success;
// dies with a report if execve fails.
void ReExec() {
  const char *pathname = "/proc/self/exe";
#if SANITIZER_NETBSD
  // NetBSD has no /proc/self/exe; query the executable path via sysctl.
  static const int name[] = {
      CTL_KERN,
      KERN_PROC_ARGS,
      -1,
      KERN_PROC_PATHNAME,
  };
  char path[400];
  uptr len;
  len = sizeof(path);
  if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
    pathname = path;
#elif SANITIZER_SOLARIS
  pathname = getexecname();
  CHECK_NE(pathname, NULL);
#elif SANITIZER_USE_GETAUXVAL
  // Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
  // rely on that will fail to load shared libraries. Query AT_EXECFN instead.
  pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
#endif
  uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
  // execve only returns on error.
  int rverrno;
  CHECK_EQ(internal_iserror(rv, &rverrno), true);
  Printf("execve failed, errno %d\n", rverrno);
  Die();
}
#endif // !SANITIZER_OPENBSD
} // namespace __sanitizer
#endif

View File

@@ -0,0 +1,29 @@
//===-- sanitizer_mac_libcdep.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries and
// implements OSX-specific functions.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_MAC
#include "sanitizer_mac.h"
#include <sys/mman.h>
namespace __sanitizer {
// Makes everything above |max_address| unusable by reserving it with a
// PROT_NONE mapping, so the process cannot place allocations there.
void RestrictMemoryToMaxAddress(uptr max_address) {
  const uptr gap_size = GetMaxUserVirtualAddress() + 1 - max_address;
  void *gap = MmapFixedNoAccess(max_address, gap_size, "high gap");
  CHECK(gap != MAP_FAILED);
}
} // namespace __sanitizer
#endif // SANITIZER_MAC

View File

@@ -0,0 +1,509 @@
//===-- sanitizer_posix_libcdep.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements libc-dependent POSIX-specific functions
// from sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_POSIX
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_platform_limits_netbsd.h"
#include "sanitizer_platform_limits_openbsd.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_platform_limits_solaris.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
namespace __sanitizer {
// Returns the real user id of the calling process.
u32 GetUid() {
  const uid_t uid = getuid();
  return uid;
}
// Returns an integer identifier for the calling thread (its pthread handle
// squeezed into a uptr).
uptr GetThreadSelf() {
  const pthread_t self = pthread_self();
  return (uptr)self;
}
// Hints the kernel that the whole pages inside [beg, end) can be reclaimed.
// Partial pages at either end are left untouched (rounding is inward).
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr page_size = GetPageSizeCached();
  uptr beg_aligned = RoundUpTo(beg, page_size);
  uptr end_aligned = RoundDownTo(end, page_size);
  if (beg_aligned < end_aligned)
    // In the default Solaris compilation environment, madvise() is declared
    // to take a caddr_t arg; casting it to void * results in an invalid
    // conversion error, so use char * instead.
    madvise((char *)beg_aligned, end_aligned - beg_aligned,
            SANITIZER_MADVISE_DONTNEED);
}
// Enables or disables transparent huge pages for a shadow-memory region,
// depending on the no_huge_pages_for_shadow flag. No-op where madvise
// lacks the THP advice values.
void SetShadowRegionHugePageMode(uptr addr, uptr size) {
#ifdef MADV_NOHUGEPAGE  // May not be defined on old systems.
  if (common_flags()->no_huge_pages_for_shadow)
    madvise((char *)addr, size, MADV_NOHUGEPAGE);
  else
    madvise((char *)addr, size, MADV_HUGEPAGE);
#endif  // MADV_NOHUGEPAGE
}
// Excludes [addr, addr+length) from core dumps where the platform supports
// it (MADV_DONTDUMP on Linux, MADV_NOCORE on FreeBSD). Returns true on
// success, and trivially true where unsupported.
bool DontDumpShadowMemory(uptr addr, uptr length) {
#if defined(MADV_DONTDUMP)
  return madvise((char *)addr, length, MADV_DONTDUMP) == 0;
#elif defined(MADV_NOCORE)
  return madvise((char *)addr, length, MADV_NOCORE) == 0;
#else
  return true;
#endif  // MADV_DONTDUMP
}
// Returns the current soft limit for resource |res|; CHECK-fails if the
// limit cannot be read.
static rlim_t getlim(int res) {
  rlimit lim;
  CHECK_EQ(0, getrlimit(res, &lim));
  return lim.rlim_cur;
}
// Sets the soft limit for resource |res| to |lim| via a read-modify-write
// of the full rlimit; dies with a report if either syscall fails.
static void setlim(int res, rlim_t lim) {
  struct rlimit rlim;
  // Note: no const_cast is needed here — rlim is a plain mutable local.
  // The original code wrapped both &rlim arguments in
  // const_cast<struct rlimit *>, which was a no-op and only obscured intent.
  if (getrlimit(res, &rlim)) {
    Report("ERROR: %s getrlimit() failed %d\n", SanitizerToolName, errno);
    Die();
  }
  rlim.rlim_cur = lim;
  if (setrlimit(res, &rlim)) {
    Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
    Die();
  }
}
// Zeroes RLIMIT_CORE when the disable_coredump flag is set (huge shadow
// mappings make core files impractically large).
void DisableCoreDumperIfNecessary() {
  if (!common_flags()->disable_coredump)
    return;
  setlim(RLIMIT_CORE, 0);
}
// True when the soft RLIMIT_STACK is RLIM_INFINITY.
bool StackSizeIsUnlimited() {
  return getlim(RLIMIT_STACK) == RLIM_INFINITY;
}
// Caps the soft stack limit at |limit| bytes and verifies it took effect.
void SetStackSizeLimitInBytes(uptr limit) {
  setlim(RLIMIT_STACK, static_cast<rlim_t>(limit));
  CHECK(!StackSizeIsUnlimited());
}
// True when the soft RLIMIT_AS is RLIM_INFINITY.
bool AddressSpaceIsUnlimited() {
  return getlim(RLIMIT_AS) == RLIM_INFINITY;
}
// Lifts the address-space limit entirely (needed before reserving the huge
// shadow regions) and verifies it took effect.
void SetAddressSpaceUnlimited() {
  setlim(RLIMIT_AS, RLIM_INFINITY);
  CHECK(AddressSpaceIsUnlimited());
}
// Blocks the calling thread for |seconds| seconds (may wake early on a
// signal, as sleep(3) does).
void SleepForSeconds(int seconds) {
  sleep(seconds);
}
// Blocks the calling thread for |ms| milliseconds.
void SleepForMillis(int ms) {
  // usleep takes microseconds.
  usleep(ms * 1000);
}
// Terminates the process with SIGABRT, first restoring the default SIGABRT
// disposition so the sanitizer's own handler does not intercept the abort.
void Abort() {
#if !SANITIZER_GO
  // If we are handling SIGABRT, unhandle it first.
  // TODO(vitalybuka): Check if handler belongs to sanitizer.
  if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
    struct sigaction sigact;
    internal_memset(&sigact, 0, sizeof(sigact));
    sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
    internal_sigaction(SIGABRT, &sigact, nullptr);
  }
#endif
  abort();
}
// Registers an exit handler; a no-op returning success under Go, which
// must not touch libc's atexit machinery.
int Atexit(void (*function)(void)) {
#if !SANITIZER_GO
  return atexit(function);
#else
  return 0;
#endif
}
// Reports can use ANSI colors only when |fd| refers to a terminal.
bool SupportsColoredOutput(fd_t fd) {
  return isatty(fd) != 0;
}
#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
// NOTE(review): SIGSTKSZ stops being a compile-time constant on glibc
// >= 2.34 (it becomes a sysconf call); if this file targets such a libc,
// this initializer needs to become a runtime computation — confirm.
static const uptr kAltStackSize = SIGSTKSZ * 4;  // SIGSTKSZ is not enough.
// Installs a dedicated signal stack for the calling thread so deadly-signal
// handlers can run even after a stack overflow. Idempotent: keeps an
// already-enabled altstack (except Android's, which is too small).
void SetAlternateSignalStack() {
  stack_t altstack, oldstack;
  CHECK_EQ(0, sigaltstack(nullptr, &oldstack));
  // If the alternate stack is already in place, do nothing.
  // Android always sets an alternate stack, but it's too small for us.
  if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return;
  // TODO(glider): the mapped stack should have the MAP_STACK flag in the
  // future. It is not required by man 2 sigaltstack now (they're using
  // malloc()).
  void* base = MmapOrDie(kAltStackSize, __func__);
  altstack.ss_sp = (char*) base;
  altstack.ss_flags = 0;
  altstack.ss_size = kAltStackSize;
  CHECK_EQ(0, sigaltstack(&altstack, nullptr));
}
// Disables the thread's alternate signal stack and unmaps the memory that
// SetAlternateSignalStack allocated for it.
void UnsetAlternateSignalStack() {
  stack_t altstack, oldstack;
  altstack.ss_sp = nullptr;
  altstack.ss_flags = SS_DISABLE;
  altstack.ss_size = kAltStackSize;  // Some sane value required on Darwin.
  CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
  UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
// Installs |handler| for |signum| unless the corresponding handle_* flag
// tells the sanitizer to leave that signal alone.
static void MaybeInstallSigaction(int signum,
                                  SignalHandlerType handler) {
  if (GetHandleSignalMode(signum) == kHandleSignalNo) return;
  struct sigaction sigact;
  internal_memset(&sigact, 0, sizeof(sigact));
  sigact.sa_sigaction = (sa_sigaction_t)handler;
  // Do not block the signal from being received in that signal's handler.
  // Clients are responsible for handling this correctly.
  sigact.sa_flags = SA_SIGINFO | SA_NODEFER;
  if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
  CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr));
  VReport(1, "Installed the sigaction for signal %d\n", signum);
}
// Installs |handler| for every signal the sanitizers treat as deadly,
// honoring the per-signal handle_* flags via MaybeInstallSigaction.
void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  // Set the alternate signal stack for the main thread.
  // This will cause SetAlternateSignalStack to be called twice, but the stack
  // will be actually set only once.
  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
  const int kDeadlySignals[] = {SIGSEGV, SIGBUS, SIGABRT,
                                SIGFPE,  SIGILL, SIGTRAP};
  for (int signum : kDeadlySignals)
    MaybeInstallSigaction(signum, handler);
}
// Heuristically classifies this deadly signal as a stack overflow: the
// faulting address must be near the stack pointer and the si_code must
// indicate a mapping/permission fault rather than e.g. misalignment.
bool SignalContext::IsStackOverflow() const {
  // Access at a reasonable offset above SP, or slightly below it (to account
  // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
  // probably a stack overflow.
#ifdef __s390__
  // On s390, the fault address in siginfo points to start of the page, not
  // to the precise word that was accessed. Mask off the low bits of sp to
  // take it into account.
  bool IsStackAccess = addr >= (sp & ~0xFFF) && addr < sp + 0xFFFF;
#else
  // Let's accept up to a page size away from top of stack. Things like stack
  // probing can trigger accesses with such large offsets.
  bool IsStackAccess = addr + GetPageSizeCached() > sp && addr < sp + 0xFFFF;
#endif
#if __powerpc__
  // Large stack frames can be allocated with e.g.
  //   lis r0,-10000
  //   stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
  // If the store faults then sp will not have been updated, so test above
  // will not work, because the fault address will be more than just "slightly"
  // below sp.
  if (!IsStackAccess && IsAccessibleMemoryRange(pc, 4)) {
    // Decode the faulting instruction to recognize a store-with-update
    // addressed off the stack pointer (r1).
    u32 inst = *(unsigned *)pc;
    u32 ra = (inst >> 16) & 0x1F;
    u32 opcd = inst >> 26;
    u32 xo = (inst >> 1) & 0x3FF;
    // Check for store-with-update to sp. The instructions we accept are:
    //   stbu rs,d(ra) stbux rs,ra,rb
    //   sthu rs,d(ra) sthux rs,ra,rb
    //   stwu rs,d(ra) stwux rs,ra,rb
    //   stdu rs,ds(ra) stdux rs,ra,rb
    // where ra is r1 (the stack pointer).
    if (ra == 1 &&
        (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
         (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
      IsStackAccess = true;
  }
#endif  // __powerpc__
  // We also check si_code to filter out SEGV caused by something else other
  // then hitting the guard page or unmapped memory, like, for example,
  // unaligned memory access.
  auto si = static_cast<const siginfo_t *>(siginfo);
  return IsStackAccess &&
         (si->si_code == si_SEGV_MAPERR || si->si_code == si_SEGV_ACCERR);
}
#endif // SANITIZER_GO
// Probes whether [beg, beg+size) is readable without risking a fault in
// our own code: writing the range into a pipe makes the kernel do the read
// and report EFAULT for inaccessible memory.
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  uptr page_size = GetPageSizeCached();
  // Checking too large memory ranges is slow.
  CHECK_LT(size, page_size * 10);
  int sock_pair[2];
  if (pipe(sock_pair))
    return false;
  uptr bytes_written =
      internal_write(sock_pair[1], reinterpret_cast<void *>(beg), size);
  int write_errno;
  bool result;
  if (internal_iserror(bytes_written, &write_errno)) {
    // Only EFAULT (inaccessible memory) is an expected failure here.
    CHECK_EQ(EFAULT, write_errno);
    result = false;
  } else {
    // A short write would mean the pipe filled up; sizes are capped above,
    // so the whole range must have been written for success.
    result = (bytes_written == size);
  }
  internal_close(sock_pair[0]);
  internal_close(sock_pair[1]);
  return result;
}
// Called before the process enters a sandbox that may revoke filesystem
// access; snapshots state we would otherwise read lazily from /proc.
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
  // to read the file mappings from /proc/self/maps. Luckily, neither the
  // process will be able to load additional libraries, so it's fine to use the
  // cached mappings.
  MemoryMappingLayout::CacheMemoryMappings();
}
// Maps a fixed RW anonymous region at |fixed_addr| (page-rounded), tagged
// with |name| for /proc/self/maps. Returns false (after a report) on
// failure instead of dying, so callers can recover.
static bool MmapFixed(uptr fixed_addr, uptr size, int additional_flags,
                      const char *name) {
  size = RoundUpTo(size, GetPageSizeCached());
  fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
  uptr p =
      MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_FIXED | additional_flags | MAP_ANON, name);
  int reserrno;
  if (internal_iserror(p, &reserrno)) {
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
           SanitizerToolName, size, size, fixed_addr, reserrno);
    return false;
  }
  IncreaseTotalMmap(size);
  return true;
}
// Fixed mapping without swap reservation (MAP_NORESERVE); see MmapFixed.
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  return MmapFixed(fixed_addr, size, MAP_NORESERVE, name);
}
// Like MmapFixedNoReserve, but additionally requests huge/super pages for
// the region unless disabled by the no_huge_pages_for_shadow flag.
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
#if SANITIZER_FREEBSD
  if (common_flags()->no_huge_pages_for_shadow)
    return MmapFixedNoReserve(fixed_addr, size, name);
  // MAP_NORESERVE is implicit with FreeBSD
  return MmapFixed(fixed_addr, size, MAP_ALIGNED_SUPER, name);
#else
  bool r = MmapFixedNoReserve(fixed_addr, size, name);
  if (r)
    SetShadowRegionHugePageMode(fixed_addr, size);
  return r;
#endif
}
// Reserves (PROT_NONE) a |size|-byte range, at |fixed_addr| when non-zero,
// otherwise wherever the kernel chooses. Returns the base address.
uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size, name)
                     : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
// Commits part of the reserved range as RW; returns 0 on non-fatal OOM.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
  return reinterpret_cast<uptr>(
      MmapFixedOrDieOnFatalError(fixed_addr, size, name));
}
// Like Map, but any mmap failure is fatal.
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
                                    const char *name) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size, name));
}
// Unmaps a chunk of the reserved range. Only a prefix (starting at base_)
// or a suffix (ending at base_ + size_) may be released — the CHECKs
// enforce that the remaining reservation stays contiguous.
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  if (addr == reinterpret_cast<uptr>(base_))
    // If we unmap the whole range, just null out the base.
    base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size);
  else
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  size_ -= size;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}
// Reserves [fixed_addr, fixed_addr+size) with PROT_NONE so nothing else
// can be mapped there; |name| tags the region in /proc/self/maps.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  const int flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON;
  return (void *)MmapNamed((void *)fixed_addr, size, PROT_NONE, flags, name);
}
// Reserves |size| bytes with PROT_NONE at a kernel-chosen address.
void *MmapNoAccess(uptr size) {
  return (void *)internal_mmap(nullptr, size, PROT_NONE,
                               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, -1, 0);
}
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
// Declared weak so its presence can be tested with `&real_...` below.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE int
real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
}  // extern "C"
// pthread_attr_getstack that bypasses our interceptor when one is
// installed, so the sanitizer reads the真real attribute values.
// NOTE(review): typo-free restatement — calls the un-intercepted
// real_pthread_attr_getstack when it is linked in, else plain libc.
int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
#if !SANITIZER_GO && !SANITIZER_MAC
  if (&real_pthread_attr_getstack)
    return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
                                      (size_t *)size);
#endif
  return pthread_attr_getstack((pthread_attr_t *)attr, addr, (size_t *)size);
}
#if !SANITIZER_GO
// Grows the stack size in |attr_| (a pthread_attr_t) when it is smaller
// than the sanitizer's minimum (TLS footprint + 128k). A caller-provided
// stack cannot be grown; we only warn in that case.
void AdjustStackSize(void *attr_) {
  pthread_attr_t *attr = (pthread_attr_t *)attr_;
  uptr stackaddr = 0;
  uptr stacksize = 0;
  my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
  // GLibC will return (0 - stacksize) as the stack address in the case when
  // stacksize is set, but stackaddr is not.
  bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
  // We place a lot of tool data into TLS, account for that.
  const uptr minstacksize = GetTlsSize() + 128*1024;
  if (stacksize < minstacksize) {
    if (!stack_set) {
      // stacksize == 0 means the default stack size, which is already large
      // enough; only explicitly-set small sizes are bumped.
      if (stacksize != 0) {
        VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
                minstacksize);
        pthread_attr_setstacksize(attr, minstacksize);
      }
    } else {
      Printf("Sanitizer: pre-allocated stack size is insufficient: "
             "%zu < %zu\n", stacksize, minstacksize);
      Printf("Sanitizer: pthread_create is likely to fail.\n");
    }
  }
}
#endif // !SANITIZER_GO
// fork+execs |program| with the given argv/envp, wiring the provided fds
// (when valid) to the child's stdin/stdout/stderr. The passed-in fds are
// always closed in the parent. Returns the child pid, or a negative value
// on fork failure.
pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // The parent has no further use for these fds regardless of outcome.
  auto file_closer = at_scope_exit([&] {
    if (stdin_fd != kInvalidFd) {
      internal_close(stdin_fd);
    }
    if (stdout_fd != kInvalidFd) {
      internal_close(stdout_fd);
    }
    if (stderr_fd != kInvalidFd) {
      internal_close(stderr_fd);
    }
  });
  int pid = internal_fork();
  if (pid < 0) {
    int rverrno;
    if (internal_iserror(pid, &rverrno)) {
      Report("WARNING: failed to fork (errno %d)\n", rverrno);
    }
    return pid;
  }
  if (pid == 0) {
    // Child subprocess
    if (stdin_fd != kInvalidFd) {
      internal_close(STDIN_FILENO);
      internal_dup2(stdin_fd, STDIN_FILENO);
      internal_close(stdin_fd);
    }
    if (stdout_fd != kInvalidFd) {
      internal_close(STDOUT_FILENO);
      internal_dup2(stdout_fd, STDOUT_FILENO);
      internal_close(stdout_fd);
    }
    if (stderr_fd != kInvalidFd) {
      internal_close(STDERR_FILENO);
      internal_dup2(stderr_fd, STDERR_FILENO);
      internal_close(stderr_fd);
    }
    // Don't leak any of the parent's descriptors into the child.
    for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--) internal_close(fd);
    internal_execve(program, const_cast<char **>(&argv[0]),
                    const_cast<char *const *>(envp));
    // Only reached if execve failed.
    internal__exit(1);
  }
  return pid;
}
bool IsProcessRunning(pid_t pid) {
int process_status;
uptr waitpid_status = internal_waitpid(pid, &process_status, WNOHANG);
int local_errno;
if (internal_iserror(waitpid_status, &local_errno)) {
VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
return false;
}
return waitpid_status == 0;
}
int WaitForProcess(pid_t pid) {
int process_status;
uptr waitpid_status = internal_waitpid(pid, &process_status, 0);
int local_errno;
if (internal_iserror(waitpid_status, &local_errno)) {
VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
return -1;
}
return process_status;
}
// True when a pthread detach-state value means "detached".
bool IsStateDetached(int state) {
  return PTHREAD_CREATE_DETACHED == state;
}
} // namespace __sanitizer
#endif // SANITIZER_POSIX

View File

@@ -0,0 +1,149 @@
//===-- sanitizer_stackdepot.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"
#include "sanitizer_common.h"
#include "sanitizer_hash.h"
#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
// One interned stack trace in the depot's hash table. The hash and a
// per-stack use counter are packed into a single atomic u32: the low
// kUseCountBits hold the counter, the high bits hold hash bits (safe
// because all nodes in a bucket share the low kTabSizeLog hash bits).
struct StackDepotNode {
  StackDepotNode *link;                // next node in the same hash bucket
  u32 id;                              // depot-wide unique id for this stack
  atomic_uint32_t hash_and_use_count;  // hash_bits : 12; use_count : 20;
  u32 size;                            // number of frames in stack[]
  u32 tag;
  uptr stack[1];  // [size]
  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
  typedef StackTrace args_type;
  // Returns true iff this node stores exactly the trace |args| (same hash
  // bits, tag, length and frame-by-frame PCs).
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
  // Bytes needed to store a node holding args.size frames (the trailing
  // flexible array accounts for the -1).
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  static u32 hash(const args_type &args) {
    MurMur2HashBuilder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    return H.get();
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  // Initializes this freshly allocated node; the use counter starts at 0.
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }
  typedef StackDepotHandle handle_type;
};
// The packed use counter must cover the externally visible maximum.
COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
u32 StackDepotHandle::id() { return node_->id; }
// Reads the use counter packed in the node's low bits.
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
// Bumps the use counter; "unsafe" because overflow into the hash bits is
// only guarded by the CHECK below, not prevented atomically.
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
// The process-wide stack depot all the wrappers below operate on.
static StackDepot theDepot;
StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}
// Interns |stack| and returns its id; 0 means the stack was invalid/empty.
u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}
// Looks a trace back up by the id StackDepotPut returned.
StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}
void StackDepotLockAll() {
  theDepot.LockAll();
}
void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}
// Strict weak ordering on the stack id only; used for sorting and for the
// lower-bound lookup in StackDepotReverseMap::Get.
bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}
// Builds an id -> node index over the whole depot by walking every hash
// bucket, then sorting by id for binary search in Get(). Snapshot only:
// stacks added to the depot afterwards are not visible through this map.
StackDepotReverseMap::StackDepotReverseMap() {
  map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    // The low pointer bit is the depot's bucket lock; mask it off.
    StackDepotNode *s = (StackDepotNode*)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
}
// Maps a stack id back to its trace using the sorted snapshot built by the
// constructor. Returns an empty StackTrace when |id| is not in the map.
StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, nullptr};
  uptr idx =
      InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
  // InternalLowerBound returns map_.size() when every element compares
  // less than |pair|. The previous check (`idx > map_.size()`) let that
  // value through and then read map_[map_.size()], one element past the
  // end. Use >= so the miss case never indexes out of bounds.
  if (idx >= map_.size() || map_[idx].id != id)
    return StackTrace();
  return map_[idx].desc->load();
}
} // namespace __sanitizer

View File

@@ -0,0 +1,133 @@
//===-- sanitizer_stacktrace.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_stacktrace.h"
namespace __sanitizer {
// Given the PC of a call instruction, returns a PC guaranteed to fall
// inside the *next* instruction, used to attribute a frame to the call
// site. Per-arch offsets reflect fixed instruction widths (plus the delay
// slot on SPARC/MIPS); +1 suffices on variable-width x86.
uptr StackTrace::GetNextInstructionPc(uptr pc) {
#if defined(__sparc__) || defined(__mips__)
  return pc + 8;
#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
  return pc + 4;
#else
  return pc + 1;
#endif
}
// Returns the caller's return address (i.e. the PC just after the call
// into this function).
uptr StackTrace::GetCurrentPc() {
  return GET_CALLER_PC();
}
// Fills this trace from an existing array of |cnt| PCs, optionally
// appending |extra_top_pc| as one additional frame after them.
void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
  size = cnt + (extra_top_pc ? 1 : 0);
  CHECK_LE(size, kStackTraceMax);
  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
  if (extra_top_pc)
    trace_buffer[cnt] = extra_top_pc;
  // A copied trace carries no frame-pointer information.
  top_frame_bp = 0;
}
// Sparc implemention is in its own file.
#if !defined(__sparc__)
// In GCC on ARM bp points to saved lr, not fp, so we should check the next
// cell in stack to be a saved frame pointer. GetCanonicFrame returns the
// pointer to saved frame pointer in any case.
static inline uhwptr *GetCanonicFrame(uptr bp,
                                      uptr stack_top,
                                      uptr stack_bottom) {
  CHECK_GT(stack_top, stack_bottom);
#ifdef __arm__
  if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
  uhwptr *bp_prev = (uhwptr *)bp;
  // LLVM-style frame: bp already points at the saved frame pointer.
  if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
  // The next frame pointer does not look right. This could be a GCC frame, step
  // back by 1 word and try again.
  if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
    return bp_prev - 1;
  // Nope, this does not look right either. This means the frame after next does
  // not have a valid frame pointer, but we can still extract the caller PC.
  // Unfortunately, there is no way to decide between GCC and LLVM frame
  // layouts. Assume LLVM.
  return bp_prev;
#else
  // Everywhere else bp is already canonical.
  return (uhwptr*)bp;
#endif
}
// Fast frame-pointer-based unwinder.  Records |pc| as the first entry, then
// walks the chain of saved frame pointers starting at |bp|, staying within
// [stack_bottom, stack_top), until |max_depth| entries are collected or a
// frame fails validation/alignment checks.
void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
                                    uptr stack_bottom, u32 max_depth) {
  // TODO(yln): add arg sanity check for stack_top/stack_bottom
  CHECK_GE(max_depth, 2);
  const uptr kPageSize = GetPageSizeCached();
  trace_buffer[0] = pc;
  size = 1;
  if (stack_top < 4096) return;  // Sanity check for stack top.
  uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
  // Lowest possible address that makes sense as the next frame pointer.
  // Goes up as we walk the stack.
  uptr bottom = stack_bottom;
  // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
  while (IsValidFrame((uptr)frame, stack_top, bottom) &&
         IsAligned((uptr)frame, sizeof(*frame)) &&
         size < max_depth) {
#ifdef __powerpc__
    // PowerPC ABIs specify that the return address is saved at offset
    // 16 of the *caller's* stack frame.  Thus we must dereference the
    // back chain to find the caller frame before extracting it.
    uhwptr *caller_frame = (uhwptr*)frame[0];
    if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
        !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
      break;
    uhwptr pc1 = caller_frame[2];
#elif defined(__s390__)
    // On s390 the return address lives at slot 14 of the register save area.
    uhwptr pc1 = frame[14];
#else
    // Common layout: saved return address immediately follows the saved
    // frame pointer.
    uhwptr pc1 = frame[1];
#endif
    // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
    // x86_64) is invalid and stop unwinding here.  If we're adding support for
    // a platform where this isn't true, we need to reconsider this check.
    if (pc1 < kPageSize)
      break;
    // Skip duplicates of the starting PC.
    if (pc1 != pc) {
      trace_buffer[size++] = (uptr) pc1;
    }
    // Move one frame up; the new frame must lie strictly above the old one.
    bottom = (uptr)frame;
    frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
  }
}
#endif // !defined(__sparc__)
// Drop |count| entries from the top of the trace, shifting the remaining
// entries to the front of the buffer.  |count| must be strictly smaller
// than the current size.
void BufferedStackTrace::PopStackFrames(uptr count) {
  CHECK_LT(count, size);
  size -= count;
  uptr idx = 0;
  while (idx < size) {
    trace_buffer[idx] = trace_buffer[idx + count];
    ++idx;
  }
}
// Absolute difference of two unsigned addresses.
static uptr Distance(uptr a, uptr b) {
  if (a < b)
    return b - a;
  return a - b;
}
// Return the index of the trace entry whose PC is closest to |pc|.
// Index 0 is the initial candidate; on ties the earlier frame wins.
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
  uptr best_idx = 0;
  uptr i = 1;
  while (i < size) {
    if (Distance(trace[i], pc) < Distance(trace[best_idx], pc))
      best_idx = i;
    ++i;
  }
  return best_idx;
}
} // namespace __sanitizer

View File

@@ -0,0 +1,159 @@
//===-- sanitizer_stacktrace_libcdep.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
// Print the symbolized stack trace, one line per symbolized frame, followed
// by a trailing blank line.  When the dedup_token_length flag is positive,
// also emits a DEDUP_TOKEN line built from the first N frame function names
// joined with "--".
void StackTrace::Print() const {
  if (trace == nullptr || size == 0) {
    Printf(" <empty stack>\n\n");
    return;
  }
  InternalScopedString frame_desc(GetPageSizeCached() * 2);
  InternalScopedString dedup_token(GetPageSizeCached());
  int dedup_frames = common_flags()->dedup_token_length;
  uptr frame_num = 0;
  // A zero PC terminates the trace early.
  for (uptr i = 0; i < size && trace[i]; i++) {
    // PCs in stack traces are actually the return addresses, that is,
    // addresses of the next instructions after the call.
    uptr pc = GetPreviousInstructionPc(trace[i]);
    SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
    CHECK(frames);
    // A single PC can expand to several SymbolizedStack nodes (cur->next
    // chain); each gets its own rendered line and frame number.
    for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
      frame_desc.clear();
      RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
                  cur->info, common_flags()->symbolize_vs_style,
                  common_flags()->strip_path_prefix);
      Printf("%s\n", frame_desc.data());
      if (dedup_frames-- > 0) {
        if (dedup_token.length())
          dedup_token.append("--");
        if (cur->info.function != nullptr)
          dedup_token.append(cur->info.function);
      }
    }
    // Release the symbolized frame list returned by SymbolizePC.
    frames->ClearAll();
  }
  // Always print a trailing empty line after stack trace.
  Printf("\n");
  if (dedup_token.length())
    Printf("DEDUP_TOKEN: %s\n", dedup_token.data());
}
// Top-level unwind entry point.  Handles trivial depths (0 and 1) without
// unwinding, then dispatches to the slow (context-based) unwinder or the
// fast frame-pointer unwinder depending on |request_fast_unwind| and
// platform support.
void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
                                uptr stack_top, uptr stack_bottom,
                                bool request_fast_unwind) {
  // Ensures all call sites get what they requested.
  CHECK_EQ(request_fast_unwind, WillUseFastUnwind(request_fast_unwind));
  top_frame_bp = (max_depth > 0) ? bp : 0;
  // Avoid doing any work for small max_depth.
  if (max_depth == 0) {
    size = 0;
    return;
  }
  if (max_depth == 1) {
    size = 1;
    trace_buffer[0] = pc;
    return;
  }
  if (!WillUseFastUnwind(request_fast_unwind)) {
#if SANITIZER_CAN_SLOW_UNWIND
    // With a signal context the slow unwinder can start from the exact
    // interrupted state; otherwise it unwinds from the current frame.
    if (context)
      UnwindSlow(pc, context, max_depth);
    else
      UnwindSlow(pc, max_depth);
#else
    UNREACHABLE("slow unwind requested but not available");
#endif
  } else {
    UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
  }
}
// Resolve the module containing |pc| and the in-module offset via the
// symbolizer.  On success, copies the module name (NUL-terminated, possibly
// truncated) into |module_name| when a buffer is provided, and stores the
// offset in |pc_offset|.  Returns true/false (as int, matching the exported
// C wrapper below).
static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
                                   uptr module_name_len, uptr *pc_offset) {
  const char *found_module_name = nullptr;
  bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(
      pc, &found_module_name, pc_offset);
  if (!ok) return false;
  if (module_name && module_name_len) {
    internal_strncpy(module_name, found_module_name, module_name_len);
    // Force NUL termination even if the name was truncated.
    module_name[module_name_len - 1] = '\x00';
  }
  return true;
}
} // namespace __sanitizer
using namespace __sanitizer;
extern "C" {
// C interface: symbolize |pc| and render every resulting frame with |fmt|
// (see RenderFrame) into |out_buf| as consecutive NUL-separated strings,
// with a final extra NUL.  Writes "<can't symbolize>" if symbolization
// fails.  Always NUL-terminates when out_buf_size > 0.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
                              uptr out_buf_size) {
  if (!out_buf_size) return;
  // PCs are return addresses; step back to land inside the call.
  pc = StackTrace::GetPreviousInstructionPc(pc);
  SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
  if (!frame) {
    internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
    out_buf[out_buf_size - 1] = 0;
    return;
  }
  InternalScopedString frame_desc(GetPageSizeCached());
  uptr frame_num = 0;
  // Reserve one byte for the final 0.
  char *out_end = out_buf + out_buf_size - 1;
  for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
       cur = cur->next) {
    frame_desc.clear();
    RenderFrame(&frame_desc, fmt, frame_num++, cur->info,
                common_flags()->symbolize_vs_style,
                common_flags()->strip_path_prefix);
    if (!frame_desc.length())
      continue;
    // Reserve one byte for the terminating 0.
    uptr n = out_end - out_buf - 1;
    internal_strncpy(out_buf, frame_desc.data(), n);
    out_buf += __sanitizer::Min<uptr>(n, frame_desc.length());
    *out_buf++ = 0;
  }
  CHECK(out_buf <= out_end);
  // NOTE(review): unlike StackTrace::Print, the SymbolizedStack list is not
  // released here via ClearAll() — confirm whether this is an intentional
  // ownership difference or a leak.
  *out_buf = 0;
}
// C interface: symbolize the global variable containing |data_addr| and
// render it with |fmt| (see RenderData) into |out_buf|.  Leaves an empty
// string when symbolization fails; the result is always NUL-terminated
// when out_buf_size > 0.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
                                  char *out_buf, uptr out_buf_size) {
  if (!out_buf_size) return;
  out_buf[0] = 0;
  DataInfo DI;
  if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
  InternalScopedString data_desc(GetPageSizeCached());
  RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
  internal_strncpy(out_buf, data_desc.data(), out_buf_size);
  // Force NUL termination even if the description was truncated.
  out_buf[out_buf_size - 1] = 0;
}
// C interface: thin wrapper around __sanitizer::GetModuleAndOffsetForPc
// (see above for the contract).
SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_module_and_offset_for_pc(uptr pc, char *module_name,
                                             uptr module_name_len,
                                             uptr *pc_offset) {
  return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,
                                              pc_offset);
}
} // extern "C"

View File

@@ -0,0 +1,263 @@
//===-- sanitizer_common.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers' run-time libraries.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_file.h"
#include "sanitizer_fuchsia.h"
namespace __sanitizer {
// sanitizer_symbolizer_markup.cpp implements these differently.
#if !SANITIZER_SYMBOLIZER_MARKUP
static const char *StripFunctionName(const char *function, const char *prefix) {
if (!function) return nullptr;
if (!prefix) return function;
uptr prefix_len = internal_strlen(prefix);
if (0 == internal_strncmp(function, prefix, prefix_len))
return function + prefix_len;
return function;
}
// Translate NetBSD's internal libc threading aliases (e.g. __libc_mutex_init)
// back to the public pthread names; on other platforms the name is returned
// unchanged.  A null input yields null.
static const char *DemangleFunctionName(const char *function) {
  if (!function) return nullptr;
  // NetBSD uses indirection for old threading functions for historical reasons
  // The mangled names are internal implementation detail and should not be
  // exposed even in backtraces.
#if SANITIZER_NETBSD
  static const struct {
    const char *internal_name;
    const char *public_name;
  } kPthreadAliases[] = {
      {"__libc_mutex_init", "pthread_mutex_init"},
      {"__libc_mutex_lock", "pthread_mutex_lock"},
      {"__libc_mutex_trylock", "pthread_mutex_trylock"},
      {"__libc_mutex_unlock", "pthread_mutex_unlock"},
      {"__libc_mutex_destroy", "pthread_mutex_destroy"},
      {"__libc_mutexattr_init", "pthread_mutexattr_init"},
      {"__libc_mutexattr_settype", "pthread_mutexattr_settype"},
      {"__libc_mutexattr_destroy", "pthread_mutexattr_destroy"},
      {"__libc_cond_init", "pthread_cond_init"},
      {"__libc_cond_signal", "pthread_cond_signal"},
      {"__libc_cond_broadcast", "pthread_cond_broadcast"},
      {"__libc_cond_wait", "pthread_cond_wait"},
      {"__libc_cond_timedwait", "pthread_cond_timedwait"},
      {"__libc_cond_destroy", "pthread_cond_destroy"},
      {"__libc_rwlock_init", "pthread_rwlock_init"},
      {"__libc_rwlock_rdlock", "pthread_rwlock_rdlock"},
      {"__libc_rwlock_wrlock", "pthread_rwlock_wrlock"},
      {"__libc_rwlock_tryrdlock", "pthread_rwlock_tryrdlock"},
      {"__libc_rwlock_trywrlock", "pthread_rwlock_trywrlock"},
      {"__libc_rwlock_unlock", "pthread_rwlock_unlock"},
      {"__libc_rwlock_destroy", "pthread_rwlock_destroy"},
      {"__libc_thr_keycreate", "pthread_key_create"},
      {"__libc_thr_setspecific", "pthread_setspecific"},
      {"__libc_thr_getspecific", "pthread_getspecific"},
      {"__libc_thr_keydelete", "pthread_key_delete"},
      {"__libc_thr_once", "pthread_once"},
      {"__libc_thr_self", "pthread_self"},
      {"__libc_thr_exit", "pthread_exit"},
      {"__libc_thr_setcancelstate", "pthread_setcancelstate"},
      {"__libc_thr_equal", "pthread_equal"},
      {"__libc_thr_curcpu", "pthread_curcpu_np"},
      {"__libc_thr_sigsetmask", "pthread_sigmask"},
  };
  for (uptr i = 0; i < sizeof(kPthreadAliases) / sizeof(kPthreadAliases[0]);
       i++) {
    if (!internal_strcmp(function, kPthreadAliases[i].internal_name))
      return kPthreadAliases[i].public_name;
  }
#endif
  return function;
}
static const char kDefaultFormat[] = " #%n %p %F %L";
// Render one symbolized frame into |buffer| according to |format|.
// Supported specifiers:
//   %% literal '%'   %n frame number   %p PC            %m module path
//   %o module offset %f function name  %q function offset
//   %s file path     %l line           %c column
//   %F "in <function>[+offset]"  %S source location  %L source-or-module
//   %M module basename+offset (or raw PC)
// Unknown specifiers are fatal (Die()).  "DEFAULT" selects kDefaultFormat.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
                 const AddressInfo &info, bool vs_style,
                 const char *strip_path_prefix, const char *strip_func_prefix) {
  if (0 == internal_strcmp(format, "DEFAULT"))
    format = kDefaultFormat;
  for (const char *p = format; *p != '\0'; p++) {
    // Non-specifier characters are copied through verbatim.
    if (*p != '%') {
      buffer->append("%c", *p);
      continue;
    }
    p++;
    switch (*p) {
      case '%':
        buffer->append("%%");
        break;
      // Frame number and all fields of AddressInfo structure.
      case 'n':
        buffer->append("%zu", frame_no);
        break;
      case 'p':
        buffer->append("0x%zx", info.address);
        break;
      case 'm':
        buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
        break;
      case 'o':
        buffer->append("0x%zx", info.module_offset);
        break;
      case 'f':
        buffer->append("%s",
                       DemangleFunctionName(
                           StripFunctionName(info.function, strip_func_prefix)));
        break;
      case 'q':
        buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
                                    ? info.function_offset
                                    : 0x0);
        break;
      case 's':
        buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
        break;
      case 'l':
        buffer->append("%d", info.line);
        break;
      case 'c':
        buffer->append("%d", info.column);
        break;
      // Smarter special cases.
      case 'F':
        // Function name and offset, if file is unknown.
        if (info.function) {
          buffer->append("in %s",
                         DemangleFunctionName(
                             StripFunctionName(info.function, strip_func_prefix)));
          if (!info.file && info.function_offset != AddressInfo::kUnknown)
            buffer->append("+0x%zx", info.function_offset);
        }
        break;
      case 'S':
        // File/line information.
        RenderSourceLocation(buffer, info.file, info.line, info.column, vs_style,
                             strip_path_prefix);
        break;
      case 'L':
        // Source location, or module location.
        if (info.file) {
          RenderSourceLocation(buffer, info.file, info.line, info.column,
                               vs_style, strip_path_prefix);
        } else if (info.module) {
          RenderModuleLocation(buffer, info.module, info.module_offset,
                               info.module_arch, strip_path_prefix);
        } else {
          buffer->append("(<unknown module>)");
        }
        break;
      case 'M':
        // Module basename and offset, or PC.
        if (info.address & kExternalPCBit)
          {}  // These PCs are not meaningful.
        else if (info.module)
          // Always strip the module name for %M.
          RenderModuleLocation(buffer, StripModuleName(info.module),
                               info.module_offset, info.module_arch, "");
        else
          buffer->append("(%p)", (void *)info.address);
        break;
      default:
        Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
               *p);
        Die();
    }
  }
}
// Render symbolized global-variable info into |buffer| according to |format|.
// Supported specifiers: %% literal percent, %s file path (with
// |strip_path_prefix| removed), %l line number, %g global name.
// Unknown specifiers are fatal (Die()).
void RenderData(InternalScopedString *buffer, const char *format,
                const DataInfo *DI, const char *strip_path_prefix) {
  for (const char *p = format; *p != '\0'; p++) {
    // Non-specifier characters are copied through verbatim.
    if (*p != '%') {
      buffer->append("%c", *p);
      continue;
    }
    p++;
    switch (*p) {
      case '%':
        buffer->append("%%");
        break;
      case 's':
        buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
        break;
      case 'l':
        buffer->append("%d", DI->line);
        break;
      case 'g':
        buffer->append("%s", DI->name);
        break;
      default:
        Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
               *p);
        Die();
    }
  }
}
#endif // !SANITIZER_SYMBOLIZER_MARKUP
// Append a source location to |buffer|.  Visual Studio style renders
// "file(line,column)"; the default style renders "file:line:column".
// Line/column parts are omitted when not positive.
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
                          int line, int column, bool vs_style,
                          const char *strip_path_prefix) {
  const char *stripped_file = StripPathPrefix(file, strip_path_prefix);
  if (vs_style && line > 0) {
    buffer->append("%s(%d", stripped_file, line);
    if (column > 0)
      buffer->append(",%d", column);
    buffer->append(")");
  } else {
    buffer->append("%s", stripped_file);
    if (line > 0) {
      buffer->append(":%d", line);
      if (column > 0)
        buffer->append(":%d", column);
    }
  }
}
// Append a module location to |buffer|: "(module[:arch]+0xoffset)".  The
// architecture part is included only when |arch| is known.
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
                          uptr offset, ModuleArch arch,
                          const char *strip_path_prefix) {
  const char *stripped_module = StripPathPrefix(module, strip_path_prefix);
  const bool have_arch = (arch != kModuleArchUnknown);
  buffer->append("(%s", stripped_module);
  if (have_arch)
    buffer->append(":%s", ModuleArchToString(arch));
  buffer->append("+0x%zx)", offset);
}
} // namespace __sanitizer

View File

@@ -0,0 +1,85 @@
//===-- sanitizer_stacktrace_sparc.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//
// Implementation of fast stack unwinding for Sparc.
//===----------------------------------------------------------------------===//
#if defined(__sparc__)
#if defined(__arch64__) || defined(__sparcv9)
#define STACK_BIAS 2047
#else
#define STACK_BIAS 0
#endif
#include "sanitizer_common.h"
#include "sanitizer_stacktrace.h"
namespace __sanitizer {
// SPARC fast unwinder.  SPARC keeps return addresses in register windows
// rather than in the stack frame, so the windows are flushed to memory first
// and return addresses are read from the previous frame's register save
// area (slots 14 = saved fp, 15 = saved return address).
void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
                                    uptr stack_bottom, u32 max_depth) {
  // TODO(yln): add arg sanity check for stack_top/stack_bottom
  CHECK_GE(max_depth, 2);
  const uptr kPageSize = GetPageSizeCached();
#if defined(__GNUC__)
  // __builtin_return_address returns the address of the call instruction
  // on the SPARC and not the return address, so we need to compensate.
  trace_buffer[0] = GetNextInstructionPc(pc);
#else
  trace_buffer[0] = pc;
#endif
  size = 1;
  if (stack_top < 4096) return;  // Sanity check for stack top.
  // Flush register windows to memory
#if defined(__sparc_v9__) || defined(__sparcv9__) || defined(__sparcv9)
  asm volatile("flushw" ::: "memory");
#else
  asm volatile("ta 3" ::: "memory");
#endif
  // On the SPARC, the return address is not in the frame, it is in a
  // register.  There is no way to access it off of the current frame
  // pointer, but it can be accessed off the previous frame pointer by
  // reading the value from the register window save area.
  uptr prev_bp = GET_CURRENT_FRAME();
  uptr next_bp = prev_bp;
  unsigned int i = 0;
  // Walk our own frames (bounded to 8) until we reach the caller-supplied bp,
  // then start unwinding one frame earlier so its return address is visible.
  while (next_bp != bp && IsAligned(next_bp, sizeof(uhwptr)) && i++ < 8) {
    prev_bp = next_bp;
    next_bp = (uptr)((uhwptr *)next_bp)[14] + STACK_BIAS;
  }
  if (next_bp == bp)
    bp = prev_bp;
  // Lowest possible address that makes sense as the next frame pointer.
  // Goes up as we walk the stack.
  uptr bottom = stack_bottom;
  // Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
  while (IsValidFrame(bp, stack_top, bottom) && IsAligned(bp, sizeof(uhwptr)) &&
         size < max_depth) {
    uhwptr pc1 = ((uhwptr *)bp)[15];
    // Let's assume that any pointer in the 0th page is invalid and
    // stop unwinding here.  If we're adding support for a platform
    // where this isn't true, we need to reconsider this check.
    if (pc1 < kPageSize)
      break;
    if (pc1 != pc) {
      // %o7 contains the address of the call instruction and not the
      // return address, so we need to compensate.
      trace_buffer[size++] = GetNextInstructionPc((uptr)pc1);
    }
    bottom = bp;
    bp = (uptr)((uhwptr *)bp)[14] + STACK_BIAS;
  }
}
} // namespace __sanitizer
#endif  // defined(__sparc__)

View File

@@ -0,0 +1,573 @@
//===-- sanitizer_stoptheworld_linux_libcdep.cpp --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
defined(__aarch64__) || defined(__powerpc64__) || \
defined(__s390__) || defined(__i386__) || \
defined(__arm__))
#include "sanitizer_stoptheworld.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_atomic.h"
#include <errno.h>
#include <sched.h> // for CLONE_* definitions
#include <stddef.h>
#include <sys/prctl.h> // for PR_* definitions
#include <sys/ptrace.h> // for PTRACE_* definitions
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
#if defined(__aarch64__) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
#include <sys/user.h> // for user_regs_struct
#if SANITIZER_ANDROID && SANITIZER_MIPS
# include <asm/reg.h> // for mips SP register in sys/user.h
#endif
#include <sys/wait.h> // for signal-related stuff
#ifdef sa_handler
# undef sa_handler
#endif
#ifdef sa_sigaction
# undef sa_sigaction
#endif
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
// Sufficiently old kernel headers don't provide this value, but we can still
// call prctl with it. If the runtime kernel is new enough, the prctl call will
// have the desired effect; if the kernel is too old, the call will error and we
// can ignore said error.
#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61
#endif
// This module works by spawning a Linux task which then attaches to every
// thread in the caller process with ptrace. This suspends the threads, and
// PTRACE_GETREGS can then be used to obtain their register state. The callback
// supplied to StopTheWorld() is run in the tracer task while the threads are
// suspended.
// The tracer task must be placed in a different thread group for ptrace to
// work, so it cannot be spawned as a pthread. Instead, we use the low-level
// clone() interface (we want to share the address space with the caller
// process, so we prefer clone() over fork()).
//
// We don't use any libc functions, relying instead on direct syscalls. There
// are two reasons for this:
// 1. calling a library function while threads are suspended could cause a
// deadlock, if one of the threads happens to be holding a libc lock;
// 2. it's generally not safe to call libc functions from the tracer task,
// because clone() does not set up a thread-local storage for it. Any
// thread-local variables used by libc will be shared between the tracer task
// and the thread which spawned it.
namespace __sanitizer {
// Linux implementation of the suspended-threads list: records the tids of
// threads the tracer successfully attached to with ptrace, and exposes their
// register state to the StopTheWorld callback.
class SuspendedThreadsListLinux : public SuspendedThreadsList {
 public:
  SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }
  // Accessors over the recorded tids.
  tid_t GetThreadID(uptr index) const;
  uptr ThreadCount() const;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);
  // Reads the suspended thread's registers into |buffer| and its stack
  // pointer into |sp|.
  PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
                                          uptr *sp) const;
  uptr RegisterCount() const;
 private:
  InternalMmapVector<tid_t> thread_ids_;
};
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  // Callback to run (and its argument) while the world is stopped.
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  BlockingMutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  // Pid of the process that spawned the tracer; used to detect parent death.
  uptr parent_pid;
};
// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  // |pid| is the process whose threads will be suspended; must be >= 0.
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
    : arg(arg)
    , pid_(pid) {
      CHECK_GE(pid, 0);
    }
  // Attach to all threads of pid_; returns true if any were suspended.
  bool SuspendAllThreads();
  // Detach from (and thereby resume) all suspended threads.
  void ResumeAllThreads();
  // PTRACE_KILL every suspended thread; used on fatal tracer errors.
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  // Shared state with the parent thread (callback, done flag, mutex).
  TracerThreadArgument *arg;
 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;
  // Attach to a single thread; returns true on a new successful attach.
  bool SuspendThread(tid_t thread_id);
};
// Attach to one thread with PTRACE_ATTACH and wait for it to actually stop,
// forwarding any non-SIGSTOP signals that arrive first.  Returns true only
// when a new attachment was made; false if already attached or the attach
// or wait failed.
bool ThreadSuspender::SuspendThread(tid_t tid) {
  // Are we already attached to this thread?
  // Currently this check takes linear time, however the number of threads is
  // usually small.
  if (suspended_threads_list_.ContainsTid(tid)) return false;
  int pterrno;
  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
                       &pterrno)) {
    // Either the thread is dead, or something prevented us from attaching.
    // Log this event and move on.
    VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
            pterrno);
    return false;
  } else {
    VReport(2, "Attached to thread %zu.\n", (uptr)tid);
    // The thread is not guaranteed to stop before ptrace returns, so we must
    // wait on it. Note: if the thread receives a signal concurrently,
    // we can get notification about the signal before notification about stop.
    // In such case we need to forward the signal to the thread, otherwise
    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
    // any logic relying on signals will break. After forwarding we need to
    // continue to wait for stopping, because the thread is not stopped yet.
    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
    // as invisible as possible.
    for (;;) {
      int status;
      uptr waitpid_status;
      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
      int wperrno;
      if (internal_iserror(waitpid_status, &wperrno)) {
        // Got a ECHILD error. I don't think this situation is possible, but it
        // doesn't hurt to report it.
        VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
                (uptr)tid, wperrno);
        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
        return false;
      }
      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
        // Forward the pending signal and keep waiting for the stop.
        internal_ptrace(PTRACE_CONT, tid, nullptr,
                        (void*)(uptr)WSTOPSIG(status));
        continue;
      }
      break;
    }
    suspended_threads_list_.Append(tid);
    return true;
  }
}
// Detach (PTRACE_DETACH) from every suspended thread, which resumes it.
// Failures are only logged; see the inline comment for why they can occur.
void ThreadSuspender::ResumeAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
    pid_t tid = suspended_threads_list_.GetThreadID(i);
    int pterrno;
    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
                          &pterrno)) {
      VReport(2, "Detached from thread %d.\n", tid);
    } else {
      // Either the thread is dead, or we are already detached.
      // The latter case is possible, for instance, if this function was called
      // from a signal handler.
      VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
    }
  }
}
// PTRACE_KILL every suspended thread.  Called from the tracer's die callback
// and fatal-signal handler (see below), where the process is going down.
void ThreadSuspender::KillAllThreads() {
  for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
                    nullptr, nullptr);
}
// Attach to every thread of the target process.  Thread listing races with
// thread creation, so the /proc listing is re-read (up to 30 passes) until a
// pass is complete and makes no new attachments.  Returns true if at least
// one thread was suspended.
bool ThreadSuspender::SuspendAllThreads() {
  ThreadLister thread_lister(pid_);
  bool retry = true;
  InternalMmapVector<tid_t> threads;
  threads.reserve(128);
  for (int i = 0; i < 30 && retry; ++i) {
    retry = false;
    switch (thread_lister.ListThreads(&threads)) {
      case ThreadLister::Error:
        ResumeAllThreads();
        return false;
      case ThreadLister::Incomplete:
        // Listing was truncated; rescan to pick up the rest.
        retry = true;
        break;
      case ThreadLister::Ok:
        break;
    }
    for (tid_t tid : threads) {
      // A new attachment means new threads may have been spawned meanwhile;
      // do another pass.
      if (SuspendThread(tid))
        retry = true;
    }
  }
  // Non-zero thread count converts to true.
  return suspended_threads_list_.ThreadCount();
}
// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;
// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
SIGXCPU, SIGXFSZ };
// Die() callback installed while the tracer runs: if Die() fires inside the
// tracer process itself, kill all suspended threads before going down.
static void TracerThreadDieCallback() {
  // Generally a call to Die() in the tracer thread should be fatal to the
  // parent process as well, because they share the address space.
  // This really only works correctly if all the threads are suspended at this
  // point. So we correctly handle calls to Die() from within the callback, but
  // not those that happen before or after the callback. Hopefully there aren't
  // a lot of opportunities for that to happen...
  ThreadSuspender *inst = thread_suspender_instance;
  // Only act when running inside the tracer task itself.
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}
// Signal handler to wake up suspended threads when the tracer thread dies.
// On SIGABRT the suspended threads are killed (process is aborting); on other
// synchronous signals they are resumed.  Always exits the tracer task.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    // Let the parent know the tracer is finished (even though it failed).
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}
// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;
// This function will be run as a cloned task.
// Entry point of the cloned tracer task.  Waits for the parent to release
// the mutex, installs die/signal handlers, suspends all threads, runs the
// user callback, resumes the threads, and signals completion via |done|.
// Exit codes: 0 success, 3 suspend failure, 4 parent already dead.
static int TracerThread(void* argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;
  // Die together with the parent process.
  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);
  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();
  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;
  // Alternate stack for signal handling.
  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
  stack_t handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);
  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }
  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    // World is stopped: run the user callback on the suspended-thread list.
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  // Tell the parent we are done.
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}
// RAII stack region for the tracer task, with an inaccessible guard page at
// the low end (stacks grow down) so that overflow faults instead of
// silently corrupting adjacent memory.
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
                                   "ScopedStackWithGuard");
    // Protect the lowest page of the mapping as the guard.
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
  // Highest address of the region; passed to clone() as the initial stack
  // pointer (the stack grows down toward the guard page).
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }
 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;
// RAII scope that makes the process dumpable (and therefore ptrace-attachable)
// for the duration of StopTheWorld, restoring the previous setting on exit.
class StopTheWorldScope {
 public:
  StopTheWorldScope() {
    // Make this process dumpable. Processes that are not dumpable cannot be
    // attached to.
    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  }
  ~StopTheWorldScope() {
    // Restore the dumpable flag.
    if (!process_was_dumpable_)
      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  }
 private:
  // Saved PR_GET_DUMPABLE value from before the scope was entered.
  int process_was_dumpable_;
};
// When sanitizer output is being redirected to file (i.e. by using log_path),
// the tracer should write to the parent's log instead of trying to open a new
// file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  // Publish the tracer's pid (and our own, as its parent) in globals read by
  // the logging code.
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  // Clear the globals once the tracer is gone.
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
// Suspends all threads of the current process via a tracer task cloned with
// CLONE_VM, runs |callback| on the suspended-thread list, then resumes
// everything. See the signal-handling story in the comments below.
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  StopTheWorldScope in_stoptheworld;
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // Signal handling story.
  // We don't want async signals to be delivered to the tracer thread,
  // so we block all async signals before creating the thread. An async signal
  // handler can temporarily modify errno, which is shared with this thread.
  // We ought to use pthread_sigmask here, because sigprocmask has undefined
  // behavior in multithreaded programs. However, on linux sigprocmask is
  // equivalent to pthread_sigmask with the exception that pthread_sigmask
  // does not allow to block some signals used internally in pthread
  // implementation. We are fine with blocking them here, we are really not
  // going to pthread_cancel the thread.
  // The tracer thread should not raise any synchronous signals. But in case it
  // does, we setup a special handler for sync signals that properly kills the
  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
  // in the tracer thread won't interfere with user program. Double note: if a
  // user does something along the lines of 'kill -11 pid', that can kill the
  // process even if the user set up their own handler for SEGV.
  // Thing to watch out for: this code should not change behavior of user code
  // in any observable way. In particular it should not override user signal
  // handlers.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
  uptr tracer_pid = internal_clone(
      TracerThread, tracer_stack.Bottom(),
      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
      &tracer_thread_argument, nullptr /* parent_tidptr */,
      nullptr /* newtls */, nullptr /* child_tidptr */);
  // Restore the original signal mask now that the tracer has been created.
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // NOTE: errno is shared between this thread and the tracer thread.
    // internal_waitpid() may call syscall() which can access/spoil errno,
    // so we can't call it now. Instead we wait for the tracer thread to finish
    // using the spin loop below. Man page for sched_yield() says "In the Linux
    // implementation, sched_yield() always succeeds", so let's hope it does not
    // spoil errno. Note that this spin loop runs only for brief periods before
    // the tracer thread has suspended us and when it starts unblocking threads.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // Now the tracer thread is about to exit and does not touch errno,
    // wait for it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}
// Platform-specific methods from SuspendedThreadsList.
// Per-architecture register-file type and the name of its stack-pointer
// member, consumed by GetRegistersAndSP below.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp
#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]
#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif
#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]
#elif defined(__mips__)
typedef struct user regs_struct;
# if SANITIZER_ANDROID
#  define REG_SP regs[EF_R29]
# else
#  define REG_SP regs[EF_REG29]
# endif
#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
// Architectures that fetch registers via PTRACE_GETREGSET + iovec.
#define ARCH_IOVEC_FOR_GETREGSET
#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
#define ARCH_IOVEC_FOR_GETREGSET
#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
// Returns the OS thread id stored at |index|; |index| must be in range.
tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}
// Number of suspended threads recorded so far.
uptr SuspendedThreadsListLinux::ThreadCount() const {
  return thread_ids_.size();
}
// Linear scan over the recorded thread ids.
bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
  for (uptr i = 0; i < thread_ids_.size(); i++) {
    if (thread_ids_[i] == thread_id) return true;
  }
  return false;
}
// Records a newly-attached thread id.
void SuspendedThreadsListLinux::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}
// Reads the full register file of the suspended thread at |index| into
// |buffer| and stores its stack pointer in |*sp|.
PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
    uptr index, uptr *buffer, uptr *sp) const {
  pid_t tid = GetThreadID(index);
  regs_struct regs;
  int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
  struct iovec regset_io;
  regset_io.iov_base = &regs;
  regset_io.iov_len = sizeof(regs_struct);
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
                                (void*)NT_PRSTATUS, (void*)&regset_io),
                                &pterrno);
#else
  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
                                &regs), &pterrno);
#endif
  if (isErr) {
    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
            pterrno);
    // ESRCH means that the given thread is not suspended or already dead.
    // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
    // we should notify caller about this.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }
  *sp = regs.REG_SP;
  internal_memcpy(buffer, &regs, sizeof(regs));
  return REGISTERS_AVAILABLE;
}
// Size of the register file measured in uptr-sized words.
uptr SuspendedThreadsListLinux::RegisterCount() const {
  return sizeof(regs_struct) / sizeof(uptr);
}
} // namespace __sanitizer
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
// || defined(__aarch64__) || defined(__powerpc64__)
// || defined(__s390__) || defined(__i386__) || defined(__arm__)

View File

@@ -0,0 +1,364 @@
//===-- sanitizer_stoptheworld_netbsd_libcdep.cpp -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See sanitizer_stoptheworld.h for details.
// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
//
// This is a NetBSD variation of Linux stoptheworld implementation
// See sanitizer_stoptheworld_linux_libcdep.cpp for code comments.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_NETBSD
#include "sanitizer_stoptheworld.h"
#include "sanitizer_atomic.h"
#include "sanitizer_platform_limits_posix.h"
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <machine/reg.h>
#include <elf.h>
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <stddef.h>
#define internal_sigaction_norestorer internal_sigaction
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_linux.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
namespace __sanitizer {
// NetBSD suspended-thread list: a flat vector of LWP ids belonging to the
// traced (parent) process.
class SuspendedThreadsListNetBSD : public SuspendedThreadsList {
 public:
  SuspendedThreadsListNetBSD() { thread_ids_.reserve(1024); }
  tid_t GetThreadID(uptr index) const;
  uptr ThreadCount() const;
  bool ContainsTid(tid_t thread_id) const;
  void Append(tid_t tid);
  PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
                                          uptr *sp) const;
  uptr RegisterCount() const;
 private:
  InternalMmapVector<tid_t> thread_ids_;
};
// Data passed from StopTheWorld to the cloned TracerThread.
struct TracerThreadArgument {
  StopTheWorldCallback callback;
  void *callback_argument;
  // Locked by the parent before cloning; the tracer blocks on it until the
  // parent has finished its preparations.
  BlockingMutex mutex;
  // Set to 1 by the tracer when it is done; polled by the parent.
  atomic_uintptr_t done;
  uptr parent_pid;
};
// Attaches to the parent process with ptrace and keeps the list of its
// suspended LWPs.
class ThreadSuspender {
 public:
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
      : arg(arg), pid_(pid) {
    CHECK_GE(pid, 0);
  }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListNetBSD &suspended_threads_list() {
    return suspended_threads_list_;
  }
  TracerThreadArgument *arg;
 private:
  SuspendedThreadsListNetBSD suspended_threads_list_;
  pid_t pid_;
};
void ThreadSuspender::ResumeAllThreads() {
int pterrno;
if (!internal_iserror(internal_ptrace(PT_DETACH, pid_, (void *)(uptr)1, 0),
&pterrno)) {
VReport(2, "Detached from process %d.\n", pid_);
} else {
VReport(1, "Could not detach from process %d (errno %d).\n", pid_, pterrno);
}
}
// Last-resort cleanup: kill the traced process outright.
void ThreadSuspender::KillAllThreads() {
  internal_ptrace(PT_KILL, pid_, nullptr, 0);
}
// Attaches to the parent process (stopping it) and records every LWP id by
// iterating with PT_LWPNEXT/PT_LWPINFO.
bool ThreadSuspender::SuspendAllThreads() {
  int pterrno;
  if (internal_iserror(internal_ptrace(PT_ATTACH, pid_, nullptr, 0),
                       &pterrno)) {
    Printf("Could not attach to process %d (errno %d).\n", pid_, pterrno);
    return false;
  }
  int status;
  uptr waitpid_status;
  // Wait until the attach takes effect (the process reports a stop).
  HANDLE_EINTR(waitpid_status, internal_waitpid(pid_, &status, 0));
  VReport(2, "Attached to process %d.\n", pid_);
  // Newer NetBSD kernels provide PT_LWPNEXT; fall back to PT_LWPINFO.
#ifdef PT_LWPNEXT
  struct ptrace_lwpstatus pl;
  int op = PT_LWPNEXT;
#else
  struct ptrace_lwpinfo pl;
  int op = PT_LWPINFO;
#endif
  pl.pl_lwpid = 0;
  int val;
  // Iterate the LWP list; an lwpid of 0 marks the end of iteration.
  while ((val = ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
         pl.pl_lwpid != 0) {
    suspended_threads_list_.Append(pl.pl_lwpid);
    VReport(2, "Appended thread %d in process %d.\n", pl.pl_lwpid, pid_);
  }
  return true;
}
// Pointer to the ThreadSuspender instance for use in signal handler.
static ThreadSuspender *thread_suspender_instance = nullptr;
// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = {SIGABRT, SIGILL, SIGFPE, SIGSEGV,
                                   SIGBUS, SIGXCPU, SIGXFSZ};
// Die callback: if the tracer itself is dying, kill the traced process so it
// is not left suspended.
static void TracerThreadDieCallback() {
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
    inst->KillAllThreads();
    thread_suspender_instance = nullptr;
  }
}
// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
                                      void *uctx) {
  SignalContext ctx(siginfo, uctx);
  Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
         ctx.addr, ctx.pc, ctx.sp);
  ThreadSuspender *inst = thread_suspender_instance;
  if (inst) {
    // On SIGABRT kill the tracee; on other sync signals resume it first.
    if (signum == SIGABRT)
      inst->KillAllThreads();
    else
      inst->ResumeAllThreads();
    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
    thread_suspender_instance = nullptr;
    // Unblock the parent, which is spinning on |done|.
    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
  }
  internal__exit((signum == SIGABRT) ? 1 : 2);
}
// Size of alternative stack for signal handlers in the tracer thread.
static const int kHandlerStackSize = 8192;
// This function will be run as a cloned task.
static int TracerThread(void *argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;
  // Check if parent is already dead.
  if (internal_getppid() != tracer_thread_argument->parent_pid)
    internal__exit(4);
  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();
  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;
  // Alternate stack for signal handling.
  InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
  stack_t handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, nullptr);
  // Install our handler for synchronous signals. Other signals should be
  // blocked by the mask we inherited from the parent thread.
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
    __sanitizer_sigaction act;
    internal_memset(&act, 0, sizeof(act));
    act.sigaction = TracerThreadSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
  }
  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    VReport(1, "Failed suspending threads.\n");
    exit_code = 3;
  } else {
    // Run the user callback while the world is stopped, then resume it.
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
  thread_suspender_instance = nullptr;
  // Let the parent (spinning on |done|) know we are finished.
  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
  return exit_code;
}
// Mmap-backed stack for the cloned tracer task, with an inaccessible guard
// region at the low end of the mapping.
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ =
        (uptr)MmapOrDie(stack_size_ + guard_size_, "ScopedStackWithGuard");
    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
  // Highest address of the mapping; passed to clone as the stack pointer.
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }
 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};
// Kept in globals rather than on the stack; see the Linux implementation
// (sanitizer_stoptheworld_linux_libcdep.cpp) for the rationale.
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;
// Publishes the tracer pid so redirected logging writes to the parent's log;
// see the Linux implementation for details.
struct ScopedSetTracerPID {
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
// NetBSD analog of the Linux StopTheWorld; see
// sanitizer_stoptheworld_linux_libcdep.cpp for detailed comments.
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  tracer_thread_argument.parent_pid = internal_getpid();
  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Hold the tracer until preparations are complete.
  tracer_thread_argument.mutex.Lock();
  // Block all async signals; leave the synchronous ones deliverable.
  internal_sigfillset(&blocked_sigset);
  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(rv, 0);
  uptr tracer_pid = internal_clone(TracerThread, tracer_stack.Bottom(),
                                   CLONE_VM | CLONE_FS | CLONE_FILES,
                                   &tracer_thread_argument);
  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
  int local_errno = 0;
  if (internal_iserror(tracer_pid, &local_errno)) {
    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
    // Allow the tracer to start.
    tracer_thread_argument.mutex.Unlock();
    // Spin (rather than waitpid) while the tracer runs, to keep errno intact.
    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
      sched_yield();
    // The tracer no longer touches errno at this point; reap it.
    for (;;) {
      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
      if (!internal_iserror(waitpid_status, &local_errno))
        break;
      if (local_errno == EINTR)
        continue;
      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
              local_errno);
      break;
    }
  }
}
// Returns the LWP id stored at |index|; |index| must be in range.
tid_t SuspendedThreadsListNetBSD::GetThreadID(uptr index) const {
  CHECK_LT(index, thread_ids_.size());
  return thread_ids_[index];
}
// Number of suspended LWPs recorded so far.
uptr SuspendedThreadsListNetBSD::ThreadCount() const {
  return thread_ids_.size();
}
// Returns true if |thread_id| has already been recorded in this list.
bool SuspendedThreadsListNetBSD::ContainsTid(tid_t thread_id) const {
  const uptr count = thread_ids_.size();
  uptr pos = 0;
  while (pos < count) {
    if (thread_ids_[pos] == thread_id)
      return true;
    ++pos;
  }
  return false;
}
// Records a newly-discovered LWP id.
void SuspendedThreadsListNetBSD::Append(tid_t tid) {
  thread_ids_.push_back(tid);
}
// Reads the register file of the LWP at |index| (from the traced parent
// process) into |buffer| and stores its stack pointer in |*sp|.
PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
    uptr index, uptr *buffer, uptr *sp) const {
  lwpid_t tid = GetThreadID(index);
  pid_t ppid = internal_getppid();
  struct reg regs;
  int pterrno;
  bool isErr =
      internal_iserror(internal_ptrace(PT_GETREGS, ppid, &regs, tid), &pterrno);
  if (isErr) {
    VReport(1,
            "Could not get registers from process %d thread %d (errno %d).\n",
            ppid, tid, pterrno);
    // ESRCH means the thread is not suspended or already dead, so its data
    // cannot be inspected safely.
    return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
                            : REGISTERS_UNAVAILABLE;
  }
  *sp = PTRACE_REG_SP(&regs);
  internal_memcpy(buffer, &regs, sizeof(regs));
  return REGISTERS_AVAILABLE;
}
// Size of the register file measured in uptr-sized words.
uptr SuspendedThreadsListNetBSD::RegisterCount() const {
  return sizeof(struct reg) / sizeof(uptr);
}
} // namespace __sanitizer
#endif

View File

@@ -0,0 +1,135 @@
//===-- sanitizer_symbolizer.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
#include "sanitizer_platform.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
// Zero-initializes all fields and resets function_offset to kUnknown.
AddressInfo::AddressInfo() {
  internal_memset(this, 0, sizeof(AddressInfo));
  function_offset = kUnknown;
}
// Frees the owned strings and resets the struct to its initial state.
void AddressInfo::Clear() {
  InternalFree(module);
  InternalFree(function);
  InternalFree(file);
  internal_memset(this, 0, sizeof(AddressInfo));
  function_offset = kUnknown;
}
// Stores a private copy of |mod_name| plus the offset/arch of the address
// within that module.
void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
                                 ModuleArch mod_arch) {
  module = internal_strdup(mod_name);
  module_offset = mod_offset;
  module_arch = mod_arch;
}
SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
// Allocates (via the internal allocator) a single frame for |addr|.
SymbolizedStack *SymbolizedStack::New(uptr addr) {
  void *mem = InternalAlloc(sizeof(SymbolizedStack));
  SymbolizedStack *res = new(mem) SymbolizedStack();
  res->info.address = addr;
  return res;
}
void SymbolizedStack::ClearAll() {
info.Clear();
if (next)
next->ClearAll();
InternalFree(this);
}
// Zero-initializes all fields.
DataInfo::DataInfo() {
  internal_memset(this, 0, sizeof(DataInfo));
}
// Releases every heap string owned by this DataInfo and zeroes the struct.
void DataInfo::Clear() {
  InternalFree(name);
  InternalFree(file);
  InternalFree(module);
  internal_memset(this, 0, sizeof(DataInfo));
}
void FrameInfo::Clear() {
InternalFree(module);
for (LocalInfo &local : locals) {
InternalFree(local.function_name);
InternalFree(local.name);
InternalFree(local.decl_file);
}
locals.clear();
}
// Lazily-created singleton state, guarded by init_mu_ (see GetOrInit).
Symbolizer *Symbolizer::symbolizer_;
StaticSpinMutex Symbolizer::init_mu_;
LowLevelAllocator Symbolizer::symbolizer_allocator_;
// Forces the module list to be re-read on the next symbolization request.
void Symbolizer::InvalidateModuleList() {
  modules_fresh_ = false;
}
// Registers a pair of hooks run around each call into symbolizer tools.
// May be called at most once.
void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook,
                          Symbolizer::EndSymbolizationHook end_hook) {
  CHECK(start_hook_ == 0 && end_hook_ == 0);
  start_hook_ = start_hook;
  end_hook_ = end_hook;
}
const char *Symbolizer::ModuleNameOwner::GetOwnedCopy(const char *str) {
mu_->CheckLocked();
// 'str' will be the same string multiple times in a row, optimize this case.
if (last_match_ && !internal_strcmp(last_match_, str))
return last_match_;
// FIXME: this is linear search.
// We should optimize this further if this turns out to be a bottleneck later.
for (uptr i = 0; i < storage_.size(); ++i) {
if (!internal_strcmp(storage_[i], str)) {
last_match_ = storage_[i];
return last_match_;
}
}
last_match_ = internal_strdup(str);
storage_.push_back(last_match_);
return last_match_;
}
Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)
    : module_names_(&mu_), modules_(), modules_fresh_(false), tools_(tools),
      start_hook_(0), end_hook_(0) {}
// RAII wrapper invoking the user start/end symbolization hooks (if set).
Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
    : sym_(sym) {
  if (sym_->start_hook_)
    sym_->start_hook_();
}
Symbolizer::SymbolizerScope::~SymbolizerScope() {
  if (sym_->end_hook_)
    sym_->end_hook_();
}
// Gives each registered tool a chance to finish its initialization.
void Symbolizer::LateInitializeTools() {
  for (auto &tool : tools_) {
    tool.LateInitialize();
  }
}
} // namespace __sanitizer

View File

@@ -0,0 +1,209 @@
//===-- sanitizer_symbolizer_libbacktrace.cpp -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// Libbacktrace implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_symbolizer_libbacktrace.h"
#if SANITIZER_LIBBACKTRACE
# include "backtrace-supported.h"
# if SANITIZER_POSIX && BACKTRACE_SUPPORTED && !BACKTRACE_USES_MALLOC
# include "backtrace.h"
# if SANITIZER_CP_DEMANGLE
# undef ARRAY_SIZE
# include "demangle.h"
# endif
# else
# define SANITIZER_LIBBACKTRACE 0
# endif
#endif
namespace __sanitizer {
static char *DemangleAlloc(const char *name, bool always_alloc);
#if SANITIZER_LIBBACKTRACE
namespace {
# if SANITIZER_CP_DEMANGLE
// Accumulator buffer for the streaming demangle callback below.
struct CplusV3DemangleData {
  char *buf;
  uptr size, allocated;
};
extern "C" {
// Appends |l| bytes of |s| to the buffer, doubling the allocation (or
// growing straight to the needed size) when it runs out of room.
static void CplusV3DemangleCallback(const char *s, size_t l, void *vdata) {
  CplusV3DemangleData *data = (CplusV3DemangleData *)vdata;
  uptr needed = data->size + l + 1;
  if (needed > data->allocated) {
    data->allocated *= 2;
    if (needed > data->allocated)
      data->allocated = needed;
    char *buf = (char *)InternalAlloc(data->allocated);
    if (data->buf) {
      internal_memcpy(buf, data->buf, data->size);
      InternalFree(data->buf);
    }
    data->buf = buf;
  }
  internal_memcpy(data->buf + data->size, s, l);
  data->buf[data->size + l] = '\0';
  data->size += l;
}
}  // extern "C"
// Demangles |name| with the Itanium C++ demangler. Returns an
// InternalAlloc'd string owned by the caller, or null on failure.
char *CplusV3Demangle(const char *name) {
  CplusV3DemangleData data;
  data.buf = 0;
  data.size = 0;
  data.allocated = 0;
  if (cplus_demangle_v3_callback(name, DMGL_PARAMS | DMGL_ANSI,
                                 CplusV3DemangleCallback, &data)) {
    // If the buffer is not grossly oversized, hand it out as-is; otherwise
    // shrink it with a copy.
    if (data.size + 64 > data.allocated)
      return data.buf;
    char *buf = internal_strdup(data.buf);
    InternalFree(data.buf);
    return buf;
  }
  if (data.buf)
    InternalFree(data.buf);
  return 0;
}
# endif // SANITIZER_CP_DEMANGLE
// Shared state for the libbacktrace code-symbolization callbacks: the
// growing chain of result frames for a single address.
struct SymbolizeCodeCallbackArg {
  SymbolizedStack *first;
  SymbolizedStack *last;
  uptr frames_symbolized;
  // Returns the frame to fill for the next result: the original frame for
  // the first result, a freshly appended frame for each subsequent one
  // (additional results presumably correspond to inlined frames —
  // NOTE(review): confirm against libbacktrace semantics).
  AddressInfo *get_new_frame(uintptr_t addr) {
    CHECK(last);
    if (frames_symbolized > 0) {
      SymbolizedStack *cur = SymbolizedStack::New(addr);
      AddressInfo *info = &cur->info;
      info->FillModuleInfo(first->info.module, first->info.module_offset,
                           first->info.module_arch);
      last->next = cur;
      last = cur;
    }
    CHECK_EQ(addr, first->info.address);
    CHECK_EQ(addr, last->info.address);
    return &last->info;
  }
};
extern "C" {
// backtrace_pcinfo callback: records function/file/line for one frame.
static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,
                                       const char *filename, int lineno,
                                       const char *function) {
  SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
  if (function) {
    AddressInfo *info = cdata->get_new_frame(addr);
    info->function = DemangleAlloc(function, /*always_alloc*/ true);
    if (filename)
      info->file = internal_strdup(filename);
    info->line = lineno;
    cdata->frames_symbolized++;
  }
  return 0;
}
// backtrace_syminfo callback: fallback that records only the symbol name.
static void SymbolizeCodeCallback(void *vdata, uintptr_t addr,
                                  const char *symname, uintptr_t, uintptr_t) {
  SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
  if (symname) {
    AddressInfo *info = cdata->get_new_frame(addr);
    info->function = DemangleAlloc(symname, /*always_alloc*/ true);
    cdata->frames_symbolized++;
  }
}
// backtrace_syminfo callback for data symbols: fills name/start/size.
static void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname,
                                  uintptr_t symval, uintptr_t symsize) {
  DataInfo *info = (DataInfo *)vdata;
  if (!symname || !symval)
    return;
  info->name = DemangleAlloc(symname, /*always_alloc*/ true);
  info->start = symval;
  info->size = symsize;
}
// libbacktrace error callback: errors are deliberately ignored.
static void ErrorCallback(void *, const char *, int) {}
} // extern "C"
} // namespace
// Creates the libbacktrace-backed symbolizer, or returns null if the
// libbacktrace state cannot be initialized.
LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
  // State created in backtrace_create_state is leaked.
  void *state = (void *)(backtrace_create_state("/proc/self/exe", 0,
                                                ErrorCallback, NULL));
  if (!state)
    return 0;
  return new(*alloc) LibbacktraceSymbolizer(state);
}
// Symbolizes a code address, preferring debug info (pcinfo) and falling
// back to the symbol table (syminfo).
bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
  SymbolizeCodeCallbackArg data;
  data.first = stack;
  data.last = stack;
  data.frames_symbolized = 0;
  backtrace_pcinfo((backtrace_state *)state_, addr, SymbolizeCodePCInfoCallback,
                   ErrorCallback, &data);
  if (data.frames_symbolized > 0)
    return true;
  backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeCodeCallback,
                    ErrorCallback, &data);
  return (data.frames_symbolized > 0);
}
// Symbolizes a data address via the symbol table.
bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
  backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeDataCallback,
                    ErrorCallback, info);
  return true;
}
#else // SANITIZER_LIBBACKTRACE
// Stub implementations used when libbacktrace is not available.
LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
  return 0;
}
bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
  // state_ is unused in this configuration.
  (void)state_;
  return false;
}
bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
  return false;
}
#endif // SANITIZER_LIBBACKTRACE
// Demangles |name| if a demangler is compiled in; when |always_alloc| is
// set, always returns a fresh heap copy (even if demangling failed).
static char *DemangleAlloc(const char *name, bool always_alloc) {
#if SANITIZER_LIBBACKTRACE && SANITIZER_CP_DEMANGLE
  if (char *demangled = CplusV3Demangle(name))
    return demangled;
#endif
  if (always_alloc)
    return internal_strdup(name);
  return 0;
}
// Returns a demangled copy or null; never allocates for undemanglable names.
const char *LibbacktraceSymbolizer::Demangle(const char *name) {
  return DemangleAlloc(name, /*always_alloc*/ false);
}
} // namespace __sanitizer

View File

@@ -0,0 +1,554 @@
//===-- sanitizer_symbolizer_libcdep.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
// Returns the process-wide symbolizer, creating it on first use.
// init_mu_ makes concurrent first calls safe.
Symbolizer *Symbolizer::GetOrInit() {
  SpinMutexLock l(&init_mu_);
  if (symbolizer_)
    return symbolizer_;
  symbolizer_ = PlatformInit();
  CHECK(symbolizer_);
  return symbolizer_;
}
// See sanitizer_symbolizer_markup.cpp.
#if !SANITIZER_SYMBOLIZER_MARKUP
const char *ExtractToken(const char *str, const char *delims, char **result) {
uptr prefix_len = internal_strcspn(str, delims);
*result = (char*)InternalAlloc(prefix_len + 1);
internal_memcpy(*result, str, prefix_len);
(*result)[prefix_len] = '\0';
const char *prefix_end = str + prefix_len;
if (*prefix_end != '\0') prefix_end++;
return prefix_end;
}
// Parses the next delimited token of |str| as an int into *result and
// returns a pointer past the consumed token.
const char *ExtractInt(const char *str, const char *delims, int *result) {
  char *token = nullptr;
  const char *rest = ExtractToken(str, delims, &token);
  if (token)
    *result = (int)internal_atoll(token);
  InternalFree(token);
  return rest;
}
// Like ExtractInt, but parses the token as an unsigned pointer-sized value.
const char *ExtractUptr(const char *str, const char *delims, uptr *result) {
  char *buff = nullptr;
  const char *ret = ExtractToken(str, delims, &buff);
  if (buff) {
    *result = (uptr)internal_atoll(buff);
  }
  InternalFree(buff);
  return ret;
}
// Like ExtractInt, but parses the token as a signed pointer-sized value.
const char *ExtractSptr(const char *str, const char *delims, sptr *result) {
  char *buff = nullptr;
  const char *ret = ExtractToken(str, delims, &buff);
  if (buff) {
    *result = (sptr)internal_atoll(buff);
  }
  InternalFree(buff);
  return ret;
}
const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
char **result) {
const char *found_delimiter = internal_strstr(str, delimiter);
uptr prefix_len =
found_delimiter ? found_delimiter - str : internal_strlen(str);
*result = (char *)InternalAlloc(prefix_len + 1);
internal_memcpy(*result, str, prefix_len);
(*result)[prefix_len] = '\0';
const char *prefix_end = str + prefix_len;
if (*prefix_end != '\0') prefix_end += internal_strlen(delimiter);
return prefix_end;
}
// Symbolizes a code address. Always returns a frame; when the containing
// module is known, at least its name/offset are filled in, and the first
// tool that succeeds supplies the rest.
SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
  BlockingMutexLock l(&mu_);
  const char *module_name = nullptr;
  uptr module_offset;
  ModuleArch arch;
  SymbolizedStack *res = SymbolizedStack::New(addr);
  if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
                                         &arch))
    return res;
  // Always fill data about module name and offset.
  res->info.FillModuleInfo(module_name, module_offset, arch);
  for (auto &tool : tools_) {
    SymbolizerScope sym_scope(this);
    if (tool.SymbolizePC(addr, res)) {
      return res;
    }
  }
  return res;
}
// Symbolizes a data address; returns false only when no module contains it.
bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
  BlockingMutexLock l(&mu_);
  const char *module_name = nullptr;
  uptr module_offset;
  ModuleArch arch;
  if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
                                         &arch))
    return false;
  info->Clear();
  info->module = internal_strdup(module_name);
  info->module_offset = module_offset;
  info->module_arch = arch;
  for (auto &tool : tools_) {
    SymbolizerScope sym_scope(this);
    if (tool.SymbolizeData(addr, info)) {
      return true;
    }
  }
  return true;
}
// Symbolizes frame-local variable info for an address; returns false only
// when no module contains the address.
bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
  BlockingMutexLock l(&mu_);
  const char *module_name = nullptr;
  if (!FindModuleNameAndOffsetForAddress(
          addr, &module_name, &info->module_offset, &info->module_arch))
    return false;
  info->module = internal_strdup(module_name);
  for (auto &tool : tools_) {
    SymbolizerScope sym_scope(this);
    if (tool.SymbolizeFrame(addr, info)) {
      return true;
    }
  }
  return true;
}
// Maps |pc| to its module name (string owned by the symbolizer) and
// in-module offset. Returns false if no loaded module contains |pc|.
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
                                             uptr *module_address) {
  BlockingMutexLock l(&mu_);
  const char *internal_module_name = nullptr;
  ModuleArch arch;
  if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
                                         module_address, &arch))
    return false;
  if (module_name)
    *module_name = module_names_.GetOwnedCopy(internal_module_name);
  return true;
}
// Flushes all registered symbolizer tools.
void Symbolizer::Flush() {
  BlockingMutexLock l(&mu_);
  for (auto &tool : tools_) {
    SymbolizerScope sym_scope(this);
    tool.Flush();
  }
}
// Demangles via the first tool that can; falls back to the platform
// demangler.
const char *Symbolizer::Demangle(const char *name) {
  BlockingMutexLock l(&mu_);
  for (auto &tool : tools_) {
    SymbolizerScope sym_scope(this);
    if (const char *demangled = tool.Demangle(name))
      return demangled;
  }
  return PlatformDemangle(name);
}
// Resolves |address| to the name, in-module offset, and architecture of the
// loaded module containing it; returns false if no module matches.
bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
                                                   const char **module_name,
                                                   uptr *module_offset,
                                                   ModuleArch *module_arch) {
  const LoadedModule *containing = FindModuleForAddress(address);
  if (containing == nullptr)
    return false;
  *module_name = containing->full_name();
  *module_offset = address - containing->base_address();
  *module_arch = containing->arch();
  return true;
}
// Re-reads the process module list (and the fallback list) from the OS.
void Symbolizer::RefreshModules() {
  modules_.init();
  fallback_modules_.fallbackInit();
  RAW_CHECK(modules_.size() > 0);
  modules_fresh_ = true;
}
// Linear scan of |modules| for the one containing |address|; null if none.
static const LoadedModule *SearchForModule(const ListOfModules &modules,
                                           uptr address) {
  const uptr total = modules.size();
  for (uptr idx = 0; idx != total; ++idx) {
    if (modules[idx].containsAddress(address))
      return &modules[idx];
  }
  return nullptr;
}
// Finds the loaded module containing |address|, refreshing the module list
// when it is stale; consults the fallback list as a last resort.
const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
  bool modules_were_reloaded = false;
  if (!modules_fresh_) {
    RefreshModules();
    modules_were_reloaded = true;
  }
  const LoadedModule *module = SearchForModule(modules_, address);
  if (module) return module;
  // dlopen/dlclose interceptors invalidate the module list, but when
  // interception is disabled, we need to retry if the lookup fails in
  // case the module list changed.
#if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
  if (!modules_were_reloaded) {
    RefreshModules();
    module = SearchForModule(modules_, address);
    if (module) return module;
  }
#endif
  if (fallback_modules_.size()) {
    module = SearchForModule(fallback_modules_, address);
  }
  return module;
}
// For now we assume the following protocol:
// For each request of the form
//   <module_name> <module_offset>
// passed to STDIN, external symbolizer prints to STDOUT response:
//   <function_name>
//   <file_name>:<line_number>:<column_number>
//   <function_name>
//   <file_name>:<line_number>:<column_number>
//   ...
//   <empty line>
// Subprocess wrapper that talks the above protocol to llvm-symbolizer.
class LLVMSymbolizerProcess : public SymbolizerProcess {
 public:
  explicit LLVMSymbolizerProcess(const char *path)
      : SymbolizerProcess(path, /*use_posix_spawn=*/SANITIZER_MAC) {}

 private:
  // llvm-symbolizer ends each response with a blank line, so output is
  // complete once the buffer ends in two consecutive newlines.
  bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
    // Empty line marks the end of llvm-symbolizer output.
    return length >= 2 && buffer[length - 1] == '\n' &&
           buffer[length - 2] == '\n';
  }

  // Builds the argv used to launch llvm-symbolizer. The architecture flag is
  // fixed at compile time from the host compiler's predefined macros.
  // When adding a new architecture, don't forget to also update
  // script/asan_symbolize.py and sanitizer_common.h.
  void GetArgV(const char *path_to_binary,
               const char *(&argv)[kArgVMax]) const override {
#if defined(__x86_64h__)
    const char* const kSymbolizerArch = "--default-arch=x86_64h";
#elif defined(__x86_64__)
    const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__)
    const char* const kSymbolizerArch = "--default-arch=i386";
#elif defined(__aarch64__)
    const char* const kSymbolizerArch = "--default-arch=arm64";
#elif defined(__arm__)
    const char* const kSymbolizerArch = "--default-arch=arm";
#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    const char* const kSymbolizerArch = "--default-arch=powerpc64";
#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
    const char* const kSymbolizerArch = "--default-arch=powerpc64le";
#elif defined(__s390x__)
    const char* const kSymbolizerArch = "--default-arch=s390x";
#elif defined(__s390__)
    const char* const kSymbolizerArch = "--default-arch=s390";
#else
    const char* const kSymbolizerArch = "--default-arch=unknown";
#endif

    const char *const inline_flag = common_flags()->symbolize_inline_frames
                                        ? "--inlining=true"
                                        : "--inlining=false";
    int i = 0;
    argv[i++] = path_to_binary;
    argv[i++] = inline_flag;
    argv[i++] = kSymbolizerArch;
    argv[i++] = nullptr;
  }
};
// The subprocess object is placement-allocated from the caller's
// LowLevelAllocator and never freed.
LLVMSymbolizer::LLVMSymbolizer(const char *path, LowLevelAllocator *allocator)
    : symbolizer_process_(new(*allocator) LLVMSymbolizerProcess(path)) {}

// Parse a <file>:<line>[:<column>] buffer. The file path may contain colons on
// Windows, so extract tokens from the right hand side first. The column info is
// also optional. Returns `str` advanced past the consumed line.
static const char *ParseFileLineInfo(AddressInfo *info, const char *str) {
  char *file_line_info = nullptr;
  str = ExtractToken(str, "\n", &file_line_info);
  CHECK(file_line_info);

  if (uptr size = internal_strlen(file_line_info)) {
    char *back = file_line_info + size - 1;
    // Peel off up to two trailing ":<digits>" groups, scanning backwards so
    // colons inside the file path are untouched. The rightmost group lands in
    // `line` first; if a second group exists, the first value is shifted into
    // `column` and the new one becomes `line`.
    for (int i = 0; i < 2; ++i) {
      while (back > file_line_info && IsDigit(*back)) --back;
      if (*back != ':' || !IsDigit(back[1])) break;
      info->column = info->line;
      info->line = internal_atoll(back + 1);
      // Truncate the string at the colon to keep only filename.
      *back = '\0';
      --back;
    }
    ExtractToken(file_line_info, "", &info->file);
  }

  InternalFree(file_line_info);
  return str;
}
// Parses one or more two-line strings in the following format:
//   <function_name>
//   <file_name>:<line_number>[:<column_number>]
// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of
// them use the same output format. Extra pairs beyond the first describe
// inlined frames and are chained onto `res` as a linked list.
void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {
  bool top_frame = true;
  SymbolizedStack *last = res;
  while (true) {
    char *function_name = nullptr;
    str = ExtractToken(str, "\n", &function_name);
    CHECK(function_name);
    if (function_name[0] == '\0') {
      // There are no more frames.
      InternalFree(function_name);
      break;
    }
    SymbolizedStack *cur;
    if (top_frame) {
      // First frame reuses the caller-provided node.
      cur = res;
      top_frame = false;
    } else {
      // Inlined frame: new node sharing the same address/module info.
      cur = SymbolizedStack::New(res->info.address);
      cur->info.FillModuleInfo(res->info.module, res->info.module_offset,
                               res->info.module_arch);
      last->next = cur;
      last = cur;
    }
    AddressInfo *info = &cur->info;
    // Ownership of the heap-allocated name transfers to `info`.
    info->function = function_name;
    str = ParseFileLineInfo(info, str);

    // Functions and filenames can be "??", in which case we write 0
    // to address info to mark that names are unknown.
    if (0 == internal_strcmp(info->function, "??")) {
      InternalFree(info->function);
      info->function = 0;
    }
    if (0 == internal_strcmp(info->file, "??")) {
      InternalFree(info->file);
      info->file = 0;
    }
  }
}

// Parses a two-line string in the following format:
//   <symbol_name>
//   <start_address> <size>
// Used by LLVMSymbolizer and InternalSymbolizer.
void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
  str = ExtractToken(str, "\n", &info->name);
  str = ExtractUptr(str, " ", &info->start);
  str = ExtractUptr(str, "\n", &info->size);
}

// Parses the FRAME response: repeated groups of
//   <function_name>\n<local_name>\n<file:line>\n<frame_offset> <size> <tag_offset>\n
// A leading "??" means no local-variable info at all; a "??" in front of any
// numeric field marks just that field as unavailable.
static void ParseSymbolizeFrameOutput(const char *str,
                                      InternalMmapVector<LocalInfo> *locals) {
  if (internal_strncmp(str, "??", 2) == 0)
    return;
  while (*str) {
    LocalInfo local;
    str = ExtractToken(str, "\n", &local.function_name);
    str = ExtractToken(str, "\n", &local.name);

    AddressInfo addr;
    str = ParseFileLineInfo(&addr, str);
    local.decl_file = addr.file;
    local.decl_line = addr.line;

    // The has_* flags are decided by peeking at "??" before each extraction.
    local.has_frame_offset = internal_strncmp(str, "??", 2) != 0;
    str = ExtractSptr(str, " ", &local.frame_offset);

    local.has_size = internal_strncmp(str, "??", 2) != 0;
    str = ExtractUptr(str, " ", &local.size);

    local.has_tag_offset = internal_strncmp(str, "??", 2) != 0;
    str = ExtractUptr(str, "\n", &local.tag_offset);

    locals->push_back(local);
  }
}
// Sends a CODE request for the stack's module/offset and fills `stack`
// (including inlined frames) from the response. Returns false when the
// subprocess produced no output.
bool LLVMSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
  AddressInfo *info = &stack->info;
  const char *response = FormatAndSendCommand("CODE", info->module,
                                              info->module_offset,
                                              info->module_arch);
  if (response == nullptr)
    return false;
  ParseSymbolizePCOutput(response, stack);
  return true;
}
// Sends a DATA request and fills `info` from the response. The returned
// start address is module-relative, so the module base is added back in.
bool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
  const char *response = FormatAndSendCommand("DATA", info->module,
                                              info->module_offset,
                                              info->module_arch);
  if (response == nullptr)
    return false;
  ParseSymbolizeDataOutput(response, info);
  info->start += (addr - info->module_offset);  // Add the base address.
  return true;
}
// Sends a FRAME request and fills the local-variable list of `info` from the
// response. Returns false when the subprocess produced no output.
bool LLVMSymbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
  const char *response = FormatAndSendCommand("FRAME", info->module,
                                              info->module_offset,
                                              info->module_arch);
  if (response == nullptr)
    return false;
  ParseSymbolizeFrameOutput(response, &info->locals);
  return true;
}
// Formats one request line — `<PREFIX> "<module>[:<arch>]" 0x<offset>` —
// into the shared buffer_ and forwards it to the subprocess. Returns the
// subprocess response, or nullptr when the command did not fit in buffer_.
const char *LLVMSymbolizer::FormatAndSendCommand(const char *command_prefix,
                                                 const char *module_name,
                                                 uptr module_offset,
                                                 ModuleArch arch) {
  CHECK(module_name);
  int written;
  if (arch == kModuleArchUnknown) {
    // No arch suffix when the architecture is unknown.
    written = internal_snprintf(buffer_, kBufferSize, "%s \"%s\" 0x%zx\n",
                                command_prefix, module_name, module_offset);
  } else {
    written = internal_snprintf(buffer_, kBufferSize,
                                "%s \"%s:%s\" 0x%zx\n", command_prefix,
                                module_name, ModuleArchToString(arch),
                                module_offset);
  }
  if (written >= static_cast<int>(kBufferSize)) {
    Report("WARNING: Command buffer too small");
    return nullptr;
  }
  return symbolizer_process_->SendCommand(buffer_);
}
// Stores the symbolizer binary path and starts with both pipe fds invalid;
// the subprocess itself is launched lazily via Restart() on the first
// SendCommand(). `path` must be a non-empty string.
SymbolizerProcess::SymbolizerProcess(const char *path, bool use_posix_spawn)
    : path_(path),
      input_fd_(kInvalidFd),
      output_fd_(kInvalidFd),
      times_restarted_(0),
      failed_to_start_(false),
      reported_invalid_path_(false),
      use_posix_spawn_(use_posix_spawn) {
  CHECK(path_);
  CHECK_NE(path_[0], '\0');
}
static bool IsSameModule(const char* path) {
if (const char* ProcessName = GetProcessName()) {
if (const char* SymbolizerName = StripModuleName(path)) {
return !internal_strcmp(ProcessName, SymbolizerName);
}
}
return false;
}
// Sends `command` to the external symbolizer, (re)starting it on failure.
// After the process has permanently failed, all further calls short-circuit
// to nullptr.
const char *SymbolizerProcess::SendCommand(const char *command) {
  if (failed_to_start_)
    return nullptr;
  // Refuse to launch a symbolizer binary that is this very process.
  if (IsSameModule(path_)) {
    Report("WARNING: Symbolizer was blocked from starting itself!\n");
    failed_to_start_ = true;
    return nullptr;
  }
  // times_restarted_ persists across calls, so the restart budget
  // (kMaxTimesRestarted) is bounded over the process lifetime, not per
  // command.
  for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
    // Start or restart symbolizer if we failed to send command to it.
    if (const char *res = SendCommandImpl(command))
      return res;
    Restart();
  }
  if (!failed_to_start_) {
    Report("WARNING: Failed to use and restart external symbolizer!\n");
    failed_to_start_ = true;
  }
  return nullptr;
}
const char *SymbolizerProcess::SendCommandImpl(const char *command) {
if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
return nullptr;
if (!WriteToSymbolizer(command, internal_strlen(command)))
return nullptr;
if (!ReadFromSymbolizer(buffer_, kBufferSize))
return nullptr;
return buffer_;
}
// Closes any live pipe ends from the previous incarnation and spawns a fresh
// symbolizer subprocess.
bool SymbolizerProcess::Restart() {
  if (input_fd_ != kInvalidFd)
    CloseFile(input_fd_);
  if (output_fd_ != kInvalidFd)
    CloseFile(output_fd_);
  return StartSymbolizerSubprocess();
}

// Reads the symbolizer's response into `buffer` until ReachedEndOfOutput()
// reports a complete response. One byte is reserved for the NUL terminator.
// On buffer overflow the response is discarded (read_len reset to 0) but the
// call still returns true with an empty buffer.
bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {
  if (max_length == 0)
    return true;
  uptr read_len = 0;
  while (true) {
    uptr just_read = 0;
    bool success = ReadFromFile(input_fd_, buffer + read_len,
                                max_length - read_len - 1, &just_read);
    // We can't read 0 bytes, as we don't expect external symbolizer to close
    // its stdout.
    if (!success || just_read == 0) {
      Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
      return false;
    }
    read_len += just_read;
    if (ReachedEndOfOutput(buffer, read_len))
      break;
    if (read_len + 1 == max_length) {
      Report("WARNING: Symbolizer buffer too small\n");
      read_len = 0;
      break;
    }
  }
  buffer[read_len] = '\0';
  return true;
}
bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) {
if (length == 0)
return true;
uptr write_len = 0;
bool success = WriteToFile(output_fd_, buffer, length, &write_len);
if (!success || write_len != length) {
Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
return false;
}
return true;
}
#endif // !SANITIZER_SYMBOLIZER_MARKUP
} // namespace __sanitizer

View File

@@ -0,0 +1,249 @@
//===-- sanitizer_symbolizer_mac.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries.
//
// Implementation of Mac-specific "atos" symbolizer.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_MAC
#include "sanitizer_allocator_internal.h"
#include "sanitizer_mac.h"
#include "sanitizer_symbolizer_mac.h"
#include <dlfcn.h>
#include <errno.h>
#include <mach/mach.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
#include <util.h>
namespace __sanitizer {
// Symbolizes a PC via dladdr(): recovers only the (demangled) symbol name
// and the offset from the symbol start — no file/line info.
bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
  Dl_info info;
  int result = dladdr((const void *)addr, &info);
  if (!result) return false;
  // The CHECK enforces that dladdr reported a symbol at or below `addr`,
  // making the subtraction below a valid in-function offset.
  CHECK(addr >= reinterpret_cast<uptr>(info.dli_saddr));
  stack->info.function_offset = addr - reinterpret_cast<uptr>(info.dli_saddr);
  const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
  if (!demangled) return false;
  stack->info.function = internal_strdup(demangled);
  return true;
}

// Symbolizes a data address via dladdr(): fills the symbol name and start
// address only.
bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
  Dl_info info;
  int result = dladdr((const void *)addr, &info);
  if (!result) return false;
  // NOTE(review): unlike SymbolizePC above, `demangled` is not null-checked
  // here before internal_strdup; relies on dli_sname being non-null or
  // internal_strdup tolerating null — confirm.
  const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
  datainfo->name = internal_strdup(demangled);
  datainfo->start = (uptr)info.dli_saddr;
  return true;
}
#define K_ATOS_ENV_VAR "__check_mach_ports_lookup"

// This cannot live in `AtosSymbolizerProcess` because instances of that object
// are allocated by the internal allocator which under ASan is poisoned with
// kAsanInternalHeapMagic.
static char kAtosMachPortEnvEntry[] = K_ATOS_ENV_VAR "=000000000000000";

// Runs Apple's `atos` tool as a subprocess: one hex address is written per
// line and one line of symbolized output is read back per address.
class AtosSymbolizerProcess : public SymbolizerProcess {
 public:
  explicit AtosSymbolizerProcess(const char *path)
      : SymbolizerProcess(path, /*use_posix_spawn*/ true) {
    pid_str_[0] = '\0';
  }

  void LateInitialize() {
    if (SANITIZER_IOSSIM) {
      // `putenv()` may call malloc/realloc so it is only safe to do this
      // during LateInitialize() or later (i.e. we can't do this in the
      // constructor). We also can't do this in `StartSymbolizerSubprocess()`
      // because in TSan we switch allocators when we're symbolizing.
      // We use `putenv()` rather than `setenv()` so that we can later directly
      // write into the storage without LibC getting involved to change what the
      // variable is set to
      int result = putenv(kAtosMachPortEnvEntry);
      CHECK_EQ(result, 0);
    }
  }

 private:
  bool StartSymbolizerSubprocess() override {
    // Configure sandbox before starting atos process.

    // Put the string command line argument in the object so that it outlives
    // the call to GetArgV.
    internal_snprintf(pid_str_, sizeof(pid_str_), "%d", internal_getpid());

    if (SANITIZER_IOSSIM) {
      // `atos` in the simulator is restricted in its ability to retrieve the
      // task port for the target process (us) so we need to do extra work
      // to pass our task port to it.
      mach_port_t ports[]{mach_task_self()};
      kern_return_t ret =
          mach_ports_register(mach_task_self(), ports, /*count=*/1);
      CHECK_EQ(ret, KERN_SUCCESS);

      // Set environment variable that signals to `atos` that it should look
      // for our task port. We can't call `setenv()` here because it might call
      // malloc/realloc. To avoid that we instead update the
      // `mach_port_env_var_entry_` variable with our current PID.
      uptr count = internal_snprintf(kAtosMachPortEnvEntry,
                                     sizeof(kAtosMachPortEnvEntry),
                                     K_ATOS_ENV_VAR "=%s", pid_str_);
      CHECK_GE(count, sizeof(K_ATOS_ENV_VAR) + internal_strlen(pid_str_));
      // Document our assumption but without calling `getenv()` in normal
      // builds.
      DCHECK(getenv(K_ATOS_ENV_VAR));
      DCHECK_EQ(internal_strcmp(getenv(K_ATOS_ENV_VAR), pid_str_), 0);
    }

    return SymbolizerProcess::StartSymbolizerSubprocess();
  }

  // atos emits exactly one newline-terminated line per request.
  bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
    return (length >= 1 && buffer[length - 1] == '\n');
  }

  void GetArgV(const char *path_to_binary,
               const char *(&argv)[kArgVMax]) const override {
    int i = 0;
    argv[i++] = path_to_binary;
    // "-p <pid>" attaches atos to this process.
    argv[i++] = "-p";
    argv[i++] = &pid_str_[0];
    if (GetMacosAlignedVersion() == MacosVersion(10, 9)) {
      // On Mavericks atos prints a deprecation warning which we suppress by
      // passing -d. The warning isn't present on other OSX versions, even the
      // newer ones.
      argv[i++] = "-d";
    }
    argv[i++] = nullptr;
  }

  char pid_str_[16];
  // Space for `\0` in `K_ATOS_ENV_VAR` is reused for `=`.
  static_assert(sizeof(kAtosMachPortEnvEntry) ==
                    (sizeof(K_ATOS_ENV_VAR) + sizeof(pid_str_)),
                "sizes should match");
};

#undef K_ATOS_ENV_VAR
// Parses one line of `atos` output into its components. Returns false (and
// frees intermediate allocations) when the line lacks the " (in " marker,
// i.e. atos reported no module for the address. Out-parameters other than
// `out_module` are optional / conditionally written.
static bool ParseCommandOutput(const char *str, uptr addr, char **out_name,
                               char **out_module, char **out_file, uptr *line,
                               uptr *start_address) {
  // Trim ending newlines.
  char *trim;
  ExtractTokenUpToDelimiter(str, "\n", &trim);

  // The line from `atos` is in one of these formats:
  //   myfunction (in library.dylib) (sourcefile.c:17)
  //   myfunction (in library.dylib) + 0x1fe
  //   myfunction (in library.dylib) + 15
  //   0xdeadbeef (in library.dylib) + 0x1fe
  //   0xdeadbeef (in library.dylib) + 15
  //   0xdeadbeef (in library.dylib)
  //   0xdeadbeef

  const char *rest = trim;
  char *symbol_name;
  rest = ExtractTokenUpToDelimiter(rest, " (in ", &symbol_name);
  if (rest[0] == '\0') {
    // No module info — treat the whole line as unparseable.
    InternalFree(symbol_name);
    InternalFree(trim);
    return false;
  }
  // A leading "0x..." token is a raw address, not a real symbol name.
  if (internal_strncmp(symbol_name, "0x", 2) != 0)
    *out_name = symbol_name;
  else
    InternalFree(symbol_name);
  rest = ExtractTokenUpToDelimiter(rest, ") ", out_module);

  if (rest[0] == '(') {
    // "(file:line)" suffix.
    if (out_file) {
      rest++;
      rest = ExtractTokenUpToDelimiter(rest, ":", out_file);
      char *extracted_line_number;
      rest = ExtractTokenUpToDelimiter(rest, ")", &extracted_line_number);
      if (line) *line = (uptr)internal_atoll(extracted_line_number);
      InternalFree(extracted_line_number);
    }
  } else if (rest[0] == '+') {
    // "+ <offset>" suffix: derive the symbol's start address from `addr`.
    rest += 2;
    uptr offset = internal_atoll(rest);
    if (start_address) *start_address = addr - offset;
  }

  InternalFree(trim);
  return true;
}
// The subprocess object is placement-allocated from `allocator` and never
// freed; `process_` may later be cleared to disable the tool.
AtosSymbolizer::AtosSymbolizer(const char *path, LowLevelAllocator *allocator)
    : process_(new (*allocator) AtosSymbolizerProcess(path)) {}

// Symbolizes a PC by sending the hex address to `atos`. A parse failure
// clears `process_`, permanently disabling this tool.
bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
  if (!process_) return false;
  if (addr == 0) return false;
  char command[32];
  internal_snprintf(command, sizeof(command), "0x%zx\n", addr);
  const char *buf = process_->SendCommand(command);
  if (!buf) return false;
  // NOTE(review): `line` is only written by ParseCommandOutput when file
  // info is present in the atos output; otherwise an indeterminate value is
  // stored into stack->info.line below — confirm upstream intent.
  uptr line;
  uptr start_address = AddressInfo::kUnknown;
  if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,
                          &stack->info.file, &line, &start_address)) {
    process_ = nullptr;
    return false;
  }
  stack->info.line = (int)line;

  if (start_address == AddressInfo::kUnknown) {
    // Fallback to dladdr() to get function start address if atos doesn't
    // report it.
    Dl_info info;
    int result = dladdr((const void *)addr, &info);
    if (result)
      start_address = reinterpret_cast<uptr>(info.dli_saddr);
  }

  // Only assign to `function_offset` if we were able to get the function's
  // start address.
  if (start_address != AddressInfo::kUnknown) {
    CHECK(addr >= start_address);
    stack->info.function_offset = addr - start_address;
  }
  return true;
}
// Symbolizes a data address via `atos`; file/line output is not requested.
// A parse failure permanently disables this tool (clears `process_`).
bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
  if (!process_) return false;
  char command[32];
  internal_snprintf(command, sizeof(command), "0x%zx\n", addr);
  const char *buf = process_->SendCommand(command);
  if (!buf) return false;
  if (!ParseCommandOutput(buf, addr, &info->name, &info->module, nullptr,
                          nullptr, &info->start)) {
    process_ = nullptr;
    return false;
  }
  return true;
}

// Forwards late (allocator-safe) initialization to the subprocess wrapper.
void AtosSymbolizer::LateInitialize() { process_->LateInitialize(); }
} // namespace __sanitizer
#endif // SANITIZER_MAC

View File

@@ -0,0 +1,146 @@
//===-- sanitizer_symbolizer_markup.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries.
//
// Implementation of offline markup symbolizer.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_SYMBOLIZER_MARKUP
#if SANITIZER_FUCHSIA
#include "sanitizer_symbolizer_fuchsia.h"
#elif SANITIZER_RTEMS
#include "sanitizer_symbolizer_rtems.h"
#endif
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include <limits.h>
#include <unwind.h>
namespace __sanitizer {
// This generic support for offline symbolizing is based on the
// Fuchsia port. We don't do any actual symbolization per se.
// Instead, we emit text containing raw addresses and raw linkage
// symbol names, embedded in Fuchsia's symbolization markup format.
// Fuchsia's logging infrastructure emits enough information about
// process memory layout that a post-processing filter can do the
// symbolization and pretty-print the markup. See the spec at:
// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
// This is used by UBSan for type names, and by ASan for global variable names.
// It's expected to return a static buffer that will be reused on each call.
// Wraps `name` in demangle markup. Note: returns a process-wide static
// buffer, so the result is only valid until the next call.
const char *Symbolizer::Demangle(const char *name) {
  static char buffer[kFormatDemangleMax];
  internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
  return buffer;
}

// This is used mostly for suppression matching. Making it work
// would enable "interceptor_via_lib" suppressions. It's also used
// once in UBSan to say "in module ..." in a message that also
// includes an address in the module, so post-processing can already
// pretty-print that so as to indicate the module.
bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
                                             uptr *module_address) {
  return false;
}

// This is used in some places for suppression checking, which we
// don't really support for Fuchsia. It's also used in UBSan to
// identify a PC location to a function name, so we always fill in
// the function member with a string containing markup around the PC
// value.
// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
// to render stack frames, but that should be changed to use
// RenderStackFrame.
SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
  SymbolizedStack *s = SymbolizedStack::New(addr);
  char buffer[kFormatFunctionMax];
  internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
  s->info.function = internal_strdup(buffer);
  return s;
}

// Always claim we succeeded, so that RenderDataInfo will be called.
bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
  info->Clear();
  info->start = addr;
  return true;
}

// We ignore the format argument to __sanitizer_symbolize_global.
void RenderData(InternalScopedString *buffer, const char *format,
                const DataInfo *DI, const char *strip_path_prefix) {
  buffer->append(kFormatData, DI->start);
}

// We don't support the stack_trace_format flag at all.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
                 const AddressInfo &info, bool vs_style,
                 const char *strip_path_prefix, const char *strip_func_prefix) {
  buffer->append(kFormatFrame, frame_no, info.address);
}

// The markup symbolizer has no platform tools to register.
Symbolizer *Symbolizer::PlatformInit() {
  return new (symbolizer_allocator_) Symbolizer({});
}

void Symbolizer::LateInitialize() {
  Symbolizer::GetOrInit()->LateInitializeTools();
}

// Deadly-signal reporting is a no-op in the markup configuration.
void StartReportDeadlySignal() {}
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {}
#if SANITIZER_CAN_SLOW_UNWIND
// Shared state threaded through _Unwind_Backtrace to the trace callback.
struct UnwindTraceArg {
  BufferedStackTrace *stack;
  u32 max_depth;
};

// _Unwind_Backtrace callback: records each frame's PC until max_depth is
// reached. PCs below PAGE_SIZE are treated as bogus and stop the walk.
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
  CHECK_LT(arg->stack->size, arg->max_depth);
  uptr pc = _Unwind_GetIP(ctx);
  if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
  arg->stack->trace_buffer[arg->stack->size++] = pc;
  return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
                                             : _URC_NO_REASON);
}

// Unwinds the current stack via _Unwind_Backtrace, then rewrites the top of
// the trace so that the requested `pc` is the first entry.
void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  // +1 because the frame for UnwindSlow itself will be dropped below.
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  CHECK_GT(size, 0);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace_buffer[0] belongs to the current function so we always pop it,
  // unless there is only 1 frame in the stack trace (1 frame is always better
  // than 0!).
  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
  trace_buffer[0] = pc;
}

// Context-based unwinding is unsupported in this configuration; signal
// contexts are expected never to reach it.
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
  CHECK(context);
  CHECK_GE(max_depth, 2);
  UNREACHABLE("signal context doesn't exist");
}
#endif // SANITIZER_CAN_SLOW_UNWIND
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_MARKUP

View File

@@ -0,0 +1,492 @@
//===-- sanitizer_symbolizer_posix_libcdep.cpp ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// POSIX-specific implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_POSIX
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_symbolizer_internal.h"
#include "sanitizer_symbolizer_libbacktrace.h"
#include "sanitizer_symbolizer_mac.h"
#include <dlfcn.h> // for dlsym()
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
// C++ demangling function, as required by Itanium C++ ABI. This is weak,
// because we do not require a C++ ABI library to be linked to a program
// using sanitizers; if it's not present, we'll just use the mangled name.
namespace __cxxabiv1 {
extern "C" SANITIZER_WEAK_ATTRIBUTE
char *__cxa_demangle(const char *mangled, char *buffer,
size_t *length, int *status);
}
namespace __sanitizer {
// Attempts to demangle the name via __cxa_demangle from __cxxabiv1.
// Falls back to returning the mangled `name` when no C++ ABI demangler is
// linked into the process (the __cxa_demangle symbol is weak).
const char *DemangleCXXABI(const char *name) {
  // FIXME: __cxa_demangle aggressively insists on allocating memory.
  // There's not much we can do about that, short of providing our
  // own demangler (libc++abi's implementation could be adapted so that
  // it does not allocate). For now, we just call it anyway, and we leak
  // the returned value.
  if (&__cxxabiv1::__cxa_demangle)
    if (const char *demangled_name =
          __cxxabiv1::__cxa_demangle(name, 0, 0, 0))
      return demangled_name;

  return name;
}

// As of now, there are no headers for the Swift runtime. Once they are
// present, we will weakly link since we do not require Swift runtime to be
// linked.
typedef char *(*swift_demangle_ft)(const char *mangledName,
                                   size_t mangledNameLength, char *outputBuffer,
                                   size_t *outputBufferSize, uint32_t flags);
// Resolved once by InitializeSwiftDemangler(); null if the Swift runtime is
// not loaded.
static swift_demangle_ft swift_demangle_f;

// This must not happen lazily at symbolication time, because dlsym uses
// malloc and thread-local storage, which is not a good thing to do during
// symbolication.
static void InitializeSwiftDemangler() {
  swift_demangle_f = (swift_demangle_ft)dlsym(RTLD_DEFAULT, "swift_demangle");
  (void)dlerror(); // Cleanup error message in case of failure
}

// Attempts to demangle a Swift name. The demangler will return nullptr if a
// non-Swift name is passed in.
const char *DemangleSwift(const char *name) {
  if (swift_demangle_f)
    return swift_demangle_f(name, internal_strlen(name), 0, 0, 0);
  return nullptr;
}
// Demangles `name`, trying the Swift demangler first and the C++ ABI
// demangler second. Returns nullptr only for a null input.
const char *DemangleSwiftAndCXX(const char *name) {
  if (name == nullptr)
    return nullptr;
  const char *swift_result = DemangleSwift(name);
  return swift_result ? swift_result : DemangleCXXABI(name);
}
// Creates two pipes whose four descriptors are all greater than 2, so the
// symbolizer's stdin/stdout plumbing can never collide with the client's
// std streams. Up to five pipes are created; the low-numbered rejects are
// closed once both keepers are found.
static bool CreateTwoHighNumberedPipes(int *infd_, int *outfd_) {
  int *infd = NULL;
  int *outfd = NULL;
  // The client program may close its stdin and/or stdout and/or stderr
  // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
  // In this case the communication between the forked processes may be
  // broken if either the parent or the child tries to close or duplicate
  // these descriptors. The loop below produces two pairs of file
  // descriptors, each greater than 2 (stderr).
  int sock_pair[5][2];
  for (int i = 0; i < 5; i++) {
    if (pipe(sock_pair[i]) == -1) {
      // pipe() failed: release everything created so far.
      for (int j = 0; j < i; j++) {
        internal_close(sock_pair[j][0]);
        internal_close(sock_pair[j][1]);
      }
      return false;
    } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
      if (infd == NULL) {
        infd = sock_pair[i];
      } else {
        outfd = sock_pair[i];
        // Close every earlier pair except the one kept as `infd`.
        for (int j = 0; j < i; j++) {
          if (sock_pair[j] == infd) continue;
          internal_close(sock_pair[j][0]);
          internal_close(sock_pair[j][1]);
        }
        break;
      }
    }
  }
  CHECK(infd);
  CHECK(outfd);
  infd_[0] = infd[0];
  infd_[1] = infd[1];
  outfd_[0] = outfd[0];
  outfd_[1] = outfd[1];
  return true;
}
// Launches the external symbolizer: either via posix_spawn over a single
// bidirectional fd (Mac only) or via fork/exec with two pipes. On success,
// input_fd_/output_fd_ are set up for ReadFromSymbolizer/WriteToSymbolizer.
bool SymbolizerProcess::StartSymbolizerSubprocess() {
  if (!FileExists(path_)) {
    // Warn only once about a bad path.
    if (!reported_invalid_path_) {
      Report("WARNING: invalid path to external symbolizer!\n");
      reported_invalid_path_ = true;
    }
    return false;
  }

  const char *argv[kArgVMax];
  GetArgV(path_, argv);
  pid_t pid;

  // Report how symbolizer is being launched for debugging purposes.
  if (Verbosity() >= 3) {
    // Only use `Report` for first line so subsequent prints don't get prefixed
    // with current PID.
    Report("Launching Symbolizer process: ");
    for (unsigned index = 0; index < kArgVMax && argv[index]; ++index)
      Printf("%s ", argv[index]);
    Printf("\n");
  }

  if (use_posix_spawn_) {
#if SANITIZER_MAC
    // One fd serves as both the read and write end.
    fd_t fd = internal_spawn(argv, const_cast<const char **>(GetEnvP()), &pid);
    if (fd == kInvalidFd) {
      Report("WARNING: failed to spawn external symbolizer (errno: %d)\n",
             errno);
      return false;
    }

    input_fd_ = fd;
    output_fd_ = fd;
#else  // SANITIZER_MAC
    UNIMPLEMENTED();
#endif  // SANITIZER_MAC
  } else {
    fd_t infd[2] = {}, outfd[2] = {};
    if (!CreateTwoHighNumberedPipes(infd, outfd)) {
      Report("WARNING: Can't create a socket pair to start "
             "external symbolizer (errno: %d)\n", errno);
      return false;
    }

    pid = StartSubprocess(path_, argv, GetEnvP(), /* stdin */ outfd[0],
                          /* stdout */ infd[1]);
    if (pid < 0) {
      internal_close(infd[0]);
      internal_close(outfd[1]);
      return false;
    }

    input_fd_ = infd[0];
    output_fd_ = outfd[1];
  }

  CHECK_GT(pid, 0);

  // Check that symbolizer subprocess started successfully.
  SleepForMillis(kSymbolizerStartupTimeMillis);
  if (!IsProcessRunning(pid)) {
    // Either waitpid failed, or child has already exited.
    Report("WARNING: external symbolizer didn't start up correctly!\n");
    return false;
  }

  return true;
}
// Subprocess wrapper for one `addr2line` instance. addr2line is bound to a
// single object file, so each process remembers the module it serves.
class Addr2LineProcess : public SymbolizerProcess {
 public:
  Addr2LineProcess(const char *path, const char *module_name)
      : SymbolizerProcess(path), module_name_(internal_strdup(module_name)) {}

  const char *module_name() const { return module_name_; }

 private:
  // argv: -i (inlines), -C (demangle), -f (functions), -e <module>.
  void GetArgV(const char *path_to_binary,
               const char *(&argv)[kArgVMax]) const override {
    int i = 0;
    argv[i++] = path_to_binary;
    argv[i++] = "-iCfe";
    argv[i++] = module_name_;
    argv[i++] = nullptr;
  }

  bool ReachedEndOfOutput(const char *buffer, uptr length) const override;

  bool ReadFromSymbolizer(char *buffer, uptr max_length) override {
    if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length))
      return false;
    // The returned buffer is empty when output is valid, but exceeds
    // max_length.
    if (*buffer == '\0')
      return true;
    // We should cut out output_terminator_ at the end of given buffer,
    // appended by addr2line to mark the end of its meaningful output.
    // We cannot scan buffer from it's beginning, because it is legal for it
    // to start with output_terminator_ in case given offset is invalid. So,
    // scanning from second character.
    char *garbage = internal_strstr(buffer + 1, output_terminator_);
    // This should never be NULL since buffer must end up with
    // output_terminator_.
    CHECK(garbage);
    // Trim the buffer.
    garbage[0] = '\0';
    return true;
  }

  const char *module_name_;  // Owned, leaked.
  static const char output_terminator_[];
};
// Marker produced by symbolizing the sentinel dummy_address_: an unknown
// function/file pair. Its appearance delimits the real response.
const char Addr2LineProcess::output_terminator_[] = "??\n??:0\n";

// The response is complete once it ends with output_terminator_ AND is
// longer than the terminator alone (the first pair of lines answers the
// real query; the terminator pair comes from the sentinel address).
bool Addr2LineProcess::ReachedEndOfOutput(const char *buffer,
                                          uptr length) const {
  const size_t kTerminatorLen = sizeof(output_terminator_) - 1;
  // Skip, if we read just kTerminatorLen bytes, because Addr2Line output
  // should consist at least of two pairs of lines:
  // 1. First one, corresponding to given offset to be symbolized
  // (may be equal to output_terminator_, if offset is not valid).
  // 2. Second one for output_terminator_, itself to mark the end of output.
  if (length <= kTerminatorLen) return false;
  // Addr2Line output should end up with output_terminator_.
  return !internal_memcmp(buffer + length - kTerminatorLen,
                          output_terminator_, kTerminatorLen);
}
// Symbolizer tool backed by a pool of addr2line subprocesses, one per module
// (addr2line can only serve a single object file per invocation).
class Addr2LinePool : public SymbolizerTool {
 public:
  explicit Addr2LinePool(const char *addr2line_path,
                         LowLevelAllocator *allocator)
      : addr2line_path_(addr2line_path), allocator_(allocator) {
    addr2line_pool_.reserve(16);
  }

  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
    if (const char *buf =
            SendCommand(stack->info.module, stack->info.module_offset)) {
      ParseSymbolizePCOutput(buf, stack);
      return true;
    }
    return false;
  }

  // addr2line cannot symbolize data addresses.
  bool SymbolizeData(uptr addr, DataInfo *info) override {
    return false;
  }

 private:
  // Finds (or lazily creates) the addr2line process for `module_name` and
  // sends it the offset plus the sentinel dummy_address_ that delimits the
  // response (see Addr2LineProcess::output_terminator_).
  const char *SendCommand(const char *module_name, uptr module_offset) {
    Addr2LineProcess *addr2line = 0;
    for (uptr i = 0; i < addr2line_pool_.size(); ++i) {
      if (0 ==
          internal_strcmp(module_name, addr2line_pool_[i]->module_name())) {
        addr2line = addr2line_pool_[i];
        break;
      }
    }
    if (!addr2line) {
      addr2line =
          new(*allocator_) Addr2LineProcess(addr2line_path_, module_name);
      addr2line_pool_.push_back(addr2line);
    }
    CHECK_EQ(0, internal_strcmp(module_name, addr2line->module_name()));
    char buffer[kBufferSize];
    internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n",
                      module_offset, dummy_address_);
    return addr2line->SendCommand(buffer);
  }

  static const uptr kBufferSize = 64;
  const char *addr2line_path_;
  LowLevelAllocator *allocator_;
  InternalMmapVector<Addr2LineProcess*> addr2line_pool_;
  // Deliberately invalid address used to force the "??" terminator output.
  static const uptr dummy_address_ =
      FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
};
#if SANITIZER_SUPPORTS_WEAK_HOOKS
// Optional hooks for an in-process ("internal") symbolizer. These are weak
// symbols: they are non-null only when the binary links a library that
// implements them.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
                           char *Buffer, int MaxLength,
                           bool SymbolizeInlineFrames);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
                                char *Buffer, int MaxLength);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_symbolize_flush();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
                                   int MaxLength);
} // extern "C"
// Symbolizer tool backed by the weak __sanitizer_symbolize_* hooks above,
// i.e. a symbolizer statically linked into the instrumented binary.
class InternalSymbolizer : public SymbolizerTool {
 public:
  // Returns a new InternalSymbolizer when the weak hooks are resolved
  // (the in-process symbolizer is linked in), null otherwise.
  static InternalSymbolizer *get(LowLevelAllocator *alloc) {
    if (__sanitizer_symbolize_code != 0 &&
        __sanitizer_symbolize_data != 0) {
      return new(*alloc) InternalSymbolizer();
    }
    return 0;
  }
  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
    bool result = __sanitizer_symbolize_code(
        stack->info.module, stack->info.module_offset, buffer_, kBufferSize,
        common_flags()->symbolize_inline_frames);
    if (result) ParseSymbolizePCOutput(buffer_, stack);
    return result;
  }
  bool SymbolizeData(uptr addr, DataInfo *info) override {
    bool result = __sanitizer_symbolize_data(info->module, info->module_offset,
                                             buffer_, kBufferSize);
    if (result) {
      ParseSymbolizeDataOutput(buffer_, info);
      info->start += (addr - info->module_offset); // Add the base address.
    }
    return result;
  }
  void Flush() override {
    if (__sanitizer_symbolize_flush)
      __sanitizer_symbolize_flush();
  }
  const char *Demangle(const char *name) override {
    if (__sanitizer_symbolize_demangle) {
      // Grow the result buffer until the demangled name fits: when the
      // buffer is too small, the hook returns the required length.
      for (uptr res_length = 1024;
           res_length <= InternalSizeClassMap::kMaxSize;) {
        char *res_buff = static_cast<char*>(InternalAlloc(res_length));
        uptr req_length =
            __sanitizer_symbolize_demangle(name, res_buff, res_length);
        if (req_length > res_length) {
          res_length = req_length + 1;
          InternalFree(res_buff);
          continue;
        }
        // Note: the returned buffer is InternalAlloc'd and never freed here.
        return res_buff;
      }
    }
    return name;
  }
 private:
  InternalSymbolizer() { }
  static const int kBufferSize = 16 * 1024;
  char buffer_[kBufferSize];
};
#else // SANITIZER_SUPPORTS_WEAK_HOOKS
// Without weak hook support there is no in-process symbolizer; get()
// always reports it as unavailable.
class InternalSymbolizer : public SymbolizerTool {
 public:
  static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
};
#endif  // SANITIZER_SUPPORTS_WEAK_HOOKS
// Delegates demangling to the combined Swift/C++ demangler.
const char *Symbolizer::PlatformDemangle(const char *name) {
  return DemangleSwiftAndCXX(name);
}
// Picks an external symbolizer subprocess. When external_symbolizer_path is
// set, its basename must be a known tool (llvm-symbolizer, atos, addr2line);
// an empty path explicitly disables external symbolization, and an unknown
// name is fatal. When the flag is unset, known tools are searched on $PATH.
static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
  const char *path = common_flags()->external_symbolizer_path;
  const char *binary_name = path ? StripModuleName(path) : "";
  if (path && path[0] == '\0') {
    VReport(2, "External symbolizer is explicitly disabled.\n");
    return nullptr;
  } else if (!internal_strcmp(binary_name, "llvm-symbolizer")) {
    VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
    return new(*allocator) LLVMSymbolizer(path, allocator);
  } else if (!internal_strcmp(binary_name, "atos")) {
#if SANITIZER_MAC
    VReport(2, "Using atos at user-specified path: %s\n", path);
    return new(*allocator) AtosSymbolizer(path, allocator);
#else // SANITIZER_MAC
    Report("ERROR: Using `atos` is only supported on Darwin.\n");
    Die();
#endif // SANITIZER_MAC
  } else if (!internal_strcmp(binary_name, "addr2line")) {
    VReport(2, "Using addr2line at user-specified path: %s\n", path);
    return new(*allocator) Addr2LinePool(path, allocator);
  } else if (path) {
    Report("ERROR: External symbolizer path is set to '%s' which isn't "
           "a known symbolizer. Please set the path to the llvm-symbolizer "
           "binary or other known tool.\n", path);
    Die();
  }
  // Otherwise symbolizer program is unknown, let's search $PATH
  CHECK(path == nullptr);
#if SANITIZER_MAC
  if (const char *found_path = FindPathToBinary("atos")) {
    VReport(2, "Using atos found at: %s\n", found_path);
    return new(*allocator) AtosSymbolizer(found_path, allocator);
  }
#endif // SANITIZER_MAC
  if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
    VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
    return new(*allocator) LLVMSymbolizer(found_path, allocator);
  }
  // addr2line from $PATH is only used when the allow_addr2line flag permits.
  if (common_flags()->allow_addr2line) {
    if (const char *found_path = FindPathToBinary("addr2line")) {
      VReport(2, "Using addr2line found at: %s\n", found_path);
      return new(*allocator) Addr2LinePool(found_path, allocator);
    }
  }
  return nullptr;
}
// Builds the prioritized list of symbolizer tools for POSIX: the internal
// (linked-in) symbolizer, then libbacktrace, then an external subprocess,
// with dladdr as an additional fallback on Mac.
static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
                                  LowLevelAllocator *allocator) {
  if (!common_flags()->symbolize) {
    VReport(2, "Symbolizer is disabled.\n");
    return;
  }
  if (IsAllocatorOutOfMemory()) {
    VReport(2, "Cannot use internal symbolizer: out of memory\n");
  } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
    VReport(2, "Using internal symbolizer.\n");
    list->push_back(tool);
    return;
  }
  if (SymbolizerTool *tool = LibbacktraceSymbolizer::get(allocator)) {
    VReport(2, "Using libbacktrace symbolizer.\n");
    list->push_back(tool);
    return;
  }
  if (SymbolizerTool *tool = ChooseExternalSymbolizer(allocator)) {
    list->push_back(tool);
  }
#if SANITIZER_MAC
  VReport(2, "Using dladdr symbolizer.\n");
  list->push_back(new(*allocator) DlAddrSymbolizer());
#endif // SANITIZER_MAC
}
// Constructs the POSIX symbolizer with the preferred list of tools.
Symbolizer *Symbolizer::PlatformInit() {
  IntrusiveList<SymbolizerTool> list;
  list.clear();
  ChooseSymbolizerTools(&list, &symbolizer_allocator_);
  return new(symbolizer_allocator_) Symbolizer(list);
}
// Runs tool-specific late initialization plus the Swift demangler setup.
void Symbolizer::LateInitialize() {
  Symbolizer::GetOrInit()->LateInitializeTools();
  InitializeSwiftDemangler();
}
} // namespace __sanitizer
#endif // SANITIZER_POSIX

View File

@@ -0,0 +1,293 @@
//===-- sanitizer_symbolizer_report.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file is shared between AddressSanitizer and other sanitizer run-time
/// libraries and implements symbolized reports related functions.
///
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_report_decorator.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_stacktrace_printer.h"
#include "sanitizer_symbolizer.h"
#if SANITIZER_POSIX
# include "sanitizer_posix.h"
# include <sys/mman.h>
#endif
namespace __sanitizer {
#if !SANITIZER_GO
// Prints a one-line error summary for an already-symbolized address.
// No-op when the print_summary flag is off.
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name) {
  if (!common_flags()->print_summary) return;
  InternalScopedString buff(kMaxSummaryLength);
  buff.append("%s ", error_type);
  // "%L %F" renders the source location and the function (see RenderFrame).
  RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
              common_flags()->strip_path_prefix);
  ReportErrorSummary(buff.data(), alt_tool_name);
}
#endif
#if !SANITIZER_FUCHSIA
// Checks whether the (possibly reopened) report file descriptor supports
// colored (ANSI) output. Takes the report-file lock.
bool ReportFile::SupportsColors() {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  return SupportsColoredOutput(fd);
}
static INLINE bool ReportSupportsColors() {
  return report_file.SupportsColors();
}
#else // SANITIZER_FUCHSIA
// Fuchsia's logs always go through post-processing that handles colorization.
static INLINE bool ReportSupportsColors() { return true; }
#endif // !SANITIZER_FUCHSIA
bool ColorizeReports() {
// FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
// printing on Windows.
if (SANITIZER_WINDOWS)
return false;
const char *flag = common_flags()->color;
return internal_strcmp(flag, "always") == 0 ||
(internal_strcmp(flag, "auto") == 0 && ReportSupportsColors());
}
// Prints a one-line error summary for the given stack trace, symbolizing
// its top frame first. Compiled out in Go builds.
void ReportErrorSummary(const char *error_type, const StackTrace *stack,
                        const char *alt_tool_name) {
#if !SANITIZER_GO
  if (!common_flags()->print_summary)
    return;
  if (stack->size == 0) {
    ReportErrorSummary(error_type);
    return;
  }
  // Currently, we include the first stack frame into the report summary.
  // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
  uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
  SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
  ReportErrorSummary(error_type, frame->info, alt_tool_name);
  frame->ClearAll();
#endif
}
// Warns (with a stack trace) about a mapping request for pages that are
// simultaneously writable and executable. Only active on POSIX, non-Go,
// non-Android builds.
void ReportMmapWriteExec(int prot) {
#if SANITIZER_POSIX && (!SANITIZER_GO && !SANITIZER_ANDROID)
  if ((prot & (PROT_WRITE | PROT_EXEC)) != (PROT_WRITE | PROT_EXEC))
    return;
  ScopedErrorReportLock l;
  SanitizerCommonDecorator d;
  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  uptr top = 0;
  uptr bottom = 0;
  // GET_CALLER_PC_BP_SP is a macro introducing locals pc, bp and sp.
  GET_CALLER_PC_BP_SP;
  (void)sp;
  bool fast = common_flags()->fast_unwind_on_fatal;
  if (StackTrace::WillUseFastUnwind(fast)) {
    // Fast (frame-pointer) unwinding needs the current stack's bounds.
    GetThreadStackTopAndBottom(false, &top, &bottom);
    stack->Unwind(kStackTraceMax, pc, bp, nullptr, top, bottom, true);
  } else {
    stack->Unwind(kStackTraceMax, pc, 0, nullptr, 0, 0, false);
  }
  Printf("%s", d.Warning());
  Report("WARNING: %s: writable-executable page usage\n", SanitizerToolName);
  Printf("%s", d.Default());
  stack->Print();
  ReportErrorSummary("w-and-x-usage", stack);
#endif
}
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO
// Emits the "<tool>:DEADLYSIGNAL" banner with raw writes, before any
// locking or symbolization happens.
void StartReportDeadlySignal() {
  // Write the first message using fd=2, just in case.
  // It may actually fail to write in case stderr is closed.
  CatastrophicErrorWrite(SanitizerToolName, internal_strlen(SanitizerToolName));
  static const char kDeadlySignal[] = ":DEADLYSIGNAL\n";
  CatastrophicErrorWrite(kDeadlySignal, sizeof(kDeadlySignal) - 1);
}
// Scans the process memory mappings and prints a hint if the faulting PC
// lies in a mapped but non-executable region (likely a wild jump).
static void MaybeReportNonExecRegion(uptr pc) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())
      Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
  }
#endif
}
// Appends one byte as two hex digits (wrapped in memory-byte coloring,
// followed by a space) to str, preceded by the "before" prefix.
static void PrintMemoryByte(InternalScopedString *str, const char *before,
                            u8 byte) {
  SanitizerCommonDecorator d;
  str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
              d.Default());
}
// Dumps the 16 bytes at the faulting PC to aid crash triage, when the
// dump_instruction_bytes flag is set and the PC is not in the zero page.
static void MaybeDumpInstructionBytes(uptr pc) {
  if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
    return;
  InternalScopedString str(1024);
  str.append("First 16 instruction bytes at pc: ");
  // Probe readability first: reading an unmapped PC here would re-fault.
  if (IsAccessibleMemoryRange(pc, 16)) {
    for (int i = 0; i < 16; ++i) {
      PrintMemoryByte(&str, "", ((u8 *)pc)[i]);
    }
    str.append("\n");
  } else {
    str.append("unaccessible\n");
  }
  Report("%s", str.data());
}
// Dumps CPU registers from the signal context when the dump_registers
// flag is enabled; otherwise does nothing.
static void MaybeDumpRegisters(void *context) {
  if (common_flags()->dump_registers)
    SignalContext::DumpAllRegisters(context);
}
// Prints a stack-overflow report: colored banner, unwound stack (via the
// tool-provided unwind callback), and a one-line summary.
static void ReportStackOverflowImpl(const SignalContext &sig, u32 tid,
                                    UnwindSignalStackCallbackType unwind,
                                    const void *unwind_context) {
  SanitizerCommonDecorator d;
  Printf("%s", d.Warning());
  static const char kDescription[] = "stack-overflow";
  Report("ERROR: %s: %s on address %p (pc %p bp %p sp %p T%d)\n",
         SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc,
         (void *)sig.bp, (void *)sig.sp, tid);
  Printf("%s", d.Default());
  // Trace storage is mmap-backed (presumably to keep the large buffer off
  // the already-overflowed thread stack — see InternalMmapVector).
  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  unwind(sig, unwind_context, stack);
  stack->Print();
  ReportErrorSummary(kDescription, stack);
}
// Prints the full report for a generic (non-stack-overflow) deadly signal:
// banner, access-type and address hints, stack trace, instruction bytes,
// registers, and a one-line summary.
static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
                                   UnwindSignalStackCallbackType unwind,
                                   const void *unwind_context) {
  SanitizerCommonDecorator d;
  Printf("%s", d.Warning());
  const char *description = sig.Describe();
  // When the true faulting address is not known, omit it from the banner.
  if (sig.is_memory_access && !sig.is_true_faulting_addr)
    Report("ERROR: %s: %s on unknown address (pc %p bp %p sp %p T%d)\n",
           SanitizerToolName, description, (void *)sig.pc, (void *)sig.bp,
           (void *)sig.sp, tid);
  else
    Report("ERROR: %s: %s on unknown address %p (pc %p bp %p sp %p T%d)\n",
           SanitizerToolName, description, (void *)sig.addr, (void *)sig.pc,
           (void *)sig.bp, (void *)sig.sp, tid);
  Printf("%s", d.Default());
  if (sig.pc < GetPageSizeCached())
    Report("Hint: pc points to the zero page.\n");
  if (sig.is_memory_access) {
    const char *access_type =
        sig.write_flag == SignalContext::WRITE
            ? "WRITE"
            : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
    Report("The signal is caused by a %s memory access.\n", access_type);
    if (!sig.is_true_faulting_addr)
      Report("Hint: this fault was caused by a dereference of a high value "
             "address (see register values below). Dissassemble the provided "
             "pc to learn which register was used.\n");
    else if (sig.addr < GetPageSizeCached())
      Report("Hint: address points to the zero page.\n");
  }
  MaybeReportNonExecRegion(sig.pc);
  InternalMmapVector<BufferedStackTrace> stack_buffer(1);
  BufferedStackTrace *stack = stack_buffer.data();
  stack->Reset();
  unwind(sig, unwind_context, stack);
  stack->Print();
  MaybeDumpInstructionBytes(sig.pc);
  MaybeDumpRegisters(sig.context);
  Printf("%s can not provide additional info.\n", SanitizerToolName);
  ReportErrorSummary(description, stack);
}
// Entry point for printing a deadly-signal report: stack overflows get a
// dedicated report format, everything else takes the generic path.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {
  const bool is_stack_overflow = sig.IsStackOverflow();
  if (!is_stack_overflow) {
    ReportDeadlySignalImpl(sig, tid, unwind, unwind_context);
    return;
  }
  ReportStackOverflowImpl(sig, tid, unwind, unwind_context);
}
// Top-level deadly-signal handler used by the tools: emits the early
// banner, takes the global report lock, prints the report, and dies.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {
  // The banner is written before the lock is acquired, so something is
  // emitted even if acquiring the report lock does not return.
  StartReportDeadlySignal();
  ScopedErrorReportLock rl;
  SignalContext sig(siginfo, context);
  ReportDeadlySignal(sig, tid, unwind, unwind_context);
  Report("ABORTING\n");
  Die();
}
#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO
// Identity of the thread currently holding the report lock (0 = none).
static atomic_uintptr_t reporting_thread = {0};
static StaticSpinMutex CommonSanitizerReportMutex;
// Acquires the global error-report lock. Detects re-entry by the same
// thread (a nested bug, or an async signal arriving mid-report) and exits
// immediately instead of self-deadlocking on the mutex.
ScopedErrorReportLock::ScopedErrorReportLock() {
  uptr current = GetThreadSelf();
  for (;;) {
    uptr expected = 0;
    if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
                                       memory_order_relaxed)) {
      // We've claimed reporting_thread so proceed.
      CommonSanitizerReportMutex.Lock();
      return;
    }
    if (expected == current) {
      // This is either asynch signal or nested error during error reporting.
      // Fail simple to avoid deadlocks in Report().
      // Can't use Report() here because of potential deadlocks in nested
      // signal handlers.
      CatastrophicErrorWrite(SanitizerToolName,
                             internal_strlen(SanitizerToolName));
      static const char msg[] = ": nested bug in the same thread, aborting.\n";
      CatastrophicErrorWrite(msg, sizeof(msg) - 1);
      internal__exit(common_flags()->exitcode);
    }
    // Another thread holds the lock; spin until it releases.
    internal_sched_yield();
  }
}
// Releases the report mutex, then clears the owning-thread marker.
ScopedErrorReportLock::~ScopedErrorReportLock() {
  CommonSanitizerReportMutex.Unlock();
  atomic_store_relaxed(&reporting_thread, 0);
}
// Debug check that the report mutex is currently held.
void ScopedErrorReportLock::CheckLocked() {
  CommonSanitizerReportMutex.CheckLocked();
}
} // namespace __sanitizer

View File

@@ -0,0 +1,318 @@
//===-- sanitizer_symbolizer_win.cpp --------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// Windows-specific implementation of symbolizer parts.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
#include "sanitizer_dbghelp.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
// Function pointers into dbghelp.dll, resolved at runtime by
// InitializeDbgHelpIfNeeded() via GetProcAddress.
decltype(::StackWalk64) *StackWalk64;
decltype(::SymCleanup) *SymCleanup;
decltype(::SymFromAddr) *SymFromAddr;
decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
decltype(::SymGetModuleBase64) *SymGetModuleBase64;
decltype(::SymGetSearchPathW) *SymGetSearchPathW;
decltype(::SymInitialize) *SymInitialize;
decltype(::SymSetOptions) *SymSetOptions;
decltype(::SymSetSearchPathW) *SymSetSearchPathW;
decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
namespace {
// Symbolizer tool built on the Windows DbgHelp API.
class WinSymbolizerTool : public SymbolizerTool {
 public:
  // The constructor is provided to avoid synthesized memsets.
  WinSymbolizerTool() {}
  bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
  // DbgHelp is only used for code symbolization here; data lookups are
  // left to other tools.
  bool SymbolizeData(uptr addr, DataInfo *info) override {
    return false;
  }
  const char *Demangle(const char *name) override;
};
// Set once InitializeDbgHelpIfNeeded() has completed.
bool is_dbghelp_initialized = false;
// Configures symbol options and calls SymInitialize for this process.
bool TrySymInitialize() {
  SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
  return SymInitialize(GetCurrentProcess(), 0, TRUE);
  // FIXME: We don't call SymCleanup() on exit yet - should we?
}
} // namespace
// Initializes DbgHelp library, if it's not yet initialized. Calls to this
// function should be synchronized with respect to other calls to DbgHelp API
// (e.g. from WinSymbolizerTool). Loads dbghelp.dll dynamically, resolves all
// entry points used here, initializes symbol handling, and appends the main
// module's directory to the symbol search path.
void InitializeDbgHelpIfNeeded() {
  if (is_dbghelp_initialized)
    return;
  HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
  CHECK(dbghelp && "failed to load dbghelp.dll");
// Resolves one dbghelp entry point into the same-named global pointer.
#define DBGHELP_IMPORT(name) \
  do { \
    name = \
        reinterpret_cast<decltype(::name) *>(GetProcAddress(dbghelp, #name)); \
    CHECK(name != nullptr); \
  } while (0)
  DBGHELP_IMPORT(StackWalk64);
  DBGHELP_IMPORT(SymCleanup);
  DBGHELP_IMPORT(SymFromAddr);
  DBGHELP_IMPORT(SymFunctionTableAccess64);
  DBGHELP_IMPORT(SymGetLineFromAddr64);
  DBGHELP_IMPORT(SymGetModuleBase64);
  DBGHELP_IMPORT(SymGetSearchPathW);
  DBGHELP_IMPORT(SymInitialize);
  DBGHELP_IMPORT(SymSetOptions);
  DBGHELP_IMPORT(SymSetSearchPathW);
  DBGHELP_IMPORT(UnDecorateSymbolName);
#undef DBGHELP_IMPORT
  if (!TrySymInitialize()) {
    // OK, maybe the client app has called SymInitialize already.
    // That's a bit unfortunate for us as all the DbgHelp functions are
    // single-threaded and we can't coordinate with the app.
    // FIXME: Can we stop the other threads at this point?
    // Anyways, we have to reconfigure stuff to make sure that SymInitialize
    // has all the appropriate options set.
    // Cross our fingers and reinitialize DbgHelp.
    Report("*** WARNING: Failed to initialize DbgHelp! ***\n");
    Report("*** Most likely this means that the app is already ***\n");
    Report("*** using DbgHelp, possibly with incompatible flags. ***\n");
    Report("*** Due to technical reasons, symbolization might crash ***\n");
    Report("*** or produce wrong results. ***\n");
    SymCleanup(GetCurrentProcess());
    TrySymInitialize();
  }
  is_dbghelp_initialized = true;
  // When an executable is run from a location different from the one where it
  // was originally built, we may not see the nearby PDB files.
  // To work around this, let's append the directory of the main module
  // to the symbol search path. All the failures below are not fatal.
  const size_t kSymPathSize = 2048;
  static wchar_t path_buffer[kSymPathSize + 1 + MAX_PATH];
  if (!SymGetSearchPathW(GetCurrentProcess(), path_buffer, kSymPathSize)) {
    Report("*** WARNING: Failed to SymGetSearchPathW ***\n");
    return;
  }
  size_t sz = wcslen(path_buffer);
  if (sz) {
    CHECK_EQ(0, wcscat_s(path_buffer, L";"));
    sz++;
  }
  DWORD res = GetModuleFileNameW(NULL, path_buffer + sz, MAX_PATH);
  if (res == 0 || res == MAX_PATH) {
    Report("*** WARNING: Failed to getting the EXE directory ***\n");
    return;
  }
  // Write the zero character in place of the last backslash to get the
  // directory of the main module at the end of path_buffer.
  wchar_t *last_bslash = wcsrchr(path_buffer + sz, L'\\');
  CHECK_NE(last_bslash, 0);
  *last_bslash = L'\0';
  if (!SymSetSearchPathW(GetCurrentProcess(), path_buffer)) {
    Report("*** WARNING: Failed to SymSetSearchPathW\n");
    return;
  }
}
// Symbolizes a PC via DbgHelp: SymFromAddr for the function name and
// SymGetLineFromAddr64 for file/line info.
bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
  InitializeDbgHelpIfNeeded();
  // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
  // SYMBOL_INFO is followed in memory by its variable-length name.
  char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
  PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
  symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
  symbol->MaxNameLen = MAX_SYM_NAME;
  DWORD64 offset = 0;
  BOOL got_objname = SymFromAddr(GetCurrentProcess(),
                                 (DWORD64)addr, &offset, symbol);
  if (!got_objname)
    return false;
  DWORD unused;
  IMAGEHLP_LINE64 line_info;
  line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
  BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr,
                                           &unused, &line_info);
  frame->info.function = internal_strdup(symbol->Name);
  frame->info.function_offset = (uptr)offset;
  if (got_fileline) {
    frame->info.file = internal_strdup(line_info.FileName);
    frame->info.line = line_info.LineNumber;
  }
  // Only consider this a successful symbolization attempt if we got file info.
  // Otherwise, try llvm-symbolizer.
  return got_fileline;
}
// Demangles via DbgHelp's UnDecorateSymbolName. Only names starting with
// '\01' are processed; the result lives in a static buffer, so it is not
// thread-safe and only valid until the next call.
const char *WinSymbolizerTool::Demangle(const char *name) {
  CHECK(is_dbghelp_initialized);
  static char demangle_buffer[1000];
  if (name[0] == '\01' &&
      UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer),
                           UNDNAME_NAME_ONLY))
    return demangle_buffer;
  else
    return name;
}
// No platform-level demangling on Windows; names are returned unchanged.
const char *Symbolizer::PlatformDemangle(const char *name) {
  return name;
}
namespace {
// Minimal RAII wrapper for a Windows HANDLE: closes the handle on scope
// exit unless ownership has been transferred out via release().
struct ScopedHandle {
  ScopedHandle() : h_(nullptr) {}
  explicit ScopedHandle(HANDLE h) : h_(h) {}
  ~ScopedHandle() {
    if (h_)
      ::CloseHandle(h_);
  }
  HANDLE get() { return h_; }
  // For APIs that return the handle through an out-parameter.
  HANDLE *receive() { return &h_; }
  // Transfers ownership to the caller; the destructor becomes a no-op.
  HANDLE release() {
    HANDLE h = h_;
    h_ = nullptr;
    return h;
  }
  HANDLE h_;
};
} // namespace
bool SymbolizerProcess::StartSymbolizerSubprocess() {
// Create inherited pipes for stdin and stdout.
ScopedHandle stdin_read, stdin_write;
ScopedHandle stdout_read, stdout_write;
SECURITY_ATTRIBUTES attrs;
attrs.nLength = sizeof(SECURITY_ATTRIBUTES);
attrs.bInheritHandle = TRUE;
attrs.lpSecurityDescriptor = nullptr;
if (!::CreatePipe(stdin_read.receive(), stdin_write.receive(), &attrs, 0) ||
!::CreatePipe(stdout_read.receive(), stdout_write.receive(), &attrs, 0)) {
VReport(2, "WARNING: %s CreatePipe failed (error code: %d)\n",
SanitizerToolName, path_, GetLastError());
return false;
}
// Don't inherit the writing end of stdin or the reading end of stdout.
if (!SetHandleInformation(stdin_write.get(), HANDLE_FLAG_INHERIT, 0) ||
!SetHandleInformation(stdout_read.get(), HANDLE_FLAG_INHERIT, 0)) {
VReport(2, "WARNING: %s SetHandleInformation failed (error code: %d)\n",
SanitizerToolName, path_, GetLastError());
return false;
}
// Compute the command line. Wrap double quotes around everything.
const char *argv[kArgVMax];
GetArgV(path_, argv);
InternalScopedString command_line(kMaxPathLength * 3);
for (int i = 0; argv[i]; i++) {
const char *arg = argv[i];
int arglen = internal_strlen(arg);
// Check that tool command lines are simple and that complete escaping is
// unnecessary.
CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
CHECK(!internal_strstr(arg, "\\\\") &&
"double backslashes in args unsupported");
CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
"args ending in backslash and empty args unsupported");
command_line.append("\"%s\" ", arg);
}
VReport(3, "Launching symbolizer command: %s\n", command_line.data());
// Launch llvm-symbolizer with stdin and stdout redirected.
STARTUPINFOA si;
memset(&si, 0, sizeof(si));
si.cb = sizeof(si);
si.dwFlags |= STARTF_USESTDHANDLES;
si.hStdInput = stdin_read.get();
si.hStdOutput = stdout_write.get();
PROCESS_INFORMATION pi;
memset(&pi, 0, sizeof(pi));
if (!CreateProcessA(path_, // Executable
command_line.data(), // Command line
nullptr, // Process handle not inheritable
nullptr, // Thread handle not inheritable
TRUE, // Set handle inheritance to TRUE
0, // Creation flags
nullptr, // Use parent's environment block
nullptr, // Use parent's starting directory
&si, &pi)) {
VReport(2, "WARNING: %s failed to create process for %s (error code: %d)\n",
SanitizerToolName, path_, GetLastError());
return false;
}
// Process creation succeeded, so transfer handle ownership into the fields.
input_fd_ = stdout_read.release();
output_fd_ = stdin_write.release();
// The llvm-symbolizer process is responsible for quitting itself when the
// stdin pipe is closed, so we don't need these handles. Close them to prevent
// leaks. If we ever want to try to kill the symbolizer process from the
// parent, we'll want to hang on to these handles.
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
return true;
}
// Assembles the Windows symbolizer tool list: llvm-symbolizer first when a
// path is given or found on PATH, then the DbgHelp-based tool as fallback.
static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
                                  LowLevelAllocator *allocator) {
  if (!common_flags()->symbolize) {
    VReport(2, "Symbolizer is disabled.\n");
    return;
  }
  // Add llvm-symbolizer in case the binary has dwarf.
  const char *user_path = common_flags()->external_symbolizer_path;
  const char *path =
      user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
  if (path) {
    VReport(2, "Using llvm-symbolizer at %spath: %s\n",
            user_path ? "user-specified " : "", path);
    list->push_back(new(*allocator) LLVMSymbolizer(path, allocator));
  } else {
    // An empty user-specified path means "explicitly disabled".
    if (user_path && user_path[0] == '\0') {
      VReport(2, "External symbolizer is explicitly disabled.\n");
    } else {
      VReport(2, "External symbolizer is not present.\n");
    }
  }
  // Add the dbghelp based symbolizer.
  list->push_back(new(*allocator) WinSymbolizerTool());
}
// Constructs the Windows symbolizer with the preferred list of tools.
Symbolizer *Symbolizer::PlatformInit() {
  IntrusiveList<SymbolizerTool> list;
  list.clear();
  ChooseSymbolizerTools(&list, &symbolizer_allocator_);
  return new(symbolizer_allocator_) Symbolizer(list);
}
// No extra late-initialization steps on Windows beyond the tools' own.
void Symbolizer::LateInitialize() {
  Symbolizer::GetOrInit()->LateInitializeTools();
}
} // namespace __sanitizer
#endif  // SANITIZER_WINDOWS

View File

@@ -0,0 +1,180 @@
//===-- sanitizer_unwind_linux_libcdep.cpp --------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the unwind.h-based (aka "slow") stack unwinding routines
// available to the tools on Linux, Android, NetBSD, FreeBSD, and Solaris.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
#include "sanitizer_common.h"
#include "sanitizer_stacktrace.h"
#if SANITIZER_ANDROID
#include <dlfcn.h> // for dlopen()
#endif
#if SANITIZER_FREEBSD
#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
#endif
#include <unwind.h>
namespace __sanitizer {
namespace {
//---------------------------- UnwindSlow --------------------------------------
// Frame record produced by libcorkscrew's unwind_backtrace_signal_arch.
typedef struct {
  uptr absolute_pc;
  uptr stack_top;
  uptr stack_size;
} backtrace_frame_t;
extern "C" {
// Function-pointer types and slots matching the (Android-only)
// libcorkscrew.so API; resolved via dlsym in SanitizerInitializeUnwinder.
typedef void *(*acquire_my_map_info_list_func)();
typedef void (*release_my_map_info_list_func)(void *map);
typedef sptr (*unwind_backtrace_signal_arch_func)(
    void *siginfo, void *sigcontext, void *map_info_list,
    backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
acquire_my_map_info_list_func acquire_my_map_info_list;
release_my_map_info_list_func release_my_map_info_list;
unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
} // extern "C"
#if defined(__arm__) && !SANITIZER_NETBSD
// NetBSD uses dwarf EH
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif
// Extracts the PC from an _Unwind_Context. On 32-bit ARM (non-Mac) the
// core register r15 is read through the VRS interface and the Thumb bit
// is cleared; elsewhere the generic _Unwind_GetIP is used.
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#if defined(__arm__) && !SANITIZER_MAC
  uptr val;
  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
      15 /* r15 = PC */, _UVRSD_UINT32, &val);
  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
  // Clear the Thumb bit.
  return val & ~(uptr)1;
#else
  return (uptr)_Unwind_GetIP(ctx);
#endif
}
// Per-unwind state threaded through Unwind_Trace via the callback param.
struct UnwindTraceArg {
  BufferedStackTrace *stack;
  u32 max_depth;
};
// _Unwind_Backtrace callback: records one frame's PC per invocation,
// stopping at max_depth frames or at an implausible (zero-page) PC.
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  UnwindTraceArg *arg = (UnwindTraceArg*)param;
  CHECK_LT(arg->stack->size, arg->max_depth);
  uptr pc = Unwind_GetIP(ctx);
  const uptr kPageSize = GetPageSizeCached();
  // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
  // x86_64) is invalid and stop unwinding here. If we're adding support for
  // a platform where this isn't true, we need to reconsider this check.
  if (pc < kPageSize) return UNWIND_STOP;
  arg->stack->trace_buffer[arg->stack->size++] = pc;
  if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
  return UNWIND_CONTINUE;
}
} // namespace
#if SANITIZER_ANDROID
// Android pre-Lollipop-MR1 only: loads libcorkscrew.so and resolves the
// signal-aware unwinder entry points used by UnwindSlow(pc, context, ...).
// On any failure the pointers are left/reset null, so callers fall back to
// the libgcc unwinder.
void SanitizerInitializeUnwinder() {
  if (AndroidGetApiLevel() >= ANDROID_LOLLIPOP_MR1) return;
  // Pre-lollipop Android can not unwind through signal handler frames with
  // libgcc unwinder, but it has a libcorkscrew.so library with the necessary
  // workarounds.
  void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
  if (!p) {
    VReport(1,
            "Failed to open libcorkscrew.so. You may see broken stack traces "
            "in SEGV reports.");
    return;
  }
  acquire_my_map_info_list =
      (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
  release_my_map_info_list =
      (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
  unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
      p, "unwind_backtrace_signal_arch");
  if (!acquire_my_map_info_list || !release_my_map_info_list ||
      !unwind_backtrace_signal_arch) {
    VReport(1,
            "Failed to find one of the required symbols in libcorkscrew.so. "
            "You may see broken stack traces in SEGV reports.");
    // Reset all three so the fallback path is taken consistently.
    acquire_my_map_info_list = 0;
    unwind_backtrace_signal_arch = 0;
    release_my_map_info_list = 0;
  }
}
#endif
// Slow (unwind.h based) unwinder: collects frames via _Unwind_Backtrace,
// then trims leading frames so that `pc` ends up on top of the trace.
void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  // One frame of slack: the topmost collected frame is popped below.
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace_buffer[0] belongs to the current function so we always pop it,
  // unless there is only 1 frame in the stack trace (1 frame is always better
  // than 0!).
  // 1-frame stacks don't normally happen, but this depends on the actual
  // unwinder implementation (libgcc, libunwind, etc) which is outside of our
  // control.
  if (to_pop == 0 && size > 1)
    to_pop = 1;
  PopStackFrames(to_pop);
#if defined(__GNUC__) && defined(__sparc__)
  // __builtin_return_address returns the address of the call instruction
  // on the SPARC and not the return address, so we need to compensate.
  trace_buffer[0] = GetNextInstructionPc(pc);
#else
  trace_buffer[0] = pc;
#endif
}
// Signal-context variant: uses libcorkscrew's unwinder when it was resolved
// (see SanitizerInitializeUnwinder); otherwise falls back to the plain slow
// unwinder above.
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
  CHECK(context);
  CHECK_GE(max_depth, 2);
  if (!unwind_backtrace_signal_arch) {
    UnwindSlow(pc, max_depth);
    return;
  }
  void *map = acquire_my_map_info_list();
  CHECK(map);
  InternalMmapVector<backtrace_frame_t> frames(kStackTraceMax);
  // siginfo argument appears to be unused.
  sptr res = unwind_backtrace_signal_arch(/* siginfo */ 0, context, map,
                                          frames.data(),
                                          /* ignore_depth */ 0, max_depth);
  release_my_map_info_list(map);
  if (res < 0) return;
  CHECK_LE((uptr)res, kStackTraceMax);
  size = 0;
  // +2 compensate for libcorkscrew unwinder returning addresses of call
  // instructions instead of raw return addresses.
  for (sptr i = 0; i < res; ++i)
    trace_buffer[size++] = frames[i].absolute_pc + 2;
}
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
// SANITIZER_SOLARIS

View File

@@ -0,0 +1,75 @@
//===-- sanitizer_unwind_win.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// Sanitizer unwind Windows specific functions.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include "sanitizer_dbghelp.h" // for StackWalk64
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h" // for InitializeDbgHelpIfNeeded
using namespace __sanitizer;
#if !SANITIZER_GO
// Windows unwinder based on CaptureStackBackTrace; leading RTL frames are
// skipped by locating `pc` in the captured trace and popping above it.
void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  // FIXME: CaptureStackBackTrace might be too slow for us.
  // FIXME: Compare with StackWalk64.
  // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
  size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax),
                               (void **)&trace_buffer[0], 0);
  if (size == 0)
    return;
  // Skip the RTL frames by searching for the PC in the stacktrace.
  uptr pc_location = LocatePcInTrace(pc);
  PopStackFrames(pc_location);
}
// Slow-path unwind from an exception/signal CONTEXT using DbgHelp's
// StackWalk64. Seeds the initial STACKFRAME64 from the context registers
// (Rip/Rbp/Rsp on x64, Eip/Ebp/Esp on x86) and walks until the walker
// fails or max_depth/kStackTraceMax frames were collected.
void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
  CHECK(context);
  CHECK_GE(max_depth, 2);
  // Work on a local copy: StackWalk64 mutates the CONTEXT as it walks.
  CONTEXT ctx = *(CONTEXT *)context;
  STACKFRAME64 stack_frame;
  memset(&stack_frame, 0, sizeof(stack_frame));
  InitializeDbgHelpIfNeeded();
  size = 0;
#if defined(_WIN64)
  int machine_type = IMAGE_FILE_MACHINE_AMD64;
  stack_frame.AddrPC.Offset = ctx.Rip;
  stack_frame.AddrFrame.Offset = ctx.Rbp;
  stack_frame.AddrStack.Offset = ctx.Rsp;
#else
  int machine_type = IMAGE_FILE_MACHINE_I386;
  stack_frame.AddrPC.Offset = ctx.Eip;
  stack_frame.AddrFrame.Offset = ctx.Ebp;
  stack_frame.AddrStack.Offset = ctx.Esp;
#endif
  stack_frame.AddrPC.Mode = AddrModeFlat;
  stack_frame.AddrFrame.Mode = AddrModeFlat;
  stack_frame.AddrStack.Mode = AddrModeFlat;
  while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
                     &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
                     SymGetModuleBase64, NULL) &&
         size < Min(max_depth, kStackTraceMax)) {
    trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
  }
}
#endif // #if !SANITIZER_GO
#endif // SANITIZER_WINDOWS

View File

@@ -0,0 +1,519 @@
//===-- tsan_interceptors_mac.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific interceptors.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
#include "sanitizer_common/sanitizer_addrhashmap.h"
#include <errno.h>
#include <libkern/OSAtomic.h>
#include <objc/objc-sync.h>
#include <os/lock.h>
#include <sys/ucontext.h>
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
#include <xpc/xpc.h>
#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
typedef long long_t;
extern "C" {
int getcontext(ucontext_t *ucp) __attribute__((returns_twice));
int setcontext(const ucontext_t *ucp);
}
namespace __tsan {
// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// actually aliases of each other, and we cannot have different interceptors for
// them, because they're actually the same function. Thus, we have to stay
// conservative and treat the non-barrier versions as mo_acq_rel.
// Both memory orders are mo_acq_rel: the non-barrier entry points alias the
// barrier ones (see comment above), so we must stay conservative.
static const morder kMacOrderBarrier = mo_acq_rel;
static const morder kMacOrderNonBarrier = mo_acq_rel;
// Forwards an OSAtomic* op to the matching TSan atomic and returns the
// fetch_* result, i.e. the *original* value (used for the *Orig variants).
#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
  }
// Same, but returns the *new* value (original + x), matching OSAtomicAdd/
// Or/And/Xor semantics.
#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
  }
// Increment-by-one variant (OSAtomicIncrement*): returns the new value.
#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, ptr); \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
  }
// Decrement-by-one variant (OSAtomicDecrement*): returns the new value.
#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
                                     mo) \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, ptr); \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
  }
// Instantiates the 32/64-bit and Barrier variants of an arithmetic op.
#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
  m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderNonBarrier) \
  m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderBarrier) \
  m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
    kMacOrderNonBarrier) \
  m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
    kMacOrderBarrier)
// Bitwise ops additionally get *Orig variants that return the original value
// (instantiated via m_orig, i.e. OSATOMIC_INTERCEPTOR).
#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
  m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderNonBarrier) \
  m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderBarrier) \
  m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
    kMacOrderNonBarrier) \
  m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
    __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_X)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_1)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
                                 OSATOMIC_INTERCEPTOR_MINUS_1)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
                              OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
// Compare-and-swap interceptors; old_value is taken by value, so its address
// can be passed as the "expected" slot. The failure order is always
// kMacOrderNonBarrier.
#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
  TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
    return tsan_atomic_f##_compare_exchange_strong( \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
        kMacOrderNonBarrier, kMacOrderNonBarrier); \
  } \
  \
  TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
                   t volatile *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
    return tsan_atomic_f##_compare_exchange_strong( \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
        kMacOrderBarrier, kMacOrderNonBarrier); \
  }
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
                          long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
                          void *)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                          int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
                          int64_t)
// Bit test-and-set/clear: bit n lives in byte (n >> 3), numbered MSB-first
// within the byte (0x80u >> (n & 7)), matching OSAtomicTestAndSet semantics.
// Returns the bit's original value.
#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
  TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
    SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
    volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
    char bit = 0x80u >> (n & 7); \
    char mask = clear ? ~bit : bit; \
    char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \
    return orig_byte & bit; \
  }
#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
  OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
  OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
                            true)
// Queue interceptors: release the item before it is published to the queue,
// and acquire it after it is taken off, so the producer's writes to the item
// happen-before the consumer's reads.
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicEnqueue)(list, item, offset);
}
TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
  void *item = REAL(OSAtomicDequeue)(list, offset);
  // The queue may be empty; only acquire when an item was actually dequeued.
  if (item) __tsan_acquire(item);
  return item;
}
// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
#if !SANITIZER_IOS
TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicFifoEnqueue)(list, item, offset);
}
TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
  void *item = REAL(OSAtomicFifoDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}
#endif
// Lock interceptors. Common pattern: when the current thread's TSan state is
// not yet initialized (or already torn down), fall through to the real
// function without instrumentation; otherwise Acquire after a successful
// lock and Release before an unlock, so the critical sections synchronize.
TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockLock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
  REAL(OSSpinLockLock)(lock);
  Acquire(thr, pc, (uptr)lock);
}
TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockTry)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
  bool result = REAL(OSSpinLockTry)(lock);
  // Only a successful try-lock establishes synchronization.
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}
TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockUnlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
  // Release must happen before the lock is actually dropped.
  Release(thr, pc, (uptr)lock);
  REAL(OSSpinLockUnlock)(lock);
}
// os_lock_* take an opaque lock pointer; same Acquire/Release protocol.
TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
  REAL(os_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}
TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
  bool result = REAL(os_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}
TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_lock_unlock)(lock);
}
// os_unfair_lock_*: unlike the interceptors above, these tolerate a dead
// thread (no CHECK) and bail out to the real function in that case too.
TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock, lock);
  REAL(os_unfair_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}
TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock,
                 u32 options) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock_with_options)(lock, options);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_options, lock, options);
  REAL(os_unfair_lock_lock_with_options)(lock, options);
  Acquire(thr, pc, (uptr)lock);
}
TSAN_INTERCEPTOR(bool, os_unfair_lock_trylock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_trylock, lock);
  bool result = REAL(os_unfair_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}
TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_unfair_lock_unlock)(lock);
}
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
// XPC interceptors: the registering thread Releases on the connection, and
// the wrapped handler Acquires on it before running the user code, so state
// set up before registration is visible inside the handler/barrier.
TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
                 xpc_connection_t connection, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
                          handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      // Scoped block: report the interceptor entry and Acquire, then let the
      // user handler run outside the scoped region.
      SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_set_event_handler)(connection, new_handler);
}
TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
                 dispatch_block_t barrier) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
  Release(thr, pc, (uptr)connection);
  dispatch_block_t new_barrier = ^() {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
      Acquire(thr, pc, (uptr)connection);
    }
    barrier();
  };
  REAL(xpc_connection_send_barrier)(connection, new_barrier);
}
TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
                 xpc_connection_t connection, xpc_object_t message,
                 dispatch_queue_t replyq, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
                          message, replyq, handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_send_message_with_reply)
  (connection, message, replyq, new_handler);
}
TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
  Release(thr, pc, (uptr)connection);
  REAL(xpc_connection_cancel)(connection);
}
#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
// pointers encode the object data directly in their pointer bits and do not
// have an associated memory allocation. The Obj-C runtime uses tagged pointers
// to transparently optimize small objects.
// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
// pointers encode the object data directly in their pointer bits and have no
// backing allocation; a pointer counts as tagged when its lowest or highest
// bit is set.
static bool IsTaggedObjCPointer(id obj) {
  const uptr kLowTagBit = 0x1ull;
  const uptr kHighTagBit = 0x8000000000000000ull;
  const uptr bits = (uptr)obj;
  return (bits & (kHighTagBit | kLowTagBit)) != 0;
}
// Returns an address which can be used to inform TSan about synchronization
// points (MutexLock/Unlock). The TSan infrastructure expects this to be a valid
// address in the process space. We do a small allocation here to obtain a
// stable address (the array backing the hash map can change). The memory is
// never free'd (leaked) and allocation and locking are slow, but this code only
// runs for @synchronized with tagged pointers, which is very rare.
// Maps a tagged-pointer value to a stable, process-valid address allocated on
// first use (see the comment above). The 1-byte allocation is intentionally
// leaked; the hash-map handle keeps lookups race-free.
static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
  typedef AddrHashMap<uptr, 5> Map;
  static Map Addresses;
  Map::Handle h(&Addresses, addr);
  if (h.created()) {
    // Hide the bookkeeping allocation from TSan's own instrumentation.
    ThreadIgnoreBegin(thr, pc);
    *h = (uptr) user_alloc(thr, pc, /*size=*/1);
    ThreadIgnoreEnd(thr, pc);
  }
  return *h;
}
// Returns an address on which we can synchronize given an Obj-C object pointer.
// For normal object pointers, this is just the address of the object in memory.
// Tagged pointers are not backed by an actual memory allocation, so we need to
// synthesize a valid address.
// Returns an address suitable for TSan synchronization for an Obj-C object.
// Ordinary objects synchronize on their own address; tagged pointers have no
// backing memory, so a stable synthesized address is used instead.
static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
  const uptr raw = (uptr)obj;
  return IsTaggedObjCPointer(obj) ? GetOrCreateSyncAddress(raw, thr, pc) : raw;
}
// @synchronized support: model objc_sync_enter/exit as a reentrant write
// mutex on the object's sync address.
TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
  // objc_sync_enter(nil) is a no-op in the runtime; do not model it.
  if (!obj) return REAL(objc_sync_enter)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
  int result = REAL(objc_sync_enter)(obj);
  CHECK_EQ(result, OBJC_SYNC_SUCCESS);
  MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant);
  return result;
}
TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
  if (!obj) return REAL(objc_sync_exit)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexUnlock(thr, pc, addr);
  int result = REAL(objc_sync_exit)(obj);
  // Exit can fail (e.g. unlock without matching enter); report it as an
  // invalid mutex access rather than crashing.
  if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
  return result;
}
// swapcontext() cannot simply be forwarded to the real function, so its
// behavior is re-implemented here in terms of getcontext()/setcontext().
TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
  {
    // Report the interceptor entry only; the scoped object must be destroyed
    // before the context switch below.
    SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
  }
  // Because of swapcontext() semantics we have no option but to copy its
  // implementation here.
  if (!oucp || !ucp) {
    errno = EINVAL;
    return -1;
  }
  ThreadState *thr = cur_thread();
  // UCF_SWAPPED distinguishes the direct return from getcontext() from the
  // later return via a context switch back into oucp.
  const int UCF_SWAPPED = 0x80000000;
  oucp->uc_onstack &= ~UCF_SWAPPED;
  // Suppress interceptor processing across the getcontext machinery.
  thr->ignore_interceptors++;
  int ret = getcontext(oucp);
  if (!(oucp->uc_onstack & UCF_SWAPPED)) {
    thr->ignore_interceptors--;
    if (!ret) {
      oucp->uc_onstack |= UCF_SWAPPED;
      ret = setcontext(ucp);
    }
  }
  return ret;
}
// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
namespace {
// Mirror of libc++'s __shared_weak_count object layout: two atomic counters
// followed by a vtable whose 3rd and 5th slots are the on-zero callbacks.
// NOTE(review): this assumes the libc++ ABI layout stays stable — the
// _unused_* entries exist only to pad the vtable to the right offsets.
struct fake_shared_weak_count {
  volatile a64 shared_owners;       // strong reference count (minus one)
  volatile a64 shared_weak_owners;  // weak reference count
  virtual void _unused_0x0() = 0;
  virtual void _unused_0x8() = 0;
  virtual void on_zero_shared() = 0;
  virtual void _unused_0x18() = 0;
  virtual void on_zero_shared_weak() = 0;
};
} // namespace
// The following code adds libc++ interceptors for:
// void __shared_weak_count::__release_shared() _NOEXCEPT;
// bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
// destructor code. These interceptors re-implements the whole functions so that
// the mo_acq_rel semantics of the atomic decrement are visible.
//
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help).
// Mangled name of std::__1::__shared_weak_count::__release_shared().
// Re-implements the release: decrement strong count with release order; the
// thread that drops it to zero Acquires and destroys, making prior writers'
// effects visible (see the comment block above).
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);
  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
                          o);
  // fetch_add returns the old value; 0 means this was the last strong owner.
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    // Dropping the last strong reference also releases the implicit weak
    // reference held by the strong count.
    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
        0) {
      Acquire(thr, pc, (uptr)&o->shared_weak_owners);
      o->on_zero_shared_weak();
    }
  }
}
// Mangled name of std::__1::__shared_count::__release_shared(). Returns true
// when this call destroyed the owned object.
STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    return true;
  }
  return false;
}
namespace {
// Trampoline state for the __call_once interceptor below: the user callback,
// its argument, and the once-flag to release on.
struct call_once_callback_args {
  void (*orig_func)(void *arg);
  void *orig_arg;
  void *flag;
};
// Runs the user callback, then publishes its effects by releasing on the
// once-flag (the release inside libc++ itself is invisible to TSan).
void call_once_callback_wrapper(void *arg) {
  call_once_callback_args *new_args = (call_once_callback_args *)arg;
  new_args->orig_func(new_args->orig_arg);
  __tsan_release(new_args->flag);
}
} // namespace
// This adds a libc++ interceptor for:
// void __call_once(volatile unsigned long&, void*, void(*)(void*));
// C++11 call_once is implemented via an internal function __call_once which is
// inside libc++.dylib, and the atomic release store inside it is thus
// TSan-invisible. To avoid false positives, this interceptor wraps the callback
// function and performs an explicit Release after the user code has run.
// Mangled name of std::__1::__call_once(unsigned long volatile&, void*,
// void(*)(void*)). Substitutes a wrapper callback so an explicit TSan
// release happens right after the user initializer runs (see comment above).
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
                   void *arg, void (*func)(void *arg)) {
  call_once_callback_args new_args = {func, arg, flag};
  REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
                                            call_once_callback_wrapper);
}
} // namespace __tsan
#endif // SANITIZER_MAC

View File

@@ -0,0 +1,52 @@
//===-- tsan_interceptors_mach_vm.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Interceptors for mach_vm_* user space memory routines on Darwin.
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_platform.h"
#include <mach/mach.h>
namespace __tsan {
// Reports whether a fixed-address allocation request [*address, *address +
// size) would fall outside application memory and thus could collide with
// TSan's shadow ranges. VM_FLAGS_ANYWHERE requests let the kernel pick the
// address, so they never intersect the shadow.
static bool intersects_with_shadow(mach_vm_address_t *address,
                                   mach_vm_size_t size, int flags) {
  // VM_FLAGS_FIXED is 0x0, so we have to test for VM_FLAGS_ANYWHERE.
  if (flags & VM_FLAGS_ANYWHERE) return false;
  const uptr begin = *address;
  const uptr last = begin + size - 1;
  return !(IsAppMem(begin) && IsAppMem(last));
}
// Refuses fixed-address allocations in the current task that would land in
// TSan's shadow ranges (reports KERN_NO_SPACE), and marks successfully
// allocated ranges in shadow memory.
TSAN_INTERCEPTOR(kern_return_t, mach_vm_allocate, vm_map_t target,
                 mach_vm_address_t *address, mach_vm_size_t size, int flags) {
  SCOPED_TSAN_INTERCEPTOR(mach_vm_allocate, target, address, size, flags);
  // Allocations in other tasks do not affect this process's shadow.
  if (target != mach_task_self())
    return REAL(mach_vm_allocate)(target, address, size, flags);
  if (intersects_with_shadow(address, size, flags))
    return KERN_NO_SPACE;
  kern_return_t res = REAL(mach_vm_allocate)(target, address, size, flags);
  if (res == KERN_SUCCESS)
    MemoryRangeImitateWriteOrResetRange(thr, pc, *address, size);
  return res;
}
// Resets the shadow for the range before handing the deallocation to the
// kernel (only for the current task).
TSAN_INTERCEPTOR(kern_return_t, mach_vm_deallocate, vm_map_t target,
                 mach_vm_address_t address, mach_vm_size_t size) {
  SCOPED_TSAN_INTERCEPTOR(mach_vm_deallocate, target, address, size);
  if (target != mach_task_self())
    return REAL(mach_vm_deallocate)(target, address, size);
  UnmapShadow(thr, address, size);
  return REAL(mach_vm_deallocate)(target, address, size);
}
} // namespace __tsan

View File

@@ -0,0 +1,517 @@
//===-- tsan_platform_linux.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
SANITIZER_OPENBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_openbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
#endif
#ifdef sa_handler
# undef sa_handler
#endif
#ifdef sa_sigaction
# undef sa_sigaction
#endif
#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif
#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
#endif
#if INIT_LONGJMP_XOR_KEY
#include "interception/interception.h"
// Must be declared outside of other namespaces.
DECLARE_REAL(int, _setjmp, void *env)
#endif
namespace __tsan {
#if INIT_LONGJMP_XOR_KEY
static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif
#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
#endif
enum {
MemTotal = 0,
MemShadow = 1,
MemMeta = 2,
MemFile = 3,
MemMmap = 4,
MemTrace = 5,
MemHeap = 6,
MemOther = 7,
MemCount = 8,
};
// Memory-profile callback: attributes an RSS sample at address p to one of
// the Mem* buckets based on which TSan region it falls into. stats_size is
// unused. Checked in order; the first matching range wins.
void FillProfileCallback(uptr p, uptr rss, bool file,
                         uptr *mem, uptr stats_size) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
#if !SANITIZER_GO
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
    // App memory is split into file-backed vs anonymous mappings.
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#else
  else if (p >= AppMemBeg() && p < AppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#endif
  else if (p >= TraceMemBeg() && p < TraceMemEnd())
    mem[MemTrace] += rss;
  else
    mem[MemOther] += rss;
}
// Formats a one-line memory profile (values in MB) into buf. Buckets are
// collected via FillProfileCallback; stack-depot stats come from the depot.
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
  // NOTE(review): the literal 7 is the stats_size argument, which
  // FillProfileCallback ignores — presumably historical; confirm upstream.
  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
  StackDepotStats *stacks = StackDepotGetStats();
  // nthr prints live threads over total started (nlive/nthread).
  internal_snprintf(buf, buf_size,
      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
      mem[MemHeap] >> 20, mem[MemOther] >> 20,
      stacks->allocated >> 20, stacks->n_uniq_ids,
      nlive, nthread);
}
#if SANITIZER_LINUX
// Stop-the-world callback: returns the shadow range's pages to the OS while
// all threads are suspended. Linux-only (StopTheWorld is available there).
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
    void *argument) {
  ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif
void FlushShadowMemory() {
#if SANITIZER_LINUX
  StopTheWorld(FlushShadowMemoryCallback, 0);
#endif
}
#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  // No usable temp directory: silently skip the optimization.
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with kShadowRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
  InternalMmapVector<u64> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
    *p = kShadowRodata;
  internal_write(fd, marker.data(), marker.size() * sizeof(u64));
  // Map the file into memory.
  // NOTE(review): this looks like a probe that mmap works at all — the
  // mapping itself is never used; confirm against upstream intent.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  // Reusing the buffer 'name'.
  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
  while (proc_maps.Next(&segment)) {
    // Heuristic: readable, executable, non-writable, named app mappings are
    // treated as .rodata-bearing images.
    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
        segment.IsReadable() && segment.IsExecutable() &&
        !segment.IsWritable() && IsAppMem(segment.start)) {
      // Assume it's .rodata
      char *shadow_start = (char *)MemToShadow(segment.start);
      char *shadow_end = (char *)MemToShadow(segment.end);
      // Tile the marker file over the segment's shadow, clipping the last
      // chunk to the shadow end.
      for (char *p = shadow_start; p < shadow_end;
           p += marker.size() * sizeof(u64)) {
        internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}
// Linux-specific shadow setup: pre-mark .rodata shadow as race-free.
void InitializeShadowMemoryPlatform() {
  MapRodata();
}
#endif // #if !SANITIZER_GO
// Detects the virtual address space size (VMA) from the current stack
// frame's most significant bit and aborts on layouts TSan has no mapping
// for (aarch64 and powerpc64 have multiple possible VMA configurations).
void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
#else
  // The Go runtime mapping only supports 48-bit VMA on aarch64.
  if (vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
    Die();
  }
#endif
#elif defined(__powerpc64__)
# if !SANITIZER_GO
  if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 46, and 47\n", vmaSize);
    Die();
  }
# endif
#endif
#endif
}
// Process-wide platform setup: fixes up resource limits (and on aarch64,
// ASLR) that are incompatible with TSan's shadow layout, re-execing the
// program once if anything had to change.
void InitializePlatform() {
  DisableCoreDumperIfNecessary();
  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
#if !SANITIZER_GO
  {
    bool reexec = false;
    // TSan doesn't play well with unlimited stack size (as stack
    // overlaps with shadow memory). If we detect unlimited stack size,
    // we re-exec the program with limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }
    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
#if SANITIZER_LINUX && defined(__aarch64__)
    // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
    // linux kernel, the random gap between stack and mapped area is increased
    // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
    // this big range, we should disable randomized virtual space on aarch64.
    int old_personality = personality(0xffffffff);
    if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
      VReport(1, "WARNING: Program is run with randomized virtual address "
                 "space, which wouldn't work with ThreadSanitizer.\n"
                 "Re-execing with fixed virtual address space.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      reexec = true;
    }
    // Initialize the xor key used in {sig}{set,long}jump.
    InitializeLongjmpXorKey();
#endif
    if (reexec)
      ReExec();
  }
  // Protect the address ranges TSan reserves and size the TLS bookkeeping.
  CheckAndProtect();
  InitTlsSize();
#endif  // !SANITIZER_GO
}
#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
// Pulls glibc's resolver socket fds out of a __res_state (see comment
// above). Relies on glibc's private _u._ext layout; returns how many fds
// were written into fds (at most nfd). Non-glibc targets report none.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  struct __res_state *statp = (struct __res_state*)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    // -1 marks an unused nameserver slot.
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}
// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
// Extracts file descriptors received via SCM_RIGHTS control messages from a
// recvmsg() message header. Writes up to nfd descriptors into fds and
// returns how many were stored. See 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  msghdr *msg = (msghdr *)msgp;
  int count = 0;
  for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); cmsg;
       cmsg = CMSG_NXTHDR(msg, cmsg)) {
    // Only SCM_RIGHTS control messages carry descriptors.
    if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
      const int *payload = (const int *)CMSG_DATA(cmsg);
      int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
      for (int i = 0; i < n; i++) {
        fds[count++] = payload[i];
        if (count == nfd)
          return count;
      }
    }
  }
  return count;
}
// Reverse operation of libc stack pointer mangling
// Reverse operation of libc stack pointer mangling: glibc XOR-and-rotate
// obfuscates the saved SP in jmp_buf; this inverts it so TSan can find the
// jump target's stack. Platforms without mangling return the value as-is.
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#if defined(__x86_64__)
# if SANITIZER_LINUX
  // Reverse of:
  //   xor  %fs:0x30, %rsi
  //   rol  $0x11, %rsi
  uptr sp;
  asm("ror  $0x11, %0 \n"
      "xor  %%fs:0x30, %0 \n"
      : "=r" (sp)
      : "0" (mangled_sp));
  return sp;
# else
  return mangled_sp;
# endif
#elif defined(__aarch64__)
# if SANITIZER_LINUX
  // The key is derived at startup by InitializeLongjmpXorKey().
  return mangled_sp ^ longjmp_xor_key;
# else
  return mangled_sp;
# endif
#elif defined(__powerpc64__)
  // Reverse of:
  //   ld   r4, -28696(r13)
  //   xor  r4, r3, r4
  uptr xor_key;
  asm("ld  %0, -28696(%%r13)" : "=r" (xor_key));
  return mangled_sp ^ xor_key;
#elif defined(__mips__)
  return mangled_sp;
#else
  #error "Unknown platform"
#endif
}
// Index of the (mangled) stack pointer inside a jmp_buf, which varies per
// libc and architecture. These values mirror each platform's jmp_buf layout.
#ifdef __powerpc__
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# define LONG_JMP_SP_ENV_SLOT 2
#elif SANITIZER_NETBSD
# define LONG_JMP_SP_ENV_SLOT 6
#elif SANITIZER_LINUX
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__mips64)
#  define LONG_JMP_SP_ENV_SLOT 1
# else
#  define LONG_JMP_SP_ENV_SLOT 6
# endif
#endif

// Read the stored SP out of a jmp_buf and undo the libc mangling.
uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  return UnmangleLongJmpSp(mangled_sp);
}
#if INIT_LONGJMP_XOR_KEY
// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random key. For AArch64 it is a global
// variable rather than a TCB one (as for x86_64/powerpc). We obtain the key by
// issuing a setjmp and XORing the SP pointer values to derive the key.
static void InitializeLongjmpXorKey() {
  // 1. Call REAL(setjmp), which stores the mangled SP in env.
  jmp_buf env;
  REAL(_setjmp)(env);
  // 2. Retrieve vanilla/mangled SP.
  uptr sp;
  asm("mov %0, sp" : "=r" (sp));
  uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];
  // 3. xor SPs to obtain key.
  // Since mangled = real ^ key, XORing the two recovers the key.
  longjmp_xor_key = mangled_sp ^ sp;
}
#endif
// Mark the thread's TLS region as written by the thread itself, so that
// reuse of TLS memory by a later thread is not reported as a race.
// The ThreadState object embedded inside the TLS range is skipped.
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // Check that the thr object is in tls;
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  // Imitate writes on [tls_addr, thr_beg) and [thr_end, tls_addr + tls_size).
  MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
  MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
                          tls_addr + tls_size - thr_end);
}
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
// Invokes `fn(c, m, abstime)` with `cleanup(arg)` registered as a pthread
// cancellation cleanup handler, so the handler runs if fn is a cancellation
// point and the thread gets cancelled inside it.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  // push/pop must be lexically paired in the same scope (they expand to
  // an open/close brace pair); pop(0) unregisters without running cleanup.
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
#endif // !SANITIZER_GO
#if !SANITIZER_GO
// No-op on this platform: malloc interception happens elsewhere, so there is
// no system allocator to swap out here.
void ReplaceSystemMalloc() { }
#endif
#if !SANITIZER_GO
#if SANITIZER_ANDROID
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;

// Return the current thread's ThreadState, lazily allocating it (and the
// shared read-only dead_thread_state) on first use. All signals are blocked
// around the allocation so a signal handler cannot observe a half-set-up
// state.
ThreadState *cur_thread() {
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr == nullptr) {
    __sanitizer_sigset_t emptyset;
    internal_sigfillset(&emptyset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
    // Re-check under the blocked-signals window: a signal handler may have
    // created the state between the first load and the mask change.
    thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
                                                     "ThreadState"));
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState*>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        // Configure the shared dead state to ignore everything, then make the
        // page read-only so accidental writes fault loudly.
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<int*>(&dead_thread_state->tid) = -1;
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}

// Install `thr` as the current thread's ThreadState.
void set_cur_thread(ThreadState *thr) {
  *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
}

// Tear down this thread's ThreadState and point its TLS slot at the shared
// read-only dead_thread_state, again with all signals blocked.
void cur_thread_finalize() {
  __sanitizer_sigset_t emptyset;
  internal_sigfillset(&emptyset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr != dead_thread_state) {
    *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif  // SANITIZER_ANDROID
#endif  // if !SANITIZER_GO
} // namespace __tsan
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
// SANITIZER_OPENBSD

View File

@@ -0,0 +1,324 @@
//===-- tsan_platform_mac.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_ptrauth.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
namespace __tsan {
#if !SANITIZER_GO
// Lazily allocate a `size`-byte zeroed region and publish its address through
// *dst. Lock-free (mmap + CAS), so it is safe to call from signal handlers;
// a thread that loses the CAS race unmaps its own mapping and adopts the
// winner's.
static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
  atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
  void *val = (void *)atomic_load_relaxed(a);
  atomic_signal_fence(memory_order_acquire);  // Turns the previous load into
                                              // acquire wrt signals.
  if (UNLIKELY(val == nullptr)) {
    val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANON, -1, 0);
    CHECK(val);
    void *cmp = nullptr;
    if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
                                        memory_order_acq_rel)) {
      // Lost the race: cmp now holds the winner's pointer.
      internal_munmap(val, size);
      val = cmp;
    }
  }
  return val;
}
// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
// problematic, because there are several places where interceptors are called
// when TLVs are not accessible (early process startup, thread cleanup, ...).
// The following provides a "poor man's TLV" implementation, where we use the
// shadow memory of the pointer returned by pthread_self() to store a pointer to
// the ThreadState object. The main thread's ThreadState is stored separately
// in a static variable, because we need to access it even before the
// shadow memory is set up.
static uptr main_thread_identity = 0;
// Static backing storage for the main thread's ThreadState (cache-line
// aligned), plus a typed pointer to it so its address can be returned.
ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state;

// We cannot use pthread_self() before libpthread has been initialized. Our
// current heuristic for guarding this is checking `main_thread_identity` which
// is only assigned in `__tsan::InitializePlatform`.
// Returns the address of the slot holding the current thread's ThreadState*.
static ThreadState **cur_thread_location() {
  if (main_thread_identity == 0)
    return &main_thread_state_loc;
  uptr thread_identity = (uptr)pthread_self();
  if (thread_identity == main_thread_identity)
    return &main_thread_state_loc;
  // Other threads: store the pointer in the shadow of pthread_self().
  return (ThreadState **)MemToShadow(thread_identity);
}
// Return the current thread's ThreadState, allocating it on first use in a
// signal-safe way.
ThreadState *cur_thread() {
  return (ThreadState *)SignalSafeGetOrAllocate(
      (uptr *)cur_thread_location(), sizeof(ThreadState));
}

// Install `thr` as the current thread's ThreadState.
void set_cur_thread(ThreadState *thr) {
  *cur_thread_location() = thr;
}

// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
// munmap first and then clear `fake_tls`; if we receive a signal in between,
// handler will try to access the unmapped ThreadState.
void cur_thread_finalize() {
  ThreadState **thr_state_loc = cur_thread_location();
  if (thr_state_loc == &main_thread_state_loc) {
    // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
    // exit the main thread. Let's keep the main thread's ThreadState.
    return;
  }
  internal_munmap(*thr_state_loc, sizeof(ThreadState));
  *thr_state_loc = nullptr;
}
#endif
// No-op on Mac; shadow memory is not explicitly flushed on this platform.
void FlushShadowMemory() {
}

// Sum resident and dirty byte counts over [start, end) by walking the task's
// VM regions with vm_region_64. Results are written to *res and *dirty.
static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
  vm_address_t address = start;
  vm_address_t end_address = end;
  uptr resident_pages = 0;
  uptr dirty_pages = 0;
  while (address < end_address) {
    vm_size_t vm_region_size;
    mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
    vm_region_extended_info_data_t vm_region_info;
    mach_port_t object_name;
    kern_return_t ret = vm_region_64(
        mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
        (vm_region_info_t)&vm_region_info, &count, &object_name);
    // vm_region_64 advances `address` to the next region; stop at the first
    // failure (e.g. past the last mapped region).
    if (ret != KERN_SUCCESS) break;
    resident_pages += vm_region_info.pages_resident;
    dirty_pages += vm_region_info.pages_dirtied;
    address += vm_region_size;
  }
  *res = resident_pages * GetPageSizeCached();
  *dirty = dirty_pages * GetPageSizeCached();
}
// Render a human-readable memory-usage report into `buf`: resident/dirty
// sizes of each TSAN region, stack-depot stats, and thread counts.
// Note: the format string and its argument list are both spliced with
// matching #if !SANITIZER_GO blocks; they must stay in sync.
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr shadow_res, shadow_dirty;
  uptr meta_res, meta_dirty;
  uptr trace_res, trace_dirty;
  RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
  RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
  RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
#if !SANITIZER_GO
  uptr low_res, low_dirty;
  uptr high_res, high_dirty;
  uptr heap_res, heap_dirty;
  RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
  RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
  RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
#else  // !SANITIZER_GO
  uptr app_res, app_dirty;
  RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
#endif
  StackDepotStats *stacks = StackDepotGetStats();
  internal_snprintf(buf, buf_size,
    "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#if !SANITIZER_GO
    "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
    "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#else  // !SANITIZER_GO
    "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
#endif
    "stacks: %zd unique IDs, %zd kB allocated\n"
    "threads: %zd total, %zd live\n"
    "------------------------------\n",
    ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
    MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
    TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
#if !SANITIZER_GO
    LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
    HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
    HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
#else  // !SANITIZER_GO
    AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
#endif
    stacks->n_uniq_ids, stacks->allocated / 1024,
    nthread, nlive);
}
#if !SANITIZER_GO
// No extra per-platform shadow setup needed on Mac.
void InitializeShadowMemoryPlatform() { }

// On OS X, GCD worker threads are created without a call to pthread_create. We
// need to properly register these threads with ThreadCreate and ThreadStart.
// These threads don't have a parent thread, as they are created "spuriously".
// We're using a libpthread API that notifies us about a newly created thread.
// The `thread == pthread_self()` check indicates this is actually a worker
// thread. If it's just a regular thread, this hook is called on the parent
// thread.
typedef void (*pthread_introspection_hook_t)(unsigned int event,
                                             pthread_t thread, void *addr,
                                             size_t size);
extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
    pthread_introspection_hook_t hook);
// Event codes from libpthread's introspection API.
static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
// Previously installed hook, chained to at the end of ours.
static pthread_introspection_hook_t prev_pthread_introspection_hook;

static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
                                          void *addr, size_t size) {
  if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
    if (thread == pthread_self()) {
      // The current thread is a newly created GCD worker thread.
      ThreadState *thr = cur_thread();
      Processor *proc = ProcCreate();
      ProcWire(proc, thr);
      ThreadState *parent_thread_state = nullptr;  // No parent.
      int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
      CHECK_NE(tid, 0);
      ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
    }
  } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
    if (thread == pthread_self()) {
      ThreadState *thr = cur_thread();
      // Only tear down if this thread was actually registered with TSAN.
      if (thr->tctx) {
        DestroyThreadState();
      }
    }
  }
  // Chain to any hook that was installed before ours.
  if (prev_pthread_introspection_hook != nullptr)
    prev_pthread_introspection_hook(event, thread, addr, size);
}
#endif
// Early platform checks, run before shadow memory is set up.
void InitializePlatformEarly() {
#if defined(__aarch64__)
  // TSAN's fixed shadow mapping assumes a specific maximum VM address on
  // aarch64 Macs; bail out if the kernel reports a different limit.
  uptr max_vm = GetMaxUserVirtualAddress() + 1;
  if (max_vm != Mapping::kHiAppMemEnd) {
    Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
           max_vm, Mapping::kHiAppMemEnd);
    Die();
  }
#endif
}
// XOR key libsystem uses to mangle the SP stored in jmp_buf (0 if unused).
static uptr longjmp_xor_key = 0;

// Mac platform initialization: record the main thread's identity, install the
// pthread introspection hook for GCD workers, and fetch the longjmp XOR key.
void InitializePlatform() {
  DisableCoreDumperIfNecessary();
#if !SANITIZER_GO
  CheckAndProtect();

  CHECK_EQ(main_thread_identity, 0);
  main_thread_identity = (uptr)pthread_self();

  prev_pthread_introspection_hook =
      pthread_introspection_hook_install(&my_pthread_introspection_hook);
#endif

  if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
    // Libsystem currently uses a process-global key; this might change.
    const unsigned kTLSLongjmpXorKeySlot = 0x7;
    longjmp_xor_key = (uptr)pthread_getspecific(kTLSLongjmpXorKeySlot);
  }
}
// jmp_buf slot holding the stored SP; the aarch64 slot moved in macOS 10.14.
#ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT \
    ((GetMacosAlignedVersion() >= MacosVersion(10, 14)) ? 12 : 13)
#else
# define LONG_JMP_SP_ENV_SLOT 2
#endif

// Recover the real SP from a jmp_buf: undo the XOR mangling, then (on
// arm64e) strip/authenticate the pointer-authentication signature.
uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  uptr sp = mangled_sp ^ longjmp_xor_key;
  sp = (uptr)ptrauth_auth_data((void *)sp, ptrauth_key_asdb,
                               ptrauth_string_discriminator("sp"));
  return sp;
}
#if !SANITIZER_GO
// Mark the thread's TLS region as written by the thread itself, skipping the
// shadow slot that stores the ThreadState pointer (see cur_thread_location).
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // The pointer to the ThreadState object is stored in the shadow memory
  // of the tls.
  uptr tls_end = tls_addr + tls_size;
  uptr thread_identity = (uptr)pthread_self();
  if (thread_identity == main_thread_identity) {
    // Main thread's state lives in a static buffer, not in TLS; imitate a
    // write over the whole range.
    MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
  } else {
    uptr thr_state_start = thread_identity;
    uptr thr_state_end = thr_state_start + sizeof(uptr);
    CHECK_GE(thr_state_start, tls_addr);
    CHECK_LE(thr_state_start, tls_addr + tls_size);
    CHECK_GE(thr_state_end, tls_addr);
    CHECK_LE(thr_state_end, tls_addr + tls_size);
    // Imitate writes around the ThreadState-pointer slot, not over it.
    MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
                            thr_state_start - tls_addr);
    MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
                            tls_end - thr_state_end);
  }
}
#endif
#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
// Invokes `fn(c, m, abstime)` with `cleanup(arg)` registered as a pthread
// cancellation cleanup handler (same contract as the Linux version).
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  // push/pop expand to a paired brace scope; pop(0) unregisters without
  // executing the cleanup.
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
#endif
} // namespace __tsan
#endif // SANITIZER_MAC

View File

@@ -0,0 +1,167 @@
//===-- tsan_platform_posix.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// POSIX-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
namespace __tsan {
// Shared diagnostic strings for shadow-region madvise failures.
static const char kShadowMemoryMappingWarning[] =
    "FATAL: %s can not madvise shadow region [%zx, %zx] with %s (errno: %d)\n";
static const char kShadowMemoryMappingHint[] =
    "HINT: if %s is not supported in your environment, you may set "
    "TSAN_OPTIONS=%s=0\n";

// Apply the configured huge-page mode to a shadow region.
static void NoHugePagesInShadow(uptr addr, uptr size) {
  SetShadowRegionHugePageMode(addr, size);
}

// Exclude a shadow region from core dumps (MADV_DONTDUMP) when the
// use_madv_dontdump flag is on; a failure is fatal with a hint to disable it.
static void DontDumpShadow(uptr addr, uptr size) {
  if (common_flags()->use_madv_dontdump)
    if (!DontDumpShadowMemory(addr, size)) {
      Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
             "MADV_DONTDUMP", errno);
      Printf(kShadowMemoryMappingHint, "MADV_DONTDUMP", "use_madv_dontdump");
      Die();
    }
}
#if !SANITIZER_GO
// Map the main shadow and meta-shadow regions at their fixed addresses and
// tune their madvise settings. Fatal if the mappings cannot be established
// (typically because the binary was not built as PIE).
void InitializeShadowMemory() {
  // Map memory shadow.
  if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
    Die();
  }
  // This memory range is used for thread stacks and large user mmaps.
  // Frequently a thread uses only a small part of stack and similarly
  // a program uses a small part of large mmap. On some programs
  // we see 20% memory usage reduction without huge pages for this range.
  // FIXME: don't use constants here.
#if defined(__x86_64__)
  const uptr kMadviseRangeBeg  = 0x7f0000000000ull;
  const uptr kMadviseRangeSize = 0x010000000000ull;
#elif defined(__mips64)
  const uptr kMadviseRangeBeg  = 0xff00000000ull;
  const uptr kMadviseRangeSize = 0x0100000000ull;
#elif defined(__aarch64__) && defined(__APPLE__)
  uptr kMadviseRangeBeg = LoAppMemBeg();
  uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
#elif defined(__aarch64__)
  // Range depends on the configured virtual address size (vmaSize).
  uptr kMadviseRangeBeg = 0;
  uptr kMadviseRangeSize = 0;
  if (vmaSize == 39) {
    kMadviseRangeBeg  = 0x7d00000000ull;
    kMadviseRangeSize = 0x0300000000ull;
  } else if (vmaSize == 42) {
    kMadviseRangeBeg  = 0x3f000000000ull;
    kMadviseRangeSize = 0x01000000000ull;
  } else {
    DCHECK(0);
  }
#elif defined(__powerpc64__)
  uptr kMadviseRangeBeg = 0;
  uptr kMadviseRangeSize = 0;
  if (vmaSize == 44) {
    kMadviseRangeBeg  = 0x0f60000000ull;
    kMadviseRangeSize = 0x0010000000ull;
  } else if (vmaSize == 46) {
    kMadviseRangeBeg  = 0x3f0000000000ull;
    kMadviseRangeSize = 0x010000000000ull;
  } else {
    DCHECK(0);
  }
#endif
  NoHugePagesInShadow(MemToShadow(kMadviseRangeBeg),
                      kMadviseRangeSize * kShadowMultiplier);
  DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
      ShadowBeg(), ShadowEnd(),
      (ShadowEnd() - ShadowBeg()) >> 30);

  // Map meta shadow.
  const uptr meta = MetaShadowBeg();
  const uptr meta_size = MetaShadowEnd() - meta;
  if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
    Die();
  }
  NoHugePagesInShadow(meta, meta_size);
  DontDumpShadow(meta, meta_size);
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
      meta, meta + meta_size, meta_size >> 30);

  InitializeShadowMemoryPlatform();
}
// Reserve [beg, end) as an inaccessible mapping so the application cannot map
// anything into a TSAN-owned gap. An empty range is a no-op; failure to
// reserve is fatal.
static void ProtectRange(uptr beg, uptr end) {
  CHECK_LE(beg, end);
  if (end == beg)
    return;
  const uptr mapped = (uptr)MmapFixedNoAccess(beg, end - beg);
  if (mapped == beg)
    return;
  Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
  Printf("FATAL: Make sure you are not using unlimited stack\n");
  Die();
}
// Verify no existing mapping conflicts with TSAN's address-space layout, then
// reserve all inter-region gaps as inaccessible. Dies on any unexpected
// mapping (typically a non-PIE binary).
void CheckAndProtect() {
  // Ensure that the binary is indeed compiled with -pie.
  MemoryMappingLayout proc_maps(true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (IsAppMem(segment.start)) continue;
    if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
    if (segment.protection == 0)  // Zero page or mprotected.
      continue;
    if (segment.start >= VdsoBeg())  // vdso
      break;
    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
           segment.start, segment.end);
    Die();
  }

#if defined(__aarch64__) && defined(__APPLE__)
  ProtectRange(HeapMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
  ProtectRange(MetaShadowEnd(), TraceMemBeg());
#else
  ProtectRange(LoAppMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
#ifdef TSAN_MID_APP_RANGE
  ProtectRange(MetaShadowEnd(), MidAppMemBeg());
  ProtectRange(MidAppMemEnd(), TraceMemBeg());
#else
  ProtectRange(MetaShadowEnd(), TraceMemBeg());
#endif
  // Memory for traces is mapped lazily in MapThreadTrace.
  // Protect the whole range for now, so that user does not map something here.
  ProtectRange(TraceMemBeg(), TraceMemEnd());
  ProtectRange(TraceMemEnd(), HeapMemBeg());
  ProtectRange(HeapEnd(), HiAppMemBeg());
#endif
}
#endif
} // namespace __tsan
#endif // SANITIZER_POSIX

245
lib/tsan/tsan_rtl_aarch64.S Normal file
View File

@@ -0,0 +1,245 @@
// The content of this file is AArch64-only:
#if defined(__aarch64__)

#include "sanitizer_common/sanitizer_asm.h"

#if defined(__APPLE__)
// Mach-O non-lazy symbol pointers for the libc setjmp family, used by the
// interceptors below to tail-jump to the real implementations.
.align  2

.section  __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
.long _setjmp$non_lazy_ptr
_setjmp$non_lazy_ptr:
  .indirect_symbol _setjmp
  .long 0

.section  __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
.long __setjmp$non_lazy_ptr
__setjmp$non_lazy_ptr:
  .indirect_symbol __setjmp
  .long 0

.section  __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
.long _sigsetjmp$non_lazy_ptr
_sigsetjmp$non_lazy_ptr:
  .indirect_symbol _sigsetjmp
  .long 0
#endif

// Select the text section per object format.
#if !defined(__APPLE__)
.section .text
#else
.section __TEXT,__text
.align 3
#endif
// Interceptor for setjmp: notify TSAN (__tsan_setjmp) with the caller's SP,
// then tail-jump to the real libc setjmp so the return goes straight back to
// the caller's call site.
ASM_HIDDEN(__tsan_setjmp)
.comm _ZN14__interception11real_setjmpE,8,8
.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_SYMBOL_INTERCEPTOR(setjmp):
  CFI_STARTPROC

  // Save frame/link register
  stp x29, x30, [sp, -32]!
  CFI_DEF_CFA_OFFSET (32)
  CFI_OFFSET (29, -32)
  CFI_OFFSET (30, -24)

  // Adjust the SP for previous frame
  add x29, sp, 0
  CFI_DEF_CFA_REGISTER (29)

  // Save env parameter
  str x0, [sp, 16]
  CFI_OFFSET (0, -16)

  // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
  add x0, x29, 32

  // call tsan interceptor
  bl ASM_SYMBOL(__tsan_setjmp)

  // Restore env parameter
  ldr x0, [sp, 16]
  CFI_RESTORE (0)

  // Restore frame/link register
  ldp x29, x30, [sp], 32
  CFI_RESTORE (29)
  CFI_RESTORE (30)
  CFI_DEF_CFA (31, 0)

  // tail jump to libc setjmp
#if !defined(__APPLE__)
  // Load the intercepted real setjmp pointer through the GOT.
  adrp x1, :got:_ZN14__interception11real_setjmpE
  ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
  ldr x1, [x1]
#else
  adrp x1, _setjmp$non_lazy_ptr@page
  add x1, x1, _setjmp$non_lazy_ptr@pageoff
  ldr x1, [x1]
#endif
  br x1

  CFI_ENDPROC
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
// Interceptor for _setjmp: same pattern as the setjmp interceptor above, but
// tail-jumps to the real _setjmp.
.comm _ZN14__interception12real__setjmpE,8,8
.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
ASM_SYMBOL_INTERCEPTOR(_setjmp):
  CFI_STARTPROC

  // Save frame/link register
  stp x29, x30, [sp, -32]!
  CFI_DEF_CFA_OFFSET (32)
  CFI_OFFSET (29, -32)
  CFI_OFFSET (30, -24)

  // Adjust the SP for previous frame
  add x29, sp, 0
  CFI_DEF_CFA_REGISTER (29)

  // Save env parameter
  str x0, [sp, 16]
  CFI_OFFSET (0, -16)

  // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
  add x0, x29, 32

  // call tsan interceptor
  bl ASM_SYMBOL(__tsan_setjmp)

  // Restore env parameter
  ldr x0, [sp, 16]
  CFI_RESTORE (0)

  // Restore frame/link register
  ldp x29, x30, [sp], 32
  CFI_RESTORE (29)
  CFI_RESTORE (30)
  CFI_DEF_CFA (31, 0)

  // tail jump to libc setjmp
#if !defined(__APPLE__)
  adrp x1, :got:_ZN14__interception12real__setjmpE
  ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
  ldr x1, [x1]
#else
  adrp x1, __setjmp$non_lazy_ptr@page
  add x1, x1, __setjmp$non_lazy_ptr@pageoff
  ldr x1, [x1]
#endif
  br x1

  CFI_ENDPROC
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
// Interceptor for sigsetjmp: like setjmp above, but also preserves the
// second (savesigs) argument across the __tsan_setjmp call.
.comm _ZN14__interception14real_sigsetjmpE,8,8
.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
  CFI_STARTPROC

  // Save frame/link register
  stp x29, x30, [sp, -32]!
  CFI_DEF_CFA_OFFSET (32)
  CFI_OFFSET (29, -32)
  CFI_OFFSET (30, -24)

  // Adjust the SP for previous frame
  add x29, sp, 0
  CFI_DEF_CFA_REGISTER (29)

  // Save env and savesigs parameter
  stp x0, x1, [sp, 16]
  CFI_OFFSET (0, -16)
  CFI_OFFSET (1, -8)

  // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
  add x0, x29, 32

  // call tsan interceptor
  bl ASM_SYMBOL(__tsan_setjmp)

  // Restore env and savesigs parameter
  ldp x0, x1, [sp, 16]
  CFI_RESTORE (0)
  CFI_RESTORE (1)

  // Restore frame/link register
  ldp x29, x30, [sp], 32
  CFI_RESTORE (29)
  CFI_RESTORE (30)
  CFI_DEF_CFA (31, 0)

  // tail jump to libc sigsetjmp
#if !defined(__APPLE__)
  adrp x2, :got:_ZN14__interception14real_sigsetjmpE
  ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
  ldr x2, [x2]
#else
  adrp x2, _sigsetjmp$non_lazy_ptr@page
  add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
  ldr x2, [x2]
#endif
  br x2

  CFI_ENDPROC
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
// Interceptor for glibc's __sigsetjmp (non-Apple only): identical to the
// sigsetjmp interceptor except for the target symbol.
#if !defined(__APPLE__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
  CFI_STARTPROC

  // Save frame/link register
  stp x29, x30, [sp, -32]!
  CFI_DEF_CFA_OFFSET (32)
  CFI_OFFSET (29, -32)
  CFI_OFFSET (30, -24)

  // Adjust the SP for previous frame
  add x29, sp, 0
  CFI_DEF_CFA_REGISTER (29)

  // Save env and savesigs parameter
  stp x0, x1, [sp, 16]
  CFI_OFFSET (0, -16)
  CFI_OFFSET (1, -8)

  // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
  add x0, x29, 32

  // call tsan interceptor
  bl ASM_SYMBOL(__tsan_setjmp)

  // Restore env and savesigs parameter
  ldp x0, x1, [sp, 16]
  CFI_RESTORE (0)
  CFI_RESTORE (1)

  // Restore frame/link register
  ldp x29, x30, [sp], 32
  CFI_RESTORE (29)
  CFI_RESTORE (30)
  CFI_DEF_CFA (31, 0)

  // tail jump to libc __sigsetjmp
#if !defined(__APPLE__)
  adrp x2, :got:_ZN14__interception16real___sigsetjmpE
  ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE]
  ldr x2, [x2]
#else
  // NOTE(review): dead code — the whole interceptor is inside an outer
  // !defined(__APPLE__) guard, so this Apple branch can never be compiled.
  adrp x2, ASM_SYMBOL(__sigsetjmp)@page
  add x2, x2, ASM_SYMBOL(__sigsetjmp)@pageoff
#endif
  br x2

  CFI_ENDPROC
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
#endif
NO_EXEC_STACK_DIRECTIVE
#endif

366
lib/tsan/tsan_rtl_amd64.S Normal file
View File

@@ -0,0 +1,366 @@
// The content of this file is x86_64-only:
#if defined(__x86_64__)
#include "sanitizer_common/sanitizer_asm.h"
#if !defined(__APPLE__)
.section .text
#else
.section __TEXT,__text
#endif
# Thunk called from instrumented code on a trace-buffer switch. It preserves
# all caller-saved (scratch) registers, aligns the stack to 16 bytes, and
# calls the C++ __tsan_trace_switch, so the instrumented call site needs no
# special calling convention.
ASM_HIDDEN(__tsan_trace_switch)
.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
ASM_SYMBOL(__tsan_trace_switch_thunk):
  CFI_STARTPROC
  # Save scratch registers.
  push %rax
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rax, 0)
  push %rcx
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rcx, 0)
  push %rdx
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rdx, 0)
  push %rsi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rsi, 0)
  push %rdi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rdi, 0)
  push %r8
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r8, 0)
  push %r9
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r9, 0)
  push %r10
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r10, 0)
  push %r11
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r11, 0)
  # Align stack frame.
  push %rbx  # non-scratch
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rbx, 0)
  mov %rsp, %rbx  # save current rsp
  CFI_DEF_CFA_REGISTER(%rbx)
  shr $4, %rsp  # clear 4 lsb, align to 16
  shl $4, %rsp

  call ASM_SYMBOL(__tsan_trace_switch)

  # Unalign stack frame back.
  mov %rbx, %rsp  # restore the original rsp
  CFI_DEF_CFA_REGISTER(%rsp)
  pop %rbx
  CFI_ADJUST_CFA_OFFSET(-8)
  # Restore scratch registers.
  pop %r11
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %r10
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %r9
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %r8
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rdi
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rsi
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rdx
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rcx
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rax
  CFI_ADJUST_CFA_OFFSET(-8)
  CFI_RESTORE(%rax)
  CFI_RESTORE(%rbx)
  CFI_RESTORE(%rcx)
  CFI_RESTORE(%rdx)
  CFI_RESTORE(%rsi)
  CFI_RESTORE(%rdi)
  CFI_RESTORE(%r8)
  CFI_RESTORE(%r9)
  CFI_RESTORE(%r10)
  CFI_RESTORE(%r11)
  ret
  CFI_ENDPROC
# Thunk called from instrumented code to report a race. Identical register
# save/align/restore dance as __tsan_trace_switch_thunk, wrapping the C++
# __tsan_report_race.
ASM_HIDDEN(__tsan_report_race)
.globl ASM_SYMBOL(__tsan_report_race_thunk)
ASM_SYMBOL(__tsan_report_race_thunk):
  CFI_STARTPROC
  # Save scratch registers.
  push %rax
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rax, 0)
  push %rcx
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rcx, 0)
  push %rdx
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rdx, 0)
  push %rsi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rsi, 0)
  push %rdi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rdi, 0)
  push %r8
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r8, 0)
  push %r9
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r9, 0)
  push %r10
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r10, 0)
  push %r11
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%r11, 0)
  # Align stack frame.
  push %rbx  # non-scratch
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rbx, 0)
  mov %rsp, %rbx  # save current rsp
  CFI_DEF_CFA_REGISTER(%rbx)
  shr $4, %rsp  # clear 4 lsb, align to 16
  shl $4, %rsp

  call ASM_SYMBOL(__tsan_report_race)

  # Unalign stack frame back.
  mov %rbx, %rsp  # restore the original rsp
  CFI_DEF_CFA_REGISTER(%rsp)
  pop %rbx
  CFI_ADJUST_CFA_OFFSET(-8)
  # Restore scratch registers.
  pop %r11
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %r10
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %r9
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %r8
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rdi
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rsi
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rdx
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rcx
  CFI_ADJUST_CFA_OFFSET(-8)
  pop %rax
  CFI_ADJUST_CFA_OFFSET(-8)
  CFI_RESTORE(%rax)
  CFI_RESTORE(%rbx)
  CFI_RESTORE(%rcx)
  CFI_RESTORE(%rdx)
  CFI_RESTORE(%rsi)
  CFI_RESTORE(%rdi)
  CFI_RESTORE(%r8)
  CFI_RESTORE(%r9)
  CFI_RESTORE(%r10)
  CFI_RESTORE(%r11)
  ret
  CFI_ENDPROC
# Interceptor for setjmp (NetBSD: __setjmp14): call __tsan_setjmp with the
# caller's SP, then tail-jump to the real libc setjmp.
ASM_HIDDEN(__tsan_setjmp)
#if defined(__NetBSD__)
.comm _ZN14__interception15real___setjmp14E,8,8
#elif !defined(__APPLE__)
.comm _ZN14__interception11real_setjmpE,8,8
#endif
#if defined(__NetBSD__)
.globl ASM_SYMBOL_INTERCEPTOR(__setjmp14)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
ASM_SYMBOL_INTERCEPTOR(__setjmp14):
#else
.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
ASM_SYMBOL_INTERCEPTOR(setjmp):
#endif
  CFI_STARTPROC
  // save env parameter
  push %rdi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rdi, 0)
  // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
  // (offset differs per OS because of what sits on the stack at entry)
#if defined(__FreeBSD__) || defined(__NetBSD__)
  lea 8(%rsp), %rdi
#elif defined(__linux__) || defined(__APPLE__)
  lea 16(%rsp), %rdi
#else
# error "Unknown platform"
#endif
  // call tsan interceptor
  call ASM_SYMBOL(__tsan_setjmp)
  // restore env parameter
  pop %rdi
  CFI_ADJUST_CFA_OFFSET(-8)
  CFI_RESTORE(%rdi)
  // tail jump to libc setjmp
  movl $0, %eax
#if defined(__NetBSD__)
  movq _ZN14__interception15real___setjmp14E@GOTPCREL(%rip), %rdx
  jmp *(%rdx)
#elif !defined(__APPLE__)
  movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
  jmp *(%rdx)
#else
  jmp ASM_SYMBOL(setjmp)
#endif
  CFI_ENDPROC
#if defined(__NetBSD__)
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
#else
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
#endif
# Interceptor for _setjmp: same pattern as the setjmp interceptor above, but
# tail-jumps to the real _setjmp.
.comm _ZN14__interception12real__setjmpE,8,8
.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
ASM_SYMBOL_INTERCEPTOR(_setjmp):
  CFI_STARTPROC
  // save env parameter
  push %rdi
  CFI_ADJUST_CFA_OFFSET(8)
  CFI_REL_OFFSET(%rdi, 0)
  // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
#if defined(__FreeBSD__) || defined(__NetBSD__)
  lea 8(%rsp), %rdi
#elif defined(__linux__) || defined(__APPLE__)
  lea 16(%rsp), %rdi
#else
# error "Unknown platform"
#endif
  // call tsan interceptor
  call ASM_SYMBOL(__tsan_setjmp)
  // restore env parameter
  pop %rdi
  CFI_ADJUST_CFA_OFFSET(-8)
  CFI_RESTORE(%rdi)
  // tail jump to libc setjmp
  movl $0, %eax
#if !defined(__APPLE__)
  movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
  jmp *(%rdx)
#else
  jmp ASM_SYMBOL(_setjmp)
#endif
  CFI_ENDPROC
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
// --- sigsetjmp interceptor (x86_64) -------------------------------------
// Like the setjmp interceptor, but sigsetjmp takes a second argument
// (savesigs, in %rsi) that must also be preserved across the call to
// __tsan_setjmp. Two pushes leave the stack misaligned for a call, hence
// the extra `sub $8, %rsp` alignment below. NetBSD intercepts the
// versioned __sigsetjmp14 symbol instead.
#if defined(__NetBSD__)
.comm _ZN14__interception18real___sigsetjmp14E,8,8
.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14):
#else
.comm _ZN14__interception14real_sigsetjmpE,8,8
.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
#endif
CFI_STARTPROC
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// save savesigs parameter
push %rsi
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rsi, 0)
// align stack frame
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
#if defined(__FreeBSD__) || defined(__NetBSD__)
lea 24(%rsp), %rdi
#elif defined(__linux__) || defined(__APPLE__)
lea 32(%rsp), %rdi
#else
# error "Unknown platform"
#endif
// call tsan interceptor
call ASM_SYMBOL(__tsan_setjmp)
// unalign stack frame
add $8, %rsp
CFI_ADJUST_CFA_OFFSET(-8)
// restore savesigs parameter
pop %rsi
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(%rsi)
// restore env parameter
pop %rdi
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(%rdi)
// tail jump to libc sigsetjmp
movl $0, %eax
#if defined(__NetBSD__)
movq _ZN14__interception18real___sigsetjmp14E@GOTPCREL(%rip), %rdx
jmp *(%rdx)
#elif !defined(__APPLE__)
movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
#else
jmp ASM_SYMBOL(sigsetjmp)
#endif
CFI_ENDPROC
#if defined(__NetBSD__)
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
#else
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
#endif
// --- __sigsetjmp interceptor (x86_64) -----------------------------------
// Intercepts glibc's internal __sigsetjmp entry point (the symbol the
// sigsetjmp macro expands to). Not built on Apple or NetBSD, which use the
// interceptors above instead. Structure mirrors the sigsetjmp interceptor:
// preserve env (%rdi) and savesigs (%rsi), align, report SP to TSAN, then
// tail-jump to the real __sigsetjmp via its interception slot.
#if !defined(__APPLE__) && !defined(__NetBSD__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
// save env parameter
push %rdi
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rdi, 0)
// save savesigs parameter
push %rsi
CFI_ADJUST_CFA_OFFSET(8)
CFI_REL_OFFSET(%rsi, 0)
// align stack frame
sub $8, %rsp
CFI_ADJUST_CFA_OFFSET(8)
// obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
#if defined(__FreeBSD__)
lea 24(%rsp), %rdi
#else
lea 32(%rsp), %rdi
#endif
// call tsan interceptor
call ASM_SYMBOL(__tsan_setjmp)
// unalign stack frame
add $8, %rsp
CFI_ADJUST_CFA_OFFSET(-8)
// restore savesigs parameter
pop %rsi
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(%rsi)
// restore env parameter
pop %rdi
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(%rdi)
// tail jump to libc sigsetjmp
movl $0, %eax
movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
CFI_ENDPROC
ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
#endif // !defined(__APPLE__) && !defined(__NetBSD__)
NO_EXEC_STACK_DIRECTIVE
#endif

214
lib/tsan/tsan_rtl_mips64.S Normal file
View File

@@ -0,0 +1,214 @@
// --- setjmp interceptor (mips64) ----------------------------------------
// Wraps libc setjmp: saves call-preserved state, computes the GOT pointer
// (needed because we may be entered from libc with an arbitrary $gp),
// reports the stack pointer to __tsan_setjmp, then tail-jumps to the real
// libc setjmp through its interception slot
// (_ZN14__interception11real_setjmpE = mangled __interception::real_setjmp).
// `.set noreorder`: the assembler does NOT fill branch delay slots, so the
// instruction textually after each jal/jr below executes in the delay slot.
.section .text
.set noreorder
.hidden __tsan_setjmp
.comm _ZN14__interception11real_setjmpE,8,8
.globl setjmp
.type setjmp, @function
setjmp:
// save env parameters
daddiu $sp,$sp,-40
sd $s0,32($sp)
sd $ra,24($sp)
sd $fp,16($sp)
sd $gp,8($sp)
// calculate and save pointer to GOT
lui $gp,%hi(%neg(%gp_rel(setjmp)))
daddu $gp,$gp,$t9
daddiu $gp,$gp,%lo(%neg(%gp_rel(setjmp)))
move $s0,$gp
// save jmp_buf
sd $a0,0($sp)
// obtain $sp
dadd $a0,$zero,$sp
// call tsan interceptor
jal __tsan_setjmp
// delay slot: $a1 = caller's original SP (current SP + frame size 40).
// NOTE(review): matches upstream; __tsan_setjmp(uptr sp) reads only $a0.
daddiu $a1,$a0,40
// restore jmp_buf
ld $a0,0($sp)
// restore gp
move $gp,$s0
// load pointer of libc setjmp to t9
dla $t9,(_ZN14__interception11real_setjmpE)
// restore env parameters
ld $gp,8($sp)
ld $fp,16($sp)
ld $ra,24($sp)
ld $s0,32($sp)
daddiu $sp,$sp,40
// tail jump to libc setjmp
ld $t9,0($t9)
jr $t9
nop
.size setjmp, .-setjmp
// --- _setjmp interceptor (mips64) ---------------------------------------
// Identical structure to the setjmp interceptor above, but dispatches to
// the real libc _setjmp (slot __interception::real__setjmp, mangled below).
.hidden __tsan_setjmp
.globl _setjmp
.comm _ZN14__interception12real__setjmpE,8,8
.type _setjmp, @function
_setjmp:
// Save env parameters
daddiu $sp,$sp,-40
sd $s0,32($sp)
sd $ra,24($sp)
sd $fp,16($sp)
sd $gp,8($sp)
// calculate and save pointer to GOT
lui $gp,%hi(%neg(%gp_rel(_setjmp)))
daddu $gp,$gp,$t9
daddiu $gp,$gp,%lo(%neg(%gp_rel(_setjmp)))
move $s0,$gp
// save jmp_buf
sd $a0,0($sp)
// obtain $sp
dadd $a0,$zero,$sp
// call tsan interceptor
jal __tsan_setjmp
// delay slot (file is assembled with .set noreorder)
daddiu $a1,$a0,40
// restore jmp_buf
ld $a0,0($sp)
// restore gp
move $gp,$s0
// load pointer of libc _setjmp to t9
dla $t9,(_ZN14__interception12real__setjmpE)
// restore env parameters
ld $gp,8($sp)
ld $fp,16($sp)
ld $ra,24($sp)
ld $s0,32($sp)
daddiu $sp,$sp,40
// tail jump to libc _setjmp
ld $t9,0($t9)
jr $t9
nop
.size _setjmp, .-_setjmp
// --- sigsetjmp interceptor (mips64) -------------------------------------
// Like the setjmp interceptor, but sigsetjmp carries a second argument
// (savesig, in $a1) that must survive the call to __tsan_setjmp, so the
// frame grows to 48 bytes to spill both $a0 and $a1.
.hidden __tsan_setjmp
.globl sigsetjmp
.comm _ZN14__interception14real_sigsetjmpE,8,8
.type sigsetjmp, @function
sigsetjmp:
// Save env parameters
daddiu $sp,$sp,-48
sd $s0,40($sp)
sd $ra,32($sp)
sd $fp,24($sp)
sd $gp,16($sp)
// calculate and save pointer to GOT
lui $gp,%hi(%neg(%gp_rel(sigsetjmp)))
daddu $gp,$gp,$t9
daddiu $gp,$gp,%lo(%neg(%gp_rel(sigsetjmp)))
move $s0,$gp
// save jmp_buf and savesig
sd $a0,0($sp)
sd $a1,8($sp)
// obtain $sp
dadd $a0,$zero,$sp
// call tsan interceptor
jal __tsan_setjmp
// delay slot (file is assembled with .set noreorder)
daddiu $a1,$a0,48
// restore jmp_buf and savesig
ld $a0,0($sp)
ld $a1,8($sp)
// restore gp
move $gp,$s0
// load pointer of libc sigsetjmp to t9
dla $t9,(_ZN14__interception14real_sigsetjmpE)
// restore env parameters
ld $gp,16($sp)
ld $fp,24($sp)
ld $ra,32($sp)
ld $s0,40($sp)
daddiu $sp,$sp,48
// tail jump to libc sigsetjmp
ld $t9,0($t9)
jr $t9
nop
.size sigsetjmp, .-sigsetjmp
// --- __sigsetjmp interceptor (mips64) -----------------------------------
// Intercepts glibc's internal __sigsetjmp entry point. Same 48-byte-frame
// structure as the sigsetjmp interceptor above; dispatches to the real
// __sigsetjmp via its interception slot.
.hidden __tsan_setjmp
.comm _ZN14__interception16real___sigsetjmpE,8,8
.globl __sigsetjmp
.type __sigsetjmp, @function
__sigsetjmp:
// Save env parameters
daddiu $sp,$sp,-48
sd $s0,40($sp)
sd $ra,32($sp)
sd $fp,24($sp)
sd $gp,16($sp)
// calculate and save pointer to GOT
lui $gp,%hi(%neg(%gp_rel(__sigsetjmp)))
daddu $gp,$gp,$t9
daddiu $gp,$gp,%lo(%neg(%gp_rel(__sigsetjmp)))
move $s0,$gp
// save jmp_buf and savesig
sd $a0,0($sp)
sd $a1,8($sp)
// obtain $sp
dadd $a0,$zero,$sp
// call tsan interceptor
jal __tsan_setjmp
// delay slot (file is assembled with .set noreorder)
daddiu $a1,$a0,48
// restore jmp_buf and savesig
ld $a0,0($sp)
ld $a1,8($sp)
// restore gp
move $gp,$s0
// load pointer to libc __sigsetjmp in t9
dla $t9,(_ZN14__interception16real___sigsetjmpE)
// restore env parameters
ld $gp,16($sp)
ld $fp,24($sp)
ld $ra,32($sp)
ld $s0,40($sp)
daddiu $sp,$sp,48
// tail jump to libc __sigsetjmp
ld $t9,0($t9)
jr $t9
nop
.size __sigsetjmp, .-__sigsetjmp

288
lib/tsan/tsan_rtl_ppc64.S Normal file
View File

@@ -0,0 +1,288 @@
// --- _setjmp interceptor (ppc64) ----------------------------------------
// Unlike the other architectures, this does NOT tail-jump into libc.
// It calls __tsan_setjmp and then fills the jmp_buf itself, emulating
// glibc's setjmp (see the big comment mid-function for why: a sibcall
// cannot restore the trashed TOC pointer).
// ELFv2 (_CALL_ELF == 2, little-endian): functions are plain entry points.
// ELFv1 (big-endian): each function needs a descriptor in the .opd section
// (entry address, TOC base, environment) — hence the #if blocks.
#include "tsan_ppc_regs.h"
.section .text
.hidden __tsan_setjmp
.globl _setjmp
.type _setjmp, @function
.align 4
#if _CALL_ELF == 2
_setjmp:
#else
.section ".opd","aw"
.align 3
_setjmp:
.quad .L._setjmp,.TOC.@tocbase,0
.previous
#endif
.L._setjmp:
mflr r0
stdu r1,-48(r1)
std r2,24(r1)
std r3,32(r1)
std r0,40(r1)
// r3 is the original stack pointer.
addi r3,r1,48
// r4 is the mangled stack pointer (see glibc)
// NOTE(review): -28696(r13) presumably reads glibc's pointer-mangling
// guard from the TCB (r13 = thread pointer) — verify against glibc's
// PTR_MANGLE for ppc64.
ld r4,-28696(r13)
xor r4,r3,r4
// Materialize a TOC in case we were called from libc.
// For big-endian, we load the TOC from the OPD. For little-
// endian, we use the .TOC. symbol to find it.
nop
bcl 20,31,0f
0:
mflr r2
#if _CALL_ELF == 2
addis r2,r2,.TOC.-0b@ha
addi r2,r2,.TOC.-0b@l
#else
addis r2,r2,_setjmp-0b@ha
addi r2,r2,_setjmp-0b@l
ld r2,8(r2)
#endif
// Call the interceptor.
bl __tsan_setjmp
nop
// Restore regs needed for setjmp.
ld r3,32(r1)
ld r0,40(r1)
// Emulate the real setjmp function. We do this because we can't
// perform a sibcall: The real setjmp function trashes the TOC
// pointer, and with a sibcall we have no way to restore it.
// This way we can make sure our caller's stack pointer and
// link register are saved correctly in the jmpbuf.
// NOTE(review): jmp_buf offsets below (0 = SP, 8 = TOC, 16 = LR, 24.. =
// nonvolatile GPRs/FPRs, 512 = saved-mask flag) must match glibc's
// __jmp_buf layout for ppc64 — verify against the targeted glibc.
ld r6,-28696(r13)
addi r5,r1,48 // original stack ptr of caller
xor r5,r6,r5
std r5,0(r3) // mangled stack ptr of caller
ld r5,24(r1)
std r5,8(r3) // caller's saved TOC pointer
xor r0,r6,r0
std r0,16(r3) // caller's mangled return address
mfcr r0
// Nonvolatiles.
std r14,24(r3)
stfd f14,176(r3)
stw r0,172(r3) // CR
std r15,32(r3)
stfd f15,184(r3)
std r16,40(r3)
stfd f16,192(r3)
std r17,48(r3)
stfd f17,200(r3)
std r18,56(r3)
stfd f18,208(r3)
std r19,64(r3)
stfd f19,216(r3)
std r20,72(r3)
stfd f20,224(r3)
std r21,80(r3)
stfd f21,232(r3)
std r22,88(r3)
stfd f22,240(r3)
std r23,96(r3)
stfd f23,248(r3)
std r24,104(r3)
stfd f24,256(r3)
std r25,112(r3)
stfd f25,264(r3)
std r26,120(r3)
stfd f26,272(r3)
std r27,128(r3)
stfd f27,280(r3)
std r28,136(r3)
stfd f28,288(r3)
std r29,144(r3)
stfd f29,296(r3)
std r30,152(r3)
stfd f30,304(r3)
std r31,160(r3)
stfd f31,312(r3)
// Nonvolatile vector registers v20-v31, stored at 320.. via two
// alternating pointers (r5/r6) advanced by 32 each.
addi r5,r3,320
mfspr r0,256
stw r0,168(r3) // VRSAVE
addi r6,r5,16
stvx v20,0,r5
addi r5,r5,32
stvx v21,0,r6
addi r6,r6,32
stvx v22,0,r5
addi r5,r5,32
stvx v23,0,r6
addi r6,r6,32
stvx v24,0,r5
addi r5,r5,32
stvx v25,0,r6
addi r6,r6,32
stvx v26,0,r5
addi r5,r5,32
stvx v27,0,r6
addi r6,r6,32
stvx v28,0,r5
addi r5,r5,32
stvx v29,0,r6
addi r6,r6,32
stvx v30,0,r5
stvx v31,0,r6
// Clear the "mask-saved" slot.
li r4,0
stw r4,512(r3)
// Restore TOC, LR, and stack and return to caller.
ld r2,24(r1)
ld r0,40(r1)
addi r1,r1,48
li r3,0 // This is the setjmp return path
mtlr r0
blr
.size _setjmp, .-.L._setjmp
// setjmp is a plain alias for _setjmp: on this port both perform the same
// emulated save (neither stores a signal mask), so just branch through.
.globl setjmp
.type setjmp, @function
.align 4
setjmp:
b _setjmp
.size setjmp, .-setjmp
// --- __sigsetjmp interceptor (ppc64) ------------------------------------
// sigsetjmp is like setjmp, except that the mask in r4 needs
// to be saved at offset 512 of the jump buffer.
// Frame is 64 bytes (vs 48 for _setjmp) to additionally spill r4 (savemask)
// across the __tsan_setjmp call; otherwise the structure mirrors _setjmp.
.globl __sigsetjmp
.type __sigsetjmp, @function
.align 4
#if _CALL_ELF == 2
__sigsetjmp:
#else
.section ".opd","aw"
.align 3
__sigsetjmp:
.quad .L.__sigsetjmp,.TOC.@tocbase,0
.previous
#endif
.L.__sigsetjmp:
mflr r0
stdu r1,-64(r1)
std r2,24(r1)
std r3,32(r1)
std r4,40(r1)
std r0,48(r1)
// r3 is the original stack pointer.
addi r3,r1,64
// r4 is the mangled stack pointer (see glibc)
ld r4,-28696(r13)
xor r4,r3,r4
// Materialize a TOC in case we were called from libc.
// For big-endian, we load the TOC from the OPD. For little-
// endian, we use the .TOC. symbol to find it.
nop
bcl 20,31,1f
1:
mflr r2
#if _CALL_ELF == 2
addis r2,r2,.TOC.-1b@ha
addi r2,r2,.TOC.-1b@l
#else
// NOTE(review): this reuses _setjmp's OPD entry to locate the TOC; the
// module TOC base is shared so the result should be the same — matches
// upstream compiler-rt, verify there.
addis r2,r2,_setjmp-1b@ha
addi r2,r2,_setjmp-1b@l
ld r2,8(r2)
#endif
// Call the interceptor.
bl __tsan_setjmp
nop
// Restore regs needed for __sigsetjmp.
ld r3,32(r1)
ld r4,40(r1)
ld r0,48(r1)
// Emulate the real sigsetjmp function. We do this because we can't
// perform a sibcall: The real sigsetjmp function trashes the TOC
// pointer, and with a sibcall we have no way to restore it.
// This way we can make sure our caller's stack pointer and
// link register are saved correctly in the jmpbuf.
ld r6,-28696(r13)
addi r5,r1,64 // original stack ptr of caller
xor r5,r6,r5
std r5,0(r3) // mangled stack ptr of caller
ld r5,24(r1)
std r5,8(r3) // caller's saved TOC pointer
xor r0,r6,r0
std r0,16(r3) // caller's mangled return address
mfcr r0
// Nonvolatiles.
std r14,24(r3)
stfd f14,176(r3)
stw r0,172(r3) // CR
std r15,32(r3)
stfd f15,184(r3)
std r16,40(r3)
stfd f16,192(r3)
std r17,48(r3)
stfd f17,200(r3)
std r18,56(r3)
stfd f18,208(r3)
std r19,64(r3)
stfd f19,216(r3)
std r20,72(r3)
stfd f20,224(r3)
std r21,80(r3)
stfd f21,232(r3)
std r22,88(r3)
stfd f22,240(r3)
std r23,96(r3)
stfd f23,248(r3)
std r24,104(r3)
stfd f24,256(r3)
std r25,112(r3)
stfd f25,264(r3)
std r26,120(r3)
stfd f26,272(r3)
std r27,128(r3)
stfd f27,280(r3)
std r28,136(r3)
stfd f28,288(r3)
std r29,144(r3)
stfd f29,296(r3)
std r30,152(r3)
stfd f30,304(r3)
std r31,160(r3)
stfd f31,312(r3)
// Nonvolatile vector registers v20-v31, stored at 320.. via two
// alternating pointers (r5/r6) advanced by 32 each.
addi r5,r3,320
mfspr r0,256
stw r0,168(r3) // VRSAVE
addi r6,r5,16
stvx v20,0,r5
addi r5,r5,32
stvx v21,0,r6
addi r6,r6,32
stvx v22,0,r5
addi r5,r5,32
stvx v23,0,r6
addi r6,r6,32
stvx v24,0,r5
addi r5,r5,32
stvx v25,0,r6
addi r6,r6,32
stvx v26,0,r5
addi r5,r5,32
stvx v27,0,r6
addi r6,r6,32
stvx v28,0,r5
addi r5,r5,32
stvx v29,0,r6
addi r6,r6,32
stvx v30,0,r5
stvx v31,0,r6
// Save into the "mask-saved" slot.
stw r4,512(r3)
// Restore TOC, LR, and stack and return to caller.
ld r2,24(r1)
ld r0,48(r1)
addi r1,r1,64
li r3,0 // This is the sigsetjmp return path
mtlr r0
blr
.size __sigsetjmp, .-.L.__sigsetjmp
// sigsetjmp is a plain alias for __sigsetjmp (glibc's internal entry
// point, emulated above); just branch through.
.globl sigsetjmp
.type sigsetjmp, @function
.align 4
sigsetjmp:
b __sigsetjmp
.size sigsetjmp, .-sigsetjmp

View File

@@ -429,7 +429,11 @@ pub const InitOptions = struct {
verbose_llvm_cpu_features: bool = false,
is_test: bool = false,
test_evented_io: bool = false,
is_compiler_rt_or_libc: bool = false,
/// Normally when you create a `Compilation`, Zig will automatically build
/// and link in required dependencies, such as compiler-rt and libc. When
/// building such dependencies themselves, this flag must be set to avoid
/// infinite recursion.
skip_linker_dependencies: bool = false,
parent_compilation_link_libc: bool = false,
stack_size_override: ?u64 = null,
image_base_override: ?u64 = null,
@@ -499,7 +503,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.Lib => is_dyn_lib,
.Exe => true,
};
const needs_c_symbols = !options.is_compiler_rt_or_libc and
const needs_c_symbols = !options.skip_linker_dependencies and
(is_exe_or_dyn_lib or (options.target.isWasm() and options.output_mode != .Obj));
const comp: *Compilation = comp: {
@@ -677,6 +681,9 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
break :pic explicit;
} else pie or must_pic;
// TSAN is implemented in C++ so it requires linking libc++.
const link_libcpp = options.link_libcpp or tsan;
// Make a decision on whether to use Clang for translate-c and compiling C files.
const use_clang = if (options.use_clang) |explicit| explicit else blk: {
if (build_options.have_llvm) {
@@ -765,7 +772,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
cache.hash.add(options.function_sections);
cache.hash.add(strip);
cache.hash.add(link_libc);
cache.hash.add(options.link_libcpp);
cache.hash.add(link_libcpp);
cache.hash.add(options.output_mode);
cache.hash.add(options.machine_code_model);
cache.hash.addOptionalEmitLoc(options.emit_bin);
@@ -793,7 +800,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
hash.add(single_threaded);
hash.add(dll_export_fns);
hash.add(options.is_test);
hash.add(options.is_compiler_rt_or_libc);
hash.add(options.skip_linker_dependencies);
hash.add(options.parent_compilation_link_libc);
const digest = hash.final();
@@ -930,7 +937,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.use_llvm = use_llvm,
.system_linker_hack = darwin_options.system_linker_hack,
.link_libc = link_libc,
.link_libcpp = options.link_libcpp,
.link_libcpp = link_libcpp,
.objects = options.link_objects,
.frameworks = options.frameworks,
.framework_dirs = options.framework_dirs,
@@ -970,7 +977,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
.dll_export_fns = dll_export_fns,
.error_return_tracing = error_return_tracing,
.llvm_cpu_features = llvm_cpu_features,
.is_compiler_rt_or_libc = options.is_compiler_rt_or_libc,
.skip_linker_dependencies = options.skip_linker_dependencies,
.parent_compilation_link_libc = options.parent_compilation_link_libc,
.each_lib_rpath = options.each_lib_rpath orelse options.is_native_os,
.disable_lld_caching = options.disable_lld_caching,
@@ -1044,7 +1051,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
comp.c_object_table.putAssumeCapacityNoClobber(c_object, {});
}
if (comp.bin_file.options.emit != null and !comp.bin_file.options.is_compiler_rt_or_libc) {
if (comp.bin_file.options.emit != null and !comp.bin_file.options.skip_linker_dependencies) {
// If we need to build glibc for the target, add work items for it.
// We go through the work queue so that building can be done in parallel.
if (comp.wantBuildGLibCFromSource()) {
@@ -1097,9 +1104,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
if (comp.wantBuildLibUnwindFromSource()) {
try comp.work_queue.writeItem(.{ .libunwind = {} });
}
if (build_options.have_llvm and comp.bin_file.options.output_mode != .Obj and
comp.bin_file.options.link_libcpp)
{
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.bin_file.options.link_libcpp) {
try comp.work_queue.writeItem(.libcxx);
try comp.work_queue.writeItem(.libcxxabi);
}
@@ -2772,7 +2777,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: *Allocator) ![]u8
// in. For example, compiler_rt will not export the __chkstk symbol if it
// knows libc will provide it, and likewise c.zig will not export memcpy.
const link_libc = comp.bin_file.options.link_libc or
(comp.bin_file.options.is_compiler_rt_or_libc and comp.bin_file.options.parent_compilation_link_libc);
(comp.bin_file.options.skip_linker_dependencies and comp.bin_file.options.parent_compilation_link_libc);
try buffer.writer().print(
\\pub const object_format = ObjectFormat.{};
@@ -2927,7 +2932,7 @@ fn buildOutputFromZig(
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.is_compiler_rt_or_libc = true,
.skip_linker_dependencies = true,
.parent_compilation_link_libc = comp.bin_file.options.link_libc,
});
defer sub_compilation.destroy();
@@ -3305,7 +3310,7 @@ pub fn build_crt_file(
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.is_compiler_rt_or_libc = true,
.skip_linker_dependencies = true,
.parent_compilation_link_libc = comp.bin_file.options.link_libc,
});
defer sub_compilation.destroy();
@@ -3326,7 +3331,7 @@ pub fn stage1AddLinkLib(comp: *Compilation, lib_name: []const u8) !void {
// Avoid deadlocking on building import libs such as kernel32.lib
// This can happen when the user uses `build-exe foo.obj -lkernel32` and then
// when we create a sub-Compilation for zig libc, it also tries to build kernel32.lib.
if (comp.bin_file.options.is_compiler_rt_or_libc) return;
if (comp.bin_file.options.skip_linker_dependencies) return;
// This happens when an `extern "foo"` function is referenced by the stage1 backend.
// If we haven't seen this library yet and we're targeting Windows, we need to queue up

View File

@@ -962,7 +962,7 @@ fn buildSharedLib(
.version_script = map_file_path,
.soname = soname,
.c_source_files = &c_source_files,
.is_compiler_rt_or_libc = true,
.skip_linker_dependencies = true,
});
defer sub_compilation.destroy();

View File

@@ -188,6 +188,7 @@ pub fn buildLibCXX(comp: *Compilation) !void {
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.link_libc = true,
.skip_linker_dependencies = true,
});
defer sub_compilation.destroy();
@@ -308,6 +309,7 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.link_libc = true,
.skip_linker_dependencies = true,
});
defer sub_compilation.destroy();

View File

@@ -34,7 +34,7 @@ pub fn buildTsan(comp: *Compilation) !void {
};
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
try c_source_files.ensureCapacity(tsan_sources.len + sanitizer_common_sources.len);
try c_source_files.ensureCapacity(c_source_files.items.len + tsan_sources.len);
const tsan_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"tsan"});
for (tsan_sources) |tsan_src| {
@@ -48,6 +48,7 @@ pub fn buildTsan(comp: *Compilation) !void {
try cflags.append("-nostdinc++");
try cflags.append("-fvisibility-inlines-hidden");
try cflags.append("-std=c++14");
try cflags.append("-fno-rtti");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "tsan", tsan_src }),
@@ -55,6 +56,51 @@ pub fn buildTsan(comp: *Compilation) !void {
});
}
const platform_tsan_sources = if (target.isDarwin())
&darwin_tsan_sources
else
&unix_tsan_sources;
try c_source_files.ensureCapacity(c_source_files.items.len + platform_tsan_sources.len);
for (platform_tsan_sources) |tsan_src| {
var cflags = std.ArrayList([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(tsan_include_path);
try cflags.append("-O3");
try cflags.append("-DNDEBUG");
try cflags.append("-nostdinc++");
try cflags.append("-fvisibility-inlines-hidden");
try cflags.append("-std=c++14");
try cflags.append("-fno-rtti");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "tsan", tsan_src }),
.extra_flags = cflags.items,
});
}
{
const asm_source = switch (target.cpu.arch) {
.aarch64 => "tsan_rtl_aarch64.S",
.x86_64 => "tsan_rtl_amd64.S",
.mips64 => "tsan_rtl_mips64.S",
.powerpc64 => "tsan_rtl_ppc64.S",
else => return error.TSANUnsupportedCPUArchitecture,
};
var cflags = std.ArrayList([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(tsan_include_path);
try cflags.append("-DNDEBUG");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "tsan", asm_source }),
.extra_flags = cflags.items,
});
}
try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_common_sources.len);
const sanitizer_common_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"tsan", "sanitizer_common",
});
@@ -69,6 +115,7 @@ pub fn buildTsan(comp: *Compilation) !void {
try cflags.append("-nostdinc++");
try cflags.append("-fvisibility-inlines-hidden");
try cflags.append("-std=c++14");
try cflags.append("-fno-rtti");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
@@ -78,6 +125,88 @@ pub fn buildTsan(comp: *Compilation) !void {
});
}
const to_c_or_not_to_c_sources = if (comp.bin_file.options.link_libc)
&sanitizer_libcdep_sources
else
&sanitizer_nolibc_sources;
try c_source_files.ensureCapacity(c_source_files.items.len + to_c_or_not_to_c_sources.len);
for (to_c_or_not_to_c_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(sanitizer_common_include_path);
try cflags.append("-O3");
try cflags.append("-DNDEBUG");
try cflags.append("-nostdinc++");
try cflags.append("-fvisibility-inlines-hidden");
try cflags.append("-std=c++14");
try cflags.append("-fno-rtti");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"tsan", "sanitizer_common", c_src,
}),
.extra_flags = cflags.items,
});
}
try c_source_files.ensureCapacity(c_source_files.items.len + sanitizer_symbolizer_sources.len);
for (sanitizer_symbolizer_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(tsan_include_path);
try cflags.append("-O3");
try cflags.append("-DNDEBUG");
try cflags.append("-nostdinc++");
try cflags.append("-fvisibility-inlines-hidden");
try cflags.append("-std=c++14");
try cflags.append("-fno-rtti");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"tsan", "sanitizer_common", c_src,
}),
.extra_flags = cflags.items,
});
}
const interception_include_path = try comp.zig_lib_directory.join(
arena,
&[_][]const u8{"interception"},
);
try c_source_files.ensureCapacity(c_source_files.items.len + interception_sources.len);
for (interception_sources) |c_src| {
var cflags = std.ArrayList([]const u8).init(arena);
try cflags.append("-I");
try cflags.append(interception_include_path);
try cflags.append("-I");
try cflags.append(tsan_include_path);
try cflags.append("-O3");
try cflags.append("-DNDEBUG");
try cflags.append("-nostdinc++");
try cflags.append("-fvisibility-inlines-hidden");
try cflags.append("-std=c++14");
try cflags.append("-fno-rtti");
c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
"tsan", "interception", c_src,
}),
.extra_flags = cflags.items,
});
}
const common_flags = [_][]const u8{
"-DTSAN_CONTAINS_UBSAN=0",
};
const sub_compilation = try Compilation.create(comp.gpa, .{
.local_cache_directory = comp.global_cache_directory,
.global_cache_directory = comp.global_cache_directory,
@@ -113,6 +242,8 @@ pub fn buildTsan(comp: *Compilation) !void {
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.link_libc = true,
.skip_linker_dependencies = true,
.clang_argv = &common_flags,
});
defer sub_compilation.destroy();
@@ -159,6 +290,18 @@ const tsan_sources = [_][]const u8{
"tsan_sync.cpp",
};
const darwin_tsan_sources = [_][]const u8{
"tsan_interceptors_mac.cpp",
"tsan_interceptors_mach_vm.cpp",
"tsan_platform_mac.cpp",
"tsan_platform_posix.cpp",
};
const unix_tsan_sources = [_][]const u8{
"tsan_platform_linux.cpp",
"tsan_platform_posix.cpp",
};
const sanitizer_common_sources = [_][]const u8{
"sanitizer_allocator.cpp",
"sanitizer_common.cpp",
@@ -203,17 +346,42 @@ const sanitizer_common_sources = [_][]const u8{
"sanitizer_win.cpp",
};
// TODO This is next up
//const sanitizer_nolibc_sources = [_][]const u8{
// "sanitizer_common_nolibc.cpp",
//};
//
//const sanitizer_libcdep_sources = [_][]const u8{
// "sanitizer_common_libcdep.cpp",
// "sanitizer_allocator_checks.cpp",
// "sanitizer_linux_libcdep.cpp",
// "sanitizer_mac_libcdep.cpp",
// "sanitizer_posix_libcdep.cpp",
// "sanitizer_stoptheworld_linux_libcdep.cpp",
// "sanitizer_stoptheworld_netbsd_libcdep.cpp",
//};
const sanitizer_nolibc_sources = [_][]const u8{
"sanitizer_common_nolibc.cpp",
};
const sanitizer_libcdep_sources = [_][]const u8{
"sanitizer_common_libcdep.cpp",
"sanitizer_allocator_checks.cpp",
"sanitizer_linux_libcdep.cpp",
"sanitizer_mac_libcdep.cpp",
"sanitizer_posix_libcdep.cpp",
"sanitizer_stoptheworld_linux_libcdep.cpp",
"sanitizer_stoptheworld_netbsd_libcdep.cpp",
};
const sanitizer_symbolizer_sources = [_][]const u8{
"sanitizer_allocator_report.cpp",
"sanitizer_stackdepot.cpp",
"sanitizer_stacktrace.cpp",
"sanitizer_stacktrace_libcdep.cpp",
"sanitizer_stacktrace_printer.cpp",
"sanitizer_stacktrace_sparc.cpp",
"sanitizer_symbolizer.cpp",
"sanitizer_symbolizer_libbacktrace.cpp",
"sanitizer_symbolizer_libcdep.cpp",
"sanitizer_symbolizer_mac.cpp",
"sanitizer_symbolizer_markup.cpp",
"sanitizer_symbolizer_posix_libcdep.cpp",
"sanitizer_symbolizer_report.cpp",
"sanitizer_symbolizer_win.cpp",
"sanitizer_unwind_linux_libcdep.cpp",
"sanitizer_unwind_win.cpp",
};
const interception_sources = [_][]const u8{
"interception_linux.cpp",
"interception_mac.cpp",
"interception_win.cpp",
"interception_type_test.cpp",
};

View File

@@ -122,6 +122,7 @@ pub fn buildStaticLib(comp: *Compilation) !void {
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
.link_libc = true,
.skip_linker_dependencies = true,
});
defer sub_compilation.destroy();

View File

@@ -81,7 +81,7 @@ pub const Options = struct {
verbose_link: bool,
dll_export_fns: bool,
error_return_tracing: bool,
is_compiler_rt_or_libc: bool,
skip_linker_dependencies: bool,
parent_compilation_link_libc: bool,
each_lib_rpath: bool,
disable_lld_caching: bool,

View File

@@ -835,7 +835,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
man.hash.addOptional(self.base.options.image_base_override);
man.hash.addListOfBytes(self.base.options.extra_lld_args);
man.hash.addListOfBytes(self.base.options.lib_dirs);
man.hash.add(self.base.options.is_compiler_rt_or_libc);
man.hash.add(self.base.options.skip_linker_dependencies);
if (self.base.options.link_libc) {
man.hash.add(self.base.options.libc_installation != null);
if (self.base.options.libc_installation) |libc_installation| {
@@ -1125,7 +1125,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
}
// compiler-rt, libc and libssp
if (is_exe_or_dyn_lib and !self.base.options.is_compiler_rt_or_libc) {
if (is_exe_or_dyn_lib and !self.base.options.skip_linker_dependencies) {
if (!self.base.options.link_libc) {
try argv.append(comp.libc_static_lib.?.full_object_path);
}

View File

@@ -1310,7 +1310,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
man.hash.addListOfBytes(self.base.options.lib_dirs);
man.hash.addListOfBytes(self.base.options.rpath_list);
man.hash.add(self.base.options.each_lib_rpath);
man.hash.add(self.base.options.is_compiler_rt_or_libc);
man.hash.add(self.base.options.skip_linker_dependencies);
man.hash.add(self.base.options.z_nodelete);
man.hash.add(self.base.options.z_defs);
if (self.base.options.link_libc) {
@@ -1552,7 +1552,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
}
// libc
if (is_exe_or_dyn_lib and !self.base.options.is_compiler_rt_or_libc and !self.base.options.link_libc) {
if (is_exe_or_dyn_lib and !self.base.options.skip_linker_dependencies and !self.base.options.link_libc) {
try argv.append(comp.libc_static_lib.?.full_object_path);
}
@@ -1574,9 +1574,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
const arg = if (ext == .shared_library) link_lib else try std.fmt.allocPrint(arena, "-l{}", .{link_lib});
argv.appendAssumeCapacity(arg);
}
}
if (!is_obj) {
// libc++ dep
if (self.base.options.link_libcpp) {
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);

View File

@@ -438,7 +438,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
man.hash.addListOfBytes(self.base.options.framework_dirs);
man.hash.addListOfBytes(self.base.options.frameworks);
man.hash.addListOfBytes(self.base.options.rpath_list);
man.hash.add(self.base.options.is_compiler_rt_or_libc);
man.hash.add(self.base.options.skip_linker_dependencies);
man.hash.add(self.base.options.z_nodelete);
man.hash.add(self.base.options.z_defs);
if (is_dyn_lib) {
@@ -633,7 +633,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
}
// compiler_rt on darwin is missing some stuff, so we still build it and rely on LinkOnce
if (is_exe_or_dyn_lib and !self.base.options.is_compiler_rt_or_libc) {
if (is_exe_or_dyn_lib and !self.base.options.skip_linker_dependencies) {
try argv.append(comp.compiler_rt_static_lib.?.full_object_path);
}

View File

@@ -387,7 +387,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
}
if (self.base.options.output_mode != .Obj and
!self.base.options.is_compiler_rt_or_libc and
!self.base.options.skip_linker_dependencies and
!self.base.options.link_libc)
{
try argv.append(comp.libc_static_lib.?.full_object_path);

View File

@@ -225,7 +225,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
.c_source_files = &[_]Compilation.CSourceFile{
.{ .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "musl", "libc.s" }) },
},
.is_compiler_rt_or_libc = true,
.skip_linker_dependencies = true,
.soname = "libc.so",
});
defer sub_compilation.destroy();