[TSan] Clarify and enforce shadow end alignment #144648

Open · wants to merge 8 commits into main
1 change: 0 additions & 1 deletion compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp
@@ -122,7 +122,6 @@ void __tsan_java_move(jptr src, jptr dst, jptr size) {
   DCHECK_GE(dst, jctx->heap_begin);
   DCHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
   DCHECK_NE(dst, src);
-  DCHECK_NE(size, 0);

   // Assuming it's not running concurrently with threads that do
   // memory accesses and mutex operations (stop-the-world phase).
21 changes: 18 additions & 3 deletions compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -566,17 +566,32 @@ static bool IsValidMmapRange(uptr addr, uptr size) {
   return false;
 }

-void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
+void UnmapShadow(ThreadState* thr, uptr addr, uptr size) {
   if (size == 0 || !IsValidMmapRange(addr, size))
     return;
-  DontNeedShadowFor(addr, size);
+  // UnmapShadow is tied to the semantics of mmap/munmap, so we should clear
+  // the whole shadow range, including the tail shadow, when
+  // (addr + size) % kShadowCell != 0.
+  uptr size_for_shadow = RoundUp(addr + size, kShadowCell) - addr;
+  DontNeedShadowFor(addr, size_for_shadow);
   ScopedGlobalProcessor sgp;
   SlotLocker locker(thr, true);
-  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
+  uptr size_for_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
+  ctx->metamap.ResetRange(thr->proc(), addr, size_for_meta, true);
 }
 #endif

 void MapShadow(uptr addr, uptr size) {
+  // Although named MapShadow, this function's semantics are unrelated to
+  // UnmapShadow. It is currently only used for Go's lazy allocation of
+  // shadow, whose targets are program sections (e.g., bss, data).
+  // Therefore, we can guarantee that addr and size are aligned to both
+  // kShadowCell and kMetaShadowCell, as the following assertions check.
+  DCHECK_EQ(addr % kShadowCell, 0);
+  DCHECK_EQ(size % kShadowCell, 0);
+  DCHECK_EQ(addr % kMetaShadowCell, 0);
+  DCHECK_EQ(size % kMetaShadowCell, 0);
+
   // Ensure the thread registry lock is held, so as to synchronize
   // with DoReset, which also accesses the mapped_shadow_* ctx fields.
   ThreadRegistryLock lock0(&ctx->thread_registry);
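
For intuition, the rounding above can be checked in isolation. The following is a minimal, self-contained sketch, assuming kShadowCell == 8 and kMetaShadowCell == 8 and using a stand-in RoundUp helper; it is illustrative only, not the actual runtime code:

  // Illustrative sketch only: assumed cell sizes, stand-in RoundUp.
  #include <cassert>
  #include <cstdint>

  using uptr = uintptr_t;
  constexpr uptr kShadowCell = 8;     // assumed shadow cell granularity
  constexpr uptr kMetaShadowCell = 8; // assumed meta cell granularity

  // Round x up to a power-of-two boundary (stand-in for the runtime helper).
  constexpr uptr RoundUp(uptr x, uptr boundary) {
    return (x + boundary - 1) & ~(boundary - 1);
  }

  int main() {
    uptr addr = 4096, size = 4095; // munmap of a range with an unaligned end
    // Rounding the exclusive end up covers the partially used tail cell
    // [8184, 8192), which clearing only `size` bytes would leave stale.
    uptr size_for_shadow = RoundUp(addr + size, kShadowCell) - addr;
    assert(size_for_shadow == 4096);
    uptr size_for_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
    assert(size_for_meta == 4096);
    return 0;
  }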
11 changes: 6 additions & 5 deletions compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -684,16 +684,17 @@ void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) {
     DCHECK(IsShadowMem(shadow_mem));
   }

-  RawShadow* shadow_mem_end = reinterpret_cast<RawShadow*>(
-      reinterpret_cast<uptr>(shadow_mem) + size * kShadowMultiplier - 1);
-  if (!IsShadowMem(shadow_mem_end)) {
-    Printf("Bad shadow end addr: %p (%p)\n", shadow_mem_end,
+  uptr size1 =
+      (RoundUpTo(addr + size, kShadowCell) - RoundDownTo(addr, kShadowCell));
+  RawShadow* shadow_mem_end = shadow_mem + size1 / kShadowCell * kShadowCnt;
+  if (!IsShadowMem(shadow_mem_end - 1)) {
+    Printf("Bad shadow end addr: %p (%p)\n", shadow_mem_end - 1,
            (void*)(addr + size - 1));
     Printf(
         "Shadow start addr (ok): %p (%p); size: 0x%zx; kShadowMultiplier: "
         "%zx\n",
         shadow_mem, (void*)addr, size, kShadowMultiplier);
-    DCHECK(IsShadowMem(shadow_mem_end));
+    DCHECK(IsShadowMem(shadow_mem_end - 1));
   }
 #endif
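
To make the new end-of-shadow arithmetic concrete, here is a minimal sketch under assumed TSan-like constants (kShadowCell == 8 application bytes per cell, kShadowCnt == 4 RawShadow slots per cell) with stand-in RoundDownTo/RoundUpTo helpers; these values are assumptions for illustration:

  // Illustrative sketch only: assumed constants, stand-in helpers.
  #include <cassert>
  #include <cstdint>

  using uptr = uintptr_t;
  constexpr uptr kShadowCell = 8; // assumed app bytes per shadow cell
  constexpr uptr kShadowCnt = 4;  // assumed RawShadow slots per cell

  constexpr uptr RoundDownTo(uptr x, uptr b) { return x & ~(b - 1); }
  constexpr uptr RoundUpTo(uptr x, uptr b) { return (x + b - 1) & ~(b - 1); }

  int main() {
    uptr addr = 12, size = 3; // unaligned access range [12, 15)
    // Widen the range to whole cells: [8, 16) is exactly one kShadowCell.
    uptr size1 =
        RoundUpTo(addr + size, kShadowCell) - RoundDownTo(addr, kShadowCell);
    assert(size1 == 8);
    // One cell occupies kShadowCnt RawShadow slots, so shadow_mem + 4 is the
    // one-past-the-end shadow pointer; the last valid slot is that minus 1,
    // which is why the patch checks IsShadowMem(shadow_mem_end - 1).
    uptr slots = size1 / kShadowCell * kShadowCnt;
    assert(slots == 4);
    return 0;
  }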
14 changes: 14 additions & 0 deletions compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -246,6 +246,20 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
   // there are no concurrent accesses to the regions (e.g. stop-the-world).
   CHECK_NE(src, dst);
   CHECK_NE(sz, 0);
+
+  // The current MoveMemory implementation behaves incorrectly when src, dst,
+  // and sz are not aligned to kMetaShadowCell.
+  // For example, with kMetaShadowCell == 8:
+  // - src = 4: unexpectedly clears the metadata for the range [0, 4).
+  // - src = 16, dst = 4, sz = 8: a sync variable for addr = 20, which should
+  //   be moved to the metadata for address 8, is incorrectly moved to the
+  //   metadata for address 0 instead.
+  // - src = 0, sz = 4: fails to move the tail metadata.
+  // Therefore, the following assertions are needed.
+  DCHECK_EQ(src % kMetaShadowCell, 0);
+  DCHECK_EQ(dst % kMetaShadowCell, 0);
+  DCHECK_EQ(sz % kMetaShadowCell, 0);
+
   uptr diff = dst - src;
   u32 *src_meta = MemToMeta(src);
   u32 *dst_meta = MemToMeta(dst);
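
The failure modes listed in the new comment can be replayed with plain index arithmetic. The sketch below assumes kMetaShadowCell == 8 and uses a hypothetical MetaIndex helper that mimics MemToMeta's truncating, cell-granular address mapping; it is not the actual MetaMap code:

  // Illustrative sketch only: MetaIndex is a hypothetical stand-in for
  // MemToMeta-style truncating division down to meta cells.
  #include <cassert>
  #include <cstdint>

  using uptr = uintptr_t;
  constexpr uptr kMetaShadowCell = 8; // assumed

  constexpr uptr MetaIndex(uptr addr) { return addr / kMetaShadowCell; }

  int main() {
    // src = 4: the move starts inside meta cell 0, so cell-granular code
    // touches the metadata of the whole range [0, 8), including [0, 4).
    assert(MetaIndex(4) == 0);

    // src = 16, dst = 4, sz = 8: a sync variable at addr = 20 should end up
    // at dst + (20 - src) = 8, i.e. meta cell 1 ...
    uptr src = 16, dst = 4;
    assert(MetaIndex(dst + (20 - src)) == 1);
    // ... but moving whole cells relocates source cell MetaIndex(16) == 2
    // onto MetaIndex(dst) == 0, so the metadata lands at address 0 instead.
    assert(MetaIndex(src) == 2 && MetaIndex(dst) == 0);

    // src = 0, sz = 4: the range covers zero whole meta cells, so a
    // cell-count of sz / kMetaShadowCell == 0 moves no tail metadata.
    assert(4 / kMetaShadowCell == 0);
    return 0;
  }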
32 changes: 32 additions & 0 deletions compiler-rt/test/tsan/java_heap_init2.cpp
@@ -0,0 +1,32 @@
// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s

Contributor:

Please add these test cases in a separate pull request. Since your fix is not committed yet, the tests would need an "XFAIL" annotation. Then, after the test cases have landed, this fix PR would update them by removing the XFAIL. This two-step procedure makes it clear that 1) TSan was broken, and 2) this PR fixes it.

Contributor Author:

If it was unfixed in your environment, that was an exception (would you mind providing more information about the test failure, such as the triggering environment?). The relevant bug should indeed be fixed by this PR, and I had passed this test (Ubuntu 24.04 on x86_64) before pushing the commit.

FYI, this test case triggers a situation in which munmap clears the meta shadow incompletely, due to the previous use of an inclusive meta end. That bug is fixed by this PR, as follows:

  // This round-up gives the exclusive meta end.
  uptr size_for_meta = RoundUp(addr + size, kMetaShadowCell) - addr;
  ctx->metamap.ResetRange(thr->proc(), addr, size_for_meta, true);

#include "java.h"
#include <errno.h>
#include <sys/mman.h>

int main() {
// Test a non-regular kHeapSize
// Previously __tsan_java_init failed because it encountered non-zero meta
// shadow for the destination.
size_t const kPageSize = sysconf(_SC_PAGESIZE);
int const kSize = kPageSize - 1;
jptr jheap2 = (jptr)mmap(0, kSize, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
if (jheap2 == (jptr)MAP_FAILED)
return printf("mmap failed with %d\n", errno);
__atomic_store_n((int *)(jheap2 + kSize - 3), 1, __ATOMIC_RELEASE);
// Due to the previous incorrect meta-end calculation, the following munmap
// did not clear the tail meta shadow.
munmap((void *)jheap2, kSize);
int const kHeapSize2 = kSize + 1;
jheap2 = (jptr)mmap((void *)jheap2, kHeapSize2, PROT_READ | PROT_WRITE,
MAP_ANON | MAP_PRIVATE, -1, 0);
if (jheap2 == (jptr)MAP_FAILED)
return printf("second mmap failed with %d\n", errno);
__tsan_java_init(jheap2, kHeapSize2);
__tsan_java_move(jheap2, jheap2 + kHeapSize2 - 8, 8);
fprintf(stderr, "DONE\n");
return __tsan_java_fini();
}

// CHECK-NOT: WARNING: ThreadSanitizer: data race
// CHECK: DONE
56 changes: 56 additions & 0 deletions compiler-rt/test/tsan/munmap_clear_shadow.c
@@ -0,0 +1,56 @@
// RUN: %clang_tsan %s -o %t && %run %t | FileCheck %s
#include "test.h"
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

void __tsan_read1(void *addr);

struct thread_params {
  char *buf;
  unsigned int size;
};

static void *thread_func(void *arg) {
  struct thread_params *p = (struct thread_params *)arg;
  // Access 1
  p->buf[0] = 0x42;
  p->buf[p->size - 1] = 0x42;
  barrier_wait(&barrier);
  return 0;
}

int main() {
  const unsigned int kPageSize = sysconf(_SC_PAGESIZE);
  // The relevant shadow memory size should be an exact multiple of kPageSize,
  // even if Size = kPageSize - 1.
  const unsigned int Size = kPageSize - 1;

  barrier_init(&barrier, 2);
  char *buf = (char *)mmap(NULL, Size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(buf != MAP_FAILED);
  assert(((uintptr_t)buf % kPageSize) == 0);

  pthread_t t;
  struct thread_params p = {buf, Size};
  pthread_create(&t, 0, thread_func, &p);

  barrier_wait(&barrier);
  // Should clear all the shadow memory related to the mmapped region.
  munmap(buf, Size);

  // If the shadow memory was cleared completely, the following reads should
  // not race with Access 1.
  // CHECK-NOT: WARNING: ThreadSanitizer: data race
  __tsan_read1(&buf[0]);        // Access 2
  __tsan_read1(&buf[Size - 1]); // Access 2
  pthread_join(t, 0);

  puts("DONE");

  return 0;
}