Rework selection of error type to avoid spin lock.
- Remove storage of write_flag from GuardedPageAllocator::SlotMetadata.
- Remove the method GuardedPageAllocator::SetWriteFlag.
- Stop refining the return value of GetErrorType (write_flag is no
  longer available there).
- Refine the error value in the SegvHandler instead (see the sketch
  below).

PiperOrigin-RevId: 573306812
Change-Id: I75d90ac2daeb18a5375dd172b4242bb90d39fcba
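
In other words: the handler used to publish the access direction into shared slot metadata under guarded_page_lock_, and GetErrorType read it back; now the direction stays in a local variable and the coarse error type is refined in place, so the handler takes no lock for this step. A minimal self-contained sketch of the new flow, with stand-in enums and only the overflow case spelled out (the names mirror this commit; the scaffolding is illustrative, not the tcmalloc sources):

#include <cassert>

// Stand-ins for the tcmalloc enums; values trimmed for brevity.
enum class WriteFlag { Unknown, Read, Write };
enum class GuardedAllocationsErrorType {
  kUnknown,
  kBufferOverflow,
  kBufferOverflowRead,
  kBufferOverflowWrite,
};

// Shape of the new RefineErrorTypeBasedOnWriteFlag: a pure function of its
// arguments, so the signal handler no longer needs guarded_page_lock_.
GuardedAllocationsErrorType Refine(GuardedAllocationsErrorType error,
                                   WriteFlag write_flag) {
  if (error == GuardedAllocationsErrorType::kBufferOverflow) {
    if (write_flag == WriteFlag::Write)
      return GuardedAllocationsErrorType::kBufferOverflowWrite;
    if (write_flag == WriteFlag::Read)
      return GuardedAllocationsErrorType::kBufferOverflowRead;
  }
  return error;  // Other categories refine the same way; omitted here.
}

int main() {
  // Old flow (removed in this commit): SetWriteFlag(fault, write_flag)
  // stored the flag into shared slot metadata under a SpinLockHolder, and
  // GetErrorType read it back. New flow: the flag is threaded as a value.
  WriteFlag write_flag = WriteFlag::Write;  // from ExtractWriteFlagFromContext
  auto error = GuardedAllocationsErrorType::kBufferOverflow;  // from GetErrorType
  assert(Refine(error, write_flag) ==
         GuardedAllocationsErrorType::kBufferOverflowWrite);
}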
kda authored and copybara-github committed Oct 13, 2023
1 parent 2ee0510 commit 9a8c27e
Showing 3 changed files with 43 additions and 37 deletions.
35 changes: 3 additions & 32 deletions tcmalloc/guarded_page_allocator.cc
@@ -111,7 +111,6 @@ GuardedAllocWithStatus GuardedPageAllocator::Allocate(size_t size,
   d.alloc_trace.tid = absl::base_internal::GetTID();
   d.requested_size = size;
   d.allocation_start = reinterpret_cast<uintptr_t>(result);
-  d.write_flag = WriteFlag::Unknown;
 
   ASSERT(!alignment || d.allocation_start % alignment == 0);
   return {result, Profile::Sample::GuardedStatus::Guarded};
@@ -245,13 +244,6 @@ size_t GuardedPageAllocator::SuccessfulAllocations() {
   return num_allocation_requests_ - num_failed_allocations_;
 }
 
-void GuardedPageAllocator::SetWriteFlag(const void* ptr, WriteFlag write_flag) {
-  const uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
-  size_t slot = GetNearestSlot(addr);
-  absl::base_internal::SpinLockHolder h(&guarded_page_lock_);
-  data_[slot].write_flag = write_flag;
-}
-
 // Maps 2 * total_pages_ + 1 pages so that there are total_pages_ unique pages
 // we can return from Allocate with guard pages before and after them.
 void GuardedPageAllocator::MapPages() {
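
Context for the commit title: the deleted SetWriteFlag above acquired guarded_page_lock_, an absl SpinLock, and it was invoked from inside the SIGSEGV handler. Taking a non-reentrant lock in a signal handler is not async-signal-safe and risks self-deadlock; the schematic below is a hypothetical interleaving illustrating the classic hazard, not something observed in tcmalloc:

// Hypothetical interleaving (illustrative only; names mirror the code above):
//
// Thread T                                 SIGSEGV handler (runs on T)
// ---------------------------------------  --------------------------------
// SpinLockHolder h(&guarded_page_lock_);
// ... touches a guarded page -> SIGSEGV    SegvHandler starts
//                                          SetWriteFlag(fault, write_flag)
//                                          -> spins on guarded_page_lock_
//                                          -> T cannot release the lock
//                                             while paused in the handler:
//                                             self-deadlock.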
@@ -377,34 +369,13 @@ GuardedAllocationsErrorType GuardedPageAllocator::GetErrorType(
   if (write_overflow_detected_)
     return GuardedAllocationsErrorType::kBufferOverflowOnDealloc;
   if (d.dealloc_trace.depth > 0) {
-    switch (d.write_flag) {
-      case WriteFlag::Write:
-        return GuardedAllocationsErrorType::kUseAfterFreeWrite;
-      case WriteFlag::Read:
-        return GuardedAllocationsErrorType::kUseAfterFreeRead;
-      default:
-        return GuardedAllocationsErrorType::kUseAfterFree;
-    }
+    return GuardedAllocationsErrorType::kUseAfterFree;
   }
   if (addr < d.allocation_start) {
-    switch (d.write_flag) {
-      case WriteFlag::Write:
-        return GuardedAllocationsErrorType::kBufferUnderflowWrite;
-      case WriteFlag::Read:
-        return GuardedAllocationsErrorType::kBufferUnderflowRead;
-      default:
-        return GuardedAllocationsErrorType::kBufferUnderflow;
-    }
+    return GuardedAllocationsErrorType::kBufferUnderflow;
   }
   if (addr >= d.allocation_start + d.requested_size) {
-    switch (d.write_flag) {
-      case WriteFlag::Write:
-        return GuardedAllocationsErrorType::kBufferOverflowWrite;
-      case WriteFlag::Read:
-        return GuardedAllocationsErrorType::kBufferOverflowRead;
-      default:
-        return GuardedAllocationsErrorType::kBufferOverflow;
-    }
+    return GuardedAllocationsErrorType::kBufferOverflow;
   }
   return GuardedAllocationsErrorType::kUnknown;
 }
4 changes: 0 additions & 4 deletions tcmalloc/guarded_page_allocator.h
@@ -173,9 +173,6 @@ class GuardedPageAllocator {
 
   size_t SuccessfulAllocations() ABSL_LOCKS_EXCLUDED(guarded_page_lock_);
 
-  void SetWriteFlag(const void* ptr, WriteFlag write_flag)
-      ABSL_LOCKS_EXCLUDED(guarded_page_lock_);
-
   size_t page_size() const { return page_size_; }
 
  private:
@@ -185,7 +182,6 @@ class GuardedPageAllocator {
     GuardedAllocationsStackTrace dealloc_trace;
     size_t requested_size = 0;
     uintptr_t allocation_start = 0;
-    WriteFlag write_flag = WriteFlag::Unknown;
   };
 
   // Max number of magic bytes we use to detect write-overflows at deallocation.
41 changes: 40 additions & 1 deletion tcmalloc/segv_handler.cc
@@ -151,6 +151,45 @@ static WriteFlag ExtractWriteFlagFromContext(void* context) {
 #endif
 }
 
+GuardedAllocationsErrorType RefineErrorTypeBasedOnWriteFlag(
+    GuardedAllocationsErrorType error, WriteFlag write_flag) {
+  switch (error) {
+    case GuardedAllocationsErrorType::kUseAfterFree:
+      switch (write_flag) {
+        case WriteFlag::Write:
+          return GuardedAllocationsErrorType::kUseAfterFreeWrite;
+        case WriteFlag::Read:
+          return GuardedAllocationsErrorType::kUseAfterFreeRead;
+        default:
+          break;
+      }
+      break;
+    case GuardedAllocationsErrorType::kBufferUnderflow:
+      switch (write_flag) {
+        case WriteFlag::Write:
+          return GuardedAllocationsErrorType::kBufferUnderflowWrite;
+        case WriteFlag::Read:
+          return GuardedAllocationsErrorType::kBufferUnderflowRead;
+        default:
+          break;
+      }
+      break;
+    case GuardedAllocationsErrorType::kBufferOverflow:
+      switch (write_flag) {
+        case WriteFlag::Write:
+          return GuardedAllocationsErrorType::kBufferOverflowWrite;
+        case WriteFlag::Read:
+          return GuardedAllocationsErrorType::kBufferOverflowRead;
+        default:
+          break;
+      }
+      break;
+    default:
+      break;
+  }
+  return error;
+}
+
 // A SEGV handler that prints stack traces for the allocation and deallocation
 // of relevant memory as well as the location of the memory error.
 void SegvHandler(int signo, siginfo_t* info, void* context) {
@@ -160,13 +199,13 @@ void SegvHandler(int signo, siginfo_t* info, void* context) {
 
   // Store load/store from context.
   WriteFlag write_flag = ExtractWriteFlagFromContext(context);
-  tc_globals.guardedpage_allocator().SetWriteFlag(fault, write_flag);
 
   GuardedAllocationsStackTrace *alloc_trace, *dealloc_trace;
   GuardedAllocationsErrorType error =
       tc_globals.guardedpage_allocator().GetStackTraces(fault, &alloc_trace,
                                                         &dealloc_trace);
   if (error == GuardedAllocationsErrorType::kUnknown) return;
+  error = RefineErrorTypeBasedOnWriteFlag(error, write_flag);
   pid_t current_thread = absl::base_internal::GetTID();
   off_t offset;
   size_t size;
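The first hunk above shows only the tail of ExtractWriteFlagFromContext, whose body is unchanged by this commit. For readers wondering how a handler can recover the access direction at all, here is a minimal Linux/x86-64 sketch; it is an illustrative assumption, not necessarily tcmalloc's implementation. On that platform the kernel saves the page-fault error code in the REG_ERR slot of the signal context, and bit 1 is set when the faulting access was a write:

#include <ucontext.h>  // REG_ERR may require -D_GNU_SOURCE on some compilers

enum class WriteFlag { Unknown, Read, Write };

// Hypothetical stand-in for ExtractWriteFlagFromContext (sketch only):
// reads the saved page-fault error code and tests the write bit.
static WriteFlag ExtractWriteFlagSketch(void* context) {
#if defined(__linux__) && defined(__x86_64__)
  const auto* uc = static_cast<const ucontext_t*>(context);
  return (uc->uc_mcontext.gregs[REG_ERR] & 0x2) ? WriteFlag::Write
                                                : WriteFlag::Read;
#else
  (void)context;  // Other platforms are out of scope for this sketch.
  return WriteFlag::Unknown;
#endif
}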
