Avoid reading many-object span sampled state in assertions.
Spans with multiple objects are mutated by the CentralFreeList as objects are
allocated/deallocated from the span.  In Span, the sampled_ bitfield shares its
byte with nonempty_index_ (mutated by the CentralFreeList), leading to a data
race.
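
For illustration, a hypothetical layout (not Span's actual definition) showing
why this races: the C++ memory model treats adjacent bitfields as a single
memory location, so an unsynchronized write to one bitfield races with a
concurrent read of its neighbor.

#include <cstdint>
#include <thread>

// Hypothetical layout, not Span's actual definition. Both bitfields share
// one byte, i.e. one memory location under the C++ memory model.
struct SpanBits {
  uint8_t nonempty_index : 4;  // mutated as objects are allocated/deallocated
  uint8_t sampled : 1;         // read on the deallocation path
};

int main() {
  SpanBits bits{};
  std::thread writer([&bits] {
    for (int i = 0; i < 1000; ++i) {
      bits.nonempty_index = static_cast<uint8_t>(i & 0xF);
    }
  });
  std::thread reader([&bits] {
    for (int i = 0; i < 1000; ++i) {
      bool s = bits.sampled;  // racy read of the shared byte; TSan flags this
      (void)s;
    }
  });
  writer.join();
  reader.join();
  return 0;
}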

This assertion is superfluous: we already assert that the PageMap holds a
non-zero size class for the object, consistent with the one provided.  We also
confirm that sampled spans do not have non-zero size classes upon
allocation/deallocation.

We also add a stress test for GetAllocatedSize, since it exercises a path that
checks sampled(), but only on large allocations.
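
For illustration, a minimal standalone sketch (not part of the commit) of the
check the stress test performs, assuming MallocExtension::GetAllocatedSize
returns an engaged optional size for pointers that tcmalloc owns.

#include <cstddef>
#include <new>

#include "absl/log/absl_check.h"
#include "tcmalloc/malloc_extension.h"

// Hypothetical check mirroring the new stress test: for a live allocation,
// the allocated size reported by tcmalloc is at least the requested size.
void CheckAllocatedSize() {
  constexpr std::size_t kRequested = 4096;
  void* ptr = ::operator new(kRequested);
  // GetAllocatedSize returns an optional; it is engaged for pointers that
  // tcmalloc owns.
  ABSL_CHECK_GE(*tcmalloc::MallocExtension::GetAllocatedSize(ptr), kRequested);
  ::operator delete(ptr);
}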

PiperOrigin-RevId: 573323553
Change-Id: I0ab4ae1ff63130318113d397495f685aa0ec12dc
ckennelly authored and copybara-github committed Oct 13, 2023
1 parent bf12669 commit 2811c61
Showing 4 changed files with 13 additions and 8 deletions.
3 changes: 3 additions & 0 deletions tcmalloc/allocation_sampling.h
@@ -455,6 +455,7 @@ static sized_ptr_t SampleifyAllocation(State& state, Policy policy,
     ASSERT(size_class != 0);
     FreeProxyObject(state, obj, size_class);
   }
+  ASSERT(state.pagemap().sizeclass(span->first_page()) == 0);
   return {(alloc_with_status.alloc != nullptr) ? alloc_with_status.alloc
                                                : span->start_address(),
           capacity};
@@ -466,6 +467,8 @@ inline void MaybeUnsampleAllocation(State& state, void* ptr, Span* span) {
   // state cleared only once. External synchronization when freeing is required;
   // otherwise, concurrent writes here would likely report a double-free.
   if (SampledAllocation* sampled_allocation = span->Unsample()) {
+    ASSERT(state.pagemap().sizeclass(PageIdContaining(ptr)) == 0);
+
     void* const proxy = sampled_allocation->sampled_stack.proxy;
     const size_t weight = sampled_allocation->sampled_stack.weight;
     const size_t requested_size =
5 changes: 2 additions & 3 deletions tcmalloc/tcmalloc.cc
@@ -722,8 +722,8 @@ static size_t GetSizeClass(void* ptr) {
 template <bool have_size_class>
 inline ABSL_ATTRIBUTE_ALWAYS_INLINE void do_free_with_size_class(
     void* ptr, size_t size_class) {
-  // !have_size_class -> size_class == 0
-  ASSERT(have_size_class || size_class == 0);
+  // !have_size_class <-> size_class == 0
+  ASSERT(have_size_class != (size_class == 0));
 
   // if we have_size_class, then we've excluded ptr == nullptr case. See
   // comment in do_free_with_size. Thus we only bother testing nullptr
@@ -746,7 +746,6 @@ inline ABSL_ATTRIBUTE_ALWAYS_INLINE void do_free_with_size_class(
   if (have_size_class || ABSL_PREDICT_TRUE(size_class != 0)) {
     ASSERT(size_class == GetSizeClass(ptr));
     ASSERT(ptr != nullptr);
-    ASSERT(!tc_globals.pagemap().GetExistingDescriptor(p)->sampled());
     FreeSmall(ptr, size_class);
   } else {
     InvokeHooksAndFreePages(ptr);
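
The assertion above tightens an implication into a biconditional: a zero size
class must now coincide exactly with !have_size_class.  A standalone
truth-table sketch (hypothetical, not part of the commit) confirming that the
boolean encoding matches the comment:

#include <cassert>
#include <cstddef>

// Checks that "(!have_size_class) <-> (size_class == 0)" is the same
// predicate as "have_size_class != (size_class == 0)" in all four cases.
int main() {
  for (bool have_size_class : {false, true}) {
    for (std::size_t size_class : {std::size_t{0}, std::size_t{7}}) {
      const bool biconditional = (!have_size_class) == (size_class == 0);
      assert(biconditional == (have_size_class != (size_class == 0)));
    }
  }
  return 0;
}
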
2 changes: 2 additions & 0 deletions tcmalloc/testing/BUILD
@@ -902,7 +902,9 @@ cc_test(
     copts = TCMALLOC_DEFAULT_COPTS,
     malloc = "//tcmalloc/internal:system_malloc",
     deps = [
+        "//tcmalloc:malloc_extension",
         "//tcmalloc:tcmalloc_internal_methods_only",  # buildcleaner: keep
+        "@com_google_absl//absl/log:absl_check",
         "@com_google_absl//absl/random",
         "@com_google_absl//absl/time",
         "@com_google_googletest//:gtest_main",
11 changes: 6 additions & 5 deletions tcmalloc/testing/parallel_test.cc
@@ -18,9 +18,11 @@
 #include <vector>
 
 #include "gtest/gtest.h"
+#include "absl/log/absl_check.h"
 #include "absl/random/random.h"
 #include "absl/time/clock.h"
 #include "absl/time/time.h"
+#include "tcmalloc/malloc_extension.h"
 
 extern "C" {
 
@@ -50,6 +52,10 @@ struct Allocator {
        v.push_back(TCMallocInternalNew(size));
      }
 
+     for (void* ptr : v) {
+       ABSL_CHECK_GE(*MallocExtension::GetAllocatedSize(ptr), size);
+     }
+
      for (void* ptr : v) {
        if (do_sized_delete) {
          TCMallocInternalDeleteSized(ptr, size);
@@ -66,11 +72,6 @@
 };
 
 TEST(ParallelTest, Stable) {
-#ifdef ABSL_HAVE_THREAD_SANITIZER
-  // TODO(b/274996721): Enable this when Span::nonempty_index_ does not
-  // conflict with other bitfields.
-  GTEST_SKIP() << "Skipping under tsan.";
-#endif
   std::atomic<bool> stop{false};
   Allocator a1(stop, /*do_sized_delete=*/true),
       a2(stop, /*do_sized_delete=*/true), a3(stop, /*do_sized_delete=*/false);
