Add [[nodiscard]] to various operations that return pointers/sizes.
Migrate existing ABSL_MUST_USE_RESULTs to C++17 [[nodiscard]].

PiperOrigin-RevId: 685737414
Change-Id: Ie7a6f171c16400150caed8c0875d2e4f8459374f
ckennelly authored and copybara-github committed Oct 14, 2024
1 parent 58818cf commit 5742e19
Showing 10 changed files with 56 additions and 43 deletions.
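
For context (not from this commit): on compilers that support the attribute, ABSL_MUST_USE_RESULT is typically defined as [[nodiscard]], so this change mostly spells the standard C++17 attribute directly and extends it to more pointer- and size-returning operations. A minimal standalone C++ sketch of what the attribute enforces — the names below are placeholders, not tcmalloc APIs:

#include <cstdlib>

// Discarding a [[nodiscard]] result draws a compiler warning, which matters
// for allocation-style APIs where a dropped pointer is a leaked or lost object.
[[nodiscard]] void* Alloc(std::size_t size) { return std::malloc(size); }

int main() {
  Alloc(64);            // warning: ignoring [[nodiscard]] return value (and a leak)
  void* p = Alloc(64);  // intended use: the pointer is captured
  std::free(p);
}

Callers that discard such a result on purpose silence the warning with an explicit (void) cast, as several of the benchmark changes below do.
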
7 changes: 4 additions & 3 deletions tcmalloc/central_freelist.h
@@ -67,8 +67,9 @@ class StaticForwarder {
   static size_t class_to_size(int size_class);
   static Length class_to_pages(int size_class);
   static void MapObjectsToSpans(absl::Span<void*> batch, Span** spans);
-  static Span* AllocateSpan(int size_class, size_t objects_per_span,
-                            Length pages_per_span)
+  [[nodiscard]] static Span* AllocateSpan(int size_class,
+                                          size_t objects_per_span,
+                                          Length pages_per_span)
       ABSL_LOCKS_EXCLUDED(pageheap_lock);
   static void DeallocateSpans(size_t objects_per_span,
                               absl::Span<Span*> free_spans)
@@ -112,7 +113,7 @@ class CentralFreeList {
 
   // Fill a prefix of batch[0..N-1] with up to N elements removed from central
   // freelist. Return the number of elements removed.
-  ABSL_MUST_USE_RESULT int RemoveRange(absl::Span<void*> batch)
+  [[nodiscard]] int RemoveRange(absl::Span<void*> batch)
       ABSL_LOCKS_EXCLUDED(lock_);
 
   // Returns the number of free objects in cache.
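
RemoveRange fills only a prefix of the batch and reports how many slots are valid; marking it [[nodiscard]] means a caller that forgets to capture that count now draws a warning. A self-contained sketch of the contract — FakeRemoveRange is a stand-in, not CentralFreeList:

#include <cstdio>
#include <vector>

// Fills a prefix of `batch` and returns the number of valid entries;
// only batch[0..returned) may be read by the caller.
[[nodiscard]] int FakeRemoveRange(std::vector<void*>& batch) {
  static int pool[2];  // pretend only two objects are free
  int filled = 0;
  for (; filled < 2 && filled < static_cast<int>(batch.size()); ++filled) {
    batch[filled] = &pool[filled];
  }
  return filled;
}

int main() {
  std::vector<void*> batch(8);
  const int got = FakeRemoveRange(batch);  // dropping this value would now warn
  for (int i = 0; i < got; ++i) {
    std::printf("object %d at %p\n", i, batch[i]);
  }
}
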
5 changes: 3 additions & 2 deletions tcmalloc/central_freelist_test.cc
@@ -121,8 +121,9 @@ TEST_P(StaticForwarderTest, Simple) {
   }
 
   for (void* ptr : batch) {
-    span->FreelistPush(ptr, object_size_, size_reciprocal_,
-                       max_span_cache_size);
+    EXPECT_EQ(span->FreelistPush(ptr, object_size_, size_reciprocal_,
+                                 max_span_cache_size),
+              ptr != batch.back());
   }
 
   StaticForwarder::DeallocateSpans(objects_per_span_, absl::MakeSpan(&span, 1));
27 changes: 17 additions & 10 deletions tcmalloc/cpu_cache.h
@@ -90,13 +90,14 @@ constexpr inline uint8_t kTotalPossibleSlabs =
 // testing.
 class StaticForwarder {
  public:
-  static void* Alloc(size_t size, std::align_val_t alignment)
+  [[nodiscard]] static void* Alloc(size_t size, std::align_val_t alignment)
       ABSL_LOCKS_EXCLUDED(pageheap_lock) {
     TC_ASSERT(tc_globals.IsInited());
     PageHeapSpinLockHolder l;
     return tc_globals.arena().Alloc(size, alignment);
   }
-  static void* AllocReportedImpending(size_t size, std::align_val_t alignment)
+  [[nodiscard]] static void* AllocReportedImpending(size_t size,
+                                                    std::align_val_t alignment)
       ABSL_LOCKS_EXCLUDED(pageheap_lock) {
     TC_ASSERT(tc_globals.IsInited());
     PageHeapSpinLockHolder l;
@@ -304,15 +305,15 @@ class CpuCache {
 
   // Allocate an object of the given size class.
   // Returns nullptr when allocation fails.
-  void* Allocate(size_t size_class);
+  [[nodiscard]] void* Allocate(size_t size_class);
   // Separate allocation fast/slow paths.
   // The fast path succeeds iff the thread has already cached the slab pointer
   // (done by AllocateSlow) and there is an available object in the slab.
-  void* AllocateFast(size_t size_class);
-  void* AllocateSlow(size_t size_class);
+  [[nodiscard]] void* AllocateFast(size_t size_class);
+  [[nodiscard]] void* AllocateSlow(size_t size_class);
   // A slightly faster version of AllocateSlow that may be called only
   // when it's known that no hooks are installed.
-  void* AllocateSlowNoHooks(size_t size_class);
+  [[nodiscard]] void* AllocateSlowNoHooks(size_t size_class);
 
   // Free an object of the given class.
   void Deallocate(void* ptr, size_t size_class);
@@ -603,12 +604,13 @@ class CpuCache {
   GetShiftMaxCapacity GetMaxCapacityFunctor(uint8_t shift) const;
 
   // Fetches objects from backing transfer cache.
-  int FetchFromBackingCache(size_t size_class, absl::Span<void*> batch);
+  [[nodiscard]] int FetchFromBackingCache(size_t size_class,
+                                          absl::Span<void*> batch);
 
   // Releases free batch of objects to the backing transfer cache.
   void ReleaseToBackingCache(size_t size_class, absl::Span<void*> batch);
 
-  void* Refill(int cpu, size_t size_class);
+  [[nodiscard]] void* Refill(int cpu, size_t size_class);
   std::pair<int, bool> CacheCpuSlab();
   void Populate(int cpu);
 
@@ -684,7 +686,7 @@ class CpuCache {
   // <shift_offset> is the offset of the shift in slabs_by_shift_. Note that we
   // can't calculate this from `shift` directly due to numa shift.
   // Returns the allocated slabs and the number of reused bytes.
-  ABSL_MUST_USE_RESULT std::pair<void*, size_t> AllocOrReuseSlabs(
+  [[nodiscard]] std::pair<void*, size_t> AllocOrReuseSlabs(
       absl::FunctionRef<void*(size_t, std::align_val_t)> alloc,
       subtle::percpu::Shift shift, int num_cpus, uint8_t shift_offset,
       uint8_t resize_offset);
@@ -1078,7 +1080,12 @@ void* CpuCache<Forwarder>::AllocateSlowNoHooks(size_t size_class) {
   if (ABSL_PREDICT_FALSE(cpu < 0)) {
     // The cpu is stopped.
     void* ptr = nullptr;
-    FetchFromBackingCache(size_class, absl::MakeSpan(&ptr, 1));
+    int r = FetchFromBackingCache(size_class, absl::MakeSpan(&ptr, 1));
+#ifndef NDEBUG
+    TC_ASSERT(r == 1 || ptr == nullptr);
+#else
+    (void)r;
+#endif
     return ptr;
   }
   if (void* ret = AllocateFast(size_class)) {
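
The AllocateSlowNoHooks hunk above shows the other side of the migration: when the fetched count only matters for a debug assertion, the result is still bound to a local and explicitly consumed so that neither build mode warns. A standalone sketch of the same pattern, with placeholder names rather than tcmalloc's:

#include <cassert>

[[nodiscard]] int Fetch(void** slot) {
  *slot = nullptr;  // stand-in: pretend nothing was available
  return 0;         // number of objects fetched
}

void* GetOne() {
  void* ptr = nullptr;
  int r = Fetch(&ptr);  // binding the result satisfies [[nodiscard]]
#ifndef NDEBUG
  assert(r == 1 || ptr == nullptr);  // the value is only used by the debug check
#else
  (void)r;  // avoid an unused-variable warning in optimized builds
#endif
  return ptr;
}

int main() { return GetOne() == nullptr ? 0 : 1; }
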
4 changes: 2 additions & 2 deletions tcmalloc/mock_central_freelist.h
@@ -53,7 +53,7 @@ class FakeCentralFreeListBase {
 class FakeCentralFreeList : public FakeCentralFreeListBase {
  public:
   void InsertRange(absl::Span<void*> batch);
-  int RemoveRange(absl::Span<void*> batch);
+  [[nodiscard]] int RemoveRange(absl::Span<void*> batch);
 
   void AllocateBatch(absl::Span<void*> batch);
   void FreeBatch(absl::Span<void*> batch);
@@ -66,7 +66,7 @@ class FakeCentralFreeList : public FakeCentralFreeListBase {
 class MinimalFakeCentralFreeList : public FakeCentralFreeListBase {
  public:
   void InsertRange(absl::Span<void*> batch);
-  int RemoveRange(absl::Span<void*> batch);
+  [[nodiscard]] int RemoveRange(absl::Span<void*> batch);
 
   void AllocateBatch(absl::Span<void*> batch);
   void FreeBatch(absl::Span<void*> batch);
5 changes: 3 additions & 2 deletions tcmalloc/mock_static_forwarder.h
@@ -66,7 +66,7 @@ class FakeStaticForwarder {
     }
   }
 
-  Span* MapObjectToSpan(const void* object) {
+  [[nodiscard]] Span* MapObjectToSpan(const void* object) {
     const PageId page = PageIdContaining(object);
 
     absl::MutexLock l(&mu_);
@@ -82,7 +82,8 @@ class FakeStaticForwarder {
     return nullptr;
   }
 
-  Span* AllocateSpan(int, size_t objects_per_span, Length pages_per_span) {
+  [[nodiscard]] Span* AllocateSpan(int, size_t objects_per_span,
+                                   Length pages_per_span) {
     void* backing =
         ::operator new(pages_per_span.in_bytes(), std::align_val_t(kPageSize));
     PageId page = PageIdContaining(backing);
13 changes: 7 additions & 6 deletions tcmalloc/span.h
@@ -139,7 +139,7 @@ class Span final : public SpanList::Elem {
   // non-sampling span. It will also update the global counters on the total
   // size of sampled allocations.
   // REQUIRES: this is a SAMPLED span.
-  SampledAllocation* Unsample();
+  [[nodiscard]] SampledAllocation* Unsample();
 
   // Returns the sampled allocation of the span.
   // pageheap_lock is not required, but caller either needs to hold the lock or
@@ -213,21 +213,22 @@ class Span final : public SpanList::Elem {
   // just return false.
   //
   // If the freelist becomes full, we do not push the object onto the freelist.
-  bool FreelistPush(void* ptr, size_t size, uint32_t reciprocal,
-                    uint32_t max_cache_size);
+  [[nodiscard]] bool FreelistPush(void* ptr, size_t size, uint32_t reciprocal,
+                                  uint32_t max_cache_size);
 
   // Pops up to N objects from the freelist and returns them in the batch array.
   // Returns number of objects actually popped.
-  size_t FreelistPopBatch(absl::Span<void*> batch, size_t size);
+  [[nodiscard]] size_t FreelistPopBatch(absl::Span<void*> batch, size_t size);
 
   // Reset a Span object to track the range [p, p + n).
   void Init(PageId p, Length n);
 
   // Initialize freelist to contain all objects in the span.
   // Pops up to N objects from the freelist and returns them in the batch array.
   // Returns number of objects actually popped.
-  int BuildFreelist(size_t size, size_t count, absl::Span<void*> batch,
-                    uint32_t max_cache_size, uint64_t alloc_time);
+  [[nodiscard]] int BuildFreelist(size_t size, size_t count,
+                                  absl::Span<void*> batch,
+                                  uint32_t max_cache_size, uint64_t alloc_time);
 
   // Prefetch cacheline containing most important span information.
   void Prefetch();
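
Per the comment in the hunk above, FreelistPush returns false — and stores nothing — once the freelist is full, so a caller must route the rejected object elsewhere. A self-contained sketch of that obligation, using a toy bounded freelist rather than Span:

#include <cstddef>
#include <cstdio>
#include <vector>

class ToyFreelist {
 public:
  explicit ToyFreelist(std::size_t cap) : cap_(cap) {}

  // Returns false (and stores nothing) once the freelist is full.
  [[nodiscard]] bool Push(void* ptr) {
    if (slots_.size() == cap_) return false;
    slots_.push_back(ptr);
    return true;
  }

 private:
  std::size_t cap_;
  std::vector<void*> slots_;
};

int main() {
  ToyFreelist list(2);
  int objects[3];
  for (int& obj : objects) {
    if (!list.Push(&obj)) {
      // Not cached: a real caller would hand the object back to another cache
      // instead of dropping it on the floor.
      std::printf("freelist full, object %p not pushed\n", static_cast<void*>(&obj));
    }
  }
}
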
11 changes: 6 additions & 5 deletions tcmalloc/span_benchmark.cc
@@ -49,8 +49,9 @@ class RawSpan {
     int res = posix_memalign(&mem, kPageSize, npages.in_bytes());
     TC_CHECK_EQ(res, 0);
     span_.Init(PageIdContaining(mem), npages);
-    span_.BuildFreelist(size, objects_per_span, {}, kMaxCacheSize,
-                        kSpanAllocTime);
+    TC_CHECK_EQ(span_.BuildFreelist(size, objects_per_span, {}, kMaxCacheSize,
+                                    kSpanAllocTime),
+                0);
   }
 
   ~RawSpan() { free(span_.start_address()); }
@@ -81,7 +82,7 @@ void BM_single_span(benchmark::State& state) {
     processed += n;
 
     for (int j = 0; j < n; j++) {
-      span.FreelistPush(batch[j], size, reciprocal, kMaxCacheSize);
+      (void)span.FreelistPush(batch[j], size, reciprocal, kMaxCacheSize);
     }
   }
 
@@ -203,8 +204,8 @@ void BM_multiple_spans(benchmark::State& state) {
     processed += n;
 
     for (int j = 0; j < n; j++) {
-      spans[current_span].span().FreelistPush(batch[j], size, reciprocal,
-                                              kMaxCacheSize);
+      (void)spans[current_span].span().FreelistPush(batch[j], size, reciprocal,
+                                                    kMaxCacheSize);
     }
   }
 
5 changes: 3 additions & 2 deletions tcmalloc/span_fuzz.cc
@@ -70,8 +70,9 @@ void FuzzSpan(const std::string& s) {
   Span* span = new (buf) Span();
   span->Init(PageIdContaining(mem), pages);
   // TODO(b/271282540): Fuzz the initial allocation during freelist building.
-  span->BuildFreelist(object_size, objects_per_span, {}, max_span_cache_size,
-                      alloc_time);
+  TC_CHECK_EQ(span->BuildFreelist(object_size, objects_per_span, {},
+                                  max_span_cache_size, alloc_time),
+              0);
 
   TC_CHECK_EQ(span->Allocated(), 0);
 
5 changes: 3 additions & 2 deletions tcmalloc/span_test.cc
@@ -62,8 +62,9 @@ class RawSpan {
 
     span_ = new (buf_) Span();
     span_->Init(PageIdContaining(mem_), npages);
-    span_->BuildFreelist(size, objects_per_span, {}, max_cache_size,
-                         kSpanAllocTime);
+    TC_CHECK_EQ(span_->BuildFreelist(size, objects_per_span, {}, max_cache_size,
+                                     kSpanAllocTime),
+                0);
   }
 
   ~RawSpan() {
17 changes: 8 additions & 9 deletions tcmalloc/transfer_cache.h
@@ -112,7 +112,7 @@ class BackingTransferCache {
  public:
   void Init(int size_class) { size_class_ = size_class; }
   void InsertRange(absl::Span<void *> batch) const;
-  ABSL_MUST_USE_RESULT int RemoveRange(absl::Span<void *> batch) const;
+  [[nodiscard]] int RemoveRange(absl::Span<void *> batch) const;
   int size_class() const { return size_class_; }
 
  private:
@@ -194,7 +194,7 @@ class ShardedTransferCacheManagerBase {
     return objects;
   }
 
-  void *Pop(int size_class) {
+  [[nodiscard]] void *Pop(int size_class) {
     TC_ASSERT(subtle::percpu::IsFastNoInit());
     void *batch[1];
     const int got =
@@ -276,7 +276,7 @@ class ShardedTransferCacheManagerBase {
     return stats;
   }
 
-  int RemoveRange(int size_class, absl::Span<void *> batch) {
+  [[nodiscard]] int RemoveRange(int size_class, absl::Span<void *> batch) {
    return get_cache(size_class).RemoveRange(size_class, batch);
   }
 
@@ -423,8 +423,7 @@ class TransferCacheManager : public StaticForwarder {
     cache_[size_class].tc.InsertRange(size_class, batch);
   }
 
-  ABSL_MUST_USE_RESULT int RemoveRange(int size_class,
-                                       absl::Span<void *> batch) {
+  [[nodiscard]] int RemoveRange(int size_class, absl::Span<void *> batch) {
     return cache_[size_class].tc.RemoveRange(size_class, batch);
   }
 
@@ -558,8 +557,7 @@ class TransferCacheManager {
     freelist_[size_class].InsertRange(batch);
   }
 
-  ABSL_MUST_USE_RESULT int RemoveRange(int size_class,
-                                       absl::Span<void*> batch) {
+  [[nodiscard]] int RemoveRange(int size_class, absl::Span<void*> batch) {
     return freelist_[size_class].RemoveRange(batch);
   }
 
@@ -587,9 +585,10 @@ struct ShardedTransferCacheManager {
   constexpr ShardedTransferCacheManager(std::nullptr_t, std::nullptr_t) {}
   static constexpr void Init() {}
   static constexpr bool should_use(int size_class) { return false; }
-  static constexpr void* Pop(int size_class) { return nullptr; }
+  [[nodiscard]] static constexpr void* Pop(int size_class) { return nullptr; }
   static constexpr void Push(int size_class, void* ptr) {}
-  static constexpr int RemoveRange(int size_class, absl::Span<void*> batch) {
+  [[nodiscard]] static constexpr int RemoveRange(int size_class,
+                                                 absl::Span<void*> batch) {
     return 0;
   }
   static constexpr void InsertRange(int size_class, absl::Span<void*> batch) {}
