diff --git a/tcmalloc/central_freelist.h b/tcmalloc/central_freelist.h
index 435088e84..2304d323f 100644
--- a/tcmalloc/central_freelist.h
+++ b/tcmalloc/central_freelist.h
@@ -67,8 +67,9 @@ class StaticForwarder {
   static size_t class_to_size(int size_class);
   static Length class_to_pages(int size_class);
   static void MapObjectsToSpans(absl::Span<void*> batch, Span** spans);
-  static Span* AllocateSpan(int size_class, size_t objects_per_span,
-                            Length pages_per_span)
+  [[nodiscard]] static Span* AllocateSpan(int size_class,
+                                          size_t objects_per_span,
+                                          Length pages_per_span)
       ABSL_LOCKS_EXCLUDED(pageheap_lock);
   static void DeallocateSpans(size_t objects_per_span,
                               absl::Span<Span*> free_spans)
@@ -112,7 +113,7 @@ class CentralFreeList {
 
   // Fill a prefix of batch[0..N-1] with up to N elements removed from central
   // freelist. Return the number of elements removed.
-  ABSL_MUST_USE_RESULT int RemoveRange(absl::Span<void*> batch)
+  [[nodiscard]] int RemoveRange(absl::Span<void*> batch)
       ABSL_LOCKS_EXCLUDED(lock_);
 
   // Returns the number of free objects in cache.
diff --git a/tcmalloc/central_freelist_test.cc b/tcmalloc/central_freelist_test.cc
index cd4b850f4..f8d777cf4 100644
--- a/tcmalloc/central_freelist_test.cc
+++ b/tcmalloc/central_freelist_test.cc
@@ -121,8 +121,9 @@ TEST_P(StaticForwarderTest, Simple) {
   }
 
   for (void* ptr : batch) {
-    span->FreelistPush(ptr, object_size_, size_reciprocal_,
-                       max_span_cache_size);
+    EXPECT_EQ(span->FreelistPush(ptr, object_size_, size_reciprocal_,
+                                 max_span_cache_size),
+              ptr != batch.back());
   }
 
   StaticForwarder::DeallocateSpans(objects_per_span_, absl::MakeSpan(&span, 1));
diff --git a/tcmalloc/cpu_cache.h b/tcmalloc/cpu_cache.h
index 70679033a..79d23ae96 100644
--- a/tcmalloc/cpu_cache.h
+++ b/tcmalloc/cpu_cache.h
@@ -90,13 +90,14 @@ constexpr inline uint8_t kTotalPossibleSlabs =
 // testing.
 class StaticForwarder {
  public:
-  static void* Alloc(size_t size, std::align_val_t alignment)
+  [[nodiscard]] static void* Alloc(size_t size, std::align_val_t alignment)
       ABSL_LOCKS_EXCLUDED(pageheap_lock) {
     TC_ASSERT(tc_globals.IsInited());
     PageHeapSpinLockHolder l;
     return tc_globals.arena().Alloc(size, alignment);
   }
-  static void* AllocReportedImpending(size_t size, std::align_val_t alignment)
+  [[nodiscard]] static void* AllocReportedImpending(size_t size,
+                                                    std::align_val_t alignment)
       ABSL_LOCKS_EXCLUDED(pageheap_lock) {
     TC_ASSERT(tc_globals.IsInited());
     PageHeapSpinLockHolder l;
@@ -304,15 +305,15 @@ class CpuCache {
 
   // Allocate an object of the given size class.
   // Returns nullptr when allocation fails.
-  void* Allocate(size_t size_class);
+  [[nodiscard]] void* Allocate(size_t size_class);
   // Separate allocation fast/slow paths.
   // The fast path succeeds iff the thread has already cached the slab pointer
   // (done by AllocateSlow) and there is an available object in the slab.
-  void* AllocateFast(size_t size_class);
-  void* AllocateSlow(size_t size_class);
+  [[nodiscard]] void* AllocateFast(size_t size_class);
+  [[nodiscard]] void* AllocateSlow(size_t size_class);
   // A slightly faster version of AllocateSlow that may be called only
   // when it's known that no hooks are installed.
-  void* AllocateSlowNoHooks(size_t size_class);
+  [[nodiscard]] void* AllocateSlowNoHooks(size_t size_class);
 
   // Free an object of the given class.
   void Deallocate(void* ptr, size_t size_class);
@@ -603,12 +604,13 @@ class CpuCache {
   GetShiftMaxCapacity GetMaxCapacityFunctor(uint8_t shift) const;
 
   // Fetches objects from backing transfer cache.
-  int FetchFromBackingCache(size_t size_class, absl::Span<void*> batch);
+  [[nodiscard]] int FetchFromBackingCache(size_t size_class,
+                                          absl::Span<void*> batch);
   // Releases free batch of objects to the backing transfer cache.
   void ReleaseToBackingCache(size_t size_class, absl::Span<void*> batch);
 
-  void* Refill(int cpu, size_t size_class);
+  [[nodiscard]] void* Refill(int cpu, size_t size_class);
 
   std::pair<int, bool> CacheCpuSlab();
 
   void Populate(int cpu);
@@ -684,7 +686,7 @@ class CpuCache {
   // is the offset of the shift in slabs_by_shift_. Note that we
   // can't calculate this from `shift` directly due to numa shift.
   // Returns the allocated slabs and the number of reused bytes.
-  ABSL_MUST_USE_RESULT std::pair<void*, size_t> AllocOrReuseSlabs(
+  [[nodiscard]] std::pair<void*, size_t> AllocOrReuseSlabs(
       absl::FunctionRef<void*(size_t, std::align_val_t)> alloc,
       subtle::percpu::Shift shift, int num_cpus, uint8_t shift_offset,
       uint8_t resize_offset);
@@ -1078,7 +1080,12 @@ void* CpuCache<Forwarder>::AllocateSlowNoHooks(size_t size_class) {
   if (ABSL_PREDICT_FALSE(cpu < 0)) {
     // The cpu is stopped.
     void* ptr = nullptr;
-    FetchFromBackingCache(size_class, absl::MakeSpan(&ptr, 1));
+    int r = FetchFromBackingCache(size_class, absl::MakeSpan(&ptr, 1));
+#ifndef NDEBUG
+    TC_ASSERT(r == 1 || ptr == nullptr);
+#else
+    (void)r;
+#endif
     return ptr;
   }
   if (void* ret = AllocateFast(size_class)) {
diff --git a/tcmalloc/mock_central_freelist.h b/tcmalloc/mock_central_freelist.h
index 36933d4df..00a650023 100644
--- a/tcmalloc/mock_central_freelist.h
+++ b/tcmalloc/mock_central_freelist.h
@@ -53,7 +53,7 @@ class FakeCentralFreeListBase {
 class FakeCentralFreeList : public FakeCentralFreeListBase {
  public:
   void InsertRange(absl::Span<void*> batch);
-  int RemoveRange(absl::Span<void*> batch);
+  [[nodiscard]] int RemoveRange(absl::Span<void*> batch);
 
   void AllocateBatch(absl::Span<void*> batch);
   void FreeBatch(absl::Span<void*> batch);
@@ -66,7 +66,7 @@ class FakeCentralFreeList : public FakeCentralFreeListBase {
 class MinimalFakeCentralFreeList : public FakeCentralFreeListBase {
  public:
   void InsertRange(absl::Span<void*> batch);
-  int RemoveRange(absl::Span<void*> batch);
+  [[nodiscard]] int RemoveRange(absl::Span<void*> batch);
 
   void AllocateBatch(absl::Span<void*> batch);
   void FreeBatch(absl::Span<void*> batch);
diff --git a/tcmalloc/mock_static_forwarder.h b/tcmalloc/mock_static_forwarder.h
index ec6f8f9c8..5ad809193 100644
--- a/tcmalloc/mock_static_forwarder.h
+++ b/tcmalloc/mock_static_forwarder.h
@@ -66,7 +66,7 @@ class FakeStaticForwarder {
     }
   }
 
-  Span* MapObjectToSpan(const void* object) {
+  [[nodiscard]] Span* MapObjectToSpan(const void* object) {
     const PageId page = PageIdContaining(object);
 
     absl::MutexLock l(&mu_);
@@ -82,7 +82,8 @@ class FakeStaticForwarder {
     return nullptr;
   }
 
-  Span* AllocateSpan(int, size_t objects_per_span, Length pages_per_span) {
+  [[nodiscard]] Span* AllocateSpan(int, size_t objects_per_span,
+                                   Length pages_per_span) {
     void* backing =
         ::operator new(pages_per_span.in_bytes(), std::align_val_t(kPageSize));
     PageId page = PageIdContaining(backing);
diff --git a/tcmalloc/span.h b/tcmalloc/span.h
index 176a64fb0..a0bc72266 100644
--- a/tcmalloc/span.h
+++ b/tcmalloc/span.h
@@ -139,7 +139,7 @@ class Span final : public SpanList::Elem {
   // non-sampling span. It will also update the global counters on the total
   // size of sampled allocations.
   // REQUIRES: this is a SAMPLED span.
-  SampledAllocation* Unsample();
+  [[nodiscard]] SampledAllocation* Unsample();
 
   // Returns the sampled allocation of the span.
   // pageheap_lock is not required, but caller either needs to hold the lock or
@@ -213,12 +213,12 @@ class Span final : public SpanList::Elem {
   // just return false.
   //
   // If the freelist becomes full, we do not push the object onto the freelist.
-  bool FreelistPush(void* ptr, size_t size, uint32_t reciprocal,
-                    uint32_t max_cache_size);
+  [[nodiscard]] bool FreelistPush(void* ptr, size_t size, uint32_t reciprocal,
+                                  uint32_t max_cache_size);
 
   // Pops up to N objects from the freelist and returns them in the batch array.
   // Returns number of objects actually popped.
-  size_t FreelistPopBatch(absl::Span<void*> batch, size_t size);
+  [[nodiscard]] size_t FreelistPopBatch(absl::Span<void*> batch, size_t size);
 
   // Reset a Span object to track the range [p, p + n).
   void Init(PageId p, Length n);
@@ -226,8 +226,9 @@ class Span final : public SpanList::Elem {
   // Initialize freelist to contain all objects in the span.
   // Pops up to N objects from the freelist and returns them in the batch array.
   // Returns number of objects actually popped.
-  int BuildFreelist(size_t size, size_t count, absl::Span<void*> batch,
-                    uint32_t max_cache_size, uint64_t alloc_time);
+  [[nodiscard]] int BuildFreelist(size_t size, size_t count,
+                                  absl::Span<void*> batch,
+                                  uint32_t max_cache_size, uint64_t alloc_time);
 
   // Prefetch cacheline containing most important span information.
   void Prefetch();
diff --git a/tcmalloc/span_benchmark.cc b/tcmalloc/span_benchmark.cc
index ade470fa1..41c98932c 100644
--- a/tcmalloc/span_benchmark.cc
+++ b/tcmalloc/span_benchmark.cc
@@ -49,8 +49,9 @@ class RawSpan {
     int res = posix_memalign(&mem, kPageSize, npages.in_bytes());
     TC_CHECK_EQ(res, 0);
     span_.Init(PageIdContaining(mem), npages);
-    span_.BuildFreelist(size, objects_per_span, {}, kMaxCacheSize,
-                        kSpanAllocTime);
+    TC_CHECK_EQ(span_.BuildFreelist(size, objects_per_span, {}, kMaxCacheSize,
+                                    kSpanAllocTime),
+                0);
   }
 
   ~RawSpan() { free(span_.start_address()); }
@@ -81,7 +82,7 @@ void BM_single_span(benchmark::State& state) {
     processed += n;
 
     for (int j = 0; j < n; j++) {
-      span.FreelistPush(batch[j], size, reciprocal, kMaxCacheSize);
+      (void)span.FreelistPush(batch[j], size, reciprocal, kMaxCacheSize);
     }
   }
 
@@ -203,8 +204,8 @@ void BM_multiple_spans(benchmark::State& state) {
     processed += n;
 
     for (int j = 0; j < n; j++) {
-      spans[current_span].span().FreelistPush(batch[j], size, reciprocal,
-                                              kMaxCacheSize);
+      (void)spans[current_span].span().FreelistPush(batch[j], size, reciprocal,
+                                                    kMaxCacheSize);
     }
   }
 
diff --git a/tcmalloc/span_fuzz.cc b/tcmalloc/span_fuzz.cc
index e9ed81bcb..ea4c1a41a 100644
--- a/tcmalloc/span_fuzz.cc
+++ b/tcmalloc/span_fuzz.cc
@@ -70,8 +70,9 @@ void FuzzSpan(const std::string& s) {
   Span* span = new (buf) Span();
   span->Init(PageIdContaining(mem), pages);
   // TODO(b/271282540): Fuzz the initial allocation during freelist building.
-  span->BuildFreelist(object_size, objects_per_span, {}, max_span_cache_size,
-                      alloc_time);
+  TC_CHECK_EQ(span->BuildFreelist(object_size, objects_per_span, {},
+                                  max_span_cache_size, alloc_time),
+              0);
 
   TC_CHECK_EQ(span->Allocated(), 0);
 
diff --git a/tcmalloc/span_test.cc b/tcmalloc/span_test.cc
index 7b7efa5b8..0af5e5b14 100644
--- a/tcmalloc/span_test.cc
+++ b/tcmalloc/span_test.cc
@@ -62,8 +62,9 @@ class RawSpan {
     span_ = new (buf_) Span();
     span_->Init(PageIdContaining(mem_), npages);
-    span_->BuildFreelist(size, objects_per_span, {}, max_cache_size,
-                         kSpanAllocTime);
+    TC_CHECK_EQ(span_->BuildFreelist(size, objects_per_span, {}, max_cache_size,
+                                     kSpanAllocTime),
+                0);
   }
 
   ~RawSpan() {
diff --git a/tcmalloc/transfer_cache.h b/tcmalloc/transfer_cache.h
index 914ca88e1..354fd2460 100644
--- a/tcmalloc/transfer_cache.h
+++ b/tcmalloc/transfer_cache.h
@@ -112,7 +112,7 @@ class BackingTransferCache {
  public:
   void Init(int size_class) { size_class_ = size_class; }
   void InsertRange(absl::Span<void*> batch) const;
-  ABSL_MUST_USE_RESULT int RemoveRange(absl::Span<void*> batch) const;
+  [[nodiscard]] int RemoveRange(absl::Span<void*> batch) const;
   int size_class() const { return size_class_; }
 
  private:
@@ -194,7 +194,7 @@ class ShardedTransferCacheManagerBase {
     return objects;
   }
 
-  void *Pop(int size_class) {
+  [[nodiscard]] void *Pop(int size_class) {
     TC_ASSERT(subtle::percpu::IsFastNoInit());
     void *batch[1];
     const int got =
@@ -276,7 +276,7 @@ class ShardedTransferCacheManagerBase {
     return stats;
   }
 
-  int RemoveRange(int size_class, absl::Span<void*> batch) {
+  [[nodiscard]] int RemoveRange(int size_class, absl::Span<void*> batch) {
     return get_cache(size_class).RemoveRange(size_class, batch);
   }
 
@@ -423,8 +423,7 @@ class TransferCacheManager : public StaticForwarder {
     cache_[size_class].tc.InsertRange(size_class, batch);
   }
 
-  ABSL_MUST_USE_RESULT int RemoveRange(int size_class,
-                                       absl::Span<void*> batch) {
+  [[nodiscard]] int RemoveRange(int size_class, absl::Span<void*> batch) {
     return cache_[size_class].tc.RemoveRange(size_class, batch);
   }
 
@@ -558,8 +557,7 @@ class TransferCacheManager {
     freelist_[size_class].InsertRange(batch);
   }
 
-  ABSL_MUST_USE_RESULT int RemoveRange(int size_class,
-                                       absl::Span<void*> batch) {
+  [[nodiscard]] int RemoveRange(int size_class, absl::Span<void*> batch) {
     return freelist_[size_class].RemoveRange(batch);
   }
 
@@ -587,9 +585,10 @@ struct ShardedTransferCacheManager {
   constexpr ShardedTransferCacheManager(std::nullptr_t, std::nullptr_t) {}
   static constexpr void Init() {}
   static constexpr bool should_use(int size_class) { return false; }
-  static constexpr void* Pop(int size_class) { return nullptr; }
+  [[nodiscard]] static constexpr void* Pop(int size_class) { return nullptr; }
   static constexpr void Push(int size_class, void* ptr) {}
-  static constexpr int RemoveRange(int size_class, absl::Span<void*> batch) {
+  [[nodiscard]] static constexpr int RemoveRange(int size_class,
+                                                 absl::Span<void*> batch) {
     return 0;
   }
   static constexpr void InsertRange(int size_class, absl::Span<void*> batch) {}