Skip to content

Commit

Permalink
Fix span_benchmark.
Browse files Browse the repository at this point in the history
We might not be using all kNumClasses size classes. Add a check on size so that the benchmark always runs only for valid size classes.

PiperOrigin-RevId: 572625518
Change-Id: I94f0b6166b1cf5541c838cdf439b537e81d2f561
  • Loading branch information
v-gogte authored and copybara-github committed Oct 11, 2023
1 parent b4a0a7f commit b67b083
Showing 1 changed file with 16 additions and 8 deletions.
24 changes: 16 additions & 8 deletions tcmalloc/span_benchmark.cc
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,8 @@

#include <stdlib.h>

#include <utility>
#include <vector>

#include "absl/base/internal/spinlock.h"
#include "absl/random/random.h"
#include "benchmark/benchmark.h"
#include "tcmalloc/common.h"
Expand All @@ -35,6 +33,7 @@ class RawSpan {
public:
void Init(size_t size_class) {
size_t size = tc_globals.sizemap().class_to_size(size_class);
CHECK_CONDITION(size > 0);
auto npages = Length(tc_globals.sizemap().class_to_pages(size_class));
size_t objects_per_span = npages.in_bytes() / size;

Expand All @@ -59,6 +58,7 @@ void BM_single_span(benchmark::State& state) {
const int size_class = state.range(0);

size_t size = tc_globals.sizemap().class_to_size(size_class);
CHECK_CONDITION(size > 0);
size_t batch_size = tc_globals.sizemap().num_objects_to_move(size_class);
RawSpan raw_span;
raw_span.Init(size_class);
Expand All @@ -85,6 +85,7 @@ void BM_single_span_fulldrain(benchmark::State& state) {
const int size_class = state.range(0);

size_t size = tc_globals.sizemap().class_to_size(size_class);
CHECK_CONDITION(size > 0);
size_t npages = tc_globals.sizemap().class_to_pages(size_class);
size_t batch_size = tc_globals.sizemap().num_objects_to_move(size_class);
size_t objects_per_span = npages * kPageSize / size;
Expand Down Expand Up @@ -131,7 +132,7 @@ BENCHMARK(BM_single_span)
->Arg(20)
->Arg(30)
->Arg(40)
->Arg(kNumClasses - 1);
->Arg(80);

BENCHMARK(BM_single_span_fulldrain)
->Arg(1)
Expand All @@ -146,14 +147,20 @@ BENCHMARK(BM_single_span_fulldrain)
->Arg(20)
->Arg(30)
->Arg(40)
->Arg(kNumClasses - 1);
->Arg(80);

void BM_NewDelete(benchmark::State& state) {
AllocationGuardSpinLockHolder h(&pageheap_lock);
constexpr SpanAllocInfo kSpanInfo = {/*objects_per_span=*/7,
AccessDensityPrediction::kSparse};
for (auto s : state) {
Span* sp = Span::New(PageId{0}, Length(1));
Span* sp = tc_globals.page_allocator().New(Length(1), kSpanInfo,
MemoryTag::kNormal);

benchmark::DoNotOptimize(sp);
Span::Delete(sp);

AllocationGuardSpinLockHolder h(&pageheap_lock);
tc_globals.page_allocator().Delete(sp, kSpanInfo.objects_per_span,
MemoryTag::kNormal);
}
state.SetItemsProcessed(state.iterations());
}
Expand All @@ -167,6 +174,7 @@ void BM_multiple_spans(benchmark::State& state) {
const int num_spans = 10000000;
std::vector<RawSpan> spans(num_spans);
size_t size = tc_globals.sizemap().class_to_size(size_class);
CHECK_CONDITION(size > 0);
size_t batch_size = tc_globals.sizemap().num_objects_to_move(size_class);
for (int i = 0; i < num_spans; i++) {
spans[i].Init(size_class);
Expand Down Expand Up @@ -203,7 +211,7 @@ BENCHMARK(BM_multiple_spans)
->Arg(20)
->Arg(30)
->Arg(40)
->Arg(kNumClasses - 1);
->Arg(80);

} // namespace
} // namespace tcmalloc_internal
Expand Down

0 comments on commit b67b083

Please sign in to comment.