Release free pages more gradually from HugeRegion.
We release a fraction of free and backed hugepages from HugeRegion. By default, we release up to 10% of the releasable hugepages.

PiperOrigin-RevId: 569194283
Change-Id: I86360e7a314f425ea1d78d20e2647de2f23020d4
v-gogte authored and copybara-github committed Sep 28, 2023
1 parent 2299082 commit d8a7899
Showing 4 changed files with 81 additions and 25 deletions.
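Before the per-file diffs, here is a minimal sketch of the policy the commit message describes, assuming a standalone helper (the name HugepagesToRelease is illustrative; the real computation lives in HugeRegion::Release in tcmalloc/huge_region.h below): the release fraction is clamped to [0, 1], scaled by the count of free-but-backed hugepages, truncated toward zero, and floored at one hugepage per call.

#include <algorithm>
#include <cstddef>

// Illustrative sketch, not the tcmalloc source: mirrors the count computed by
// HugeRegion::Release in the huge_region.h hunk below. Truncation happens in
// the double -> size_t conversion; the max(..., 1) floor guarantees that each
// call makes progress.
size_t HugepagesToRelease(size_t free_backed, double release_fraction) {
  return std::max<size_t>(
      free_backed * std::clamp<double>(release_fraction, 0.0, 1.0), 1);
}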
3 changes: 2 additions & 1 deletion tcmalloc/huge_page_aware_allocator.h
@@ -823,7 +823,8 @@ inline Length HugePageAwareAllocator<Forwarder>::ReleaseAtLeastNPages(
// the experiment is enabled. We can also explore releasing only a desired
// number of pages.
if (regions_.UseHugeRegionMoreOften()) {
released += regions_.ReleasePages();
constexpr double kFractionPagesToRelease = 0.1;
released += regions_.ReleasePages(kFractionPagesToRelease);
}

info_.RecordRelease(num_pages, released);
31 changes: 25 additions & 6 deletions tcmalloc/huge_page_aware_allocator_test.cc
@@ -491,7 +491,6 @@ TEST_P(HugePageAwareAllocatorTest, UseHugeRegion) {
// Check stats to confirm that pages have been allocated from huge regions.
RefreshStats();
size_t unmapped_bytes = region_stats.unmapped_bytes;
size_t backed_bytes = region_stats.system_bytes - region_stats.unmapped_bytes;
if (UseHugeRegionMoreOften()) {
EXPECT_GT(unmapped_bytes, 0);
}
@@ -505,12 +504,32 @@ TEST_P(HugePageAwareAllocatorTest, UseHugeRegion) {
EXPECT_EQ(region_stats.unmapped_bytes, unmapped_bytes);
}

// Release pages and make sure we release all the free-but-backed pages from
// huge region, more than what we ask allocator to release.
size_t backed_bytes = region_stats.system_bytes - region_stats.unmapped_bytes;

// Release pages and make sure we release a few free-but-backed pages from
// the huge region. Because we release pages from HugeRegion gradually, first
// make sure that we do not release all the free pages.
if (UseHugeRegionMoreOften()) {
absl::base_internal::SpinLockHolder l(&pageheap_lock);
Length released = allocator_->ReleaseAtLeastNPages(Length(1));
EXPECT_GT(released.in_bytes(), backed_bytes);
Length released;
{
absl::base_internal::SpinLockHolder l(&pageheap_lock);
released = allocator_->ReleaseAtLeastNPages(Length(1));
}
EXPECT_LT(released.in_bytes(), backed_bytes);
RefreshStats();
backed_bytes = region_stats.system_bytes - region_stats.unmapped_bytes;
}

while (true) {
if (!UseHugeRegionMoreOften() || backed_bytes == 0) break;
Length released;
{
absl::base_internal::SpinLockHolder l(&pageheap_lock);
released = allocator_->ReleaseAtLeastNPages(Length(1));
}
EXPECT_GT(released.in_bytes(), 0);
RefreshStats();
backed_bytes = region_stats.system_bytes - region_stats.unmapped_bytes;
}

for (auto s : small_spans) {
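Since ReleaseAtLeastNPages now passes a 10% fraction to HugeRegionSet, a single call no longer drains a region, which is why the test above loops until no free-but-backed bytes remain. A rough standalone simulation of that decay (illustrative only; the constant mirrors kFractionPagesToRelease, and the page count is hypothetical):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  constexpr double kFraction = 0.1;  // mirrors kFractionPagesToRelease
  size_t free_backed = 100;          // hypothetical free-but-backed hugepages
  int rounds = 0;
  while (free_backed > 0) {
    // Each round releases roughly 10% of what remains, but at least one
    // hugepage, so the loop always terminates.
    size_t released = std::max<size_t>(
        free_backed * std::clamp<double>(kFraction, 0.0, 1.0), 1);
    free_backed -= released;
    ++rounds;
  }
  std::printf("drained in %d rounds\n", rounds);  // about three dozen rounds
}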
40 changes: 26 additions & 14 deletions tcmalloc/huge_region.h
@@ -81,8 +81,10 @@ class HugeRegion : public TList<HugeRegion>::Elem {
// REQUIRES: [p, p + n) was the result of a previous MaybeGet.
void Put(PageId p, Length n, bool release);

// Release any hugepages that are unused but backed.
HugeLength Release();
// Releases up to <release_fraction> times the number of free-but-backed
// hugepages from the region. Note that release_fraction is clamped to the
// range [0, 1] if a value outside those bounds is specified.
HugeLength Release(double release_fraction);

// Is p located in this region?
bool contains(PageId p) { return location_.contains(p); }
@@ -171,8 +173,11 @@ class HugeRegionSet {
// Add region to the set.
void Contribute(Region* region);

// Release any hugepages that are unused but backed.
Length ReleasePages();
// Release hugepages that are unused but backed.
// Releases up to <release_fraction> times the number of free-but-backed
// hugepages from each huge region. Note that release_fraction is clamped to
// the range [0, 1] if a value outside those bounds is specified.
Length ReleasePages(double release_fraction);

void Print(Printer* out) const;
void PrintInPbtxt(PbtxtRegion* hpaa) const;
@@ -281,21 +286,28 @@ inline void HugeRegion::Put(PageId p, Length n, bool release) {
Dec(p, n, release);
}

// Release any hugepages that are unused but backed.
// TODO(b/199203282): We release all unused but backed pages from the region. We
// can explore a more sophisticated mechanism similar to Filler, that accounts
// for a recent peak while releasing pages.
inline HugeLength HugeRegion::Release() {
HugeLength r = NHugePages(0);
// Release hugepages that are unused but backed.
// TODO(b/199203282): We release up to <release_fraction> times the number of
// free but backed hugepages from the region. We can explore a more
// sophisticated mechanism similar to Filler/Cache, that accounts for a recent
// peak while releasing pages.
inline HugeLength HugeRegion::Release(double release_fraction) {
const size_t free_yet_backed = free_backed().raw_num();
size_t to_release = std::max<size_t>(
free_yet_backed * std::clamp<double>(release_fraction, 0, 1), 1);

HugeLength released = NHugePages(0);
bool should_unback[kNumHugePages] = {};
for (size_t i = 0; i < kNumHugePages; ++i) {
if (backed_[i] && pages_used_[i] == Length(0)) {
should_unback[i] = true;
++r;
++released;
}

if (released.raw_num() >= to_release) break;
}
UnbackHugepages(should_unback);
return r;
return released;
}

inline void HugeRegion::AddSpanStats(SmallSpanStats* small,
@@ -530,10 +542,10 @@ inline void HugeRegionSet<Region>::Contribute(Region* region) {
}

template <typename Region>
inline Length HugeRegionSet<Region>::ReleasePages() {
inline Length HugeRegionSet<Region>::ReleasePages(double release_fraction) {
Length released;
for (Region* region : list_) {
released += region->Release().in_pages();
released += region->Release(release_fraction).in_pages();
}
return released;
}
32 changes: 28 additions & 4 deletions tcmalloc/huge_region_test.cc
Expand Up @@ -193,6 +193,30 @@ TEST_F(HugeRegionTest, ReqsBacking) {
}
}

TEST_F(HugeRegionTest, ReleaseFrac) {
const Length n = kPagesPerHugePage;
bool from_released;
auto a = Allocate(n * 20, &from_released);
EXPECT_TRUE(from_released);

Delete(a);
ExpectUnback({p_ + NHugePages(0), NHugePages(2)});
EXPECT_EQ(NHugePages(2), region_.Release(/*release_fraction=*/0.1));
CheckMock();

ExpectUnback({p_ + NHugePages(2), NHugePages(1)});
EXPECT_EQ(NHugePages(1), region_.Release(/*release_fraction=*/0.1));
CheckMock();

ExpectUnback({p_ + NHugePages(3), NHugePages(8)});
EXPECT_EQ(NHugePages(8), region_.Release(/*release_fraction=*/0.5));
CheckMock();

ExpectUnback({p_ + NHugePages(11), NHugePages(9)});
EXPECT_EQ(NHugePages(9), region_.Release(/*release_fraction=*/1.0));
CheckMock();
}

TEST_F(HugeRegionTest, Release) {
const Length n = kPagesPerHugePage;
bool from_released;
@@ -217,18 +241,18 @@ TEST_F(HugeRegionTest, Release) {
// overlap with others.
Delete(b);
ExpectUnback({p_ + NHugePages(4), NHugePages(2)});
EXPECT_EQ(NHugePages(2), region_.Release());
EXPECT_EQ(NHugePages(2), region_.Release(/*release_fraction=*/1.0));
CheckMock();

// Now we're on exact boundaries so we should unback the whole range.
Delete(d);
ExpectUnback({p_ + NHugePages(12), NHugePages(2)});
EXPECT_EQ(NHugePages(2), region_.Release());
EXPECT_EQ(NHugePages(2), region_.Release(/*release_fraction=*/1.0));
CheckMock();

Delete(a);
ExpectUnback({p_ + NHugePages(0), NHugePages(4)});
EXPECT_EQ(NHugePages(4), region_.Release());
EXPECT_EQ(NHugePages(4), region_.Release(/*release_fraction=*/1.0));
CheckMock();

// Should work just as well with aggressive Put():
@@ -540,7 +564,7 @@ TEST_P(HugeRegionSetTest, Release) {
// huge-region-more-often feature is enabled.
EXPECT_EQ(r1->free_backed().raw_num(),
UseHugeRegionMoreOften() ? Region::size().raw_num() : 0);
Length released = set_.ReleasePages();
Length released = set_.ReleasePages(/*release_fraction=*/1.0);
stats = set_.stats();
EXPECT_EQ(released.in_bytes(),
UseHugeRegionMoreOften() ? stats.system_bytes : 0);
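The expectations in HugeRegionTest.ReleaseFrac above follow from the truncating computation shown in huge_region.h: with 20 free-but-backed hugepages, 20 × 0.1 = 2; with 18 remaining, 18 × 0.1 = 1.8, which truncates to 1; with 17 remaining, 17 × 0.5 = 8.5, truncating to 8; and with 9 remaining, a fraction of 1.0 releases all 9.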
