Skip to content

Commit

Permalink
trivial: use default member initialization for PageStats
Browse files Browse the repository at this point in the history
otherwise we are at the mercy of the caller to initialize it,
and the only call point does it by hand right now.

minor associated cleanups: the tests can use PageStats{} when
we are expecting all zeros and explicitly use FieldsAre when
asserting all of the elements. this also means they won't break
if somebody merges a bad operator==.

PiperOrigin-RevId: 571970426
Change-Id: Ia2dd1f19b1763a46053e56580ed58f3380972e1a
  • Loading branch information
patrickxia authored and copybara-github committed Oct 9, 2023
1 parent db8a827 commit 87d8e40
Show file tree
Hide file tree
Showing 3 changed files with 34 additions and 28 deletions.
2 changes: 0 additions & 2 deletions tcmalloc/internal/pageflags.cc
Original file line number Diff line number Diff line change
Expand Up @@ -189,8 +189,6 @@ std::optional<PageFlags::PageStats> PageFlags::Get(const void* const addr,
last_head_read_ = -1;

PageStats ret;
ret.bytes_stale = 0;
ret.bytes_locked = 0;
if (size == 0) return ret;
uint64_t result_flags = 0;
bool is_huge = false;
Expand Down
24 changes: 13 additions & 11 deletions tcmalloc/internal/pageflags.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,19 @@ class PageFlags {
PageFlags();
~PageFlags();

// Result of a pageflags query: how many of the queried bytes are stale
// and how many are mlocked. Default member initializers guarantee an
// all-zero state for a default-constructed instance.
struct PageStats {
  size_t bytes_stale = 0;
  size_t bytes_locked = 0;

  // Currently only exercised by tests. Once the baseline compiler is
  // C++20, this can become a defaulted comparison ("= default").
  bool operator==(const PageStats& rhs) const {
    if (bytes_stale != rhs.bytes_stale) return false;
    return bytes_locked == rhs.bytes_locked;
  }

  bool operator!=(const PageStats& rhs) const { return !(*this == rhs); }
};

// Query a span of memory starting from `addr` for `size` bytes. The memory
// span must consist of only native-size pages and THP hugepages; the behavior
// is undefined if we encounter other hugepages (such as hugetlbfs). We try to
Expand All @@ -59,17 +72,6 @@ class PageFlags {
// dynamic memory allocation would happen. In contrast, absl::StatusOr may
// dynamically allocate memory when needed. Using std::optional allows us to
// use the function in places where memory allocation is prohibited.
struct PageStats {
size_t bytes_stale;
size_t bytes_locked;

bool operator==(const PageStats& rhs) const {
return bytes_stale == rhs.bytes_stale && bytes_locked == rhs.bytes_locked;
}

bool operator!=(const PageStats& rhs) const { return !(*this == rhs); }
};

std::optional<PageStats> Get(const void* addr, size_t size);

private:
Expand Down
36 changes: 21 additions & 15 deletions tcmalloc/internal/pageflags_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ std::ostream& operator<<(std::ostream& os, const PageFlags::PageStats& s) {

namespace {

using ::testing::FieldsAre;
using ::testing::Optional;
using PageStats = PageFlags::PageStats;

Expand All @@ -84,6 +85,12 @@ constexpr size_t kPagemapEntrySize = 8;
constexpr size_t kHugePageSize = 2 << 20;
constexpr size_t kHugePageMask = ~(kHugePageSize - 1);

// Smoke test: a zero-size query must succeed and report all-zero stats.
// Skipped unconditionally because the pageflags kernel interface is not
// commonly available on test machines.
TEST(PageFlagsTest, Smoke) {
GTEST_SKIP() << "pageflags not commonly available";
// A size of 0 short-circuits in Get() and returns a default PageStats,
// which default member initialization guarantees is all zeros.
auto res = PageFlags{}.Get(nullptr, 0);
EXPECT_THAT(res, Optional(PageStats{}));
}

TEST(PageFlagsTest, Stack) {
GTEST_SKIP() << "pageflags not commonly available";

Expand All @@ -93,7 +100,7 @@ TEST(PageFlagsTest, Stack) {

PageFlags s;
EXPECT_THAT(s.Get(reinterpret_cast<void*>(buf), sizeof(buf)),
Optional(PageStats{0, 0}));
Optional(PageStats{}));
}

TEST(PageFlagsTest, Alignment) {
Expand All @@ -112,8 +119,7 @@ TEST(PageFlagsTest, Alignment) {
ASSERT_EQ(madvise(p, kPageSize * kNumPages, MADV_HUGEPAGE), 0) << errno;

PageFlags s;
EXPECT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{0, 0}))
<< p;
EXPECT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{})) << p;
munmap(p, kNumPages * kPageSize);
}
}
Expand Down Expand Up @@ -176,7 +182,7 @@ TEST(PageFlagsTest, Stale) {
ASSERT_EQ(madvise(p + 5 * kHugePageSize, kHugePageSize, MADV_HUGEPAGE), 0)
<< errno;
PageFlags s;
ASSERT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{0, 0}));
ASSERT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{}));

// This doesn't work within a short test timeout. But if you have your own
// machine with appropriate patches, you can try it out!
Expand Down Expand Up @@ -209,34 +215,34 @@ TEST(PageFlagsTest, Stale) {
// only passing when the machine you get scheduled on is out of
// hugepages.
EXPECT_THAT(mocks.Get(nullptr, num_pages * kPageSize + offset),
Optional(PageStats{num_pages * kPageSize + offset, 0}))
Optional(FieldsAre(num_pages * kPageSize + offset, 0)))
<< num_pages << "," << offset;

EXPECT_THAT(
mocks.Get((char*)fake_p - offset, num_pages * kPageSize + offset),
Optional(PageStats{num_pages * kPageSize + offset, 0}))
Optional(FieldsAre(num_pages * kPageSize + offset, 0)))
<< num_pages << "," << offset;

EXPECT_THAT(mocks.Get(fake_p, num_pages * kPageSize + offset),
Optional(PageStats{num_pages * kPageSize + offset, 0}))
Optional(FieldsAre(num_pages * kPageSize + offset, 0)))
<< num_pages << "," << offset;

EXPECT_THAT(
mocks.Get((char*)fake_p + offset, num_pages * kPageSize + offset),
Optional(PageStats{num_pages * kPageSize + offset, 0}))
Optional(FieldsAre(num_pages * kPageSize + offset, 0)))
<< num_pages << "," << offset;

EXPECT_THAT(
mocks.Get((char*)kHugePageSize + offset, num_pages * kPageSize),
Optional(PageStats{num_pages * kPageSize, 0}))
Optional(FieldsAre(num_pages * kPageSize, 0)))
<< num_pages << "," << offset;
}
}

EXPECT_THAT(mocks.Get(reinterpret_cast<char*>(2 * kHugePageSize +
16 * kPageSize + 2),
kHugePageSize * 3),
Optional(PageStats{kHugePageSize * 3, 0}));
Optional(FieldsAre(kHugePageSize * 3, 0)));
}

ASSERT_EQ(munmap(p, kNumPages * kPageSize), 0) << errno;
Expand Down Expand Up @@ -264,7 +270,7 @@ TEST(PageFlagsTest, Locked) {
}

PageFlags s;
ASSERT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{0, 0}));
ASSERT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{}));

ASSERT_EQ(madvise(p, kHugePageSize, MADV_NOHUGEPAGE), 0) << errno;
ASSERT_EQ(madvise(p + kHugePageSize, 3 * kHugePageSize, MADV_HUGEPAGE), 0)
Expand All @@ -274,7 +280,7 @@ TEST(PageFlagsTest, Locked) {
ASSERT_EQ(madvise(p + 5 * kHugePageSize, kHugePageSize, MADV_HUGEPAGE), 0)
<< errno;

ASSERT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{0, 0}));
ASSERT_THAT(s.Get(p, kPageSize * kNumPages), Optional(PageStats{}));

ASSERT_EQ(mlock(p, kPageSize * kNumPages), 0) << errno;

Expand Down Expand Up @@ -348,12 +354,12 @@ TEST(PageFlagsTest, TooManyTails) {

PageFlagsFriend s(file_path.c_str());
EXPECT_THAT(s.Get(reinterpret_cast<char*>(kHugePageSize), kHugePageSize),
Optional(PageStats{0, 0}));
Optional(PageStats{}));
EXPECT_THAT(s.Get(reinterpret_cast<char*>(kHugePageSize), 3 * kHugePageSize),
Optional(PageStats{0, 0}));
Optional(PageStats{}));

EXPECT_THAT(s.Get(reinterpret_cast<char*>(3 * kHugePageSize), kHugePageSize),
Optional(PageStats{0, 0}));
Optional(PageStats{}));
EXPECT_THAT(
s.Get(reinterpret_cast<char*>(3 * kHugePageSize), 2 * kHugePageSize),
std::nullopt);
Expand Down

0 comments on commit 87d8e40

Please sign in to comment.