Skip to content

Improvements to Decommit Strategies #120

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Feb 4, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/ds/helpers.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ namespace snmalloc
length == bits::next_pow2_const(length), "Must be a power of two.");

private:
T value;
T value = 0;

public:
operator T()
Expand Down
41 changes: 6 additions & 35 deletions src/mem/alloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -930,8 +930,7 @@ namespace snmalloc

if (super != nullptr)
{
Slab* slab =
super->alloc_short_slab(sizeclass, large_allocator.memory_provider);
Slab* slab = super->alloc_short_slab(sizeclass);
assert(super->is_full());
return slab;
}
Expand All @@ -941,8 +940,7 @@ namespace snmalloc
if ((allow_reserve == NoReserve) && (super == nullptr))
return nullptr;

Slab* slab =
super->alloc_short_slab(sizeclass, large_allocator.memory_provider);
Slab* slab = super->alloc_short_slab(sizeclass);
reposition_superslab(super);
return slab;
}
Expand All @@ -952,8 +950,7 @@ namespace snmalloc
if ((allow_reserve == NoReserve) && (super == nullptr))
return nullptr;

Slab* slab =
super->alloc_slab(sizeclass, large_allocator.memory_provider);
Slab* slab = super->alloc_slab(sizeclass);
reposition_superslab(super);
return slab;
}
Expand Down Expand Up @@ -1074,7 +1071,7 @@ namespace snmalloc
SlabList* sl = &small_classes[sizeclass];
Slab* slab = Metaslab::get_slab(p);
Superslab::Action a =
slab->dealloc_slow(sl, super, p, large_allocator.memory_provider);
slab->dealloc_slow(sl, super, p);
if (likely(a == Superslab::NoSlabReturn))
return;
stats().sizeclass_dealloc_slab(sizeclass);
Expand Down Expand Up @@ -1114,20 +1111,6 @@ namespace snmalloc
{
super_available.remove(super);

if constexpr (decommit_strategy == DecommitSuper)
{
large_allocator.memory_provider.notify_not_using(
pointer_offset(super, OS_PAGE_SIZE),
SUPERSLAB_SIZE - OS_PAGE_SIZE);
}
else if constexpr (decommit_strategy == DecommitSuperLazy)
{
static_assert(
pal_supports<LowMemoryNotification, MemoryProvider>,
"A lazy decommit strategy cannot be implemented on platforms "
"without low memory notifications");
}

chunkmap().clear_slab(super);
large_allocator.dealloc(super, 0);
stats().superslab_push();
Expand Down Expand Up @@ -1191,7 +1174,7 @@ namespace snmalloc
{
MEASURE_TIME(medium_dealloc, 4, 16);
stats().sizeclass_dealloc(sizeclass);
bool was_full = slab->dealloc(p, large_allocator.memory_provider);
bool was_full = slab->dealloc(p);

#ifdef CHECK_CLIENT
if (!is_multiple_of_sizeclass(
Expand All @@ -1211,12 +1194,6 @@ namespace snmalloc
sc->remove(slab);
}

if constexpr (decommit_strategy == DecommitSuper)
{
large_allocator.memory_provider.notify_not_using(
pointer_offset(slab, OS_PAGE_SIZE), SUPERSLAB_SIZE - OS_PAGE_SIZE);
}

chunkmap().clear_slab(slab);
large_allocator.dealloc(slab, 0);
stats().superslab_push();
Expand Down Expand Up @@ -1264,19 +1241,13 @@ namespace snmalloc
MEASURE_TIME(large_dealloc, 4, 16);

size_t size_bits = bits::next_pow2_bits(size);
size_t rsize = bits::one_at_bit(size_bits);
assert(rsize >= SUPERSLAB_SIZE);
assert(bits::one_at_bit(size_bits) >= SUPERSLAB_SIZE);
size_t large_class = size_bits - SUPERSLAB_BITS;

chunkmap().clear_large_size(p, size);

stats().large_dealloc(large_class);

// Cross-reference largealloc's alloc() decommitted condition.
if ((decommit_strategy != DecommitNone) || (large_class > 0))
large_allocator.memory_provider.notify_not_using(
pointer_offset(p, OS_PAGE_SIZE), rsize - OS_PAGE_SIZE);

// Initialise in order to set the correct SlabKind.
Largeslab* slab = static_cast<Largeslab*>(p);
slab->init();
Expand Down
4 changes: 0 additions & 4 deletions src/mem/allocconfig.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,6 @@ namespace snmalloc
* Decommit superslabs when they are entirely empty.
*/
DecommitSuper,
/**
* Decommit all slabs once they are empty.
*/
DecommitAll,
/**
* Decommit superslabs only when we are informed of memory pressure by the
* OS, do not decommit anything in normal operation.
Expand Down
26 changes: 23 additions & 3 deletions src/mem/largealloc.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ namespace snmalloc
*/
std::atomic<uint64_t> last_low_memory_epoch = 0;
std::atomic_flag lazy_decommit_guard;
void lazy_decommit()
SNMALLOC_SLOW_PATH void lazy_decommit()
{
// If another thread is trying to do lazy decommit, let it continue. If
// we try to parallelise this, we'll most likely end up waiting on the
Expand All @@ -93,6 +93,7 @@ namespace snmalloc
// the memory that we can. Start with the small size classes so that we
// hit cached superslabs first.
// FIXME: We probably shouldn't do this all at once.
// FIXME: We currently decommit all the sizeclasses larger than 0.
for (size_t large_class = 0; large_class < NUM_LARGE_CLASSES;
large_class++)
{
Expand Down Expand Up @@ -327,7 +328,8 @@ namespace snmalloc
void* alloc(size_t large_class, size_t size)
{
size_t rsize = bits::one_at_bit(SUPERSLAB_BITS) << large_class;
if (size == 0)
// For superslab size, we always commit the whole range.
if (large_class == 0)
size = rsize;

void* p = memory_provider.large_stack[large_class].pop();
Expand Down Expand Up @@ -362,7 +364,7 @@ namespace snmalloc
bool decommitted =
((decommit_strategy == DecommitSuperLazy) &&
(static_cast<Baseslab*>(p)->get_kind() == Decommitted)) ||
(large_class > 0) || (decommit_strategy != DecommitNone);
(large_class > 0) || (decommit_strategy == DecommitSuper);

if (decommitted)
{
Expand Down Expand Up @@ -392,6 +394,24 @@ namespace snmalloc

void dealloc(void* p, size_t large_class)
{
if constexpr (decommit_strategy == DecommitSuperLazy)
{
static_assert(
pal_supports<LowMemoryNotification, MemoryProvider>,
"A lazy decommit strategy cannot be implemented on platforms "
"without low memory notifications");
}

// Cross-reference largealloc's alloc() decommitted condition.
if ((decommit_strategy != DecommitNone)
&& (large_class != 0 || decommit_strategy == DecommitSuper))
{
size_t rsize = bits::one_at_bit(SUPERSLAB_BITS) << large_class;

memory_provider.notify_not_using(
pointer_offset(p, OS_PAGE_SIZE), rsize - OS_PAGE_SIZE);
}

stats.superslab_push();
memory_provider.large_stack[large_class].push(static_cast<Largeslab*>(p));
memory_provider.lazy_decommit_if_needed();
Expand Down
10 changes: 2 additions & 8 deletions src/mem/mediumslab.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,16 +87,13 @@ namespace snmalloc
assert(is_aligned_block<OS_PAGE_SIZE>(p, OS_PAGE_SIZE));
size = bits::align_up(size, OS_PAGE_SIZE);

if constexpr (decommit_strategy == DecommitAll)
memory_provider.template notify_using<zero_mem>(p, size);
else if constexpr (zero_mem == YesZero)
if constexpr (zero_mem == YesZero)
memory_provider.template zero<true>(p, size);

return p;
}

template<typename MemoryProvider>
bool dealloc(void* p, MemoryProvider& memory_provider)
bool dealloc(void* p)
{
assert(head > 0);

Expand All @@ -105,9 +102,6 @@ namespace snmalloc
free++;
stack[--head] = pointer_to_index(p);

if constexpr (decommit_strategy == DecommitAll)
memory_provider.notify_not_using(p, sizeclass_to_size(sizeclass));

return was_full;
}

Expand Down
4 changes: 2 additions & 2 deletions src/mem/metaslab.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ namespace snmalloc
* The list will be (allocated - needed - 1) long. The -1 is
* for the `link` element which is not in the free list.
*/
void* head;
void* head = nullptr;

/**
* How many entries are not in the free list of slab, i.e.
Expand All @@ -51,7 +51,7 @@ namespace snmalloc
/**
* How many entries have been allocated from this slab.
*/
uint16_t allocated;
uint16_t allocated = 0;

// When a slab has free space it will be on the has space list for
// that size class. We use an empty block in this slab to be the
Expand Down
27 changes: 13 additions & 14 deletions src/mem/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,8 +68,10 @@ namespace snmalloc
}
else
{
// Allocate the last object on the current page if there is one,
// and then thread the next free list worth of allocations.
bool crossed_page_boundary = false;
void* curr = nullptr;
bool commit = false;
while (true)
{
size_t newbumpptr = bumpptr + rsize;
Expand All @@ -78,15 +80,12 @@ namespace snmalloc

if (alignedbumpptr != alignednewbumpptr)
{
// We have committed once already.
if (commit)
// We have crossed a page boundary already, so
// let's stop building our free list.
if (crossed_page_boundary)
break;

memory_provider.template notify_using<NoZero>(
pointer_offset(this, alignedbumpptr),
alignednewbumpptr - alignedbumpptr);

commit = true;
crossed_page_boundary = true;
}

if (curr == nullptr)
Expand Down Expand Up @@ -179,9 +178,8 @@ namespace snmalloc
// This does not need to remove the "use" as done by the fast path.
// Returns a complex return code for managing the superslab meta data.
// i.e. This deallocation could make an entire superslab free.
template<typename MemoryProvider>
SNMALLOC_SLOW_PATH typename Superslab::Action dealloc_slow(
SlabList* sl, Superslab* super, void* p, MemoryProvider& memory_provider)
SlabList* sl, Superslab* super, void* p)
{
Metaslab& meta = super->get_meta(this);

Expand All @@ -192,9 +190,9 @@ namespace snmalloc
{
// Dealloc on the superslab.
if (is_short())
return super->dealloc_short_slab(memory_provider);
return super->dealloc_short_slab();

return super->dealloc_slab(this, memory_provider);
return super->dealloc_slab(this);
}
// Update the head and the sizeclass link.
uint16_t index = pointer_to_index(p);
Expand All @@ -213,10 +211,11 @@ namespace snmalloc
sl->remove(meta.get_link(this));

if (is_short())
return super->dealloc_short_slab(memory_provider);
return super->dealloc_short_slab();

return super->dealloc_slab(this, memory_provider);
return super->dealloc_slab(this);
}

bool is_short()
{
return Metaslab::is_short(this);
Expand Down
Loading