Alternative Parallel Full GC #24621

Closed · wants to merge 2 commits
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/parallel/parMarkBitMap.inline.hpp
@@ -116,7 +116,7 @@ inline HeapWord* ParMarkBitMap::find_obj_beg(HeapWord* beg, HeapWord* end) const
inline HeapWord* ParMarkBitMap::find_obj_beg_reverse(HeapWord* beg, HeapWord* end) const {
const idx_t beg_bit = addr_to_bit(beg);
const idx_t end_bit = addr_to_bit(end);
-  const idx_t res_bit = _beg_bits.find_last_set_bit_aligned_left(beg_bit, end_bit);
+  const idx_t res_bit = _beg_bits.find_last_set_bit(beg_bit, end_bit);
return bit_to_addr(res_bit);
}

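The single change in this file makes find_obj_beg_reverse() call find_last_set_bit() instead of find_last_set_bit_aligned_left() when searching backwards for the last marked object start in [beg, end). As a rough standalone illustration of what such a reverse bitmap search computes (this is not the HotSpot BitMap API; the word layout and the return-end-when-not-found convention below are assumptions):

// Standalone sketch of a reverse "find last set bit" scan over a word-based
// bitmap. Illustrative only; not the HotSpot BitMap implementation.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

using idx_t = std::size_t;

// Returns the index of the last set bit in [beg, end), or `end` if none is set
// (assumed "not found" convention).
idx_t find_last_set_bit(const std::vector<uint64_t>& bits, idx_t beg, idx_t end) {
  for (idx_t i = end; i > beg; --i) {
    idx_t bit = i - 1;
    if (bits[bit / 64] & (uint64_t(1) << (bit % 64))) {
      return bit;
    }
  }
  return end;
}

int main() {
  std::vector<uint64_t> bits(2, 0);                  // 128-bit map
  bits[0] |= uint64_t(1) << 5;                       // mark bit 5
  bits[1] |= uint64_t(1) << 3;                       // mark bit 67
  printf("%zu\n", find_last_set_bit(bits, 0, 128));  // prints 67
  printf("%zu\n", find_last_set_bit(bits, 0, 60));   // prints 5
  printf("%zu\n", find_last_set_bit(bits, 8, 60));   // prints 60 (no set bit in range)
}
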
55 changes: 47 additions & 8 deletions src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -29,6 +29,7 @@
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
@@ -120,8 +121,14 @@ jint ParallelScavengeHeap::initialize() {
_gc_policy_counters =
new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

-  if (!PSParallelCompact::initialize_aux_data()) {
-    return JNI_ENOMEM;
+  if (UseNewCode) {
+    if (!PSParallelCompactNew::initialize_aux_data()) {
+      return JNI_ENOMEM;
+    }
+  } else {
+    if (!PSParallelCompact::initialize_aux_data()) {
+      return JNI_ENOMEM;
+    }
}

// Create CPU time counter
@@ -184,7 +191,11 @@ void ParallelScavengeHeap::post_initialize() {
CollectedHeap::post_initialize();
// Need to init the tenuring threshold
PSScavenge::initialize();
-  PSParallelCompact::post_initialize();
+  if (UseNewCode) {
+    PSParallelCompactNew::post_initialize();
+  } else {
+    PSParallelCompact::post_initialize();
+  }
PSPromotionManager::initialize();

ScavengableNMethods::initialize(&_is_scavengable);
@@ -391,7 +402,11 @@ HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
-  PSParallelCompact::invoke(clear_all_soft_refs);
+  if (UseNewCode) {
+    PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
+  } else {
+    PSParallelCompact::invoke(clear_all_soft_refs);
+  }
}

HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
@@ -429,7 +444,11 @@ HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_t
HeapMaximumCompactionInterval = 0;

const bool clear_all_soft_refs = true;
-  PSParallelCompact::invoke(clear_all_soft_refs);
+  if (UseNewCode) {
+    PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
+  } else {
+    PSParallelCompact::invoke(clear_all_soft_refs);
+  }

// Restore
HeapMaximumCompactionInterval = old_interval;
@@ -440,6 +459,14 @@ HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_t
return result;
}

+  if (UseNewCode) {
+    PSParallelCompactNew::invoke(true /* clear_soft_refs */, true /* serial */);
+    result = expand_heap_and_allocate(size, is_tlab);
+    if (result != nullptr) {
+      return result;
+    }
+  }

// What else? We might try synchronous finalization later. If the total
// space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be
@@ -535,7 +562,11 @@ void ParallelScavengeHeap::collect_at_safepoint(bool full) {
}
// Upgrade to Full-GC if young-gc fails
}
-  PSParallelCompact::invoke(clear_soft_refs);
+  if (UseNewCode) {
+    PSParallelCompactNew::invoke(clear_soft_refs, false /* serial */);
+  } else {
+    PSParallelCompact::invoke(clear_soft_refs);
+  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
@@ -682,7 +713,11 @@ void ParallelScavengeHeap::print_on_error(outputStream* st) const {
}

st->cr();
-  PSParallelCompact::print_on_error(st);
+  if (UseNewCode) {
+    PSParallelCompactNew::print_on_error(st);
+  } else {
+    PSParallelCompact::print_on_error(st);
+  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
@@ -692,7 +727,11 @@ void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
void ParallelScavengeHeap::print_tracing_info() const {
AdaptiveSizePolicyOutput::print();
log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
if (UseNewCode) {
log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
} else {
log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
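
Every full-GC entry point in this file follows the same pattern: check the UseNewCode flag and dispatch either to the existing PSParallelCompact or to the new PSParallelCompactNew, whose invoke() takes an extra serial argument. A minimal standalone sketch of that dispatch shape (only the UseNewCode check, the two class names, and the invoke() signatures come from the diff above; the stub bodies and main() are illustrative stand-ins, not HotSpot code):

// Standalone sketch of the UseNewCode dispatch pattern in parallelScavengeHeap.cpp.
#include <cstdio>

static bool UseNewCode = false;   // selects the alternative full-GC path

struct PSParallelCompact {        // stand-in for the existing implementation
  static void invoke(bool clear_all_soft_refs) {
    printf("old full GC, clear_soft_refs=%d\n", clear_all_soft_refs);
  }
};

struct PSParallelCompactNew {     // stand-in for the new implementation
  static void invoke(bool clear_all_soft_refs, bool serial) {
    printf("new full GC, clear_soft_refs=%d serial=%d\n",
           clear_all_soft_refs, serial);
  }
};

// Shape of do_full_collection() after the patch: one flag check per call site.
static void do_full_collection(bool clear_all_soft_refs) {
  if (UseNewCode) {
    PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
  } else {
    PSParallelCompact::invoke(clear_all_soft_refs);
  }
}

int main() {
  do_full_collection(true);       // existing path
  UseNewCode = true;
  do_full_collection(true);       // alternative path
}

The extra argument is why satisfy_failed_allocation can retry once more with invoke(true /* clear_soft_refs */, true /* serial */) before giving up, as shown in the hunk at line 459 above.
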
167 changes: 167 additions & 0 deletions src/hotspot/share/gc/parallel/psCompactionManagerNew.cpp
@@ -0,0 +1,167 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psCompactionManagerNew.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/shared/partialArraySplitter.inline.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "memory/iterator.inline.hpp"

PSOldGen* ParCompactionManagerNew::_old_gen = nullptr;
ParCompactionManagerNew** ParCompactionManagerNew::_manager_array = nullptr;

ParCompactionManagerNew::PSMarkTasksQueueSet* ParCompactionManagerNew::_marking_stacks = nullptr;
PartialArrayStateManager* ParCompactionManagerNew::_partial_array_state_manager = nullptr;

ObjectStartArray* ParCompactionManagerNew::_start_array = nullptr;
ParMarkBitMap* ParCompactionManagerNew::_mark_bitmap = nullptr;

PreservedMarksSet* ParCompactionManagerNew::_preserved_marks_set = nullptr;

ParCompactionManagerNew::ParCompactionManagerNew(PreservedMarks* preserved_marks,
ReferenceProcessor* ref_processor,
uint parallel_gc_threads)
:_partial_array_splitter(_partial_array_state_manager, parallel_gc_threads),
_mark_and_push_closure(this, ref_processor) {

_old_gen = ParallelScavengeHeap::old_gen();
_start_array = old_gen()->start_array();

_preserved_marks = preserved_marks;
}

void ParCompactionManagerNew::initialize(ParMarkBitMap* mbm) {
assert(ParallelScavengeHeap::heap() != nullptr, "Needed for initialization");
assert(PSParallelCompactNew::ref_processor() != nullptr, "precondition");
assert(ParallelScavengeHeap::heap()->workers().max_workers() != 0, "Not initialized?");

_mark_bitmap = mbm;

uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();

assert(_manager_array == nullptr, "Attempt to initialize twice");
_manager_array = NEW_C_HEAP_ARRAY(ParCompactionManagerNew*, parallel_gc_threads, mtGC);

assert(_partial_array_state_manager == nullptr, "Attempt to initialize twice");
_partial_array_state_manager
= new PartialArrayStateManager(parallel_gc_threads);
_marking_stacks = new PSMarkTasksQueueSet(parallel_gc_threads);

_preserved_marks_set = new PreservedMarksSet(true);
_preserved_marks_set->init(parallel_gc_threads);

// Create and register the ParCompactionManagerNew(s) for the worker threads.
for(uint i=0; i<parallel_gc_threads; i++) {
_manager_array[i] = new ParCompactionManagerNew(_preserved_marks_set->get(i),
PSParallelCompactNew::ref_processor(),
parallel_gc_threads);
marking_stacks()->register_queue(i, _manager_array[i]->marking_stack());
}
}

void ParCompactionManagerNew::flush_all_string_dedup_requests() {
uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
for (uint i=0; i<parallel_gc_threads; i++) {
_manager_array[i]->flush_string_dedup_requests();
}
}

ParCompactionManagerNew*
ParCompactionManagerNew::gc_thread_compaction_manager(uint index) {
assert(index < ParallelGCThreads, "index out of range");
assert(_manager_array != nullptr, "Sanity");
return _manager_array[index];
}

void ParCompactionManagerNew::push_objArray(oop obj) {
assert(obj->is_objArray(), "precondition");
_mark_and_push_closure.do_klass(obj->klass());

objArrayOop obj_array = objArrayOop(obj);
size_t array_length = obj_array->length();
size_t initial_chunk_size =
_partial_array_splitter.start(&_marking_stack, obj_array, nullptr, array_length);
follow_array(obj_array, 0, initial_chunk_size);
}

void ParCompactionManagerNew::process_array_chunk(PartialArrayState* state, bool stolen) {
// Access before release by claim().
oop obj = state->source();
PartialArraySplitter::Claim claim =
_partial_array_splitter.claim(state, &_marking_stack, stolen);
follow_array(objArrayOop(obj), claim._start, claim._end);
}

void ParCompactionManagerNew::follow_marking_stacks() {
ScannerTask task;
do {
// First, try to move tasks from the overflow stack into the shared buffer, so
// that other threads can steal. Otherwise process the overflow stack first.
while (marking_stack()->pop_overflow(task)) {
if (!marking_stack()->try_push_to_taskqueue(task)) {
follow_contents(task, false);
}
}
while (marking_stack()->pop_local(task)) {
follow_contents(task, false);
}
} while (!marking_stack_empty());

assert(marking_stack_empty(), "Sanity");
}

#if TASKQUEUE_STATS
void ParCompactionManagerNew::print_and_reset_taskqueue_stats() {
marking_stacks()->print_and_reset_taskqueue_stats("Marking Stacks");

auto get_pa_stats = [&](uint i) {
return _manager_array[i]->partial_array_task_stats();
};
PartialArrayTaskStats::log_set(ParallelGCThreads, get_pa_stats,
"Partial Array Task Stats");
uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().max_workers();
for (uint i = 0; i < parallel_gc_threads; ++i) {
get_pa_stats(i)->reset();
}
}

PartialArrayTaskStats* ParCompactionManagerNew::partial_array_task_stats() {
return _partial_array_splitter.stats();
}
#endif // TASKQUEUE_STATS

#ifdef ASSERT
void ParCompactionManagerNew::verify_all_marking_stack_empty() {
uint parallel_gc_threads = ParallelGCThreads;
for (uint i = 0; i < parallel_gc_threads; i++) {
assert(_manager_array[i]->marking_stack_empty(), "Marking stack should be empty");
}
}
#endif
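
The drain loop in follow_marking_stacks() above first tries to move tasks from the private overflow stack into the shared task queue so that other workers can steal them, processes a task directly only when that push fails, then drains the local queue and repeats until both are empty. A small standalone model of that loop, with std:: containers standing in for the HotSpot task-queue classes (everything below is illustrative, not the real API):

// Standalone model of the overflow-then-local drain loop in follow_marking_stacks().
#include <cstdio>
#include <deque>
#include <vector>

struct MarkingStack {
  std::vector<int> local;        // bounded shared buffer (stealable)
  std::deque<int>  overflow;     // unbounded private overflow
  size_t capacity = 4;

  bool pop_overflow(int& t) {
    if (overflow.empty()) return false;
    t = overflow.back(); overflow.pop_back(); return true;
  }
  bool try_push_to_taskqueue(int t) {
    if (local.size() >= capacity) return false;
    local.push_back(t); return true;
  }
  bool pop_local(int& t) {
    if (local.empty()) return false;
    t = local.back(); local.pop_back(); return true;
  }
  bool empty() const { return local.empty() && overflow.empty(); }
};

static void follow_contents(int task) {
  printf("process task %d\n", task);   // real marking may push new tasks here
}

static void follow_marking_stacks(MarkingStack& s) {
  int task;
  do {
    // Publish overflow tasks to the shared buffer so others could steal;
    // process a task directly only if the buffer is full.
    while (s.pop_overflow(task)) {
      if (!s.try_push_to_taskqueue(task)) {
        follow_contents(task);
      }
    }
    while (s.pop_local(task)) {
      follow_contents(task);
    }
  } while (!s.empty());
}

int main() {
  MarkingStack s;
  for (int i = 0; i < 8; i++) s.overflow.push_back(i);
  follow_marking_stacks(s);
}
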